miri/borrow_tracker/stacked_borrows/mod.rs

//! Implements "Stacked Borrows".  See <https://github.com/rust-lang/unsafe-code-guidelines/blob/master/wip/stacked-borrows.md>
//! for further information.

pub mod diagnostics;
mod item;
mod stack;

use std::fmt::Write;
use std::sync::atomic::AtomicBool;
use std::{cmp, mem};

use rustc_abi::Size;
use rustc_data_structures::fx::FxHashSet;
use rustc_middle::mir::{Mutability, RetagKind};
use rustc_middle::ty::layout::HasTypingEnv;
use rustc_middle::ty::{self, Ty};

use self::diagnostics::{RetagCause, RetagInfo};
pub use self::item::{Item, Permission};
pub use self::stack::Stack;
use crate::borrow_tracker::stacked_borrows::diagnostics::{
    AllocHistory, DiagnosticCx, DiagnosticCxBuilder,
};
use crate::borrow_tracker::{AccessKind, GlobalStateInner, ProtectorKind};
use crate::concurrency::data_race::{NaReadType, NaWriteType};
use crate::*;

pub type AllocState = Stacks;

/// Extra per-allocation state.
#[derive(Clone, Debug)]
pub struct Stacks {
    // Even reading memory can have effects on the stack, so we need a `RefCell` here.
    stacks: DedupRangeMap<Stack>,
    /// Stores past operations on this allocation.
    history: AllocHistory,
    /// The set of tags that have been exposed inside this allocation.
    exposed_tags: FxHashSet<BorTag>,
}

/// Indicates which permissions to grant to the retagged pointer.
#[derive(Clone, Debug)]
enum NewPermission {
    Uniform {
        perm: Permission,
        access: Option<AccessKind>,
        protector: Option<ProtectorKind>,
    },
    FreezeSensitive {
        freeze_perm: Permission,
        freeze_access: Option<AccessKind>,
        freeze_protector: Option<ProtectorKind>,
        nonfreeze_perm: Permission,
        nonfreeze_access: Option<AccessKind>,
        // nonfreeze_protector must always be None
    },
}
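
// A minimal illustration (not part of this module) of the `FreezeSensitive` case:
// retagging `&(i32, Cell<i32>)` grants `SharedReadOnly` (with a read access) on the
// frozen `i32` part, but only `SharedReadWrite` (with no access and no protector)
// on the `Cell<i32>` part:
//
//     use std::cell::Cell;
//     let pair = (0i32, Cell::new(0i32));
//     let r = &pair;  // SharedReadOnly for `r.0`, SharedReadWrite for `r.1`
//     pair.1.set(1);  // ok: writes inside the `UnsafeCell` do not pop `r`'s item there
//     let _x = r.0;   // ok: the frozen part still has our SharedReadOnly on top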

impl NewPermission {
    /// A key function: determine the permissions to grant at a retag for the given kind of
    /// reference/pointer.
    fn from_ref_ty<'tcx>(ty: Ty<'tcx>, kind: RetagKind, cx: &crate::MiriInterpCx<'tcx>) -> Self {
        let protector = (kind == RetagKind::FnEntry).then_some(ProtectorKind::StrongProtector);
        match ty.kind() {
            ty::Ref(_, pointee, Mutability::Mut) => {
                if kind == RetagKind::TwoPhase {
                    // We mostly just give up on two-phase borrows, and treat these exactly like raw pointers.
                    assert!(protector.is_none()); // RetagKind can't be both FnEntry and TwoPhase.
                    NewPermission::Uniform {
                        perm: Permission::SharedReadWrite,
                        access: None,
                        protector: None,
                    }
                } else if pointee.is_unpin(*cx.tcx, cx.typing_env()) {
                    // A regular full mutable reference. On `FnEntry` this is `noalias` and `dereferenceable`.
                    NewPermission::Uniform {
                        perm: Permission::Unique,
                        access: Some(AccessKind::Write),
                        protector,
                    }
                } else {
                    // `!Unpin` dereferences do not get `noalias` nor `dereferenceable`.
                    NewPermission::Uniform {
                        perm: Permission::SharedReadWrite,
                        access: None,
                        protector: None,
                    }
                }
            }
            ty::RawPtr(_, Mutability::Mut) => {
                assert!(protector.is_none()); // RetagKind can't be both FnEntry and Raw.
                // Mutable raw pointer. No access, not protected.
                NewPermission::Uniform {
                    perm: Permission::SharedReadWrite,
                    access: None,
                    protector: None,
                }
            }
            ty::Ref(_, _pointee, Mutability::Not) => {
                // Shared references. If frozen, these get `noalias` and `dereferenceable`; otherwise neither.
                NewPermission::FreezeSensitive {
                    freeze_perm: Permission::SharedReadOnly,
                    freeze_access: Some(AccessKind::Read),
                    freeze_protector: protector,
                    nonfreeze_perm: Permission::SharedReadWrite,
                    // Inside UnsafeCell, this does *not* count as an access, as there
                    // might actually be mutable references further up the stack that
                    // we have to keep alive.
                    nonfreeze_access: None,
                    // We do not protect inside UnsafeCell.
                    // This fixes https://github.com/rust-lang/rust/issues/55005.
                }
            }
            ty::RawPtr(_, Mutability::Not) => {
                assert!(protector.is_none()); // RetagKind can't be both FnEntry and Raw.
                // `*const T` pointers, when freshly created, are read-only in the frozen part.
                NewPermission::FreezeSensitive {
                    freeze_perm: Permission::SharedReadOnly,
                    freeze_access: Some(AccessKind::Read),
                    freeze_protector: None,
                    nonfreeze_perm: Permission::SharedReadWrite,
                    nonfreeze_access: None,
                }
            }
            _ => unreachable!(),
        }
    }
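
    // A minimal illustration (not part of this module) of the `!Unpin` carve-out in
    // `from_ref_ty`: a self-referential value must be allowed to alias with the `&mut`
    // through which it is polled, so such a retag only grants `SharedReadWrite`.
    // `MyFuture` is a hypothetical `!Unpin` type (e.g. one containing `PhantomPinned`):
    //
    //     fn poll_ish(fut: &mut MyFuture) {
    //         // `fut` gets SharedReadWrite, no access, no protector: internal
    //         // pointers into `*fut` stay valid across this reborrow.
    //     }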

    fn from_box_ty<'tcx>(ty: Ty<'tcx>, kind: RetagKind, cx: &crate::MiriInterpCx<'tcx>) -> Self {
        // `ty` is not the `Box` but the field of the `Box` with this pointer (due to allocator handling).
        let pointee = ty.builtin_deref(true).unwrap();
        if pointee.is_unpin(*cx.tcx, cx.typing_env()) {
            // A regular box. On `FnEntry` this is `noalias`, but not `dereferenceable` (hence only
            // a weak protector).
            NewPermission::Uniform {
                perm: Permission::Unique,
                access: Some(AccessKind::Write),
                protector: (kind == RetagKind::FnEntry).then_some(ProtectorKind::WeakProtector),
            }
        } else {
            // `!Unpin` boxes do not get `noalias` nor `dereferenceable`.
            NewPermission::Uniform {
                perm: Permission::SharedReadWrite,
                access: None,
                protector: None,
            }
        }
    }

    fn protector(&self) -> Option<ProtectorKind> {
        match self {
            NewPermission::Uniform { protector, .. } => *protector,
            NewPermission::FreezeSensitive { freeze_protector, .. } => *freeze_protector,
        }
    }
}

// # Stacked Borrows Core Begin

/// We need to make at least the following things true:
///
/// U1: After creating a `Uniq`, it is at the top.
/// U2: If the top is `Uniq`, accesses must be through that `Uniq` or remove it.
/// U3: If an access happens with a `Uniq`, it requires the `Uniq` to be in the stack.
///
/// F1: After creating a `&`, the parts outside `UnsafeCell` have our `SharedReadOnly` on top.
/// F2: If a write access happens, it pops the `SharedReadOnly`.  This has two pieces:
///     F2a: If a write happens granted by an item below our `SharedReadOnly`, the `SharedReadOnly`
///          gets popped.
///     F2b: No `SharedReadWrite` or `Unique` will ever be added on top of our `SharedReadOnly`.
/// F3: If an access happens with an `&` outside `UnsafeCell`,
///     it requires the `SharedReadOnly` to still be in the stack.
///
/// Core relation on `Permission` to define which accesses are allowed.
impl Permission {
    /// This defines, for a given permission, whether it permits the given kind of access.
    fn grants(self, access: AccessKind) -> bool {
        // Disabled grants nothing. Otherwise, all items grant read access, and all except
        // SharedReadOnly grant write access.
        self != Permission::Disabled
            && (access == AccessKind::Read || self != Permission::SharedReadOnly)
    }
}
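
// A minimal illustration (not part of this module) of rules F1, F2a, and F3 in user code:
//
//     let mut x = 0u8;
//     let raw = &mut x as *mut u8;   // reborrow: SharedReadWrite for `raw`
//     let shared = unsafe { &*raw }; // F1: our SharedReadOnly is pushed on top
//     unsafe { *raw = 1 };           // a write granted below the SharedReadOnly pops it (F2a)
//     let _val = *shared;            // UB: the SharedReadOnly is no longer in the stack (F3)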

/// Determines whether an item was invalidated by a conflicting access, or by deallocation.
#[derive(Copy, Clone, Debug)]
enum ItemInvalidationCause {
    Conflict,
    Dealloc,
}

/// Core per-location operations: access, dealloc, reborrow.
impl<'tcx> Stack {
    /// Find the first write-incompatible item above the given one --
    /// i.e., find the height to which the stack will be truncated when writing to `granting`.
    fn find_first_write_incompatible(&self, granting: usize) -> usize {
        let perm = self.get(granting).unwrap().perm();
        match perm {
            Permission::SharedReadOnly => bug!("Cannot use SharedReadOnly for writing"),
            Permission::Disabled => bug!("Cannot use Disabled for anything"),
            Permission::Unique => {
                // On a write, everything above us is incompatible.
                granting + 1
            }
            Permission::SharedReadWrite => {
                // The SharedReadWrite items *just* above us are compatible, so we skip those.
                let mut idx = granting + 1;
                while let Some(item) = self.get(idx) {
                    if item.perm() == Permission::SharedReadWrite {
                        // Go on.
                        idx += 1;
                    } else {
                        // Found the first incompatible one!
                        break;
                    }
                }
                idx
            }
        }
    }
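
    // A worked example (not part of this module), with stack indices starting at 0:
    //
    //     stack: [ Unique(0), SharedReadWrite(1), SharedReadWrite(2), SharedReadOnly(3) ]
    //
    //     find_first_write_incompatible(0) == 1  // Unique: everything above is incompatible
    //     find_first_write_incompatible(1) == 3  // skips the adjacent SRW at index 2
    //     find_first_write_incompatible(2) == 3  // same SRW group, same truncation height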

    /// The given item was invalidated -- check its protectors for whether that will cause UB.
    fn item_invalidated(
        item: &Item,
        global: &GlobalStateInner,
        dcx: &DiagnosticCx<'_, '_, 'tcx>,
        cause: ItemInvalidationCause,
    ) -> InterpResult<'tcx> {
        if !global.tracked_pointer_tags.is_empty() {
            dcx.check_tracked_tag_popped(item, global);
        }

        if !item.protected() {
            return interp_ok(());
        }

        // We store tags twice, once in global.protected_tags and once in each call frame.
        // We do this because consulting a single global set in this function is faster
        // than attempting to search all call frames in the program for the `FrameExtra`
        // (if any) which is protecting the popped tag.
        //
        // This duplication trades off making `end_call` slower to make this function faster. This
        // trade-off is profitable in practice for a combination of two reasons.
        // 1. A single protected tag can (and does in some programs) protect thousands of `Item`s.
        //    Therefore, adding overhead in function call/return is profitable even if it only
        //    saves a little work in this function.
        // 2. Most frames protect only one or two tags. So this duplicative global turns a search
        //    which is about linear in the number of protected tags in the program (and a slow
        //    one, because the tags in the frames aren't contiguous) into a constant-time check.
        if let Some(&protector_kind) = global.protected_tags.get(&item.tag()) {
            // The only way this is okay is if the protector is weak and we are deallocating with
            // the right pointer.
            let allowed = matches!(cause, ItemInvalidationCause::Dealloc)
                && matches!(protector_kind, ProtectorKind::WeakProtector);
            if !allowed {
                return Err(dcx.protector_error(item, protector_kind)).into();
            }
        }
        interp_ok(())
    }
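
    // A minimal illustration (not part of this module) of the protector rules checked
    // above: `Box` arguments get only a weak protector at `FnEntry` (see `from_box_ty`),
    // so deallocating through the protected pointer itself is allowed, while references
    // get a strong protector, so invalidating them during the call is UB:
    //
    //     fn take_box(b: Box<i32>) { drop(b); }   // ok: Dealloc + WeakProtector is allowed
    //     fn take_ref(r: &mut i32) { /* deallocating `*r` here would be UB */ }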

    /// Test if a memory `access` using pointer tagged `tag` is granted.
    /// If yes, return the index of the item that granted it.
    /// `range` refers to the entire operation, and `offset` refers to the specific offset into the
    /// allocation that we are currently checking.
    fn access(
        &mut self,
        access: AccessKind,
        tag: ProvenanceExtra,
        global: &GlobalStateInner,
        dcx: &mut DiagnosticCx<'_, '_, 'tcx>,
        exposed_tags: &FxHashSet<BorTag>,
    ) -> InterpResult<'tcx> {
        // Two main steps: Find granting item, remove incompatible items above.

        // Step 1: Find the granting item.
        let granting_idx =
            self.find_granting(access, tag, exposed_tags).map_err(|()| dcx.access_error(self))?;

        // Step 2: Remove incompatible items above it.  Make sure we do not remove protected
        // items.  Behavior differs for reads and writes.
        // In case of wildcards/unknown matches, we remove everything that is *definitely* gone.
        if access == AccessKind::Write {
            // Remove everything above the write-compatible items, like a proper stack. This makes sure read-only and unique
            // pointers become invalid on write accesses (ensures F2a, and ensures U2 for write accesses).
            let first_incompatible_idx = if let Some(granting_idx) = granting_idx {
                // The granting_idx *might* be approximate, but any lower idx would remove more
                // things. Even if this is a Unique and the lower idx is an SRW (which removes
                // less), there is an SRW group boundary here so strictly more would get removed.
                self.find_first_write_incompatible(granting_idx)
            } else {
                // We are writing to something in the unknown part.
                // There is an SRW group boundary between the unknown and the known, so everything is incompatible.
                0
            };
            self.pop_items_after(first_incompatible_idx, |item| {
                Stack::item_invalidated(&item, global, dcx, ItemInvalidationCause::Conflict)?;
                dcx.log_invalidation(item.tag());
                interp_ok(())
            })?;
        } else {
            // On a read, *disable* all `Unique` above the granting item.  This ensures U2 for read accesses.
            // The reason this is not following the stack discipline (by removing the first Unique and
            // everything on top of it) is that in `let raw = &mut *x as *mut _; let _val = *x;`, the second statement
            // would pop the `Unique` from the reborrow of the first statement, and subsequently also pop the
            // `SharedReadWrite` for `raw`.
            // This pattern occurs a lot in the standard library: create a raw pointer, then also create a shared
            // reference and use that.
            // We *disable* instead of removing `Unique` to avoid "connecting" two neighbouring blocks of SRWs.
            let first_incompatible_idx = if let Some(granting_idx) = granting_idx {
                // The granting_idx *might* be approximate, but any lower idx would disable more things.
                granting_idx + 1
            } else {
                // We are reading from something in the unknown part. That means *all* `Unique` we know about are dead now.
                0
            };
            self.disable_uniques_starting_at(first_incompatible_idx, |item| {
                Stack::item_invalidated(&item, global, dcx, ItemInvalidationCause::Conflict)?;
                dcx.log_invalidation(item.tag());
                interp_ok(())
            })?;
        }

        // If this was an approximate action, we now collapse everything into an unknown.
        if granting_idx.is_none() || matches!(tag, ProvenanceExtra::Wildcard) {
            // Compute the upper bound of the items that remain.
            // (This is why we did all the work above: to reduce the items we have to consider here.)
            let mut max = BorTag::one();
            for i in 0..self.len() {
                let item = self.get(i).unwrap();
                // Skip disabled items, they cannot be matched anyway.
                if !matches!(item.perm(), Permission::Disabled) {
                    // We are looking for a strict upper bound, so add 1 to this tag.
                    max = cmp::max(item.tag().succ().unwrap(), max);
                }
            }
            if let Some(unk) = self.unknown_bottom() {
                max = cmp::max(unk, max);
            }
            // Use `max` as the new strict upper bound for everything.
            trace!(
                "access: forgetting stack to upper bound {max} due to wildcard or unknown access",
                max = max.get(),
            );
            self.set_unknown_bottom(max);
        }

        // Done.
        interp_ok(())
    }
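
    // A step-by-step trace (not part of this module) of the read-path example from the
    // comments above, `let raw = &mut *x as *mut _; let _val = *x;` with `x: &mut i32`:
    //
    //     after `&mut *x`:    [ .., Unique(x), Unique(tmp) ]
    //     after the cast:     [ .., Unique(x), Unique(tmp), SharedReadWrite(raw) ]
    //     after reading `*x`: [ .., Unique(x), Disabled,    SharedReadWrite(raw) ]
    //
    // The read through `x` merely *disables* the intermediate `Unique` instead of popping
    // it, so `raw` remains usable afterwards.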

    /// Deallocate a location: Like a write access, but also there must be no
    /// active protectors at all because we will remove all items.
    fn dealloc(
        &mut self,
        tag: ProvenanceExtra,
        global: &GlobalStateInner,
        dcx: &mut DiagnosticCx<'_, '_, 'tcx>,
        exposed_tags: &FxHashSet<BorTag>,
    ) -> InterpResult<'tcx> {
        // Step 1: Make a write access.
        // As part of this we do regular protector checking, i.e. even weakly protected items cause UB when popped.
        self.access(AccessKind::Write, tag, global, dcx, exposed_tags)?;

        // Step 2: Pretend we remove the remaining items, checking if any are strongly protected.
        for idx in (0..self.len()).rev() {
            let item = self.get(idx).unwrap();
            Stack::item_invalidated(&item, global, dcx, ItemInvalidationCause::Dealloc)?;
        }

        interp_ok(())
    }

    /// Derive a new pointer from one with the given tag.
    ///
    /// `access` indicates which kind of memory access this retag itself should correspond to.
    fn grant(
        &mut self,
        derived_from: ProvenanceExtra,
        new: Item,
        access: Option<AccessKind>,
        global: &GlobalStateInner,
        dcx: &mut DiagnosticCx<'_, '_, 'tcx>,
        exposed_tags: &FxHashSet<BorTag>,
    ) -> InterpResult<'tcx> {
        dcx.start_grant(new.perm());

        // Compute where to put the new item.
        // Either way, we ensure that we insert the new item in a way such that between
        // `derived_from` and the new one, there are only items *compatible with* `derived_from`.
        let new_idx = if let Some(access) = access {
            // Simple case: We are just a regular memory access, and then push our thing on top,
            // like a regular stack.
            // This ensures F2b for `Unique`, by removing offending `SharedReadOnly`.
            self.access(access, derived_from, global, dcx, exposed_tags)?;

            // We insert "as far up as possible": We know only compatible items are remaining
            // on top of `derived_from`, and we want the new item at the top so that we
            // get the strongest possible guarantees.
            // This ensures U1 and F1.
            self.len()
        } else {
            // The tricky case: creating a new SRW permission without actually being an access.
            assert!(new.perm() == Permission::SharedReadWrite);

            // First we figure out which item grants our parent (`derived_from`) this kind of access.
            // We use that to determine where to put the new item.
            let granting_idx = self
                .find_granting(AccessKind::Write, derived_from, exposed_tags)
                .map_err(|()| dcx.grant_error(self))?;

            let (Some(granting_idx), ProvenanceExtra::Concrete(_)) = (granting_idx, derived_from)
            else {
                // The parent is a wildcard pointer or matched the unknown bottom.
                // This is approximate. Nobody knows what happened, so forget everything.
                // The new thing is SRW anyway, so we cannot push it "on top of the unknown part"
                // (for all we know, it might join an SRW group inside the unknown).
                trace!(
                    "reborrow: forgetting stack entirely due to SharedReadWrite reborrow from wildcard or unknown"
                );
                self.set_unknown_bottom(global.next_ptr_tag);
                return interp_ok(());
            };

            // SharedReadWrite can coexist with "existing loans", meaning they don't act like a write
            // access.  Instead of popping the stack, we insert the item at the place the stack would
            // be popped to (i.e., we insert it above all the write-compatible items).
            // This ensures F2b by adding the new item below any potentially existing `SharedReadOnly`.
            self.find_first_write_incompatible(granting_idx)
        };

        // Put the new item there.
        trace!("reborrow: adding item {:?}", new);
        self.insert(new_idx, new);
        interp_ok(())
    }
}
// # Stacked Borrows Core End
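
// A minimal illustration (not part of this module) of the access-less `grant` path:
// a two-phase borrow is granted as `SharedReadWrite` with `access: None`, so creating
// it does not invalidate existing loans. This is what lets the classic
//
//     let mut v = vec![0];
//     v.push(v.len());  // the two-phase `&mut v` for `push` coexists with the
//                       // shared borrow of `v` in the argument
//
// pass under Stacked Borrows.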

/// Integration with the BorTag garbage collector
impl Stacks {
    pub fn remove_unreachable_tags(&mut self, live_tags: &FxHashSet<BorTag>) {
        for (_stack_range, stack) in self.stacks.iter_mut_all() {
            stack.retain(live_tags);
        }
        self.history.retain(live_tags);
    }
}

impl VisitProvenance for Stacks {
    fn visit_provenance(&self, visit: &mut VisitWith<'_>) {
        for tag in self.exposed_tags.iter().copied() {
            visit(None, Some(tag));
        }
    }
}

/// Map per-stack operations to higher-level per-location-range operations.
impl<'tcx> Stacks {
    /// Creates a new stack with an initial tag. For diagnostic purposes, we also need to know
    /// the [`AllocId`] of the allocation this is associated with.
    fn new(
        size: Size,
        perm: Permission,
        tag: BorTag,
        id: AllocId,
        machine: &MiriMachine<'_>,
    ) -> Self {
        let item = Item::new(tag, perm, false);
        let stack = Stack::new(item);

        Stacks {
            stacks: DedupRangeMap::new(size, stack),
            history: AllocHistory::new(id, item, machine),
            exposed_tags: FxHashSet::default(),
        }
    }

    /// Call `f` on every stack in the range.
    fn for_each(
        &mut self,
        range: AllocRange,
        mut dcx_builder: DiagnosticCxBuilder<'_, 'tcx>,
        mut f: impl FnMut(
            &mut Stack,
            &mut DiagnosticCx<'_, '_, 'tcx>,
            &mut FxHashSet<BorTag>,
        ) -> InterpResult<'tcx>,
    ) -> InterpResult<'tcx> {
        for (stack_range, stack) in self.stacks.iter_mut(range.start, range.size) {
            let mut dcx = dcx_builder.build(&mut self.history, Size::from_bytes(stack_range.start));
            f(stack, &mut dcx, &mut self.exposed_tags)?;
            dcx_builder = dcx.unbuild();
        }
        interp_ok(())
    }
}

/// Glue code to connect with Miri Machine Hooks
impl Stacks {
    pub fn new_allocation(
        id: AllocId,
        size: Size,
        state: &mut GlobalStateInner,
        kind: MemoryKind,
        machine: &MiriMachine<'_>,
    ) -> Self {
        let (base_tag, perm) = match kind {
            // New unique borrow. This tag is not accessible by the program,
            // so it will only ever be used when using the local directly (i.e.,
            // not through a pointer). That is, whenever we directly write to a local, this will pop
            // everything else off the stack, invalidating all previous pointers,
            // and in particular, *all* raw pointers.
            MemoryKind::Stack => (state.root_ptr_tag(id, machine), Permission::Unique),
            // Everything else is shared by default.
            _ => (state.root_ptr_tag(id, machine), Permission::SharedReadWrite),
        };
        Stacks::new(size, perm, base_tag, id, machine)
    }
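
    // A minimal illustration (not part of this module) of the `MemoryKind::Stack` case
    // above: the root tag of a local is `Unique`, so a direct write to the local
    // invalidates every pointer previously derived from it:
    //
    //     let mut x = 0u8;
    //     let p = &mut x as *mut u8;
    //     x = 1;              // direct write through the root Unique tag
    //     unsafe { *p = 2 };  // UB: `p`'s item was popped by the write above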

    #[inline(always)]
    pub fn before_memory_read<'ecx, 'tcx>(
        &mut self,
        alloc_id: AllocId,
        tag: ProvenanceExtra,
        range: AllocRange,
        machine: &'ecx MiriMachine<'tcx>,
    ) -> InterpResult<'tcx>
    where
        'tcx: 'ecx,
    {
        trace!(
            "read access with tag {:?}: {:?}, size {}",
            tag,
            interpret::Pointer::new(alloc_id, range.start),
            range.size.bytes()
        );
        let dcx = DiagnosticCxBuilder::read(machine, tag, range);
        let state = machine.borrow_tracker.as_ref().unwrap().borrow();
        self.for_each(range, dcx, |stack, dcx, exposed_tags| {
            stack.access(AccessKind::Read, tag, &state, dcx, exposed_tags)
        })
    }

    #[inline(always)]
    pub fn before_memory_write<'tcx>(
        &mut self,
        alloc_id: AllocId,
        tag: ProvenanceExtra,
        range: AllocRange,
        machine: &MiriMachine<'tcx>,
    ) -> InterpResult<'tcx> {
        trace!(
            "write access with tag {:?}: {:?}, size {}",
            tag,
            interpret::Pointer::new(alloc_id, range.start),
            range.size.bytes()
        );
        let dcx = DiagnosticCxBuilder::write(machine, tag, range);
        let state = machine.borrow_tracker.as_ref().unwrap().borrow();
        self.for_each(range, dcx, |stack, dcx, exposed_tags| {
            stack.access(AccessKind::Write, tag, &state, dcx, exposed_tags)
        })
    }

    #[inline(always)]
    pub fn before_memory_deallocation<'tcx>(
        &mut self,
        alloc_id: AllocId,
        tag: ProvenanceExtra,
        size: Size,
        machine: &MiriMachine<'tcx>,
    ) -> InterpResult<'tcx> {
        trace!("deallocation with tag {:?}: {:?}, size {}", tag, alloc_id, size.bytes());
        let dcx = DiagnosticCxBuilder::dealloc(machine, tag);
        let state = machine.borrow_tracker.as_ref().unwrap().borrow();
        self.for_each(alloc_range(Size::ZERO, size), dcx, |stack, dcx, exposed_tags| {
            stack.dealloc(tag, &state, dcx, exposed_tags)
        })?;
        interp_ok(())
    }
}

/// Retagging/reborrowing.  There is some policy in here, such as which permissions
/// to grant for which references, and when to add protectors.
impl<'tcx, 'ecx> EvalContextPrivExt<'tcx, 'ecx> for crate::MiriInterpCx<'tcx> {}
trait EvalContextPrivExt<'tcx, 'ecx>: crate::MiriInterpCxExt<'tcx> {
    /// Returns the provenance that should be used henceforth.
    fn sb_reborrow(
        &mut self,
        place: &MPlaceTy<'tcx>,
        size: Size,
        new_perm: NewPermission,
        new_tag: BorTag,
        retag_info: RetagInfo, // diagnostics info about this retag
    ) -> InterpResult<'tcx, Option<Provenance>> {
        let this = self.eval_context_mut();
        // Ensure we bail out if the pointer goes out-of-bounds (see miri#1050).
        this.check_ptr_access(place.ptr(), size, CheckInAllocMsg::Dereferenceable)?;

        // It is crucial that this gets called on all code paths, to ensure we track tag creation.
        let log_creation = |this: &MiriInterpCx<'tcx>,
                            loc: Option<(AllocId, Size, ProvenanceExtra)>| // alloc_id, base_offset, orig_tag
         -> InterpResult<'tcx> {
            let global = this.machine.borrow_tracker.as_ref().unwrap().borrow();
            let ty = place.layout.ty;
            if global.tracked_pointer_tags.contains(&new_tag) {
                let mut kind_str = String::new();
                match new_perm {
                    NewPermission::Uniform { perm, .. } =>
                        write!(kind_str, "{perm:?} permission").unwrap(),
                    NewPermission::FreezeSensitive { freeze_perm, .. } if ty.is_freeze(*this.tcx, this.typing_env()) =>
                        write!(kind_str, "{freeze_perm:?} permission").unwrap(),
                    NewPermission::FreezeSensitive { freeze_perm, nonfreeze_perm, .. } =>
                        write!(kind_str, "{freeze_perm:?}/{nonfreeze_perm:?} permission for frozen/non-frozen parts").unwrap(),
                }
                write!(kind_str, " (pointee type {ty})").unwrap();
                this.emit_diagnostic(NonHaltingDiagnostic::CreatedPointerTag(
                    new_tag.inner(),
                    Some(kind_str),
                    loc.map(|(alloc_id, base_offset, orig_tag)| (alloc_id, alloc_range(base_offset, size), orig_tag)),
                ));
            }
            drop(global); // don't hold that reference any longer than we have to

            let Some((alloc_id, base_offset, orig_tag)) = loc else {
                return interp_ok(());
            };

            let alloc_kind = this.get_alloc_info(alloc_id).kind;
            match alloc_kind {
                AllocKind::LiveData => {
                    // This should have alloc_extra data, but `get_alloc_extra` can still fail
                    // if converting this alloc_id from a global to a local one
                    // uncovers a non-supported `extern static`.
                    let extra = this.get_alloc_extra(alloc_id)?;
                    let mut stacked_borrows = extra.borrow_tracker_sb().borrow_mut();
                    // Note that we create a *second* `DiagnosticCxBuilder` below for the actual retag.
                    // FIXME: can this be done cleaner?
                    let dcx = DiagnosticCxBuilder::retag(
                        &this.machine,
                        retag_info,
                        new_tag,
                        orig_tag,
                        alloc_range(base_offset, size),
                    );
                    let mut dcx = dcx.build(&mut stacked_borrows.history, base_offset);
                    dcx.log_creation();
                    if new_perm.protector().is_some() {
                        dcx.log_protector();
                    }
                }
                AllocKind::Function | AllocKind::VTable | AllocKind::TypeId | AllocKind::Dead => {
                    // No stacked borrows on these allocations.
                }
            }
            interp_ok(())
        };

        if size == Size::ZERO {
            trace!(
                "reborrow of size 0: reference {:?} derived from {:?} (pointee {})",
                new_tag,
                place.ptr(),
                place.layout.ty,
            );
            // Don't update any stacks for a zero-sized access; borrow stacks are per-byte and this
            // touches no bytes so there is no stack to put this tag in.
            // However, if the pointer for this operation points at a real allocation we still
            // record where it was created so that we can issue a helpful diagnostic if there is an
            // attempt to use it for a non-zero-sized access.
            // Dangling slices are a common case here: it is valid to get their length, but (with
            // raw-pointer tagging, for example) all calls to `get_unchecked` on them are invalid.
            if let Ok((alloc_id, base_offset, orig_tag)) = this.ptr_try_get_alloc_id(place.ptr(), 0)
            {
                log_creation(this, Some((alloc_id, base_offset, orig_tag)))?;
                // Still give it the new provenance, it got retagged after all. If this was a
                // wildcard pointer, this will fix the AllocId and make future accesses with this
                // reference to other allocations UB, but that's fine: due to subobject provenance,
                // *all* future accesses with this reference should be UB!
                return interp_ok(Some(Provenance::Concrete { alloc_id, tag: new_tag }));
            } else {
                // This pointer doesn't come with an AllocId. :shrug:
                log_creation(this, None)?;
                // Provenance unchanged. Ideally we'd make this pointer UB to use like above,
                // but there's no easy way to do that.
                return interp_ok(place.ptr().provenance);
            }
        }

        // The pointer *must* have a valid AllocId to continue, so we want to resolve this to
        // a concrete ID even for wildcard pointers.
        let (alloc_id, base_offset, orig_tag) = this.ptr_get_alloc_id(place.ptr(), 0)?;
        log_creation(this, Some((alloc_id, base_offset, orig_tag)))?;

        trace!(
            "reborrow: reference {:?} derived from {:?} (pointee {}): {:?}, size {}",
            new_tag,
            orig_tag,
            place.layout.ty,
            interpret::Pointer::new(alloc_id, base_offset),
            size.bytes()
        );

        if let Some(protect) = new_perm.protector() {
            // See comment in `Stack::item_invalidated` for why we store the tag twice.
            this.frame_mut()
                .extra
                .borrow_tracker
                .as_mut()
                .unwrap()
                .protected_tags
                .push((alloc_id, new_tag));
            this.machine
                .borrow_tracker
                .as_mut()
                .unwrap()
                .get_mut()
                .protected_tags
                .insert(new_tag, protect);
        }

        // Update the stacks, according to the new permission information we are given.
        match new_perm {
            NewPermission::Uniform { perm, access, protector } => {
                assert!(perm != Permission::SharedReadOnly);
                // Here we can avoid `borrow()` calls because we have mutable references.
                // Note that this asserts that the allocation is mutable -- but since we are creating a
                // mutable pointer, that seems reasonable.
                let (alloc_extra, machine) = this.get_alloc_extra_mut(alloc_id)?;
                let stacked_borrows = alloc_extra.borrow_tracker_sb_mut().get_mut();
                let item = Item::new(new_tag, perm, protector.is_some());
                let range = alloc_range(base_offset, size);
                let global = machine.borrow_tracker.as_ref().unwrap().borrow();
                let dcx = DiagnosticCxBuilder::retag(
                    machine,
                    retag_info,
                    new_tag,
                    orig_tag,
                    alloc_range(base_offset, size),
                );
                stacked_borrows.for_each(range, dcx, |stack, dcx, exposed_tags| {
                    stack.grant(orig_tag, item, access, &global, dcx, exposed_tags)
                })?;
                drop(global);
                if let Some(access) = access {
                    assert_eq!(access, AccessKind::Write);
                    // Make sure the data race model also knows about this.
                    // FIXME(genmc): Ensure this is still done in GenMC mode. Check for other places where GenMC may need to be informed.
                    if let Some(data_race) = alloc_extra.data_race.as_vclocks_mut() {
                        data_race.write_non_atomic(
                            alloc_id,
                            range,
                            NaWriteType::Retag,
                            Some(place.layout.ty),
                            machine,
                        )?;
                    }
                }
            }
            NewPermission::FreezeSensitive {
                freeze_perm,
                freeze_access,
                freeze_protector,
                nonfreeze_perm,
                nonfreeze_access,
            } => {
                // The permission is not uniform across the entire range!
                // We need a frozen-sensitive reborrow.
                // We have to use shared references to alloc/memory_extra here since
                // `visit_freeze_sensitive` needs to access the global state.
                let alloc_extra = this.get_alloc_extra(alloc_id)?;
                let mut stacked_borrows = alloc_extra.borrow_tracker_sb().borrow_mut();
                this.visit_freeze_sensitive(place, size, |mut range, frozen| {
                    // Adjust range.
                    range.start += base_offset;
                    // We are only ever `SharedReadOnly` inside the frozen bits.
                    let (perm, access, protector) = if frozen {
                        (freeze_perm, freeze_access, freeze_protector)
                    } else {
                        (nonfreeze_perm, nonfreeze_access, None)
                    };
                    let item = Item::new(new_tag, perm, protector.is_some());
                    let global = this.machine.borrow_tracker.as_ref().unwrap().borrow();
                    let dcx = DiagnosticCxBuilder::retag(
                        &this.machine,
                        retag_info,
                        new_tag,
                        orig_tag,
                        alloc_range(base_offset, size),
                    );
                    stacked_borrows.for_each(range, dcx, |stack, dcx, exposed_tags| {
                        stack.grant(orig_tag, item, access, &global, dcx, exposed_tags)
                    })?;
                    drop(global);
                    if let Some(access) = access {
                        assert_eq!(access, AccessKind::Read);
                        // Make sure the data race model also knows about this.
                        if let Some(data_race) = alloc_extra.data_race.as_vclocks_ref() {
                            data_race.read_non_atomic(
                                alloc_id,
                                range,
                                NaReadType::Retag,
                                Some(place.layout.ty),
                                &this.machine,
                            )?;
                        }
                    }
                    interp_ok(())
                })?;
            }
        }

        interp_ok(Some(Provenance::Concrete { alloc_id, tag: new_tag }))
    }

    fn sb_retag_place(
        &mut self,
        place: &MPlaceTy<'tcx>,
        new_perm: NewPermission,
        info: RetagInfo, // diagnostics info about this retag
    ) -> InterpResult<'tcx, MPlaceTy<'tcx>> {
        let this = self.eval_context_mut();
        let size = this.size_and_align_of_val(place)?.map(|(size, _)| size);
        // FIXME: If we cannot determine the size (because the unsized tail is an `extern type`),
        // bail out -- we cannot reasonably figure out which memory range to reborrow.
        // See https://github.com/rust-lang/unsafe-code-guidelines/issues/276.
        let size = match size {
            Some(size) => size,
            None => {
                static DEDUP: AtomicBool = AtomicBool::new(false);
                if !DEDUP.swap(true, std::sync::atomic::Ordering::Relaxed) {
                    this.emit_diagnostic(NonHaltingDiagnostic::ExternTypeReborrow);
                }
                return interp_ok(place.clone());
            }
        };

        // Compute the new borrow.
        let new_tag = this.machine.borrow_tracker.as_mut().unwrap().get_mut().new_ptr();

        // Reborrow.
        let new_prov = this.sb_reborrow(place, size, new_perm, new_tag, info)?;

        // Adjust the place.
        // (If the closure gets called, that means the old provenance was `Some`, and hence the new
        // one must also be `Some`.)
        interp_ok(place.clone().map_provenance(|_| new_prov.unwrap()))
    }

    /// Retags an individual pointer, returning the retagged version.
    /// `new_perm` indicates which permissions to grant to the new pointer.
    fn sb_retag_reference(
        &mut self,
        val: &ImmTy<'tcx>,
        new_perm: NewPermission,
        info: RetagInfo, // diagnostics info about this retag
    ) -> InterpResult<'tcx, ImmTy<'tcx>> {
        let this = self.eval_context_mut();
        let place = this.ref_to_mplace(val)?;
        let new_place = this.sb_retag_place(&place, new_perm, info)?;
        interp_ok(ImmTy::from_immediate(new_place.to_ref(this), val.layout))
    }
}

impl<'tcx> EvalContextExt<'tcx> for crate::MiriInterpCx<'tcx> {}
pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
    fn sb_retag_ptr_value(
        &mut self,
        kind: RetagKind,
        val: &ImmTy<'tcx>,
    ) -> InterpResult<'tcx, ImmTy<'tcx>> {
        let this = self.eval_context_mut();
        let new_perm = NewPermission::from_ref_ty(val.layout.ty, kind, this);
        let cause = match kind {
            RetagKind::TwoPhase => RetagCause::TwoPhase,
            RetagKind::FnEntry => unreachable!(),
            RetagKind::Raw | RetagKind::Default => RetagCause::Normal,
        };
        this.sb_retag_reference(val, new_perm, RetagInfo { cause, in_field: false })
    }

    fn sb_retag_place_contents(
        &mut self,
        kind: RetagKind,
        place: &PlaceTy<'tcx>,
    ) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();
        let retag_cause = match kind {
            RetagKind::TwoPhase => unreachable!(), // can only happen in `retag_ptr_value`
            RetagKind::FnEntry => RetagCause::FnEntry,
            RetagKind::Default | RetagKind::Raw => RetagCause::Normal,
        };
        let mut visitor = RetagVisitor { ecx: this, kind, retag_cause, in_field: false };
        return visitor.visit_value(place);

        // The actual visitor.
        struct RetagVisitor<'ecx, 'tcx> {
            ecx: &'ecx mut MiriInterpCx<'tcx>,
            kind: RetagKind,
            retag_cause: RetagCause,
            in_field: bool,
        }
        impl<'ecx, 'tcx> RetagVisitor<'ecx, 'tcx> {
            #[inline(always)] // yes this helps in our benchmarks
            fn retag_ptr_inplace(
                &mut self,
                place: &PlaceTy<'tcx>,
                new_perm: NewPermission,
            ) -> InterpResult<'tcx> {
                let val = self.ecx.read_immediate(&self.ecx.place_to_op(place)?)?;
                let val = self.ecx.sb_retag_reference(
                    &val,
                    new_perm,
                    RetagInfo { cause: self.retag_cause, in_field: self.in_field },
                )?;
                self.ecx.write_immediate(*val, place)?;
                interp_ok(())
            }
        }
        impl<'ecx, 'tcx> ValueVisitor<'tcx, MiriMachine<'tcx>> for RetagVisitor<'ecx, 'tcx> {
            type V = PlaceTy<'tcx>;

            #[inline(always)]
            fn ecx(&self) -> &MiriInterpCx<'tcx> {
                self.ecx
            }

            fn visit_box(&mut self, box_ty: Ty<'tcx>, place: &PlaceTy<'tcx>) -> InterpResult<'tcx> {
                // Only boxes for the global allocator get any special treatment.
                if box_ty.is_box_global(*self.ecx.tcx) {
                    // Boxes get a weak protector, since they may be deallocated.
                    let new_perm = NewPermission::from_box_ty(place.layout.ty, self.kind, self.ecx);
                    self.retag_ptr_inplace(place, new_perm)?;
                }
                interp_ok(())
            }

            fn visit_value(&mut self, place: &PlaceTy<'tcx>) -> InterpResult<'tcx> {
                // If this place is smaller than a pointer, we know that it can't contain any
                // pointers we need to retag, so we can stop recursion early.
                // This optimization is crucial for ZSTs, because they can contain way more fields
                // than we can ever visit.
                if place.layout.is_sized() && place.layout.size < self.ecx.pointer_size() {
                    return interp_ok(());
                }

                // Check the type of this value to see what to do with it (retag, or recurse).
                match place.layout.ty.kind() {
                    ty::Ref(..) | ty::RawPtr(..) => {
                        if matches!(place.layout.ty.kind(), ty::Ref(..))
                            || self.kind == RetagKind::Raw
                        {
                            let new_perm =
                                NewPermission::from_ref_ty(place.layout.ty, self.kind, self.ecx);
                            self.retag_ptr_inplace(place, new_perm)?;
                        }
                    }
                    ty::Adt(adt, _) if adt.is_box() => {
                        // Recurse for boxes, they require some tricky handling and will end up in `visit_box` above.
                        // (Yes, this means we technically also recursively retag the allocator itself,
                        // even if field retagging is not enabled. *shrug*)
                        self.walk_value(place)?;
                    }
                    _ => {
                        // Not a reference/pointer/box. Recurse.
                        let in_field = mem::replace(&mut self.in_field, true); // remember and restore the old value
                        self.walk_value(place)?;
                        self.in_field = in_field;
                    }
                }

                interp_ok(())
            }
        }
    }

    /// Protect a place so that it cannot be used any more for the duration of the current function
    /// call.
    ///
    /// This is used to ensure soundness of in-place function argument/return passing.
    fn sb_protect_place(&mut self, place: &MPlaceTy<'tcx>) -> InterpResult<'tcx, MPlaceTy<'tcx>> {
        let this = self.eval_context_mut();

        // Retag it. With protection! That is the entire point.
        let new_perm = NewPermission::Uniform {
            perm: Permission::Unique,
            access: Some(AccessKind::Write),
            protector: Some(ProtectorKind::StrongProtector),
        };
        this.sb_retag_place(
            place,
            new_perm,
            RetagInfo { cause: RetagCause::InPlaceFnPassing, in_field: false },
        )
    }

    /// Mark the given tag as exposed. It was found on a pointer with the given AllocId.
    fn sb_expose_tag(&self, alloc_id: AllocId, tag: BorTag) -> InterpResult<'tcx> {
        let this = self.eval_context_ref();

        // Function pointers and dead objects don't have an alloc_extra, so we ignore them.
        // This is okay because accessing them is UB anyway, no need for any Stacked Borrows checks.
        // NOT using `get_alloc_extra_mut` since this might be a read-only allocation!
        let kind = this.get_alloc_info(alloc_id).kind;
        match kind {
            AllocKind::LiveData => {
                // This should have alloc_extra data, but `get_alloc_extra` can still fail
                // if converting this alloc_id from a global to a local one
                // uncovers a non-supported `extern static`.
                let alloc_extra = this.get_alloc_extra(alloc_id)?;
                trace!("Stacked Borrows tag {tag:?} exposed in {alloc_id:?}");
                alloc_extra.borrow_tracker_sb().borrow_mut().exposed_tags.insert(tag);
            }
            AllocKind::Function | AllocKind::VTable | AllocKind::TypeId | AllocKind::Dead => {
                // No stacked borrows on these allocations.
            }
        }
        interp_ok(())
    }

    fn print_stacks(&mut self, alloc_id: AllocId) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();
        let alloc_extra = this.get_alloc_extra(alloc_id)?;
        let stacks = alloc_extra.borrow_tracker_sb().borrow();
        for (range, stack) in stacks.stacks.iter_all() {
            print!("{range:?}: [");
            if let Some(bottom) = stack.unknown_bottom() {
                print!(" unknown-bottom(..{bottom:?})");
            }
            for i in 0..stack.len() {
                let item = stack.get(i).unwrap();
                print!(" {:?}{:?}", item.perm(), item.tag());
            }
            println!(" ]");
        }
        interp_ok(())
    }
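
    // A sketch (not captured from a real run) of the shape of `print_stacks` output,
    // assuming the usual `Debug` formatting of ranges, permissions, and tags:
    //
    //     0x0..0x4: [ Unique<1341> SharedReadOnly<1342> ]
    //     0x4..0x8: [ unknown-bottom(..<1339>) SharedReadWrite<1340> ]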
}