rustc_codegen_ssa/mir/place.rs

use rustc_abi::{
    Align, BackendRepr, FieldIdx, FieldsShape, Size, TagEncoding, VariantIdx, Variants,
};
use rustc_middle::mir::PlaceTy;
use rustc_middle::mir::interpret::Scalar;
use rustc_middle::ty::layout::{HasTyCtxt, HasTypingEnv, LayoutOf, TyAndLayout};
use rustc_middle::ty::{self, Ty};
use rustc_middle::{bug, mir};
use tracing::{debug, instrument};

use super::operand::OperandValue;
use super::{FunctionCx, LocalRef};
use crate::common::IntPredicate;
use crate::size_of_val;
use crate::traits::*;

/// The location and extra runtime properties of the place.
///
/// Typically found in a [`PlaceRef`] or an [`OperandValue::Ref`].
///
/// As a location in memory, this has no specific type. If you want to
/// load or store it using a typed operation, use [`Self::with_type`].
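///
/// For example (a sketch; `ptr` and `layout` stand for values obtained
/// elsewhere from the backend and from layout queries):
///
/// ```ignore (illustrative)
/// let val = PlaceValue::new_sized(ptr, layout.align.abi);
/// let place = val.with_type(layout); // a typed `PlaceRef`
/// ```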
#[derive(Copy, Clone, Debug)]
pub struct PlaceValue<V> {
    /// A pointer to the contents of the place.
    pub llval: V,

    /// This place's extra data if it is unsized, or `None` if it is sized.
    pub llextra: Option<V>,

    /// The alignment we know for this place.
    pub align: Align,
}

impl<V: CodegenObject> PlaceValue<V> {
    /// Constructor for the ordinary case of `Sized` types.
    ///
    /// Sets `llextra` to `None`.
    pub fn new_sized(llval: V, align: Align) -> PlaceValue<V> {
        PlaceValue { llval, llextra: None, align }
    }

    /// Allocates a stack slot in the function for a value
    /// of the specified size and alignment.
    ///
    /// The allocation itself is untyped.
    pub fn alloca<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        bx: &mut Bx,
        size: Size,
        align: Align,
    ) -> PlaceValue<V> {
        let llval = bx.alloca(size, align);
        PlaceValue::new_sized(llval, align)
    }

    /// Creates a `PlaceRef` to this location with the given type.
    pub fn with_type<'tcx>(self, layout: TyAndLayout<'tcx>) -> PlaceRef<'tcx, V> {
        assert!(
            layout.is_unsized() || layout.is_uninhabited() || self.llextra.is_none(),
            "Had pointer metadata {:?} for sized type {layout:?}",
            self.llextra,
        );
        PlaceRef { val: self, layout }
    }

    /// Gets the pointer to this place as an [`OperandValue::Immediate`]
    /// or, for those needing metadata, an [`OperandValue::Pair`].
    ///
    /// This is the inverse of [`OperandValue::deref`].
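    ///
    /// For example, the address of a sized place is a single pointer
    /// immediate, while the address of a `[T]` place is a pair of the data
    /// pointer and the length.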
    pub fn address(self) -> OperandValue<V> {
        if let Some(llextra) = self.llextra {
            OperandValue::Pair(self.llval, llextra)
        } else {
            OperandValue::Immediate(self.llval)
        }
    }
}

#[derive(Copy, Clone, Debug)]
pub struct PlaceRef<'tcx, V> {
    /// The location and extra runtime properties of the place.
    pub val: PlaceValue<V>,

    /// The monomorphized type of this place, including variant information.
    ///
    /// You probably shouldn't use the alignment from this layout;
    /// rather you should use the `.val.align` of the actual place,
    /// which might be different from the type's normal alignment.
    pub layout: TyAndLayout<'tcx>,
}

impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
    pub fn new_sized(llval: V, layout: TyAndLayout<'tcx>) -> PlaceRef<'tcx, V> {
        PlaceRef::new_sized_aligned(llval, layout, layout.align.abi)
    }

    pub fn new_sized_aligned(
        llval: V,
        layout: TyAndLayout<'tcx>,
        align: Align,
    ) -> PlaceRef<'tcx, V> {
        assert!(layout.is_sized());
        PlaceValue::new_sized(llval, align).with_type(layout)
    }

    // FIXME(eddyb) pass something else for the name so no work is done
    // unless LLVM IR names are turned on (e.g. for `--emit=llvm-ir`).
    pub fn alloca<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        bx: &mut Bx,
        layout: TyAndLayout<'tcx>,
    ) -> Self {
        Self::alloca_size(bx, layout.size, layout)
    }

    pub fn alloca_size<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        bx: &mut Bx,
        size: Size,
        layout: TyAndLayout<'tcx>,
    ) -> Self {
        assert!(layout.is_sized(), "tried to statically allocate unsized place");
        PlaceValue::alloca(bx, size, layout.align.abi).with_type(layout)
    }

    /// Returns a place for an indirect reference to an unsized place.
    // FIXME(eddyb) pass something else for the name so no work is done
    // unless LLVM IR names are turned on (e.g. for `--emit=llvm-ir`).
    pub fn alloca_unsized_indirect<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        bx: &mut Bx,
        layout: TyAndLayout<'tcx>,
    ) -> Self {
        assert!(layout.is_unsized(), "tried to allocate indirect place for sized values");
        let ptr_ty = Ty::new_mut_ptr(bx.cx().tcx(), layout.ty);
        let ptr_layout = bx.cx().layout_of(ptr_ty);
        Self::alloca(bx, ptr_layout)
    }

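    /// Returns the number of elements in this array or slice place as a
    /// `usize`-typed backend value: a constant for arrays, or the dynamic
    /// length carried in `llextra` for slices.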
    pub fn len<Cx: ConstCodegenMethods<Value = V>>(&self, cx: &Cx) -> V {
        if let FieldsShape::Array { count, .. } = self.layout.fields {
            if self.layout.is_unsized() {
                assert_eq!(count, 0);
                self.val.llextra.unwrap()
            } else {
                cx.const_usize(count)
            }
        } else {
            bug!("unexpected layout `{:#?}` in PlaceRef::len", self.layout)
        }
    }
}

impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
    /// Access a field, at a point when the value's case is known.
    pub fn project_field<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        self,
        bx: &mut Bx,
        ix: usize,
    ) -> Self {
        let field = self.layout.field(bx.cx(), ix);
        let offset = self.layout.fields.offset(ix);
        let effective_field_align = self.val.align.restrict_for_offset(offset);

        // `simple` is called when we don't need to adjust the offset to
        // the dynamic alignment of the field.
        let mut simple = || {
            let llval = if offset.bytes() == 0 {
                self.val.llval
            } else {
                bx.inbounds_ptradd(self.val.llval, bx.const_usize(offset.bytes()))
            };
            let val = PlaceValue {
                llval,
                llextra: if bx.cx().tcx().type_has_metadata(field.ty, bx.cx().typing_env()) {
                    self.val.llextra
                } else {
                    None
                },
                align: effective_field_align,
            };
            val.with_type(field)
        };

        // Simple cases, which don't need DST adjustment:
        //   * known alignment - sized types, `[T]`, `str`
        //   * offset 0 -- rounding up to alignment cannot change the offset
        // Note that looking at `field.align` is incorrect since that is not necessarily equal
        // to the dynamic alignment of the type.
        match field.ty.kind() {
            _ if field.is_sized() => return simple(),
            ty::Slice(..) | ty::Str => return simple(),
            _ if offset.bytes() == 0 => return simple(),
            _ => {}
        }

        // We need to get the pointer manually now.
        // We do this by casting to a `*i8`, then offsetting it by the appropriate amount.
        // We do this instead of, say, simply adjusting the pointer from the result of a GEP
        // because the field may have an arbitrary alignment in the LLVM representation.
        //
        // To demonstrate:
        //
        //     struct Foo<T: ?Sized> {
        //         x: u16,
        //         y: T
        //     }
        //
        // The type `Foo<Foo<dyn Trait>>` is represented in LLVM as
        // `{ u16, { u16, u8 }}`, meaning that the `y` field has 16-bit alignment.
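        //
        // As a sketch of what the code below computes: if `y`'s static offset
        // is 2 but its dynamic alignment turns out to be 4 at runtime, the real
        // offset is `align_to(2, 4) == 4`, which no statically-typed GEP could
        // have produced.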

        let meta = self.val.llextra;

        let unaligned_offset = bx.cx().const_usize(offset.bytes());

        // Get the alignment of the field
        let (_, mut unsized_align) = size_of_val::size_and_align_of_dst(bx, field.ty, meta);

        // For packed types, we need to cap alignment.
        if let ty::Adt(def, _) = self.layout.ty.kind()
            && let Some(packed) = def.repr().pack
        {
            let packed = bx.const_usize(packed.bytes());
            let cmp = bx.icmp(IntPredicate::IntULT, unsized_align, packed);
            unsized_align = bx.select(cmp, unsized_align, packed)
        }

        // Bump the unaligned offset up to the appropriate alignment
        let offset = round_up_const_value_to_alignment(bx, unaligned_offset, unsized_align);

        debug!("struct_field_ptr: DST field offset: {:?}", offset);

        // Adjust pointer.
        let ptr = bx.inbounds_ptradd(self.val.llval, offset);
        let val =
            PlaceValue { llval: ptr, llextra: self.val.llextra, align: effective_field_align };
        val.with_type(field)
    }

    /// Sets the discriminant for a new value of the given case of the given
    /// representation.
    pub fn codegen_set_discr<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        &self,
        bx: &mut Bx,
        variant_index: VariantIdx,
    ) {
        match codegen_tag_value(bx.cx(), variant_index, self.layout) {
            Err(UninhabitedVariantError) => {
                // We play it safe by using a well-defined `abort`, but we could go for immediate UB
                // if that turns out to be helpful.
                bx.abort();
            }
            Ok(Some((tag_field, imm))) => {
                let tag_place = self.project_field(bx, tag_field.as_usize());
                OperandValue::Immediate(imm).store(bx, tag_place);
            }
            Ok(None) => {}
        }
    }

    pub fn project_index<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        &self,
        bx: &mut Bx,
        llindex: V,
    ) -> Self {
        // Statically compute the offset if we can, otherwise just use the element size,
        // as this will yield the lowest alignment.
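        // E.g. for an 8-aligned `[u32; N]` place, a statically-known even index
        // keeps alignment 8, but an unknown index only guarantees the element
        // alignment of 4.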
        let layout = self.layout.field(bx, 0);
        let offset = if let Some(llindex) = bx.const_to_opt_uint(llindex) {
            layout.size.checked_mul(llindex, bx).unwrap_or(layout.size)
        } else {
            layout.size
        };

        let llval = bx.inbounds_nuw_gep(bx.cx().backend_type(layout), self.val.llval, &[llindex]);
        let align = self.val.align.restrict_for_offset(offset);
        PlaceValue::new_sized(llval, align).with_type(layout)
    }

    pub fn project_downcast<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        &self,
        bx: &mut Bx,
        variant_index: VariantIdx,
    ) -> Self {
        let mut downcast = *self;
        downcast.layout = self.layout.for_variant(bx.cx(), variant_index);
        downcast
    }

    pub fn project_type<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        &self,
        bx: &mut Bx,
        ty: Ty<'tcx>,
    ) -> Self {
        let mut downcast = *self;
        downcast.layout = bx.cx().layout_of(ty);
        downcast
    }

    pub fn storage_live<Bx: BuilderMethods<'a, 'tcx, Value = V>>(&self, bx: &mut Bx) {
        bx.lifetime_start(self.val.llval, self.layout.size);
    }

    pub fn storage_dead<Bx: BuilderMethods<'a, 'tcx, Value = V>>(&self, bx: &mut Bx) {
        bx.lifetime_end(self.val.llval, self.layout.size);
    }
}

impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
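    /// Lowers a MIR place (a base local plus a sequence of projections) to a
    /// backend [`PlaceRef`], applying each projection element in turn.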
    #[instrument(level = "trace", skip(self, bx))]
    pub fn codegen_place(
        &mut self,
        bx: &mut Bx,
        place_ref: mir::PlaceRef<'tcx>,
    ) -> PlaceRef<'tcx, Bx::Value> {
        let cx = self.cx;
        let tcx = self.cx.tcx();

        let mut base = 0;
        let mut cg_base = match self.locals[place_ref.local] {
            LocalRef::Place(place) => place,
            LocalRef::UnsizedPlace(place) => bx.load_operand(place).deref(cx),
            LocalRef::Operand(..) => {
                if place_ref.is_indirect_first_projection() {
                    base = 1;
                    let cg_base = self.codegen_consume(
                        bx,
                        mir::PlaceRef { projection: &place_ref.projection[..0], ..place_ref },
                    );
                    cg_base.deref(bx.cx())
                } else {
                    bug!("using operand local {:?} as place", place_ref);
                }
            }
            LocalRef::PendingOperand => {
                bug!("using still-pending operand local {:?} as place", place_ref);
            }
        };
        for elem in place_ref.projection[base..].iter() {
            cg_base = match *elem {
                mir::ProjectionElem::Deref => bx.load_operand(cg_base).deref(bx.cx()),
                mir::ProjectionElem::Field(ref field, _) => {
                    assert!(
                        !cg_base.layout.ty.is_any_ptr(),
                        "Bad PlaceRef: destructuring pointers should use cast/PtrMetadata, \
                         but tried to access field {field:?} of pointer {cg_base:?}",
                    );
                    cg_base.project_field(bx, field.index())
                }
                mir::ProjectionElem::OpaqueCast(ty) => {
                    bug!("encountered OpaqueCast({ty}) in codegen")
                }
                mir::ProjectionElem::Subtype(ty) => cg_base.project_type(bx, self.monomorphize(ty)),
                mir::ProjectionElem::UnwrapUnsafeBinder(ty) => {
                    cg_base.project_type(bx, self.monomorphize(ty))
                }
                mir::ProjectionElem::Index(index) => {
                    let index = &mir::Operand::Copy(mir::Place::from(index));
                    let index = self.codegen_operand(bx, index);
                    let llindex = index.immediate();
                    cg_base.project_index(bx, llindex)
                }
                mir::ProjectionElem::ConstantIndex { offset, from_end: false, min_length: _ } => {
                    let lloffset = bx.cx().const_usize(offset);
                    cg_base.project_index(bx, lloffset)
                }
                mir::ProjectionElem::ConstantIndex { offset, from_end: true, min_length: _ } => {
                    let lloffset = bx.cx().const_usize(offset);
                    let lllen = cg_base.len(bx.cx());
                    let llindex = bx.sub(lllen, lloffset);
                    cg_base.project_index(bx, llindex)
                }
                mir::ProjectionElem::Subslice { from, to, from_end } => {
                    let mut subslice = cg_base.project_index(bx, bx.cx().const_usize(from));
                    let projected_ty =
                        PlaceTy::from_ty(cg_base.layout.ty).projection_ty(tcx, *elem).ty;
                    subslice.layout = bx.cx().layout_of(self.monomorphize(projected_ty));

                    if subslice.layout.is_unsized() {
                        assert!(from_end, "slice subslices should be `from_end`");
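                        // The new length is `len - (from + to)`: e.g. the
                        // subslice `&s[2..s.len() - 3]` has `from = 2`, `to = 3`.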
                        subslice.val.llextra = Some(
                            bx.sub(cg_base.val.llextra.unwrap(), bx.cx().const_usize(from + to)),
                        );
                    }

                    subslice
                }
                mir::ProjectionElem::Downcast(_, v) => cg_base.project_downcast(bx, v),
            };
        }
        debug!("codegen_place(place={:?}) => {:?}", place_ref, cg_base);
        cg_base
    }

    pub fn monomorphized_place_ty(&self, place_ref: mir::PlaceRef<'tcx>) -> Ty<'tcx> {
        let tcx = self.cx.tcx();
        let place_ty = place_ref.ty(self.mir, tcx);
        self.monomorphize(place_ty.ty)
    }
}

fn round_up_const_value_to_alignment<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
    bx: &mut Bx,
    value: Bx::Value,
    align: Bx::Value,
) -> Bx::Value {
    // In pseudo code:
    //
    //     if value & (align - 1) == 0 {
    //         value
    //     } else {
    //         (value & !(align - 1)) + align
    //     }
    //
    // Usually this is written without branches as
    //
    //     (value + align - 1) & !(align - 1)
    //
    // But this formula cannot take advantage of constant `value`. E.g. if `value` is known
    // at compile time to be `1`, this expression should be optimized to `align`. However,
419    // that `align` is a power of two, it cannot perform this optimization.
    // that `align` is a power of two, it cannot perform this optimization.
    //
    // Instead we use
    //
    //     value + (-value & (align - 1))
    //
    // Since `align` is used only once, the expression can be optimized. For `value = 0`
    // it's optimized to `0` even in debug mode.
    //
    // NB: The previous version of this code used
    //
    //     (value + align - 1) & -align
    //
    // Even though `-align == !(align - 1)`, LLVM failed to optimize this even for
    // `value = 0`. Bug report: https://bugs.llvm.org/show_bug.cgi?id=48559
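    //
    // A worked example of the formula we do use (concrete numbers for
    // illustration): for `value = 6`, `align = 8`, `-6 & 7 == 2`, so the result
    // is `6 + 2 == 8`; for `value = 8`, `-8 & 7 == 0` and `value` is unchanged.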
    let one = bx.const_usize(1);
    let align_minus_1 = bx.sub(align, one);
    let neg_value = bx.neg(value);
    let offset = bx.and(neg_value, align_minus_1);
    bx.add(value, offset)
}

/// Calculates the value that needs to be stored to mark the discriminant.
///
/// This might be `None` for a `struct` or a niched variant (like `Some(&3)`).
///
/// If it's `Some`, it returns the value to store and the field in which to
/// store it. Note that this value is *not* the same as the discriminant, in
/// general, as it might be a niche value or have a different size.
///
/// It might also be an `Err` because the variant is uninhabited.
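///
/// For example (sketching `Option<&T>`): setting `Some` requires no store,
/// since the non-null payload pointer itself distinguishes it, so this
/// returns `Ok(None)`; setting `None` returns the niche value (a null
/// pointer) and the field in which to store it.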
pub(super) fn codegen_tag_value<'tcx, V>(
    cx: &impl CodegenMethods<'tcx, Value = V>,
    variant_index: VariantIdx,
    layout: TyAndLayout<'tcx>,
) -> Result<Option<(FieldIdx, V)>, UninhabitedVariantError> {
    // By checking uninhabited-ness first we don't need to worry about types
    // like `(u32, !)` which are single-variant but weird.
    if layout.for_variant(cx, variant_index).is_uninhabited() {
        return Err(UninhabitedVariantError);
    }

    Ok(match layout.variants {
        Variants::Empty => unreachable!("we already handled uninhabited types"),
        Variants::Single { index } => {
            assert_eq!(index, variant_index);
            None
        }

        Variants::Multiple { tag_encoding: TagEncoding::Direct, tag_field, .. } => {
            let discr = layout.ty.discriminant_for_variant(cx.tcx(), variant_index);
            let to = discr.unwrap().val;
            let tag_layout = layout.field(cx, tag_field.as_usize());
            let tag_llty = cx.immediate_backend_type(tag_layout);
            let imm = cx.const_uint_big(tag_llty, to);
            Some((tag_field, imm))
        }
        Variants::Multiple {
            tag_encoding: TagEncoding::Niche { untagged_variant, ref niche_variants, niche_start },
            tag_field,
            ..
        } => {
            if variant_index != untagged_variant {
                let niche_layout = layout.field(cx, tag_field.as_usize());
                let niche_llty = cx.immediate_backend_type(niche_layout);
                let BackendRepr::Scalar(scalar) = niche_layout.backend_repr else {
                    bug!("expected a scalar placeref for the niche");
                };
                // We are supposed to compute `niche_value.wrapping_add(niche_start)` wrapping
                // around the `niche`'s type.
                // The easiest way to do that is to do wrapping arithmetic on `u128` and then
                // masking off any extra bits that occur because we did the arithmetic with too many bits.
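                //
                // As a hypothetical illustration: with `niche_variants = 1..=2`
                // and `niche_start = 2`, variant index 2 stores
                // `(2 - 1) + 2 == 3` in the tag field.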
                let niche_value = variant_index.as_u32() - niche_variants.start().as_u32();
                let niche_value = (niche_value as u128).wrapping_add(niche_start);
                let niche_value = niche_value & niche_layout.size.unsigned_int_max();

                let niche_llval = cx.scalar_to_backend(
                    Scalar::from_uint(niche_value, niche_layout.size),
                    scalar,
                    niche_llty,
                );
                Some((tag_field, niche_llval))
            } else {
                None
            }
        }
    })
}

#[derive(Debug)]
pub(super) struct UninhabitedVariantError;