rustc_codegen_ssa/mir/operand.rs

use std::fmt;

use rustc_abi as abi;
use rustc_abi::{
    Align, BackendRepr, FIRST_VARIANT, FieldIdx, Primitive, Size, TagEncoding, VariantIdx, Variants,
};
use rustc_middle::mir::interpret::{Pointer, Scalar, alloc_range};
use rustc_middle::mir::{self, ConstValue};
use rustc_middle::ty::Ty;
use rustc_middle::ty::layout::{LayoutOf, TyAndLayout};
use rustc_middle::{bug, span_bug};
use rustc_session::config::OptLevel;
use tracing::{debug, instrument};

use super::place::{PlaceRef, PlaceValue};
use super::rvalue::transmute_immediate;
use super::{FunctionCx, LocalRef};
use crate::common::IntPredicate;
use crate::traits::*;
use crate::{MemFlags, size_of_val};

/// The representation of a Rust value. The enum variant is in fact
/// uniquely determined by the value's type, but is kept as a
/// safety check.
#[derive(Copy, Clone, Debug)]
pub enum OperandValue<V> {
    /// A reference to the actual operand. The data is guaranteed
    /// to be valid for the operand's lifetime.
    /// The second value, if any, is the extra data (vtable or length)
    /// which indicates that it refers to an unsized rvalue.
    ///
    /// An `OperandValue` *must* be this variant for any type for which
    /// [`LayoutTypeCodegenMethods::is_backend_ref`] returns `true`.
    /// (That basically amounts to "isn't one of the other variants".)
    ///
    /// This holds a [`PlaceValue`] (like a [`PlaceRef`] does) with a pointer
    /// to the location holding the value. The type behind that pointer is the
    /// one returned by [`LayoutTypeCodegenMethods::backend_type`].
    Ref(PlaceValue<V>),
    /// A single LLVM immediate value.
    ///
    /// An `OperandValue` *must* be this variant for any type for which
    /// [`LayoutTypeCodegenMethods::is_backend_immediate`] returns `true`.
    /// The backend value in this variant must be the *immediate* backend type,
    /// as returned by [`LayoutTypeCodegenMethods::immediate_backend_type`].
    Immediate(V),
    /// A pair of immediate LLVM values. Used by wide pointers too.
    ///
    /// # Invariants
    /// - For `Pair(a, b)`, `a` is always at offset 0 within the value, though
    ///   it may have a `FieldIdx` greater than zero if preceded by 1-ZST fields.
    /// - `b` is never at offset 0, since neither component of the pair is a 1-ZST.
    /// - `a` and `b` always have distinct `FieldIdx`s, but the indices need not
    ///   be adjacent, and `b`'s may even be lower than `a`'s: any number of
    ///   1-ZST fields may sit between them without affecting the data layout
    ///   that determines whether `Pair` is used.
    /// - An `OperandValue` *must* be this variant for any type for which
    ///   [`LayoutTypeCodegenMethods::is_backend_scalar_pair`] returns `true`.
    /// - The backend values in this variant must be the *immediate* backend types,
    ///   as returned by [`LayoutTypeCodegenMethods::scalar_pair_element_backend_type`]
    ///   with `immediate: true`.
    Pair(V, V),
    /// A value taking no bytes, and which therefore needs no LLVM value at all.
    ///
    /// If you ever need a `V` to pass to something, get a fresh poison value
    /// from [`ConstCodegenMethods::const_poison`].
    ///
    /// An `OperandValue` *must* be this variant for any type for which
    /// `is_zst` on its `Layout` returns `true`. Note however that
    /// these values can still require alignment.
    ZeroSized,
}
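
// For intuition, an illustrative (not normative) mapping on a typical 64-bit
// target: `u64` is an `Immediate`, `&[u8]` is a `Pair` of data pointer and
// length, `[u8; 32]` is a `Ref`, and `()` is `ZeroSized`. The authoritative
// answer always comes from the layout queries named in the variant docs above.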

impl<V: CodegenObject> OperandValue<V> {
    /// Treat this value as a pointer and return the data pointer and
    /// optional metadata as backend values.
    ///
    /// If you're making a place, use [`Self::deref`] instead.
    pub(crate) fn pointer_parts(self) -> (V, Option<V>) {
        match self {
            OperandValue::Immediate(llptr) => (llptr, None),
            OperandValue::Pair(llptr, llextra) => (llptr, Some(llextra)),
            _ => bug!("OperandValue cannot be a pointer: {self:?}"),
        }
    }

    /// Treat this value as a pointer and return the place to which it points.
    ///
    /// The pointer immediate doesn't inherently know its alignment,
    /// so you need to pass it in. If you want to get it from a type's ABI
    /// alignment, then maybe you want [`OperandRef::deref`] instead.
    ///
    /// This is the inverse of [`PlaceValue::address`].
    pub(crate) fn deref(self, align: Align) -> PlaceValue<V> {
        let (llval, llextra) = self.pointer_parts();
        PlaceValue { llval, llextra, align }
    }

    pub(crate) fn is_expected_variant_for_type<'tcx, Cx: LayoutTypeCodegenMethods<'tcx>>(
        &self,
        cx: &Cx,
        ty: TyAndLayout<'tcx>,
    ) -> bool {
        match self {
            OperandValue::ZeroSized => ty.is_zst(),
            OperandValue::Immediate(_) => cx.is_backend_immediate(ty),
            OperandValue::Pair(_, _) => cx.is_backend_scalar_pair(ty),
            OperandValue::Ref(_) => cx.is_backend_ref(ty),
        }
    }
}

/// An `OperandRef` is an "SSA" reference to a Rust value, along with
/// its type.
///
/// NOTE: unless you know a value's type exactly, you should not
/// generate LLVM opcodes acting on it and instead act via methods,
/// to avoid nasty edge cases. In particular, using `Builder::store`
/// directly is sure to cause problems -- use `OperandRef::store`
/// instead.
#[derive(Copy, Clone)]
pub struct OperandRef<'tcx, V> {
    /// The value.
    pub val: OperandValue<V>,

    /// The layout of the value, based on its Rust type.
    pub layout: TyAndLayout<'tcx>,
}

impl<V: CodegenObject> fmt::Debug for OperandRef<'_, V> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "OperandRef({:?} @ {:?})", self.val, self.layout)
    }
}

impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> {
    pub fn zero_sized(layout: TyAndLayout<'tcx>) -> OperandRef<'tcx, V> {
        assert!(layout.is_zst());
        OperandRef { val: OperandValue::ZeroSized, layout }
    }

    pub(crate) fn from_const<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        bx: &mut Bx,
        val: mir::ConstValue<'tcx>,
        ty: Ty<'tcx>,
    ) -> Self {
        let layout = bx.layout_of(ty);

        let val = match val {
            ConstValue::Scalar(x) => {
                let BackendRepr::Scalar(scalar) = layout.backend_repr else {
                    bug!("from_const: invalid ByVal layout: {:#?}", layout);
                };
                let llval = bx.scalar_to_backend(x, scalar, bx.immediate_backend_type(layout));
                OperandValue::Immediate(llval)
            }
            ConstValue::ZeroSized => return OperandRef::zero_sized(layout),
            ConstValue::Slice { data, meta } => {
                let BackendRepr::ScalarPair(a_scalar, _) = layout.backend_repr else {
                    bug!("from_const: invalid ScalarPair layout: {:#?}", layout);
                };
                let a = Scalar::from_pointer(
                    Pointer::new(bx.tcx().reserve_and_set_memory_alloc(data).into(), Size::ZERO),
                    &bx.tcx(),
                );
                let a_llval = bx.scalar_to_backend(
                    a,
                    a_scalar,
                    bx.scalar_pair_element_backend_type(layout, 0, true),
                );
                let b_llval = bx.const_usize(meta);
                OperandValue::Pair(a_llval, b_llval)
            }
            ConstValue::Indirect { alloc_id, offset } => {
                let alloc = bx.tcx().global_alloc(alloc_id).unwrap_memory();
                return Self::from_const_alloc(bx, layout, alloc, offset);
            }
        };

        OperandRef { val, layout }
    }

    fn from_const_alloc<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        bx: &mut Bx,
        layout: TyAndLayout<'tcx>,
        alloc: rustc_middle::mir::interpret::ConstAllocation<'tcx>,
        offset: Size,
    ) -> Self {
        let alloc_align = alloc.inner().align;
        assert!(alloc_align >= layout.align.abi);

        let read_scalar = |start, size, s: abi::Scalar, ty| {
            match alloc.0.read_scalar(
                bx,
                alloc_range(start, size),
                /*read_provenance*/ matches!(s.primitive(), abi::Primitive::Pointer(_)),
            ) {
                Ok(val) => bx.scalar_to_backend(val, s, ty),
                Err(_) => bx.const_poison(ty),
            }
        };

        // It may seem like all types with `Scalar` or `ScalarPair` ABI are fair game at this point.
        // However, `MaybeUninit<u64>` is considered a `Scalar` as far as its layout is concerned --
        // and yet it cannot be represented by an interpreter `Scalar`, since we have to handle the
        // case where some of the bytes are initialized and others are not. So we need an extra
        // check that walks over the type to make sure it is truly correct to treat this
        // like a `Scalar` (or `ScalarPair`).
        match layout.backend_repr {
            BackendRepr::Scalar(s @ abi::Scalar::Initialized { .. }) => {
                let size = s.size(bx);
                assert_eq!(size, layout.size, "abi::Scalar size does not match layout size");
                let val = read_scalar(offset, size, s, bx.immediate_backend_type(layout));
                OperandRef { val: OperandValue::Immediate(val), layout }
            }
            BackendRepr::ScalarPair(
                a @ abi::Scalar::Initialized { .. },
                b @ abi::Scalar::Initialized { .. },
            ) => {
                let (a_size, b_size) = (a.size(bx), b.size(bx));
                let b_offset = (offset + a_size).align_to(b.align(bx).abi);
                assert!(b_offset.bytes() > 0);
                let a_val = read_scalar(
                    offset,
                    a_size,
                    a,
                    bx.scalar_pair_element_backend_type(layout, 0, true),
                );
                let b_val = read_scalar(
                    b_offset,
                    b_size,
                    b,
                    bx.scalar_pair_element_backend_type(layout, 1, true),
                );
                OperandRef { val: OperandValue::Pair(a_val, b_val), layout }
            }
            _ if layout.is_zst() => OperandRef::zero_sized(layout),
            _ => {
                // Neither a scalar nor a scalar pair, so load from a place.
                // FIXME: should we cache `const_data_from_alloc` to avoid repeating this for the
                // same `ConstAllocation`?
                let init = bx.const_data_from_alloc(alloc);
                let base_addr = bx.static_addr_of(init, alloc_align, None);

                let llval = bx.const_ptr_byte_offset(base_addr, offset);
                bx.load_operand(PlaceRef::new_sized(llval, layout))
            }
        }
    }

    /// Asserts that this operand is an immediate scalar and returns
    /// its value.
    pub fn immediate(self) -> V {
        match self.val {
            OperandValue::Immediate(s) => s,
            _ => bug!("not immediate: {:?}", self),
        }
    }

    /// Asserts that this operand is a pointer (or reference) and returns
    /// the place to which it points. (This requires no code to be emitted
    /// as we represent places using the pointer to the place.)
    ///
    /// This uses [`Ty::builtin_deref`] to include the type of the place and
    /// assumes the place is aligned to the pointee's usual ABI alignment.
    ///
    /// If you don't need the type, see [`OperandValue::pointer_parts`]
    /// or [`OperandValue::deref`].
    pub fn deref<Cx: CodegenMethods<'tcx>>(self, cx: &Cx) -> PlaceRef<'tcx, V> {
        if self.layout.ty.is_box() {
            // `Derefer` should have removed all `Box` derefs
            bug!("dereferencing {:?} in codegen", self.layout.ty);
        }

        let projected_ty = self
            .layout
            .ty
            .builtin_deref(true)
            .unwrap_or_else(|| bug!("deref of non-pointer {:?}", self));

        let layout = cx.layout_of(projected_ty);
        self.val.deref(layout.align.abi).with_type(layout)
    }

    /// If this operand is a `Pair`, we return an aggregate with the two values.
    /// For other cases, see `immediate`.
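    ///
    /// As a rough illustration (assuming the LLVM backend, where
    /// `insert_value` lowers to `insertvalue`): packing the two halves of a
    /// `&[u8]` on a 64-bit target emits something like
    /// `insertvalue { ptr, i64 } poison, ptr %data, 0` followed by
    /// `insertvalue { ptr, i64 } %pair.0, i64 %len, 1`.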
    pub fn immediate_or_packed_pair<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        self,
        bx: &mut Bx,
    ) -> V {
        if let OperandValue::Pair(a, b) = self.val {
            let llty = bx.cx().immediate_backend_type(self.layout);
            debug!("Operand::immediate_or_packed_pair: packing {:?} into {:?}", self, llty);
            // Reconstruct the immediate aggregate.
            let mut llpair = bx.cx().const_poison(llty);
            llpair = bx.insert_value(llpair, a, 0);
            llpair = bx.insert_value(llpair, b, 1);
            llpair
        } else {
            self.immediate()
        }
    }

    /// If the type is a pair, we return a `Pair`; otherwise, an `Immediate`.
    pub fn from_immediate_or_packed_pair<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        bx: &mut Bx,
        llval: V,
        layout: TyAndLayout<'tcx>,
    ) -> Self {
        let val = if let BackendRepr::ScalarPair(..) = layout.backend_repr {
            debug!("Operand::from_immediate_or_packed_pair: unpacking {:?} @ {:?}", llval, layout);

            // Deconstruct the immediate aggregate.
            let a_llval = bx.extract_value(llval, 0);
            let b_llval = bx.extract_value(llval, 1);
            OperandValue::Pair(a_llval, b_llval)
        } else {
            OperandValue::Immediate(llval)
        };
        OperandRef { val, layout }
    }

    pub(crate) fn extract_field<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        &self,
        fx: &mut FunctionCx<'a, 'tcx, Bx>,
        bx: &mut Bx,
        i: usize,
    ) -> Self {
        let field = self.layout.field(bx.cx(), i);
        let offset = self.layout.fields.offset(i);

        if !bx.is_backend_ref(self.layout) && bx.is_backend_ref(field) {
            if let BackendRepr::SimdVector { count, .. } = self.layout.backend_repr
                && let BackendRepr::Memory { sized: true } = field.backend_repr
                && count.is_power_of_two()
            {
                assert_eq!(field.size, self.layout.size);
                // This path is deprecated, but for now `stdarch` still needs it for
                // the newtype-vector-of-array pattern, e.g. `#[repr(simd)] struct S([i32; 4]);`.
                let place = PlaceRef::alloca(bx, field);
                self.val.store(bx, place.val.with_type(self.layout));
                return bx.load_operand(place);
            } else {
                // Part of https://github.com/rust-lang/compiler-team/issues/838
                bug!("Non-ref type {self:?} cannot project to ref field type {field:?}");
            }
        }

        let val = if field.is_zst() {
            OperandValue::ZeroSized
        } else if field.size == self.layout.size {
            assert_eq!(offset.bytes(), 0);
            fx.codegen_transmute_operand(bx, *self, field).unwrap_or_else(|| {
                bug!(
                    "Expected `codegen_transmute_operand` to handle equal-size \
                      field {i:?} projection from {self:?} to {field:?}"
                )
            })
        } else {
            let (in_scalar, imm) = match (self.val, self.layout.backend_repr) {
                // Extract a scalar component from a pair.
                (OperandValue::Pair(a_llval, b_llval), BackendRepr::ScalarPair(a, b)) => {
                    if offset.bytes() == 0 {
                        assert_eq!(field.size, a.size(bx.cx()));
                        (Some(a), a_llval)
                    } else {
                        assert_eq!(offset, a.size(bx.cx()).align_to(b.align(bx.cx()).abi));
                        assert_eq!(field.size, b.size(bx.cx()));
                        (Some(b), b_llval)
                    }
                }

                _ => {
                    span_bug!(fx.mir.span, "OperandRef::extract_field({:?}): not applicable", self)
                }
            };
            OperandValue::Immediate(match field.backend_repr {
                BackendRepr::SimdVector { .. } => imm,
                BackendRepr::Scalar(out_scalar) => {
                    let Some(in_scalar) = in_scalar else {
                        span_bug!(
                            fx.mir.span,
                            "OperandRef::extract_field({:?}): missing input scalar for output scalar",
                            self
                        )
                    };
                    if in_scalar != out_scalar {
                        // If the backend and backend-immediate types might differ,
                        // flip back to the backend type and then to the new immediate.
                        // This avoids no-op truncations, but still handles cases like
                        // bools in union fields that need to be truncated.
                        let backend = bx.from_immediate(imm);
                        bx.to_immediate_scalar(backend, out_scalar)
                    } else {
                        imm
                    }
                }
                BackendRepr::ScalarPair(_, _) | BackendRepr::Memory { .. } => bug!(),
            })
        };

        OperandRef { val, layout: field }
    }

    /// Obtain the actual discriminant of a value.
    #[instrument(level = "trace", skip(fx, bx))]
    pub fn codegen_get_discr<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        self,
        fx: &mut FunctionCx<'a, 'tcx, Bx>,
        bx: &mut Bx,
        cast_to: Ty<'tcx>,
    ) -> V {
        let dl = &bx.tcx().data_layout;
        let cast_to_layout = bx.cx().layout_of(cast_to);
        let cast_to = bx.cx().immediate_backend_type(cast_to_layout);

        // We check uninhabitedness separately because a type like
        // `enum Foo { Bar(i32, !) }` is still reported as `Variants::Single`,
        // *not* as `Variants::Empty`.
        if self.layout.is_uninhabited() {
            return bx.cx().const_poison(cast_to);
        }

        let (tag_scalar, tag_encoding, tag_field) = match self.layout.variants {
            Variants::Empty => unreachable!("we already handled uninhabited types"),
            Variants::Single { index } => {
                let discr_val =
                    if let Some(discr) = self.layout.ty.discriminant_for_variant(bx.tcx(), index) {
                        discr.val
                    } else {
                        // This arm is for types which are neither enums nor coroutines,
                        // and thus for which the only possible "variant" should be the first one.
                        assert_eq!(index, FIRST_VARIANT);
                        // There's thus no actual discriminant to return, so we return
                        // what it would have been if this was a single-variant enum.
                        0
                    };
                return bx.cx().const_uint_big(cast_to, discr_val);
            }
            Variants::Multiple { tag, ref tag_encoding, tag_field, .. } => {
                (tag, tag_encoding, tag_field)
            }
        };

        // Read the tag/niche-encoded discriminant from memory.
        let tag_op = match self.val {
            OperandValue::ZeroSized => bug!(),
            OperandValue::Immediate(_) | OperandValue::Pair(_, _) => {
                self.extract_field(fx, bx, tag_field.as_usize())
            }
            OperandValue::Ref(place) => {
                let tag = place.with_type(self.layout).project_field(bx, tag_field.as_usize());
                bx.load_operand(tag)
            }
        };
        let tag_imm = tag_op.immediate();

        // Decode the discriminant (specifically if it's niche-encoded).
        match *tag_encoding {
            TagEncoding::Direct => {
                let signed = match tag_scalar.primitive() {
                    // We use `i1` for bytes that are always `0` or `1`,
                    // e.g., `#[repr(i8)] enum E { A, B }`, but we can't
                    // let LLVM interpret the `i1` as signed, because
                    // then `i1 1` (i.e., `E::B`) is effectively `i8 -1`.
                    Primitive::Int(_, signed) => !tag_scalar.is_bool() && signed,
                    _ => false,
                };
                bx.intcast(tag_imm, cast_to, signed)
            }
            TagEncoding::Niche { untagged_variant, ref niche_variants, niche_start } => {
                // Cast to an integer so we don't have to treat a pointer as a
                // special case.
                let (tag, tag_llty) = match tag_scalar.primitive() {
                    // FIXME(erikdesjardins): handle non-default addrspace ptr sizes
                    Primitive::Pointer(_) => {
                        let t = bx.type_from_integer(dl.ptr_sized_integer());
                        let tag = bx.ptrtoint(tag_imm, t);
                        (tag, t)
                    }
                    _ => (tag_imm, bx.cx().immediate_backend_type(tag_op.layout)),
                };

                // `layout_sanity_check` ensures that we only get here for cases where the discriminant
                // value and the variant index match, since that's all `Niche` can encode.

                let relative_max = niche_variants.end().as_u32() - niche_variants.start().as_u32();

                // We have a subrange `niche_start..=niche_end` inside `range`.
                // If the value of the tag is inside this subrange, it's a
                // "niche value", an increment of the discriminant. Otherwise it
                // indicates the untagged variant.
                // A general algorithm to extract the discriminant from the tag
                // is:
                // relative_tag = tag - niche_start
                // is_niche = relative_tag <= (ule) relative_max
                // discr = if is_niche {
                //     cast(relative_tag) + niche_variants.start()
                // } else {
                //     untagged_variant
                // }
                // However, we will likely be able to emit simpler code.
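                //
                // As a concrete illustration (an assumption for exposition,
                // not normative): for `Option<&T>` the niche is the null
                // pointer, so `niche_start == 0` and `niche_variants == 0..=0`
                // (just `None`), with `Some` as the untagged variant. Then
                // `relative_max == 0`, and the code below reduces to
                // `is_niche = tag == 0` and `discr = select(is_niche, 0, 1)`.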
                let (is_niche, tagged_discr, delta) = if relative_max == 0 {
                    // Best case scenario: only one tagged variant. This will
                    // likely become just a comparison and a jump.
                    // The algorithm is:
                    // is_niche = tag == niche_start
                    // discr = if is_niche {
                    //     niche_start
                    // } else {
                    //     untagged_variant
                    // }
                    let niche_start = bx.cx().const_uint_big(tag_llty, niche_start);
                    let is_niche = bx.icmp(IntPredicate::IntEQ, tag, niche_start);
                    let tagged_discr =
                        bx.cx().const_uint(cast_to, niche_variants.start().as_u32() as u64);
                    (is_niche, tagged_discr, 0)
                } else {
                    // The special cases don't apply, so we'll have to go with
                    // the general algorithm.
                    let relative_discr = bx.sub(tag, bx.cx().const_uint_big(tag_llty, niche_start));
                    let cast_tag = bx.intcast(relative_discr, cast_to, false);
                    let is_niche = bx.icmp(
                        IntPredicate::IntULE,
                        relative_discr,
                        bx.cx().const_uint(tag_llty, relative_max as u64),
                    );

                    // Thanks to parameter attributes and load metadata, LLVM already knows
                    // the general valid range of the tag. It's possible, though, for there
                    // to be an impossible value *in the middle*, which those ranges don't
                    // communicate, so it's worth an `assume` to let the optimizer know.
                    if niche_variants.contains(&untagged_variant)
                        && bx.cx().sess().opts.optimize != OptLevel::No
                    {
                        let impossible =
                            u64::from(untagged_variant.as_u32() - niche_variants.start().as_u32());
                        let impossible = bx.cx().const_uint(tag_llty, impossible);
                        let ne = bx.icmp(IntPredicate::IntNE, relative_discr, impossible);
                        bx.assume(ne);
                    }

                    (is_niche, cast_tag, niche_variants.start().as_u32() as u128)
                };

                let tagged_discr = if delta == 0 {
                    tagged_discr
                } else {
                    bx.add(tagged_discr, bx.cx().const_uint_big(cast_to, delta))
                };

                let discr = bx.select(
                    is_niche,
                    tagged_discr,
                    bx.cx().const_uint(cast_to, untagged_variant.as_u32() as u64),
                );

                // In principle we could insert assumes on the possible range of `discr`, but
                // currently in LLVM this isn't worth it because the original `tag` will
                // have either a `range` parameter attribute or `!range` metadata,
                // or come from a `transmute` that already `assume`d it.

                discr
            }
        }
    }

    /// Creates an incomplete operand containing the [`abi::Scalar`]s expected based
    /// on the `layout` passed. This is for use with [`OperandRef::insert_field`]
    /// later to set the necessary immediate(s).
    ///
    /// Returns `None` for `layout`s which cannot be built this way.
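    ///
    /// A rough usage sketch (hypothetical caller code, for illustration only):
    ///
    /// ```ignore (illustrative)
    /// let mut builder = OperandRef::builder(layout)?;
    /// builder.insert_field(bx, FIRST_VARIANT, FieldIdx::ZERO, field_operand);
    /// let operand = builder.build();
    /// ```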
    pub(crate) fn builder(
        layout: TyAndLayout<'tcx>,
    ) -> Option<OperandRef<'tcx, Result<V, abi::Scalar>>> {
        let val = match layout.backend_repr {
            BackendRepr::Memory { .. } if layout.is_zst() => OperandValue::ZeroSized,
            BackendRepr::Scalar(s) => OperandValue::Immediate(Err(s)),
            BackendRepr::ScalarPair(a, b) => OperandValue::Pair(Err(a), Err(b)),
            BackendRepr::Memory { .. } | BackendRepr::SimdVector { .. } => return None,
        };
        Some(OperandRef { val, layout })
    }
}

impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, Result<V, abi::Scalar>> {
    pub(crate) fn insert_field<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        &mut self,
        bx: &mut Bx,
        v: VariantIdx,
        f: FieldIdx,
        operand: OperandRef<'tcx, V>,
    ) {
        let (expect_zst, is_zero_offset) = if let abi::FieldsShape::Primitive = self.layout.fields {
            // The other branch, which looks at field layouts, ICEs for primitives,
            // so we need to handle them separately.
            // Multiple fields are possible for cases such as aggregating
            // a thin pointer, where the second field is the unit.
            assert!(!self.layout.is_zst());
            assert_eq!(v, FIRST_VARIANT);
            let first_field = f == FieldIdx::ZERO;
            (!first_field, first_field)
        } else {
            let variant_layout = self.layout.for_variant(bx.cx(), v);
            let field_layout = variant_layout.field(bx.cx(), f.as_usize());
            let field_offset = variant_layout.fields.offset(f.as_usize());
            (field_layout.is_zst(), field_offset == Size::ZERO)
        };

        let mut update = |tgt: &mut Result<V, abi::Scalar>, src, from_scalar| {
            let from_bty = bx.cx().type_from_scalar(from_scalar);
            let to_scalar = tgt.unwrap_err();
            let to_bty = bx.cx().type_from_scalar(to_scalar);
            let imm = transmute_immediate(bx, src, from_scalar, from_bty, to_scalar, to_bty);
            *tgt = Ok(imm);
        };

        match (operand.val, operand.layout.backend_repr) {
            (OperandValue::ZeroSized, _) if expect_zst => {}
            (OperandValue::Immediate(v), BackendRepr::Scalar(from_scalar)) => match &mut self.val {
                OperandValue::Immediate(val @ Err(_)) if is_zero_offset => {
                    update(val, v, from_scalar);
                }
                OperandValue::Pair(fst @ Err(_), _) if is_zero_offset => {
                    update(fst, v, from_scalar);
                }
                OperandValue::Pair(_, snd @ Err(_)) if !is_zero_offset => {
                    update(snd, v, from_scalar);
                }
                _ => bug!("Tried to insert {operand:?} into {v:?}.{f:?} of {self:?}"),
            },
            (OperandValue::Pair(a, b), BackendRepr::ScalarPair(from_sa, from_sb)) => {
                match &mut self.val {
                    OperandValue::Pair(fst @ Err(_), snd @ Err(_)) => {
                        update(fst, a, from_sa);
                        update(snd, b, from_sb);
                    }
                    _ => bug!("Tried to insert {operand:?} into {v:?}.{f:?} of {self:?}"),
                }
            }
            _ => bug!("Unsupported operand {operand:?} inserting into {v:?}.{f:?} of {self:?}"),
        }
    }

    /// After having set all necessary fields, this converts the
    /// `OperandValue<Result<V, _>>` (as obtained from [`OperandRef::builder`])
    /// to the normal `OperandValue<V>`.
    ///
    /// ICEs if any required fields were not set.
    pub fn build(&self) -> OperandRef<'tcx, V> {
        let OperandRef { val, layout } = *self;

        let unwrap = |r: Result<V, abi::Scalar>| match r {
            Ok(v) => v,
            Err(_) => bug!("OperandRef::build called while fields are missing {self:?}"),
        };

        let val = match val {
            OperandValue::ZeroSized => OperandValue::ZeroSized,
            OperandValue::Immediate(v) => OperandValue::Immediate(unwrap(v)),
            OperandValue::Pair(a, b) => OperandValue::Pair(unwrap(a), unwrap(b)),
            OperandValue::Ref(_) => bug!(),
        };
        OperandRef { val, layout }
    }
}

impl<'a, 'tcx, V: CodegenObject> OperandValue<V> {
    /// Returns an `OperandValue` that's generally UB to use in any way.
    ///
    /// Depending on the `layout`, returns `ZeroSized` for ZSTs, an `Immediate` or
    /// `Pair` containing poison value(s), or a `Ref` containing a poison pointer.
    ///
    /// Supports sized types only.
    pub fn poison<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        bx: &mut Bx,
        layout: TyAndLayout<'tcx>,
    ) -> OperandValue<V> {
        assert!(layout.is_sized());
        if layout.is_zst() {
            OperandValue::ZeroSized
        } else if bx.cx().is_backend_immediate(layout) {
            let ibty = bx.cx().immediate_backend_type(layout);
            OperandValue::Immediate(bx.const_poison(ibty))
        } else if bx.cx().is_backend_scalar_pair(layout) {
            let ibty0 = bx.cx().scalar_pair_element_backend_type(layout, 0, true);
            let ibty1 = bx.cx().scalar_pair_element_backend_type(layout, 1, true);
            OperandValue::Pair(bx.const_poison(ibty0), bx.const_poison(ibty1))
        } else {
            let ptr = bx.cx().type_ptr();
            OperandValue::Ref(PlaceValue::new_sized(bx.const_poison(ptr), layout.align.abi))
        }
    }

    pub fn store<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        self,
        bx: &mut Bx,
        dest: PlaceRef<'tcx, V>,
    ) {
        self.store_with_flags(bx, dest, MemFlags::empty());
    }

    pub fn volatile_store<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        self,
        bx: &mut Bx,
        dest: PlaceRef<'tcx, V>,
    ) {
        self.store_with_flags(bx, dest, MemFlags::VOLATILE);
    }

    pub fn unaligned_volatile_store<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        self,
        bx: &mut Bx,
        dest: PlaceRef<'tcx, V>,
    ) {
        self.store_with_flags(bx, dest, MemFlags::VOLATILE | MemFlags::UNALIGNED);
    }

    pub fn nontemporal_store<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        self,
        bx: &mut Bx,
        dest: PlaceRef<'tcx, V>,
    ) {
        self.store_with_flags(bx, dest, MemFlags::NONTEMPORAL);
    }

    pub(crate) fn store_with_flags<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        self,
        bx: &mut Bx,
        dest: PlaceRef<'tcx, V>,
        flags: MemFlags,
    ) {
        debug!("OperandRef::store: operand={:?}, dest={:?}", self, dest);
        match self {
            OperandValue::ZeroSized => {
                // Avoid generating stores of zero-sized values, because the only way to have a
                // zero-sized value is through `undef`/`poison`, and the store itself is useless.
            }
            OperandValue::Ref(val) => {
                assert!(dest.layout.is_sized(), "cannot directly store unsized values");
                if val.llextra.is_some() {
                    bug!("cannot directly store unsized values");
                }
                bx.typed_place_copy_with_flags(dest.val, val, dest.layout, flags);
            }
            OperandValue::Immediate(s) => {
                let val = bx.from_immediate(s);
                bx.store_with_flags(val, dest.val.llval, dest.val.align, flags);
            }
            OperandValue::Pair(a, b) => {
                let BackendRepr::ScalarPair(a_scalar, b_scalar) = dest.layout.backend_repr else {
                    bug!("store_with_flags: invalid ScalarPair layout: {:#?}", dest.layout);
                };
                let b_offset = a_scalar.size(bx).align_to(b_scalar.align(bx).abi);
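                // For instance (illustrative): when storing a `&[u8]` on a
                // 64-bit target, `a` is the 8-byte data pointer and `b` is the
                // `usize` length, so `b_offset` is 8 bytes.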

                let val = bx.from_immediate(a);
                let align = dest.val.align;
                bx.store_with_flags(val, dest.val.llval, align, flags);

                let llptr = bx.inbounds_ptradd(dest.val.llval, bx.const_usize(b_offset.bytes()));
                let val = bx.from_immediate(b);
                let align = dest.val.align.restrict_for_offset(b_offset);
                bx.store_with_flags(val, llptr, align, flags);
            }
        }
    }

    pub fn store_unsized<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        self,
        bx: &mut Bx,
        indirect_dest: PlaceRef<'tcx, V>,
    ) {
        debug!("OperandRef::store_unsized: operand={:?}, indirect_dest={:?}", self, indirect_dest);
        // `indirect_dest` must have `*mut T` type. We extract `T` out of it.
        let unsized_ty = indirect_dest
            .layout
            .ty
            .builtin_deref(true)
            .unwrap_or_else(|| bug!("indirect_dest has non-pointer type: {:?}", indirect_dest));

        let OperandValue::Ref(PlaceValue { llval: llptr, llextra: Some(llextra), .. }) = self
        else {
            bug!("store_unsized called with a sized value (or with an extern type)")
        };

        // Allocate an appropriate region on the stack, and copy the value into it. Since alloca
        // doesn't support dynamic alignment, we allocate an extra `align - 1` bytes, and align the
        // pointer manually.
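        // For instance (illustrative): if the alloca lands at address 0x1003
        // and `align` is 8, then `-0x1003 & 7 == 5`, so `dst` becomes the
        // aligned address 0x1008.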
        let (size, align) = size_of_val::size_and_align_of_dst(bx, unsized_ty, Some(llextra));
        let one = bx.const_usize(1);
        let align_minus_1 = bx.sub(align, one);
        let size_extra = bx.add(size, align_minus_1);
        let min_align = Align::ONE;
        let alloca = bx.dynamic_alloca(size_extra, min_align);
        let address = bx.ptrtoint(alloca, bx.type_isize());
        let neg_address = bx.neg(address);
        let offset = bx.and(neg_address, align_minus_1);
        let dst = bx.inbounds_ptradd(alloca, offset);
        bx.memcpy(dst, min_align, llptr, min_align, size, MemFlags::empty());

        // Store the allocated region and the extra to the indirect place.
        let indirect_operand = OperandValue::Pair(dst, llextra);
        indirect_operand.store(bx, indirect_dest);
    }
}

impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
    fn maybe_codegen_consume_direct(
        &mut self,
        bx: &mut Bx,
        place_ref: mir::PlaceRef<'tcx>,
    ) -> Option<OperandRef<'tcx, Bx::Value>> {
        debug!("maybe_codegen_consume_direct(place_ref={:?})", place_ref);

        match self.locals[place_ref.local] {
            LocalRef::Operand(mut o) => {
                // Moves out of scalar and scalar pair fields are trivial.
                for elem in place_ref.projection.iter() {
                    match elem {
                        mir::ProjectionElem::Field(f, _) => {
                            assert!(
                                !o.layout.ty.is_any_ptr(),
                                "Bad PlaceRef: destructuring pointers should use cast/PtrMetadata, \
                                 but tried to access field {f:?} of pointer {o:?}",
                            );
                            o = o.extract_field(self, bx, f.index());
                        }
                        mir::ProjectionElem::Index(_)
                        | mir::ProjectionElem::ConstantIndex { .. } => {
                            // ZSTs don't require any actual memory access.
                            // FIXME(eddyb) deduplicate this with the identical
                            // checks in `codegen_consume` and `extract_field`.
                            let elem = o.layout.field(bx.cx(), 0);
                            if elem.is_zst() {
                                o = OperandRef::zero_sized(elem);
                            } else {
                                return None;
                            }
                        }
                        _ => return None,
                    }
                }

                Some(o)
            }
            LocalRef::PendingOperand => {
                bug!("use of {:?} before def", place_ref);
            }
            LocalRef::Place(..) | LocalRef::UnsizedPlace(..) => {
                // Watch out for locals that do not have an
                // alloca; they are handled somewhat differently.
                None
            }
        }
    }

    pub fn codegen_consume(
        &mut self,
        bx: &mut Bx,
        place_ref: mir::PlaceRef<'tcx>,
    ) -> OperandRef<'tcx, Bx::Value> {
        debug!("codegen_consume(place_ref={:?})", place_ref);

        let ty = self.monomorphized_place_ty(place_ref);
        let layout = bx.cx().layout_of(ty);

        // ZSTs don't require any actual memory access.
        if layout.is_zst() {
            return OperandRef::zero_sized(layout);
        }

        if let Some(o) = self.maybe_codegen_consume_direct(bx, place_ref) {
            return o;
        }

        // For most places, consuming them just means loading the value
        // out of their home.
        let place = self.codegen_place(bx, place_ref);
        bx.load_operand(place)
    }

    pub fn codegen_operand(
        &mut self,
        bx: &mut Bx,
        operand: &mir::Operand<'tcx>,
    ) -> OperandRef<'tcx, Bx::Value> {
        debug!("codegen_operand(operand={:?})", operand);

        match *operand {
            mir::Operand::Copy(ref place) | mir::Operand::Move(ref place) => {
                self.codegen_consume(bx, place.as_ref())
            }

            mir::Operand::Constant(ref constant) => {
                let constant_ty = self.monomorphize(constant.ty());
                // Most SIMD vector constants should be passed as immediates.
                // (In particular, some intrinsics really rely on this.)
                if constant_ty.is_simd() {
                    // However, some SIMD types do not actually use the vector ABI
                    // (in particular, packed SIMD types do not). Ensure we exclude those.
                    let layout = bx.layout_of(constant_ty);
                    if let BackendRepr::SimdVector { .. } = layout.backend_repr {
                        let (llval, ty) = self.immediate_const_vector(bx, constant);
                        return OperandRef {
                            val: OperandValue::Immediate(llval),
                            layout: bx.layout_of(ty),
                        };
                    }
                }
                self.eval_mir_constant_to_operand(bx, constant)
            }
        }
    }
}