rustc_codegen_ssa/mir/operand.rs

use std::fmt;

use itertools::Either;
use rustc_abi as abi;
use rustc_abi::{
    Align, BackendRepr, FIRST_VARIANT, FieldIdx, Primitive, Size, TagEncoding, VariantIdx, Variants,
};
use rustc_middle::mir::interpret::{Pointer, Scalar, alloc_range};
use rustc_middle::mir::{self, ConstValue};
use rustc_middle::ty::Ty;
use rustc_middle::ty::layout::{LayoutOf, TyAndLayout};
use rustc_middle::{bug, span_bug};
use rustc_session::config::OptLevel;
use tracing::{debug, instrument};

use super::place::{PlaceRef, PlaceValue};
use super::rvalue::transmute_scalar;
use super::{FunctionCx, LocalRef};
use crate::common::IntPredicate;
use crate::traits::*;
use crate::{MemFlags, size_of_val};

/// The representation of a Rust value. The enum variant is in fact
/// uniquely determined by the value's type, but is kept as a
/// safety check.
#[derive(Copy, Clone, Debug)]
pub enum OperandValue<V> {
    /// A reference to the actual operand. The data is guaranteed
    /// to be valid for the operand's lifetime.
    /// The second value, if any, is the extra data (vtable or length)
    /// which indicates that it refers to an unsized rvalue.
    ///
    /// An `OperandValue` *must* be this variant for any type for which
    /// [`LayoutTypeCodegenMethods::is_backend_ref`] returns `true`.
    /// (That basically amounts to "isn't one of the other variants".)
    ///
    /// This holds a [`PlaceValue`] (like a [`PlaceRef`] does) with a pointer
    /// to the location holding the value. The type behind that pointer is the
    /// one returned by [`LayoutTypeCodegenMethods::backend_type`].
    Ref(PlaceValue<V>),
    /// A single LLVM immediate value.
    ///
    /// An `OperandValue` *must* be this variant for any type for which
    /// [`LayoutTypeCodegenMethods::is_backend_immediate`] returns `true`.
    /// The backend value in this variant must be the *immediate* backend type,
    /// as returned by [`LayoutTypeCodegenMethods::immediate_backend_type`].
    Immediate(V),
    /// A pair of immediate LLVM values. Used by wide pointers too.
    ///
    /// # Invariants
    /// - For `Pair(a, b)`, `a` is always at offset 0, though it may correspond
    ///   to `FieldIdx(1..)` if it is preceded by 1-ZST fields.
    /// - `b` is never at offset 0, since the first component is not a 1-ZST.
    /// - `a` and `b` always have different `FieldIdx`s, but `b`'s index may be
    ///   lower than `a`'s, and the two need not be adjacent: any number of
    ///   1-ZST fields may surround them without affecting the shape of the
    ///   data, which is what determines whether `Pair` is used.
    /// - An `OperandValue` *must* be this variant for any type for which
    ///   [`LayoutTypeCodegenMethods::is_backend_scalar_pair`] returns `true`.
    /// - The backend values in this variant must be the *immediate* backend types,
    ///   as returned by [`LayoutTypeCodegenMethods::scalar_pair_element_backend_type`]
    ///   with `immediate: true`.
    Pair(V, V),
    /// A value taking no bytes, and which therefore needs no LLVM value at all.
    ///
    /// If you ever need a `V` to pass to something, get a fresh poison value
    /// from [`ConstCodegenMethods::const_poison`].
    ///
    /// An `OperandValue` *must* be this variant for any type for which
    /// `is_zst` on its `Layout` returns `true`. Note however that
    /// these values can still require alignment.
    ZeroSized,
}
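
// As a rough illustration of which variant a type uses (the real choice is
// made from the type's layout, via the `is_backend_*` methods referenced
// above):
// - `()` and other ZSTs: `OperandValue::ZeroSized`
// - `u32`, `f64`, thin pointers: `OperandValue::Immediate`
// - `&[u8]`, `&dyn Trait`: `OperandValue::Pair` (pointer plus length/vtable)
// - `[u8; 32]` and most larger aggregates: `OperandValue::Ref`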

impl<V: CodegenObject> OperandValue<V> {
    /// Treat this value as a pointer and return the data pointer and
    /// optional metadata as backend values.
    ///
    /// If you're making a place, use [`Self::deref`] instead.
    pub(crate) fn pointer_parts(self) -> (V, Option<V>) {
        match self {
            OperandValue::Immediate(llptr) => (llptr, None),
            OperandValue::Pair(llptr, llextra) => (llptr, Some(llextra)),
            _ => bug!("OperandValue cannot be a pointer: {self:?}"),
        }
    }

    /// Treat this value as a pointer and return the place to which it points.
    ///
    /// The pointer immediate doesn't inherently know its alignment,
    /// so you need to pass it in. If you want to get it from a type's ABI
    /// alignment, then maybe you want [`OperandRef::deref`] instead.
    ///
    /// This is the inverse of [`PlaceValue::address`].
    pub(crate) fn deref(self, align: Align) -> PlaceValue<V> {
        let (llval, llextra) = self.pointer_parts();
        PlaceValue { llval, llextra, align }
    }

    pub(crate) fn is_expected_variant_for_type<'tcx, Cx: LayoutTypeCodegenMethods<'tcx>>(
        &self,
        cx: &Cx,
        ty: TyAndLayout<'tcx>,
    ) -> bool {
        match self {
            OperandValue::ZeroSized => ty.is_zst(),
            OperandValue::Immediate(_) => cx.is_backend_immediate(ty),
            OperandValue::Pair(_, _) => cx.is_backend_scalar_pair(ty),
            OperandValue::Ref(_) => cx.is_backend_ref(ty),
        }
    }
}
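
// For example, a thin `*const u8` is an `Immediate(ptr)`, so `pointer_parts`
// returns `(ptr, None)`, while a wide `*const [u8]` is a `Pair(ptr, len)` and
// returns `(ptr, Some(len))`.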

/// An `OperandRef` is an "SSA" reference to a Rust value, along with
/// its type.
///
/// NOTE: unless you know a value's type exactly, you should not
/// generate LLVM opcodes acting on it and instead act via methods,
/// to avoid nasty edge cases. In particular, using `Builder::store`
/// directly is sure to cause problems -- use [`OperandValue::store`]
/// instead.
#[derive(Copy, Clone)]
pub struct OperandRef<'tcx, V> {
    /// The value.
    pub val: OperandValue<V>,

    /// The layout of the value, based on its Rust type.
    pub layout: TyAndLayout<'tcx>,
}

impl<V: CodegenObject> fmt::Debug for OperandRef<'_, V> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "OperandRef({:?} @ {:?})", self.val, self.layout)
    }
}

impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> {
    pub fn zero_sized(layout: TyAndLayout<'tcx>) -> OperandRef<'tcx, V> {
        assert!(layout.is_zst());
        OperandRef { val: OperandValue::ZeroSized, layout }
    }

    pub(crate) fn from_const<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        bx: &mut Bx,
        val: mir::ConstValue<'tcx>,
        ty: Ty<'tcx>,
    ) -> Self {
        let layout = bx.layout_of(ty);

        let val = match val {
            ConstValue::Scalar(x) => {
                let BackendRepr::Scalar(scalar) = layout.backend_repr else {
                    bug!("from_const: invalid ByVal layout: {:#?}", layout);
                };
                let llval = bx.scalar_to_backend(x, scalar, bx.immediate_backend_type(layout));
                OperandValue::Immediate(llval)
            }
            ConstValue::ZeroSized => return OperandRef::zero_sized(layout),
            ConstValue::Slice { data, meta } => {
                let BackendRepr::ScalarPair(a_scalar, _) = layout.backend_repr else {
                    bug!("from_const: invalid ScalarPair layout: {:#?}", layout);
                };
                let a = Scalar::from_pointer(
                    Pointer::new(bx.tcx().reserve_and_set_memory_alloc(data).into(), Size::ZERO),
                    &bx.tcx(),
                );
                let a_llval = bx.scalar_to_backend(
                    a,
                    a_scalar,
                    bx.scalar_pair_element_backend_type(layout, 0, true),
                );
                let b_llval = bx.const_usize(meta);
                OperandValue::Pair(a_llval, b_llval)
            }
            ConstValue::Indirect { alloc_id, offset } => {
                let alloc = bx.tcx().global_alloc(alloc_id).unwrap_memory();
                return Self::from_const_alloc(bx, layout, alloc, offset);
            }
        };

        OperandRef { val, layout }
    }

    fn from_const_alloc<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        bx: &mut Bx,
        layout: TyAndLayout<'tcx>,
        alloc: rustc_middle::mir::interpret::ConstAllocation<'tcx>,
        offset: Size,
    ) -> Self {
        let alloc_align = alloc.inner().align;
        assert!(alloc_align >= layout.align.abi);

        let read_scalar = |start, size, s: abi::Scalar, ty| {
            match alloc.0.read_scalar(
                bx,
                alloc_range(start, size),
                /*read_provenance*/ matches!(s.primitive(), abi::Primitive::Pointer(_)),
            ) {
                Ok(val) => bx.scalar_to_backend(val, s, ty),
                Err(_) => bx.const_poison(ty),
            }
        };

        // It may seem like all types with `Scalar` or `ScalarPair` ABI are fair game at this point.
        // However, `MaybeUninit<u64>` is considered a `Scalar` as far as its layout is concerned --
        // and yet cannot be represented by an interpreter `Scalar`, since we have to handle the
        // case where some of the bytes are initialized and others are not. So, we need an extra
        // check that walks over the type to make sure it is truly correct to treat this
        // like a `Scalar` (or `ScalarPair`).
        match layout.backend_repr {
            BackendRepr::Scalar(s @ abi::Scalar::Initialized { .. }) => {
                let size = s.size(bx);
                assert_eq!(size, layout.size, "abi::Scalar size does not match layout size");
                let val = read_scalar(offset, size, s, bx.immediate_backend_type(layout));
                OperandRef { val: OperandValue::Immediate(val), layout }
            }
            BackendRepr::ScalarPair(
                a @ abi::Scalar::Initialized { .. },
                b @ abi::Scalar::Initialized { .. },
            ) => {
                let (a_size, b_size) = (a.size(bx), b.size(bx));
                let b_offset = (offset + a_size).align_to(b.align(bx).abi);
                assert!(b_offset.bytes() > 0);
                let a_val = read_scalar(
                    offset,
                    a_size,
                    a,
                    bx.scalar_pair_element_backend_type(layout, 0, true),
                );
                let b_val = read_scalar(
                    b_offset,
                    b_size,
                    b,
                    bx.scalar_pair_element_backend_type(layout, 1, true),
                );
                OperandRef { val: OperandValue::Pair(a_val, b_val), layout }
            }
            _ if layout.is_zst() => OperandRef::zero_sized(layout),
            _ => {
                // Neither a scalar nor a scalar pair: load from a place.
                // FIXME: should we cache `const_data_from_alloc` to avoid repeating this for the
                // same `ConstAllocation`?
                let init = bx.const_data_from_alloc(alloc);
                let base_addr = bx.static_addr_of(init, alloc_align, None);

                let llval = bx.const_ptr_byte_offset(base_addr, offset);
                bx.load_operand(PlaceRef::new_sized(llval, layout))
            }
        }
    }

    /// Asserts that this operand is an immediate scalar and returns
    /// its value.
    pub fn immediate(self) -> V {
        match self.val {
            OperandValue::Immediate(s) => s,
            _ => bug!("not immediate: {:?}", self),
        }
    }

    /// Asserts that this operand is a pointer (or reference) and returns
    /// the place to which it points. (This requires no code to be emitted
    /// as we represent places using the pointer to the place.)
    ///
    /// This uses [`Ty::builtin_deref`] to include the type of the place and
    /// assumes the place is aligned to the pointee's usual ABI alignment.
    ///
    /// If you don't need the type, see [`OperandValue::pointer_parts`]
    /// or [`OperandValue::deref`].
    pub fn deref<Cx: CodegenMethods<'tcx>>(self, cx: &Cx) -> PlaceRef<'tcx, V> {
        if self.layout.ty.is_box() {
            // Derefer should have removed all Box derefs
            bug!("dereferencing {:?} in codegen", self.layout.ty);
        }

        let projected_ty = self
            .layout
            .ty
            .builtin_deref(true)
            .unwrap_or_else(|| bug!("deref of non-pointer {:?}", self));

        let layout = cx.layout_of(projected_ty);
        self.val.deref(layout.align.abi).with_type(layout)
    }

    /// If this operand is a `Pair`, we return an aggregate with the two values.
    /// For other cases, see `immediate`.
    pub fn immediate_or_packed_pair<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        self,
        bx: &mut Bx,
    ) -> V {
        if let OperandValue::Pair(a, b) = self.val {
            let llty = bx.cx().immediate_backend_type(self.layout);
            debug!("Operand::immediate_or_packed_pair: packing {:?} into {:?}", self, llty);
            // Reconstruct the immediate aggregate.
            let mut llpair = bx.cx().const_poison(llty);
            llpair = bx.insert_value(llpair, a, 0);
            llpair = bx.insert_value(llpair, b, 1);
            llpair
        } else {
            self.immediate()
        }
    }

    /// If the type is a pair, we return a `Pair`, otherwise, an `Immediate`.
    pub fn from_immediate_or_packed_pair<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        bx: &mut Bx,
        llval: V,
        layout: TyAndLayout<'tcx>,
    ) -> Self {
        let val = if let BackendRepr::ScalarPair(..) = layout.backend_repr {
            debug!("Operand::from_immediate_or_packed_pair: unpacking {:?} @ {:?}", llval, layout);

            // Deconstruct the immediate aggregate.
            let a_llval = bx.extract_value(llval, 0);
            let b_llval = bx.extract_value(llval, 1);
            OperandValue::Pair(a_llval, b_llval)
        } else {
            OperandValue::Immediate(llval)
        };
        OperandRef { val, layout }
    }
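
    // Note: `immediate_or_packed_pair` and `from_immediate_or_packed_pair`
    // above are inverses by construction: packing a `Pair` into an aggregate
    // and unpacking that aggregate again yields the original pair of
    // immediates.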

    pub(crate) fn extract_field<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        &self,
        fx: &mut FunctionCx<'a, 'tcx, Bx>,
        bx: &mut Bx,
        i: usize,
    ) -> Self {
        let field = self.layout.field(bx.cx(), i);
        let offset = self.layout.fields.offset(i);

        if !bx.is_backend_ref(self.layout) && bx.is_backend_ref(field) {
            if let BackendRepr::SimdVector { count, .. } = self.layout.backend_repr
                && let BackendRepr::Memory { sized: true } = field.backend_repr
                && count.is_power_of_two()
            {
                assert_eq!(field.size, self.layout.size);
                // This is being deprecated, but for now stdarch still needs it for
                // newtype vectors of arrays, e.g. `#[repr(simd)] struct S([i32; 4]);`.
                let place = PlaceRef::alloca(bx, field);
                self.val.store(bx, place.val.with_type(self.layout));
                return bx.load_operand(place);
            } else {
                // Part of https://github.com/rust-lang/compiler-team/issues/838
                bug!("Non-ref type {self:?} cannot project to ref field type {field:?}");
            }
        }

        let val = if field.is_zst() {
            OperandValue::ZeroSized
        } else if let BackendRepr::SimdVector { .. } = self.layout.backend_repr {
            // codegen_transmute_operand doesn't support SIMD, but since the previous
            // check handled ZSTs, the only possible field access into something SIMD
            // is to the `non_1zst_field` that's the same SIMD. (Other things, even
            // just padding, would change the wrapper's representation type.)
            assert_eq!(field.size, self.layout.size);
            self.val
        } else if field.size == self.layout.size {
            assert_eq!(offset.bytes(), 0);
            fx.codegen_transmute_operand(bx, *self, field)
        } else {
            let (in_scalar, imm) = match (self.val, self.layout.backend_repr) {
                // Extract a scalar component from a pair.
                (OperandValue::Pair(a_llval, b_llval), BackendRepr::ScalarPair(a, b)) => {
                    if offset.bytes() == 0 {
                        assert_eq!(field.size, a.size(bx.cx()));
                        (Some(a), a_llval)
                    } else {
                        assert_eq!(offset, a.size(bx.cx()).align_to(b.align(bx.cx()).abi));
                        assert_eq!(field.size, b.size(bx.cx()));
                        (Some(b), b_llval)
                    }
                }

                _ => {
                    span_bug!(fx.mir.span, "OperandRef::extract_field({:?}): not applicable", self)
                }
            };
            OperandValue::Immediate(match field.backend_repr {
                BackendRepr::SimdVector { .. } => imm,
                BackendRepr::Scalar(out_scalar) => {
                    let Some(in_scalar) = in_scalar else {
                        span_bug!(
                            fx.mir.span,
                            "OperandRef::extract_field({:?}): missing input scalar for output scalar",
                            self
                        )
                    };
                    if in_scalar != out_scalar {
                        // If the backend and backend_immediate types might differ,
                        // flip back to the backend type and then to the new immediate.
                        // This avoids nop truncations while still handling cases like
                        // `bool`s in union fields that need to be truncated.
                        let backend = bx.from_immediate(imm);
                        bx.to_immediate_scalar(backend, out_scalar)
                    } else {
                        imm
                    }
                }
                BackendRepr::ScalarPair(_, _) | BackendRepr::Memory { .. } => bug!(),
            })
        };

        OperandRef { val, layout: field }
    }

    /// Obtain the actual discriminant of a value.
    #[instrument(level = "trace", skip(fx, bx))]
    pub fn codegen_get_discr<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        self,
        fx: &mut FunctionCx<'a, 'tcx, Bx>,
        bx: &mut Bx,
        cast_to: Ty<'tcx>,
    ) -> V {
        let dl = &bx.tcx().data_layout;
        let cast_to_layout = bx.cx().layout_of(cast_to);
        let cast_to = bx.cx().immediate_backend_type(cast_to_layout);

        // We check uninhabitedness separately because a type like
        // `enum Foo { Bar(i32, !) }` is still reported as `Variants::Single`,
        // *not* as `Variants::Empty`.
        if self.layout.is_uninhabited() {
            return bx.cx().const_poison(cast_to);
        }

        let (tag_scalar, tag_encoding, tag_field) = match self.layout.variants {
            Variants::Empty => unreachable!("we already handled uninhabited types"),
            Variants::Single { index } => {
                let discr_val =
                    if let Some(discr) = self.layout.ty.discriminant_for_variant(bx.tcx(), index) {
                        discr.val
                    } else {
                        // This arm is for types which are neither enums nor coroutines,
                        // and thus for which the only possible "variant" should be the first one.
                        assert_eq!(index, FIRST_VARIANT);
                        // There's thus no actual discriminant to return, so we return
                        // what it would have been if this was a single-variant enum.
                        0
                    };
                return bx.cx().const_uint_big(cast_to, discr_val);
            }
            Variants::Multiple { tag, ref tag_encoding, tag_field, .. } => {
                (tag, tag_encoding, tag_field)
            }
        };

        // Read the tag/niche-encoded discriminant from memory.
        let tag_op = match self.val {
            OperandValue::ZeroSized => bug!(),
            OperandValue::Immediate(_) | OperandValue::Pair(_, _) => {
                self.extract_field(fx, bx, tag_field.as_usize())
            }
            OperandValue::Ref(place) => {
                let tag = place.with_type(self.layout).project_field(bx, tag_field.as_usize());
                bx.load_operand(tag)
            }
        };
        let tag_imm = tag_op.immediate();

        // Decode the discriminant (specifically if it's niche-encoded).
        match *tag_encoding {
            TagEncoding::Direct => {
                let signed = match tag_scalar.primitive() {
                    // We use `i1` for bytes that are always `0` or `1`,
                    // e.g., `#[repr(i8)] enum E { A, B }`, but we can't
                    // let LLVM interpret the `i1` as signed, because
                    // then `i1 1` (i.e., `E::B`) is effectively `i8 -1`.
                    Primitive::Int(_, signed) => !tag_scalar.is_bool() && signed,
                    _ => false,
                };
                bx.intcast(tag_imm, cast_to, signed)
            }
            TagEncoding::Niche { untagged_variant, ref niche_variants, niche_start } => {
                // Cast to an integer so we don't have to treat a pointer as a
                // special case.
                let (tag, tag_llty) = match tag_scalar.primitive() {
                    // FIXME(erikdesjardins): handle non-default addrspace ptr sizes
                    Primitive::Pointer(_) => {
                        let t = bx.type_from_integer(dl.ptr_sized_integer());
                        let tag = bx.ptrtoint(tag_imm, t);
                        (tag, t)
                    }
                    _ => (tag_imm, bx.cx().immediate_backend_type(tag_op.layout)),
                };

                // `layout_sanity_check` ensures that we only get here for cases where the discriminant
                // value and the variant index match, since that's all `Niche` can encode.

                let relative_max = niche_variants.end().as_u32() - niche_variants.start().as_u32();

                // We have a subrange `niche_start..=niche_end` inside `range`.
                // If the value of the tag is inside this subrange, it's a
                // "niche value", an increment of the discriminant. Otherwise it
                // indicates the untagged variant.
                // A general algorithm to extract the discriminant from the tag
                // is:
                // relative_tag = tag - niche_start
                // is_niche = relative_tag <= (ule) relative_max
                // discr = if is_niche {
                //     cast(relative_tag) + niche_variants.start()
                // } else {
                //     untagged_variant
                // }
                // However, we will likely be able to emit simpler code.
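                // As a concrete sketch (assuming the usual layout choice): for
                // `enum E { A(bool), B, C }`, the tag is the `bool` byte with
                // `B` and `C` stored in its niche, so `niche_start == 2`,
                // `relative_max == 1`, and the untagged variant is `A`. A tag
                // of 2 gives `relative_tag == 0`, which passes the `ule`
                // check, so `discr = 0 + 1 = 1` (`B`); tags 0 and 1 wrap to
                // large values on subtraction and decode to `A`.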
                let (is_niche, tagged_discr, delta) = if relative_max == 0 {
                    // Best case scenario: only one tagged variant. This will
                    // likely become just a comparison and a jump.
                    // The algorithm is:
                    // is_niche = tag == niche_start
                    // discr = if is_niche {
                    //     niche_variants.start()
                    // } else {
                    //     untagged_variant
                    // }
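                    // For example, `Option<&T>` takes this path: the niche is
                    // the null pointer, so `niche_start == 0` and this reduces
                    // to `discr = if tag == 0 { 0 /* None */ } else { 1 /* Some */ }`.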
                    let niche_start = bx.cx().const_uint_big(tag_llty, niche_start);
                    let is_niche = bx.icmp(IntPredicate::IntEQ, tag, niche_start);
                    let tagged_discr =
                        bx.cx().const_uint(cast_to, niche_variants.start().as_u32() as u64);
                    (is_niche, tagged_discr, 0)
                } else {
                    // The special cases don't apply, so we'll have to go with
                    // the general algorithm.
                    let relative_discr = bx.sub(tag, bx.cx().const_uint_big(tag_llty, niche_start));
                    let cast_tag = bx.intcast(relative_discr, cast_to, false);
                    let is_niche = bx.icmp(
                        IntPredicate::IntULE,
                        relative_discr,
                        bx.cx().const_uint(tag_llty, relative_max as u64),
                    );

                    // Thanks to parameter attributes and load metadata, LLVM already knows
                    // the general valid range of the tag. It's possible, though, for there
                    // to be an impossible value *in the middle*, which those ranges don't
                    // communicate, so it's worth an `assume` to let the optimizer know.
                    if niche_variants.contains(&untagged_variant)
                        && bx.cx().sess().opts.optimize != OptLevel::No
                    {
                        let impossible =
                            u64::from(untagged_variant.as_u32() - niche_variants.start().as_u32());
                        let impossible = bx.cx().const_uint(tag_llty, impossible);
                        let ne = bx.icmp(IntPredicate::IntNE, relative_discr, impossible);
                        bx.assume(ne);
                    }

                    (is_niche, cast_tag, niche_variants.start().as_u32() as u128)
                };

                let tagged_discr = if delta == 0 {
                    tagged_discr
                } else {
                    bx.add(tagged_discr, bx.cx().const_uint_big(cast_to, delta))
                };

                let discr = bx.select(
                    is_niche,
                    tagged_discr,
                    bx.cx().const_uint(cast_to, untagged_variant.as_u32() as u64),
                );

                // In principle we could insert assumes on the possible range of `discr`, but
                // currently in LLVM this isn't worth it because the original `tag` will
                // have either a `range` parameter attribute or `!range` metadata,
                // or come from a `transmute` that already `assume`d it.

                discr
            }
        }
    }

    /// Creates an incomplete operand containing the [`abi::Scalar`]s expected based
    /// on the `layout` passed. This is for use with [`OperandRef::insert_field`]
    /// later to set the necessary immediate(s), one-by-one converting all the `Right` to `Left`.
    ///
    /// Returns `None` for `layout`s which cannot be built this way.
    pub(crate) fn builder(
        layout: TyAndLayout<'tcx>,
    ) -> Option<OperandRef<'tcx, Either<V, abi::Scalar>>> {
        // Uninhabited types are weird, because for example `Result<!, !>`
        // shows up as `FieldsShape::Primitive` and we need to be able to write
        // a field into `(u32, !)`. We'll do that in an `alloca` instead.
        if layout.uninhabited {
            return None;
        }

        let val = match layout.backend_repr {
            BackendRepr::Memory { .. } if layout.is_zst() => OperandValue::ZeroSized,
            BackendRepr::Scalar(s) => OperandValue::Immediate(Either::Right(s)),
            BackendRepr::ScalarPair(a, b) => OperandValue::Pair(Either::Right(a), Either::Right(b)),
            BackendRepr::Memory { .. } | BackendRepr::SimdVector { .. } => return None,
        };
        Some(OperandRef { val, layout })
    }
}
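
// A sketch of the intended builder flow (assuming `layout` is a `ScalarPair`
// such as `(u32, bool)` and `a`/`b` are the already-built field operands):
//
//     let mut builder = OperandRef::builder(layout).unwrap();
//     builder.insert_field(bx, FIRST_VARIANT, FieldIdx::ZERO, a);
//     builder.insert_field(bx, FIRST_VARIANT, FieldIdx::from_u32(1), b);
//     let operand = builder.build(bx.cx());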

impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, Either<V, abi::Scalar>> {
    pub(crate) fn insert_field<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        &mut self,
        bx: &mut Bx,
        v: VariantIdx,
        f: FieldIdx,
        operand: OperandRef<'tcx, V>,
    ) {
        let (expect_zst, is_zero_offset) = if let abi::FieldsShape::Primitive = self.layout.fields {
            // The other branch looking at field layouts ICEs for primitives,
            // so we need to handle them separately.
            // Multiple fields are possible for cases such as aggregating
            // a thin pointer, where the second field is the unit.
            assert!(!self.layout.is_zst());
            assert_eq!(v, FIRST_VARIANT);
            let first_field = f == FieldIdx::ZERO;
            (!first_field, first_field)
        } else {
            let variant_layout = self.layout.for_variant(bx.cx(), v);
            let field_layout = variant_layout.field(bx.cx(), f.as_usize());
            let field_offset = variant_layout.fields.offset(f.as_usize());
            (field_layout.is_zst(), field_offset == Size::ZERO)
        };

        let mut update = |tgt: &mut Either<V, abi::Scalar>, src, from_scalar| {
            let to_scalar = tgt.unwrap_right();
            let imm = transmute_scalar(bx, src, from_scalar, to_scalar);
            *tgt = Either::Left(imm);
        };

        match (operand.val, operand.layout.backend_repr) {
            (OperandValue::ZeroSized, _) if expect_zst => {}
            (OperandValue::Immediate(v), BackendRepr::Scalar(from_scalar)) => match &mut self.val {
                OperandValue::Immediate(val @ Either::Right(_)) if is_zero_offset => {
                    update(val, v, from_scalar);
                }
                OperandValue::Pair(fst @ Either::Right(_), _) if is_zero_offset => {
                    update(fst, v, from_scalar);
                }
                OperandValue::Pair(_, snd @ Either::Right(_)) if !is_zero_offset => {
                    update(snd, v, from_scalar);
                }
                _ => bug!("Tried to insert {operand:?} into {v:?}.{f:?} of {self:?}"),
            },
            (OperandValue::Pair(a, b), BackendRepr::ScalarPair(from_sa, from_sb)) => {
                match &mut self.val {
                    OperandValue::Pair(fst @ Either::Right(_), snd @ Either::Right(_)) => {
                        update(fst, a, from_sa);
                        update(snd, b, from_sb);
                    }
                    _ => bug!("Tried to insert {operand:?} into {v:?}.{f:?} of {self:?}"),
                }
            }
            _ => bug!("Unsupported operand {operand:?} inserting into {v:?}.{f:?} of {self:?}"),
        }
    }

    /// Insert the immediate value `imm` for field `f` in the *type itself*,
    /// rather than into one of the variants.
    ///
    /// Most things want [`OperandRef::insert_field`] instead, but this one is
    /// necessary for writing things like enum tags that aren't in any variant.
    pub(super) fn insert_imm(&mut self, f: FieldIdx, imm: V) {
        let field_offset = self.layout.fields.offset(f.as_usize());
        let is_zero_offset = field_offset == Size::ZERO;
        match &mut self.val {
            OperandValue::Immediate(val @ Either::Right(_)) if is_zero_offset => {
                *val = Either::Left(imm);
            }
            OperandValue::Pair(fst @ Either::Right(_), _) if is_zero_offset => {
                *fst = Either::Left(imm);
            }
            OperandValue::Pair(_, snd @ Either::Right(_)) if !is_zero_offset => {
                *snd = Either::Left(imm);
            }
            _ => bug!("Tried to insert {imm:?} into field {f:?} of {self:?}"),
        }
    }

    /// After having set all necessary fields, this converts the
    /// `OperandValue<Either<V, _>>` (as obtained from [`OperandRef::builder`])
    /// to the normal `OperandValue<V>`.
    ///
    /// ICEs if any required fields were not set.
    pub fn build(&self, cx: &impl CodegenMethods<'tcx, Value = V>) -> OperandRef<'tcx, V> {
        let OperandRef { val, layout } = *self;

        // For something like `Option::<u32>::None`, it's expected that the
        // payload scalar will not actually have been set, so this converts
        // unset scalars to corresponding `undef` values so long as the scalar
        // from the layout allows uninit.
        let unwrap = |r: Either<V, abi::Scalar>| match r {
            Either::Left(v) => v,
            Either::Right(s) if s.is_uninit_valid() => {
                let bty = cx.type_from_scalar(s);
                cx.const_undef(bty)
            }
            Either::Right(_) => bug!("OperandRef::build called while fields are missing {self:?}"),
        };

        let val = match val {
            OperandValue::ZeroSized => OperandValue::ZeroSized,
            OperandValue::Immediate(v) => OperandValue::Immediate(unwrap(v)),
            OperandValue::Pair(a, b) => OperandValue::Pair(unwrap(a), unwrap(b)),
            OperandValue::Ref(_) => bug!(),
        };
        OperandRef { val, layout }
    }
}

impl<'a, 'tcx, V: CodegenObject> OperandValue<V> {
    /// Returns an `OperandValue` that's generally UB to use in any way.
    ///
    /// Depending on the `layout`, returns `ZeroSized` for ZSTs, an `Immediate` or
    /// `Pair` containing poison value(s), or a `Ref` containing a poison pointer.
    ///
    /// Supports sized types only.
    pub fn poison<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        bx: &mut Bx,
        layout: TyAndLayout<'tcx>,
    ) -> OperandValue<V> {
        assert!(layout.is_sized());
        if layout.is_zst() {
            OperandValue::ZeroSized
        } else if bx.cx().is_backend_immediate(layout) {
            let ibty = bx.cx().immediate_backend_type(layout);
            OperandValue::Immediate(bx.const_poison(ibty))
        } else if bx.cx().is_backend_scalar_pair(layout) {
            let ibty0 = bx.cx().scalar_pair_element_backend_type(layout, 0, true);
            let ibty1 = bx.cx().scalar_pair_element_backend_type(layout, 1, true);
            OperandValue::Pair(bx.const_poison(ibty0), bx.const_poison(ibty1))
        } else {
            let ptr = bx.cx().type_ptr();
            OperandValue::Ref(PlaceValue::new_sized(bx.const_poison(ptr), layout.align.abi))
        }
    }

    pub fn store<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        self,
        bx: &mut Bx,
        dest: PlaceRef<'tcx, V>,
    ) {
        self.store_with_flags(bx, dest, MemFlags::empty());
    }

    pub fn volatile_store<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        self,
        bx: &mut Bx,
        dest: PlaceRef<'tcx, V>,
    ) {
        self.store_with_flags(bx, dest, MemFlags::VOLATILE);
    }

    pub fn unaligned_volatile_store<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        self,
        bx: &mut Bx,
        dest: PlaceRef<'tcx, V>,
    ) {
        self.store_with_flags(bx, dest, MemFlags::VOLATILE | MemFlags::UNALIGNED);
    }

    pub fn nontemporal_store<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        self,
        bx: &mut Bx,
        dest: PlaceRef<'tcx, V>,
    ) {
        self.store_with_flags(bx, dest, MemFlags::NONTEMPORAL);
    }

    pub(crate) fn store_with_flags<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        self,
        bx: &mut Bx,
        dest: PlaceRef<'tcx, V>,
        flags: MemFlags,
    ) {
        debug!("OperandValue::store_with_flags: operand={:?}, dest={:?}", self, dest);
        match self {
            OperandValue::ZeroSized => {
                // Avoid generating stores of zero-sized values, because the only way to have a
                // zero-sized value is through `undef`/`poison`, and the store itself is useless.
            }
            OperandValue::Ref(val) => {
                assert!(dest.layout.is_sized(), "cannot directly store unsized values");
                if val.llextra.is_some() {
                    bug!("cannot directly store unsized values");
                }
                bx.typed_place_copy_with_flags(dest.val, val, dest.layout, flags);
            }
            OperandValue::Immediate(s) => {
                let val = bx.from_immediate(s);
                bx.store_with_flags(val, dest.val.llval, dest.val.align, flags);
            }
            OperandValue::Pair(a, b) => {
                let BackendRepr::ScalarPair(a_scalar, b_scalar) = dest.layout.backend_repr else {
                    bug!("store_with_flags: invalid ScalarPair layout: {:#?}", dest.layout);
                };
                let b_offset = a_scalar.size(bx).align_to(b_scalar.align(bx).abi);

                let val = bx.from_immediate(a);
                let align = dest.val.align;
                bx.store_with_flags(val, dest.val.llval, align, flags);

                let llptr = bx.inbounds_ptradd(dest.val.llval, bx.const_usize(b_offset.bytes()));
                let val = bx.from_immediate(b);
                let align = dest.val.align.restrict_for_offset(b_offset);
                bx.store_with_flags(val, llptr, align, flags);
            }
        }
    }
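
    // For a concrete sketch of the `Pair` case above: storing a `&[u8]` on a
    // 64-bit target writes the data pointer at offset 0 and the length at
    // `b_offset == 8`, each as a separate scalar store.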

    pub fn store_unsized<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        self,
        bx: &mut Bx,
        indirect_dest: PlaceRef<'tcx, V>,
    ) {
        debug!("OperandValue::store_unsized: operand={:?}, indirect_dest={:?}", self, indirect_dest);
        // `indirect_dest` must have `*mut T` type. We extract `T` out of it.
        let unsized_ty = indirect_dest
            .layout
            .ty
            .builtin_deref(true)
            .unwrap_or_else(|| bug!("indirect_dest has non-pointer type: {:?}", indirect_dest));

        let OperandValue::Ref(PlaceValue { llval: llptr, llextra: Some(llextra), .. }) = self
        else {
            bug!("store_unsized called with a sized value (or with an extern type)")
        };

        // Allocate an appropriate region on the stack, and copy the value into it. Since alloca
        // doesn't support dynamic alignment, we allocate an extra align - 1 bytes, and align the
        // pointer manually.
        let (size, align) = size_of_val::size_and_align_of_dst(bx, unsized_ty, Some(llextra));
        let one = bx.const_usize(1);
        let align_minus_1 = bx.sub(align, one);
        let size_extra = bx.add(size, align_minus_1);
        let min_align = Align::ONE;
        let alloca = bx.dynamic_alloca(size_extra, min_align);
        let address = bx.ptrtoint(alloca, bx.type_isize());
        let neg_address = bx.neg(address);
        let offset = bx.and(neg_address, align_minus_1);
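        // The masking computes how far `address` is from the next multiple of
        // `align`: e.g. (illustrative numbers) with `align == 8` and an
        // address ending in 5, `offset == (-5) & 7 == 3`, and 5 + 3 == 8.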
        let dst = bx.inbounds_ptradd(alloca, offset);
        bx.memcpy(dst, min_align, llptr, min_align, size, MemFlags::empty());

        // Store the allocated region and the extra to the indirect place.
        let indirect_operand = OperandValue::Pair(dst, llextra);
        indirect_operand.store(bx, indirect_dest);
    }
}

impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
    fn maybe_codegen_consume_direct(
        &mut self,
        bx: &mut Bx,
        place_ref: mir::PlaceRef<'tcx>,
    ) -> Option<OperandRef<'tcx, Bx::Value>> {
        debug!("maybe_codegen_consume_direct(place_ref={:?})", place_ref);

        match self.locals[place_ref.local] {
            LocalRef::Operand(mut o) => {
                // Moves out of scalar and scalar pair fields are trivial.
                for elem in place_ref.projection.iter() {
                    match elem {
                        mir::ProjectionElem::Field(f, _) => {
                            assert!(
                                !o.layout.ty.is_any_ptr(),
859                                "Bad PlaceRef: destructing pointers should use cast/PtrMetadata, \
860                                 but tried to access field {f:?} of pointer {o:?}",
                            );
                            o = o.extract_field(self, bx, f.index());
                        }
                        mir::ProjectionElem::Index(_)
                        | mir::ProjectionElem::ConstantIndex { .. } => {
                            // ZSTs don't require any actual memory access.
                            // FIXME(eddyb) deduplicate this with the identical
                            // checks in `codegen_consume` and `extract_field`.
                            let elem = o.layout.field(bx.cx(), 0);
                            if elem.is_zst() {
                                o = OperandRef::zero_sized(elem);
                            } else {
                                return None;
                            }
                        }
                        _ => return None,
                    }
                }

                Some(o)
            }
            LocalRef::PendingOperand => {
                bug!("use of {:?} before def", place_ref);
            }
            LocalRef::Place(..) | LocalRef::UnsizedPlace(..) => {
                // watch out for locals that do not have an
                // alloca; they are handled somewhat differently
                None
            }
        }
    }

    pub fn codegen_consume(
        &mut self,
        bx: &mut Bx,
        place_ref: mir::PlaceRef<'tcx>,
    ) -> OperandRef<'tcx, Bx::Value> {
        debug!("codegen_consume(place_ref={:?})", place_ref);

        let ty = self.monomorphized_place_ty(place_ref);
        let layout = bx.cx().layout_of(ty);

        // ZSTs don't require any actual memory access.
        if layout.is_zst() {
            return OperandRef::zero_sized(layout);
        }

        if let Some(o) = self.maybe_codegen_consume_direct(bx, place_ref) {
            return o;
        }

        // For most places, consuming them just means loading the value
        // out from their home.
        let place = self.codegen_place(bx, place_ref);
        bx.load_operand(place)
    }

    pub fn codegen_operand(
        &mut self,
        bx: &mut Bx,
        operand: &mir::Operand<'tcx>,
    ) -> OperandRef<'tcx, Bx::Value> {
        debug!("codegen_operand(operand={:?})", operand);

        match *operand {
            mir::Operand::Copy(ref place) | mir::Operand::Move(ref place) => {
                self.codegen_consume(bx, place.as_ref())
            }

            mir::Operand::Constant(ref constant) => {
                let constant_ty = self.monomorphize(constant.ty());
                // Most SIMD vector constants should be passed as immediates.
                // (In particular, some intrinsics really rely on this.)
                if constant_ty.is_simd() {
                    // However, some SIMD types do not actually use the vector ABI
                    // (in particular, packed SIMD types do not). Ensure we exclude those.
                    let layout = bx.layout_of(constant_ty);
                    if let BackendRepr::SimdVector { .. } = layout.backend_repr {
                        let (llval, ty) = self.immediate_const_vector(bx, constant);
                        return OperandRef {
                            val: OperandValue::Immediate(llval),
                            layout: bx.layout_of(ty),
                        };
                    }
                }
                self.eval_mir_constant_to_operand(bx, constant)
            }
        }
    }
}