rustc_const_eval/interpret/intrinsics.rs

//! Intrinsics and other functions that the interpreter executes without
//! looking at their MIR. Intrinsics/functions supported here are shared by CTFE
//! and miri.

mod simd;

use std::assert_matches::assert_matches;

use rustc_abi::{FieldIdx, HasDataLayout, Size, VariantIdx};
use rustc_apfloat::ieee::{Double, Half, Quad, Single};
use rustc_middle::mir::interpret::{CTFE_ALLOC_SALT, read_target_uint, write_target_uint};
use rustc_middle::mir::{self, BinOp, ConstValue, NonDivergingIntrinsic};
use rustc_middle::ty::layout::TyAndLayout;
use rustc_middle::ty::{FloatTy, Ty, TyCtxt};
use rustc_middle::{bug, span_bug, ty};
use rustc_span::{Symbol, sym};
use tracing::trace;

use super::memory::MemoryKind;
use super::util::ensure_monomorphic_enough;
use super::{
    AllocId, CheckInAllocMsg, ImmTy, InterpCx, InterpResult, Machine, OpTy, PlaceTy, Pointer,
    PointerArithmetic, Provenance, Scalar, err_ub_custom, err_unsup_format, interp_ok, throw_inval,
    throw_ub_custom, throw_ub_format,
};
use crate::fluent_generated as fluent;

#[derive(Copy, Clone, Debug, PartialEq, Eq)]
enum MulAddType {
    /// Used with `fma` and `simd_fma`; always uses a fused multiply-add.
    Fused,
    /// Used with `fmuladd` and `simd_relaxed_fma`; nondeterministically decides
    /// whether to use a fused multiply-add or a separate multiply-then-add.
    Nondeterministic,
}

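/// Which IEEE min/max operation to perform. The variants differ in how they treat
/// NaN and signed zero; as a sketch of the stable surface operations for the
/// `*Num` flavors (the `Minimum`/`Maximum` flavors instead propagate NaN):
///
/// ```
/// assert_eq!(f32::min(f32::NAN, 1.0), 1.0); // MinNum: the non-NaN input wins
/// assert_eq!(f32::max(1.0, f32::NAN), 1.0); // MaxNum: likewise
/// ```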
#[derive(Copy, Clone)]
pub(crate) enum MinMax {
    /// The IEEE `Minimum` operation - see `f32::minimum` etc.
    /// In particular, `-0.0` is considered smaller than `+0.0`, and
    /// if either input is NaN, the result is NaN.
    Minimum,
    /// The IEEE `MinNum` operation - see `f32::min` etc.
    /// In particular, if the inputs are `-0.0` and `+0.0`, the result is non-deterministic,
    /// and if one argument is NaN, the other one is returned.
    MinNum,
    /// The IEEE `Maximum` operation - see `f32::maximum` etc.
    /// In particular, `-0.0` is considered smaller than `+0.0`, and
    /// if either input is NaN, the result is NaN.
    Maximum,
    /// The IEEE `MaxNum` operation - see `f32::max` etc.
    /// In particular, if the inputs are `-0.0` and `+0.0`, the result is non-deterministic,
    /// and if one argument is NaN, the other one is returned.
    MaxNum,
}

/// Returns the `AllocId` (and byte length) of a deduplicated allocation containing
/// an absolute-path representation of the given type's name.
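/// Sketch of the surface behavior this backs (the exact rendering of the path is
/// not a stable guarantee of the surface API):
///
/// ```
/// assert!(std::any::type_name::<Option<u32>>().contains("Option<u32>"));
/// ```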
pub(crate) fn alloc_type_name<'tcx>(tcx: TyCtxt<'tcx>, ty: Ty<'tcx>) -> (AllocId, u64) {
    let path = crate::util::type_name(tcx, ty);
    let bytes = path.into_bytes();
    let len = bytes.len().try_into().unwrap();
    (tcx.allocate_bytes_dedup(bytes, CTFE_ALLOC_SALT), len)
}

impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
    /// Generates a value of `TypeId` for `ty` in-place.
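    /// `TypeId` is a newtype around an array of pointer-sized hash chunks, and each
    /// chunk additionally carries provenance identifying the type. As a sketch of
    /// the surface behavior this ultimately backs:
    ///
    /// ```
    /// use std::any::TypeId;
    /// assert_eq!(TypeId::of::<u32>(), TypeId::of::<u32>());
    /// assert_ne!(TypeId::of::<u32>(), TypeId::of::<i32>());
    /// ```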
    fn write_type_id(
        &mut self,
        ty: Ty<'tcx>,
        dest: &PlaceTy<'tcx, M::Provenance>,
    ) -> InterpResult<'tcx, ()> {
        let tcx = self.tcx;
        let type_id_hash = tcx.type_id_hash(ty).as_u128();
        let op = self.const_val_to_op(
            ConstValue::Scalar(Scalar::from_u128(type_id_hash)),
            tcx.types.u128,
            None,
        )?;
        self.copy_op_allow_transmute(&op, dest)?;

        // Give each pointer-sized chunk provenance that knows about the type id.
        // Here we rely on `TypeId` being a newtype around an array of pointers, so we
        // first project to its only field and then the array elements.
        let alloc_id = tcx.reserve_and_set_type_id_alloc(ty);
        let arr = self.project_field(dest, FieldIdx::ZERO)?;
        let mut elem_iter = self.project_array_fields(&arr)?;
        while let Some((_, elem)) = elem_iter.next(self)? {
            // Decorate this part of the hash with provenance; leave the integer part unchanged.
            let hash_fragment = self.read_scalar(&elem)?.to_target_usize(&tcx)?;
            let ptr = Pointer::new(alloc_id.into(), Size::from_bytes(hash_fragment));
            let ptr = self.global_root_pointer(ptr)?;
            let val = Scalar::from_pointer(ptr, &tcx);
            self.write_scalar(val, &elem)?;
        }
        interp_ok(())
    }

    /// Read a value of type `TypeId`, returning the type it represents.
    pub(crate) fn read_type_id(
        &self,
        op: &OpTy<'tcx, M::Provenance>,
    ) -> InterpResult<'tcx, Ty<'tcx>> {
        // `TypeId` is a newtype around an array of pointers. All pointers must have the same
        // provenance, and that provenance represents the type.
        let ptr_size = self.pointer_size().bytes_usize();
        let arr = self.project_field(op, FieldIdx::ZERO)?;

        let mut ty_and_hash = None;
        let mut elem_iter = self.project_array_fields(&arr)?;
        while let Some((idx, elem)) = elem_iter.next(self)? {
            let elem = self.read_pointer(&elem)?;
            let (elem_ty, elem_hash) = self.get_ptr_type_id(elem)?;
            // If this is the first element, remember the type and its hash.
            // If this is not the first element, ensure it is consistent with the previous ones.
            let full_hash = match ty_and_hash {
                None => {
                    let hash = self.tcx.type_id_hash(elem_ty).as_u128();
                    let mut hash_bytes = [0u8; 16];
                    write_target_uint(self.data_layout().endian, &mut hash_bytes, hash).unwrap();
                    ty_and_hash = Some((elem_ty, hash_bytes));
                    hash_bytes
                }
                Some((ty, hash_bytes)) => {
                    if ty != elem_ty {
                        throw_ub_format!(
                            "invalid `TypeId` value: not all bytes carry the same type id metadata"
                        );
                    }
                    hash_bytes
                }
            };
            // Ensure the elem_hash matches the corresponding part of the full hash.
            let hash_frag = &full_hash[(idx as usize) * ptr_size..][..ptr_size];
            if read_target_uint(self.data_layout().endian, hash_frag).unwrap() != elem_hash.into() {
                throw_ub_format!(
                    "invalid `TypeId` value: the hash does not match the type id metadata"
                );
            }
        }

        interp_ok(ty_and_hash.unwrap().0)
    }

    /// Returns `true` if emulation happened.
    /// Here we implement the intrinsics that are common to all Miri instances; individual machines
    /// can add their own intrinsic handling.
    pub fn eval_intrinsic(
        &mut self,
        instance: ty::Instance<'tcx>,
        args: &[OpTy<'tcx, M::Provenance>],
        dest: &PlaceTy<'tcx, M::Provenance>,
        ret: Option<mir::BasicBlock>,
    ) -> InterpResult<'tcx, bool> {
        let instance_args = instance.args;
        let intrinsic_name = self.tcx.item_name(instance.def_id());

        if intrinsic_name.as_str().starts_with("simd_") {
            return self.eval_simd_intrinsic(intrinsic_name, instance_args, args, dest, ret);
        }

        let tcx = self.tcx.tcx;

        match intrinsic_name {
            sym::type_name => {
                let tp_ty = instance.args.type_at(0);
                ensure_monomorphic_enough(tcx, tp_ty)?;
                let (alloc_id, meta) = alloc_type_name(tcx, tp_ty);
                let val = ConstValue::Slice { alloc_id, meta };
                let val = self.const_val_to_op(val, dest.layout.ty, Some(dest.layout))?;
                self.copy_op(&val, dest)?;
            }
            sym::needs_drop => {
                let tp_ty = instance.args.type_at(0);
                ensure_monomorphic_enough(tcx, tp_ty)?;
                let val = ConstValue::from_bool(tp_ty.needs_drop(tcx, self.typing_env));
                let val = self.const_val_to_op(val, tcx.types.bool, Some(dest.layout))?;
                self.copy_op(&val, dest)?;
            }
            sym::type_id => {
                let tp_ty = instance.args.type_at(0);
                ensure_monomorphic_enough(tcx, tp_ty)?;
                self.write_type_id(tp_ty, dest)?;
            }
            sym::type_id_eq => {
                let a_ty = self.read_type_id(&args[0])?;
                let b_ty = self.read_type_id(&args[1])?;
                self.write_scalar(Scalar::from_bool(a_ty == b_ty), dest)?;
            }
            sym::size_of => {
                let tp_ty = instance.args.type_at(0);
                let layout = self.layout_of(tp_ty)?;
                if !layout.is_sized() {
                    span_bug!(self.cur_span(), "unsized type for `size_of`");
                }
                let val = layout.size.bytes();
                self.write_scalar(Scalar::from_target_usize(val, self), dest)?;
            }
            sym::align_of => {
                let tp_ty = instance.args.type_at(0);
                let layout = self.layout_of(tp_ty)?;
                if !layout.is_sized() {
                    span_bug!(self.cur_span(), "unsized type for `align_of`");
                }
                let val = layout.align.bytes();
                self.write_scalar(Scalar::from_target_usize(val, self), dest)?;
            }
            sym::offset_of => {
                let tp_ty = instance.args.type_at(0);

                let variant = self.read_scalar(&args[0])?.to_u32()?;
                let field = self.read_scalar(&args[1])?.to_u32()? as usize;

                let layout = self.layout_of(tp_ty)?;
                let cx = ty::layout::LayoutCx::new(*self.tcx, self.typing_env);

                let layout = layout.for_variant(&cx, VariantIdx::from_u32(variant));
                let offset = layout.fields.offset(field).bytes();

                self.write_scalar(Scalar::from_target_usize(offset, self), dest)?;
            }
            sym::variant_count => {
                let tp_ty = instance.args.type_at(0);
                let ty = match tp_ty.kind() {
                    // Pattern types have the same number of variants as their base type.
                    // Even if the pattern restricts which values are valid, the excluded
                    // variants are essentially just uninhabited; `Result<(), !>` still has
                    // two variants according to `variant_count`.
                    ty::Pat(base, _) => *base,
                    _ => tp_ty,
                };
                let val = match ty.kind() {
                    // Correctly handles non-monomorphic calls, so there is no need for
                    // `ensure_monomorphic_enough`.
                    ty::Adt(adt, _) => {
                        ConstValue::from_target_usize(adt.variants().len() as u64, &tcx)
                    }
                    ty::Alias(..) | ty::Param(_) | ty::Placeholder(_) | ty::Infer(_) => {
                        throw_inval!(TooGeneric)
                    }
                    ty::Pat(..) => unreachable!(),
                    ty::Bound(_, _) => bug!("bound ty during ctfe"),
                    ty::Bool
                    | ty::Char
                    | ty::Int(_)
                    | ty::Uint(_)
                    | ty::Float(_)
                    | ty::Foreign(_)
                    | ty::Str
                    | ty::Array(_, _)
                    | ty::Slice(_)
                    | ty::RawPtr(_, _)
                    | ty::Ref(_, _, _)
                    | ty::FnDef(_, _)
                    | ty::FnPtr(..)
                    | ty::Dynamic(_, _)
                    | ty::Closure(_, _)
                    | ty::CoroutineClosure(_, _)
                    | ty::Coroutine(_, _)
                    | ty::CoroutineWitness(..)
                    | ty::UnsafeBinder(_)
                    | ty::Never
                    | ty::Tuple(_)
                    | ty::Error(_) => ConstValue::from_target_usize(0u64, &tcx),
                };
                let val = self.const_val_to_op(val, dest.layout.ty, Some(dest.layout))?;
                self.copy_op(&val, dest)?;
            }

            sym::caller_location => {
                let span = self.find_closest_untracked_caller_location();
                let val = self.tcx.span_as_caller_location(span);
                let val =
                    self.const_val_to_op(val, self.tcx.caller_location_ty(), Some(dest.layout))?;
                self.copy_op(&val, dest)?;
            }

            sym::align_of_val | sym::size_of_val => {
                // Avoid `deref_pointer` -- this is not a deref, the ptr does not have to be
                // dereferenceable!
                let place = self.ref_to_mplace(&self.read_immediate(&args[0])?)?;
                let (size, align) = self
                    .size_and_align_of_val(&place)?
                    .ok_or_else(|| err_unsup_format!("`extern type` does not have known layout"))?;

                let result = match intrinsic_name {
                    sym::align_of_val => align.bytes(),
                    sym::size_of_val => size.bytes(),
                    _ => bug!(),
                };

                self.write_scalar(Scalar::from_target_usize(result, self), dest)?;
            }

            sym::fadd_algebraic
            | sym::fsub_algebraic
            | sym::fmul_algebraic
            | sym::fdiv_algebraic
            | sym::frem_algebraic => {
                let a = self.read_immediate(&args[0])?;
                let b = self.read_immediate(&args[1])?;

                let op = match intrinsic_name {
                    sym::fadd_algebraic => BinOp::Add,
                    sym::fsub_algebraic => BinOp::Sub,
                    sym::fmul_algebraic => BinOp::Mul,
                    sym::fdiv_algebraic => BinOp::Div,
                    sym::frem_algebraic => BinOp::Rem,

                    _ => bug!(),
                };

                let res = self.binary_op(op, &a, &b)?;
                // `binary_op` already called `generate_nan` if needed.
                let res = M::apply_float_nondet(self, res)?;
                self.write_immediate(*res, dest)?;
            }

            sym::ctpop
            | sym::cttz
            | sym::cttz_nonzero
            | sym::ctlz
            | sym::ctlz_nonzero
            | sym::bswap
            | sym::bitreverse => {
                let ty = instance_args.type_at(0);
                let layout = self.layout_of(ty)?;
                let val = self.read_scalar(&args[0])?;

                let out_val = self.numeric_intrinsic(intrinsic_name, val, layout, dest.layout)?;
                self.write_scalar(out_val, dest)?;
            }
            sym::saturating_add | sym::saturating_sub => {
                let l = self.read_immediate(&args[0])?;
                let r = self.read_immediate(&args[1])?;
                let val = self.saturating_arith(
                    if intrinsic_name == sym::saturating_add { BinOp::Add } else { BinOp::Sub },
                    &l,
                    &r,
                )?;
                self.write_scalar(val, dest)?;
            }
            sym::discriminant_value => {
                let place = self.deref_pointer(&args[0])?;
                let variant = self.read_discriminant(&place)?;
                let discr = self.discriminant_for_variant(place.layout.ty, variant)?;
                self.write_immediate(*discr, dest)?;
            }
            sym::exact_div => {
                let l = self.read_immediate(&args[0])?;
                let r = self.read_immediate(&args[1])?;
                self.exact_div(&l, &r, dest)?;
            }
            sym::copy => {
                self.copy_intrinsic(&args[0], &args[1], &args[2], /*nonoverlapping*/ false)?;
            }
            sym::write_bytes => {
                self.write_bytes_intrinsic(&args[0], &args[1], &args[2], "write_bytes")?;
            }
            sym::compare_bytes => {
                let result = self.compare_bytes_intrinsic(&args[0], &args[1], &args[2])?;
                self.write_scalar(result, dest)?;
            }
            sym::arith_offset => {
                let ptr = self.read_pointer(&args[0])?;
                let offset_count = self.read_target_isize(&args[1])?;
                let pointee_ty = instance_args.type_at(0);

                let pointee_size = i64::try_from(self.layout_of(pointee_ty)?.size.bytes()).unwrap();
                let offset_bytes = offset_count.wrapping_mul(pointee_size);
                let offset_ptr = ptr.wrapping_signed_offset(offset_bytes, self);
                self.write_pointer(offset_ptr, dest)?;
            }
            sym::ptr_offset_from | sym::ptr_offset_from_unsigned => {
                let a = self.read_pointer(&args[0])?;
                let b = self.read_pointer(&args[1])?;

                let usize_layout = self.layout_of(self.tcx.types.usize)?;
                let isize_layout = self.layout_of(self.tcx.types.isize)?;

                // Get offsets for both that are at least relative to the same base.
                // With `OFFSET_IS_ADDR` this is trivial; without it we need either
                // two integers or two pointers into the same allocation.
                let (a_offset, b_offset, is_addr) = if M::Provenance::OFFSET_IS_ADDR {
                    (a.addr().bytes(), b.addr().bytes(), /*is_addr*/ true)
                } else {
                    match (self.ptr_try_get_alloc_id(a, 0), self.ptr_try_get_alloc_id(b, 0)) {
                        (Err(a), Err(b)) => {
                            // Neither pointer points to an allocation, so they are both absolute.
                            (a, b, /*is_addr*/ true)
                        }
                        (Ok((a_alloc_id, a_offset, _)), Ok((b_alloc_id, b_offset, _)))
                            if a_alloc_id == b_alloc_id =>
                        {
                            // Found allocation for both, and it's the same.
                            // Use these offsets for distance calculation.
                            (a_offset.bytes(), b_offset.bytes(), /*is_addr*/ false)
                        }
                        _ => {
                            // Not into the same allocation -- this is UB.
                            throw_ub_custom!(
                                fluent::const_eval_offset_from_different_allocations,
                                name = intrinsic_name,
                            );
                        }
                    }
                };

                // Compute distance: a - b.
                let dist = {
                    // Addresses are unsigned, so this is a `usize` computation. We have to do the
                    // overflow check separately anyway.
                    let (val, overflowed) = {
                        let a_offset = ImmTy::from_uint(a_offset, usize_layout);
                        let b_offset = ImmTy::from_uint(b_offset, usize_layout);
                        self.binary_op(BinOp::SubWithOverflow, &a_offset, &b_offset)?
                            .to_scalar_pair()
                    };
                    if overflowed.to_bool()? {
                        // a < b
                        if intrinsic_name == sym::ptr_offset_from_unsigned {
                            throw_ub_custom!(
                                fluent::const_eval_offset_from_unsigned_overflow,
                                a_offset = a_offset,
                                b_offset = b_offset,
                                is_addr = is_addr,
                            );
                        }
                        // The signed form of the intrinsic allows this. If we interpret the
                        // difference as isize, we'll get the proper signed difference. If that
                        // seems *positive* or equal to isize::MIN, they were more than isize::MAX apart.
                        let dist = val.to_target_isize(self)?;
                        if dist >= 0 || i128::from(dist) == self.pointer_size().signed_int_min() {
                            throw_ub_custom!(
                                fluent::const_eval_offset_from_underflow,
                                name = intrinsic_name,
                            );
                        }
                        dist
                    } else {
                        // b <= a
                        let dist = val.to_target_isize(self)?;
                        // If converting to isize produced a *negative* result, we had an overflow
                        // because they were more than isize::MAX apart.
                        if dist < 0 {
                            throw_ub_custom!(
                                fluent::const_eval_offset_from_overflow,
                                name = intrinsic_name,
                            );
                        }
                        dist
                    }
                };

                // Check that the memory between them is dereferenceable at all, starting from the
                // origin pointer: `dist` is `a - b`, so it is based on `b`.
                self.check_ptr_access_signed(b, dist, CheckInAllocMsg::Dereferenceable)
                    .map_err_kind(|_| {
                        // This could mean they point to different allocations, or they point to the same allocation
                        // but not the entire range between the pointers is in-bounds.
                        if let Ok((a_alloc_id, ..)) = self.ptr_try_get_alloc_id(a, 0)
                            && let Ok((b_alloc_id, ..)) = self.ptr_try_get_alloc_id(b, 0)
                            && a_alloc_id == b_alloc_id
                        {
                            err_ub_custom!(
                                fluent::const_eval_offset_from_out_of_bounds,
                                name = intrinsic_name,
                            )
                        } else {
                            err_ub_custom!(
                                fluent::const_eval_offset_from_different_allocations,
                                name = intrinsic_name,
                            )
                        }
                    })?;
                // Then check that this is also dereferenceable from `a`. This ensures that they are
                // derived from the same allocation.
                self.check_ptr_access_signed(
                    a,
                    dist.checked_neg().unwrap(), // i64::MIN is impossible as no allocation can be that large
                    CheckInAllocMsg::Dereferenceable,
                )
                .map_err_kind(|_| {
                    // Make the error more specific.
                    err_ub_custom!(
                        fluent::const_eval_offset_from_different_allocations,
                        name = intrinsic_name,
                    )
                })?;

                // Perform division by size to compute return value.
                let ret_layout = if intrinsic_name == sym::ptr_offset_from_unsigned {
                    assert!(0 <= dist && dist <= self.target_isize_max());
                    usize_layout
                } else {
                    assert!(self.target_isize_min() <= dist && dist <= self.target_isize_max());
                    isize_layout
                };
                let pointee_layout = self.layout_of(instance_args.type_at(0))?;
                // If `ret_layout` is unsigned, we checked above that the distance is
                // non-negative, so we are good.
                let val = ImmTy::from_int(dist, ret_layout);
                let size = ImmTy::from_int(pointee_layout.size.bytes(), ret_layout);
                self.exact_div(&val, &size, dest)?;
            }

            sym::black_box => {
                // This just returns its argument.
                self.copy_op(&args[0], dest)?;
            }
            sym::raw_eq => {
                let result = self.raw_eq_intrinsic(&args[0], &args[1])?;
                self.write_scalar(result, dest)?;
            }
            sym::typed_swap_nonoverlapping => {
                self.typed_swap_nonoverlapping_intrinsic(&args[0], &args[1])?;
            }

            sym::vtable_size => {
                let ptr = self.read_pointer(&args[0])?;
                // `None` because we don't know which trait to expect here; any vtable is okay.
                let (size, _align) = self.get_vtable_size_and_align(ptr, None)?;
                self.write_scalar(Scalar::from_target_usize(size.bytes(), self), dest)?;
            }
            sym::vtable_align => {
                let ptr = self.read_pointer(&args[0])?;
                // `None` because we don't know which trait to expect here; any vtable is okay.
                let (_size, align) = self.get_vtable_size_and_align(ptr, None)?;
                self.write_scalar(Scalar::from_target_usize(align.bytes(), self), dest)?;
            }

            sym::minnumf16 => self.float_minmax_intrinsic::<Half>(args, MinMax::MinNum, dest)?,
            sym::minnumf32 => self.float_minmax_intrinsic::<Single>(args, MinMax::MinNum, dest)?,
            sym::minnumf64 => self.float_minmax_intrinsic::<Double>(args, MinMax::MinNum, dest)?,
            sym::minnumf128 => self.float_minmax_intrinsic::<Quad>(args, MinMax::MinNum, dest)?,

            sym::minimumf16 => self.float_minmax_intrinsic::<Half>(args, MinMax::Minimum, dest)?,
            sym::minimumf32 => {
                self.float_minmax_intrinsic::<Single>(args, MinMax::Minimum, dest)?
            }
            sym::minimumf64 => {
                self.float_minmax_intrinsic::<Double>(args, MinMax::Minimum, dest)?
            }
            sym::minimumf128 => self.float_minmax_intrinsic::<Quad>(args, MinMax::Minimum, dest)?,

            sym::maxnumf16 => self.float_minmax_intrinsic::<Half>(args, MinMax::MaxNum, dest)?,
            sym::maxnumf32 => self.float_minmax_intrinsic::<Single>(args, MinMax::MaxNum, dest)?,
            sym::maxnumf64 => self.float_minmax_intrinsic::<Double>(args, MinMax::MaxNum, dest)?,
            sym::maxnumf128 => self.float_minmax_intrinsic::<Quad>(args, MinMax::MaxNum, dest)?,

            sym::maximumf16 => self.float_minmax_intrinsic::<Half>(args, MinMax::Maximum, dest)?,
            sym::maximumf32 => {
                self.float_minmax_intrinsic::<Single>(args, MinMax::Maximum, dest)?
            }
            sym::maximumf64 => {
                self.float_minmax_intrinsic::<Double>(args, MinMax::Maximum, dest)?
            }
            sym::maximumf128 => self.float_minmax_intrinsic::<Quad>(args, MinMax::Maximum, dest)?,

            sym::copysignf16 => self.float_copysign_intrinsic::<Half>(args, dest)?,
            sym::copysignf32 => self.float_copysign_intrinsic::<Single>(args, dest)?,
            sym::copysignf64 => self.float_copysign_intrinsic::<Double>(args, dest)?,
            sym::copysignf128 => self.float_copysign_intrinsic::<Quad>(args, dest)?,

            sym::fabsf16 => self.float_abs_intrinsic::<Half>(args, dest)?,
            sym::fabsf32 => self.float_abs_intrinsic::<Single>(args, dest)?,
            sym::fabsf64 => self.float_abs_intrinsic::<Double>(args, dest)?,
            sym::fabsf128 => self.float_abs_intrinsic::<Quad>(args, dest)?,

            sym::floorf16 => self.float_round_intrinsic::<Half>(
                args,
                dest,
                rustc_apfloat::Round::TowardNegative,
            )?,
            sym::floorf32 => self.float_round_intrinsic::<Single>(
                args,
                dest,
                rustc_apfloat::Round::TowardNegative,
            )?,
            sym::floorf64 => self.float_round_intrinsic::<Double>(
                args,
                dest,
                rustc_apfloat::Round::TowardNegative,
            )?,
            sym::floorf128 => self.float_round_intrinsic::<Quad>(
                args,
                dest,
                rustc_apfloat::Round::TowardNegative,
            )?,

            sym::ceilf16 => self.float_round_intrinsic::<Half>(
                args,
                dest,
                rustc_apfloat::Round::TowardPositive,
            )?,
            sym::ceilf32 => self.float_round_intrinsic::<Single>(
                args,
                dest,
                rustc_apfloat::Round::TowardPositive,
            )?,
            sym::ceilf64 => self.float_round_intrinsic::<Double>(
                args,
                dest,
                rustc_apfloat::Round::TowardPositive,
            )?,
            sym::ceilf128 => self.float_round_intrinsic::<Quad>(
                args,
                dest,
                rustc_apfloat::Round::TowardPositive,
            )?,

            sym::truncf16 => {
                self.float_round_intrinsic::<Half>(args, dest, rustc_apfloat::Round::TowardZero)?
            }
            sym::truncf32 => {
                self.float_round_intrinsic::<Single>(args, dest, rustc_apfloat::Round::TowardZero)?
            }
            sym::truncf64 => {
                self.float_round_intrinsic::<Double>(args, dest, rustc_apfloat::Round::TowardZero)?
            }
            sym::truncf128 => {
                self.float_round_intrinsic::<Quad>(args, dest, rustc_apfloat::Round::TowardZero)?
            }

            sym::roundf16 => self.float_round_intrinsic::<Half>(
                args,
                dest,
                rustc_apfloat::Round::NearestTiesToAway,
            )?,
            sym::roundf32 => self.float_round_intrinsic::<Single>(
                args,
                dest,
                rustc_apfloat::Round::NearestTiesToAway,
            )?,
            sym::roundf64 => self.float_round_intrinsic::<Double>(
                args,
                dest,
                rustc_apfloat::Round::NearestTiesToAway,
            )?,
            sym::roundf128 => self.float_round_intrinsic::<Quad>(
                args,
                dest,
                rustc_apfloat::Round::NearestTiesToAway,
            )?,

            sym::round_ties_even_f16 => self.float_round_intrinsic::<Half>(
                args,
                dest,
                rustc_apfloat::Round::NearestTiesToEven,
            )?,
            sym::round_ties_even_f32 => self.float_round_intrinsic::<Single>(
                args,
                dest,
                rustc_apfloat::Round::NearestTiesToEven,
            )?,
            sym::round_ties_even_f64 => self.float_round_intrinsic::<Double>(
                args,
                dest,
                rustc_apfloat::Round::NearestTiesToEven,
            )?,
            sym::round_ties_even_f128 => self.float_round_intrinsic::<Quad>(
                args,
                dest,
                rustc_apfloat::Round::NearestTiesToEven,
            )?,
            sym::fmaf16 => self.float_muladd_intrinsic::<Half>(args, dest, MulAddType::Fused)?,
            sym::fmaf32 => self.float_muladd_intrinsic::<Single>(args, dest, MulAddType::Fused)?,
            sym::fmaf64 => self.float_muladd_intrinsic::<Double>(args, dest, MulAddType::Fused)?,
            sym::fmaf128 => self.float_muladd_intrinsic::<Quad>(args, dest, MulAddType::Fused)?,
            sym::fmuladdf16 => {
                self.float_muladd_intrinsic::<Half>(args, dest, MulAddType::Nondeterministic)?
            }
            sym::fmuladdf32 => {
                self.float_muladd_intrinsic::<Single>(args, dest, MulAddType::Nondeterministic)?
            }
            sym::fmuladdf64 => {
                self.float_muladd_intrinsic::<Double>(args, dest, MulAddType::Nondeterministic)?
            }
            sym::fmuladdf128 => {
                self.float_muladd_intrinsic::<Quad>(args, dest, MulAddType::Nondeterministic)?
            }

            // Unsupported intrinsic: skip the return_to_block below.
            _ => return interp_ok(false),
        }

        trace!("{:?}", self.dump_place(&dest.clone().into()));
        self.return_to_block(ret)?;
        interp_ok(true)
    }

    pub(super) fn eval_nondiverging_intrinsic(
        &mut self,
        intrinsic: &NonDivergingIntrinsic<'tcx>,
    ) -> InterpResult<'tcx> {
        match intrinsic {
            NonDivergingIntrinsic::Assume(op) => {
                let op = self.eval_operand(op, None)?;
                let cond = self.read_scalar(&op)?.to_bool()?;
                if !cond {
                    throw_ub_custom!(fluent::const_eval_assume_false);
                }
                interp_ok(())
            }
            NonDivergingIntrinsic::CopyNonOverlapping(mir::CopyNonOverlapping {
                count,
                src,
                dst,
            }) => {
                let src = self.eval_operand(src, None)?;
                let dst = self.eval_operand(dst, None)?;
                let count = self.eval_operand(count, None)?;
                self.copy_intrinsic(&src, &dst, &count, /* nonoverlapping */ true)
            }
        }
    }

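    /// Evaluates one of the bit-twiddling intrinsics (`ctpop`, `ctlz[_nonzero]`,
    /// `cttz[_nonzero]`, `bswap`, `bitreverse`) on an integer scalar. As a sketch,
    /// these mirror the stable integer methods:
    ///
    /// ```
    /// assert_eq!(0b0110_u8.count_ones(), 2); // ctpop
    /// assert_eq!(0b0110_u8.leading_zeros(), 5); // ctlz
    /// assert_eq!(0b0110_u8.trailing_zeros(), 1); // cttz
    /// assert_eq!(0x12_34_u16.swap_bytes(), 0x34_12); // bswap
    /// assert_eq!(0b0000_0001_u8.reverse_bits(), 0b1000_0000); // bitreverse
    /// ```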
    pub fn numeric_intrinsic(
        &self,
        name: Symbol,
        val: Scalar<M::Provenance>,
        layout: TyAndLayout<'tcx>,
        ret_layout: TyAndLayout<'tcx>,
    ) -> InterpResult<'tcx, Scalar<M::Provenance>> {
        assert!(layout.ty.is_integral(), "invalid type for numeric intrinsic: {}", layout.ty);
        let bits = val.to_bits(layout.size)?; // these operations all ignore the sign
        let extra = 128 - u128::from(layout.size.bits());
        let bits_out = match name {
            sym::ctpop => u128::from(bits.count_ones()),
            sym::ctlz_nonzero | sym::cttz_nonzero if bits == 0 => {
                throw_ub_custom!(fluent::const_eval_call_nonzero_intrinsic, name = name,);
            }
            sym::ctlz | sym::ctlz_nonzero => u128::from(bits.leading_zeros()) - extra,
            sym::cttz | sym::cttz_nonzero => u128::from((bits << extra).trailing_zeros()) - extra,
            sym::bswap => {
                assert_eq!(layout, ret_layout);
                (bits << extra).swap_bytes()
            }
            sym::bitreverse => {
                assert_eq!(layout, ret_layout);
                (bits << extra).reverse_bits()
            }
            _ => bug!("not a numeric intrinsic: {}", name),
        };
        interp_ok(Scalar::from_uint(bits_out, ret_layout.size))
    }

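    /// Performs the `exact_div` intrinsic: a division that is UB unless it divides
    /// exactly. It is used above to turn the byte distance computed by
    /// `ptr_offset_from` into an element count; as a sketch with hypothetical values:
    ///
    /// ```
    /// let (dist_bytes, elem_size) = (24i64, 8i64);
    /// assert_eq!(dist_bytes % elem_size, 0); // anything else would be UB here
    /// assert_eq!(dist_bytes / elem_size, 3);
    /// ```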
    pub fn exact_div(
        &mut self,
        a: &ImmTy<'tcx, M::Provenance>,
        b: &ImmTy<'tcx, M::Provenance>,
        dest: &PlaceTy<'tcx, M::Provenance>,
    ) -> InterpResult<'tcx> {
        assert_eq!(a.layout.ty, b.layout.ty);
        assert_matches!(a.layout.ty.kind(), ty::Int(..) | ty::Uint(..));

        // Performs an exact division, resulting in undefined behavior when
        // `x % y != 0` or `y == 0` or `x == T::MIN && y == -1`.
        // First, check x % y != 0 (or if that computation overflows).
        let rem = self.binary_op(BinOp::Rem, a, b)?;
        // sign does not matter for 0 test, so `to_bits` is fine
        if rem.to_scalar().to_bits(a.layout.size)? != 0 {
            throw_ub_custom!(
                fluent::const_eval_exact_div_has_remainder,
                a = format!("{a}"),
                b = format!("{b}")
            )
        }
        // `Rem` says this is all right, so we can let `Div` do its job.
        let res = self.binary_op(BinOp::Div, a, b)?;
        self.write_immediate(*res, dest)
    }

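    /// Computes `l <op> r` with saturation on overflow, matching the stable
    /// `saturating_add`/`saturating_sub` methods, e.g.:
    ///
    /// ```
    /// assert_eq!(250u8.saturating_add(10), u8::MAX);
    /// assert_eq!(10u8.saturating_sub(20), 0);
    /// assert_eq!(120i8.saturating_add(10), i8::MAX);
    /// assert_eq!(i8::MIN.saturating_sub(1), i8::MIN);
    /// ```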
    pub fn saturating_arith(
        &self,
        mir_op: BinOp,
        l: &ImmTy<'tcx, M::Provenance>,
        r: &ImmTy<'tcx, M::Provenance>,
    ) -> InterpResult<'tcx, Scalar<M::Provenance>> {
        assert_eq!(l.layout.ty, r.layout.ty);
        assert_matches!(l.layout.ty.kind(), ty::Int(..) | ty::Uint(..));
        assert_matches!(mir_op, BinOp::Add | BinOp::Sub);

        let (val, overflowed) =
            self.binary_op(mir_op.wrapping_to_overflowing().unwrap(), l, r)?.to_scalar_pair();
        interp_ok(if overflowed.to_bool()? {
            let size = l.layout.size;
            if l.layout.backend_repr.is_signed() {
                // For signed ints, the saturated value depends on the sign of the first
                // term, since the sign of the second term can be inferred from this and
                // the fact that the operation has overflowed (if either is 0, no
                // overflow can occur).
                let first_term: i128 = l.to_scalar().to_int(l.layout.size)?;
                if first_term >= 0 {
                    // Negative overflow not possible since the positive first term
                    // can only increase an (in range) negative term for addition
                    // or corresponding negated positive term for subtraction.
                    Scalar::from_int(size.signed_int_max(), size)
                } else {
                    // Positive overflow not possible for similar reason.
                    Scalar::from_int(size.signed_int_min(), size)
                }
            } else {
                // unsigned
                if matches!(mir_op, BinOp::Add) {
                    // max unsigned
                    Scalar::from_uint(size.unsigned_int_max(), size)
                } else {
                    // underflow to 0
                    Scalar::from_uint(0u128, size)
                }
            }
        } else {
            val
        })
    }

    /// Offsets a pointer by a given number of bytes (callers have already multiplied
    /// the element count by the pointee size), returning an error if the pointer
    /// leaves its allocation.
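    /// Sketch of the surface operation this backs (`<*const T>::offset` and friends),
    /// where staying in bounds of the allocation is the caller's obligation:
    ///
    /// ```
    /// let xs = [1u16, 2, 3, 4];
    /// let p = xs.as_ptr();
    /// // In-bounds offsets (up to one past the end) are fine; anything else is UB.
    /// assert_eq!(unsafe { *p.offset(2) }, 3);
    /// ```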
    pub fn ptr_offset_inbounds(
        &self,
        ptr: Pointer<Option<M::Provenance>>,
        offset_bytes: i64,
    ) -> InterpResult<'tcx, Pointer<Option<M::Provenance>>> {
        // The offset must be in bounds starting from `ptr`.
        self.check_ptr_access_signed(
            ptr,
            offset_bytes,
            CheckInAllocMsg::InboundsPointerArithmetic,
        )?;
        // This also implies that there is no overflow, so we are done.
        interp_ok(ptr.wrapping_signed_offset(offset_bytes, self))
    }

    /// Copy `count*size_of::<T>()` many bytes from `*src` to `*dst`.
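    /// This backs both `ptr::copy` (overlap allowed) and `ptr::copy_nonoverlapping`;
    /// a sketch of the latter:
    ///
    /// ```
    /// let src = [1u8, 2, 3];
    /// let mut dst = [0u8; 3];
    /// unsafe { std::ptr::copy_nonoverlapping(src.as_ptr(), dst.as_mut_ptr(), 3) };
    /// assert_eq!(dst, [1, 2, 3]);
    /// ```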
    pub(crate) fn copy_intrinsic(
        &mut self,
        src: &OpTy<'tcx, <M as Machine<'tcx>>::Provenance>,
        dst: &OpTy<'tcx, <M as Machine<'tcx>>::Provenance>,
        count: &OpTy<'tcx, <M as Machine<'tcx>>::Provenance>,
        nonoverlapping: bool,
    ) -> InterpResult<'tcx> {
        let count = self.read_target_usize(count)?;
        let layout = self.layout_of(src.layout.ty.builtin_deref(true).unwrap())?;
        let (size, align) = (layout.size, layout.align.abi);

        let size = self.compute_size_in_bytes(size, count).ok_or_else(|| {
            err_ub_custom!(
                fluent::const_eval_size_overflow,
                name = if nonoverlapping { "copy_nonoverlapping" } else { "copy" }
            )
        })?;

        let src = self.read_pointer(src)?;
        let dst = self.read_pointer(dst)?;

        self.check_ptr_align(src, align)?;
        self.check_ptr_align(dst, align)?;

        self.mem_copy(src, dst, size, nonoverlapping)
    }

    /// Does a *typed* swap of `*left` and `*right`.
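    /// This is, e.g., what a `mem::swap` of two typed places amounts to; a sketch
    /// of the surface behavior:
    ///
    /// ```
    /// let (mut a, mut b) = (1u8, 2u8);
    /// std::mem::swap(&mut a, &mut b);
    /// assert_eq!((a, b), (2, 1));
    /// ```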
    fn typed_swap_nonoverlapping_intrinsic(
        &mut self,
        left: &OpTy<'tcx, <M as Machine<'tcx>>::Provenance>,
        right: &OpTy<'tcx, <M as Machine<'tcx>>::Provenance>,
    ) -> InterpResult<'tcx> {
        let left = self.deref_pointer(left)?;
        let right = self.deref_pointer(right)?;
        assert_eq!(left.layout, right.layout);
        assert!(left.layout.is_sized());
        let kind = MemoryKind::Stack;
        let temp = self.allocate(left.layout, kind)?;
        self.copy_op(&left, &temp)?; // checks alignment of `left`

        // We want to always enforce non-overlapping, even if this is a scalar type.
        // Therefore we directly use the underlying `mem_copy` here.
        self.mem_copy(right.ptr(), left.ptr(), left.layout.size, /*nonoverlapping*/ true)?;
        // This means we also need to do the validation of the value that used to be in `right`
        // ourselves. This value is now in `left`. The one that started out in `left` already got
        // validated by the copy above.
        if M::enforce_validity(self, left.layout) {
            self.validate_operand(
                &left.clone().into(),
                M::enforce_validity_recursively(self, left.layout),
                /*reset_provenance_and_padding*/ true,
            )?;
        }

        self.copy_op(&temp, &right)?; // checks alignment of `right`

        self.deallocate_ptr(temp.ptr(), None, kind)?;
        interp_ok(())
    }

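    /// Fills `count * size_of::<T>()` bytes starting at `dst` with `byte`, as in the
    /// stable `ptr::write_bytes`; a sketch:
    ///
    /// ```
    /// let mut buf = [0u16; 2];
    /// unsafe { std::ptr::write_bytes(buf.as_mut_ptr(), 0xAB, 2) };
    /// assert_eq!(buf, [0xABAB, 0xABAB]);
    /// ```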
    pub fn write_bytes_intrinsic(
        &mut self,
        dst: &OpTy<'tcx, <M as Machine<'tcx>>::Provenance>,
        byte: &OpTy<'tcx, <M as Machine<'tcx>>::Provenance>,
        count: &OpTy<'tcx, <M as Machine<'tcx>>::Provenance>,
        name: &'static str,
    ) -> InterpResult<'tcx> {
        let layout = self.layout_of(dst.layout.ty.builtin_deref(true).unwrap())?;

        let dst = self.read_pointer(dst)?;
        let byte = self.read_scalar(byte)?.to_u8()?;
        let count = self.read_target_usize(count)?;

        // `checked_mul` enforces a too-small bound (the correct one would probably be
        // `target_isize_max`), but no actual allocation can be big enough for the
        // difference to be noticeable.
        let len = self
            .compute_size_in_bytes(layout.size, count)
            .ok_or_else(|| err_ub_custom!(fluent::const_eval_size_overflow, name = name))?;

        let bytes = std::iter::repeat_n(byte, len.bytes_usize());
        self.write_bytes_ptr(dst, bytes)
    }

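    /// Implements `compare_bytes`, a `memcmp`-style lexicographic comparison
    /// returning -1/0/+1. The cast in the body relies on `Ordering`'s discriminant
    /// values:
    ///
    /// ```
    /// use std::cmp::Ordering;
    /// assert_eq!(Ordering::Less as i32, -1);
    /// assert_eq!(Ordering::Equal as i32, 0);
    /// assert_eq!(Ordering::Greater as i32, 1);
    /// ```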
    pub(crate) fn compare_bytes_intrinsic(
        &mut self,
        left: &OpTy<'tcx, <M as Machine<'tcx>>::Provenance>,
        right: &OpTy<'tcx, <M as Machine<'tcx>>::Provenance>,
        byte_count: &OpTy<'tcx, <M as Machine<'tcx>>::Provenance>,
    ) -> InterpResult<'tcx, Scalar<M::Provenance>> {
        let left = self.read_pointer(left)?;
        let right = self.read_pointer(right)?;
        let n = Size::from_bytes(self.read_target_usize(byte_count)?);

        let left_bytes = self.read_bytes_ptr_strip_provenance(left, n)?;
        let right_bytes = self.read_bytes_ptr_strip_provenance(right, n)?;

        // `Ordering`'s discriminants are -1/0/+1, so casting does the right thing.
        let result = Ord::cmp(left_bytes, right_bytes) as i32;
        interp_ok(Scalar::from_i32(result))
    }

    pub(crate) fn raw_eq_intrinsic(
        &mut self,
        lhs: &OpTy<'tcx, <M as Machine<'tcx>>::Provenance>,
        rhs: &OpTy<'tcx, <M as Machine<'tcx>>::Provenance>,
    ) -> InterpResult<'tcx, Scalar<M::Provenance>> {
        let layout = self.layout_of(lhs.layout.ty.builtin_deref(true).unwrap())?;
        assert!(layout.is_sized());

        let get_bytes = |this: &InterpCx<'tcx, M>,
                         op: &OpTy<'tcx, <M as Machine<'tcx>>::Provenance>|
         -> InterpResult<'tcx, &[u8]> {
            let ptr = this.read_pointer(op)?;
            this.check_ptr_align(ptr, layout.align.abi)?;
            let Some(alloc_ref) = self.get_ptr_alloc(ptr, layout.size)? else {
                // zero-sized access
                return interp_ok(&[]);
            };
            alloc_ref.get_bytes_strip_provenance()
        };

        let lhs_bytes = get_bytes(self, lhs)?;
        let rhs_bytes = get_bytes(self, rhs)?;
        interp_ok(Scalar::from_bool(lhs_bytes == rhs_bytes))
    }

    fn float_minmax<F>(
        &self,
        a: Scalar<M::Provenance>,
        b: Scalar<M::Provenance>,
        op: MinMax,
    ) -> InterpResult<'tcx, Scalar<M::Provenance>>
    where
        F: rustc_apfloat::Float + rustc_apfloat::FloatConvert<F> + Into<Scalar<M::Provenance>>,
    {
        let a: F = a.to_float()?;
        let b: F = b.to_float()?;
        let res = if matches!(op, MinMax::MinNum | MinMax::MaxNum) && a == b {
            // They are definitely not NaN (those are never equal), but they could be `+0` and `-0`.
            // Let the machine decide which one to return.
            M::equal_float_min_max(self, a, b)
        } else {
            let result = match op {
                MinMax::Minimum => a.minimum(b),
                MinMax::MinNum => a.min(b),
                MinMax::Maximum => a.maximum(b),
                MinMax::MaxNum => a.max(b),
            };
            self.adjust_nan(result, &[a, b])
        };

        interp_ok(res.into())
    }

    fn float_minmax_intrinsic<F>(
        &mut self,
        args: &[OpTy<'tcx, M::Provenance>],
        op: MinMax,
        dest: &PlaceTy<'tcx, M::Provenance>,
    ) -> InterpResult<'tcx, ()>
    where
        F: rustc_apfloat::Float + rustc_apfloat::FloatConvert<F> + Into<Scalar<M::Provenance>>,
    {
        let res =
            self.float_minmax::<F>(self.read_scalar(&args[0])?, self.read_scalar(&args[1])?, op)?;
        self.write_scalar(res, dest)?;
        interp_ok(())
    }

    fn float_copysign_intrinsic<F>(
        &mut self,
        args: &[OpTy<'tcx, M::Provenance>],
        dest: &PlaceTy<'tcx, M::Provenance>,
    ) -> InterpResult<'tcx, ()>
    where
        F: rustc_apfloat::Float + rustc_apfloat::FloatConvert<F> + Into<Scalar<M::Provenance>>,
    {
        let a: F = self.read_scalar(&args[0])?.to_float()?;
        let b: F = self.read_scalar(&args[1])?.to_float()?;
        // bitwise, no NaN adjustments
        self.write_scalar(a.copy_sign(b), dest)?;
        interp_ok(())
    }

    fn float_abs_intrinsic<F>(
        &mut self,
        args: &[OpTy<'tcx, M::Provenance>],
        dest: &PlaceTy<'tcx, M::Provenance>,
    ) -> InterpResult<'tcx, ()>
    where
        F: rustc_apfloat::Float + rustc_apfloat::FloatConvert<F> + Into<Scalar<M::Provenance>>,
    {
        let x: F = self.read_scalar(&args[0])?.to_float()?;
        // bitwise, no NaN adjustments
        self.write_scalar(x.abs(), dest)?;
        interp_ok(())
    }

    fn float_round<F>(
        &mut self,
        x: Scalar<M::Provenance>,
        mode: rustc_apfloat::Round,
    ) -> InterpResult<'tcx, Scalar<M::Provenance>>
    where
        F: rustc_apfloat::Float + rustc_apfloat::FloatConvert<F> + Into<Scalar<M::Provenance>>,
    {
        let x: F = x.to_float()?;
        let res = x.round_to_integral(mode).value;
        let res = self.adjust_nan(res, &[x]);
        interp_ok(res.into())
    }

    fn float_round_intrinsic<F>(
        &mut self,
        args: &[OpTy<'tcx, M::Provenance>],
        dest: &PlaceTy<'tcx, M::Provenance>,
        mode: rustc_apfloat::Round,
    ) -> InterpResult<'tcx, ()>
    where
        F: rustc_apfloat::Float + rustc_apfloat::FloatConvert<F> + Into<Scalar<M::Provenance>>,
    {
        let res = self.float_round::<F>(self.read_scalar(&args[0])?, mode)?;
        self.write_scalar(res, dest)?;
        interp_ok(())
    }

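    /// Computes `a * b + c`, either fused (a single rounding) or unfused (two
    /// roundings). The two can genuinely differ; a sketch using the stable surface
    /// API:
    ///
    /// ```
    /// let (a, b, c) = (0.1f64, 10.0, -1.0);
    /// // Fused keeps the exact product `a * b` before adding `c` ...
    /// let fused = a.mul_add(b, c);
    /// // ... while the unfused form rounds `a * b` to exactly 1.0 first.
    /// assert_eq!(a * b + c, 0.0);
    /// assert_ne!(fused, a * b + c);
    /// ```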
    fn float_muladd<F>(
        &self,
        a: Scalar<M::Provenance>,
        b: Scalar<M::Provenance>,
        c: Scalar<M::Provenance>,
        typ: MulAddType,
    ) -> InterpResult<'tcx, Scalar<M::Provenance>>
    where
        F: rustc_apfloat::Float + rustc_apfloat::FloatConvert<F> + Into<Scalar<M::Provenance>>,
    {
        let a: F = a.to_float()?;
        let b: F = b.to_float()?;
        let c: F = c.to_float()?;

        let fuse = typ == MulAddType::Fused || M::float_fuse_mul_add(self);

        let res = if fuse { a.mul_add(b, c).value } else { ((a * b).value + c).value };
        let res = self.adjust_nan(res, &[a, b, c]);
        interp_ok(res.into())
    }

    fn float_muladd_intrinsic<F>(
        &mut self,
        args: &[OpTy<'tcx, M::Provenance>],
        dest: &PlaceTy<'tcx, M::Provenance>,
        typ: MulAddType,
    ) -> InterpResult<'tcx, ()>
    where
        F: rustc_apfloat::Float + rustc_apfloat::FloatConvert<F> + Into<Scalar<M::Provenance>>,
    {
        let a = self.read_scalar(&args[0])?;
        let b = self.read_scalar(&args[1])?;
        let c = self.read_scalar(&args[2])?;

        let res = self.float_muladd::<F>(a, b, c, typ)?;
        self.write_scalar(res, dest)?;
        interp_ok(())
    }

    /// Converts `src` from floating point to the integer type `cast_to`,
    /// rounding with mode `round`.
    /// Returns `None` if `src` is NaN or out of range.
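    /// For reference, the stable `as` cast saturates in exactly the cases where this
    /// returns `None`; a sketch:
    ///
    /// ```
    /// assert_eq!(2.7f32 as i32, 2); // in range: merely rounds (INEXACT is fine)
    /// assert_eq!(f32::NAN as i32, 0); // NaN has no checked result
    /// assert_eq!(1e10f32 as i32, i32::MAX); // out of range saturates
    /// ```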
    pub fn float_to_int_checked(
        &self,
        src: &ImmTy<'tcx, M::Provenance>,
        cast_to: TyAndLayout<'tcx>,
        round: rustc_apfloat::Round,
    ) -> InterpResult<'tcx, Option<ImmTy<'tcx, M::Provenance>>> {
        fn float_to_int_inner<'tcx, F: rustc_apfloat::Float, M: Machine<'tcx>>(
            ecx: &InterpCx<'tcx, M>,
            src: F,
            cast_to: TyAndLayout<'tcx>,
            round: rustc_apfloat::Round,
        ) -> (Scalar<M::Provenance>, rustc_apfloat::Status) {
            let int_size = cast_to.layout.size;
            match cast_to.ty.kind() {
                // Unsigned
                ty::Uint(_) => {
                    let res = src.to_u128_r(int_size.bits_usize(), round, &mut false);
                    (Scalar::from_uint(res.value, int_size), res.status)
                }
                // Signed
                ty::Int(_) => {
                    let res = src.to_i128_r(int_size.bits_usize(), round, &mut false);
                    (Scalar::from_int(res.value, int_size), res.status)
                }
                // Nothing else
                _ => span_bug!(
                    ecx.cur_span(),
                    "attempted float-to-int conversion with non-int output type {}",
                    cast_to.ty,
                ),
            }
        }

        let ty::Float(fty) = src.layout.ty.kind() else {
            bug!("float_to_int_checked: non-float input type {}", src.layout.ty)
        };

        let (val, status) = match fty {
            FloatTy::F16 => float_to_int_inner(self, src.to_scalar().to_f16()?, cast_to, round),
            FloatTy::F32 => float_to_int_inner(self, src.to_scalar().to_f32()?, cast_to, round),
            FloatTy::F64 => float_to_int_inner(self, src.to_scalar().to_f64()?, cast_to, round),
            FloatTy::F128 => float_to_int_inner(self, src.to_scalar().to_f128()?, cast_to, round),
        };

        if status.intersects(
            rustc_apfloat::Status::INVALID_OP
                | rustc_apfloat::Status::OVERFLOW
                | rustc_apfloat::Status::UNDERFLOW,
        ) {
            // Floating point value is NaN (flagged with INVALID_OP) or outside the range
            // of values of the integer type (flagged with OVERFLOW or UNDERFLOW).
            interp_ok(None)
        } else {
            // Floating point value can be represented by the integer type after rounding.
            // The INEXACT flag is ignored on purpose to allow rounding.
            interp_ok(Some(ImmTy::from_scalar(val, cast_to)))
        }
    }
}