rustc_const_eval/interpret/intrinsics.rs

//! Intrinsics and other functions that the interpreter executes without
//! looking at their MIR. Intrinsics/functions supported here are shared by CTFE
//! and Miri.

use std::assert_matches::assert_matches;

use rustc_abi::Size;
use rustc_apfloat::ieee::{Double, Half, Quad, Single};
use rustc_hir::def_id::DefId;
use rustc_middle::mir::{self, BinOp, ConstValue, NonDivergingIntrinsic};
use rustc_middle::ty::layout::{TyAndLayout, ValidityRequirement};
use rustc_middle::ty::{GenericArgsRef, Ty, TyCtxt};
use rustc_middle::{bug, ty};
use rustc_span::{Symbol, sym};
use tracing::trace;

use super::memory::MemoryKind;
use super::util::ensure_monomorphic_enough;
use super::{
    Allocation, CheckInAllocMsg, ConstAllocation, GlobalId, ImmTy, InterpCx, InterpResult, Machine,
    OpTy, PlaceTy, Pointer, PointerArithmetic, Provenance, Scalar, err_inval, err_ub_custom,
    err_unsup_format, interp_ok, throw_inval, throw_ub_custom, throw_ub_format,
};
use crate::fluent_generated as fluent;

/// Directly returns an `Allocation` containing an absolute path representation of the given type.
pub(crate) fn alloc_type_name<'tcx>(tcx: TyCtxt<'tcx>, ty: Ty<'tcx>) -> ConstAllocation<'tcx> {
    let path = crate::util::type_name(tcx, ty);
    let alloc = Allocation::from_bytes_byte_aligned_immutable(path.into_bytes(), ());
    tcx.mk_const_alloc(alloc)
}

/// The logic for all nullary intrinsics is implemented here. These intrinsics don't get evaluated
/// inside an `InterpCx` and instead have their value computed directly from rustc internal info.
pub(crate) fn eval_nullary_intrinsic<'tcx>(
    tcx: TyCtxt<'tcx>,
    typing_env: ty::TypingEnv<'tcx>,
    def_id: DefId,
    args: GenericArgsRef<'tcx>,
) -> InterpResult<'tcx, ConstValue<'tcx>> {
    let tp_ty = args.type_at(0);
    let name = tcx.item_name(def_id);
    interp_ok(match name {
        sym::type_name => {
            ensure_monomorphic_enough(tcx, tp_ty)?;
            let alloc = alloc_type_name(tcx, tp_ty);
            ConstValue::Slice { data: alloc, meta: alloc.inner().size().bytes() }
        }
        sym::needs_drop => {
            ensure_monomorphic_enough(tcx, tp_ty)?;
            ConstValue::from_bool(tp_ty.needs_drop(tcx, typing_env))
        }
        sym::type_id => {
            ensure_monomorphic_enough(tcx, tp_ty)?;
            ConstValue::from_u128(tcx.type_id_hash(tp_ty).as_u128())
        }
        sym::variant_count => match match tp_ty.kind() {
            // Pattern types have the same number of variants as their base type.
            // Even if we restrict e.g. which variants are valid, the excluded variants are
            // essentially just uninhabited. And `Result<(), !>` still has two variants
            // according to `variant_count`.
            ty::Pat(base, _) => *base,
            _ => tp_ty,
        }
        .kind()
        {
            // Correctly handles non-monomorphic calls, so there is no need for
            // `ensure_monomorphic_enough`.
            ty::Adt(adt, _) => ConstValue::from_target_usize(adt.variants().len() as u64, &tcx),
            ty::Alias(..) | ty::Param(_) | ty::Placeholder(_) | ty::Infer(_) => {
                throw_inval!(TooGeneric)
            }
            ty::Pat(..) => unreachable!(),
            ty::Bound(_, _) => bug!("bound ty during ctfe"),
            ty::Bool
            | ty::Char
            | ty::Int(_)
            | ty::Uint(_)
            | ty::Float(_)
            | ty::Foreign(_)
            | ty::Str
            | ty::Array(_, _)
            | ty::Slice(_)
            | ty::RawPtr(_, _)
            | ty::Ref(_, _, _)
            | ty::FnDef(_, _)
            | ty::FnPtr(..)
            | ty::Dynamic(_, _, _)
            | ty::Closure(_, _)
            | ty::CoroutineClosure(_, _)
            | ty::Coroutine(_, _)
            | ty::CoroutineWitness(..)
            | ty::UnsafeBinder(_)
            | ty::Never
            | ty::Tuple(_)
            | ty::Error(_) => ConstValue::from_target_usize(0u64, &tcx),
        },
        other => bug!("`{}` is not a zero arg intrinsic", other),
    })
}

impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
    /// Returns `true` if emulation happened.
    /// Here we implement the intrinsics that are common to all Miri instances; individual
    /// machines can add their own intrinsic handling.
    pub fn eval_intrinsic(
        &mut self,
        instance: ty::Instance<'tcx>,
        args: &[OpTy<'tcx, M::Provenance>],
        dest: &PlaceTy<'tcx, M::Provenance>,
        ret: Option<mir::BasicBlock>,
    ) -> InterpResult<'tcx, bool> {
        let instance_args = instance.args;
        let intrinsic_name = self.tcx.item_name(instance.def_id());

        match intrinsic_name {
            sym::caller_location => {
                let span = self.find_closest_untracked_caller_location();
                let val = self.tcx.span_as_caller_location(span);
                let val =
                    self.const_val_to_op(val, self.tcx.caller_location_ty(), Some(dest.layout))?;
                self.copy_op(&val, dest)?;
            }

            sym::align_of_val | sym::size_of_val => {
                // Avoid `deref_pointer` -- this is not a deref, the ptr does not have to be
                // dereferenceable!
                let place = self.ref_to_mplace(&self.read_immediate(&args[0])?)?;
                let (size, align) = self
                    .size_and_align_of_val(&place)?
                    .ok_or_else(|| err_unsup_format!("`extern type` does not have known layout"))?;

                let result = match intrinsic_name {
                    sym::align_of_val => align.bytes(),
                    sym::size_of_val => size.bytes(),
                    _ => bug!(),
                };

                self.write_scalar(Scalar::from_target_usize(result, self), dest)?;
            }

            sym::needs_drop | sym::type_id | sym::type_name | sym::variant_count => {
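                // These were already computed outside the `InterpCx` (see
                // `eval_nullary_intrinsic` above); here we just trigger the const-eval
                // query for this instance and copy the resulting value into `dest`.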
                let gid = GlobalId { instance, promoted: None };
                let ty = self
                    .tcx
                    .fn_sig(instance.def_id())
                    .instantiate(self.tcx.tcx, instance.args)
                    .output()
                    .no_bound_vars()
                    .unwrap();
                let val = self
                    .ctfe_query(|tcx| tcx.const_eval_global_id(self.typing_env, gid, tcx.span))?;
                let val = self.const_val_to_op(val, ty, Some(dest.layout))?;
                self.copy_op(&val, dest)?;
            }

            sym::fadd_algebraic
            | sym::fsub_algebraic
            | sym::fmul_algebraic
            | sym::fdiv_algebraic
            | sym::frem_algebraic => {
                let a = self.read_immediate(&args[0])?;
                let b = self.read_immediate(&args[1])?;

                let op = match intrinsic_name {
                    sym::fadd_algebraic => BinOp::Add,
                    sym::fsub_algebraic => BinOp::Sub,
                    sym::fmul_algebraic => BinOp::Mul,
                    sym::fdiv_algebraic => BinOp::Div,
                    sym::frem_algebraic => BinOp::Rem,

                    _ => bug!(),
                };

                let res = self.binary_op(op, &a, &b)?;
                // `binary_op` already called `generate_nan` if needed.
                let res = M::apply_float_nondet(self, res)?;
                self.write_immediate(*res, dest)?;
            }

            sym::ctpop
            | sym::cttz
            | sym::cttz_nonzero
            | sym::ctlz
            | sym::ctlz_nonzero
            | sym::bswap
            | sym::bitreverse => {
                let ty = instance_args.type_at(0);
                let layout = self.layout_of(ty)?;
                let val = self.read_scalar(&args[0])?;

                let out_val = self.numeric_intrinsic(intrinsic_name, val, layout, dest.layout)?;
                self.write_scalar(out_val, dest)?;
            }
            sym::saturating_add | sym::saturating_sub => {
                let l = self.read_immediate(&args[0])?;
                let r = self.read_immediate(&args[1])?;
                let val = self.saturating_arith(
                    if intrinsic_name == sym::saturating_add { BinOp::Add } else { BinOp::Sub },
                    &l,
                    &r,
                )?;
                self.write_scalar(val, dest)?;
            }
            sym::discriminant_value => {
                let place = self.deref_pointer(&args[0])?;
                let variant = self.read_discriminant(&place)?;
                let discr = self.discriminant_for_variant(place.layout.ty, variant)?;
                self.write_immediate(*discr, dest)?;
            }
            sym::exact_div => {
                let l = self.read_immediate(&args[0])?;
                let r = self.read_immediate(&args[1])?;
                self.exact_div(&l, &r, dest)?;
            }
            sym::rotate_left | sym::rotate_right => {
                // rotate_left: (X << (S % BW)) | (X >> ((BW - S) % BW))
                // rotate_right: (X << ((BW - S) % BW)) | (X >> (S % BW))
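                // E.g. (illustrative): rotating the u8 value 0b1000_0001 left by 1
                // yields 0b0000_0011.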
                let layout_val = self.layout_of(instance_args.type_at(0))?;
                let val = self.read_scalar(&args[0])?;
                let val_bits = val.to_bits(layout_val.size)?; // sign is ignored here

                let layout_raw_shift = self.layout_of(self.tcx.types.u32)?;
                let raw_shift = self.read_scalar(&args[1])?;
                let raw_shift_bits = raw_shift.to_bits(layout_raw_shift.size)?;

                let width_bits = u128::from(layout_val.size.bits());
                let shift_bits = raw_shift_bits % width_bits;
                let inv_shift_bits = (width_bits - shift_bits) % width_bits;
                let result_bits = if intrinsic_name == sym::rotate_left {
                    (val_bits << shift_bits) | (val_bits >> inv_shift_bits)
                } else {
                    (val_bits >> shift_bits) | (val_bits << inv_shift_bits)
                };
                let truncated_bits = layout_val.size.truncate(result_bits);
                let result = Scalar::from_uint(truncated_bits, layout_val.size);
                self.write_scalar(result, dest)?;
            }
            sym::copy => {
                self.copy_intrinsic(&args[0], &args[1], &args[2], /*nonoverlapping*/ false)?;
            }
            sym::write_bytes => {
                self.write_bytes_intrinsic(&args[0], &args[1], &args[2], "write_bytes")?;
            }
            sym::compare_bytes => {
                let result = self.compare_bytes_intrinsic(&args[0], &args[1], &args[2])?;
                self.write_scalar(result, dest)?;
            }
            sym::arith_offset => {
                let ptr = self.read_pointer(&args[0])?;
                let offset_count = self.read_target_isize(&args[1])?;
                let pointee_ty = instance_args.type_at(0);

                let pointee_size = i64::try_from(self.layout_of(pointee_ty)?.size.bytes()).unwrap();
                let offset_bytes = offset_count.wrapping_mul(pointee_size);
                let offset_ptr = ptr.wrapping_signed_offset(offset_bytes, self);
                self.write_pointer(offset_ptr, dest)?;
            }
            sym::ptr_offset_from | sym::ptr_offset_from_unsigned => {
                let a = self.read_pointer(&args[0])?;
                let b = self.read_pointer(&args[1])?;

                let usize_layout = self.layout_of(self.tcx.types.usize)?;
                let isize_layout = self.layout_of(self.tcx.types.isize)?;

                // Get offsets for both that are at least relative to the same base.
                // With `OFFSET_IS_ADDR` this is trivial; without it we need either
                // two integers or two pointers into the same allocation.
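                // (`is_addr` records whether the offsets are absolute addresses, as opposed
                // to offsets into a shared allocation; it only affects the error message
                // emitted below.)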
                let (a_offset, b_offset, is_addr) = if M::Provenance::OFFSET_IS_ADDR {
                    (a.addr().bytes(), b.addr().bytes(), /*is_addr*/ true)
                } else {
                    match (self.ptr_try_get_alloc_id(a, 0), self.ptr_try_get_alloc_id(b, 0)) {
                        (Err(a), Err(b)) => {
                            // Neither pointer points to an allocation, so they are both absolute.
                            (a, b, /*is_addr*/ true)
                        }
                        (Ok((a_alloc_id, a_offset, _)), Ok((b_alloc_id, b_offset, _)))
                            if a_alloc_id == b_alloc_id =>
                        {
                            // Found allocation for both, and it's the same.
                            // Use these offsets for distance calculation.
                            (a_offset.bytes(), b_offset.bytes(), /*is_addr*/ false)
                        }
                        _ => {
                            // Not into the same allocation -- this is UB.
                            throw_ub_custom!(
                                fluent::const_eval_offset_from_different_allocations,
                                name = intrinsic_name,
                            );
                        }
                    }
                };

                // Compute distance: a - b.
                let dist = {
                    // Addresses are unsigned, so this is a `usize` computation. We have to do the
                    // overflow check separately anyway.
                    let (val, overflowed) = {
                        let a_offset = ImmTy::from_uint(a_offset, usize_layout);
                        let b_offset = ImmTy::from_uint(b_offset, usize_layout);
                        self.binary_op(BinOp::SubWithOverflow, &a_offset, &b_offset)?
                            .to_scalar_pair()
                    };
                    if overflowed.to_bool()? {
                        // a < b
                        if intrinsic_name == sym::ptr_offset_from_unsigned {
                            throw_ub_custom!(
                                fluent::const_eval_offset_from_unsigned_overflow,
                                a_offset = a_offset,
                                b_offset = b_offset,
                                is_addr = is_addr,
                            );
                        }
                        // The signed form of the intrinsic allows this. If we interpret the
                        // difference as isize, we'll get the proper signed difference. If that
                        // seems *positive* or equal to isize::MIN, they were more than isize::MAX apart.
                        let dist = val.to_target_isize(self)?;
                        if dist >= 0 || i128::from(dist) == self.pointer_size().signed_int_min() {
                            throw_ub_custom!(
                                fluent::const_eval_offset_from_underflow,
                                name = intrinsic_name,
                            );
                        }
                        dist
                    } else {
                        // b <= a
                        let dist = val.to_target_isize(self)?;
                        // If converting to isize produced a *negative* result, we had an overflow
                        // because they were more than isize::MAX apart.
                        if dist < 0 {
                            throw_ub_custom!(
                                fluent::const_eval_offset_from_overflow,
                                name = intrinsic_name,
                            );
                        }
                        dist
                    }
                };

                // Check that the memory between them is dereferenceable at all, starting from the
                // origin pointer: `dist` is `a - b`, so it is based on `b`.
                self.check_ptr_access_signed(b, dist, CheckInAllocMsg::Dereferenceable)
                    .map_err_kind(|_| {
                        // This could mean they point to different allocations, or they point to
                        // the same allocation but not the entire range between the pointers is
                        // in-bounds.
                        if let Ok((a_alloc_id, ..)) = self.ptr_try_get_alloc_id(a, 0)
                            && let Ok((b_alloc_id, ..)) = self.ptr_try_get_alloc_id(b, 0)
                            && a_alloc_id == b_alloc_id
                        {
                            err_ub_custom!(
                                fluent::const_eval_offset_from_out_of_bounds,
                                name = intrinsic_name,
                            )
                        } else {
                            err_ub_custom!(
                                fluent::const_eval_offset_from_different_allocations,
                                name = intrinsic_name,
                            )
                        }
                    })?;
                // Then check that this is also dereferenceable from `a`. This ensures that they
                // are derived from the same allocation.
                self.check_ptr_access_signed(
                    a,
                    dist.checked_neg().unwrap(), // i64::MIN is impossible as no allocation can be that large
                    CheckInAllocMsg::Dereferenceable,
                )
                .map_err_kind(|_| {
                    // Make the error more specific.
                    err_ub_custom!(
                        fluent::const_eval_offset_from_different_allocations,
                        name = intrinsic_name,
                    )
                })?;

                // Perform division by size to compute return value.
                let ret_layout = if intrinsic_name == sym::ptr_offset_from_unsigned {
                    assert!(0 <= dist && dist <= self.target_isize_max());
                    usize_layout
                } else {
                    assert!(self.target_isize_min() <= dist && dist <= self.target_isize_max());
                    isize_layout
                };
                let pointee_layout = self.layout_of(instance_args.type_at(0))?;
                // If `ret_layout` is unsigned, we checked that so is the distance, so we are good.
                let val = ImmTy::from_int(dist, ret_layout);
                let size = ImmTy::from_int(pointee_layout.size.bytes(), ret_layout);
                self.exact_div(&val, &size, dest)?;
            }

            sym::assert_inhabited
            | sym::assert_zero_valid
            | sym::assert_mem_uninitialized_valid => {
                let ty = instance.args.type_at(0);
                let requirement = ValidityRequirement::from_intrinsic(intrinsic_name).unwrap();

                let should_panic = !self
                    .tcx
                    .check_validity_requirement((requirement, self.typing_env.as_query_input(ty)))
                    .map_err(|_| err_inval!(TooGeneric))?;

                if should_panic {
                    let layout = self.layout_of(ty)?;

                    let msg = match requirement {
                        // For *all* intrinsics we first check `is_uninhabited` to give a more
                        // specific error message.
                        _ if layout.is_uninhabited() => format!(
                            "aborted execution: attempted to instantiate uninhabited type `{ty}`"
                        ),
                        ValidityRequirement::Inhabited => bug!("handled earlier"),
                        ValidityRequirement::Zero => format!(
                            "aborted execution: attempted to zero-initialize type `{ty}`, which is invalid"
                        ),
                        ValidityRequirement::UninitMitigated0x01Fill => format!(
                            "aborted execution: attempted to leave type `{ty}` uninitialized, which is invalid"
                        ),
                        ValidityRequirement::Uninit => bug!("assert_uninit_valid doesn't exist"),
                    };

                    M::panic_nounwind(self, &msg)?;
                    // Skip the `return_to_block` at the end (we panicked, we do not return).
                    return interp_ok(true);
                }
            }
            sym::simd_insert => {
                let index = u64::from(self.read_scalar(&args[1])?.to_u32()?);
                let elem = &args[2];
                let (input, input_len) = self.project_to_simd(&args[0])?;
                let (dest, dest_len) = self.project_to_simd(dest)?;
                assert_eq!(input_len, dest_len, "Return vector length must match input length");
                // Bounds are not checked by typeck so we have to do it ourselves.
                if index >= input_len {
                    throw_ub_format!(
                        "`simd_insert` index {index} is out-of-bounds of vector with length {input_len}"
                    );
                }

                for i in 0..dest_len {
                    let place = self.project_index(&dest, i)?;
                    let value =
                        if i == index { elem.clone() } else { self.project_index(&input, i)? };
                    self.copy_op(&value, &place)?;
                }
            }
            sym::simd_extract => {
                let index = u64::from(self.read_scalar(&args[1])?.to_u32()?);
                let (input, input_len) = self.project_to_simd(&args[0])?;
                // Bounds are not checked by typeck so we have to do it ourselves.
                if index >= input_len {
                    throw_ub_format!(
                        "`simd_extract` index {index} is out-of-bounds of vector with length {input_len}"
                    );
                }
                self.copy_op(&self.project_index(&input, index)?, dest)?;
            }
            sym::black_box => {
                // These just return their argument.
                self.copy_op(&args[0], dest)?;
            }
            sym::raw_eq => {
                let result = self.raw_eq_intrinsic(&args[0], &args[1])?;
                self.write_scalar(result, dest)?;
            }
            sym::typed_swap_nonoverlapping => {
                self.typed_swap_nonoverlapping_intrinsic(&args[0], &args[1])?;
            }

            sym::vtable_size => {
                let ptr = self.read_pointer(&args[0])?;
                // `None` because we don't know which trait to expect here; any vtable is okay.
                let (size, _align) = self.get_vtable_size_and_align(ptr, None)?;
                self.write_scalar(Scalar::from_target_usize(size.bytes(), self), dest)?;
            }
            sym::vtable_align => {
                let ptr = self.read_pointer(&args[0])?;
                // `None` because we don't know which trait to expect here; any vtable is okay.
                let (_size, align) = self.get_vtable_size_and_align(ptr, None)?;
                self.write_scalar(Scalar::from_target_usize(align.bytes(), self), dest)?;
            }

            sym::minnumf16 => self.float_min_intrinsic::<Half>(args, dest)?,
            sym::minnumf32 => self.float_min_intrinsic::<Single>(args, dest)?,
            sym::minnumf64 => self.float_min_intrinsic::<Double>(args, dest)?,
            sym::minnumf128 => self.float_min_intrinsic::<Quad>(args, dest)?,

            sym::minimumf16 => self.float_minimum_intrinsic::<Half>(args, dest)?,
            sym::minimumf32 => self.float_minimum_intrinsic::<Single>(args, dest)?,
            sym::minimumf64 => self.float_minimum_intrinsic::<Double>(args, dest)?,
            sym::minimumf128 => self.float_minimum_intrinsic::<Quad>(args, dest)?,

            sym::maxnumf16 => self.float_max_intrinsic::<Half>(args, dest)?,
            sym::maxnumf32 => self.float_max_intrinsic::<Single>(args, dest)?,
            sym::maxnumf64 => self.float_max_intrinsic::<Double>(args, dest)?,
            sym::maxnumf128 => self.float_max_intrinsic::<Quad>(args, dest)?,

            sym::maximumf16 => self.float_maximum_intrinsic::<Half>(args, dest)?,
            sym::maximumf32 => self.float_maximum_intrinsic::<Single>(args, dest)?,
            sym::maximumf64 => self.float_maximum_intrinsic::<Double>(args, dest)?,
            sym::maximumf128 => self.float_maximum_intrinsic::<Quad>(args, dest)?,

            sym::copysignf16 => self.float_copysign_intrinsic::<Half>(args, dest)?,
            sym::copysignf32 => self.float_copysign_intrinsic::<Single>(args, dest)?,
            sym::copysignf64 => self.float_copysign_intrinsic::<Double>(args, dest)?,
            sym::copysignf128 => self.float_copysign_intrinsic::<Quad>(args, dest)?,

            sym::fabsf16 => self.float_abs_intrinsic::<Half>(args, dest)?,
            sym::fabsf32 => self.float_abs_intrinsic::<Single>(args, dest)?,
            sym::fabsf64 => self.float_abs_intrinsic::<Double>(args, dest)?,
            sym::fabsf128 => self.float_abs_intrinsic::<Quad>(args, dest)?,

            sym::floorf16 => self.float_round_intrinsic::<Half>(
                args,
                dest,
                rustc_apfloat::Round::TowardNegative,
            )?,
            sym::floorf32 => self.float_round_intrinsic::<Single>(
                args,
                dest,
                rustc_apfloat::Round::TowardNegative,
            )?,
            sym::floorf64 => self.float_round_intrinsic::<Double>(
                args,
                dest,
                rustc_apfloat::Round::TowardNegative,
            )?,
            sym::floorf128 => self.float_round_intrinsic::<Quad>(
                args,
                dest,
                rustc_apfloat::Round::TowardNegative,
            )?,

            sym::ceilf16 => self.float_round_intrinsic::<Half>(
                args,
                dest,
                rustc_apfloat::Round::TowardPositive,
            )?,
            sym::ceilf32 => self.float_round_intrinsic::<Single>(
                args,
                dest,
                rustc_apfloat::Round::TowardPositive,
            )?,
            sym::ceilf64 => self.float_round_intrinsic::<Double>(
                args,
                dest,
                rustc_apfloat::Round::TowardPositive,
            )?,
            sym::ceilf128 => self.float_round_intrinsic::<Quad>(
                args,
                dest,
                rustc_apfloat::Round::TowardPositive,
            )?,

            sym::truncf16 => {
                self.float_round_intrinsic::<Half>(args, dest, rustc_apfloat::Round::TowardZero)?
            }
            sym::truncf32 => {
                self.float_round_intrinsic::<Single>(args, dest, rustc_apfloat::Round::TowardZero)?
            }
            sym::truncf64 => {
                self.float_round_intrinsic::<Double>(args, dest, rustc_apfloat::Round::TowardZero)?
            }
            sym::truncf128 => {
                self.float_round_intrinsic::<Quad>(args, dest, rustc_apfloat::Round::TowardZero)?
            }

            sym::roundf16 => self.float_round_intrinsic::<Half>(
                args,
                dest,
                rustc_apfloat::Round::NearestTiesToAway,
            )?,
            sym::roundf32 => self.float_round_intrinsic::<Single>(
                args,
                dest,
                rustc_apfloat::Round::NearestTiesToAway,
            )?,
            sym::roundf64 => self.float_round_intrinsic::<Double>(
                args,
                dest,
                rustc_apfloat::Round::NearestTiesToAway,
            )?,
            sym::roundf128 => self.float_round_intrinsic::<Quad>(
                args,
                dest,
                rustc_apfloat::Round::NearestTiesToAway,
            )?,

            sym::round_ties_even_f16 => self.float_round_intrinsic::<Half>(
                args,
                dest,
                rustc_apfloat::Round::NearestTiesToEven,
            )?,
            sym::round_ties_even_f32 => self.float_round_intrinsic::<Single>(
                args,
                dest,
                rustc_apfloat::Round::NearestTiesToEven,
            )?,
            sym::round_ties_even_f64 => self.float_round_intrinsic::<Double>(
                args,
                dest,
                rustc_apfloat::Round::NearestTiesToEven,
            )?,
            sym::round_ties_even_f128 => self.float_round_intrinsic::<Quad>(
                args,
                dest,
                rustc_apfloat::Round::NearestTiesToEven,
            )?,

            // Unsupported intrinsic: skip the `return_to_block` below.
            _ => return interp_ok(false),
        }

        trace!("{:?}", self.dump_place(&dest.clone().into()));
        self.return_to_block(ret)?;
        interp_ok(true)
    }

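    /// Evaluates the `NonDivergingIntrinsic`s, which occur as MIR statements rather than
    /// calls: `assume` and `copy_nonoverlapping`.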
    pub(super) fn eval_nondiverging_intrinsic(
        &mut self,
        intrinsic: &NonDivergingIntrinsic<'tcx>,
    ) -> InterpResult<'tcx> {
        match intrinsic {
            NonDivergingIntrinsic::Assume(op) => {
                let op = self.eval_operand(op, None)?;
                let cond = self.read_scalar(&op)?.to_bool()?;
                if !cond {
                    throw_ub_custom!(fluent::const_eval_assume_false);
                }
                interp_ok(())
            }
            NonDivergingIntrinsic::CopyNonOverlapping(mir::CopyNonOverlapping {
                count,
                src,
                dst,
            }) => {
                let src = self.eval_operand(src, None)?;
                let dst = self.eval_operand(dst, None)?;
                let count = self.eval_operand(count, None)?;
                self.copy_intrinsic(&src, &dst, &count, /* nonoverlapping */ true)
            }
        }
    }

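    /// Implements the bit-manipulation intrinsics (`ctpop`, `ctlz`, `cttz`, `bswap`,
    /// `bitreverse`) on the raw bits of `val`.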
    pub fn numeric_intrinsic(
        &self,
        name: Symbol,
        val: Scalar<M::Provenance>,
        layout: TyAndLayout<'tcx>,
        ret_layout: TyAndLayout<'tcx>,
    ) -> InterpResult<'tcx, Scalar<M::Provenance>> {
        assert!(layout.ty.is_integral(), "invalid type for numeric intrinsic: {}", layout.ty);
        let bits = val.to_bits(layout.size)?; // these operations all ignore the sign
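        // All arithmetic below happens on `u128`, so `extra` is the number of unused high
        // bits; shifting by it compensates for the value only occupying the low
        // `size.bits()` bits of the 128-bit word.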
        let extra = 128 - u128::from(layout.size.bits());
        let bits_out = match name {
            sym::ctpop => u128::from(bits.count_ones()),
            sym::ctlz_nonzero | sym::cttz_nonzero if bits == 0 => {
                throw_ub_custom!(fluent::const_eval_call_nonzero_intrinsic, name = name,);
            }
            sym::ctlz | sym::ctlz_nonzero => u128::from(bits.leading_zeros()) - extra,
            sym::cttz | sym::cttz_nonzero => u128::from((bits << extra).trailing_zeros()) - extra,
            sym::bswap => {
                assert_eq!(layout, ret_layout);
                (bits << extra).swap_bytes()
            }
            sym::bitreverse => {
                assert_eq!(layout, ret_layout);
                (bits << extra).reverse_bits()
            }
            _ => bug!("not a numeric intrinsic: {}", name),
        };
        interp_ok(Scalar::from_uint(bits_out, ret_layout.size))
    }

    pub fn exact_div(
        &mut self,
        a: &ImmTy<'tcx, M::Provenance>,
        b: &ImmTy<'tcx, M::Provenance>,
        dest: &PlaceTy<'tcx, M::Provenance>,
    ) -> InterpResult<'tcx> {
        assert_eq!(a.layout.ty, b.layout.ty);
        assert_matches!(a.layout.ty.kind(), ty::Int(..) | ty::Uint(..));

        // Performs an exact division, resulting in undefined behavior where
        // `x % y != 0` or `y == 0` or `x == T::MIN && y == -1`.
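        // E.g. (illustrative): `exact_div(6, 3)` is `2`, while `exact_div(7, 3)` triggers
        // the remainder error below.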
        // First, check x % y != 0 (or if that computation overflows).
        let rem = self.binary_op(BinOp::Rem, a, b)?;
        // The sign does not matter for the 0 test, so `to_bits` is fine.
        if rem.to_scalar().to_bits(a.layout.size)? != 0 {
            throw_ub_custom!(
                fluent::const_eval_exact_div_has_remainder,
                a = format!("{a}"),
                b = format!("{b}")
            )
        }
        // `Rem` says this is all right, so we can let `Div` do its job.
        let res = self.binary_op(BinOp::Div, a, b)?;
        self.write_immediate(*res, dest)
    }

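    /// Computes the saturating version of `mir_op` (which must be `Add` or `Sub`): on
    /// overflow, the result is clamped to the type's minimum or maximum value.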
    pub fn saturating_arith(
        &self,
        mir_op: BinOp,
        l: &ImmTy<'tcx, M::Provenance>,
        r: &ImmTy<'tcx, M::Provenance>,
    ) -> InterpResult<'tcx, Scalar<M::Provenance>> {
        assert_eq!(l.layout.ty, r.layout.ty);
        assert_matches!(l.layout.ty.kind(), ty::Int(..) | ty::Uint(..));
        assert_matches!(mir_op, BinOp::Add | BinOp::Sub);
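        // E.g. (illustrative): `u8::MAX` saturating-add `1` is `u8::MAX`, and `0u8`
        // saturating-sub `1` is `0`.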

        let (val, overflowed) =
            self.binary_op(mir_op.wrapping_to_overflowing().unwrap(), l, r)?.to_scalar_pair();
        interp_ok(if overflowed.to_bool()? {
            let size = l.layout.size;
            if l.layout.backend_repr.is_signed() {
                // For signed ints, the saturated value depends on the sign of the first
                // term, since the sign of the second term can be inferred from it and
                // from the fact that the operation has overflowed (if either term is 0,
                // no overflow can occur).
                let first_term: i128 = l.to_scalar().to_int(l.layout.size)?;
                if first_term >= 0 {
                    // Negative overflow is not possible, since a non-negative first term
                    // can only increase an (in-range) negative term for addition,
                    // or the corresponding negated positive term for subtraction.
                    Scalar::from_int(size.signed_int_max(), size)
                } else {
                    // Positive overflow is not possible, for a similar reason.
                    Scalar::from_int(size.signed_int_min(), size)
                }
            } else {
                // unsigned
                if matches!(mir_op, BinOp::Add) {
                    // max unsigned
                    Scalar::from_uint(size.unsigned_int_max(), size)
                } else {
                    // underflow to 0
                    Scalar::from_uint(0u128, size)
                }
            }
        } else {
            val
        })
    }

    /// Offsets a pointer by some multiple of its type size, returning an error if the pointer
    /// leaves its allocation.
    pub fn ptr_offset_inbounds(
        &self,
        ptr: Pointer<Option<M::Provenance>>,
        offset_bytes: i64,
    ) -> InterpResult<'tcx, Pointer<Option<M::Provenance>>> {
        // The offset must be in bounds starting from `ptr`.
        self.check_ptr_access_signed(
            ptr,
            offset_bytes,
            CheckInAllocMsg::InboundsPointerArithmetic,
        )?;
        // This also implies that there is no overflow, so we are done.
        interp_ok(ptr.wrapping_signed_offset(offset_bytes, self))
    }

    /// Copy `count*size_of::<T>()` many bytes from `*src` to `*dst`.
    pub(crate) fn copy_intrinsic(
        &mut self,
        src: &OpTy<'tcx, <M as Machine<'tcx>>::Provenance>,
        dst: &OpTy<'tcx, <M as Machine<'tcx>>::Provenance>,
        count: &OpTy<'tcx, <M as Machine<'tcx>>::Provenance>,
        nonoverlapping: bool,
    ) -> InterpResult<'tcx> {
        let count = self.read_target_usize(count)?;
        let layout = self.layout_of(src.layout.ty.builtin_deref(true).unwrap())?;
        let (size, align) = (layout.size, layout.align.abi);

        let size = self.compute_size_in_bytes(size, count).ok_or_else(|| {
            err_ub_custom!(
                fluent::const_eval_size_overflow,
                name = if nonoverlapping { "copy_nonoverlapping" } else { "copy" }
            )
        })?;

        let src = self.read_pointer(src)?;
        let dst = self.read_pointer(dst)?;

        self.check_ptr_align(src, align)?;
        self.check_ptr_align(dst, align)?;

        self.mem_copy(src, dst, size, nonoverlapping)
    }

    /// Does a *typed* swap of `*left` and `*right`.
    fn typed_swap_nonoverlapping_intrinsic(
        &mut self,
        left: &OpTy<'tcx, <M as Machine<'tcx>>::Provenance>,
        right: &OpTy<'tcx, <M as Machine<'tcx>>::Provenance>,
    ) -> InterpResult<'tcx> {
        let left = self.deref_pointer(left)?;
        let right = self.deref_pointer(right)?;
        assert_eq!(left.layout, right.layout);
        assert!(left.layout.is_sized());
        let kind = MemoryKind::Stack;
        let temp = self.allocate(left.layout, kind)?;
        self.copy_op(&left, &temp)?; // checks alignment of `left`

        // We want to always enforce non-overlapping, even if this is a scalar type.
        // Therefore we directly use the underlying `mem_copy` here.
        self.mem_copy(right.ptr(), left.ptr(), left.layout.size, /*nonoverlapping*/ true)?;
        // This means we also need to do the validation of the value that used to be in `right`
        // ourselves. This value is now in `left`. The one that started out in `left` already got
        // validated by the copy above.
        if M::enforce_validity(self, left.layout) {
            self.validate_operand(
                &left.clone().into(),
                M::enforce_validity_recursively(self, left.layout),
                /*reset_provenance_and_padding*/ true,
            )?;
        }

        self.copy_op(&temp, &right)?; // checks alignment of `right`

        self.deallocate_ptr(temp.ptr(), None, kind)?;
        interp_ok(())
    }

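    /// Writes `count * size_of::<T>()` copies of `byte` starting at `*dst` (the
    /// implementation of the `write_bytes` intrinsic).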
    pub fn write_bytes_intrinsic(
        &mut self,
        dst: &OpTy<'tcx, <M as Machine<'tcx>>::Provenance>,
        byte: &OpTy<'tcx, <M as Machine<'tcx>>::Provenance>,
        count: &OpTy<'tcx, <M as Machine<'tcx>>::Provenance>,
        name: &'static str,
    ) -> InterpResult<'tcx> {
        let layout = self.layout_of(dst.layout.ty.builtin_deref(true).unwrap())?;

        let dst = self.read_pointer(dst)?;
        let byte = self.read_scalar(byte)?.to_u8()?;
        let count = self.read_target_usize(count)?;

        // `checked_mul` enforces a too small bound (the correct one would probably be
        // `target_isize_max`), but no actual allocation can be big enough for the difference
        // to be noticeable.
        let len = self
            .compute_size_in_bytes(layout.size, count)
            .ok_or_else(|| err_ub_custom!(fluent::const_eval_size_overflow, name = name))?;

        let bytes = std::iter::repeat(byte).take(len.bytes_usize());
        self.write_bytes_ptr(dst, bytes)
    }

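    /// Lexicographically compares `byte_count` bytes at `left` and `right`, returning a
    /// `memcmp`-style negative/zero/positive `i32`.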
    pub(crate) fn compare_bytes_intrinsic(
        &mut self,
        left: &OpTy<'tcx, <M as Machine<'tcx>>::Provenance>,
        right: &OpTy<'tcx, <M as Machine<'tcx>>::Provenance>,
        byte_count: &OpTy<'tcx, <M as Machine<'tcx>>::Provenance>,
    ) -> InterpResult<'tcx, Scalar<M::Provenance>> {
        let left = self.read_pointer(left)?;
        let right = self.read_pointer(right)?;
        let n = Size::from_bytes(self.read_target_usize(byte_count)?);

        let left_bytes = self.read_bytes_ptr_strip_provenance(left, n)?;
        let right_bytes = self.read_bytes_ptr_strip_provenance(right, n)?;

        // `Ordering`'s discriminants are -1/0/+1, so casting does the right thing.
        let result = Ord::cmp(left_bytes, right_bytes) as i32;
        interp_ok(Scalar::from_i32(result))
    }

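    /// Implements `raw_eq`: compares the underlying bytes of the two pointees for equality
    /// (a bytewise comparison, ignoring the type's own notion of equality).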
    pub(crate) fn raw_eq_intrinsic(
        &mut self,
        lhs: &OpTy<'tcx, <M as Machine<'tcx>>::Provenance>,
        rhs: &OpTy<'tcx, <M as Machine<'tcx>>::Provenance>,
    ) -> InterpResult<'tcx, Scalar<M::Provenance>> {
        let layout = self.layout_of(lhs.layout.ty.builtin_deref(true).unwrap())?;
        assert!(layout.is_sized());

        let get_bytes = |this: &InterpCx<'tcx, M>,
                         op: &OpTy<'tcx, <M as Machine<'tcx>>::Provenance>|
         -> InterpResult<'tcx, &[u8]> {
            let ptr = this.read_pointer(op)?;
            this.check_ptr_align(ptr, layout.align.abi)?;
            let Some(alloc_ref) = self.get_ptr_alloc(ptr, layout.size)? else {
                // zero-sized access
                return interp_ok(&[]);
            };
            alloc_ref.get_bytes_strip_provenance()
        };

        let lhs_bytes = get_bytes(self, lhs)?;
        let rhs_bytes = get_bytes(self, rhs)?;
        interp_ok(Scalar::from_bool(lhs_bytes == rhs_bytes))
    }

    fn float_min_intrinsic<F>(
        &mut self,
        args: &[OpTy<'tcx, M::Provenance>],
        dest: &PlaceTy<'tcx, M::Provenance>,
    ) -> InterpResult<'tcx, ()>
    where
        F: rustc_apfloat::Float + rustc_apfloat::FloatConvert<F> + Into<Scalar<M::Provenance>>,
    {
        let a: F = self.read_scalar(&args[0])?.to_float()?;
        let b: F = self.read_scalar(&args[1])?.to_float()?;
        let res = if a == b {
            // They are definitely not NaN (those are never equal), but they could be `+0` and `-0`.
            // Let the machine decide which one to return.
            M::equal_float_min_max(self, a, b)
        } else {
            self.adjust_nan(a.min(b), &[a, b])
        };
        self.write_scalar(res, dest)?;
        interp_ok(())
    }

    fn float_max_intrinsic<F>(
        &mut self,
        args: &[OpTy<'tcx, M::Provenance>],
        dest: &PlaceTy<'tcx, M::Provenance>,
    ) -> InterpResult<'tcx, ()>
    where
        F: rustc_apfloat::Float + rustc_apfloat::FloatConvert<F> + Into<Scalar<M::Provenance>>,
    {
        let a: F = self.read_scalar(&args[0])?.to_float()?;
        let b: F = self.read_scalar(&args[1])?.to_float()?;
        let res = if a == b {
            // They are definitely not NaN (those are never equal), but they could be `+0` and `-0`.
            // Let the machine decide which one to return.
            M::equal_float_min_max(self, a, b)
        } else {
            self.adjust_nan(a.max(b), &[a, b])
        };
        self.write_scalar(res, dest)?;
        interp_ok(())
    }

    fn float_minimum_intrinsic<F>(
        &mut self,
        args: &[OpTy<'tcx, M::Provenance>],
        dest: &PlaceTy<'tcx, M::Provenance>,
    ) -> InterpResult<'tcx, ()>
    where
        F: rustc_apfloat::Float + rustc_apfloat::FloatConvert<F> + Into<Scalar<M::Provenance>>,
    {
        let a: F = self.read_scalar(&args[0])?.to_float()?;
        let b: F = self.read_scalar(&args[1])?.to_float()?;
        let res = a.minimum(b);
        let res = self.adjust_nan(res, &[a, b]);
        self.write_scalar(res, dest)?;
        interp_ok(())
    }

    fn float_maximum_intrinsic<F>(
        &mut self,
        args: &[OpTy<'tcx, M::Provenance>],
        dest: &PlaceTy<'tcx, M::Provenance>,
    ) -> InterpResult<'tcx, ()>
    where
        F: rustc_apfloat::Float + rustc_apfloat::FloatConvert<F> + Into<Scalar<M::Provenance>>,
    {
        let a: F = self.read_scalar(&args[0])?.to_float()?;
        let b: F = self.read_scalar(&args[1])?.to_float()?;
        let res = a.maximum(b);
        let res = self.adjust_nan(res, &[a, b]);
        self.write_scalar(res, dest)?;
        interp_ok(())
    }

    fn float_copysign_intrinsic<F>(
        &mut self,
        args: &[OpTy<'tcx, M::Provenance>],
        dest: &PlaceTy<'tcx, M::Provenance>,
    ) -> InterpResult<'tcx, ()>
    where
        F: rustc_apfloat::Float + rustc_apfloat::FloatConvert<F> + Into<Scalar<M::Provenance>>,
    {
        let a: F = self.read_scalar(&args[0])?.to_float()?;
        let b: F = self.read_scalar(&args[1])?.to_float()?;
        // bitwise, no NaN adjustments
        self.write_scalar(a.copy_sign(b), dest)?;
        interp_ok(())
    }

    fn float_abs_intrinsic<F>(
        &mut self,
        args: &[OpTy<'tcx, M::Provenance>],
        dest: &PlaceTy<'tcx, M::Provenance>,
    ) -> InterpResult<'tcx, ()>
    where
        F: rustc_apfloat::Float + rustc_apfloat::FloatConvert<F> + Into<Scalar<M::Provenance>>,
    {
        let x: F = self.read_scalar(&args[0])?.to_float()?;
        // bitwise, no NaN adjustments
        self.write_scalar(x.abs(), dest)?;
        interp_ok(())
    }

    fn float_round_intrinsic<F>(
        &mut self,
        args: &[OpTy<'tcx, M::Provenance>],
        dest: &PlaceTy<'tcx, M::Provenance>,
        mode: rustc_apfloat::Round,
    ) -> InterpResult<'tcx, ()>
    where
        F: rustc_apfloat::Float + rustc_apfloat::FloatConvert<F> + Into<Scalar<M::Provenance>>,
    {
        let x: F = self.read_scalar(&args[0])?.to_float()?;
        let res = x.round_to_integral(mode).value;
        let res = self.adjust_nan(res, &[x]);
        self.write_scalar(res, dest)?;
        interp_ok(())
    }
}
1005}