rustc_codegen_llvm/intrinsic.rs

use std::assert_matches::assert_matches;
use std::cmp::Ordering;

use rustc_abi::{Align, BackendRepr, ExternAbi, Float, HasDataLayout, Primitive, Size};
use rustc_codegen_ssa::base::{compare_simd_types, wants_msvc_seh, wants_wasm_eh};
use rustc_codegen_ssa::common::{IntPredicate, TypeKind};
use rustc_codegen_ssa::errors::{ExpectedPointerMutability, InvalidMonomorphization};
use rustc_codegen_ssa::mir::operand::{OperandRef, OperandValue};
use rustc_codegen_ssa::mir::place::{PlaceRef, PlaceValue};
use rustc_codegen_ssa::traits::*;
use rustc_hir as hir;
use rustc_middle::mir::BinOp;
use rustc_middle::ty::layout::{FnAbiOf, HasTyCtxt, HasTypingEnv, LayoutOf};
use rustc_middle::ty::{self, GenericArgsRef, Ty};
use rustc_middle::{bug, span_bug};
use rustc_span::{Span, Symbol, sym};
use rustc_symbol_mangling::mangle_internal_symbol;
use rustc_target::spec::PanicStrategy;
use tracing::debug;

use crate::abi::FnAbiLlvmExt;
use crate::builder::Builder;
use crate::context::CodegenCx;
use crate::llvm::{self, Metadata};
use crate::type_::Type;
use crate::type_of::LayoutLlvmExt;
use crate::va_arg::emit_va_arg;
use crate::value::Value;

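/// Lowers intrinsics that map 1:1 onto an LLVM intrinsic, e.g. `sqrtf32`
/// becomes `llvm.sqrt` instantiated with an `f32` type parameter. Returns
/// `None` for intrinsics that need bespoke handling.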
fn call_simple_intrinsic<'ll, 'tcx>(
    bx: &mut Builder<'_, 'll, 'tcx>,
    name: Symbol,
    args: &[OperandRef<'tcx, &'ll Value>],
) -> Option<&'ll Value> {
    let (base_name, type_params): (&'static str, &[&'ll Type]) = match name {
        sym::sqrtf16 => ("llvm.sqrt", &[bx.type_f16()]),
        sym::sqrtf32 => ("llvm.sqrt", &[bx.type_f32()]),
        sym::sqrtf64 => ("llvm.sqrt", &[bx.type_f64()]),
        sym::sqrtf128 => ("llvm.sqrt", &[bx.type_f128()]),

        sym::powif16 => ("llvm.powi", &[bx.type_f16(), bx.type_i32()]),
        sym::powif32 => ("llvm.powi", &[bx.type_f32(), bx.type_i32()]),
        sym::powif64 => ("llvm.powi", &[bx.type_f64(), bx.type_i32()]),
        sym::powif128 => ("llvm.powi", &[bx.type_f128(), bx.type_i32()]),

        sym::sinf16 => ("llvm.sin", &[bx.type_f16()]),
        sym::sinf32 => ("llvm.sin", &[bx.type_f32()]),
        sym::sinf64 => ("llvm.sin", &[bx.type_f64()]),
        sym::sinf128 => ("llvm.sin", &[bx.type_f128()]),

        sym::cosf16 => ("llvm.cos", &[bx.type_f16()]),
        sym::cosf32 => ("llvm.cos", &[bx.type_f32()]),
        sym::cosf64 => ("llvm.cos", &[bx.type_f64()]),
        sym::cosf128 => ("llvm.cos", &[bx.type_f128()]),

        sym::powf16 => ("llvm.pow", &[bx.type_f16()]),
        sym::powf32 => ("llvm.pow", &[bx.type_f32()]),
        sym::powf64 => ("llvm.pow", &[bx.type_f64()]),
        sym::powf128 => ("llvm.pow", &[bx.type_f128()]),

        sym::expf16 => ("llvm.exp", &[bx.type_f16()]),
        sym::expf32 => ("llvm.exp", &[bx.type_f32()]),
        sym::expf64 => ("llvm.exp", &[bx.type_f64()]),
        sym::expf128 => ("llvm.exp", &[bx.type_f128()]),

        sym::exp2f16 => ("llvm.exp2", &[bx.type_f16()]),
        sym::exp2f32 => ("llvm.exp2", &[bx.type_f32()]),
        sym::exp2f64 => ("llvm.exp2", &[bx.type_f64()]),
        sym::exp2f128 => ("llvm.exp2", &[bx.type_f128()]),

        sym::logf16 => ("llvm.log", &[bx.type_f16()]),
        sym::logf32 => ("llvm.log", &[bx.type_f32()]),
        sym::logf64 => ("llvm.log", &[bx.type_f64()]),
        sym::logf128 => ("llvm.log", &[bx.type_f128()]),

        sym::log10f16 => ("llvm.log10", &[bx.type_f16()]),
        sym::log10f32 => ("llvm.log10", &[bx.type_f32()]),
        sym::log10f64 => ("llvm.log10", &[bx.type_f64()]),
        sym::log10f128 => ("llvm.log10", &[bx.type_f128()]),

        sym::log2f16 => ("llvm.log2", &[bx.type_f16()]),
        sym::log2f32 => ("llvm.log2", &[bx.type_f32()]),
        sym::log2f64 => ("llvm.log2", &[bx.type_f64()]),
        sym::log2f128 => ("llvm.log2", &[bx.type_f128()]),

        sym::fmaf16 => ("llvm.fma", &[bx.type_f16()]),
        sym::fmaf32 => ("llvm.fma", &[bx.type_f32()]),
        sym::fmaf64 => ("llvm.fma", &[bx.type_f64()]),
        sym::fmaf128 => ("llvm.fma", &[bx.type_f128()]),

        sym::fmuladdf16 => ("llvm.fmuladd", &[bx.type_f16()]),
        sym::fmuladdf32 => ("llvm.fmuladd", &[bx.type_f32()]),
        sym::fmuladdf64 => ("llvm.fmuladd", &[bx.type_f64()]),
        sym::fmuladdf128 => ("llvm.fmuladd", &[bx.type_f128()]),

        sym::fabsf16 => ("llvm.fabs", &[bx.type_f16()]),
        sym::fabsf32 => ("llvm.fabs", &[bx.type_f32()]),
        sym::fabsf64 => ("llvm.fabs", &[bx.type_f64()]),
        sym::fabsf128 => ("llvm.fabs", &[bx.type_f128()]),

        sym::minnumf16 => ("llvm.minnum", &[bx.type_f16()]),
        sym::minnumf32 => ("llvm.minnum", &[bx.type_f32()]),
        sym::minnumf64 => ("llvm.minnum", &[bx.type_f64()]),
        sym::minnumf128 => ("llvm.minnum", &[bx.type_f128()]),

        // FIXME: LLVM currently miscompiles these intrinsics; re-enable them
        // once llvm/llvm-project#{139380,139381,140445} are fixed.
        //sym::minimumf16 => ("llvm.minimum", &[bx.type_f16()]),
        //sym::minimumf32 => ("llvm.minimum", &[bx.type_f32()]),
        //sym::minimumf64 => ("llvm.minimum", &[bx.type_f64()]),
        //sym::minimumf128 => ("llvm.minimum", &[bx.type_f128()]),
        //
        sym::maxnumf16 => ("llvm.maxnum", &[bx.type_f16()]),
        sym::maxnumf32 => ("llvm.maxnum", &[bx.type_f32()]),
        sym::maxnumf64 => ("llvm.maxnum", &[bx.type_f64()]),
        sym::maxnumf128 => ("llvm.maxnum", &[bx.type_f128()]),

        // FIXME: LLVM currently miscompiles these intrinsics; re-enable them
        // once llvm/llvm-project#{139380,139381,140445} are fixed.
        //sym::maximumf16 => ("llvm.maximum", &[bx.type_f16()]),
        //sym::maximumf32 => ("llvm.maximum", &[bx.type_f32()]),
        //sym::maximumf64 => ("llvm.maximum", &[bx.type_f64()]),
        //sym::maximumf128 => ("llvm.maximum", &[bx.type_f128()]),
        //
        sym::copysignf16 => ("llvm.copysign", &[bx.type_f16()]),
        sym::copysignf32 => ("llvm.copysign", &[bx.type_f32()]),
        sym::copysignf64 => ("llvm.copysign", &[bx.type_f64()]),
        sym::copysignf128 => ("llvm.copysign", &[bx.type_f128()]),

        sym::floorf16 => ("llvm.floor", &[bx.type_f16()]),
        sym::floorf32 => ("llvm.floor", &[bx.type_f32()]),
        sym::floorf64 => ("llvm.floor", &[bx.type_f64()]),
        sym::floorf128 => ("llvm.floor", &[bx.type_f128()]),

        sym::ceilf16 => ("llvm.ceil", &[bx.type_f16()]),
        sym::ceilf32 => ("llvm.ceil", &[bx.type_f32()]),
        sym::ceilf64 => ("llvm.ceil", &[bx.type_f64()]),
        sym::ceilf128 => ("llvm.ceil", &[bx.type_f128()]),

        sym::truncf16 => ("llvm.trunc", &[bx.type_f16()]),
        sym::truncf32 => ("llvm.trunc", &[bx.type_f32()]),
        sym::truncf64 => ("llvm.trunc", &[bx.type_f64()]),
        sym::truncf128 => ("llvm.trunc", &[bx.type_f128()]),

        // We could use any of `rint`, `nearbyint`, or `roundeven`
        // for this -- they are all identical in semantics when
        // assuming the default FP environment.
        // `rint` is what we have used historically.
        sym::round_ties_even_f16 => ("llvm.rint", &[bx.type_f16()]),
        sym::round_ties_even_f32 => ("llvm.rint", &[bx.type_f32()]),
        sym::round_ties_even_f64 => ("llvm.rint", &[bx.type_f64()]),
        sym::round_ties_even_f128 => ("llvm.rint", &[bx.type_f128()]),

        sym::roundf16 => ("llvm.round", &[bx.type_f16()]),
        sym::roundf32 => ("llvm.round", &[bx.type_f32()]),
        sym::roundf64 => ("llvm.round", &[bx.type_f64()]),
        sym::roundf128 => ("llvm.round", &[bx.type_f128()]),

        _ => return None,
    };
    Some(bx.call_intrinsic(
        base_name,
        type_params,
        &args.iter().map(|arg| arg.immediate()).collect::<Vec<_>>(),
    ))
}

impl<'ll, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> {
    fn codegen_intrinsic_call(
        &mut self,
        instance: ty::Instance<'tcx>,
        args: &[OperandRef<'tcx, &'ll Value>],
        result: PlaceRef<'tcx, &'ll Value>,
        span: Span,
    ) -> Result<(), ty::Instance<'tcx>> {
        let tcx = self.tcx;

        let name = tcx.item_name(instance.def_id());
        let fn_args = instance.args;

        let simple = call_simple_intrinsic(self, name, args);
        let llval = match name {
            _ if simple.is_some() => simple.unwrap(),
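            // `llvm.ptrmask` masks the address bits of a pointer in place,
            // matching `ptr_mask`'s contract of preserving the pointer's
            // provenance.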
            sym::ptr_mask => {
                let ptr = args[0].immediate();
                self.call_intrinsic(
                    "llvm.ptrmask",
                    &[self.val_ty(ptr), self.type_isize()],
                    &[ptr, args[1].immediate()],
                )
            }
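            // Only an immediate operand can usefully be fed to
            // `llvm.is.constant`; for values passed indirectly we
            // conservatively report "not statically known".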
            sym::is_val_statically_known => {
                if let OperandValue::Immediate(imm) = args[0].val {
                    self.call_intrinsic(
                        "llvm.is.constant",
                        &[args[0].layout.immediate_llvm_type(self.cx)],
                        &[imm],
                    )
                } else {
                    self.const_bool(false)
                }
            }
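            // Lowered to a plain `select` tagged with `!unpredictable`
            // metadata, nudging LLVM towards a branchless lowering.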
            sym::select_unpredictable => {
                let cond = args[0].immediate();
                assert_eq!(args[1].layout, args[2].layout);
                let select = |bx: &mut Self, true_val, false_val| {
                    let result = bx.select(cond, true_val, false_val);
                    bx.set_unpredictable(&result);
                    result
                };
                match (args[1].val, args[2].val) {
                    (OperandValue::Ref(true_val), OperandValue::Ref(false_val)) => {
                        assert!(true_val.llextra.is_none());
                        assert!(false_val.llextra.is_none());
                        assert_eq!(true_val.align, false_val.align);
                        let ptr = select(self, true_val.llval, false_val.llval);
                        let selected =
                            OperandValue::Ref(PlaceValue::new_sized(ptr, true_val.align));
                        selected.store(self, result);
                        return Ok(());
                    }
                    (OperandValue::Immediate(_), OperandValue::Immediate(_))
                    | (OperandValue::Pair(_, _), OperandValue::Pair(_, _)) => {
                        let true_val = args[1].immediate_or_packed_pair(self);
                        let false_val = args[2].immediate_or_packed_pair(self);
                        select(self, true_val, false_val)
                    }
                    (OperandValue::ZeroSized, OperandValue::ZeroSized) => return Ok(()),
                    _ => span_bug!(span, "Incompatible OperandValue for select_unpredictable"),
                }
            }
            sym::catch_unwind => {
                catch_unwind_intrinsic(
                    self,
                    args[0].immediate(),
                    args[1].immediate(),
                    args[2].immediate(),
                    result,
                );
                return Ok(());
            }
            sym::breakpoint => self.call_intrinsic("llvm.debugtrap", &[], &[]),
            sym::va_copy => {
                let dest = args[0].immediate();
                self.call_intrinsic(
                    "llvm.va_copy",
                    &[self.val_ty(dest)],
                    &[dest, args[1].immediate()],
                )
            }
            sym::va_arg => {
                match result.layout.backend_repr {
                    BackendRepr::Scalar(scalar) => {
                        match scalar.primitive() {
                            Primitive::Int(..) => {
                                if self.cx().size_of(result.layout.ty).bytes() < 4 {
                                    // `va_arg` should not be called on an integer type
                                    // less than 4 bytes in length. If it is, promote
                                    // the integer to an `i32` and truncate the result
                                    // back to the smaller type.
                                    let promoted_result = emit_va_arg(self, args[0], tcx.types.i32);
                                    self.trunc(promoted_result, result.layout.llvm_type(self))
                                } else {
                                    emit_va_arg(self, args[0], result.layout.ty)
                                }
                            }
                            Primitive::Float(Float::F16) => {
                                bug!("the va_arg intrinsic does not work with `f16`")
                            }
                            Primitive::Float(Float::F64) | Primitive::Pointer(_) => {
                                emit_va_arg(self, args[0], result.layout.ty)
                            }
                            // `va_arg` should never be used with the return type f32.
                            Primitive::Float(Float::F32) => {
                                bug!("the va_arg intrinsic does not work with `f32`")
                            }
                            Primitive::Float(Float::F128) => {
                                bug!("the va_arg intrinsic does not work with `f128`")
                            }
                        }
                    }
                    _ => bug!("the va_arg intrinsic does not work with non-scalar types"),
                }
            }

            sym::volatile_load | sym::unaligned_volatile_load => {
                let ptr = args[0].immediate();
                let load = self.volatile_load(result.layout.llvm_type(self), ptr);
                let align = if name == sym::unaligned_volatile_load {
                    1
                } else {
                    result.layout.align.abi.bytes() as u32
                };
                unsafe {
                    llvm::LLVMSetAlignment(load, align);
                }
                if !result.layout.is_zst() {
                    self.store_to_place(load, result.val);
                }
                return Ok(());
            }
            sym::volatile_store => {
                let dst = args[0].deref(self.cx());
                args[1].val.volatile_store(self, dst);
                return Ok(());
            }
            sym::unaligned_volatile_store => {
                let dst = args[0].deref(self.cx());
                args[1].val.unaligned_volatile_store(self, dst);
                return Ok(());
            }
            sym::prefetch_read_data
            | sym::prefetch_write_data
            | sym::prefetch_read_instruction
            | sym::prefetch_write_instruction => {
                let (rw, cache_type) = match name {
                    sym::prefetch_read_data => (0, 1),
                    sym::prefetch_write_data => (1, 1),
                    sym::prefetch_read_instruction => (0, 0),
                    sym::prefetch_write_instruction => (1, 0),
                    _ => bug!(),
                };
                let ptr = args[0].immediate();
                self.call_intrinsic(
                    "llvm.prefetch",
                    &[self.val_ty(ptr)],
                    &[ptr, self.const_i32(rw), args[1].immediate(), self.const_i32(cache_type)],
                )
            }
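            // Computes `a * b + c + d` at double the input width, then
            // splits the result into (low, high) halves. The wide value
            // cannot overflow: even at the unsigned maxima,
            // (2^n - 1)^2 + 2 * (2^n - 1) = 2^(2n) - 1 still fits in 2n bits.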
            sym::carrying_mul_add => {
                let (size, signed) = fn_args.type_at(0).int_size_and_signed(self.tcx);

                let wide_llty = self.type_ix(size.bits() * 2);
                let args = args.as_array().unwrap();
                let [a, b, c, d] = args.map(|a| self.intcast(a.immediate(), wide_llty, signed));

                let wide = if signed {
                    let prod = self.unchecked_smul(a, b);
                    let acc = self.unchecked_sadd(prod, c);
                    self.unchecked_sadd(acc, d)
                } else {
                    let prod = self.unchecked_umul(a, b);
                    let acc = self.unchecked_uadd(prod, c);
                    self.unchecked_uadd(acc, d)
                };

                let narrow_llty = self.type_ix(size.bits());
                let low = self.trunc(wide, narrow_llty);
                let bits_const = self.const_uint(wide_llty, size.bits());
                // No need for ashr when signed; LLVM changes it to lshr anyway.
                let high = self.lshr(wide, bits_const);
                // FIXME: could be `trunc nuw`, even for signed.
                let high = self.trunc(high, narrow_llty);

                let pair_llty = self.type_struct(&[narrow_llty, narrow_llty], false);
                let pair = self.const_poison(pair_llty);
                let pair = self.insert_value(pair, low, 0);
                let pair = self.insert_value(pair, high, 1);
                pair
            }
            sym::ctlz
            | sym::ctlz_nonzero
            | sym::cttz
            | sym::cttz_nonzero
            | sym::ctpop
            | sym::bswap
            | sym::bitreverse
            | sym::rotate_left
            | sym::rotate_right
            | sym::saturating_add
            | sym::saturating_sub => {
                let ty = args[0].layout.ty;
                if !ty.is_integral() {
                    tcx.dcx().emit_err(InvalidMonomorphization::BasicIntegerType {
                        span,
                        name,
                        ty,
                    });
                    return Ok(());
                }
                let (size, signed) = ty.int_size_and_signed(self.tcx);
                let width = size.bits();
                let llty = self.type_ix(width);
                match name {
                    sym::ctlz | sym::cttz => {
                        let y = self.const_bool(false);
                        let ret = self.call_intrinsic(
                            format!("llvm.{name}"),
                            &[llty],
                            &[args[0].immediate(), y],
                        );

                        self.intcast(ret, result.layout.llvm_type(self), false)
                    }
                    sym::ctlz_nonzero => {
                        let y = self.const_bool(true);
                        let ret =
                            self.call_intrinsic("llvm.ctlz", &[llty], &[args[0].immediate(), y]);
                        self.intcast(ret, result.layout.llvm_type(self), false)
                    }
                    sym::cttz_nonzero => {
                        let y = self.const_bool(true);
                        let ret =
                            self.call_intrinsic("llvm.cttz", &[llty], &[args[0].immediate(), y]);
                        self.intcast(ret, result.layout.llvm_type(self), false)
                    }
                    sym::ctpop => {
                        let ret =
                            self.call_intrinsic("llvm.ctpop", &[llty], &[args[0].immediate()]);
                        self.intcast(ret, result.layout.llvm_type(self), false)
                    }
                    sym::bswap => {
                        if width == 8 {
                            args[0].immediate() // byte-swapping a u8/i8 is a no-op
                        } else {
                            self.call_intrinsic("llvm.bswap", &[llty], &[args[0].immediate()])
                        }
                    }
                    sym::bitreverse => {
                        self.call_intrinsic("llvm.bitreverse", &[llty], &[args[0].immediate()])
                    }
                    sym::rotate_left | sym::rotate_right => {
                        let is_left = name == sym::rotate_left;
                        let val = args[0].immediate();
                        let raw_shift = args[1].immediate();
                        // rotate = funnel shift with first two args the same
                        let llvm_name = format!("llvm.fsh{}", if is_left { 'l' } else { 'r' });

                        // LLVM expects the shift to have the same type as the values, but Rust
                        // always uses `u32`.
                        let raw_shift = self.intcast(raw_shift, self.val_ty(val), false);

                        self.call_intrinsic(llvm_name, &[llty], &[val, val, raw_shift])
                    }
                    sym::saturating_add | sym::saturating_sub => {
                        let is_add = name == sym::saturating_add;
                        let lhs = args[0].immediate();
                        let rhs = args[1].immediate();
                        let llvm_name = format!(
                            "llvm.{}{}.sat",
                            if signed { 's' } else { 'u' },
                            if is_add { "add" } else { "sub" },
                        );
                        self.call_intrinsic(llvm_name, &[llty], &[lhs, rhs])
                    }
                    _ => bug!(),
                }
            }

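            // `raw_eq` compares two values byte-for-byte. Scalar(-pair) and
            // small aggregate layouts are loaded and compared as single wide
            // integers; vectors and larger aggregates fall back to `memcmp`.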
            sym::raw_eq => {
                use BackendRepr::*;
                let tp_ty = fn_args.type_at(0);
                let layout = self.layout_of(tp_ty).layout;
                let use_integer_compare = match layout.backend_repr() {
                    Scalar(_) | ScalarPair(_, _) => true,
                    SimdVector { .. } => false,
                    Memory { .. } => {
                        // For rusty ABIs, small aggregates are actually passed
                        // as `RegKind::Integer` (see `FnAbi::adjust_for_abi`),
                        // so we re-use that same threshold here.
                        layout.size() <= self.data_layout().pointer_size() * 2
                    }
                };

                let a = args[0].immediate();
                let b = args[1].immediate();
                if layout.size().bytes() == 0 {
                    self.const_bool(true)
                } else if use_integer_compare {
                    let integer_ty = self.type_ix(layout.size().bits());
                    let a_val = self.load(integer_ty, a, layout.align().abi);
                    let b_val = self.load(integer_ty, b, layout.align().abi);
                    self.icmp(IntPredicate::IntEQ, a_val, b_val)
                } else {
                    let n = self.const_usize(layout.size().bytes());
                    let cmp = self.call_intrinsic("memcmp", &[], &[a, b, n]);
                    self.icmp(IntPredicate::IntEQ, cmp, self.const_int(self.type_int(), 0))
                }
            }

            sym::compare_bytes => {
                // Here we assume that the `memcmp` provided by the target is a NOP for size 0.
                let cmp = self.call_intrinsic(
                    "memcmp",
                    &[],
                    &[args[0].immediate(), args[1].immediate(), args[2].immediate()],
                );
                // Some targets have `memcmp` returning `i16`, but the intrinsic is always `i32`.
                self.sext(cmp, self.type_ix(32))
            }

            sym::black_box => {
                args[0].val.store(self, result);
                let result_val_span = [result.val.llval];
                // We need to "use" the argument in some way LLVM can't introspect, and on
                // targets that support it we can typically leverage inline assembly to do
                // this. LLVM's interpretation of inline assembly is that it's, well, a black
                // box. This isn't the greatest implementation since it probably deoptimizes
                // more than we want, but it's so far good enough.
                //
                // For zero-sized types, the location pointed to by the result may be
                // uninitialized. Do not "use" the result in this case; instead just clobber
                // the memory.
                let (constraint, inputs): (&str, &[_]) = if result.layout.is_zst() {
                    ("~{memory}", &[])
                } else {
                    ("r,~{memory}", &result_val_span)
                };
                crate::asm::inline_asm_call(
                    self,
                    "",
                    constraint,
                    inputs,
                    self.type_void(),
                    &[],
                    true,
                    false,
                    llvm::AsmDialect::Att,
                    &[span],
                    false,
                    None,
                    None,
                )
                .unwrap_or_else(|| bug!("failed to generate inline asm call for `black_box`"));

                // We have copied the value to `result` already.
                return Ok(());
            }

            _ if name.as_str().starts_with("simd_") => {
                // Unpack non-power-of-2 #[repr(packed, simd)] arguments.
                // This gives them the expected layout of a regular #[repr(simd)] vector.
                let mut loaded_args = Vec::new();
                for arg in args {
                    loaded_args.push(
                        // #[repr(packed, simd)] vectors are passed like arrays (as references,
                        // with reduced alignment and no padding) rather than as immediates.
                        // We can use a vector load to fix the layout and turn the argument
                        // into an immediate.
                        if arg.layout.ty.is_simd()
                            && let OperandValue::Ref(place) = arg.val
                        {
                            let (size, elem_ty) = arg.layout.ty.simd_size_and_type(self.tcx());
                            let elem_ll_ty = match elem_ty.kind() {
                                ty::Float(f) => self.type_float_from_ty(*f),
                                ty::Int(i) => self.type_int_from_ty(*i),
                                ty::Uint(u) => self.type_uint_from_ty(*u),
                                ty::RawPtr(_, _) => self.type_ptr(),
                                _ => unreachable!(),
                            };
                            let loaded =
                                self.load_from_place(self.type_vector(elem_ll_ty, size), place);
                            OperandRef::from_immediate_or_packed_pair(self, loaded, arg.layout)
                        } else {
                            *arg
                        },
                    );
                }

                let llret_ty = if result.layout.ty.is_simd()
                    && let BackendRepr::Memory { .. } = result.layout.backend_repr
                {
                    let (size, elem_ty) = result.layout.ty.simd_size_and_type(self.tcx());
                    let elem_ll_ty = match elem_ty.kind() {
                        ty::Float(f) => self.type_float_from_ty(*f),
                        ty::Int(i) => self.type_int_from_ty(*i),
                        ty::Uint(u) => self.type_uint_from_ty(*u),
                        ty::RawPtr(_, _) => self.type_ptr(),
                        _ => unreachable!(),
                    };
                    self.type_vector(elem_ll_ty, size)
                } else {
                    result.layout.llvm_type(self)
                };

                match generic_simd_intrinsic(
                    self,
                    name,
                    fn_args,
                    &loaded_args,
                    result.layout.ty,
                    llret_ty,
                    span,
                ) {
                    Ok(llval) => llval,
                    // If there was an error, just skip this invocation... we'll abort compilation
                    // anyway, but we can keep codegen'ing to find more errors.
                    Err(()) => return Ok(()),
                }
            }

            _ => {
                debug!("unknown intrinsic '{}' -- falling back to default body", name);
                // Call the fallback body instead of generating the intrinsic code
                return Err(ty::Instance::new_raw(instance.def_id(), instance.args));
            }
        };

        if result.layout.ty.is_bool() {
            let val = self.from_immediate(llval);
            self.store_to_place(val, result.val);
        } else if !result.layout.ty.is_unit() {
            self.store_to_place(llval, result.val);
        }
        Ok(())
    }

    fn abort(&mut self) {
        self.call_intrinsic("llvm.trap", &[], &[]);
    }

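    // `llvm.assume` and `llvm.expect` only feed information to the
    // optimizer, so both are skipped entirely when not optimizing.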
    fn assume(&mut self, val: Self::Value) {
        if self.cx.sess().opts.optimize != rustc_session::config::OptLevel::No {
            self.call_intrinsic("llvm.assume", &[], &[val]);
        }
    }

    fn expect(&mut self, cond: Self::Value, expected: bool) -> Self::Value {
        if self.cx.sess().opts.optimize != rustc_session::config::OptLevel::No {
            self.call_intrinsic(
                "llvm.expect",
                &[self.type_i1()],
                &[cond, self.const_bool(expected)],
            )
        } else {
            cond
        }
    }

    fn type_checked_load(
        &mut self,
        llvtable: &'ll Value,
        vtable_byte_offset: u64,
        typeid: &'ll Metadata,
    ) -> Self::Value {
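        // `llvm.type.checked.load` returns both the loaded vtable entry and
        // an `i1` reporting whether the vtable matched the expected type id
        // (used by control-flow-integrity checks); only the function pointer
        // (field 0) is extracted here.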
        let typeid = self.get_metadata_value(typeid);
        let vtable_byte_offset = self.const_i32(vtable_byte_offset as i32);
        let type_checked_load = self.call_intrinsic(
            "llvm.type.checked.load",
            &[],
            &[llvtable, vtable_byte_offset, typeid],
        );
        self.extract_value(type_checked_load, 0)
    }

    fn va_start(&mut self, va_list: &'ll Value) -> &'ll Value {
        self.call_intrinsic("llvm.va_start", &[self.val_ty(va_list)], &[va_list])
    }

    fn va_end(&mut self, va_list: &'ll Value) -> &'ll Value {
        self.call_intrinsic("llvm.va_end", &[self.val_ty(va_list)], &[va_list])
    }
}

fn catch_unwind_intrinsic<'ll, 'tcx>(
    bx: &mut Builder<'_, 'll, 'tcx>,
    try_func: &'ll Value,
    data: &'ll Value,
    catch_func: &'ll Value,
    dest: PlaceRef<'tcx, &'ll Value>,
) {
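    // With `-C panic=abort` there is nothing to catch, so the "try" closure
    // is simply called directly. Otherwise, dispatch on the target's
    // unwinding flavor: MSVC SEH, wasm exception handling, emscripten's C++
    // exceptions, or GNU-style landing pads.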
    if bx.sess().panic_strategy() == PanicStrategy::Abort {
        let try_func_ty = bx.type_func(&[bx.type_ptr()], bx.type_void());
        bx.call(try_func_ty, None, None, try_func, &[data], None, None);
        // Return 0 unconditionally from the intrinsic call;
        // we can never unwind.
        OperandValue::Immediate(bx.const_i32(0)).store(bx, dest);
    } else if wants_msvc_seh(bx.sess()) {
        codegen_msvc_try(bx, try_func, data, catch_func, dest);
    } else if wants_wasm_eh(bx.sess()) {
        codegen_wasm_try(bx, try_func, data, catch_func, dest);
    } else if bx.sess().target.os == "emscripten" {
        codegen_emcc_try(bx, try_func, data, catch_func, dest);
    } else {
        codegen_gnu_try(bx, try_func, data, catch_func, dest);
    }
}

// MSVC's definition of the `rust_try` function.
//
// This implementation uses the newer LLVM exception-handling instructions,
// which support SEH on MSVC targets. Although these instructions are meant
// to work for all targets, as of this writing LLVM does not recommend using
// them everywhere, since the old instructions are still better optimized.
fn codegen_msvc_try<'ll, 'tcx>(
    bx: &mut Builder<'_, 'll, 'tcx>,
    try_func: &'ll Value,
    data: &'ll Value,
    catch_func: &'ll Value,
    dest: PlaceRef<'tcx, &'ll Value>,
) {
    let (llty, llfn) = get_rust_try_fn(bx, &mut |mut bx| {
        bx.set_personality_fn(bx.eh_personality());

        let normal = bx.append_sibling_block("normal");
        let catchswitch = bx.append_sibling_block("catchswitch");
        let catchpad_rust = bx.append_sibling_block("catchpad_rust");
        let catchpad_foreign = bx.append_sibling_block("catchpad_foreign");
        let caught = bx.append_sibling_block("caught");

        let try_func = llvm::get_param(bx.llfn(), 0);
        let data = llvm::get_param(bx.llfn(), 1);
        let catch_func = llvm::get_param(bx.llfn(), 2);

        // We're generating an IR snippet that looks like:
        //
        //   declare i32 @rust_try(%try_func, %data, %catch_func) {
        //      %slot = alloca i8*
        //      invoke %try_func(%data) to label %normal unwind label %catchswitch
        //
        //   normal:
        //      ret i32 0
        //
        //   catchswitch:
        //      %cs = catchswitch within none [%catchpad_rust, %catchpad_foreign] unwind to caller
        //
        //   catchpad_rust:
        //      %tok = catchpad within %cs [%type_descriptor, 8, %slot]
        //      %ptr = load %slot
        //      call %catch_func(%data, %ptr)
        //      catchret from %tok to label %caught
        //
        //   catchpad_foreign:
        //      %tok = catchpad within %cs [null, 64, null]
        //      call %catch_func(%data, null)
        //      catchret from %tok to label %caught
        //
        //   caught:
        //      ret i32 1
        //   }
        //
        // This structure follows the basic usage of throw/try/catch in LLVM.
        // For example, compile this C++ snippet to see what LLVM generates:
        //
        //      struct rust_panic {
        //          rust_panic(const rust_panic&);
        //          ~rust_panic();
        //
        //          void* x[2];
        //      };
        //
        //      int __rust_try(
        //          void (*try_func)(void*),
        //          void *data,
        //          void (*catch_func)(void*, void*) noexcept
        //      ) {
        //          try {
        //              try_func(data);
        //              return 0;
        //          } catch(rust_panic& a) {
        //              catch_func(data, &a);
        //              return 1;
        //          } catch(...) {
        //              catch_func(data, NULL);
        //              return 1;
        //          }
        //      }
        //
        // More information can be found in libstd's seh.rs implementation.
        let ptr_size = bx.tcx().data_layout.pointer_size();
        let ptr_align = bx.tcx().data_layout.pointer_align().abi;
        let slot = bx.alloca(ptr_size, ptr_align);
        let try_func_ty = bx.type_func(&[bx.type_ptr()], bx.type_void());
        bx.invoke(try_func_ty, None, None, try_func, &[data], normal, catchswitch, None, None);

        bx.switch_to_block(normal);
        bx.ret(bx.const_i32(0));

        bx.switch_to_block(catchswitch);
        let cs = bx.catch_switch(None, None, &[catchpad_rust, catchpad_foreign]);

        // We can't use the TypeDescriptor defined in libpanic_unwind because it
        // might be in another DLL and the SEH encoding only supports specifying
        // a TypeDescriptor from the current module.
        //
        // However this isn't an issue since the MSVC runtime uses string
        // comparison on the type name to match TypeDescriptors rather than
        // pointer equality.
        //
        // So instead we generate a new TypeDescriptor in each module that uses
        // `try` and let the linker merge duplicate definitions in the same
        // module.
        //
        // When modifying, make sure that the type_name string exactly matches
        // the one used in library/panic_unwind/src/seh.rs.
        let type_info_vtable = bx.declare_global("??_7type_info@@6B@", bx.type_ptr());
        let type_name = bx.const_bytes(b"rust_panic\0");
        let type_info =
            bx.const_struct(&[type_info_vtable, bx.const_null(bx.type_ptr()), type_name], false);
        let tydesc = bx.declare_global(
            &mangle_internal_symbol(bx.tcx, "__rust_panic_type_info"),
            bx.val_ty(type_info),
        );

        llvm::set_linkage(tydesc, llvm::Linkage::LinkOnceODRLinkage);
        if bx.cx.tcx.sess.target.supports_comdat() {
            llvm::SetUniqueComdat(bx.llmod, tydesc);
        }
        llvm::set_initializer(tydesc, type_info);

        // The flag value of 8 indicates that we are catching the exception by
        // reference instead of by value. We can't use catch by value because
        // that requires copying the exception object, which we don't support
        // since our exception object effectively contains a Box.
        //
        // Source: MicrosoftCXXABI::getAddrOfCXXCatchHandlerType in clang
        bx.switch_to_block(catchpad_rust);
        let flags = bx.const_i32(8);
        let funclet = bx.catch_pad(cs, &[tydesc, flags, slot]);
        let ptr = bx.load(bx.type_ptr(), slot, ptr_align);
        let catch_ty = bx.type_func(&[bx.type_ptr(), bx.type_ptr()], bx.type_void());
        bx.call(catch_ty, None, None, catch_func, &[data, ptr], Some(&funclet), None);
        bx.catch_ret(&funclet, caught);

        // The flag value of 64 indicates a "catch-all".
        bx.switch_to_block(catchpad_foreign);
        let flags = bx.const_i32(64);
        let null = bx.const_null(bx.type_ptr());
        let funclet = bx.catch_pad(cs, &[null, flags, null]);
        bx.call(catch_ty, None, None, catch_func, &[data, null], Some(&funclet), None);
        bx.catch_ret(&funclet, caught);

        bx.switch_to_block(caught);
        bx.ret(bx.const_i32(1));
    });

    // Note that no invoke is used here because by definition this function
    // can't panic (that's what it's catching).
    let ret = bx.call(llty, None, None, llfn, &[try_func, data, catch_func], None, None);
    OperandValue::Immediate(ret).store(bx, dest);
}

// WASM's definition of the `rust_try` function.
fn codegen_wasm_try<'ll, 'tcx>(
    bx: &mut Builder<'_, 'll, 'tcx>,
    try_func: &'ll Value,
    data: &'ll Value,
    catch_func: &'ll Value,
    dest: PlaceRef<'tcx, &'ll Value>,
) {
    let (llty, llfn) = get_rust_try_fn(bx, &mut |mut bx| {
        bx.set_personality_fn(bx.eh_personality());

        let normal = bx.append_sibling_block("normal");
        let catchswitch = bx.append_sibling_block("catchswitch");
        let catchpad = bx.append_sibling_block("catchpad");
        let caught = bx.append_sibling_block("caught");

        let try_func = llvm::get_param(bx.llfn(), 0);
        let data = llvm::get_param(bx.llfn(), 1);
        let catch_func = llvm::get_param(bx.llfn(), 2);

        // We're generating an IR snippet that looks like:
        //
        //   declare i32 @rust_try(%try_func, %data, %catch_func) {
        //      %slot = alloca i8*
        //      invoke %try_func(%data) to label %normal unwind label %catchswitch
        //
        //   normal:
        //      ret i32 0
        //
        //   catchswitch:
        //      %cs = catchswitch within none [%catchpad] unwind to caller
        //
        //   catchpad:
        //      %tok = catchpad within %cs [null]
        //      %ptr = call @llvm.wasm.get.exception(token %tok)
        //      %sel = call @llvm.wasm.get.ehselector(token %tok)
        //      call %catch_func(%data, %ptr)
        //      catchret from %tok to label %caught
        //
        //   caught:
        //      ret i32 1
        //   }
        //
        let try_func_ty = bx.type_func(&[bx.type_ptr()], bx.type_void());
        bx.invoke(try_func_ty, None, None, try_func, &[data], normal, catchswitch, None, None);

        bx.switch_to_block(normal);
        bx.ret(bx.const_i32(0));

        bx.switch_to_block(catchswitch);
        let cs = bx.catch_switch(None, None, &[catchpad]);

        bx.switch_to_block(catchpad);
        let null = bx.const_null(bx.type_ptr());
        let funclet = bx.catch_pad(cs, &[null]);

        let ptr = bx.call_intrinsic("llvm.wasm.get.exception", &[], &[funclet.cleanuppad()]);
        let _sel = bx.call_intrinsic("llvm.wasm.get.ehselector", &[], &[funclet.cleanuppad()]);

        let catch_ty = bx.type_func(&[bx.type_ptr(), bx.type_ptr()], bx.type_void());
        bx.call(catch_ty, None, None, catch_func, &[data, ptr], Some(&funclet), None);
        bx.catch_ret(&funclet, caught);

        bx.switch_to_block(caught);
        bx.ret(bx.const_i32(1));
    });

    // Note that no invoke is used here because by definition this function
    // can't panic (that's what it's catching).
    let ret = bx.call(llty, None, None, llfn, &[try_func, data, catch_func], None, None);
    OperandValue::Immediate(ret).store(bx, dest);
}

// Definition of the standard `try` function for Rust using the GNU-like model
// of exceptions (e.g., the normal semantics of LLVM's `landingpad` and `invoke`
// instructions).
//
// This codegen is a little surprising because we always call a shim
// function instead of inlining the call to `invoke` manually here. This is done
// because in LLVM we're only allowed to have one personality per function
// definition. The call to the `try` intrinsic is being inlined into the
// function calling it, and that function may already have other personality
// functions in play. By calling a shim we're guaranteed that our shim will have
// the right personality function.
fn codegen_gnu_try<'ll, 'tcx>(
    bx: &mut Builder<'_, 'll, 'tcx>,
    try_func: &'ll Value,
    data: &'ll Value,
    catch_func: &'ll Value,
    dest: PlaceRef<'tcx, &'ll Value>,
) {
    let (llty, llfn) = get_rust_try_fn(bx, &mut |mut bx| {
        // Codegens the shims described above:
        //
        //   bx:
        //      invoke %try_func(%data) normal %normal unwind %catch
        //
        //   normal:
        //      ret 0
        //
        //   catch:
        //      (%ptr, _) = landingpad
        //      call %catch_func(%data, %ptr)
        //      ret 1
        let then = bx.append_sibling_block("then");
        let catch = bx.append_sibling_block("catch");

        let try_func = llvm::get_param(bx.llfn(), 0);
        let data = llvm::get_param(bx.llfn(), 1);
        let catch_func = llvm::get_param(bx.llfn(), 2);
        let try_func_ty = bx.type_func(&[bx.type_ptr()], bx.type_void());
        bx.invoke(try_func_ty, None, None, try_func, &[data], then, catch, None, None);

        bx.switch_to_block(then);
        bx.ret(bx.const_i32(0));

        // Type indicator for the exception being thrown.
        //
        // The first value in this tuple is a pointer to the exception object
        // being thrown. The second value is a "selector" indicating which of
        // the landing pad clauses the exception's type had been matched to.
        // rust_try ignores the selector.
        bx.switch_to_block(catch);
        let lpad_ty = bx.type_struct(&[bx.type_ptr(), bx.type_i32()], false);
        let vals = bx.landing_pad(lpad_ty, bx.eh_personality(), 1);
        let tydesc = bx.const_null(bx.type_ptr());
        bx.add_clause(vals, tydesc);
        let ptr = bx.extract_value(vals, 0);
        let catch_ty = bx.type_func(&[bx.type_ptr(), bx.type_ptr()], bx.type_void());
        bx.call(catch_ty, None, None, catch_func, &[data, ptr], None, None);
        bx.ret(bx.const_i32(1));
    });

    // Note that no invoke is used here because by definition this function
    // can't panic (that's what it's catching).
    let ret = bx.call(llty, None, None, llfn, &[try_func, data, catch_func], None, None);
    OperandValue::Immediate(ret).store(bx, dest);
}

// Variant of codegen_gnu_try used for emscripten where Rust panics are
// implemented using C++ exceptions. Here we use exceptions of a specific type
// (`struct rust_panic`) to represent Rust panics.
fn codegen_emcc_try<'ll, 'tcx>(
    bx: &mut Builder<'_, 'll, 'tcx>,
    try_func: &'ll Value,
    data: &'ll Value,
    catch_func: &'ll Value,
    dest: PlaceRef<'tcx, &'ll Value>,
) {
    let (llty, llfn) = get_rust_try_fn(bx, &mut |mut bx| {
        // Codegens the shims described above:
        //
        //   bx:
        //      invoke %try_func(%data) normal %normal unwind %catch
        //
        //   normal:
        //      ret 0
        //
        //   catch:
        //      (%ptr, %selector) = landingpad
        //      %rust_typeid = @llvm.eh.typeid.for(@_ZTI10rust_panic)
        //      %is_rust_panic = %selector == %rust_typeid
        //      %catch_data = alloca { i8*, i8 }
        //      %catch_data[0] = %ptr
        //      %catch_data[1] = %is_rust_panic
        //      call %catch_func(%data, %catch_data)
        //      ret 1
        let then = bx.append_sibling_block("then");
        let catch = bx.append_sibling_block("catch");

        let try_func = llvm::get_param(bx.llfn(), 0);
        let data = llvm::get_param(bx.llfn(), 1);
        let catch_func = llvm::get_param(bx.llfn(), 2);
        let try_func_ty = bx.type_func(&[bx.type_ptr()], bx.type_void());
        bx.invoke(try_func_ty, None, None, try_func, &[data], then, catch, None, None);

        bx.switch_to_block(then);
        bx.ret(bx.const_i32(0));

        // Type indicator for the exception being thrown.
        //
        // The first value in this tuple is a pointer to the exception object
        // being thrown. The second value is a "selector" indicating which of
        // the landing pad clauses the exception's type had been matched to.
        bx.switch_to_block(catch);
        let tydesc = bx.eh_catch_typeinfo();
        let lpad_ty = bx.type_struct(&[bx.type_ptr(), bx.type_i32()], false);
        let vals = bx.landing_pad(lpad_ty, bx.eh_personality(), 2);
        bx.add_clause(vals, tydesc);
        bx.add_clause(vals, bx.const_null(bx.type_ptr()));
        let ptr = bx.extract_value(vals, 0);
        let selector = bx.extract_value(vals, 1);

        // Check if the typeid we got is the one for a Rust panic.
        let rust_typeid = bx.call_intrinsic("llvm.eh.typeid.for", &[bx.val_ty(tydesc)], &[tydesc]);
        let is_rust_panic = bx.icmp(IntPredicate::IntEQ, selector, rust_typeid);
        let is_rust_panic = bx.zext(is_rust_panic, bx.type_bool());

        // We need to pass two values to catch_func (ptr and is_rust_panic), so
        // create an alloca and pass a pointer to that.
        let ptr_size = bx.tcx().data_layout.pointer_size();
        let ptr_align = bx.tcx().data_layout.pointer_align().abi;
        let i8_align = bx.tcx().data_layout.i8_align.abi;
        // Required in order for there to be no padding between the fields.
        assert!(i8_align <= ptr_align);
        let catch_data = bx.alloca(2 * ptr_size, ptr_align);
        bx.store(ptr, catch_data, ptr_align);
        let catch_data_1 = bx.inbounds_ptradd(catch_data, bx.const_usize(ptr_size.bytes()));
        bx.store(is_rust_panic, catch_data_1, i8_align);

        let catch_ty = bx.type_func(&[bx.type_ptr(), bx.type_ptr()], bx.type_void());
        bx.call(catch_ty, None, None, catch_func, &[data, catch_data], None, None);
        bx.ret(bx.const_i32(1));
    });

    // Note that no invoke is used here because by definition this function
    // can't panic (that's what it's catching).
    let ret = bx.call(llty, None, None, llfn, &[try_func, data, catch_func], None, None);
    OperandValue::Immediate(ret).store(bx, dest);
}

// Helper function to give a Block to a closure to codegen a shim function.
// This is currently primarily used for the `try` intrinsic functions above.
fn gen_fn<'a, 'll, 'tcx>(
    cx: &'a CodegenCx<'ll, 'tcx>,
    name: &str,
    rust_fn_sig: ty::PolyFnSig<'tcx>,
    codegen: &mut dyn FnMut(Builder<'a, 'll, 'tcx>),
) -> (&'ll Type, &'ll Value) {
    let fn_abi = cx.fn_abi_of_fn_ptr(rust_fn_sig, ty::List::empty());
    let llty = fn_abi.llvm_type(cx);
    let llfn = cx.declare_fn(name, fn_abi, None);
    cx.set_frame_pointer_type(llfn);
    cx.apply_target_cpu_attr(llfn);
    // FIXME(eddyb) find a nicer way to do this.
    llvm::set_linkage(llfn, llvm::Linkage::InternalLinkage);
    let llbb = Builder::append_block(cx, llfn, "entry-block");
    let bx = Builder::build(cx, llbb);
    codegen(bx);
    (llty, llfn)
}

// Helper function used to get a handle to the `__rust_try` function used to
// catch exceptions.
//
// This function is only generated once and is then cached.
fn get_rust_try_fn<'a, 'll, 'tcx>(
    cx: &'a CodegenCx<'ll, 'tcx>,
    codegen: &mut dyn FnMut(Builder<'a, 'll, 'tcx>),
) -> (&'ll Type, &'ll Value) {
    if let Some(llfn) = cx.rust_try_fn.get() {
        return llfn;
    }

    // Define the type up front for the signature of the rust_try function.
    let tcx = cx.tcx;
    let i8p = Ty::new_mut_ptr(tcx, tcx.types.i8);
    // `unsafe fn(*mut i8) -> ()`
    let try_fn_ty = Ty::new_fn_ptr(
        tcx,
        ty::Binder::dummy(tcx.mk_fn_sig(
            [i8p],
            tcx.types.unit,
            false,
            hir::Safety::Unsafe,
            ExternAbi::Rust,
        )),
    );
    // `unsafe fn(*mut i8, *mut i8) -> ()`
    let catch_fn_ty = Ty::new_fn_ptr(
        tcx,
        ty::Binder::dummy(tcx.mk_fn_sig(
            [i8p, i8p],
            tcx.types.unit,
            false,
            hir::Safety::Unsafe,
            ExternAbi::Rust,
        )),
    );
    // `unsafe fn(unsafe fn(*mut i8) -> (), *mut i8, unsafe fn(*mut i8, *mut i8) -> ()) -> i32`
    let rust_fn_sig = ty::Binder::dummy(cx.tcx.mk_fn_sig(
        [try_fn_ty, i8p, catch_fn_ty],
        tcx.types.i32,
        false,
        hir::Safety::Unsafe,
        ExternAbi::Rust,
    ));
    let rust_try = gen_fn(cx, "__rust_try", rust_fn_sig, codegen);
    cx.rust_try_fn.set(Some(rust_try));
    rust_try
}

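/// Generic lowering for the `simd_*` intrinsics in terms of LLVM's portable
/// vector operations. Monomorphization problems are reported as diagnostics
/// and surfaced to the caller as `Err(())`.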
fn generic_simd_intrinsic<'ll, 'tcx>(
    bx: &mut Builder<'_, 'll, 'tcx>,
    name: Symbol,
    fn_args: GenericArgsRef<'tcx>,
    args: &[OperandRef<'tcx, &'ll Value>],
    ret_ty: Ty<'tcx>,
    llret_ty: &'ll Type,
    span: Span,
) -> Result<&'ll Value, ()> {
    macro_rules! return_error {
        ($diag: expr) => {{
            bx.sess().dcx().emit_err($diag);
            return Err(());
        }};
    }

    macro_rules! require {
        ($cond: expr, $diag: expr) => {
            if !$cond {
                return_error!($diag);
            }
        };
    }

    macro_rules! require_simd {
        ($ty: expr, $variant:ident) => {{
            require!($ty.is_simd(), InvalidMonomorphization::$variant { span, name, ty: $ty });
            $ty.simd_size_and_type(bx.tcx())
        }};
    }

    /// Returns the bitwidth of the `$ty` argument if it is an `Int` or `Uint` type.
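    /// For `isize`/`usize`, whose `bit_width()` is `None`, this falls back
    /// to the target's pointer width.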
    macro_rules! require_int_or_uint_ty {
        ($ty: expr, $diag: expr) => {
            match $ty {
                ty::Int(i) => {
                    i.bit_width().unwrap_or_else(|| bx.data_layout().pointer_size().bits())
                }
                ty::Uint(i) => {
                    i.bit_width().unwrap_or_else(|| bx.data_layout().pointer_size().bits())
                }
                _ => {
                    return_error!($diag);
                }
            }
        };
    }

    /// Converts a vector mask, where each element has a bit width equal to the data elements it is used with,
    /// down to an `i1`-based mask that can be used by LLVM intrinsics.
    ///
    /// The Rust SIMD semantics are that each element should consist of either all ones or all zeroes,
    /// but this information is not available to LLVM. Truncating the vector effectively uses the lowest bit,
    /// but codegen for several targets is better if we consider the highest bit by shifting.
    ///
    /// For x86 SSE/AVX targets this is beneficial since most instructions with mask parameters only consider the highest bit.
    /// So even though at the LLVM level we have an additional shift, in the final assembly there is no shift or truncate and
    /// instead the mask can be used as is.
    ///
    /// For aarch64 and other targets there is a benefit because a mask from the sign bit can be more
    /// efficiently converted to an all-ones / all-zeroes mask by comparing whether each element is negative.
    fn vector_mask_to_bitmask<'a, 'll, 'tcx>(
        bx: &mut Builder<'a, 'll, 'tcx>,
        i_xn: &'ll Value,
        in_elem_bitwidth: u64,
        in_len: u64,
    ) -> &'ll Value {
        // Shift the MSB to the right by "in_elem_bitwidth - 1" into the first bit position.
        let shift_idx = bx.cx.const_int(bx.type_ix(in_elem_bitwidth), (in_elem_bitwidth - 1) as _);
        let shift_indices = vec![shift_idx; in_len as _];
        let i_xn_msb = bx.lshr(i_xn, bx.const_vector(shift_indices.as_slice()));
        // Truncate vector to an <i1 x N>
        bx.trunc(i_xn_msb, bx.type_vector(bx.type_i1(), in_len))
    }

    // Sanity-check: all vector arguments must be immediates.
    if cfg!(debug_assertions) {
        for arg in args {
            if arg.layout.ty.is_simd() {
                assert_matches!(arg.val, OperandValue::Immediate(_));
            }
        }
    }

    if name == sym::simd_select_bitmask {
        let (len, _) = require_simd!(args[1].layout.ty, SimdArgument);

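        // The bitmask may be supplied either as an integer wide enough to
        // hold `len` bits (rounded up to a power of two, minimum 8) or as a
        // `[u8; N]` byte array carrying one bit per lane.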
1213        let expected_int_bits = len.max(8).next_power_of_two();
1214        let expected_bytes = len.div_ceil(8);
1215
1216        let mask_ty = args[0].layout.ty;
1217        let mask = match mask_ty.kind() {
1218            ty::Int(i) if i.bit_width() == Some(expected_int_bits) => args[0].immediate(),
1219            ty::Uint(i) if i.bit_width() == Some(expected_int_bits) => args[0].immediate(),
1220            ty::Array(elem, len)
1221                if matches!(elem.kind(), ty::Uint(ty::UintTy::U8))
1222                    && len
1223                        .try_to_target_usize(bx.tcx)
1224                        .expect("expected monomorphic const in codegen")
1225                        == expected_bytes =>
1226            {
1227                let place = PlaceRef::alloca(bx, args[0].layout);
1228                args[0].val.store(bx, place);
1229                let int_ty = bx.type_ix(expected_bytes * 8);
1230                bx.load(int_ty, place.val.llval, Align::ONE)
1231            }
1232            _ => return_error!(InvalidMonomorphization::InvalidBitmask {
1233                span,
1234                name,
1235                mask_ty,
1236                expected_int_bits,
1237                expected_bytes
1238            }),
1239        };
1240
1241        let i1 = bx.type_i1();
1242        let im = bx.type_ix(len);
1243        let i1xn = bx.type_vector(i1, len);
1244        let m_im = bx.trunc(mask, im);
1245        let m_i1s = bx.bitcast(m_im, i1xn);
1246        return Ok(bx.select(m_i1s, args[1].immediate(), args[2].immediate()));
1247    }
1248
1249    // Every intrinsic below takes a SIMD vector as its first argument.
1250    let (in_len, in_elem) = require_simd!(args[0].layout.ty, SimdInput);
1251    let in_ty = args[0].layout.ty;
1252
1253    let comparison = match name {
1254        sym::simd_eq => Some(BinOp::Eq),
1255        sym::simd_ne => Some(BinOp::Ne),
1256        sym::simd_lt => Some(BinOp::Lt),
1257        sym::simd_le => Some(BinOp::Le),
1258        sym::simd_gt => Some(BinOp::Gt),
1259        sym::simd_ge => Some(BinOp::Ge),
1260        _ => None,
1261    };
1262
1263    if let Some(cmp_op) = comparison {
1264        let (out_len, out_ty) = require_simd!(ret_ty, SimdReturn);
1265
1266        require!(
1267            in_len == out_len,
1268            InvalidMonomorphization::ReturnLengthInputType {
1269                span,
1270                name,
1271                in_len,
1272                in_ty,
1273                ret_ty,
1274                out_len
1275            }
1276        );
1277        require!(
1278            bx.type_kind(bx.element_type(llret_ty)) == TypeKind::Integer,
1279            InvalidMonomorphization::ReturnIntegerType { span, name, ret_ty, out_ty }
1280        );
1281
1282        return Ok(compare_simd_types(
1283            bx,
1284            args[0].immediate(),
1285            args[1].immediate(),
1286            in_elem,
1287            llret_ty,
1288            cmp_op,
1289        ));
1290    }
1291
1292    if name == sym::simd_shuffle_const_generic {
1293        let idx = fn_args[2].expect_const().to_value().valtree.unwrap_branch();
1294        let n = idx.len() as u64;
1295
1296        let (out_len, out_ty) = require_simd!(ret_ty, SimdReturn);
1297        require!(
1298            out_len == n,
1299            InvalidMonomorphization::ReturnLength { span, name, in_len: n, ret_ty, out_len }
1300        );
1301        require!(
1302            in_elem == out_ty,
1303            InvalidMonomorphization::ReturnElement { span, name, in_elem, in_ty, ret_ty, out_ty }
1304        );
1305
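        // The shuffle reads from the concatenation of both input vectors, so valid
        // indices range over `0..2 * in_len`.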
1306        let total_len = in_len * 2;
1307
1308        let indices: Option<Vec<_>> = idx
1309            .iter()
1310            .enumerate()
1311            .map(|(arg_idx, val)| {
1312                let idx = val.unwrap_leaf().to_i32();
1313                if idx >= i32::try_from(total_len).unwrap() {
1314                    bx.sess().dcx().emit_err(InvalidMonomorphization::SimdIndexOutOfBounds {
1315                        span,
1316                        name,
1317                        arg_idx: arg_idx as u64,
1318                        total_len: total_len.into(),
1319                    });
1320                    None
1321                } else {
1322                    Some(bx.const_i32(idx))
1323                }
1324            })
1325            .collect();
1326        let Some(indices) = indices else {
1327            return Ok(bx.const_null(llret_ty));
1328        };
1329
1330        return Ok(bx.shuffle_vector(
1331            args[0].immediate(),
1332            args[1].immediate(),
1333            bx.const_vector(&indices),
1334        ));
1335    }
1336
1337    if name == sym::simd_shuffle {
1338        // Make sure this is actually a SIMD vector.
1339        let idx_ty = args[2].layout.ty;
1340        let n: u64 = if idx_ty.is_simd()
1341            && matches!(idx_ty.simd_size_and_type(bx.cx.tcx).1.kind(), ty::Uint(ty::UintTy::U32))
1342        {
1343            idx_ty.simd_size_and_type(bx.cx.tcx).0
1344        } else {
1345            return_error!(InvalidMonomorphization::SimdShuffle { span, name, ty: idx_ty })
1346        };
1347
1348        let (out_len, out_ty) = require_simd!(ret_ty, SimdReturn);
1349        require!(
1350            out_len == n,
1351            InvalidMonomorphization::ReturnLength { span, name, in_len: n, ret_ty, out_len }
1352        );
1353        require!(
1354            in_elem == out_ty,
1355            InvalidMonomorphization::ReturnElement { span, name, in_elem, in_ty, ret_ty, out_ty }
1356        );
1357
1358        let total_len = u128::from(in_len) * 2;
1359
1360        // Check that the indices are in-bounds.
1361        let indices = args[2].immediate();
1362        for i in 0..n {
1363            let val = bx.const_get_elt(indices, i as u64);
1364            let idx = bx
1365                .const_to_opt_u128(val, true)
1366                .unwrap_or_else(|| bug!("typeck should have already ensured that these are const"));
1367            if idx >= total_len {
1368                return_error!(InvalidMonomorphization::SimdIndexOutOfBounds {
1369                    span,
1370                    name,
1371                    arg_idx: i,
1372                    total_len,
1373                });
1374            }
1375        }
1376
1377        return Ok(bx.shuffle_vector(args[0].immediate(), args[1].immediate(), indices));
1378    }
1379
1380    if name == sym::simd_insert || name == sym::simd_insert_dyn {
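        // `simd_insert` takes a constant index that is bounds-checked here at
        // monomorphization time; `simd_insert_dyn` forwards the runtime index
        // directly to LLVM's insertelement.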
1381        require!(
1382            in_elem == args[2].layout.ty,
1383            InvalidMonomorphization::InsertedType {
1384                span,
1385                name,
1386                in_elem,
1387                in_ty,
1388                out_ty: args[2].layout.ty
1389            }
1390        );
1391
1392        let index_imm = if name == sym::simd_insert {
1393            let idx = bx
1394                .const_to_opt_u128(args[1].immediate(), false)
1395                .expect("typeck should have ensure that this is a const");
1396            if idx >= in_len.into() {
1397                return_error!(InvalidMonomorphization::SimdIndexOutOfBounds {
1398                    span,
1399                    name,
1400                    arg_idx: 1,
1401                    total_len: in_len.into(),
1402                });
1403            }
1404            bx.const_i32(idx as i32)
1405        } else {
1406            args[1].immediate()
1407        };
1408
1409        return Ok(bx.insert_element(args[0].immediate(), args[2].immediate(), index_imm));
1410    }
1411    if name == sym::simd_extract || name == sym::simd_extract_dyn {
1412        require!(
1413            ret_ty == in_elem,
1414            InvalidMonomorphization::ReturnType { span, name, in_elem, in_ty, ret_ty }
1415        );
1416        let index_imm = if name == sym::simd_extract {
1417            let idx = bx
1418                .const_to_opt_u128(args[1].immediate(), false)
1419                .expect("typeck should have ensure that this is a const");
1420            if idx >= in_len.into() {
1421                return_error!(InvalidMonomorphization::SimdIndexOutOfBounds {
1422                    span,
1423                    name,
1424                    arg_idx: 1,
1425                    total_len: in_len.into(),
1426                });
1427            }
1428            bx.const_i32(idx as i32)
1429        } else {
1430            args[1].immediate()
1431        };
1432
1433        return Ok(bx.extract_element(args[0].immediate(), index_imm));
1434    }
1435
1436    if name == sym::simd_select {
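        // simd_select(mask: <N x iM>, a: <N x T>, b: <N x T>) selects lanewise: a lane
        // whose mask element is all ones comes from `a`, one that is all zeroes from `b`.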
1437        let m_elem_ty = in_elem;
1438        let m_len = in_len;
1439        let (v_len, _) = require_simd!(args[1].layout.ty, SimdArgument);
1440        require!(
1441            m_len == v_len,
1442            InvalidMonomorphization::MismatchedLengths { span, name, m_len, v_len }
1443        );
1444        let in_elem_bitwidth = require_int_or_uint_ty!(
1445            m_elem_ty.kind(),
1446            InvalidMonomorphization::MaskWrongElementType { span, name, ty: m_elem_ty }
1447        );
1448        let m_i1s = vector_mask_to_bitmask(bx, args[0].immediate(), in_elem_bitwidth, m_len);
1449        return Ok(bx.select(m_i1s, args[1].immediate(), args[2].immediate()));
1450    }
1451
1452    if name == sym::simd_bitmask {
1453        // The `fn simd_bitmask(vector) -> unsigned integer` intrinsic takes a vector mask and
1454        // returns one bit for each lane (which must all be `0` or `!0`) in the form of either:
1455        // * an unsigned integer
1456        // * an array of `u8`
1457        // If the vector has fewer than 8 lanes, a u8 is returned with its trailing bits zeroed.
1458        //
1459        // The bit order of the result depends on the byte endianness, LSB-first for little
1460        // endian and MSB-first for big endian.
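        // E.g. a 4-lane mask is returned as a `u8` whose low nibble holds the four lane
        // bits, in the bit order described above.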
1461        let expected_int_bits = in_len.max(8).next_power_of_two();
1462        let expected_bytes = in_len.div_ceil(8);
1463
1464        // Integer vector <i{in_bitwidth} x in_len>:
1465        let in_elem_bitwidth = require_int_or_uint_ty!(
1466            in_elem.kind(),
1467            InvalidMonomorphization::MaskWrongElementType { span, name, ty: in_elem }
1468        );
1469
1470        let i1xn = vector_mask_to_bitmask(bx, args[0].immediate(), in_elem_bitwidth, in_len);
1471        // Bitcast <i1 x N> to iN:
1472        let i_ = bx.bitcast(i1xn, bx.type_ix(in_len));
1473
1474        match ret_ty.kind() {
1475            ty::Uint(i) if i.bit_width() == Some(expected_int_bits) => {
1476                // Zero-extend iN to the bitmask type:
1477                return Ok(bx.zext(i_, bx.type_ix(expected_int_bits)));
1478            }
1479            ty::Array(elem, len)
1480                if matches!(elem.kind(), ty::Uint(ty::UintTy::U8))
1481                    && len
1482                        .try_to_target_usize(bx.tcx)
1483                        .expect("expected monomorphic const in codegen")
1484                        == expected_bytes =>
1485            {
1486                // Zero-extend iN to the byte-array width (`expected_bytes * 8` bits):
1487                let ze = bx.zext(i_, bx.type_ix(expected_bytes * 8));
1488
1489                // Convert the integer to a byte array
1490                let ptr = bx.alloca(Size::from_bytes(expected_bytes), Align::ONE);
1491                bx.store(ze, ptr, Align::ONE);
1492                let array_ty = bx.type_array(bx.type_i8(), expected_bytes);
1493                return Ok(bx.load(array_ty, ptr, Align::ONE));
1494            }
1495            _ => return_error!(InvalidMonomorphization::CannotReturn {
1496                span,
1497                name,
1498                ret_ty,
1499                expected_int_bits,
1500                expected_bytes
1501            }),
1502        }
1503    }
1504
1505    fn simd_simple_float_intrinsic<'ll, 'tcx>(
1506        name: Symbol,
1507        in_elem: Ty<'_>,
1508        in_ty: Ty<'_>,
1509        in_len: u64,
1510        bx: &mut Builder<'_, 'll, 'tcx>,
1511        span: Span,
1512        args: &[OperandRef<'tcx, &'ll Value>],
1513    ) -> Result<&'ll Value, ()> {
1514        macro_rules! return_error {
1515            ($diag: expr) => {{
1516                bx.sess().dcx().emit_err($diag);
1517                return Err(());
1518            }};
1519        }
1520
1521        let elem_ty = if let ty::Float(f) = in_elem.kind() {
1522            bx.cx.type_float_from_ty(*f)
1523        } else {
1524            return_error!(InvalidMonomorphization::FloatingPointType { span, name, in_ty });
1525        };
1526
1527        let vec_ty = bx.type_vector(elem_ty, in_len);
1528
1529        let intr_name = match name {
1530            sym::simd_ceil => "llvm.ceil",
1531            sym::simd_fabs => "llvm.fabs",
1532            sym::simd_fcos => "llvm.cos",
1533            sym::simd_fexp2 => "llvm.exp2",
1534            sym::simd_fexp => "llvm.exp",
1535            sym::simd_flog10 => "llvm.log10",
1536            sym::simd_flog2 => "llvm.log2",
1537            sym::simd_flog => "llvm.log",
1538            sym::simd_floor => "llvm.floor",
1539            sym::simd_fma => "llvm.fma",
1540            sym::simd_relaxed_fma => "llvm.fmuladd",
1541            sym::simd_fsin => "llvm.sin",
1542            sym::simd_fsqrt => "llvm.sqrt",
1543            sym::simd_round => "llvm.round",
1544            sym::simd_round_ties_even => "llvm.rint",
1545            sym::simd_trunc => "llvm.trunc",
1546            _ => return_error!(InvalidMonomorphization::UnrecognizedIntrinsic { span, name }),
1547        };
1548        Ok(bx.call_intrinsic(
1549            intr_name,
1550            &[vec_ty],
1551            &args.iter().map(|arg| arg.immediate()).collect::<Vec<_>>(),
1552        ))
1553    }
1554
1555    if matches!(
1556        name,
1557        sym::simd_ceil
1558            | sym::simd_fabs
1559            | sym::simd_fcos
1560            | sym::simd_fexp2
1561            | sym::simd_fexp
1562            | sym::simd_flog10
1563            | sym::simd_flog2
1564            | sym::simd_flog
1565            | sym::simd_floor
1566            | sym::simd_fma
1567            | sym::simd_fsin
1568            | sym::simd_fsqrt
1569            | sym::simd_relaxed_fma
1570            | sym::simd_round
1571            | sym::simd_round_ties_even
1572            | sym::simd_trunc
1573    ) {
1574        return simd_simple_float_intrinsic(name, in_elem, in_ty, in_len, bx, span, args);
1575    }
1576
1577    fn llvm_vector_ty<'ll>(cx: &CodegenCx<'ll, '_>, elem_ty: Ty<'_>, vec_len: u64) -> &'ll Type {
1578        let elem_ty = match *elem_ty.kind() {
1579            ty::Int(v) => cx.type_int_from_ty(v),
1580            ty::Uint(v) => cx.type_uint_from_ty(v),
1581            ty::Float(v) => cx.type_float_from_ty(v),
1582            ty::RawPtr(_, _) => cx.type_ptr(),
1583            _ => unreachable!(),
1584        };
1585        cx.type_vector(elem_ty, vec_len)
1586    }
1587
1588    if name == sym::simd_gather {
1589        // simd_gather(values: <N x T>, pointers: <N x *_ T>,
1590        //             mask: <N x i{M}>) -> <N x T>
1591        // * N: number of elements in the input vectors
1592        // * T: type of the element to load
1593        // * M: any integer width is supported; it will be truncated to i1
1594
1595        // All types must be simd vector types
1596
1597        // The second argument must be a simd vector with an element type that's a pointer
1598        // to the element type of the first argument
1599        let (_, element_ty0) = require_simd!(in_ty, SimdFirst);
1600        let (out_len, element_ty1) = require_simd!(args[1].layout.ty, SimdSecond);
1601        // The element type of the third argument must be an integer type of any width:
1602        let (out_len2, element_ty2) = require_simd!(args[2].layout.ty, SimdThird);
1603        require_simd!(ret_ty, SimdReturn);
1604
1605        // Of the same length:
1606        require!(
1607            in_len == out_len,
1608            InvalidMonomorphization::SecondArgumentLength {
1609                span,
1610                name,
1611                in_len,
1612                in_ty,
1613                arg_ty: args[1].layout.ty,
1614                out_len
1615            }
1616        );
1617        require!(
1618            in_len == out_len2,
1619            InvalidMonomorphization::ThirdArgumentLength {
1620                span,
1621                name,
1622                in_len,
1623                in_ty,
1624                arg_ty: args[2].layout.ty,
1625                out_len: out_len2
1626            }
1627        );
1628
1629        // The return type must match the first argument type
1630        require!(
1631            ret_ty == in_ty,
1632            InvalidMonomorphization::ExpectedReturnType { span, name, in_ty, ret_ty }
1633        );
1634
1635        require!(
1636            matches!(
1637                *element_ty1.kind(),
1638                ty::RawPtr(p_ty, _) if p_ty == in_elem && p_ty.kind() == element_ty0.kind()
1639            ),
1640            InvalidMonomorphization::ExpectedElementType {
1641                span,
1642                name,
1643                expected_element: element_ty1,
1644                second_arg: args[1].layout.ty,
1645                in_elem,
1646                in_ty,
1647                mutability: ExpectedPointerMutability::Not,
1648            }
1649        );
1650
1651        let mask_elem_bitwidth = require_int_or_uint_ty!(
1652            element_ty2.kind(),
1653            InvalidMonomorphization::MaskWrongElementType { span, name, ty: element_ty2 }
1654        );
1655
1656        // Alignment of T, must be a constant integer value:
1657        let alignment = bx.const_i32(bx.align_of(in_elem).bytes() as i32);
1658
1659        // Truncate the mask vector to a vector of i1s:
1660        let mask = vector_mask_to_bitmask(bx, args[2].immediate(), mask_elem_bitwidth, in_len);
1661
1662        // Type of the vector of pointers:
1663        let llvm_pointer_vec_ty = llvm_vector_ty(bx, element_ty1, in_len);
1664
1665        // Type of the vector of elements:
1666        let llvm_elem_vec_ty = llvm_vector_ty(bx, element_ty0, in_len);
1667
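        // Argument order for llvm.masked.gather: (pointer vector, alignment, i1 mask, passthrough values).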
1668        return Ok(bx.call_intrinsic(
1669            "llvm.masked.gather",
1670            &[llvm_elem_vec_ty, llvm_pointer_vec_ty],
1671            &[args[1].immediate(), alignment, mask, args[0].immediate()],
1672        ));
1673    }
1674
1675    if name == sym::simd_masked_load {
1676        // simd_masked_load(mask: <N x i{M}>, pointer: *_ T, values: <N x T>) -> <N x T>
1677        // * N: number of elements in the input vectors
1678        // * T: type of the element to load
1679        // * M: any integer width is supported; it will be truncated to i1
1680        // Loads contiguous elements from memory behind `pointer`, but only for
1681        // those lanes whose `mask` bit is enabled.
1682        // The memory addresses corresponding to the “off” lanes are not accessed.
1683
1684        // The element type of the "mask" argument must be a signed integer type of any width
1685        let mask_ty = in_ty;
1686        let (mask_len, mask_elem) = (in_len, in_elem);
1687
1688        // The second argument must be a pointer matching the element type
1689        let pointer_ty = args[1].layout.ty;
1690
1691        // The last argument is a passthrough vector providing values for disabled lanes
1692        let values_ty = args[2].layout.ty;
1693        let (values_len, values_elem) = require_simd!(values_ty, SimdThird);
1694
1695        require_simd!(ret_ty, SimdReturn);
1696
1697        // Of the same length:
1698        require!(
1699            values_len == mask_len,
1700            InvalidMonomorphization::ThirdArgumentLength {
1701                span,
1702                name,
1703                in_len: mask_len,
1704                in_ty: mask_ty,
1705                arg_ty: values_ty,
1706                out_len: values_len
1707            }
1708        );
1709
1710        // The return type must match the last argument type
1711        require!(
1712            ret_ty == values_ty,
1713            InvalidMonomorphization::ExpectedReturnType { span, name, in_ty: values_ty, ret_ty }
1714        );
1715
1716        require!(
1717            matches!(
1718                *pointer_ty.kind(),
1719                ty::RawPtr(p_ty, _) if p_ty == values_elem && p_ty.kind() == values_elem.kind()
1720            ),
1721            InvalidMonomorphization::ExpectedElementType {
1722                span,
1723                name,
1724                expected_element: values_elem,
1725                second_arg: pointer_ty,
1726                in_elem: values_elem,
1727                in_ty: values_ty,
1728                mutability: ExpectedPointerMutability::Not,
1729            }
1730        );
1731
1732        let m_elem_bitwidth = require_int_or_uint_ty!(
1733            mask_elem.kind(),
1734            InvalidMonomorphization::MaskWrongElementType { span, name, ty: mask_elem }
1735        );
1736
1737        let mask = vector_mask_to_bitmask(bx, args[0].immediate(), m_elem_bitwidth, mask_len);
1738
1739        // Alignment of T, must be a constant integer value:
1740        let alignment = bx.const_i32(bx.align_of(values_elem).bytes() as i32);
1741
1742        let llvm_pointer = bx.type_ptr();
1743
1744        // Type of the vector of elements:
1745        let llvm_elem_vec_ty = llvm_vector_ty(bx, values_elem, values_len);
1746
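        // Argument order for llvm.masked.load: (pointer, alignment, i1 mask, passthrough values).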
1747        return Ok(bx.call_intrinsic(
1748            "llvm.masked.load",
1749            &[llvm_elem_vec_ty, llvm_pointer],
1750            &[args[1].immediate(), alignment, mask, args[2].immediate()],
1751        ));
1752    }
1753
1754    if name == sym::simd_masked_store {
1755        // simd_masked_store(mask: <N x i{M}>, pointer: *mut T, values: <N x T>) -> ()
1756        // * N: number of elements in the input vectors
1757        // * T: type of the element to store
1758        // * M: any integer width is supported; it will be truncated to i1
1759        // Stores contiguous elements to memory behind `pointer`, but only for
1760        // those lanes whose `mask` bit is enabled.
1761        // The memory addresses corresponding to the “off” lanes are not accessed.
1762
1763        // The element type of the "mask" argument must be a signed integer type of any width
1764        let mask_ty = in_ty;
1765        let (mask_len, mask_elem) = (in_len, in_elem);
1766
1767        // The second argument must be a pointer matching the element type
1768        let pointer_ty = args[1].layout.ty;
1769
1770        // The last argument specifies the values to store to memory
1771        let values_ty = args[2].layout.ty;
1772        let (values_len, values_elem) = require_simd!(values_ty, SimdThird);
1773
1774        // Of the same length:
1775        require!(
1776            values_len == mask_len,
1777            InvalidMonomorphization::ThirdArgumentLength {
1778                span,
1779                name,
1780                in_len: mask_len,
1781                in_ty: mask_ty,
1782                arg_ty: values_ty,
1783                out_len: values_len
1784            }
1785        );
1786
1787        // The second argument must be a mutable pointer type matching the element type
1788        require!(
1789            matches!(
1790                *pointer_ty.kind(),
1791                ty::RawPtr(p_ty, p_mutbl)
1792                    if p_ty == values_elem && p_ty.kind() == values_elem.kind() && p_mutbl.is_mut()
1793            ),
1794            InvalidMonomorphization::ExpectedElementType {
1795                span,
1796                name,
1797                expected_element: values_elem,
1798                second_arg: pointer_ty,
1799                in_elem: values_elem,
1800                in_ty: values_ty,
1801                mutability: ExpectedPointerMutability::Mut,
1802            }
1803        );
1804
1805        let m_elem_bitwidth = require_int_or_uint_ty!(
1806            mask_elem.kind(),
1807            InvalidMonomorphization::MaskWrongElementType { span, name, ty: mask_elem }
1808        );
1809
1810        let mask = vector_mask_to_bitmask(bx, args[0].immediate(), m_elem_bitwidth, mask_len);
1811
1812        // Alignment of T, must be a constant integer value:
1813        let alignment = bx.const_i32(bx.align_of(values_elem).bytes() as i32);
1814
1815        let llvm_pointer = bx.type_ptr();
1816
1817        // Type of the vector of elements:
1818        let llvm_elem_vec_ty = llvm_vector_ty(bx, values_elem, values_len);
1819
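        // Argument order for llvm.masked.store: (values, pointer, alignment, i1 mask).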
1820        return Ok(bx.call_intrinsic(
1821            "llvm.masked.store",
1822            &[llvm_elem_vec_ty, llvm_pointer],
1823            &[args[2].immediate(), args[1].immediate(), alignment, mask],
1824        ));
1825    }
1826
1827    if name == sym::simd_scatter {
1828        // simd_scatter(values: <N x T>, pointers: <N x *mut T>,
1829        //             mask: <N x i{M}>) -> ()
1830        // * N: number of elements in the input vectors
1831        // * T: type of the element to store
1832        // * M: any integer width is supported; it will be truncated to i1
1833
1834        // All types must be simd vector types
1835        // The second argument must be a simd vector with an element type that's a pointer
1836        // to the element type of the first argument
1837        let (_, element_ty0) = require_simd!(in_ty, SimdFirst);
1838        let (element_len1, element_ty1) = require_simd!(args[1].layout.ty, SimdSecond);
1839        let (element_len2, element_ty2) = require_simd!(args[2].layout.ty, SimdThird);
1840
1841        // Of the same length:
1842        require!(
1843            in_len == element_len1,
1844            InvalidMonomorphization::SecondArgumentLength {
1845                span,
1846                name,
1847                in_len,
1848                in_ty,
1849                arg_ty: args[1].layout.ty,
1850                out_len: element_len1
1851            }
1852        );
1853        require!(
1854            in_len == element_len2,
1855            InvalidMonomorphization::ThirdArgumentLength {
1856                span,
1857                name,
1858                in_len,
1859                in_ty,
1860                arg_ty: args[2].layout.ty,
1861                out_len: element_len2
1862            }
1863        );
1864
1865        require!(
1866            matches!(
1867                *element_ty1.kind(),
1868                ty::RawPtr(p_ty, p_mutbl)
1869                    if p_ty == in_elem && p_mutbl.is_mut() && p_ty.kind() == element_ty0.kind()
1870            ),
1871            InvalidMonomorphization::ExpectedElementType {
1872                span,
1873                name,
1874                expected_element: element_ty1,
1875                second_arg: args[1].layout.ty,
1876                in_elem,
1877                in_ty,
1878                mutability: ExpectedPointerMutability::Mut,
1879            }
1880        );
1881
1882        // The element type of the third argument must be an integer type of any width:
1883        let mask_elem_bitwidth = require_int_or_uint_ty!(
1884            element_ty2.kind(),
1885            InvalidMonomorphization::MaskWrongElementType { span, name, ty: element_ty2 }
1886        );
1887
1888        // Alignment of T, must be a constant integer value:
1889        let alignment = bx.const_i32(bx.align_of(in_elem).bytes() as i32);
1890
1891        // Truncate the mask vector to a vector of i1s:
1892        let mask = vector_mask_to_bitmask(bx, args[2].immediate(), mask_elem_bitwidth, in_len);
1893
1894        // Type of the vector of pointers:
1895        let llvm_pointer_vec_ty = llvm_vector_ty(bx, element_ty1, in_len);
1896
1897        // Type of the vector of elements:
1898        let llvm_elem_vec_ty = llvm_vector_ty(bx, element_ty0, in_len);
1899
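        // Argument order for llvm.masked.scatter: (values, pointer vector, alignment, i1 mask).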
1900        return Ok(bx.call_intrinsic(
1901            "llvm.masked.scatter",
1902            &[llvm_elem_vec_ty, llvm_pointer_vec_ty],
1903            &[args[0].immediate(), args[1].immediate(), alignment, mask],
1904        ));
1905    }
1906
1907    macro_rules! arith_red {
1908        ($name:ident : $integer_reduce:ident, $float_reduce:ident, $ordered:expr, $op:ident,
1909         $identity:expr) => {
1910            if name == sym::$name {
1911                require!(
1912                    ret_ty == in_elem,
1913                    InvalidMonomorphization::ReturnType { span, name, in_elem, in_ty, ret_ty }
1914                );
1915                return match in_elem.kind() {
1916                    ty::Int(_) | ty::Uint(_) => {
1917                        let r = bx.$integer_reduce(args[0].immediate());
1918                        if $ordered {
1919                            // if overflow occurs, the result is the
1920                            // mathematical result modulo 2^n:
1921                            Ok(bx.$op(args[1].immediate(), r))
1922                        } else {
1923                            Ok(r) // reuse the reduction emitted above
1924                        }
1925                    }
1926                    ty::Float(f) => {
1927                        let acc = if $ordered {
1928                            // ordered arithmetic reductions take an accumulator
1929                            args[1].immediate()
1930                        } else {
1931                            // unordered arithmetic reductions use the identity accumulator
1932                            match f.bit_width() {
1933                                32 => bx.const_real(bx.type_f32(), $identity),
1934                                64 => bx.const_real(bx.type_f64(), $identity),
1935                                v => return_error!(
1936                                    InvalidMonomorphization::UnsupportedSymbolOfSize {
1937                                        span,
1938                                        name,
1939                                        symbol: sym::$name,
1940                                        in_ty,
1941                                        in_elem,
1942                                        size: v,
1943                                        ret_ty
1944                                    }
1945                                ),
1946                            }
1947                        };
1948                        Ok(bx.$float_reduce(acc, args[0].immediate()))
1949                    }
1950                    _ => return_error!(InvalidMonomorphization::UnsupportedSymbol {
1951                        span,
1952                        name,
1953                        symbol: sym::$name,
1954                        in_ty,
1955                        in_elem,
1956                        ret_ty
1957                    }),
1958                };
1959            }
1960        };
1961    }
1962
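    // Ordered reductions take an explicit accumulator and, for floats, reduce strictly in
    // lane order; the unordered forms may reassociate and use the identity accumulator
    // (-0.0 for add, 1.0 for mul) instead.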
1963    arith_red!(simd_reduce_add_ordered: vector_reduce_add, vector_reduce_fadd, true, add, -0.0);
1964    arith_red!(simd_reduce_mul_ordered: vector_reduce_mul, vector_reduce_fmul, true, mul, 1.0);
1965    arith_red!(
1966        simd_reduce_add_unordered: vector_reduce_add,
1967        vector_reduce_fadd_reassoc,
1968        false,
1969        add,
1970        -0.0
1971    );
1972    arith_red!(
1973        simd_reduce_mul_unordered: vector_reduce_mul,
1974        vector_reduce_fmul_reassoc,
1975        false,
1976        mul,
1977        1.0
1978    );
1979
1980    macro_rules! minmax_red {
1981        ($name:ident: $int_red:ident, $float_red:ident) => {
1982            if name == sym::$name {
1983                require!(
1984                    ret_ty == in_elem,
1985                    InvalidMonomorphization::ReturnType { span, name, in_elem, in_ty, ret_ty }
1986                );
1987                return match in_elem.kind() {
1988                    ty::Int(_i) => Ok(bx.$int_red(args[0].immediate(), true)),
1989                    ty::Uint(_u) => Ok(bx.$int_red(args[0].immediate(), false)),
1990                    ty::Float(_f) => Ok(bx.$float_red(args[0].immediate())),
1991                    _ => return_error!(InvalidMonomorphization::UnsupportedSymbol {
1992                        span,
1993                        name,
1994                        symbol: sym::$name,
1995                        in_ty,
1996                        in_elem,
1997                        ret_ty
1998                    }),
1999                };
2000            }
2001        };
2002    }
2003
2004    minmax_red!(simd_reduce_min: vector_reduce_min, vector_reduce_fmin);
2005    minmax_red!(simd_reduce_max: vector_reduce_max, vector_reduce_fmax);
2006
2007    macro_rules! bitwise_red {
2008        ($name:ident : $red:ident, $boolean:expr) => {
2009            if name == sym::$name {
2010                let input = if !$boolean {
2011                    require!(
2012                        ret_ty == in_elem,
2013                        InvalidMonomorphization::ReturnType { span, name, in_elem, in_ty, ret_ty }
2014                    );
2015                    args[0].immediate()
2016                } else {
2017                    let bitwidth = match in_elem.kind() {
2018                        ty::Int(i) => {
2019                            i.bit_width().unwrap_or_else(|| bx.data_layout().pointer_size().bits())
2020                        }
2021                        ty::Uint(i) => {
2022                            i.bit_width().unwrap_or_else(|| bx.data_layout().pointer_size().bits())
2023                        }
2024                        _ => return_error!(InvalidMonomorphization::UnsupportedSymbol {
2025                            span,
2026                            name,
2027                            symbol: sym::$name,
2028                            in_ty,
2029                            in_elem,
2030                            ret_ty
2031                        }),
2032                    };
2033
2034                    vector_mask_to_bitmask(bx, args[0].immediate(), bitwidth, in_len as _)
2035                };
2036                return match in_elem.kind() {
2037                    ty::Int(_) | ty::Uint(_) => {
2038                        let r = bx.$red(input);
2039                        Ok(if !$boolean { r } else { bx.zext(r, bx.type_bool()) })
2040                    }
2041                    _ => return_error!(InvalidMonomorphization::UnsupportedSymbol {
2042                        span,
2043                        name,
2044                        symbol: sym::$name,
2045                        in_ty,
2046                        in_elem,
2047                        ret_ty
2048                    }),
2049                };
2050            }
2051        };
2052    }
2053
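    // The boolean reductions (`all`/`any`) first collapse the mask to <i1 x N> via the
    // sign bit, reduce with and/or, and zero-extend the resulting i1 to `bool`.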
2054    bitwise_red!(simd_reduce_and: vector_reduce_and, false);
2055    bitwise_red!(simd_reduce_or: vector_reduce_or, false);
2056    bitwise_red!(simd_reduce_xor: vector_reduce_xor, false);
2057    bitwise_red!(simd_reduce_all: vector_reduce_and, true);
2058    bitwise_red!(simd_reduce_any: vector_reduce_or, true);
2059
2060    if name == sym::simd_cast_ptr {
2061        let (out_len, out_elem) = require_simd!(ret_ty, SimdReturn);
2062        require!(
2063            in_len == out_len,
2064            InvalidMonomorphization::ReturnLengthInputType {
2065                span,
2066                name,
2067                in_len,
2068                in_ty,
2069                ret_ty,
2070                out_len
2071            }
2072        );
2073
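        // Both element types must be thin pointers: wide pointers such as `*const [T]`
        // or `*const dyn Trait` carry metadata and are rejected below.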
2074        match in_elem.kind() {
2075            ty::RawPtr(p_ty, _) => {
2076                let metadata = p_ty.ptr_metadata_ty(bx.tcx, |ty| {
2077                    bx.tcx.normalize_erasing_regions(bx.typing_env(), ty)
2078                });
2079                require!(
2080                    metadata.is_unit(),
2081                    InvalidMonomorphization::CastWidePointer { span, name, ty: in_elem }
2082                );
2083            }
2084            _ => {
2085                return_error!(InvalidMonomorphization::ExpectedPointer { span, name, ty: in_elem })
2086            }
2087        }
2088        match out_elem.kind() {
2089            ty::RawPtr(p_ty, _) => {
2090                let metadata = p_ty.ptr_metadata_ty(bx.tcx, |ty| {
2091                    bx.tcx.normalize_erasing_regions(bx.typing_env(), ty)
2092                });
2093                require!(
2094                    metadata.is_unit(),
2095                    InvalidMonomorphization::CastWidePointer { span, name, ty: out_elem }
2096                );
2097            }
2098            _ => {
2099                return_error!(InvalidMonomorphization::ExpectedPointer { span, name, ty: out_elem })
2100            }
2101        }
2102
2103        return Ok(args[0].immediate());
2104    }
2105
2106    if name == sym::simd_expose_provenance {
2107        let (out_len, out_elem) = require_simd!(ret_ty, SimdReturn);
2108        require!(
2109            in_len == out_len,
2110            InvalidMonomorphization::ReturnLengthInputType {
2111                span,
2112                name,
2113                in_len,
2114                in_ty,
2115                ret_ty,
2116                out_len
2117            }
2118        );
2119
2120        match in_elem.kind() {
2121            ty::RawPtr(_, _) => {}
2122            _ => {
2123                return_error!(InvalidMonomorphization::ExpectedPointer { span, name, ty: in_elem })
2124            }
2125        }
2126        match out_elem.kind() {
2127            ty::Uint(ty::UintTy::Usize) => {}
2128            _ => return_error!(InvalidMonomorphization::ExpectedUsize { span, name, ty: out_elem }),
2129        }
2130
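        // Lowered to a lanewise ptrtoint, which exposes each lane's provenance and
        // yields the addresses as `usize` values.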
2131        return Ok(bx.ptrtoint(args[0].immediate(), llret_ty));
2132    }
2133
2134    if name == sym::simd_with_exposed_provenance {
2135        let (out_len, out_elem) = require_simd!(ret_ty, SimdReturn);
2136        require!(
2137            in_len == out_len,
2138            InvalidMonomorphization::ReturnLengthInputType {
2139                span,
2140                name,
2141                in_len,
2142                in_ty,
2143                ret_ty,
2144                out_len
2145            }
2146        );
2147
2148        match in_elem.kind() {
2149            ty::Uint(ty::UintTy::Usize) => {}
2150            _ => return_error!(InvalidMonomorphization::ExpectedUsize { span, name, ty: in_elem }),
2151        }
2152        match out_elem.kind() {
2153            ty::RawPtr(_, _) => {}
2154            _ => {
2155                return_error!(InvalidMonomorphization::ExpectedPointer { span, name, ty: out_elem })
2156            }
2157        }
2158
2159        return Ok(bx.inttoptr(args[0].immediate(), llret_ty));
2160    }
2161
2162    if name == sym::simd_cast || name == sym::simd_as {
2163        let (out_len, out_elem) = require_simd!(ret_ty, SimdReturn);
2164        require!(
2165            in_len == out_len,
2166            InvalidMonomorphization::ReturnLengthInputType {
2167                span,
2168                name,
2169                in_len,
2170                in_ty,
2171                ret_ty,
2172                out_len
2173            }
2174        );
2175        // casting cares about nominal type, not just structural type
2176        if in_elem == out_elem {
2177            return Ok(args[0].immediate());
2178        }
2179
2180        #[derive(Copy, Clone)]
2181        enum Sign {
2182            Unsigned,
2183            Signed,
2184        }
2185        use Sign::*;
2186
2187        enum Style {
2188            Float,
2189            Int(Sign),
2190            Unsupported,
2191        }
2192
2193        let (in_style, in_width) = match in_elem.kind() {
2194            // `normalize` replaces pointer-sized integers (`isize`/`usize`) with their
2195            // fixed-width equivalents, so `bit_width` is always `Some` and the unwrap is safe.
2196            ty::Int(i) => (
2197                Style::Int(Signed),
2198                i.normalize(bx.tcx().sess.target.pointer_width).bit_width().unwrap(),
2199            ),
2200            ty::Uint(u) => (
2201                Style::Int(Unsigned),
2202                u.normalize(bx.tcx().sess.target.pointer_width).bit_width().unwrap(),
2203            ),
2204            ty::Float(f) => (Style::Float, f.bit_width()),
2205            _ => (Style::Unsupported, 0),
2206        };
2207        let (out_style, out_width) = match out_elem.kind() {
2208            ty::Int(i) => (
2209                Style::Int(Signed),
2210                i.normalize(bx.tcx().sess.target.pointer_width).bit_width().unwrap(),
2211            ),
2212            ty::Uint(u) => (
2213                Style::Int(Unsigned),
2214                u.normalize(bx.tcx().sess.target.pointer_width).bit_width().unwrap(),
2215            ),
2216            ty::Float(f) => (Style::Float, f.bit_width()),
2217            _ => (Style::Unsupported, 0),
2218        };
2219
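        // Note: for float -> int, `simd_cast` lowers to plain fptoui/fptosi (poison for
        // out-of-range inputs), while `simd_as` uses the saturating `cast_float_to_int`.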
2220        match (in_style, out_style) {
2221            (Style::Int(sign), Style::Int(_)) => {
2222                return Ok(match in_width.cmp(&out_width) {
2223                    Ordering::Greater => bx.trunc(args[0].immediate(), llret_ty),
2224                    Ordering::Equal => args[0].immediate(),
2225                    Ordering::Less => match sign {
2226                        Sign::Signed => bx.sext(args[0].immediate(), llret_ty),
2227                        Sign::Unsigned => bx.zext(args[0].immediate(), llret_ty),
2228                    },
2229                });
2230            }
2231            (Style::Int(Sign::Signed), Style::Float) => {
2232                return Ok(bx.sitofp(args[0].immediate(), llret_ty));
2233            }
2234            (Style::Int(Sign::Unsigned), Style::Float) => {
2235                return Ok(bx.uitofp(args[0].immediate(), llret_ty));
2236            }
2237            (Style::Float, Style::Int(sign)) => {
2238                return Ok(match (sign, name == sym::simd_as) {
2239                    (Sign::Unsigned, false) => bx.fptoui(args[0].immediate(), llret_ty),
2240                    (Sign::Signed, false) => bx.fptosi(args[0].immediate(), llret_ty),
2241                    (_, true) => bx.cast_float_to_int(
2242                        matches!(sign, Sign::Signed),
2243                        args[0].immediate(),
2244                        llret_ty,
2245                    ),
2246                });
2247            }
2248            (Style::Float, Style::Float) => {
2249                return Ok(match in_width.cmp(&out_width) {
2250                    Ordering::Greater => bx.fptrunc(args[0].immediate(), llret_ty),
2251                    Ordering::Equal => args[0].immediate(),
2252                    Ordering::Less => bx.fpext(args[0].immediate(), llret_ty),
2253                });
2254            }
2255            _ => { /* Unsupported. Fallthrough. */ }
2256        }
2257        return_error!(InvalidMonomorphization::UnsupportedCast {
2258            span,
2259            name,
2260            in_ty,
2261            in_elem,
2262            ret_ty,
2263            out_elem
2264        });
2265    }
2266    macro_rules! arith_binary {
2267        ($($name: ident: $($($p: ident),* => $call: ident),*;)*) => {
2268            $(if name == sym::$name {
2269                match in_elem.kind() {
2270                    $($(ty::$p(_))|* => {
2271                        return Ok(bx.$call(args[0].immediate(), args[1].immediate()))
2272                    })*
2273                    _ => {},
2274                }
2275                return_error!(
2276                    InvalidMonomorphization::UnsupportedOperation { span, name, in_ty, in_elem }
2277                );
2278            })*
2279        }
2280    }
2281    arith_binary! {
2282        simd_add: Uint, Int => add, Float => fadd;
2283        simd_sub: Uint, Int => sub, Float => fsub;
2284        simd_mul: Uint, Int => mul, Float => fmul;
2285        simd_div: Uint => udiv, Int => sdiv, Float => fdiv;
2286        simd_rem: Uint => urem, Int => srem, Float => frem;
2287        simd_shl: Uint, Int => shl;
2288        simd_shr: Uint => lshr, Int => ashr;
2289        simd_and: Uint, Int => and;
2290        simd_or: Uint, Int => or;
2291        simd_xor: Uint, Int => xor;
2292        simd_fmax: Float => maxnum;
2293        simd_fmin: Float => minnum;
2295    }
2296    macro_rules! arith_unary {
2297        ($($name: ident: $($($p: ident),* => $call: ident),*;)*) => {
2298            $(if name == sym::$name {
2299                match in_elem.kind() {
2300                    $($(ty::$p(_))|* => {
2301                        return Ok(bx.$call(args[0].immediate()))
2302                    })*
2303                    _ => {},
2304                }
2305                return_error!(
2306                    InvalidMonomorphization::UnsupportedOperation { span, name, in_ty, in_elem }
2307                );
2308            })*
2309        }
2310    }
2311    arith_unary! {
2312        simd_neg: Int => neg, Float => fneg;
2313    }
2314
2315    // Integer bit-manipulation intrinsics (unary, except the funnel shifts, which take three arguments)
2316    if matches!(
2317        name,
2318        sym::simd_bswap
2319            | sym::simd_bitreverse
2320            | sym::simd_ctlz
2321            | sym::simd_ctpop
2322            | sym::simd_cttz
2323            | sym::simd_funnel_shl
2324            | sym::simd_funnel_shr
2325    ) {
2326        let vec_ty = bx.cx.type_vector(
2327            match *in_elem.kind() {
2328                ty::Int(i) => bx.cx.type_int_from_ty(i),
2329                ty::Uint(i) => bx.cx.type_uint_from_ty(i),
2330                _ => return_error!(InvalidMonomorphization::UnsupportedOperation {
2331                    span,
2332                    name,
2333                    in_ty,
2334                    in_elem
2335                }),
2336            },
2337            in_len as u64,
2338        );
2339        let llvm_intrinsic = match name {
2340            sym::simd_bswap => "llvm.bswap",
2341            sym::simd_bitreverse => "llvm.bitreverse",
2342            sym::simd_ctlz => "llvm.ctlz",
2343            sym::simd_ctpop => "llvm.ctpop",
2344            sym::simd_cttz => "llvm.cttz",
2345            sym::simd_funnel_shl => "llvm.fshl",
2346            sym::simd_funnel_shr => "llvm.fshr",
2347            _ => unreachable!(),
2348        };
2349        let int_size = in_elem.int_size_and_signed(bx.tcx()).0.bits();
2350
2351        return match name {
2352            // byte swap is a no-op for i8/u8
2353            sym::simd_bswap if int_size == 8 => Ok(args[0].immediate()),
2354            sym::simd_ctlz | sym::simd_cttz => {
2355                // ctlz/cttz take an extra `i1` immediate: `true` makes a zero input poison, so pass `false` to keep the zero case defined
2356                let dont_poison_on_zero = bx.const_int(bx.type_i1(), 0);
2357                Ok(bx.call_intrinsic(
2358                    llvm_intrinsic,
2359                    &[vec_ty],
2360                    &[args[0].immediate(), dont_poison_on_zero],
2361                ))
2362            }
2363            sym::simd_bswap | sym::simd_bitreverse | sym::simd_ctpop => {
2364                // simple unary argument cases
2365                Ok(bx.call_intrinsic(llvm_intrinsic, &[vec_ty], &[args[0].immediate()]))
2366            }
2367            sym::simd_funnel_shl | sym::simd_funnel_shr => Ok(bx.call_intrinsic(
2368                llvm_intrinsic,
2369                &[vec_ty],
2370                &[args[0].immediate(), args[1].immediate(), args[2].immediate()],
2371            )),
2372            _ => unreachable!(),
2373        };
2374    }
2375
2376    if name == sym::simd_arith_offset {
2377        // This also checks that the first operand is a ptr type.
2378        let pointee = in_elem.builtin_deref(true).unwrap_or_else(|| {
2379            span_bug!(span, "must be called with a vector of pointer types as first argument")
2380        });
2381        let layout = bx.layout_of(pointee);
2382        let ptrs = args[0].immediate();
2383        // The second argument must be a ptr-sized integer.
2384        // (We don't care about the signedness, this is wrapping anyway.)
2385        let (_offsets_len, offsets_elem) = args[1].layout.ty.simd_size_and_type(bx.tcx());
2386        if !matches!(offsets_elem.kind(), ty::Int(ty::IntTy::Isize) | ty::Uint(ty::UintTy::Usize)) {
2387            span_bug!(
2388                span,
2389                "must be called with a vector of pointer-sized integers as second argument"
2390            );
2391        }
2392        let offsets = args[1].immediate();
2393
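        // A GEP with a vector of pointers and a vector of offsets operates lanewise:
        // each result lane is `ptrs[i]` offset by `offsets[i]` elements, with wrapping
        // (non-inbounds) semantics.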
2394        return Ok(bx.gep(bx.backend_type(layout), ptrs, &[offsets]));
2395    }
2396
2397    if name == sym::simd_saturating_add || name == sym::simd_saturating_sub {
2398        let lhs = args[0].immediate();
2399        let rhs = args[1].immediate();
2400        let is_add = name == sym::simd_saturating_add;
2401        let (signed, elem_ty) = match *in_elem.kind() {
2402            ty::Int(i) => (true, bx.cx.type_int_from_ty(i)),
2403            ty::Uint(i) => (false, bx.cx.type_uint_from_ty(i)),
2404            _ => {
2405                return_error!(InvalidMonomorphization::ExpectedVectorElementType {
2406                    span,
2407                    name,
2408                    expected_element: args[0].layout.ty.simd_size_and_type(bx.tcx()).1,
2409                    vector_type: args[0].layout.ty
2410                });
2411            }
2412        };
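        // Builds llvm.{s,u}{add,sub}.sat, e.g. `llvm.sadd.sat` for signed saturating add.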
2413        let llvm_intrinsic = format!(
2414            "llvm.{}{}.sat",
2415            if signed { 's' } else { 'u' },
2416            if is_add { "add" } else { "sub" },
2417        );
2418        let vec_ty = bx.cx.type_vector(elem_ty, in_len as u64);
2419
2420        return Ok(bx.call_intrinsic(llvm_intrinsic, &[vec_ty], &[lhs, rhs]));
2421    }
2422
2423    span_bug!(span, "unknown SIMD intrinsic");
2424}