rustc_codegen_ssa/mir/intrinsic.rs

use rustc_abi::WrappingRange;
use rustc_middle::bug;
use rustc_middle::mir::SourceInfo;
use rustc_middle::ty::{self, Ty, TyCtxt};
use rustc_session::config::OptLevel;
use rustc_span::sym;

use super::FunctionCx;
use super::operand::OperandRef;
use super::place::PlaceRef;
use crate::common::{AtomicRmwBinOp, SynchronizationScope};
use crate::errors::InvalidMonomorphization;
use crate::traits::*;
use crate::{MemFlags, meth, size_of_val};

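/// Emits a `memmove` (when `allow_overlap`) or `memcpy` of `size_of::<T>() * count`
/// bytes from `src` to `dst`, marking the operation volatile when requested.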
fn copy_intrinsic<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
    bx: &mut Bx,
    allow_overlap: bool,
    volatile: bool,
    ty: Ty<'tcx>,
    dst: Bx::Value,
    src: Bx::Value,
    count: Bx::Value,
) {
    let layout = bx.layout_of(ty);
    let size = layout.size;
    let align = layout.align.abi;
    let size = bx.mul(bx.const_usize(size.bytes()), count);
    let flags = if volatile { MemFlags::VOLATILE } else { MemFlags::empty() };
    if allow_overlap {
        bx.memmove(dst, align, src, align, size, flags);
    } else {
        bx.memcpy(dst, align, src, align, size, flags);
    }
}

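/// Emits a `memset` filling `size_of::<T>() * count` bytes at `dst` with `val`,
/// marking the operation volatile when requested.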
fn memset_intrinsic<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
    bx: &mut Bx,
    volatile: bool,
    ty: Ty<'tcx>,
    dst: Bx::Value,
    val: Bx::Value,
    count: Bx::Value,
) {
    let layout = bx.layout_of(ty);
    let size = layout.size;
    let align = layout.align.abi;
    let size = bx.mul(bx.const_usize(size.bytes()), count);
    let flags = if volatile { MemFlags::VOLATILE } else { MemFlags::empty() };
    bx.memset(dst, val, size, align, flags);
}

impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
    /// In the `Err` case, returns the instance that should be called instead.
    pub fn codegen_intrinsic_call(
        &mut self,
        bx: &mut Bx,
        instance: ty::Instance<'tcx>,
        args: &[OperandRef<'tcx, Bx::Value>],
        result: PlaceRef<'tcx, Bx::Value>,
        source_info: SourceInfo,
    ) -> Result<(), ty::Instance<'tcx>> {
        let span = source_info.span;

        let name = bx.tcx().item_name(instance.def_id());
        let fn_args = instance.args;

        // If we're swapping something that's *not* an `OperandValue::Ref`,
        // then we can do it directly and avoid the alloca.
        // Otherwise, we'll let the fallback MIR body take care of it.
        if let sym::typed_swap_nonoverlapping = name {
            let pointee_ty = fn_args.type_at(0);
            let pointee_layout = bx.layout_of(pointee_ty);
            if !bx.is_backend_ref(pointee_layout)
                // But if we're not going to optimize, trying to use the fallback
                // body just makes things worse, so don't bother.
                || bx.sess().opts.optimize == OptLevel::No
                // NOTE(eddyb) SPIR-V's Logical addressing model doesn't allow for arbitrary
                // reinterpretation of values as (chunkable) byte arrays, and the loop in the
                // block optimization in `ptr::swap_nonoverlapping` is hard to rewrite back
                // into the (unoptimized) direct swapping implementation, so we disable it.
                || bx.sess().target.arch == "spirv"
            {
                let align = pointee_layout.align.abi;
                let x_place = args[0].val.deref(align);
                let y_place = args[1].val.deref(align);
                bx.typed_place_swap(x_place, y_place, pointee_layout);
                return Ok(());
            }
        }

        let invalid_monomorphization_int_type = |ty| {
            bx.tcx().dcx().emit_err(InvalidMonomorphization::BasicIntegerType { span, name, ty });
        };

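        // Atomic orderings are passed to these intrinsics as const generic arguments;
        // decode the enum discriminant from its valtree representation.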
        let parse_atomic_ordering = |ord: ty::Value<'tcx>| {
            let discr = ord.valtree.unwrap_branch()[0].unwrap_leaf();
            discr.to_atomic_ordering()
        };

        let llval = match name {
            sym::abort => {
                bx.abort();
                return Ok(());
            }

            sym::caller_location => {
                let location = self.get_caller_location(bx, source_info);
                location.val.store(bx, result);
                return Ok(());
            }

            sym::va_start => bx.va_start(args[0].immediate()),
            sym::va_end => bx.va_end(args[0].immediate()),
            sym::size_of_val => {
                let tp_ty = fn_args.type_at(0);
                let (_, meta) = args[0].val.pointer_parts();
                let (llsize, _) = size_of_val::size_and_align_of_dst(bx, tp_ty, meta);
                llsize
            }
            sym::align_of_val => {
                let tp_ty = fn_args.type_at(0);
                let (_, meta) = args[0].val.pointer_parts();
                let (_, llalign) = size_of_val::size_and_align_of_dst(bx, tp_ty, meta);
                llalign
            }
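            // `vtable_size`/`vtable_align` load the size and alignment entries out of a
            // vtable, attaching range metadata so later passes know the valid ranges.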
            sym::vtable_size | sym::vtable_align => {
                let vtable = args[0].immediate();
                let idx = match name {
                    sym::vtable_size => ty::COMMON_VTABLE_ENTRIES_SIZE,
                    sym::vtable_align => ty::COMMON_VTABLE_ENTRIES_ALIGN,
                    _ => bug!(),
                };
                let value = meth::VirtualIndex::from_index(idx).get_usize(
                    bx,
                    vtable,
                    instance.ty(bx.tcx(), bx.typing_env()),
                );
                match name {
                    // Size is always <= isize::MAX.
                    sym::vtable_size => {
                        let size_bound = bx.data_layout().ptr_sized_integer().signed_max() as u128;
                        bx.range_metadata(value, WrappingRange { start: 0, end: size_bound });
                    }
                    // Alignment is always nonzero.
                    sym::vtable_align => {
                        bx.range_metadata(value, WrappingRange { start: 1, end: !0 })
                    }
                    _ => {}
                }
                value
            }
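            // These intrinsics are answered entirely at compile time by const-evaluating
            // the instance and materializing the resulting constant.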
            sym::needs_drop | sym::type_id | sym::type_name | sym::variant_count => {
                let value = bx.tcx().const_eval_instance(bx.typing_env(), instance, span).unwrap();
                OperandRef::from_const(bx, value, result.layout.ty).immediate_or_packed_pair(bx)
            }
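            // `arith_offset` allows the result to wrap, so use a plain (non-inbounds) GEP.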
            sym::arith_offset => {
                let ty = fn_args.type_at(0);
                let layout = bx.layout_of(ty);
                let ptr = args[0].immediate();
                let offset = args[1].immediate();
                bx.gep(bx.backend_type(layout), ptr, &[offset])
            }
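            // `copy(src, dst, count)` takes the source first, so the destination passed
            // to the helper is `args[1]` and the source is `args[0]`.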
            sym::copy => {
                copy_intrinsic(
                    bx,
                    true,
                    false,
                    fn_args.type_at(0),
                    args[1].immediate(),
                    args[0].immediate(),
                    args[2].immediate(),
                );
                return Ok(());
            }
            sym::write_bytes => {
                memset_intrinsic(
                    bx,
                    false,
                    fn_args.type_at(0),
                    args[0].immediate(),
                    args[1].immediate(),
                    args[2].immediate(),
                );
                return Ok(());
            }

            sym::volatile_copy_nonoverlapping_memory => {
                copy_intrinsic(
                    bx,
                    false,
                    true,
                    fn_args.type_at(0),
                    args[0].immediate(),
                    args[1].immediate(),
                    args[2].immediate(),
                );
                return Ok(());
            }
            sym::volatile_copy_memory => {
                copy_intrinsic(
                    bx,
                    true,
                    true,
                    fn_args.type_at(0),
                    args[0].immediate(),
                    args[1].immediate(),
                    args[2].immediate(),
                );
                return Ok(());
            }
            sym::volatile_set_memory => {
                memset_intrinsic(
                    bx,
                    true,
                    fn_args.type_at(0),
                    args[0].immediate(),
                    args[1].immediate(),
                    args[2].immediate(),
                );
                return Ok(());
            }
            sym::volatile_store => {
                let dst = args[0].deref(bx.cx());
                args[1].val.volatile_store(bx, dst);
                return Ok(());
            }
            sym::unaligned_volatile_store => {
                let dst = args[0].deref(bx.cx());
                args[1].val.unaligned_volatile_store(bx, dst);
                return Ok(());
            }
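            // The operands are guaranteed to share no set bits, which lets the backend
            // emit a disjoint `or`.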
            sym::disjoint_bitor => {
                let a = args[0].immediate();
                let b = args[1].immediate();
                bx.or_disjoint(a, b)
            }
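            // `exact_div` is UB if the division has a remainder, which lets us use the
            // backend's exact division operations.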
            sym::exact_div => {
                let ty = args[0].layout.ty;
                match int_type_width_signed(ty, bx.tcx()) {
                    Some((_width, signed)) => {
                        if signed {
                            bx.exactsdiv(args[0].immediate(), args[1].immediate())
                        } else {
                            bx.exactudiv(args[0].immediate(), args[1].immediate())
                        }
                    }
                    None => {
                        bx.tcx().dcx().emit_err(InvalidMonomorphization::BasicIntegerType {
                            span,
                            name,
                            ty,
                        });
                        return Ok(());
                    }
                }
            }
            sym::fadd_fast | sym::fsub_fast | sym::fmul_fast | sym::fdiv_fast | sym::frem_fast => {
                match float_type_width(args[0].layout.ty) {
                    Some(_width) => match name {
                        sym::fadd_fast => bx.fadd_fast(args[0].immediate(), args[1].immediate()),
                        sym::fsub_fast => bx.fsub_fast(args[0].immediate(), args[1].immediate()),
                        sym::fmul_fast => bx.fmul_fast(args[0].immediate(), args[1].immediate()),
                        sym::fdiv_fast => bx.fdiv_fast(args[0].immediate(), args[1].immediate()),
                        sym::frem_fast => bx.frem_fast(args[0].immediate(), args[1].immediate()),
                        _ => bug!(),
                    },
                    None => {
                        bx.tcx().dcx().emit_err(InvalidMonomorphization::BasicFloatType {
                            span,
                            name,
                            ty: args[0].layout.ty,
                        });
                        return Ok(());
                    }
                }
            }
            sym::fadd_algebraic
            | sym::fsub_algebraic
            | sym::fmul_algebraic
            | sym::fdiv_algebraic
            | sym::frem_algebraic => match float_type_width(args[0].layout.ty) {
                Some(_width) => match name {
                    sym::fadd_algebraic => {
                        bx.fadd_algebraic(args[0].immediate(), args[1].immediate())
                    }
                    sym::fsub_algebraic => {
                        bx.fsub_algebraic(args[0].immediate(), args[1].immediate())
                    }
                    sym::fmul_algebraic => {
                        bx.fmul_algebraic(args[0].immediate(), args[1].immediate())
                    }
                    sym::fdiv_algebraic => {
                        bx.fdiv_algebraic(args[0].immediate(), args[1].immediate())
                    }
                    sym::frem_algebraic => {
                        bx.frem_algebraic(args[0].immediate(), args[1].immediate())
                    }
                    _ => bug!(),
                },
                None => {
                    bx.tcx().dcx().emit_err(InvalidMonomorphization::BasicFloatType {
                        span,
                        name,
                        ty: args[0].layout.ty,
                    });
                    return Ok(());
                }
            },

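            // `float_to_int_unchecked` assumes the value fits in the target integer type,
            // so a bare `fptosi`/`fptoui` (no saturation or checks) is correct here.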
            sym::float_to_int_unchecked => {
                if float_type_width(args[0].layout.ty).is_none() {
                    bx.tcx().dcx().emit_err(InvalidMonomorphization::FloatToIntUnchecked {
                        span,
                        ty: args[0].layout.ty,
                    });
                    return Ok(());
                }
                let Some((_width, signed)) = int_type_width_signed(result.layout.ty, bx.tcx())
                else {
                    bx.tcx().dcx().emit_err(InvalidMonomorphization::FloatToIntUnchecked {
                        span,
                        ty: result.layout.ty,
                    });
                    return Ok(());
                };
                if signed {
                    bx.fptosi(args[0].immediate(), bx.backend_type(result.layout))
                } else {
                    bx.fptoui(args[0].immediate(), bx.backend_type(result.layout))
                }
            }

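            // The atomic load/store/exchange intrinsics accept only integer and
            // raw-pointer types; anything else is rejected as an invalid monomorphization.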
            sym::atomic_load => {
                let ty = fn_args.type_at(0);
                if !(int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_raw_ptr()) {
                    invalid_monomorphization_int_type(ty);
                    return Ok(());
                }
                let ordering = fn_args.const_at(1).to_value();
                let layout = bx.layout_of(ty);
                let source = args[0].immediate();
                bx.atomic_load(
                    bx.backend_type(layout),
                    source,
                    parse_atomic_ordering(ordering),
                    layout.size,
                )
            }
            sym::atomic_store => {
                let ty = fn_args.type_at(0);
                if !(int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_raw_ptr()) {
                    invalid_monomorphization_int_type(ty);
                    return Ok(());
                }
                let ordering = fn_args.const_at(1).to_value();
                let size = bx.layout_of(ty).size;
                let val = args[1].immediate();
                let ptr = args[0].immediate();
                bx.atomic_store(val, ptr, parse_atomic_ordering(ordering), size);
                return Ok(());
            }
            sym::atomic_cxchg | sym::atomic_cxchgweak => {
                let ty = fn_args.type_at(0);
                if !(int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_raw_ptr()) {
                    invalid_monomorphization_int_type(ty);
                    return Ok(());
                }
                let succ_ordering = fn_args.const_at(1).to_value();
                let fail_ordering = fn_args.const_at(2).to_value();
                let weak = name == sym::atomic_cxchgweak;
                let dst = args[0].immediate();
                let cmp = args[1].immediate();
                let src = args[2].immediate();
                let (val, success) = bx.atomic_cmpxchg(
                    dst,
                    cmp,
                    src,
                    parse_atomic_ordering(succ_ordering),
                    parse_atomic_ordering(fail_ordering),
                    weak,
                );
                let val = bx.from_immediate(val);
                let success = bx.from_immediate(success);

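                // The intrinsic returns a `(T, bool)` pair: the previous value and a flag
                // indicating whether the exchange succeeded.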
                let dest = result.project_field(bx, 0);
                bx.store_to_place(val, dest.val);
                let dest = result.project_field(bx, 1);
                bx.store_to_place(success, dest.val);

                return Ok(());
            }
            // These are all AtomicRMW ops
            sym::atomic_max | sym::atomic_min => {
                let atom_op = if name == sym::atomic_max {
                    AtomicRmwBinOp::AtomicMax
                } else {
                    AtomicRmwBinOp::AtomicMin
                };

                let ty = fn_args.type_at(0);
                if matches!(ty.kind(), ty::Int(_)) {
                    let ordering = fn_args.const_at(1).to_value();
                    let ptr = args[0].immediate();
                    let val = args[1].immediate();
                    bx.atomic_rmw(atom_op, ptr, val, parse_atomic_ordering(ordering))
                } else {
                    invalid_monomorphization_int_type(ty);
                    return Ok(());
                }
            }
            sym::atomic_umax | sym::atomic_umin => {
                let atom_op = if name == sym::atomic_umax {
                    AtomicRmwBinOp::AtomicUMax
                } else {
                    AtomicRmwBinOp::AtomicUMin
                };

                let ty = fn_args.type_at(0);
                if matches!(ty.kind(), ty::Uint(_)) {
                    let ordering = fn_args.const_at(1).to_value();
                    let ptr = args[0].immediate();
                    let val = args[1].immediate();
                    bx.atomic_rmw(atom_op, ptr, val, parse_atomic_ordering(ordering))
                } else {
                    invalid_monomorphization_int_type(ty);
                    return Ok(());
                }
            }
            sym::atomic_xchg
            | sym::atomic_xadd
            | sym::atomic_xsub
            | sym::atomic_and
            | sym::atomic_nand
            | sym::atomic_or
            | sym::atomic_xor => {
                let atom_op = match name {
                    sym::atomic_xchg => AtomicRmwBinOp::AtomicXchg,
                    sym::atomic_xadd => AtomicRmwBinOp::AtomicAdd,
                    sym::atomic_xsub => AtomicRmwBinOp::AtomicSub,
                    sym::atomic_and => AtomicRmwBinOp::AtomicAnd,
                    sym::atomic_nand => AtomicRmwBinOp::AtomicNand,
                    sym::atomic_or => AtomicRmwBinOp::AtomicOr,
                    sym::atomic_xor => AtomicRmwBinOp::AtomicXor,
                    _ => unreachable!(),
                };

                let ty = fn_args.type_at(0);
                if int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_raw_ptr() {
                    let ordering = fn_args.const_at(1).to_value();
                    let ptr = args[0].immediate();
                    let val = args[1].immediate();
                    bx.atomic_rmw(atom_op, ptr, val, parse_atomic_ordering(ordering))
                } else {
                    invalid_monomorphization_int_type(ty);
                    return Ok(());
                }
            }
            sym::atomic_fence => {
                let ordering = fn_args.const_at(0).to_value();
                bx.atomic_fence(parse_atomic_ordering(ordering), SynchronizationScope::CrossThread);
                return Ok(());
            }

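            // Like `atomic_fence`, but with a single-thread synchronization scope; this is
            // what `compiler_fence` lowers to.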
            sym::atomic_singlethreadfence => {
                let ordering = fn_args.const_at(0).to_value();
                bx.atomic_fence(
                    parse_atomic_ordering(ordering),
                    SynchronizationScope::SingleThread,
                );
                return Ok(());
            }

            sym::nontemporal_store => {
                let dst = args[0].deref(bx.cx());
                args[1].val.nontemporal_store(bx, dst);
                return Ok(());
            }

            sym::ptr_offset_from | sym::ptr_offset_from_unsigned => {
                let ty = fn_args.type_at(0);
                let pointee_size = bx.layout_of(ty).size;

                let a = args[0].immediate();
                let b = args[1].immediate();
                let a = bx.ptrtoint(a, bx.type_isize());
                let b = bx.ptrtoint(b, bx.type_isize());
                let pointee_size = bx.const_usize(pointee_size.bytes());
                if name == sym::ptr_offset_from {
                    // This is the same sequence that Clang emits for pointer subtraction.
                    // It can be neither `nsw` nor `nuw` because the input is treated as
                    // unsigned but then the output is treated as signed, so neither works.
                    let d = bx.sub(a, b);
                    // this is where the signed magic happens (notice the `s` in `exactsdiv`)
                    bx.exactsdiv(d, pointee_size)
                } else {
                    // The `_unsigned` version knows the relative ordering of the pointers,
                    // so can use `sub nuw` and `udiv exact` instead of dealing in signed.
                    let d = bx.unchecked_usub(a, b);
                    bx.exactudiv(d, pointee_size)
                }
            }

            sym::cold_path => {
                // This is a no-op. The intrinsic is just a hint to the optimizer.
                return Ok(());
            }

            _ => {
                // Need to use backend-specific things in the implementation.
                return bx.codegen_intrinsic_call(instance, args, result, span);
            }
        };

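        // `bool` results are `i1` immediates; widen them to their in-memory representation
        // (via `from_immediate`) before storing. Unit results have nothing to store.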
        if result.layout.ty.is_bool() {
            let val = bx.from_immediate(llval);
            bx.store_to_place(val, result.val);
        } else if !result.layout.ty.is_unit() {
            bx.store_to_place(llval, result.val);
        }
        Ok(())
    }
}

// Returns the width of an integer `Ty` and whether it is signed.
// Returns `None` if the type is not an integer.
// FIXME: there are multiple functions like this; investigate reusing some of the
// already existing ones.
fn int_type_width_signed(ty: Ty<'_>, tcx: TyCtxt<'_>) -> Option<(u64, bool)> {
    match ty.kind() {
        ty::Int(t) => {
            Some((t.bit_width().unwrap_or(u64::from(tcx.sess.target.pointer_width)), true))
        }
        ty::Uint(t) => {
            Some((t.bit_width().unwrap_or(u64::from(tcx.sess.target.pointer_width)), false))
        }
        _ => None,
    }
}

// Returns the width of a float Ty
// Returns None if the type is not a float
fn float_type_width(ty: Ty<'_>) -> Option<u64> {
    match ty.kind() {
        ty::Float(t) => Some(t.bit_width()),
        _ => None,
    }
}