use rustc_abi::WrappingRange;
use rustc_middle::mir::SourceInfo;
use rustc_middle::ty::{self, Ty, TyCtxt};
use rustc_middle::{bug, span_bug};
use rustc_session::config::OptLevel;
use rustc_span::sym;

use super::FunctionCx;
use super::operand::OperandRef;
use super::place::PlaceRef;
use crate::common::{AtomicRmwBinOp, SynchronizationScope};
use crate::errors::InvalidMonomorphization;
use crate::traits::*;
use crate::{MemFlags, meth, size_of_val};

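/// Emits a (possibly volatile) `memcpy`/`memmove` of `count` values of type `ty`
/// from `src` to `dst`, using `memmove` when the two regions are allowed to overlap.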
fn copy_intrinsic<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
    bx: &mut Bx,
    allow_overlap: bool,
    volatile: bool,
    ty: Ty<'tcx>,
    dst: Bx::Value,
    src: Bx::Value,
    count: Bx::Value,
) {
    let layout = bx.layout_of(ty);
    let size = layout.size;
    let align = layout.align.abi;
    let size = bx.mul(bx.const_usize(size.bytes()), count);
    let flags = if volatile { MemFlags::VOLATILE } else { MemFlags::empty() };
    if allow_overlap {
        bx.memmove(dst, align, src, align, size, flags);
    } else {
        bx.memcpy(dst, align, src, align, size, flags);
    }
}

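/// Emits a (possibly volatile) `memset` that fills `count` values of type `ty` at
/// `dst` with the byte `val`.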
fn memset_intrinsic<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
    bx: &mut Bx,
    volatile: bool,
    ty: Ty<'tcx>,
    dst: Bx::Value,
    val: Bx::Value,
    count: Bx::Value,
) {
    let layout = bx.layout_of(ty);
    let size = layout.size;
    let align = layout.align.abi;
    let size = bx.mul(bx.const_usize(size.bytes()), count);
    let flags = if volatile { MemFlags::VOLATILE } else { MemFlags::empty() };
    bx.memset(dst, val, size, align, flags);
}

impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
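    /// Codegen of a target-independent intrinsic call.
    ///
    /// In the `Err` case, returns the instance that should be codegened as a regular
    /// function call instead.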
    pub fn codegen_intrinsic_call(
        &mut self,
        bx: &mut Bx,
        instance: ty::Instance<'tcx>,
        args: &[OperandRef<'tcx, Bx::Value>],
        result: PlaceRef<'tcx, Bx::Value>,
        source_info: SourceInfo,
    ) -> Result<(), ty::Instance<'tcx>> {
        let span = source_info.span;

        let name = bx.tcx().item_name(instance.def_id());
        let fn_args = instance.args;

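        // `typed_swap_nonoverlapping` is special-cased here: when swapping the values
        // directly is worthwhile, do it and return; otherwise fall through to the
        // generic handling below.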
        if let sym::typed_swap_nonoverlapping = name {
            let pointee_ty = fn_args.type_at(0);
            let pointee_layout = bx.layout_of(pointee_ty);
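            // Swap through loaded values when the backend passes this type by value,
            // when optimizations are disabled, or when targeting SPIR-V; otherwise the
            // intrinsic is left to the generic lowering.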
            if !bx.is_backend_ref(pointee_layout)
                || bx.sess().opts.optimize == OptLevel::No
                || bx.sess().target.arch == "spirv"
            {
                let align = pointee_layout.align.abi;
                let x_place = args[0].val.deref(align);
                let y_place = args[1].val.deref(align);
                bx.typed_place_swap(x_place, y_place, pointee_layout);
                return Ok(());
            }
        }

        let invalid_monomorphization_int_type = |ty| {
            bx.tcx().dcx().emit_err(InvalidMonomorphization::BasicIntegerType { span, name, ty });
        };

        let parse_atomic_ordering = |ord: ty::Value<'tcx>| {
            let discr = ord.valtree.unwrap_branch()[0].unwrap_leaf();
            discr.to_atomic_ordering()
        };

        if args.is_empty() {
            match name {
                sym::abort
                | sym::unreachable
                | sym::cold_path
                | sym::breakpoint
                | sym::assert_zero_valid
                | sym::assert_mem_uninitialized_valid
                | sym::assert_inhabited
                | sym::ub_checks
                | sym::contract_checks
                | sym::atomic_fence
                | sym::atomic_singlethreadfence
                | sym::caller_location => {}
                _ => {
                    span_bug!(
                        span,
                        "nullary intrinsic {name} must either be in a const block or explicitly opted out because it is inherently a runtime intrinsic"
                    );
                }
            }
        }

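        // Each arm below either performs its own stores and returns early, or evaluates
        // to an immediate value that is stored into `result` after the match.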
        let llval = match name {
            sym::abort => {
                bx.abort();
                return Ok(());
            }

            sym::caller_location => {
                let location = self.get_caller_location(bx, source_info);
                location.val.store(bx, result);
                return Ok(());
            }

            sym::va_start => bx.va_start(args[0].immediate()),
            sym::va_end => bx.va_end(args[0].immediate()),
            sym::size_of_val => {
                let tp_ty = fn_args.type_at(0);
                let (_, meta) = args[0].val.pointer_parts();
                let (llsize, _) = size_of_val::size_and_align_of_dst(bx, tp_ty, meta);
                llsize
            }
            sym::align_of_val => {
                let tp_ty = fn_args.type_at(0);
                let (_, meta) = args[0].val.pointer_parts();
                let (_, llalign) = size_of_val::size_and_align_of_dst(bx, tp_ty, meta);
                llalign
            }
            sym::vtable_size | sym::vtable_align => {
                let vtable = args[0].immediate();
                let idx = match name {
                    sym::vtable_size => ty::COMMON_VTABLE_ENTRIES_SIZE,
                    sym::vtable_align => ty::COMMON_VTABLE_ENTRIES_ALIGN,
                    _ => bug!(),
                };
                let value = meth::VirtualIndex::from_index(idx).get_usize(
                    bx,
                    vtable,
                    instance.ty(bx.tcx(), bx.typing_env()),
                );
                match name {
                    sym::vtable_size => {
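                        // Attach range metadata stating that the size read from the
                        // vtable is at most `isize::MAX`.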
                        let size_bound = bx.data_layout().ptr_sized_integer().signed_max() as u128;
                        bx.range_metadata(value, WrappingRange { start: 0, end: size_bound });
                    }
                    sym::vtable_align => {
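                        // Alignment is always nonzero.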
                        bx.range_metadata(value, WrappingRange { start: 1, end: !0 })
                    }
                    _ => {}
                }
                value
            }
            sym::arith_offset => {
                let ty = fn_args.type_at(0);
                let layout = bx.layout_of(ty);
                let ptr = args[0].immediate();
                let offset = args[1].immediate();
                bx.gep(bx.backend_type(layout), ptr, &[offset])
            }
            sym::copy => {
                copy_intrinsic(
                    bx,
                    true,
                    false,
                    fn_args.type_at(0),
                    args[1].immediate(),
                    args[0].immediate(),
                    args[2].immediate(),
                );
                return Ok(());
            }
            sym::write_bytes => {
                memset_intrinsic(
                    bx,
                    false,
                    fn_args.type_at(0),
                    args[0].immediate(),
                    args[1].immediate(),
                    args[2].immediate(),
                );
                return Ok(());
            }

            sym::volatile_copy_nonoverlapping_memory => {
                copy_intrinsic(
                    bx,
                    false,
                    true,
                    fn_args.type_at(0),
                    args[0].immediate(),
                    args[1].immediate(),
                    args[2].immediate(),
                );
                return Ok(());
            }
            sym::volatile_copy_memory => {
                copy_intrinsic(
                    bx,
                    true,
                    true,
                    fn_args.type_at(0),
                    args[0].immediate(),
                    args[1].immediate(),
                    args[2].immediate(),
                );
                return Ok(());
            }
            sym::volatile_set_memory => {
                memset_intrinsic(
                    bx,
                    true,
                    fn_args.type_at(0),
                    args[0].immediate(),
                    args[1].immediate(),
                    args[2].immediate(),
                );
                return Ok(());
            }
            sym::volatile_store => {
                let dst = args[0].deref(bx.cx());
                args[1].val.volatile_store(bx, dst);
                return Ok(());
            }
            sym::unaligned_volatile_store => {
                let dst = args[0].deref(bx.cx());
                args[1].val.unaligned_volatile_store(bx, dst);
                return Ok(());
            }
            sym::disjoint_bitor => {
                let a = args[0].immediate();
                let b = args[1].immediate();
                bx.or_disjoint(a, b)
            }
            sym::exact_div => {
                let ty = args[0].layout.ty;
                match int_type_width_signed(ty, bx.tcx()) {
                    Some((_width, signed)) => {
                        if signed {
                            bx.exactsdiv(args[0].immediate(), args[1].immediate())
                        } else {
                            bx.exactudiv(args[0].immediate(), args[1].immediate())
                        }
                    }
                    None => {
                        bx.tcx().dcx().emit_err(InvalidMonomorphization::BasicIntegerType {
                            span,
                            name,
                            ty,
                        });
                        return Ok(());
                    }
                }
            }
            sym::fadd_fast | sym::fsub_fast | sym::fmul_fast | sym::fdiv_fast | sym::frem_fast => {
                match float_type_width(args[0].layout.ty) {
                    Some(_width) => match name {
                        sym::fadd_fast => bx.fadd_fast(args[0].immediate(), args[1].immediate()),
                        sym::fsub_fast => bx.fsub_fast(args[0].immediate(), args[1].immediate()),
                        sym::fmul_fast => bx.fmul_fast(args[0].immediate(), args[1].immediate()),
                        sym::fdiv_fast => bx.fdiv_fast(args[0].immediate(), args[1].immediate()),
                        sym::frem_fast => bx.frem_fast(args[0].immediate(), args[1].immediate()),
                        _ => bug!(),
                    },
                    None => {
                        bx.tcx().dcx().emit_err(InvalidMonomorphization::BasicFloatType {
                            span,
                            name,
                            ty: args[0].layout.ty,
                        });
                        return Ok(());
                    }
                }
            }
            sym::fadd_algebraic
            | sym::fsub_algebraic
            | sym::fmul_algebraic
            | sym::fdiv_algebraic
            | sym::frem_algebraic => match float_type_width(args[0].layout.ty) {
                Some(_width) => match name {
                    sym::fadd_algebraic => {
                        bx.fadd_algebraic(args[0].immediate(), args[1].immediate())
                    }
                    sym::fsub_algebraic => {
                        bx.fsub_algebraic(args[0].immediate(), args[1].immediate())
                    }
                    sym::fmul_algebraic => {
                        bx.fmul_algebraic(args[0].immediate(), args[1].immediate())
                    }
                    sym::fdiv_algebraic => {
                        bx.fdiv_algebraic(args[0].immediate(), args[1].immediate())
                    }
                    sym::frem_algebraic => {
                        bx.frem_algebraic(args[0].immediate(), args[1].immediate())
                    }
                    _ => bug!(),
                },
                None => {
                    bx.tcx().dcx().emit_err(InvalidMonomorphization::BasicFloatType {
                        span,
                        name,
                        ty: args[0].layout.ty,
                    });
                    return Ok(());
                }
            },

            sym::float_to_int_unchecked => {
                if float_type_width(args[0].layout.ty).is_none() {
                    bx.tcx().dcx().emit_err(InvalidMonomorphization::FloatToIntUnchecked {
                        span,
                        ty: args[0].layout.ty,
                    });
                    return Ok(());
                }
                let Some((_width, signed)) = int_type_width_signed(result.layout.ty, bx.tcx())
                else {
                    bx.tcx().dcx().emit_err(InvalidMonomorphization::FloatToIntUnchecked {
                        span,
                        ty: result.layout.ty,
                    });
                    return Ok(());
                };
                if signed {
                    bx.fptosi(args[0].immediate(), bx.backend_type(result.layout))
                } else {
                    bx.fptoui(args[0].immediate(), bx.backend_type(result.layout))
                }
            }

            sym::atomic_load => {
                let ty = fn_args.type_at(0);
                if !(int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_raw_ptr()) {
                    invalid_monomorphization_int_type(ty);
                    return Ok(());
                }
                let ordering = fn_args.const_at(1).to_value();
                let layout = bx.layout_of(ty);
                let source = args[0].immediate();
                bx.atomic_load(
                    bx.backend_type(layout),
                    source,
                    parse_atomic_ordering(ordering),
                    layout.size,
                )
            }
            sym::atomic_store => {
                let ty = fn_args.type_at(0);
                if !(int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_raw_ptr()) {
                    invalid_monomorphization_int_type(ty);
                    return Ok(());
                }
                let ordering = fn_args.const_at(1).to_value();
                let size = bx.layout_of(ty).size;
                let val = args[1].immediate();
                let ptr = args[0].immediate();
                bx.atomic_store(val, ptr, parse_atomic_ordering(ordering), size);
                return Ok(());
            }
            sym::atomic_cxchg | sym::atomic_cxchgweak => {
                let ty = fn_args.type_at(0);
                if !(int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_raw_ptr()) {
                    invalid_monomorphization_int_type(ty);
                    return Ok(());
                }
                let succ_ordering = fn_args.const_at(1).to_value();
                let fail_ordering = fn_args.const_at(2).to_value();
                let weak = name == sym::atomic_cxchgweak;
                let dst = args[0].immediate();
                let cmp = args[1].immediate();
                let src = args[2].immediate();
                let (val, success) = bx.atomic_cmpxchg(
                    dst,
                    cmp,
                    src,
                    parse_atomic_ordering(succ_ordering),
                    parse_atomic_ordering(fail_ordering),
                    weak,
                );
                let val = bx.from_immediate(val);
                let success = bx.from_immediate(success);

                let dest = result.project_field(bx, 0);
                bx.store_to_place(val, dest.val);
                let dest = result.project_field(bx, 1);
                bx.store_to_place(success, dest.val);

                return Ok(());
            }
            sym::atomic_max | sym::atomic_min => {
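                // `atomic_max`/`atomic_min` are the signed variants; the unsigned ones
                // are `atomic_umax`/`atomic_umin` below.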
                let atom_op = if name == sym::atomic_max {
                    AtomicRmwBinOp::AtomicMax
                } else {
                    AtomicRmwBinOp::AtomicMin
                };

                let ty = fn_args.type_at(0);
                if matches!(ty.kind(), ty::Int(_)) {
                    let ordering = fn_args.const_at(1).to_value();
                    let ptr = args[0].immediate();
                    let val = args[1].immediate();
                    bx.atomic_rmw(atom_op, ptr, val, parse_atomic_ordering(ordering))
                } else {
                    invalid_monomorphization_int_type(ty);
                    return Ok(());
                }
            }
            sym::atomic_umax | sym::atomic_umin => {
                let atom_op = if name == sym::atomic_umax {
                    AtomicRmwBinOp::AtomicUMax
                } else {
                    AtomicRmwBinOp::AtomicUMin
                };

                let ty = fn_args.type_at(0);
                if matches!(ty.kind(), ty::Uint(_)) {
                    let ordering = fn_args.const_at(1).to_value();
                    let ptr = args[0].immediate();
                    let val = args[1].immediate();
                    bx.atomic_rmw(atom_op, ptr, val, parse_atomic_ordering(ordering))
                } else {
                    invalid_monomorphization_int_type(ty);
                    return Ok(());
                }
            }
            sym::atomic_xchg
            | sym::atomic_xadd
            | sym::atomic_xsub
            | sym::atomic_and
            | sym::atomic_nand
            | sym::atomic_or
            | sym::atomic_xor => {
                let atom_op = match name {
                    sym::atomic_xchg => AtomicRmwBinOp::AtomicXchg,
                    sym::atomic_xadd => AtomicRmwBinOp::AtomicAdd,
                    sym::atomic_xsub => AtomicRmwBinOp::AtomicSub,
                    sym::atomic_and => AtomicRmwBinOp::AtomicAnd,
                    sym::atomic_nand => AtomicRmwBinOp::AtomicNand,
                    sym::atomic_or => AtomicRmwBinOp::AtomicOr,
                    sym::atomic_xor => AtomicRmwBinOp::AtomicXor,
                    _ => unreachable!(),
                };

                let ty = fn_args.type_at(0);
                if int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_raw_ptr() {
                    let ordering = fn_args.const_at(1).to_value();
                    let ptr = args[0].immediate();
                    let val = args[1].immediate();
                    bx.atomic_rmw(atom_op, ptr, val, parse_atomic_ordering(ordering))
                } else {
                    invalid_monomorphization_int_type(ty);
                    return Ok(());
                }
            }
            sym::atomic_fence => {
                let ordering = fn_args.const_at(0).to_value();
                bx.atomic_fence(parse_atomic_ordering(ordering), SynchronizationScope::CrossThread);
                return Ok(());
            }

            sym::atomic_singlethreadfence => {
                let ordering = fn_args.const_at(0).to_value();
                bx.atomic_fence(
                    parse_atomic_ordering(ordering),
                    SynchronizationScope::SingleThread,
                );
                return Ok(());
            }

            sym::nontemporal_store => {
                let dst = args[0].deref(bx.cx());
                args[1].val.nontemporal_store(bx, dst);
                return Ok(());
            }

            sym::ptr_offset_from | sym::ptr_offset_from_unsigned => {
                let ty = fn_args.type_at(0);
                let pointee_size = bx.layout_of(ty).size;

                let a = args[0].immediate();
                let b = args[1].immediate();
                let a = bx.ptrtoint(a, bx.type_isize());
                let b = bx.ptrtoint(b, bx.type_isize());
                let pointee_size = bx.const_usize(pointee_size.bytes());
                if name == sym::ptr_offset_from {
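                    // The signed variant: the byte difference may be negative, but it is
                    // still an exact multiple of the pointee size, hence `exactsdiv`.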
                    let d = bx.sub(a, b);
                    bx.exactsdiv(d, pointee_size)
                } else {
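                    // The unsigned variant requires the first pointer to be no smaller
                    // than the second, so an unchecked unsigned subtraction and exact
                    // unsigned division suffice.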
                    let d = bx.unchecked_usub(a, b);
                    bx.exactudiv(d, pointee_size)
                }
            }

            sym::cold_path => {
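                // `cold_path` is purely a hint; there is nothing to emit for the call itself.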
                return Ok(());
            }

            _ => {
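                // Everything else is delegated to the backend-specific intrinsic lowering.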
                return bx.codegen_intrinsic_call(instance, args, result, span);
            }
        };

        if result.layout.ty.is_bool() {
            let val = bx.from_immediate(llval);
            bx.store_to_place(val, result.val);
        } else if !result.layout.ty.is_unit() {
            bx.store_to_place(llval, result.val);
        }
        Ok(())
    }
}

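/// Returns the width in bits of an integer type, together with whether it is signed,
/// using the target's pointer width for `isize`/`usize`. Returns `None` for
/// non-integer types.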
fn int_type_width_signed(ty: Ty<'_>, tcx: TyCtxt<'_>) -> Option<(u64, bool)> {
    match ty.kind() {
        ty::Int(t) => {
            Some((t.bit_width().unwrap_or(u64::from(tcx.sess.target.pointer_width)), true))
        }
        ty::Uint(t) => {
            Some((t.bit_width().unwrap_or(u64::from(tcx.sess.target.pointer_width)), false))
        }
        _ => None,
    }
}

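/// Returns the width in bits of a floating-point type, or `None` if `ty` is not a float.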
fn float_type_width(ty: Ty<'_>) -> Option<u64> {
    match ty.kind() {
        ty::Float(t) => Some(t.bit_width()),
        _ => None,
    }
}