// rustc_ty_utils/abi.rs

1use std::iter;
2
3use rustc_abi::Primitive::Pointer;
4use rustc_abi::{BackendRepr, ExternAbi, PointerKind, Scalar, Size};
5use rustc_hir as hir;
6use rustc_hir::lang_items::LangItem;
7use rustc_middle::bug;
8use rustc_middle::query::Providers;
9use rustc_middle::ty::layout::{
10    FnAbiError, HasTyCtxt, HasTypingEnv, LayoutCx, LayoutOf, TyAndLayout, fn_can_unwind,
11};
12use rustc_middle::ty::{self, InstanceKind, Ty, TyCtxt};
13use rustc_session::config::OptLevel;
14use rustc_span::DUMMY_SP;
15use rustc_span::def_id::DefId;
16use rustc_target::callconv::{
17    AbiMap, ArgAbi, ArgAttribute, ArgAttributes, ArgExtension, FnAbi, PassMode,
18};
19use tracing::debug;
20
21pub(crate) fn provide(providers: &mut Providers) {
22    *providers = Providers { fn_abi_of_fn_ptr, fn_abi_of_instance, ..*providers };
23}
24
// NOTE(eddyb) this is private to avoid using it from outside of
// `fn_abi_of_instance` - any other uses are either too high-level
// for `Instance` (e.g. typeck would use `Ty::fn_sig` instead),
// or should go through `FnAbi` instead, to avoid losing any
// adjustments `fn_abi_of_instance` might be performing.
//
// Computes the `FnSig` that ABI computation should use for `instance`.
// For plain `FnDef`s this is essentially the declared signature (with a
// vtable-shim tweak for the receiver); for closures, coroutine-closures
// and coroutines it synthesizes the signature of the trait method the
// instance actually implements (`call_*`, `resume`/`poll`/`next`/
// `poll_next`). All bound regions are erased.
#[tracing::instrument(level = "debug", skip(tcx, typing_env))]
fn fn_sig_for_fn_abi<'tcx>(
    tcx: TyCtxt<'tcx>,
    instance: ty::Instance<'tcx>,
    typing_env: ty::TypingEnv<'tcx>,
) -> ty::FnSig<'tcx> {
    // Thread-local shims have a fixed shape: no arguments, returning a
    // pointer to the thread-local value.
    if let InstanceKind::ThreadLocalShim(..) = instance.def {
        return tcx.mk_fn_sig(
            [],
            tcx.thread_local_ptr_ty(instance.def_id()),
            false,
            hir::Safety::Safe,
            rustc_abi::ExternAbi::Unadjusted,
        );
    }

    let ty = instance.ty(tcx, typing_env);
    match *ty.kind() {
        ty::FnDef(def_id, args) => {
            let mut sig = tcx
                .instantiate_bound_regions_with_erased(tcx.fn_sig(def_id).instantiate(tcx, args));

            // Modify `fn(self, ...)` to `fn(self: *mut Self, ...)`.
            if let ty::InstanceKind::VTableShim(..) = instance.def {
                let mut inputs_and_output = sig.inputs_and_output.to_vec();
                inputs_and_output[0] = Ty::new_mut_ptr(tcx, inputs_and_output[0]);
                sig.inputs_and_output = tcx.mk_type_list(&inputs_and_output);
            }

            sig
        }
        ty::Closure(def_id, args) => {
            // A closure is called through its `call_*` method, so the closure
            // environment becomes the first argument of the signature.
            let sig = tcx.instantiate_bound_regions_with_erased(args.as_closure().sig());
            let env_ty = tcx.closure_env_ty(
                Ty::new_closure(tcx, def_id, args),
                args.as_closure().kind(),
                tcx.lifetimes.re_erased,
            );

            tcx.mk_fn_sig(
                iter::once(env_ty).chain(sig.inputs().iter().cloned()),
                sig.output(),
                sig.c_variadic,
                sig.safety,
                sig.abi,
            )
        }
        ty::CoroutineClosure(def_id, args) => {
            let coroutine_ty = Ty::new_coroutine_closure(tcx, def_id, args);
            let sig = args.as_coroutine_closure().coroutine_closure_sig();

            // When this `CoroutineClosure` comes from a `ConstructCoroutineInClosureShim`,
            // make sure we respect the `target_kind` in that shim.
            // FIXME(async_closures): This shouldn't be needed, and we should be populating
            // a separate def-id for these bodies.
            let mut coroutine_kind = args.as_coroutine_closure().kind();

            let env_ty =
                if let InstanceKind::ConstructCoroutineInClosureShim { receiver_by_ref, .. } =
                    instance.def
                {
                    coroutine_kind = ty::ClosureKind::FnOnce;

                    // Implementations of `FnMut` and `Fn` for coroutine-closures
                    // still take their receiver by ref.
                    if receiver_by_ref {
                        Ty::new_imm_ref(tcx, tcx.lifetimes.re_erased, coroutine_ty)
                    } else {
                        coroutine_ty
                    }
                } else {
                    tcx.closure_env_ty(coroutine_ty, coroutine_kind, tcx.lifetimes.re_erased)
                };

            let sig = tcx.instantiate_bound_regions_with_erased(sig);

            // Signature is `call_*(env, args_tuple) -> Coroutine<...>`: the
            // return type is the coroutine that the coroutine-closure builds.
            tcx.mk_fn_sig(
                iter::once(env_ty).chain([sig.tupled_inputs_ty]),
                sig.to_coroutine_given_kind_and_upvars(
                    tcx,
                    args.as_coroutine_closure().parent_args(),
                    tcx.coroutine_for_closure(def_id),
                    coroutine_kind,
                    tcx.lifetimes.re_erased,
                    args.as_coroutine_closure().tupled_upvars_ty(),
                    args.as_coroutine_closure().coroutine_captures_by_ref_ty(),
                ),
                sig.c_variadic,
                sig.safety,
                sig.abi,
            )
        }
        ty::Coroutine(did, args) => {
            let coroutine_kind = tcx.coroutine_kind(did).unwrap();
            let sig = args.as_coroutine().sig();

            // The receiver: `&mut Self`, possibly wrapped in `Pin` below.
            let env_ty = Ty::new_mut_ref(tcx, tcx.lifetimes.re_erased, ty);

            let pin_did = tcx.require_lang_item(LangItem::Pin, DUMMY_SP);
            let pin_adt_ref = tcx.adt_def(pin_did);
            let pin_args = tcx.mk_args(&[env_ty.into()]);
            let env_ty = match coroutine_kind {
                hir::CoroutineKind::Desugared(hir::CoroutineDesugaring::Gen, _) => {
                    // Iterator::next doesn't accept a pinned argument,
                    // unlike for all other coroutine kinds.
                    env_ty
                }
                hir::CoroutineKind::Desugared(hir::CoroutineDesugaring::Async, _)
                | hir::CoroutineKind::Desugared(hir::CoroutineDesugaring::AsyncGen, _)
                | hir::CoroutineKind::Coroutine(_) => Ty::new_adt(tcx, pin_adt_ref, pin_args),
            };

            // The `FnSig` and the `ret_ty` here is for a coroutines main
            // `Coroutine::resume(...) -> CoroutineState` function in case we
            // have an ordinary coroutine, the `Future::poll(...) -> Poll`
            // function in case this is a special coroutine backing an async construct
            // or the `Iterator::next(...) -> Option` function in case this is a
            // special coroutine backing a gen construct.
            let (resume_ty, ret_ty) = match coroutine_kind {
                hir::CoroutineKind::Desugared(hir::CoroutineDesugaring::Async, _) => {
                    // The signature should be `Future::poll(_, &mut Context<'_>) -> Poll<Output>`
                    assert_eq!(sig.yield_ty, tcx.types.unit);

                    let poll_did = tcx.require_lang_item(LangItem::Poll, DUMMY_SP);
                    let poll_adt_ref = tcx.adt_def(poll_did);
                    let poll_args = tcx.mk_args(&[sig.return_ty.into()]);
                    let ret_ty = Ty::new_adt(tcx, poll_adt_ref, poll_args);

                    // We have to replace the `ResumeTy` that is used for type and borrow checking
                    // with `&mut Context<'_>` which is used in codegen.
                    #[cfg(debug_assertions)]
                    {
                        if let ty::Adt(resume_ty_adt, _) = sig.resume_ty.kind() {
                            let expected_adt =
                                tcx.adt_def(tcx.require_lang_item(LangItem::ResumeTy, DUMMY_SP));
                            assert_eq!(*resume_ty_adt, expected_adt);
                        } else {
                            panic!("expected `ResumeTy`, found `{:?}`", sig.resume_ty);
                        };
                    }
                    let context_mut_ref = Ty::new_task_context(tcx);

                    (Some(context_mut_ref), ret_ty)
                }
                hir::CoroutineKind::Desugared(hir::CoroutineDesugaring::Gen, _) => {
                    // The signature should be `Iterator::next(_) -> Option<Yield>`
                    let option_did = tcx.require_lang_item(LangItem::Option, DUMMY_SP);
                    let option_adt_ref = tcx.adt_def(option_did);
                    let option_args = tcx.mk_args(&[sig.yield_ty.into()]);
                    let ret_ty = Ty::new_adt(tcx, option_adt_ref, option_args);

                    assert_eq!(sig.return_ty, tcx.types.unit);
                    assert_eq!(sig.resume_ty, tcx.types.unit);

                    (None, ret_ty)
                }
                hir::CoroutineKind::Desugared(hir::CoroutineDesugaring::AsyncGen, _) => {
                    // The signature should be
                    // `AsyncIterator::poll_next(_, &mut Context<'_>) -> Poll<Option<Output>>`
                    assert_eq!(sig.return_ty, tcx.types.unit);

                    // Yield type is already `Poll<Option<yield_ty>>`
                    let ret_ty = sig.yield_ty;

                    // We have to replace the `ResumeTy` that is used for type and borrow checking
                    // with `&mut Context<'_>` which is used in codegen.
                    #[cfg(debug_assertions)]
                    {
                        if let ty::Adt(resume_ty_adt, _) = sig.resume_ty.kind() {
                            let expected_adt =
                                tcx.adt_def(tcx.require_lang_item(LangItem::ResumeTy, DUMMY_SP));
                            assert_eq!(*resume_ty_adt, expected_adt);
                        } else {
                            panic!("expected `ResumeTy`, found `{:?}`", sig.resume_ty);
                        };
                    }
                    let context_mut_ref = Ty::new_task_context(tcx);

                    (Some(context_mut_ref), ret_ty)
                }
                hir::CoroutineKind::Coroutine(_) => {
                    // The signature should be `Coroutine::resume(_, Resume) -> CoroutineState<Yield, Return>`
                    let state_did = tcx.require_lang_item(LangItem::CoroutineState, DUMMY_SP);
                    let state_adt_ref = tcx.adt_def(state_did);
                    let state_args = tcx.mk_args(&[sig.yield_ty.into(), sig.return_ty.into()]);
                    let ret_ty = Ty::new_adt(tcx, state_adt_ref, state_args);

                    (Some(sig.resume_ty), ret_ty)
                }
            };

            if let Some(resume_ty) = resume_ty {
                tcx.mk_fn_sig(
                    [env_ty, resume_ty],
                    ret_ty,
                    false,
                    hir::Safety::Safe,
                    rustc_abi::ExternAbi::Rust,
                )
            } else {
                // `Iterator::next` doesn't have a `resume` argument.
                tcx.mk_fn_sig(
                    [env_ty],
                    ret_ty,
                    false,
                    hir::Safety::Safe,
                    rustc_abi::ExternAbi::Rust,
                )
            }
        }
        _ => bug!("unexpected type {:?} in Instance::fn_sig", ty),
    }
}
243
244fn fn_abi_of_fn_ptr<'tcx>(
245    tcx: TyCtxt<'tcx>,
246    query: ty::PseudoCanonicalInput<'tcx, (ty::PolyFnSig<'tcx>, &'tcx ty::List<Ty<'tcx>>)>,
247) -> Result<&'tcx FnAbi<'tcx, Ty<'tcx>>, &'tcx FnAbiError<'tcx>> {
248    let ty::PseudoCanonicalInput { typing_env, value: (sig, extra_args) } = query;
249    fn_abi_new_uncached(
250        &LayoutCx::new(tcx, typing_env),
251        tcx.instantiate_bound_regions_with_erased(sig),
252        extra_args,
253        None,
254    )
255}
256
257fn fn_abi_of_instance<'tcx>(
258    tcx: TyCtxt<'tcx>,
259    query: ty::PseudoCanonicalInput<'tcx, (ty::Instance<'tcx>, &'tcx ty::List<Ty<'tcx>>)>,
260) -> Result<&'tcx FnAbi<'tcx, Ty<'tcx>>, &'tcx FnAbiError<'tcx>> {
261    let ty::PseudoCanonicalInput { typing_env, value: (instance, extra_args) } = query;
262    fn_abi_new_uncached(
263        &LayoutCx::new(tcx, typing_env),
264        fn_sig_for_fn_abi(tcx, instance, typing_env),
265        extra_args,
266        Some(instance),
267    )
268}
269
// Handle safe Rust thin and wide pointers.
//
// Fills in argument attributes (`zext`, `noundef`, `nonnull`, `noalias`,
// `readonly`, pointee size/alignment) for one scalar component of an
// argument or return value, based on Rust-level knowledge of the type.
//
// `attrs` is the attribute set being built; `scalar` is the component at
// `offset` within `layout` (the full value's layout); `is_return` marks the
// return place; `drop_target_pointee` is `Some(T)` when this is the raw-ptr
// argument of `drop_in_place::<T>` (which gets reference-like treatment).
fn adjust_for_rust_scalar<'tcx>(
    cx: LayoutCx<'tcx>,
    attrs: &mut ArgAttributes,
    scalar: Scalar,
    layout: TyAndLayout<'tcx>,
    offset: Size,
    is_return: bool,
    drop_target_pointee: Option<Ty<'tcx>>,
) {
    // Booleans are always a noundef i1 that needs to be zero-extended.
    if scalar.is_bool() {
        attrs.ext(ArgExtension::Zext);
        attrs.set(ArgAttribute::NoUndef);
        return;
    }

    // Any scalar whose validity excludes uninit bytes may be marked noundef.
    if !scalar.is_uninit_valid() {
        attrs.set(ArgAttribute::NoUndef);
    }

    // Only pointer types handled below.
    let Scalar::Initialized { value: Pointer(_), valid_range } = scalar else { return };

    // Set `nonnull` if the validity range excludes zero, or for the argument to `drop_in_place`,
    // which must be nonnull per its documented safety requirements.
    if !valid_range.contains(0) || drop_target_pointee.is_some() {
        attrs.set(ArgAttribute::NonNull);
    }

    let tcx = cx.tcx();

    if let Some(pointee) = layout.pointee_info_at(&cx, offset) {
        // Classify the pointer: either the layout already knows it is a safe
        // pointer (`pointee.safe`), or it is the `drop_in_place` receiver.
        let kind = if let Some(kind) = pointee.safe {
            Some(kind)
        } else if let Some(pointee) = drop_target_pointee {
            // The argument to `drop_in_place` is semantically equivalent to a mutable reference.
            Some(PointerKind::MutableRef { unpin: pointee.is_unpin(tcx, cx.typing_env) })
        } else {
            None
        };
        if let Some(kind) = kind {
            // Clamp the advertised alignment to what the target can actually
            // rely on.
            attrs.pointee_align =
                Some(pointee.align.min(cx.tcx().sess.target.max_reliable_alignment()));

            // `Box` are not necessarily dereferenceable for the entire duration of the function as
            // they can be deallocated at any time. Same for non-frozen shared references (see
            // <https://github.com/rust-lang/rust/pull/98017>), and for mutable references to
            // potentially self-referential types (see
            // <https://github.com/rust-lang/unsafe-code-guidelines/issues/381>). If LLVM had a way
            // to say "dereferenceable on entry" we could use it here.
            attrs.pointee_size = match kind {
                PointerKind::Box { .. }
                | PointerKind::SharedRef { frozen: false }
                | PointerKind::MutableRef { unpin: false } => Size::ZERO,
                PointerKind::SharedRef { frozen: true }
                | PointerKind::MutableRef { unpin: true } => pointee.size,
            };

            // The aliasing rules for `Box<T>` are still not decided, but currently we emit
            // `noalias` for it. This can be turned off using an unstable flag.
            // See https://github.com/rust-lang/unsafe-code-guidelines/issues/326
            let noalias_for_box = tcx.sess.opts.unstable_opts.box_noalias;

            // LLVM prior to version 12 had known miscompiles in the presence of noalias attributes
            // (see #54878), so it was conditionally disabled, but we don't support earlier
            // versions at all anymore. We still support turning it off using -Zmutable-noalias.
            let noalias_mut_ref = tcx.sess.opts.unstable_opts.mutable_noalias;

            // `&T` where `T` contains no `UnsafeCell<U>` is immutable, and can be marked as both
            // `readonly` and `noalias`, as LLVM's definition of `noalias` is based solely on memory
            // dependencies rather than pointer equality. However this only applies to arguments,
            // not return values.
            //
            // `&mut T` and `Box<T>` where `T: Unpin` are unique and hence `noalias`.
            let no_alias = match kind {
                PointerKind::SharedRef { frozen } => frozen,
                PointerKind::MutableRef { unpin } => unpin && noalias_mut_ref,
                PointerKind::Box { unpin, global } => unpin && global && noalias_for_box,
            };
            // We can never add `noalias` in return position; that LLVM attribute has some very surprising semantics
            // (see <https://github.com/rust-lang/unsafe-code-guidelines/issues/385#issuecomment-1368055745>).
            if no_alias && !is_return {
                attrs.set(ArgAttribute::NoAlias);
            }

            // Frozen shared references are immutable for the call's duration,
            // so arguments (not returns) may additionally be `readonly`.
            if matches!(kind, PointerKind::SharedRef { frozen: true }) && !is_return {
                attrs.set(ArgAttribute::ReadOnly);
            }
        }
    }
}
362
/// Ensure that the ABI makes basic sense.
///
/// Debug-oriented validation run on every freshly built `FnAbi`: checks that
/// each argument's `PassMode` is consistent with its layout (and, for rustic
/// ABIs, with Rust-specific calling-convention invariants). Panics on any
/// violation. The return place is checked with the same rules as arguments.
fn fn_abi_sanity_check<'tcx>(
    cx: &LayoutCx<'tcx>,
    fn_abi: &FnAbi<'tcx, Ty<'tcx>>,
    spec_abi: ExternAbi,
) {
    // Checks a single argument (or the return place) of `fn_abi`.
    fn fn_arg_sanity_check<'tcx>(
        cx: &LayoutCx<'tcx>,
        fn_abi: &FnAbi<'tcx, Ty<'tcx>>,
        spec_abi: ExternAbi,
        arg: &ArgAbi<'tcx, Ty<'tcx>>,
    ) {
        let tcx = cx.tcx();

        if spec_abi.is_rustic_abi() {
            if arg.layout.is_zst() {
                // Casting closures to function pointers depends on ZST closure types being
                // omitted entirely in the calling convention.
                assert!(arg.is_ignore());
            }
            if let PassMode::Indirect { on_stack, .. } = arg.mode {
                assert!(!on_stack, "rust abi shouldn't use on_stack");
            }
        }

        match &arg.mode {
            PassMode::Ignore => {
                // Only ZSTs may be ignored.
                assert!(arg.layout.is_zst());
            }
            PassMode::Direct(_) => {
                // Here the Rust type is used to determine the actual ABI, so we have to be very
                // careful. Scalar/Vector is fine, since backends will generally use
                // `layout.backend_repr` and ignore everything else. We should just reject
                //`Aggregate` entirely here, but some targets need to be fixed first.
                match arg.layout.backend_repr {
                    BackendRepr::Scalar(_) | BackendRepr::SimdVector { .. } => {}
                    BackendRepr::ScalarPair(..) => {
                        panic!("`PassMode::Direct` used for ScalarPair type {}", arg.layout.ty)
                    }
                    BackendRepr::Memory { sized } => {
                        // For an unsized type we'd only pass the sized prefix, so there is no universe
                        // in which we ever want to allow this.
                        assert!(sized, "`PassMode::Direct` for unsized type in ABI: {:#?}", fn_abi);

                        // This really shouldn't happen even for sized aggregates, since
                        // `immediate_llvm_type` will use `layout.fields` to turn this Rust type into an
                        // LLVM type. This means all sorts of Rust type details leak into the ABI.
                        // The unadjusted ABI however uses Direct for all args. It is ill-specified,
                        // but unfortunately we need it for calling certain LLVM intrinsics.
                        assert!(
                            matches!(spec_abi, ExternAbi::Unadjusted),
                            "`PassMode::Direct` for aggregates only allowed for \"unadjusted\"\n\
                             Problematic type: {:#?}",
                            arg.layout,
                        );
                    }
                }
            }
            PassMode::Pair(_, _) => {
                // Similar to `Direct`, we need to make sure that backends use `layout.backend_repr`
                // and ignore the rest of the layout.
                assert!(
                    matches!(arg.layout.backend_repr, BackendRepr::ScalarPair(..)),
                    "PassMode::Pair for type {}",
                    arg.layout.ty
                );
            }
            PassMode::Cast { .. } => {
                // `Cast` means "transmute to `CastType`"; that only makes sense for sized types.
                assert!(arg.layout.is_sized());
            }
            PassMode::Indirect { meta_attrs: None, .. } => {
                // No metadata, must be sized.
                // Conceptually, unsized arguments must be copied around, which requires dynamically
                // determining their size, which we cannot do without metadata. Consult
                // t-opsem before removing this check.
                assert!(arg.layout.is_sized());
            }
            PassMode::Indirect { meta_attrs: Some(_), on_stack, .. } => {
                // With metadata. Must be unsized and not on the stack.
                assert!(arg.layout.is_unsized() && !on_stack);
                // Also, must not be `extern` type.
                let tail = tcx.struct_tail_for_codegen(arg.layout.ty, cx.typing_env);
                if matches!(tail.kind(), ty::Foreign(..)) {
                    // These types do not have metadata, so having `meta_attrs` is bogus.
                    // Conceptually, unsized arguments must be copied around, which requires dynamically
                    // determining their size. Therefore, we cannot allow `extern` types here. Consult
                    // t-opsem before removing this check.
                    panic!("unsized arguments must not be `extern` types");
                }
            }
        }
    }

    // Check every argument, then the return place under the same rules.
    for arg in fn_abi.args.iter() {
        fn_arg_sanity_check(cx, fn_abi, spec_abi, arg);
    }
    fn_arg_sanity_check(cx, fn_abi, spec_abi, &fn_abi.ret);
}
462
// Builds a fresh (arena-allocated) `FnAbi` for `sig`.
//
// `extra_args` supplies additional argument types beyond `sig.inputs()`
// (only allowed for C-variadic signatures; for "rust-call" functions the
// final tuple argument is untupled instead). `instance`, when known, drives
// instance-specific behavior: the implicit caller-location argument, virtual
// (vtable) call handling, and a def-id used for unwind/attribute deduction.
#[tracing::instrument(level = "debug", skip(cx, instance))]
fn fn_abi_new_uncached<'tcx>(
    cx: &LayoutCx<'tcx>,
    sig: ty::FnSig<'tcx>,
    extra_args: &[Ty<'tcx>],
    instance: Option<ty::Instance<'tcx>>,
) -> Result<&'tcx FnAbi<'tcx, Ty<'tcx>>, &'tcx FnAbiError<'tcx>> {
    let tcx = cx.tcx();
    // For virtual calls the def-id is withheld (`None`), since the actual
    // callee behind the vtable is unknown.
    let (caller_location, determined_fn_def_id, is_virtual_call) = if let Some(instance) = instance
    {
        let is_virtual_call = matches!(instance.def, ty::InstanceKind::Virtual(..));
        (
            instance.def.requires_caller_location(tcx).then(|| tcx.caller_location_ty()),
            if is_virtual_call { None } else { Some(instance.def_id()) },
            is_virtual_call,
        )
    } else {
        (None, None, false)
    };
    let sig = tcx.normalize_erasing_regions(cx.typing_env, sig);

    // Map the declared extern ABI to the target's canonical calling convention.
    let abi_map = AbiMap::from_target(&tcx.sess.target);
    let conv = abi_map.canonize_abi(sig.abi, sig.c_variadic).unwrap();

    let mut inputs = sig.inputs();
    let extra_args = if sig.abi == ExternAbi::RustCall {
        assert!(!sig.c_variadic && extra_args.is_empty());

        // "rust-call": untuple the trailing tuple argument into individual
        // arguments.
        if let Some(input) = sig.inputs().last()
            && let ty::Tuple(tupled_arguments) = input.kind()
        {
            inputs = &sig.inputs()[0..sig.inputs().len() - 1];
            tupled_arguments
        } else {
            bug!(
                "argument to function with \"rust-call\" ABI \
                    is not a tuple"
            );
        }
    } else {
        assert!(sig.c_variadic || extra_args.is_empty());
        extra_args
    };

    // `(async_)drop_in_place` gets special treatment for its pointer argument
    // in `adjust_for_rust_scalar`.
    let is_drop_in_place = determined_fn_def_id.is_some_and(|def_id| {
        tcx.is_lang_item(def_id, LangItem::DropInPlace)
            || tcx.is_lang_item(def_id, LangItem::AsyncDropInPlace)
    });

    // Computes the `ArgAbi` for one input (`arg_idx = Some(i)`) or the return
    // place (`arg_idx = None`).
    let arg_of = |ty: Ty<'tcx>, arg_idx: Option<usize>| -> Result<_, &'tcx FnAbiError<'tcx>> {
        let span = tracing::debug_span!("arg_of");
        let _entered = span.enter();
        let is_return = arg_idx.is_none();
        let is_drop_target = is_drop_in_place && arg_idx == Some(0);
        let drop_target_pointee = is_drop_target.then(|| match ty.kind() {
            ty::RawPtr(ty, _) => *ty,
            _ => bug!("argument to drop_in_place is not a raw ptr: {:?}", ty),
        });

        let layout = cx.layout_of(ty).map_err(|err| &*tcx.arena.alloc(FnAbiError::Layout(*err)))?;
        let layout = if is_virtual_call && arg_idx == Some(0) {
            // Don't pass the vtable, it's not an argument of the virtual fn.
            // Instead, pass just the data pointer, but give it the type `*const/mut dyn Trait`
            // or `&/&mut dyn Trait` because this is special-cased elsewhere in codegen
            make_thin_self_ptr(cx, layout)
        } else {
            layout
        };

        let mut arg = ArgAbi::new(cx, layout, |layout, scalar, offset| {
            let mut attrs = ArgAttributes::new();
            adjust_for_rust_scalar(
                *cx,
                &mut attrs,
                scalar,
                *layout,
                offset,
                is_return,
                drop_target_pointee,
            );
            attrs
        });

        if arg.layout.is_zst() {
            arg.mode = PassMode::Ignore;
        }

        Ok(arg)
    };

    let mut fn_abi = FnAbi {
        ret: arg_of(sig.output(), None)?,
        // Declared inputs, then untupled/variadic extras, then the implicit
        // caller-location argument (if any).
        args: inputs
            .iter()
            .copied()
            .chain(extra_args.iter().copied())
            .chain(caller_location)
            .enumerate()
            .map(|(i, ty)| arg_of(ty, Some(i)))
            .collect::<Result<_, _>>()?,
        c_variadic: sig.c_variadic,
        fixed_count: inputs.len() as u32,
        conv,
        can_unwind: fn_can_unwind(
            tcx,
            // Since `#[rustc_nounwind]` can change unwinding, we cannot infer unwinding by `fn_def_id` for a virtual call.
            determined_fn_def_id,
            sig.abi,
        ),
    };
    fn_abi_adjust_for_abi(
        cx,
        &mut fn_abi,
        sig.abi,
        // If this is a virtual call, we cannot pass the `fn_def_id`, as it might call other
        // functions from vtable. Internally, `deduced_param_attrs` attempts to infer attributes by
        // visit the function body.
        determined_fn_def_id,
    );
    debug!("fn_abi_new_uncached = {:?}", fn_abi);
    fn_abi_sanity_check(cx, &fn_abi, sig.abi);
    Ok(tcx.arena.alloc(fn_abi))
}
586
// Applies ABI-specific adjustments to a freshly built `FnAbi`:
// the "unadjusted" ABI forces every argument into `PassMode::Direct`,
// rustic ABIs get Rust-specific adjustments plus per-parameter deduced
// attributes (when `fn_def_id` is known), and all other ABIs defer to the
// target's foreign-ABI adjustment.
#[tracing::instrument(level = "trace", skip(cx))]
fn fn_abi_adjust_for_abi<'tcx>(
    cx: &LayoutCx<'tcx>,
    fn_abi: &mut FnAbi<'tcx, Ty<'tcx>>,
    abi: ExternAbi,
    fn_def_id: Option<DefId>,
) {
    if abi == ExternAbi::Unadjusted {
        // The "unadjusted" ABI passes aggregates in "direct" mode. That's fragile but needed for
        // some LLVM intrinsics.
        fn unadjust<'tcx>(arg: &mut ArgAbi<'tcx, Ty<'tcx>>) {
            // This still uses `PassMode::Pair` for ScalarPair types. That's unlikely to be intended,
            // but who knows what breaks if we change this now.
            if matches!(arg.layout.backend_repr, BackendRepr::Memory { .. }) {
                assert!(
                    arg.layout.backend_repr.is_sized(),
                    "'unadjusted' ABI does not support unsized arguments"
                );
            }
            arg.make_direct_deprecated();
        }

        unadjust(&mut fn_abi.ret);
        for arg in fn_abi.args.iter_mut() {
            unadjust(arg);
        }
        return;
    }

    let tcx = cx.tcx();

    if abi.is_rustic_abi() {
        fn_abi.adjust_for_rust_abi(cx);

        // Look up the deduced parameter attributes for this function, if we have its def ID and
        // we're optimizing in non-incremental mode. We'll tag its parameters with those attributes
        // as appropriate.
        let deduced_param_attrs =
            if tcx.sess.opts.optimize != OptLevel::No && tcx.sess.opts.incremental.is_none() {
                fn_def_id.map(|fn_def_id| tcx.deduced_param_attrs(fn_def_id)).unwrap_or_default()
            } else {
                &[]
            };

        for (arg_idx, arg) in fn_abi.args.iter_mut().enumerate() {
            if arg.is_ignore() {
                continue;
            }

            // If we deduced that this parameter was read-only, add that to the attribute list now.
            //
            // The `readonly` parameter only applies to pointers, so we can only do this if the
            // argument was passed indirectly. (If the argument is passed directly, it's an SSA
            // value, so it's implicitly immutable.)
            if let &mut PassMode::Indirect { ref mut attrs, .. } = &mut arg.mode {
                // The `deduced_param_attrs` list could be empty if this is a type of function
                // we can't deduce any parameters for, so make sure the argument index is in
                // bounds.
                if let Some(deduced_param_attrs) = deduced_param_attrs.get(arg_idx) {
                    if deduced_param_attrs.read_only {
                        attrs.regular.insert(ArgAttribute::ReadOnly);
                        debug!("added deduced read-only attribute");
                    }
                }
            }
        }
    } else {
        // Non-rustic, non-"unadjusted" ABIs: target-specific adjustment.
        fn_abi.adjust_for_foreign_abi(cx, abi);
    }
}
657
658#[tracing::instrument(level = "debug", skip(cx))]
659fn make_thin_self_ptr<'tcx>(
660    cx: &(impl HasTyCtxt<'tcx> + HasTypingEnv<'tcx>),
661    layout: TyAndLayout<'tcx>,
662) -> TyAndLayout<'tcx> {
663    let tcx = cx.tcx();
664    let wide_pointer_ty = if layout.is_unsized() {
665        // unsized `self` is passed as a pointer to `self`
666        // FIXME (mikeyhew) change this to use &own if it is ever added to the language
667        Ty::new_mut_ptr(tcx, layout.ty)
668    } else {
669        match layout.backend_repr {
670            BackendRepr::ScalarPair(..) | BackendRepr::Scalar(..) => (),
671            _ => bug!("receiver type has unsupported layout: {:?}", layout),
672        }
673
674        // In the case of Rc<Self>, we need to explicitly pass a *mut RcInner<Self>
675        // with a Scalar (not ScalarPair) ABI. This is a hack that is understood
676        // elsewhere in the compiler as a method on a `dyn Trait`.
677        // To get the type `*mut RcInner<Self>`, we just keep unwrapping newtypes until we
678        // get a built-in pointer type
679        let mut wide_pointer_layout = layout;
680        while !wide_pointer_layout.ty.is_raw_ptr() && !wide_pointer_layout.ty.is_ref() {
681            wide_pointer_layout = wide_pointer_layout
682                .non_1zst_field(cx)
683                .expect("not exactly one non-1-ZST field in a `DispatchFromDyn` type")
684                .1
685        }
686
687        wide_pointer_layout.ty
688    };
689
690    // we now have a type like `*mut RcInner<dyn Trait>`
691    // change its layout to that of `*mut ()`, a thin pointer, but keep the same type
692    // this is understood as a special case elsewhere in the compiler
693    let unit_ptr_ty = Ty::new_mut_ptr(tcx, tcx.types.unit);
694
695    TyAndLayout {
696        ty: wide_pointer_ty,
697
698        // NOTE(eddyb) using an empty `ParamEnv`, and `unwrap`-ing the `Result`
699        // should always work because the type is always `*mut ()`.
700        ..tcx.layout_of(ty::TypingEnv::fully_monomorphized().as_query_input(unit_ptr_ty)).unwrap()
701    }
702}