// rustc_target/callconv/mod.rs
1use std::fmt::Display;
2use std::str::FromStr;
3use std::{fmt, iter};
4
5use rustc_abi::{
6 AddressSpace, Align, BackendRepr, ExternAbi, HasDataLayout, Primitive, Reg, RegKind, Scalar,
7 Size, TyAbiInterface, TyAndLayout,
8};
9use rustc_macros::HashStable_Generic;
10
11use crate::spec::{HasTargetSpec, HasWasmCAbiOpt, HasX86AbiOpt, RustcAbi, WasmCAbi};
12
13mod aarch64;
14mod amdgpu;
15mod arm;
16mod avr;
17mod bpf;
18mod csky;
19mod hexagon;
20mod loongarch;
21mod m68k;
22mod mips;
23mod mips64;
24mod msp430;
25mod nvptx64;
26mod powerpc;
27mod powerpc64;
28mod riscv;
29mod s390x;
30mod sparc;
31mod sparc64;
32mod wasm;
33mod x86;
34mod x86_64;
35mod x86_win32;
36mod x86_win64;
37mod xtensa;
38
/// How a single argument (or return value) is passed for some calling convention.
#[derive(Clone, PartialEq, Eq, Hash, Debug, HashStable_Generic)]
pub enum PassMode {
    /// Ignore the argument.
    ///
    /// The argument is a ZST.
    Ignore,
    /// Pass the argument directly.
    ///
    /// The argument has a layout abi of `Scalar` or `Vector`.
    /// Unfortunately due to past mistakes, in rare cases on wasm, it can also be `Aggregate`.
    /// This is bad since it leaks LLVM implementation details into the ABI.
    /// (Also see <https://github.com/rust-lang/rust/issues/115666>.)
    Direct(ArgAttributes),
    /// Pass a pair's elements directly in two arguments.
    ///
    /// The argument has a layout abi of `ScalarPair`.
    Pair(ArgAttributes, ArgAttributes),
    /// Pass the argument after casting it. See the `CastTarget` docs for details.
    ///
    /// `pad_i32` indicates if a `Reg::i32()` dummy argument is emitted before the real argument.
    Cast { pad_i32: bool, cast: Box<CastTarget> },
    /// Pass the argument indirectly via a hidden pointer.
    ///
    /// The `meta_attrs` value, if any, is for the metadata (vtable or length) of an unsized
    /// argument. (This is the only mode that supports unsized arguments.)
    ///
    /// `on_stack` defines that the value should be passed at a fixed stack offset in accordance to
    /// the ABI rather than passed using a pointer. This corresponds to the `byval` LLVM argument
    /// attribute. The `byval` argument will use a byte array with the same size as the Rust type
    /// (which ensures that padding is preserved and that we do not rely on LLVM's struct layout),
    /// and will use the alignment specified in `attrs.pointee_align` (if `Some`) or the type's
    /// alignment (if `None`). This means that the alignment will not always
    /// match the Rust type's alignment; see documentation of `pass_by_stack_offset` for more info.
    ///
    /// `on_stack` cannot be true for unsized arguments, i.e., when `meta_attrs` is `Some`.
    Indirect { attrs: ArgAttributes, meta_attrs: Option<ArgAttributes>, on_stack: bool },
}
76
77impl PassMode {
78 /// Checks if these two `PassMode` are equal enough to be considered "the same for all
79 /// function call ABIs". However, the `Layout` can also impact ABI decisions,
80 /// so that needs to be compared as well!
81 pub fn eq_abi(&self, other: &Self) -> bool {
82 match (self, other) {
83 (PassMode::Ignore, PassMode::Ignore) => true,
84 (PassMode::Direct(a1), PassMode::Direct(a2)) => a1.eq_abi(a2),
85 (PassMode::Pair(a1, b1), PassMode::Pair(a2, b2)) => a1.eq_abi(a2) && b1.eq_abi(b2),
86 (
87 PassMode::Cast { cast: c1, pad_i32: pad1 },
88 PassMode::Cast { cast: c2, pad_i32: pad2 },
89 ) => c1.eq_abi(c2) && pad1 == pad2,
90 (
91 PassMode::Indirect { attrs: a1, meta_attrs: None, on_stack: s1 },
92 PassMode::Indirect { attrs: a2, meta_attrs: None, on_stack: s2 },
93 ) => a1.eq_abi(a2) && s1 == s2,
94 (
95 PassMode::Indirect { attrs: a1, meta_attrs: Some(e1), on_stack: s1 },
96 PassMode::Indirect { attrs: a2, meta_attrs: Some(e2), on_stack: s2 },
97 ) => a1.eq_abi(a2) && e1.eq_abi(e2) && s1 == s2,
98 _ => false,
99 }
100 }
101}
102
// Hack to disable non_upper_case_globals only for the bitflags! and not for the rest
// of this module
pub use attr_impl::ArgAttribute;

#[allow(non_upper_case_globals)]
#[allow(unused)]
mod attr_impl {
    use rustc_macros::HashStable_Generic;

    // The subset of llvm::Attribute needed for arguments, packed into a bitfield.
    // Note that bit `1 << 0` is currently unused; flags start at `1 << 1`.
    #[derive(Clone, Copy, Default, Hash, PartialEq, Eq, HashStable_Generic)]
    pub struct ArgAttribute(u8);
    bitflags::bitflags! {
        impl ArgAttribute: u8 {
            const NoAlias = 1 << 1;
            const NoCapture = 1 << 2;
            const NonNull = 1 << 3;
            const ReadOnly = 1 << 4;
            const InReg = 1 << 5;
            const NoUndef = 1 << 6;
        }
    }
    // Provides a `Debug` impl that prints the set flags by name.
    rustc_data_structures::external_bitflags_debug! { ArgAttribute }
}
127
/// Sometimes an ABI requires small integers to be extended to a full or partial register. This enum
/// defines if this extension should be zero-extension or sign-extension when necessary. When it is
/// not necessary to extend the argument, this enum is ignored.
#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug, HashStable_Generic)]
pub enum ArgExtension {
    /// No extension requested.
    None,
    /// Zero-extend the value.
    Zext,
    /// Sign-extend the value.
    Sext,
}
137
/// A compact representation of LLVM attributes (at least those relevant for this module)
/// that can be manipulated without interacting with LLVM's Attribute machinery.
#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug, HashStable_Generic)]
pub struct ArgAttributes {
    /// The flag-like attributes (see `ArgAttribute` bitflags).
    pub regular: ArgAttribute,
    /// Whether (and how) small integers are extended to a register.
    pub arg_ext: ArgExtension,
    /// The minimum size of the pointee, guaranteed to be valid for the duration of the whole call
    /// (corresponding to LLVM's dereferenceable_or_null attributes, i.e., it is okay for this to be
    /// set on a null pointer, but all non-null pointers must be dereferenceable).
    pub pointee_size: Size,
    /// The minimum alignment of the pointee, if any.
    pub pointee_align: Option<Align>,
}
151
152impl ArgAttributes {
153 pub fn new() -> Self {
154 ArgAttributes {
155 regular: ArgAttribute::default(),
156 arg_ext: ArgExtension::None,
157 pointee_size: Size::ZERO,
158 pointee_align: None,
159 }
160 }
161
162 pub fn ext(&mut self, ext: ArgExtension) -> &mut Self {
163 assert!(
164 self.arg_ext == ArgExtension::None || self.arg_ext == ext,
165 "cannot set {:?} when {:?} is already set",
166 ext,
167 self.arg_ext
168 );
169 self.arg_ext = ext;
170 self
171 }
172
173 pub fn set(&mut self, attr: ArgAttribute) -> &mut Self {
174 self.regular |= attr;
175 self
176 }
177
178 pub fn contains(&self, attr: ArgAttribute) -> bool {
179 self.regular.contains(attr)
180 }
181
182 /// Checks if these two `ArgAttributes` are equal enough to be considered "the same for all
183 /// function call ABIs".
184 pub fn eq_abi(&self, other: &Self) -> bool {
185 // There's only one regular attribute that matters for the call ABI: InReg.
186 // Everything else is things like noalias, dereferenceable, nonnull, ...
187 // (This also applies to pointee_size, pointee_align.)
188 if self.regular.contains(ArgAttribute::InReg) != other.regular.contains(ArgAttribute::InReg)
189 {
190 return false;
191 }
192 // We also compare the sign extension mode -- this could let the callee make assumptions
193 // about bits that conceptually were not even passed.
194 if self.arg_ext != other.arg_ext {
195 return false;
196 }
197 true
198 }
199}
200
/// An argument passed entirely in registers with the
/// same kind (e.g., HFA / HVA on PPC64 and AArch64).
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, HashStable_Generic)]
pub struct Uniform {
    /// The register type each element is passed in.
    pub unit: Reg,

    /// The total size of the argument, which can be:
    /// * equal to `unit.size` (one scalar/vector),
    /// * a multiple of `unit.size` (an array of scalar/vectors),
    /// * if `unit.kind` is `Integer`, the last element can be shorter, i.e., `{ i64, i64, i32 }`
    /// for 64-bit integers with a total size of 20 bytes. When the argument is actually passed,
    /// this size will be rounded up to the nearest multiple of `unit.size`.
    pub total: Size,

    /// Indicate that the argument is consecutive, in the sense that either all values need to be
    /// passed in register, or all on the stack. If they are passed on the stack, there should be
    /// no additional padding between elements.
    pub is_consecutive: bool,
}
220
221impl From<Reg> for Uniform {
222 fn from(unit: Reg) -> Uniform {
223 Uniform { unit, total: unit.size, is_consecutive: false }
224 }
225}
226
227impl Uniform {
228 pub fn align<C: HasDataLayout>(&self, cx: &C) -> Align {
229 self.unit.align(cx)
230 }
231
232 /// Pass using one or more values of the given type, without requiring them to be consecutive.
233 /// That is, some values may be passed in register and some on the stack.
234 pub fn new(unit: Reg, total: Size) -> Self {
235 Uniform { unit, total, is_consecutive: false }
236 }
237
238 /// Pass using one or more consecutive values of the given type. Either all values will be
239 /// passed in registers, or all on the stack.
240 pub fn consecutive(unit: Reg, total: Size) -> Self {
241 Uniform { unit, total, is_consecutive: true }
242 }
243}
244
/// Describes the type used for `PassMode::Cast`.
///
/// Passing arguments in this mode works as follows: the registers in the `prefix` (the ones that
/// are `Some`) get laid out one after the other (using `repr(C)` layout rules). Then the
/// `rest.unit` register type gets repeated often enough to cover `rest.size`. This describes the
/// actual type used for the call; the Rust type of the argument is then transmuted to this ABI type
/// (and all data in the padding between the registers is dropped).
#[derive(Clone, PartialEq, Eq, Hash, Debug, HashStable_Generic)]
pub struct CastTarget {
    /// Up to 8 leading registers laid out before `rest`; `None` entries are skipped.
    pub prefix: [Option<Reg>; 8],
    /// The trailing uniform run of registers.
    pub rest: Uniform,
    /// Attributes to attach to the cast argument.
    pub attrs: ArgAttributes,
}
258
259impl From<Reg> for CastTarget {
260 fn from(unit: Reg) -> CastTarget {
261 CastTarget::from(Uniform::from(unit))
262 }
263}
264
265impl From<Uniform> for CastTarget {
266 fn from(uniform: Uniform) -> CastTarget {
267 CastTarget {
268 prefix: [None; 8],
269 rest: uniform,
270 attrs: ArgAttributes {
271 regular: ArgAttribute::default(),
272 arg_ext: ArgExtension::None,
273 pointee_size: Size::ZERO,
274 pointee_align: None,
275 },
276 }
277 }
278}
279
280impl CastTarget {
281 pub fn pair(a: Reg, b: Reg) -> CastTarget {
282 CastTarget {
283 prefix: [Some(a), None, None, None, None, None, None, None],
284 rest: Uniform::from(b),
285 attrs: ArgAttributes {
286 regular: ArgAttribute::default(),
287 arg_ext: ArgExtension::None,
288 pointee_size: Size::ZERO,
289 pointee_align: None,
290 },
291 }
292 }
293
294 /// When you only access the range containing valid data, you can use this unaligned size;
295 /// otherwise, use the safer `size` method.
296 pub fn unaligned_size<C: HasDataLayout>(&self, _cx: &C) -> Size {
297 // Prefix arguments are passed in specific designated registers
298 let prefix_size = self
299 .prefix
300 .iter()
301 .filter_map(|x| x.map(|reg| reg.size))
302 .fold(Size::ZERO, |acc, size| acc + size);
303 // Remaining arguments are passed in chunks of the unit size
304 let rest_size =
305 self.rest.unit.size * self.rest.total.bytes().div_ceil(self.rest.unit.size.bytes());
306
307 prefix_size + rest_size
308 }
309
310 pub fn size<C: HasDataLayout>(&self, cx: &C) -> Size {
311 self.unaligned_size(cx).align_to(self.align(cx))
312 }
313
314 pub fn align<C: HasDataLayout>(&self, cx: &C) -> Align {
315 self.prefix
316 .iter()
317 .filter_map(|x| x.map(|reg| reg.align(cx)))
318 .fold(cx.data_layout().aggregate_align.abi.max(self.rest.align(cx)), |acc, align| {
319 acc.max(align)
320 })
321 }
322
323 /// Checks if these two `CastTarget` are equal enough to be considered "the same for all
324 /// function call ABIs".
325 pub fn eq_abi(&self, other: &Self) -> bool {
326 let CastTarget { prefix: prefix_l, rest: rest_l, attrs: attrs_l } = self;
327 let CastTarget { prefix: prefix_r, rest: rest_r, attrs: attrs_r } = other;
328 prefix_l == prefix_r && rest_l == rest_r && attrs_l.eq_abi(attrs_r)
329 }
330}
331
/// Information about how to pass an argument to,
/// or return a value from, a function, under some ABI.
#[derive(Clone, PartialEq, Eq, Hash, HashStable_Generic)]
pub struct ArgAbi<'a, Ty> {
    /// The type and layout of the argument.
    pub layout: TyAndLayout<'a, Ty>,
    /// How the argument is passed (see `PassMode`).
    pub mode: PassMode,
}
339
340// Needs to be a custom impl because of the bounds on the `TyAndLayout` debug impl.
341impl<'a, Ty: fmt::Display> fmt::Debug for ArgAbi<'a, Ty> {
342 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
343 let ArgAbi { layout, mode } = self;
344 f.debug_struct("ArgAbi").field("layout", layout).field("mode", mode).finish()
345 }
346}
347
impl<'a, Ty> ArgAbi<'a, Ty> {
    /// This defines the "default ABI" for that type, that is then later adjusted in `fn_abi_adjust_for_abi`.
    ///
    /// `scalar_attrs` computes the attributes for a scalar at the given offset within the
    /// argument (offset 0 for a lone scalar or the first half of a pair).
    pub fn new(
        cx: &impl HasDataLayout,
        layout: TyAndLayout<'a, Ty>,
        scalar_attrs: impl Fn(&TyAndLayout<'a, Ty>, Scalar, Size) -> ArgAttributes,
    ) -> Self {
        let mode = match layout.backend_repr {
            BackendRepr::Scalar(scalar) => {
                PassMode::Direct(scalar_attrs(&layout, scalar, Size::ZERO))
            }
            BackendRepr::ScalarPair(a, b) => PassMode::Pair(
                scalar_attrs(&layout, a, Size::ZERO),
                // The second element sits at the first element's size rounded up
                // to the second element's alignment.
                scalar_attrs(&layout, b, a.size(cx).align_to(b.align(cx).abi)),
            ),
            BackendRepr::SimdVector { .. } => PassMode::Direct(ArgAttributes::new()),
            BackendRepr::Memory { .. } => Self::indirect_pass_mode(&layout),
        };
        ArgAbi { layout, mode }
    }

    /// Builds the default `PassMode::Indirect` for `layout`: a hidden pointer with
    /// conservative pointer attributes, plus metadata attributes for unsized types.
    fn indirect_pass_mode(layout: &TyAndLayout<'a, Ty>) -> PassMode {
        let mut attrs = ArgAttributes::new();

        // For non-immediate arguments the callee gets its own copy of
        // the value on the stack, so there are no aliases. It's also
        // program-invisible so can't possibly capture
        attrs
            .set(ArgAttribute::NoAlias)
            .set(ArgAttribute::NoCapture)
            .set(ArgAttribute::NonNull)
            .set(ArgAttribute::NoUndef);
        attrs.pointee_size = layout.size;
        attrs.pointee_align = Some(layout.align.abi);

        // Unsized arguments additionally carry metadata (vtable or length).
        let meta_attrs = layout.is_unsized().then_some(ArgAttributes::new());

        PassMode::Indirect { attrs, meta_attrs, on_stack: false }
    }

    /// Pass this argument directly instead. Should NOT be used!
    /// Only exists because of past ABI mistakes that will take time to fix
    /// (see <https://github.com/rust-lang/rust/issues/115666>).
    ///
    /// Panics on `PassMode::Cast` (the only mode not covered by the arms below).
    #[track_caller]
    pub fn make_direct_deprecated(&mut self) {
        match self.mode {
            PassMode::Indirect { .. } => {
                self.mode = PassMode::Direct(ArgAttributes::new());
            }
            PassMode::Ignore | PassMode::Direct(_) | PassMode::Pair(_, _) => {} // already direct
            _ => panic!("Tried to make {:?} direct", self.mode),
        }
    }

    /// Pass this argument indirectly, by passing a (thin or wide) pointer to the argument instead.
    /// This is valid for both sized and unsized arguments.
    ///
    /// Panics on `PassMode::Ignore`, `PassMode::Cast`, and on-stack indirect modes.
    #[track_caller]
    pub fn make_indirect(&mut self) {
        match self.mode {
            PassMode::Direct(_) | PassMode::Pair(_, _) => {
                self.mode = Self::indirect_pass_mode(&self.layout);
            }
            PassMode::Indirect { attrs: _, meta_attrs: _, on_stack: false } => {
                // already indirect
            }
            _ => panic!("Tried to make {:?} indirect", self.mode),
        }
    }

    /// Same as `make_indirect`, but for arguments that are ignored. Only needed for ABIs that pass
    /// ZSTs indirectly.
    #[track_caller]
    pub fn make_indirect_from_ignore(&mut self) {
        match self.mode {
            PassMode::Ignore => {
                self.mode = Self::indirect_pass_mode(&self.layout);
            }
            PassMode::Indirect { attrs: _, meta_attrs: _, on_stack: false } => {
                // already indirect
            }
            _ => panic!("Tried to make {:?} indirect (expected `PassMode::Ignore`)", self.mode),
        }
    }

    /// Pass this argument indirectly, by placing it at a fixed stack offset.
    /// This corresponds to the `byval` LLVM argument attribute.
    /// This is only valid for sized arguments.
    ///
    /// `byval_align` specifies the alignment of the `byval` stack slot, which does not need to
    /// correspond to the type's alignment. This will be `Some` if the target's ABI specifies that
    /// stack slots used for arguments passed by-value have specific alignment requirements which
    /// differ from the alignment used in other situations.
    ///
    /// If `None`, the type's alignment is used.
    ///
    /// If the resulting alignment differs from the type's alignment,
    /// the argument will be copied to an alloca with sufficient alignment,
    /// either in the caller (if the type's alignment is lower than the byval alignment)
    /// or in the callee (if the type's alignment is higher than the byval alignment),
    /// to ensure that Rust code never sees an underaligned pointer.
    pub fn pass_by_stack_offset(&mut self, byval_align: Option<Align>) {
        assert!(!self.layout.is_unsized(), "used byval ABI for unsized layout");
        // `make_indirect` panics unless the current mode allows indirection, so
        // after this call the mode is guaranteed to be a non-on-stack `Indirect`.
        self.make_indirect();
        match self.mode {
            PassMode::Indirect { ref mut attrs, meta_attrs: _, ref mut on_stack } => {
                *on_stack = true;

                // Some platforms, like 32-bit x86, change the alignment of the type when passing
                // `byval`. Account for that.
                if let Some(byval_align) = byval_align {
                    // On all targets with byval align this is currently true, so let's assert it.
                    debug_assert!(byval_align >= Align::from_bytes(4).unwrap());
                    attrs.pointee_align = Some(byval_align);
                }
            }
            _ => unreachable!(),
        }
    }

    /// If this is a directly-passed integer narrower than `bits`, requests that it be
    /// zero- or sign-extended (matching its signedness) to `bits` on the way into the call.
    pub fn extend_integer_width_to(&mut self, bits: u64) {
        // Only integers have signedness
        if let BackendRepr::Scalar(scalar) = self.layout.backend_repr {
            if let Primitive::Int(i, signed) = scalar.primitive() {
                if i.size().bits() < bits {
                    if let PassMode::Direct(ref mut attrs) = self.mode {
                        if signed {
                            attrs.ext(ArgExtension::Sext)
                        } else {
                            attrs.ext(ArgExtension::Zext)
                        };
                    }
                }
            }
        }
    }

    /// Switches this argument to `PassMode::Cast` with the given target and no padding.
    pub fn cast_to<T: Into<CastTarget>>(&mut self, target: T) {
        self.mode = PassMode::Cast { cast: Box::new(target.into()), pad_i32: false };
    }

    /// Like `cast_to`, but also lets the caller request an `i32` padding argument.
    pub fn cast_to_and_pad_i32<T: Into<CastTarget>>(&mut self, target: T, pad_i32: bool) {
        self.mode = PassMode::Cast { cast: Box::new(target.into()), pad_i32 };
    }

    /// Whether the argument is passed via hidden pointer (sized or unsized).
    pub fn is_indirect(&self) -> bool {
        matches!(self.mode, PassMode::Indirect { .. })
    }

    /// Whether the argument is passed indirectly without metadata (i.e., it is sized).
    pub fn is_sized_indirect(&self) -> bool {
        matches!(self.mode, PassMode::Indirect { attrs: _, meta_attrs: None, on_stack: _ })
    }

    /// Whether the argument is passed indirectly with metadata (i.e., it is unsized).
    pub fn is_unsized_indirect(&self) -> bool {
        matches!(self.mode, PassMode::Indirect { attrs: _, meta_attrs: Some(_), on_stack: _ })
    }

    /// Whether the argument is skipped entirely (ZST).
    pub fn is_ignore(&self) -> bool {
        matches!(self.mode, PassMode::Ignore)
    }

    /// Checks if these two `ArgAbi` are equal enough to be considered "the same for all
    /// function call ABIs".
    pub fn eq_abi(&self, other: &Self) -> bool
    where
        Ty: PartialEq,
    {
        // Ideally we'd just compare the `mode`, but that is not enough -- for some modes LLVM will look
        // at the type.
        self.layout.eq_abi(&other.layout) && self.mode.eq_abi(&other.mode) && {
            // `fn_arg_sanity_check` accepts `PassMode::Direct` for some aggregates.
            // That elevates any type difference to an ABI difference since we just use the
            // full Rust type as the LLVM argument/return type.
            if matches!(self.mode, PassMode::Direct(..))
                && matches!(self.layout.backend_repr, BackendRepr::Memory { .. })
            {
                // For aggregates in `Direct` mode to be compatible, the types need to be equal.
                self.layout.ty == other.layout.ty
            } else {
                true
            }
        }
    }
}
531
/// The calling convention used by a function, as seen by the backend.
#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug, HashStable_Generic)]
pub enum Conv {
    // General language calling conventions, for which every target
    // should have its own backend (e.g. LLVM) support.
    C,
    Rust,

    Cold,
    PreserveMost,
    PreserveAll,

    // Target-specific calling conventions.
    ArmAapcs,
    CCmseNonSecureCall,
    CCmseNonSecureEntry,

    Msp430Intr,

    GpuKernel,

    X86Fastcall,
    X86Intr,
    X86Stdcall,
    X86ThisCall,
    X86VectorCall,

    X86_64SysV,
    X86_64Win64,

    AvrInterrupt,
    AvrNonBlockingInterrupt,

    RiscvInterrupt { kind: RiscvInterruptKind },
}
566
/// Which privilege level a RISC-V interrupt handler runs at.
#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug, HashStable_Generic)]
pub enum RiscvInterruptKind {
    /// Machine-mode interrupt handler.
    Machine,
    /// Supervisor-mode interrupt handler.
    Supervisor,
}
572
573impl RiscvInterruptKind {
574 pub fn as_str(&self) -> &'static str {
575 match self {
576 Self::Machine => "machine",
577 Self::Supervisor => "supervisor",
578 }
579 }
580}
581
/// Metadata describing how the arguments to a native function
/// should be passed in order to respect the native ABI.
///
/// The signature represented by this type may not match the MIR function signature.
/// Certain attributes, like `#[track_caller]` can introduce additional arguments, which are present in [`FnAbi`], but not in `FnSig`.
/// While this difference is rarely relevant, it should still be kept in mind.
///
/// I will do my best to describe this structure, but these
/// comments are reverse-engineered and may be inaccurate. -NDM
#[derive(Clone, PartialEq, Eq, Hash, HashStable_Generic)]
pub struct FnAbi<'a, Ty> {
    /// The type, layout, and information about how each argument is passed.
    pub args: Box<[ArgAbi<'a, Ty>]>,

    /// The layout, type, and the way a value is returned from this function.
    pub ret: ArgAbi<'a, Ty>,

    /// Marks this function as variadic (accepting a variable number of arguments).
    pub c_variadic: bool,

    /// The count of non-variadic arguments.
    ///
    /// Should only be different from args.len() when c_variadic is true.
    /// This can be used to know whether an argument is variadic or not.
    pub fixed_count: u32,
    /// The calling convention of this function.
    pub conv: Conv,
    /// Indicates if an unwind may happen across a call to this function.
    pub can_unwind: bool,
}
612
613// Needs to be a custom impl because of the bounds on the `TyAndLayout` debug impl.
614impl<'a, Ty: fmt::Display> fmt::Debug for FnAbi<'a, Ty> {
615 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
616 let FnAbi { args, ret, c_variadic, fixed_count, conv, can_unwind } = self;
617 f.debug_struct("FnAbi")
618 .field("args", args)
619 .field("ret", ret)
620 .field("c_variadic", c_variadic)
621 .field("fixed_count", fixed_count)
622 .field("conv", conv)
623 .field("can_unwind", can_unwind)
624 .finish()
625 }
626}
627
impl<'a, Ty> FnAbi<'a, Ty> {
    /// Adjusts this function's ABI for a foreign (non-Rust) `extern` ABI by dispatching
    /// to the architecture-specific lowering in the sibling modules, based on
    /// `cx.target_spec().arch`.
    ///
    /// Panics if the target architecture has no lowering implemented here.
    pub fn adjust_for_foreign_abi<C>(&mut self, cx: &C, abi: ExternAbi)
    where
        Ty: TyAbiInterface<'a, C> + Copy,
        C: HasDataLayout + HasTargetSpec + HasWasmCAbiOpt + HasX86AbiOpt,
    {
        if abi == ExternAbi::X86Interrupt {
            // `extern "x86-interrupt"` receives its (first) argument at a fixed stack
            // offset; no further per-arch adjustment is performed for this ABI.
            if let Some(arg) = self.args.first_mut() {
                arg.pass_by_stack_offset(None);
            }
            return;
        }

        let spec = cx.target_spec();
        match &spec.arch[..] {
            "x86" => {
                let (flavor, regparm) = match abi {
                    ExternAbi::Fastcall { .. } | ExternAbi::Vectorcall { .. } => {
                        (x86::Flavor::FastcallOrVectorcall, None)
                    }
                    ExternAbi::C { .. } | ExternAbi::Cdecl { .. } | ExternAbi::Stdcall { .. } => {
                        // Only these ABIs honor the `-Zregparm`-style target option.
                        (x86::Flavor::General, cx.x86_abi_opt().regparm)
                    }
                    _ => (x86::Flavor::General, None),
                };
                let reg_struct_return = cx.x86_abi_opt().reg_struct_return;
                let opts = x86::X86Options { flavor, regparm, reg_struct_return };
                if spec.is_like_msvc {
                    x86_win32::compute_abi_info(cx, self, opts);
                } else {
                    x86::compute_abi_info(cx, self, opts);
                }
            }
            "x86_64" => match abi {
                ExternAbi::SysV64 { .. } => x86_64::compute_abi_info(cx, self),
                ExternAbi::Win64 { .. } | ExternAbi::Vectorcall { .. } => {
                    x86_win64::compute_abi_info(cx, self)
                }
                _ => {
                    // For other ABIs, pick the platform-default x86_64 convention.
                    if cx.target_spec().is_like_windows {
                        x86_win64::compute_abi_info(cx, self)
                    } else {
                        x86_64::compute_abi_info(cx, self)
                    }
                }
            },
            "aarch64" | "arm64ec" => {
                let kind = if cx.target_spec().is_like_darwin {
                    aarch64::AbiKind::DarwinPCS
                } else if cx.target_spec().is_like_windows {
                    aarch64::AbiKind::Win64
                } else {
                    aarch64::AbiKind::AAPCS
                };
                aarch64::compute_abi_info(cx, self, kind)
            }
            "amdgpu" => amdgpu::compute_abi_info(cx, self),
            "arm" => arm::compute_abi_info(cx, self),
            "avr" => avr::compute_abi_info(self),
            "loongarch64" => loongarch::compute_abi_info(cx, self),
            "m68k" => m68k::compute_abi_info(self),
            "csky" => csky::compute_abi_info(self),
            "mips" | "mips32r6" => mips::compute_abi_info(cx, self),
            "mips64" | "mips64r6" => mips64::compute_abi_info(cx, self),
            "powerpc" => powerpc::compute_abi_info(cx, self),
            "powerpc64" => powerpc64::compute_abi_info(cx, self),
            "s390x" => s390x::compute_abi_info(cx, self),
            "msp430" => msp430::compute_abi_info(self),
            "sparc" => sparc::compute_abi_info(cx, self),
            "sparc64" => sparc64::compute_abi_info(cx, self),
            "nvptx64" => {
                let abi = cx.target_spec().adjust_abi(abi, self.c_variadic);
                if abi == ExternAbi::PtxKernel || abi == ExternAbi::GpuKernel {
                    nvptx64::compute_ptx_kernel_abi_info(cx, self)
                } else {
                    nvptx64::compute_abi_info(self)
                }
            }
            "hexagon" => hexagon::compute_abi_info(self),
            "xtensa" => xtensa::compute_abi_info(cx, self),
            "riscv32" | "riscv64" => riscv::compute_abi_info(cx, self),
            "wasm32" => {
                if spec.os == "unknown" && matches!(cx.wasm_c_abi_opt(), WasmCAbi::Legacy { .. }) {
                    wasm::compute_wasm_abi_info(self)
                } else {
                    wasm::compute_c_abi_info(cx, self)
                }
            }
            "wasm64" => wasm::compute_c_abi_info(cx, self),
            "bpf" => bpf::compute_abi_info(self),
            arch => panic!("no lowering implemented for {arch}"),
        }
    }

    /// Adjusts this function's ABI for the Rust ABI: applies per-arch Rust-ABI tweaks,
    /// then forces large return values and (on most targets) SIMD values to be passed
    /// indirectly, and casts small aggregates to integers.
    pub fn adjust_for_rust_abi<C>(&mut self, cx: &C)
    where
        Ty: TyAbiInterface<'a, C> + Copy,
        C: HasDataLayout + HasTargetSpec,
    {
        let spec = cx.target_spec();
        match &*spec.arch {
            "x86" => x86::compute_rust_abi_info(cx, self),
            "riscv32" | "riscv64" => riscv::compute_rust_abi_info(cx, self),
            "loongarch64" => loongarch::compute_rust_abi_info(cx, self),
            "aarch64" => aarch64::compute_rust_abi_info(cx, self),
            _ => {}
        };

        // Decides whether we can pass the given SIMD argument via `PassMode::Direct`.
        // May only return `true` if the target will always pass those arguments the same way,
        // no matter what the user does with `-Ctarget-feature`! In other words, whatever
        // target features are required to pass a SIMD value in registers must be listed in
        // the `abi_required_features` for the current target and ABI.
        let can_pass_simd_directly = |arg: &ArgAbi<'_, Ty>| match &*spec.arch {
            // On x86, if we have SSE2 (which we have by default for x86_64), we can always pass up
            // to 128-bit-sized vectors.
            "x86" if spec.rustc_abi == Some(RustcAbi::X86Sse2) => arg.layout.size.bits() <= 128,
            "x86_64" if spec.rustc_abi != Some(RustcAbi::X86Softfloat) => {
                // FIXME once https://github.com/bytecodealliance/wasmtime/issues/10254 is fixed
                // accept vectors up to 128bit rather than vectors of exactly 128bit.
                arg.layout.size.bits() == 128
            }
            // So far, we haven't implemented this logic for any other target.
            _ => false,
        };

        // Iterate over all arguments (with their index) and then the return place
        // (with index `None`), so the same adjustments apply uniformly.
        for (arg_idx, arg) in self
            .args
            .iter_mut()
            .enumerate()
            .map(|(idx, arg)| (Some(idx), arg))
            .chain(iter::once((None, &mut self.ret)))
        {
            // If the logic above already picked a specific type to cast the argument to, leave that
            // in place.
            if matches!(arg.mode, PassMode::Ignore | PassMode::Cast { .. }) {
                continue;
            }

            if arg_idx.is_none()
                && arg.layout.size > Primitive::Pointer(AddressSpace::DATA).size(cx) * 2
                && !matches!(arg.layout.backend_repr, BackendRepr::SimdVector { .. })
            {
                // Return values larger than 2 registers using a return area
                // pointer. LLVM and Cranelift disagree about how to return
                // values that don't fit in the registers designated for return
                // values. LLVM will force the entire return value to be passed
                // by return area pointer, while Cranelift will look at each IR level
                // return value independently and decide to pass it in a
                // register or not, which would result in the return value
                // being passed partially in registers and partially through a
                // return area pointer. For large IR-level values such as `i128`,
                // cranelift will even split up the value into smaller chunks.
                //
                // While Cranelift may need to be fixed as the LLVM behavior is
                // generally more correct with respect to the surface language,
                // forcing this behavior in rustc itself makes it easier for
                // other backends to conform to the Rust ABI and for the C ABI
                // rustc already handles this behavior anyway.
                //
                // In addition LLVM's decision to pass the return value in
                // registers or using a return area pointer depends on how
                // exactly the return type is lowered to an LLVM IR type. For
                // example `Option<u128>` can be lowered as `{ i128, i128 }`
                // in which case the x86_64 backend would use a return area
                // pointer, or it could be passed as `{ i32, i128 }` in which
                // case the x86_64 backend would pass it in registers by taking
                // advantage of an LLVM ABI extension that allows using 3
                // registers for the x86_64 sysv call conv rather than the
                // officially specified 2 registers.
                //
                // FIXME: Technically we should look at the amount of available
                // return registers rather than guessing that there are 2
                // registers for return values. In practice only a couple of
                // architectures have less than 2 return registers. None of
                // which supported by Cranelift.
                //
                // NOTE: This adjustment is only necessary for the Rust ABI as
                // for other ABI's the calling convention implementations in
                // rustc_target already ensure any return value which doesn't
                // fit in the available amount of return registers is passed in
                // the right way for the current target.
                //
                // The adjustment is not necessary nor desired for types with a vector
                // representation; those are handled below.
                arg.make_indirect();
                continue;
            }

            match arg.layout.backend_repr {
                BackendRepr::Memory { .. } => {
                    // Compute `Aggregate` ABI.

                    let is_indirect_not_on_stack =
                        matches!(arg.mode, PassMode::Indirect { on_stack: false, .. });
                    assert!(is_indirect_not_on_stack);

                    let size = arg.layout.size;
                    if arg.layout.is_sized()
                        && size <= Primitive::Pointer(AddressSpace::DATA).size(cx)
                    {
                        // We want to pass small aggregates as immediates, but using
                        // an LLVM aggregate type for this leads to bad optimizations,
                        // so we pick an appropriately sized integer type instead.
                        arg.cast_to(Reg { kind: RegKind::Integer, size });
                    }
                }

                BackendRepr::SimdVector { .. } => {
                    // This is a fun case! The gist of what this is doing is
                    // that we want callers and callees to always agree on the
                    // ABI of how they pass SIMD arguments. If we were to *not*
                    // make these arguments indirect then they'd be immediates
                    // in LLVM, which means that they'd used whatever the
                    // appropriate ABI is for the callee and the caller. That
                    // means, for example, if the caller doesn't have AVX
                    // enabled but the callee does, then passing an AVX argument
                    // across this boundary would cause corrupt data to show up.
                    //
                    // This problem is fixed by unconditionally passing SIMD
                    // arguments through memory between callers and callees
                    // which should get them all to agree on ABI regardless of
                    // target feature sets. Some more information about this
                    // issue can be found in #44367.
                    //
                    // Note that the intrinsic ABI is exempt here as those are not
                    // real functions anyway, and the backend expects very specific types.
                    if spec.simd_types_indirect && !can_pass_simd_directly(arg) {
                        arg.make_indirect();
                    }
                }

                _ => {}
            }
        }
    }
}
865
866impl FromStr for Conv {
867 type Err = String;
868
869 fn from_str(s: &str) -> Result<Self, Self::Err> {
870 match s {
871 "C" => Ok(Conv::C),
872 "Rust" => Ok(Conv::Rust),
873 "RustCold" => Ok(Conv::Rust),
874 "ArmAapcs" => Ok(Conv::ArmAapcs),
875 "CCmseNonSecureCall" => Ok(Conv::CCmseNonSecureCall),
876 "CCmseNonSecureEntry" => Ok(Conv::CCmseNonSecureEntry),
877 "Msp430Intr" => Ok(Conv::Msp430Intr),
878 "X86Fastcall" => Ok(Conv::X86Fastcall),
879 "X86Intr" => Ok(Conv::X86Intr),
880 "X86Stdcall" => Ok(Conv::X86Stdcall),
881 "X86ThisCall" => Ok(Conv::X86ThisCall),
882 "X86VectorCall" => Ok(Conv::X86VectorCall),
883 "X86_64SysV" => Ok(Conv::X86_64SysV),
884 "X86_64Win64" => Ok(Conv::X86_64Win64),
885 "GpuKernel" => Ok(Conv::GpuKernel),
886 "AvrInterrupt" => Ok(Conv::AvrInterrupt),
887 "AvrNonBlockingInterrupt" => Ok(Conv::AvrNonBlockingInterrupt),
888 "RiscvInterrupt(machine)" => {
889 Ok(Conv::RiscvInterrupt { kind: RiscvInterruptKind::Machine })
890 }
891 "RiscvInterrupt(supervisor)" => {
892 Ok(Conv::RiscvInterrupt { kind: RiscvInterruptKind::Supervisor })
893 }
894 _ => Err(format!("'{s}' is not a valid value for entry function call convention.")),
895 }
896 }
897}
898
/// Maps a `Conv` back to the `ExternAbi` it is displayed as (all `unwind` flags `false`).
///
/// `Cold` and `PreserveAll` hit `unreachable!()` — the author asserts this function is
/// never called with them (NOTE(review): presumably because they have no surface-ABI
/// spelling; confirm against callers).
fn conv_to_externabi(conv: &Conv) -> ExternAbi {
    match conv {
        Conv::C => ExternAbi::C { unwind: false },
        Conv::Rust => ExternAbi::Rust,
        Conv::PreserveMost => ExternAbi::RustCold,
        Conv::ArmAapcs => ExternAbi::Aapcs { unwind: false },
        Conv::CCmseNonSecureCall => ExternAbi::CCmseNonSecureCall,
        Conv::CCmseNonSecureEntry => ExternAbi::CCmseNonSecureEntry,
        Conv::Msp430Intr => ExternAbi::Msp430Interrupt,
        Conv::GpuKernel => ExternAbi::GpuKernel,
        Conv::X86Fastcall => ExternAbi::Fastcall { unwind: false },
        Conv::X86Intr => ExternAbi::X86Interrupt,
        Conv::X86Stdcall => ExternAbi::Stdcall { unwind: false },
        Conv::X86ThisCall => ExternAbi::Thiscall { unwind: false },
        Conv::X86VectorCall => ExternAbi::Vectorcall { unwind: false },
        Conv::X86_64SysV => ExternAbi::SysV64 { unwind: false },
        Conv::X86_64Win64 => ExternAbi::Win64 { unwind: false },
        Conv::AvrInterrupt => ExternAbi::AvrInterrupt,
        Conv::AvrNonBlockingInterrupt => ExternAbi::AvrNonBlockingInterrupt,
        Conv::RiscvInterrupt { kind: RiscvInterruptKind::Machine } => ExternAbi::RiscvInterruptM,
        Conv::RiscvInterrupt { kind: RiscvInterruptKind::Supervisor } => ExternAbi::RiscvInterruptS,
        Conv::Cold | Conv::PreserveAll => unreachable!(),
    }
}
923
924impl Display for Conv {
925 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
926 write!(f, "{}", conv_to_externabi(self))
927 }
928}
929
// Some types are used a lot. Make sure they don't unintentionally get bigger.
// (If a size change here is intentional, update the asserted constants.)
#[cfg(target_pointer_width = "64")]
mod size_asserts {
    use rustc_data_structures::static_assert_size;

    use super::*;
    // tidy-alphabetical-start
    static_assert_size!(ArgAbi<'_, usize>, 56);
    static_assert_size!(FnAbi<'_, usize>, 80);
    // tidy-alphabetical-end
}