// rustc_target/callconv/aarch64.rs

use std::iter;

use rustc_abi::{BackendRepr, HasDataLayout, Primitive, TyAbiInterface};

use crate::callconv::{ArgAbi, FnAbi, Reg, RegKind, Uniform};
use crate::spec::{Abi, HasTargetSpec, Target};

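/// Which variant of the AArch64 calling convention to use: the standard AAPCS64,
/// Apple's Darwin variant, or the Windows Arm64 calling convention.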
#[derive(Copy, Clone, PartialEq)]
pub(crate) enum AbiKind {
    AAPCS,
    DarwinPCS,
    Win64,
}

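/// Determines whether `arg` is a homogeneous floating-point or short-vector aggregate
/// (HFA/HVA), which AAPCS64 passes in consecutive SIMD/floating-point registers.
/// Returns the matching `Uniform` cast target, or `None` if the layout does not qualify.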
fn is_homogeneous_aggregate<'a, Ty, C>(cx: &C, arg: &mut ArgAbi<'a, Ty>) -> Option<Uniform>
where
    Ty: TyAbiInterface<'a, C> + Copy,
    C: HasDataLayout + HasTargetSpec,
{
    arg.layout.homogeneous_aggregate(cx).ok().and_then(|ha| ha.unit()).and_then(|unit| {
        let size = arg.layout.size;

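        // AAPCS64 only treats aggregates with at most four members as homogeneous.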
        if size > unit.size.checked_mul(4, cx).unwrap() {
            return None;
        }

        let valid_unit = match unit.kind {
            RegKind::Integer => false,
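            // On softfloat targets floats are passed in integer registers, so they do
            // not get homogeneous-aggregate treatment.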
            RegKind::Float => cx.target_spec().abi != Abi::SoftFloat,
            RegKind::Vector => size.bits() == 64 || size.bits() == 128,
        };

        valid_unit.then_some(Uniform::consecutive(unit, size))
    })
}

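/// On softfloat targets, rewrites float-containing scalars so that they are passed in
/// integer registers (or indirectly) instead of floating-point registers.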
fn softfloat_float_abi<Ty>(target: &Target, arg: &mut ArgAbi<'_, Ty>) {
    if target.abi != Abi::SoftFloat {
        return;
    }
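    // A single float scalar becomes an equally-sized integer. A scalar pair containing a
    // float is packed into a single integer if it fits in a pointer-sized value,
    // otherwise it is passed indirectly.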
    if let BackendRepr::Scalar(s) = arg.layout.backend_repr
        && let Primitive::Float(f) = s.primitive()
    {
        arg.cast_to(Reg { kind: RegKind::Integer, size: f.size() });
    } else if let BackendRepr::ScalarPair(s1, s2) = arg.layout.backend_repr
        && (matches!(s1.primitive(), Primitive::Float(_))
            || matches!(s2.primitive(), Primitive::Float(_)))
    {
        if arg.layout.size.bits() <= target.pointer_width.into() {
            arg.cast_to(Reg { kind: RegKind::Integer, size: arg.layout.size });
        } else {
            arg.make_indirect();
        }
    }
}

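/// Classifies how a value is returned: small non-aggregates may be extended,
/// homogeneous aggregates are cast to a uniform register type, small aggregates are
/// returned in integer registers, and anything larger is returned indirectly.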
fn classify_ret<'a, Ty, C>(cx: &C, ret: &mut ArgAbi<'a, Ty>, kind: AbiKind)
where
    Ty: TyAbiInterface<'a, C> + Copy,
    C: HasDataLayout + HasTargetSpec,
{
    if !ret.layout.is_sized() {
        return;
    }
    if !ret.layout.is_aggregate() {
        if kind == AbiKind::DarwinPCS {
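            // Darwin's AArch64 ABI requires integer values narrower than 32 bits to be
            // sign- or zero-extended to 32 bits, unlike standard AAPCS64.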
            ret.extend_integer_width_to(32)
        }
        softfloat_float_abi(cx.target_spec(), ret);
        return;
    }
    if let Some(uniform) = is_homogeneous_aggregate(cx, ret) {
        ret.cast_to(uniform);
        return;
    }
    let size = ret.layout.size;
    let bits = size.bits();
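    // Returns of up to 128 bits are passed back in (up to two) 64-bit registers;
    // anything larger goes through an indirect return pointer.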
    if bits <= 128 {
        ret.cast_to(Uniform::new(Reg::i64(), size));
        return;
    }
    ret.make_indirect();
}

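/// Classifies how an argument is passed; mirrors `classify_ret`, with additional
/// handling for layouts that must always be passed indirectly and for 16-byte-aligned
/// aggregates.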
fn classify_arg<'a, Ty, C>(cx: &C, arg: &mut ArgAbi<'a, Ty>, kind: AbiKind)
where
    Ty: TyAbiInterface<'a, C> + Copy,
    C: HasDataLayout + HasTargetSpec,
{
    if !arg.layout.is_sized() {
        return;
    }
    if arg.layout.pass_indirectly_in_non_rustic_abis(cx) {
        arg.make_indirect();
        return;
    }
    if !arg.layout.is_aggregate() {
        if kind == AbiKind::DarwinPCS {
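            // As with return values, Darwin expects small integer arguments to be
            // extended to 32 bits.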
            arg.extend_integer_width_to(32);
        }
        softfloat_float_abi(cx.target_spec(), arg);

        return;
    }
    if let Some(uniform) = is_homogeneous_aggregate(cx, arg) {
        arg.cast_to(uniform);
        return;
    }
    let size = arg.layout.size;
    let align = if kind == AbiKind::AAPCS {
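        // AAPCS64 classifies small aggregates by their natural alignment, so any extra
        // alignment requested via attributes is deliberately ignored here (hence
        // `unadjusted_abi_align`); this appears to mirror Clang's AArch64 handling.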
        arg.layout.unadjusted_abi_align
    } else {
        arg.layout.align.abi
    };
    if size.bits() <= 128 {
        if align.bits() == 128 {
            arg.cast_to(Uniform::new(Reg::i128(), size));
        } else {
            arg.cast_to(Uniform::new(Reg::i64(), size));
        }
        return;
    }
    arg.make_indirect();
}

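/// Computes the ABI of a function under one of the C-family AArch64 calling conventions:
/// the return value and each non-ignored argument are classified according to `kind`.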
pub(crate) fn compute_abi_info<'a, Ty, C>(cx: &C, fn_abi: &mut FnAbi<'a, Ty>, kind: AbiKind)
where
    Ty: TyAbiInterface<'a, C> + Copy,
    C: HasDataLayout + HasTargetSpec,
{
    if !fn_abi.ret.is_ignore() {
        classify_ret(cx, &mut fn_abi.ret, kind);
    }

    for arg in fn_abi.args.iter_mut() {
        if arg.is_ignore() {
            continue;
        }
        classify_arg(cx, arg, kind);
    }
}

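/// Target-specific adjustment for the Rust calling convention: applies the softfloat
/// float rewrite to every argument and to the return value.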
pub(crate) fn compute_rust_abi_info<'a, Ty, C>(cx: &C, fn_abi: &mut FnAbi<'a, Ty>)
where
    Ty: TyAbiInterface<'a, C> + Copy,
    C: HasDataLayout + HasTargetSpec,
{
    for arg in fn_abi.args.iter_mut().chain(iter::once(&mut fn_abi.ret)) {
        softfloat_float_abi(cx.target_spec(), arg);
    }
}