//! LoongArch calling-convention lowering (`rustc_target/callconv/loongarch.rs`).

1use rustc_abi::{
2    BackendRepr, FieldsShape, HasDataLayout, Primitive, Reg, RegKind, Size, TyAbiInterface,
3    TyAndLayout, Variants,
4};
5
6use crate::callconv::{ArgAbi, ArgExtension, CastTarget, FnAbi, PassMode, Uniform};
7use crate::spec::HasTargetSpec;
8
/// Classification of one scalar leaf field found while deciding whether a
/// value can use the floating-point calling convention.
#[derive(Copy, Clone)]
enum RegPassKind {
    /// A float field; `offset_from_start` is its byte offset from the start of
    /// the outermost value being classified.
    Float { offset_from_start: Size, ty: Reg },
    /// An integer (or pointer) field, with its byte offset from the start.
    Integer { offset_from_start: Size, ty: Reg },
    /// This field slot has not been classified yet.
    Unknown,
}
15
/// Register shape used when a value qualifies for the floating-point
/// calling convention (see `should_use_fp_conv`).
#[derive(Copy, Clone)]
enum FloatConv {
    /// Two float fields, each in its own FPR; the second field's byte offset
    /// from the start of the value is carried along for the cast.
    FloatPair { first_ty: Reg, second_ty_offset_from_start: Size, second_ty: Reg },
    /// A single float in one FPR.
    Float(Reg),
    /// One integer field and one float field (in either order), using one GPR
    /// and one FPR.
    MixedPair { first_ty: Reg, second_ty_offset_from_start: Size, second_ty: Reg },
}
22
/// Marker error: the value cannot be passed via the FP calling convention.
#[derive(Copy, Clone)]
struct CannotUseFpConv;
25
26fn is_loongarch_aggregate<Ty>(arg: &ArgAbi<'_, Ty>) -> bool {
27    match arg.layout.backend_repr {
28        BackendRepr::SimdVector { .. } => true,
29        _ => arg.layout.is_aggregate(),
30    }
31}
32
/// Recursively walks `arg_layout`, classifying its (at most two) scalar leaf
/// fields to decide whether the value can be passed via the floating-point
/// calling convention.
///
/// `field1_kind` / `field2_kind` accumulate the classification of the first
/// and second scalar fields encountered, in increasing-offset order.
/// `offset_from_start` is the byte offset of `arg_layout` within the
/// outermost value being classified.
///
/// Returns `Err(CannotUseFpConv)` as soon as the layout is found to be
/// incompatible: more than two scalar fields, oversized scalars, vectors,
/// non-transparent unions, or multi-variant enums.
fn should_use_fp_conv_helper<'a, Ty, C>(
    cx: &C,
    arg_layout: &TyAndLayout<'a, Ty>,
    xlen: u64,
    flen: u64,
    field1_kind: &mut RegPassKind,
    field2_kind: &mut RegPassKind,
    offset_from_start: Size,
) -> Result<(), CannotUseFpConv>
where
    Ty: TyAbiInterface<'a, C> + Copy,
{
    match arg_layout.backend_repr {
        BackendRepr::Scalar(scalar) => match scalar.primitive() {
            Primitive::Int(..) | Primitive::Pointer(_) => {
                // Integers wider than a general-purpose register disqualify
                // the FP convention.
                if arg_layout.size.bits() > xlen {
                    return Err(CannotUseFpConv);
                }
                match (*field1_kind, *field2_kind) {
                    // First scalar seen: record it in slot 1.
                    (RegPassKind::Unknown, _) => {
                        *field1_kind = RegPassKind::Integer {
                            offset_from_start,
                            ty: Reg { kind: RegKind::Integer, size: arg_layout.size },
                        };
                    }
                    // A second integer is only accepted after a float first
                    // field (float+int mixed pair); int+int falls through to
                    // the rejection arm below.
                    (RegPassKind::Float { .. }, RegPassKind::Unknown) => {
                        *field2_kind = RegPassKind::Integer {
                            offset_from_start,
                            ty: Reg { kind: RegKind::Integer, size: arg_layout.size },
                        };
                    }
                    _ => return Err(CannotUseFpConv),
                }
            }
            Primitive::Float(_) => {
                // Floats wider than a floating-point register disqualify the
                // FP convention (flen is 0 on soft-float targets, rejecting
                // everything).
                if arg_layout.size.bits() > flen {
                    return Err(CannotUseFpConv);
                }
                match (*field1_kind, *field2_kind) {
                    // First scalar seen: record it in slot 1.
                    (RegPassKind::Unknown, _) => {
                        *field1_kind = RegPassKind::Float {
                            offset_from_start,
                            ty: Reg { kind: RegKind::Float, size: arg_layout.size },
                        };
                    }
                    // A float may follow either an int or a float first field.
                    (_, RegPassKind::Unknown) => {
                        *field2_kind = RegPassKind::Float {
                            offset_from_start,
                            ty: Reg { kind: RegKind::Float, size: arg_layout.size },
                        };
                    }
                    _ => return Err(CannotUseFpConv),
                }
            }
        },
        // Vectors are never decomposed into scalar FPR/GPR pairs.
        BackendRepr::SimdVector { .. } => return Err(CannotUseFpConv),
        BackendRepr::ScalarPair(..) | BackendRepr::Memory { .. } => match arg_layout.fields {
            FieldsShape::Primitive => {
                unreachable!("aggregates can't have `FieldsShape::Primitive`")
            }
            FieldsShape::Union(_) => {
                // ZST unions contribute no fields and are simply skipped.
                if !arg_layout.is_zst() {
                    // A `repr(transparent)` union delegates to its single
                    // non-1-ZST field; any other non-ZST union is rejected.
                    if arg_layout.is_transparent() {
                        let non_1zst_elem = arg_layout.non_1zst_field(cx).expect("not exactly one non-1-ZST field in non-ZST repr(transparent) union").1;
                        return should_use_fp_conv_helper(
                            cx,
                            &non_1zst_elem,
                            xlen,
                            flen,
                            field1_kind,
                            field2_kind,
                            offset_from_start,
                        );
                    }
                    return Err(CannotUseFpConv);
                }
            }
            FieldsShape::Array { count, .. } => {
                // All array elements share the layout of field 0; classify it
                // once per element at the successive element offsets.
                for i in 0..count {
                    let elem_layout = arg_layout.field(cx, 0);
                    should_use_fp_conv_helper(
                        cx,
                        &elem_layout,
                        xlen,
                        flen,
                        field1_kind,
                        field2_kind,
                        offset_from_start + elem_layout.size * i,
                    )?;
                }
            }
            FieldsShape::Arbitrary { .. } => {
                // Multi-variant enums can't use the FP convention; single- or
                // zero-variant layouts are classified field by field.
                match arg_layout.variants {
                    Variants::Multiple { .. } => return Err(CannotUseFpConv),
                    Variants::Single { .. } | Variants::Empty => (),
                }
                // Visit fields in increasing memory offset order so
                // field1/field2 reflect the in-memory field order.
                for i in arg_layout.fields.index_by_increasing_offset() {
                    let field = arg_layout.field(cx, i);
                    should_use_fp_conv_helper(
                        cx,
                        &field,
                        xlen,
                        flen,
                        field1_kind,
                        field2_kind,
                        offset_from_start + arg_layout.fields.offset(i),
                    )?;
                }
            }
        },
    }
    Ok(())
}
146
/// Decides whether `arg` can be passed via the floating-point calling
/// convention and, if so, with which register shape. Returns `None` when the
/// value must fall back to the integer convention.
fn should_use_fp_conv<'a, Ty, C>(
    cx: &C,
    arg: &TyAndLayout<'a, Ty>,
    xlen: u64,
    flen: u64,
) -> Option<FloatConv>
where
    Ty: TyAbiInterface<'a, C> + Copy,
{
    let mut field1_kind = RegPassKind::Unknown;
    let mut field2_kind = RegPassKind::Unknown;
    if should_use_fp_conv_helper(
        cx,
        arg,
        xlen,
        flen,
        &mut field1_kind,
        &mut field2_kind,
        Size::ZERO,
    )
    .is_err()
    {
        return None;
    }
    match (field1_kind, field2_kind) {
        // Invariant check: the first classified field must sit at the very
        // start of the value; a non-zero offset indicates a classifier bug.
        (
            RegPassKind::Integer { offset_from_start, .. }
            | RegPassKind::Float { offset_from_start, .. },
            _,
        ) if offset_from_start != Size::ZERO => {
            panic!("type {:?} has a first field with non-zero offset {offset_from_start:?}", arg.ty)
        }
        // int + float → one GPR and one FPR.
        (
            RegPassKind::Integer { ty: first_ty, .. },
            RegPassKind::Float { offset_from_start, ty: second_ty },
        ) => Some(FloatConv::MixedPair {
            first_ty,
            second_ty_offset_from_start: offset_from_start,
            second_ty,
        }),
        // float + int → one FPR and one GPR.
        (
            RegPassKind::Float { ty: first_ty, .. },
            RegPassKind::Integer { offset_from_start, ty: second_ty },
        ) => Some(FloatConv::MixedPair {
            first_ty,
            second_ty_offset_from_start: offset_from_start,
            second_ty,
        }),
        // float + float → two FPRs.
        (
            RegPassKind::Float { ty: first_ty, .. },
            RegPassKind::Float { offset_from_start, ty: second_ty },
        ) => Some(FloatConv::FloatPair {
            first_ty,
            second_ty_offset_from_start: offset_from_start,
            second_ty,
        }),
        // A lone float → one FPR.
        (RegPassKind::Float { ty, .. }, RegPassKind::Unknown) => Some(FloatConv::Float(ty)),
        // Anything else (e.g. a lone integer, or int+int) uses the integer
        // convention instead.
        _ => None,
    }
}
207
/// Classifies the return value for the LoongArch C ABI, rewriting `arg` in
/// place. Returns `true` when the return consumes a GPR that the caller's
/// argument-register accounting must subtract (the large-scalar/indirect
/// case below); `false` otherwise.
fn classify_ret<'a, Ty, C>(cx: &C, arg: &mut ArgAbi<'a, Ty>, xlen: u64, flen: u64) -> bool
where
    Ty: TyAbiInterface<'a, C> + Copy,
{
    if !arg.layout.is_sized() {
        // Not touching this...
        return false; // I guess? return value of this function is not documented
    }
    // Returns eligible for the FP convention are cast to the corresponding
    // float / pair shape and consume no GPR as far as this function's caller
    // is concerned.
    if let Some(conv) = should_use_fp_conv(cx, &arg.layout, xlen, flen) {
        match conv {
            FloatConv::Float(f) => {
                arg.cast_to(f);
            }
            FloatConv::FloatPair { first_ty, second_ty_offset_from_start, second_ty } => {
                arg.cast_to(CastTarget::offset_pair(
                    first_ty,
                    second_ty_offset_from_start,
                    second_ty,
                ));
            }
            FloatConv::MixedPair { first_ty, second_ty_offset_from_start, second_ty } => {
                arg.cast_to(CastTarget::offset_pair(
                    first_ty,
                    second_ty_offset_from_start,
                    second_ty,
                ));
            }
        }
        return false;
    }

    let total = arg.layout.size;

    // "Scalars wider than 2✕XLEN are passed by reference and are replaced in
    // the argument list with the address."
    // "Aggregates larger than 2✕XLEN bits are passed by reference and are
    // replaced in the argument list with the address, as are C++ aggregates
    // with nontrivial copy constructors, destructors, or vtables."
    if total.bits() > 2 * xlen {
        // We rely on the LLVM backend lowering code to lower passing a scalar larger than 2*XLEN.
        if is_loongarch_aggregate(arg) {
            arg.make_indirect();
        }
        return true;
    }

    let xlen_reg = match xlen {
        32 => Reg::i32(),
        64 => Reg::i64(),
        _ => unreachable!("Unsupported XLEN: {}", xlen),
    };
    // Small aggregates are returned in one or two integer registers.
    if is_loongarch_aggregate(arg) {
        if total.bits() <= xlen {
            arg.cast_to(xlen_reg);
        } else {
            arg.cast_to(Uniform::new(xlen_reg, Size::from_bits(xlen * 2)));
        }
        return false;
    }

    // "When passed in registers, scalars narrower than XLEN bits are widened
    // according to the sign of their type up to 32 bits, then sign-extended to
    // XLEN bits."
    extend_integer_width(arg, xlen);
    false
}
274
/// Classifies one argument for the LoongArch C ABI, rewriting `arg` in place
/// and decrementing the remaining GPR/FPR counters accordingly. Variadic
/// arguments (`is_vararg`) never use the FP convention.
fn classify_arg<'a, Ty, C>(
    cx: &C,
    arg: &mut ArgAbi<'a, Ty>,
    xlen: u64,
    flen: u64,
    is_vararg: bool,
    avail_gprs: &mut u64,
    avail_fprs: &mut u64,
) where
    Ty: TyAbiInterface<'a, C> + Copy,
{
    if !arg.layout.is_sized() {
        // Not touching this...
        return;
    }
    // Types that must always be passed indirectly in non-Rust ABIs consume
    // one GPR for the pointer.
    if arg.layout.pass_indirectly_in_non_rustic_abis(cx) {
        arg.make_indirect();
        *avail_gprs = (*avail_gprs).saturating_sub(1);
        return;
    }
    // Try the FP convention first — but only for fixed (non-variadic)
    // arguments, and only while enough FPRs/GPRs remain; otherwise fall
    // through to the integer convention below.
    if !is_vararg {
        match should_use_fp_conv(cx, &arg.layout, xlen, flen) {
            Some(FloatConv::Float(f)) if *avail_fprs >= 1 => {
                *avail_fprs -= 1;
                arg.cast_to(f);
                return;
            }
            Some(FloatConv::FloatPair { first_ty, second_ty_offset_from_start, second_ty })
                if *avail_fprs >= 2 =>
            {
                *avail_fprs -= 2;
                arg.cast_to(CastTarget::offset_pair(
                    first_ty,
                    second_ty_offset_from_start,
                    second_ty,
                ));
                return;
            }
            Some(FloatConv::MixedPair { first_ty, second_ty_offset_from_start, second_ty })
                if *avail_fprs >= 1 && *avail_gprs >= 1 =>
            {
                *avail_gprs -= 1;
                *avail_fprs -= 1;
                arg.cast_to(CastTarget::offset_pair(
                    first_ty,
                    second_ty_offset_from_start,
                    second_ty,
                ));
                return;
            }
            _ => (),
        }
    }

    let total = arg.layout.size;
    let align = arg.layout.align.bits();

    // "Scalars wider than 2✕XLEN are passed by reference and are replaced in
    // the argument list with the address."
    // "Aggregates larger than 2✕XLEN bits are passed by reference and are
    // replaced in the argument list with the address, as are C++ aggregates
    // with nontrivial copy constructors, destructors, or vtables."
    if total.bits() > 2 * xlen {
        // We rely on the LLVM backend lowering code to lower passing a scalar larger than 2*XLEN.
        if is_loongarch_aggregate(arg) {
            arg.make_indirect();
        }
        // One GPR carries the pointer (or the first part of the scalar).
        if *avail_gprs >= 1 {
            *avail_gprs -= 1;
        }
        return;
    }

    let double_xlen_reg = match xlen {
        32 => Reg::i64(),
        64 => Reg::i128(),
        _ => unreachable!("Unsupported XLEN: {}", xlen),
    };

    let xlen_reg = match xlen {
        32 => Reg::i32(),
        64 => Reg::i64(),
        _ => unreachable!("Unsupported XLEN: {}", xlen),
    };

    // Values larger than one register take a register pair.
    if total.bits() > xlen {
        let align_regs = align > xlen;
        if is_loongarch_aggregate(arg) {
            arg.cast_to(Uniform::new(
                if align_regs { double_xlen_reg } else { xlen_reg },
                Size::from_bits(xlen * 2),
            ));
        }
        // Over-aligned varargs start at an even register: skip the odd one.
        if align_regs && is_vararg {
            *avail_gprs -= *avail_gprs % 2;
        }
        if *avail_gprs >= 2 {
            *avail_gprs -= 2;
        } else {
            *avail_gprs = 0;
        }
        return;
    } else if is_loongarch_aggregate(arg) {
        // Register-sized (or smaller) aggregates go in a single GPR.
        arg.cast_to(xlen_reg);
        if *avail_gprs >= 1 {
            *avail_gprs -= 1;
        }
        return;
    }

    // "When passed in registers, scalars narrower than XLEN bits are widened
    // according to the sign of their type up to 32 bits, then sign-extended to
    // XLEN bits."
    if *avail_gprs >= 1 {
        extend_integer_width(arg, xlen);
        *avail_gprs -= 1;
    }
}
393
394fn extend_integer_width<Ty>(arg: &mut ArgAbi<'_, Ty>, xlen: u64) {
395    if let BackendRepr::Scalar(scalar) = arg.layout.backend_repr
396        && let Primitive::Int(i, _) = scalar.primitive()
397        && i.size().bits() == 32
398        && xlen > 32
399        && let PassMode::Direct(ref mut attrs) = arg.mode
400    {
401        // 32-bit integers are always sign-extended
402        attrs.ext(ArgExtension::Sext);
403        return;
404    }
405
406    arg.extend_integer_width_to(xlen);
407}
408
409pub(crate) fn compute_abi_info<'a, Ty, C>(cx: &C, fn_abi: &mut FnAbi<'a, Ty>)
410where
411    Ty: TyAbiInterface<'a, C> + Copy,
412    C: HasDataLayout + HasTargetSpec,
413{
414    let xlen = cx.data_layout().pointer_size().bits();
415    let flen = match &cx.target_spec().llvm_abiname[..] {
416        "ilp32f" | "lp64f" => 32,
417        "ilp32d" | "lp64d" => 64,
418        _ => 0,
419    };
420
421    let mut avail_gprs = 8;
422    let mut avail_fprs = 8;
423
424    if !fn_abi.ret.is_ignore() && classify_ret(cx, &mut fn_abi.ret, xlen, flen) {
425        avail_gprs -= 1;
426    }
427
428    for (i, arg) in fn_abi.args.iter_mut().enumerate() {
429        if arg.is_ignore() {
430            continue;
431        }
432        classify_arg(
433            cx,
434            arg,
435            xlen,
436            flen,
437            i >= fn_abi.fixed_count as usize,
438            &mut avail_gprs,
439            &mut avail_fprs,
440        );
441    }
442}
443
444pub(crate) fn compute_rust_abi_info<'a, Ty, C>(cx: &C, fn_abi: &mut FnAbi<'a, Ty>)
445where
446    Ty: TyAbiInterface<'a, C> + Copy,
447    C: HasDataLayout + HasTargetSpec,
448{
449    let grlen = cx.data_layout().pointer_size().bits();
450
451    for arg in fn_abi.args.iter_mut() {
452        if arg.is_ignore() {
453            continue;
454        }
455
456        // LLVM integers types do not differentiate between signed or unsigned integers.
457        // Some LoongArch instructions do not have a `.w` suffix version, they use all the
458        // GRLEN bits. By explicitly setting the `signext` or `zeroext` attribute
459        // according to signedness to avoid unnecessary integer extending instructions.
460        //
461        // This is similar to the RISC-V case, see
462        // https://github.com/rust-lang/rust/issues/114508 for details.
463        extend_integer_width(arg, grlen);
464    }
465}