rustc_target/callconv/
riscv.rs

1// Reference: RISC-V ELF psABI specification
2// https://github.com/riscv/riscv-elf-psabi-doc
3//
4// Reference: Clang RISC-V ELF psABI lowering code
5// https://github.com/llvm/llvm-project/blob/8e780252a7284be45cf1ba224cabd884847e8e92/clang/lib/CodeGen/TargetInfo.cpp#L9311-L9773
6
7use rustc_abi::{
8    BackendRepr, FieldsShape, HasDataLayout, Primitive, Reg, RegKind, Size, TyAbiInterface,
9    TyAndLayout, Variants,
10};
11
12use crate::callconv::{ArgAbi, ArgExtension, CastTarget, FnAbi, PassMode, Uniform};
13use crate::spec::HasTargetSpec;
14
/// Classification of a scalar leaf field found while scanning a value for the
/// hardware floating-point calling convention. At most two such fields may be
/// recorded (one per available "slot").
#[derive(Copy, Clone)]
enum RegPassKind {
    /// A float scalar, `offset_from_start` from the start of the outermost value.
    Float { offset_from_start: Size, ty: Reg },
    /// An integer/pointer scalar, `offset_from_start` from the start of the outermost value.
    Integer { offset_from_start: Size, ty: Reg },
    /// No scalar has been recorded in this slot yet.
    Unknown,
}
21
/// The concrete cast to apply when a value is eligible for the hardware
/// floating-point calling convention.
#[derive(Copy, Clone)]
enum FloatConv {
    /// Two float scalars: both passed in FPRs. The second starts at
    /// `second_ty_offset_from_start` within the value.
    FloatPair { first_ty: Reg, second_ty_offset_from_start: Size, second_ty: Reg },
    /// A single float scalar, passed in one FPR.
    Float(Reg),
    /// One integer and one float scalar (in either order): one GPR + one FPR.
    /// The second field starts at `second_ty_offset_from_start` within the value.
    MixedPair { first_ty: Reg, second_ty_offset_from_start: Size, second_ty: Reg },
}
28
/// Error marker: the value cannot be passed via the hardware floating-point
/// calling convention and must fall back to the integer convention.
#[derive(Copy, Clone)]
struct CannotUseFpConv;
31
32fn is_riscv_aggregate<Ty>(arg: &ArgAbi<'_, Ty>) -> bool {
33    match arg.layout.backend_repr {
34        BackendRepr::SimdVector { .. } => true,
35        _ => arg.layout.is_aggregate(),
36    }
37}
38
/// Recursively scans `arg_layout`, recording up to two scalar leaf fields into
/// `field1_kind`/`field2_kind`, to decide whether the value is eligible for the
/// hardware floating-point calling convention (at most two scalar fields, each
/// fitting its register class).
///
/// `offset_from_start` is the byte offset of `arg_layout` from the start of the
/// outermost value being classified; it accumulates through nested aggregates.
/// Returns `Err(CannotUseFpConv)` as soon as the layout is proven ineligible.
fn should_use_fp_conv_helper<'a, Ty, C>(
    cx: &C,
    arg_layout: &TyAndLayout<'a, Ty>,
    xlen: u64,
    flen: u64,
    field1_kind: &mut RegPassKind,
    field2_kind: &mut RegPassKind,
    offset_from_start: Size,
) -> Result<(), CannotUseFpConv>
where
    Ty: TyAbiInterface<'a, C> + Copy,
{
    match arg_layout.backend_repr {
        BackendRepr::Scalar(scalar) => match scalar.primitive() {
            Primitive::Int(..) | Primitive::Pointer(_) => {
                // An integer wider than XLEN cannot live in a single GPR.
                if arg_layout.size.bits() > xlen {
                    return Err(CannotUseFpConv);
                }
                match (*field1_kind, *field2_kind) {
                    // First scalar seen: record it in slot 1.
                    (RegPassKind::Unknown, _) => {
                        *field1_kind = RegPassKind::Integer {
                            offset_from_start,
                            ty: Reg { kind: RegKind::Integer, size: arg_layout.size },
                        };
                    }
                    // Second scalar, but only if the first was a float: a
                    // float+int pair is still eligible (one FPR + one GPR).
                    (RegPassKind::Float { .. }, RegPassKind::Unknown) => {
                        *field2_kind = RegPassKind::Integer {
                            offset_from_start,
                            ty: Reg { kind: RegKind::Integer, size: arg_layout.size },
                        };
                    }
                    // Two integers, or a third scalar of any kind: ineligible.
                    _ => return Err(CannotUseFpConv),
                }
            }
            Primitive::Float(_) => {
                // A float wider than FLEN cannot live in an FPR.
                if arg_layout.size.bits() > flen {
                    return Err(CannotUseFpConv);
                }
                match (*field1_kind, *field2_kind) {
                    // First scalar seen: record the float in slot 1.
                    (RegPassKind::Unknown, _) => {
                        *field1_kind = RegPassKind::Float {
                            offset_from_start,
                            ty: Reg { kind: RegKind::Float, size: arg_layout.size },
                        };
                    }
                    // Second scalar: a float may follow either kind.
                    (_, RegPassKind::Unknown) => {
                        *field2_kind = RegPassKind::Float {
                            offset_from_start,
                            ty: Reg { kind: RegKind::Float, size: arg_layout.size },
                        };
                    }
                    // A third scalar field disqualifies the whole value.
                    _ => return Err(CannotUseFpConv),
                }
            }
        },
        // Vectors are never passed via the FP calling convention.
        BackendRepr::SimdVector { .. } => return Err(CannotUseFpConv),
        BackendRepr::ScalarPair(..) | BackendRepr::Memory { .. } => match arg_layout.fields {
            FieldsShape::Primitive => {
                unreachable!("aggregates can't have `FieldsShape::Primitive`")
            }
            FieldsShape::Union(_) => {
                // Zero-sized unions are ignored (they contribute no fields).
                if !arg_layout.is_zst() {
                    if arg_layout.is_transparent() {
                        // A non-ZST `repr(transparent)` union delegates its
                        // classification to its single non-1-ZST field, at the
                        // same offset.
                        let non_1zst_elem = arg_layout.non_1zst_field(cx).expect("not exactly one non-1-ZST field in non-ZST repr(transparent) union").1;
                        return should_use_fp_conv_helper(
                            cx,
                            &non_1zst_elem,
                            xlen,
                            flen,
                            field1_kind,
                            field2_kind,
                            offset_from_start,
                        );
                    }
                    // Any other non-empty union is never FP-eligible.
                    return Err(CannotUseFpConv);
                }
            }
            FieldsShape::Array { count, .. } => {
                // Classify each element at its offset within the array.
                // NOTE: `field(cx, 0)` is deliberately inside the loop so a
                // zero-length array never requests an element layout.
                for i in 0..count {
                    let elem_layout = arg_layout.field(cx, 0);
                    should_use_fp_conv_helper(
                        cx,
                        &elem_layout,
                        xlen,
                        flen,
                        field1_kind,
                        field2_kind,
                        offset_from_start + elem_layout.size * i,
                    )?;
                }
            }
            FieldsShape::Arbitrary { .. } => {
                match arg_layout.variants {
                    // Multi-variant enums are never FP-eligible.
                    Variants::Multiple { .. } => return Err(CannotUseFpConv),
                    Variants::Single { .. } | Variants::Empty => (),
                }
                // Visit fields in increasing-offset order so slot 1 is always
                // the field at the lowest offset.
                for i in arg_layout.fields.index_by_increasing_offset() {
                    let field = arg_layout.field(cx, i);
                    should_use_fp_conv_helper(
                        cx,
                        &field,
                        xlen,
                        flen,
                        field1_kind,
                        field2_kind,
                        offset_from_start + arg_layout.fields.offset(i),
                    )?;
                }
            }
        },
    }
    Ok(())
}
152
153fn should_use_fp_conv<'a, Ty, C>(
154    cx: &C,
155    arg: &TyAndLayout<'a, Ty>,
156    xlen: u64,
157    flen: u64,
158) -> Option<FloatConv>
159where
160    Ty: TyAbiInterface<'a, C> + Copy,
161{
162    let mut field1_kind = RegPassKind::Unknown;
163    let mut field2_kind = RegPassKind::Unknown;
164    if should_use_fp_conv_helper(
165        cx,
166        arg,
167        xlen,
168        flen,
169        &mut field1_kind,
170        &mut field2_kind,
171        Size::ZERO,
172    )
173    .is_err()
174    {
175        return None;
176    }
177    match (field1_kind, field2_kind) {
178        (
179            RegPassKind::Integer { offset_from_start, .. }
180            | RegPassKind::Float { offset_from_start, .. },
181            _,
182        ) if offset_from_start != Size::ZERO => {
183            panic!("type {:?} has a first field with non-zero offset {offset_from_start:?}", arg.ty)
184        }
185        (
186            RegPassKind::Integer { ty: first_ty, .. },
187            RegPassKind::Float { offset_from_start, ty: second_ty },
188        ) => Some(FloatConv::MixedPair {
189            first_ty,
190            second_ty_offset_from_start: offset_from_start,
191            second_ty,
192        }),
193        (
194            RegPassKind::Float { ty: first_ty, .. },
195            RegPassKind::Integer { offset_from_start, ty: second_ty },
196        ) => Some(FloatConv::MixedPair {
197            first_ty,
198            second_ty_offset_from_start: offset_from_start,
199            second_ty,
200        }),
201        (
202            RegPassKind::Float { ty: first_ty, .. },
203            RegPassKind::Float { offset_from_start, ty: second_ty },
204        ) => Some(FloatConv::FloatPair {
205            first_ty,
206            second_ty_offset_from_start: offset_from_start,
207            second_ty,
208        }),
209        (RegPassKind::Float { ty, .. }, RegPassKind::Unknown) => Some(FloatConv::Float(ty)),
210        _ => None,
211    }
212}
213
214fn classify_ret<'a, Ty, C>(cx: &C, arg: &mut ArgAbi<'a, Ty>, xlen: u64, flen: u64) -> bool
215where
216    Ty: TyAbiInterface<'a, C> + Copy,
217{
218    if !arg.layout.is_sized() {
219        // Not touching this...
220        return false; // I guess? return value of this function is not documented
221    }
222    if let Some(conv) = should_use_fp_conv(cx, &arg.layout, xlen, flen) {
223        match conv {
224            FloatConv::Float(f) => {
225                arg.cast_to(f);
226            }
227            FloatConv::FloatPair { first_ty, second_ty_offset_from_start, second_ty } => {
228                arg.cast_to(CastTarget::offset_pair(
229                    first_ty,
230                    second_ty_offset_from_start,
231                    second_ty,
232                ));
233            }
234            FloatConv::MixedPair { first_ty, second_ty_offset_from_start, second_ty } => {
235                arg.cast_to(CastTarget::offset_pair(
236                    first_ty,
237                    second_ty_offset_from_start,
238                    second_ty,
239                ));
240            }
241        }
242        return false;
243    }
244
245    let total = arg.layout.size;
246
247    // "Scalars wider than 2✕XLEN are passed by reference and are replaced in
248    // the argument list with the address."
249    // "Aggregates larger than 2✕XLEN bits are passed by reference and are
250    // replaced in the argument list with the address, as are C++ aggregates
251    // with nontrivial copy constructors, destructors, or vtables."
252    if total.bits() > 2 * xlen {
253        // We rely on the LLVM backend lowering code to lower passing a scalar larger than 2*XLEN.
254        if is_riscv_aggregate(arg) {
255            arg.make_indirect();
256        }
257        return true;
258    }
259
260    let xlen_reg = match xlen {
261        32 => Reg::i32(),
262        64 => Reg::i64(),
263        _ => unreachable!("Unsupported XLEN: {}", xlen),
264    };
265    if is_riscv_aggregate(arg) {
266        if total.bits() <= xlen {
267            arg.cast_to(xlen_reg);
268        } else {
269            arg.cast_to(Uniform::new(xlen_reg, Size::from_bits(xlen * 2)));
270        }
271        return false;
272    }
273
274    // "When passed in registers, scalars narrower than XLEN bits are widened
275    // according to the sign of their type up to 32 bits, then sign-extended to
276    // XLEN bits."
277    extend_integer_width(arg, xlen);
278    false
279}
280
/// Lowers one (non-return) argument's ABI, debiting `avail_gprs`/`avail_fprs`
/// to model the psABI's register allocation across the whole argument list.
///
/// `is_vararg` marks arguments past the fixed count; varargs never use the
/// hardware floating-point convention.
fn classify_arg<'a, Ty, C>(
    cx: &C,
    arg: &mut ArgAbi<'a, Ty>,
    xlen: u64,
    flen: u64,
    is_vararg: bool,
    avail_gprs: &mut u64,
    avail_fprs: &mut u64,
) where
    Ty: TyAbiInterface<'a, C> + Copy,
{
    if !arg.layout.is_sized() {
        // Not touching this...
        return;
    }
    // A fixed argument may use the FP convention only if enough registers of
    // the required kinds remain; otherwise fall through to the integer
    // convention below.
    if !is_vararg {
        match should_use_fp_conv(cx, &arg.layout, xlen, flen) {
            // Single float: one FPR.
            Some(FloatConv::Float(f)) if *avail_fprs >= 1 => {
                *avail_fprs -= 1;
                arg.cast_to(f);
                return;
            }
            // Float pair: two FPRs.
            Some(FloatConv::FloatPair { first_ty, second_ty_offset_from_start, second_ty })
                if *avail_fprs >= 2 =>
            {
                *avail_fprs -= 2;
                arg.cast_to(CastTarget::offset_pair(
                    first_ty,
                    second_ty_offset_from_start,
                    second_ty,
                ));
                return;
            }
            // Mixed int/float pair: one GPR plus one FPR.
            Some(FloatConv::MixedPair { first_ty, second_ty_offset_from_start, second_ty })
                if *avail_fprs >= 1 && *avail_gprs >= 1 =>
            {
                *avail_gprs -= 1;
                *avail_fprs -= 1;
                arg.cast_to(CastTarget::offset_pair(
                    first_ty,
                    second_ty_offset_from_start,
                    second_ty,
                ));
                return;
            }
            _ => (),
        }
    }

    let total = arg.layout.size;
    let align = arg.layout.align.abi.bits();

    // "Scalars wider than 2✕XLEN are passed by reference and are replaced in
    // the argument list with the address."
    // "Aggregates larger than 2✕XLEN bits are passed by reference and are
    // replaced in the argument list with the address, as are C++ aggregates
    // with nontrivial copy constructors, destructors, or vtables."
    if total.bits() > 2 * xlen {
        // We rely on the LLVM backend lowering code to lower passing a scalar larger than 2*XLEN.
        if is_riscv_aggregate(arg) {
            arg.make_indirect();
        }
        // The by-reference pointer itself occupies one GPR (if any remain).
        if *avail_gprs >= 1 {
            *avail_gprs -= 1;
        }
        return;
    }

    let double_xlen_reg = match xlen {
        32 => Reg::i64(),
        64 => Reg::i128(),
        _ => unreachable!("Unsupported XLEN: {}", xlen),
    };

    let xlen_reg = match xlen {
        32 => Reg::i32(),
        64 => Reg::i64(),
        _ => unreachable!("Unsupported XLEN: {}", xlen),
    };

    if total.bits() > xlen {
        // Values over-aligned relative to XLEN are passed in an aligned
        // register pair (modelled by a single 2*XLEN register).
        let align_regs = align > xlen;
        if is_riscv_aggregate(arg) {
            arg.cast_to(Uniform::new(
                if align_regs { double_xlen_reg } else { xlen_reg },
                Size::from_bits(xlen * 2),
            ));
        }
        // For over-aligned varargs, skip a GPR if needed so the pair starts
        // in an even-numbered register.
        if align_regs && is_vararg {
            *avail_gprs -= *avail_gprs % 2;
        }
        // A two-register value consumes two GPRs, or exhausts what's left.
        if *avail_gprs >= 2 {
            *avail_gprs -= 2;
        } else {
            *avail_gprs = 0;
        }
        return;
    } else if is_riscv_aggregate(arg) {
        // Small aggregate: a single XLEN register.
        arg.cast_to(xlen_reg);
        if *avail_gprs >= 1 {
            *avail_gprs -= 1;
        }
        return;
    }

    // "When passed in registers, scalars narrower than XLEN bits are widened
    // according to the sign of their type up to 32 bits, then sign-extended to
    // XLEN bits."
    if *avail_gprs >= 1 {
        extend_integer_width(arg, xlen);
        *avail_gprs -= 1;
    }
}
394
395fn extend_integer_width<Ty>(arg: &mut ArgAbi<'_, Ty>, xlen: u64) {
396    if let BackendRepr::Scalar(scalar) = arg.layout.backend_repr {
397        if let Primitive::Int(i, _) = scalar.primitive() {
398            // 32-bit integers are always sign-extended
399            if i.size().bits() == 32 && xlen > 32 {
400                if let PassMode::Direct(ref mut attrs) = arg.mode {
401                    attrs.ext(ArgExtension::Sext);
402                    return;
403                }
404            }
405        }
406    }
407
408    arg.extend_integer_width_to(xlen);
409}
410
411pub(crate) fn compute_abi_info<'a, Ty, C>(cx: &C, fn_abi: &mut FnAbi<'a, Ty>)
412where
413    Ty: TyAbiInterface<'a, C> + Copy,
414    C: HasDataLayout + HasTargetSpec,
415{
416    let flen = match &cx.target_spec().llvm_abiname[..] {
417        "ilp32f" | "lp64f" => 32,
418        "ilp32d" | "lp64d" => 64,
419        _ => 0,
420    };
421    let xlen = cx.data_layout().pointer_size().bits();
422
423    let mut avail_gprs = 8;
424    let mut avail_fprs = 8;
425
426    if !fn_abi.ret.is_ignore() && classify_ret(cx, &mut fn_abi.ret, xlen, flen) {
427        avail_gprs -= 1;
428    }
429
430    for (i, arg) in fn_abi.args.iter_mut().enumerate() {
431        if arg.is_ignore() {
432            continue;
433        }
434        classify_arg(
435            cx,
436            arg,
437            xlen,
438            flen,
439            i >= fn_abi.fixed_count as usize,
440            &mut avail_gprs,
441            &mut avail_fprs,
442        );
443    }
444}
445
446pub(crate) fn compute_rust_abi_info<'a, Ty, C>(cx: &C, fn_abi: &mut FnAbi<'a, Ty>)
447where
448    Ty: TyAbiInterface<'a, C> + Copy,
449    C: HasDataLayout + HasTargetSpec,
450{
451    let xlen = cx.data_layout().pointer_size().bits();
452
453    for arg in fn_abi.args.iter_mut() {
454        if arg.is_ignore() {
455            continue;
456        }
457
458        // LLVM integers types do not differentiate between signed or unsigned integers.
459        // Some RISC-V instructions do not have a `.w` suffix version, they use all the
460        // XLEN bits. By explicitly setting the `signext` or `zeroext` attribute
461        // according to signedness to avoid unnecessary integer extending instructions.
462        //
463        // See https://github.com/rust-lang/rust/issues/114508 for details.
464        extend_integer_width(arg, xlen);
465    }
466}