rustc_target/callconv/mips64.rs

use rustc_abi::{
    BackendRepr, FieldsShape, Float, HasDataLayout, Primitive, Reg, Size, TyAbiInterface,
};

use crate::callconv::{ArgAbi, ArgExtension, CastTarget, FnAbi, PassMode, Uniform};

fn extend_integer_width_mips<Ty>(arg: &mut ArgAbi<'_, Ty>, bits: u64) {
    // Always sign extend u32 values on 64-bit mips
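    // The n64 ABI keeps 32-bit values sign-extended in 64-bit registers, so a
    // `u32` argument to an `extern "C"` fn is marked `Sext` here rather than
    // the `Zext` other targets would use; all other integers take the usual
    // `extend_integer_width_to` path below.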
    if let BackendRepr::Scalar(scalar) = arg.layout.backend_repr {
        if let Primitive::Int(i, signed) = scalar.primitive() {
            if !signed && i.size().bits() == 32 {
                if let PassMode::Direct(ref mut attrs) = arg.mode {
                    attrs.ext(ArgExtension::Sext);
                    return;
                }
            }
        }
    }

    arg.extend_integer_width_to(bits);
}

fn float_reg<'a, Ty, C>(cx: &C, ret: &ArgAbi<'a, Ty>, i: usize) -> Option<Reg>
where
    Ty: TyAbiInterface<'a, C> + Copy,
    C: HasDataLayout,
{
    match ret.layout.field(cx, i).backend_repr {
        BackendRepr::Scalar(scalar) => match scalar.primitive() {
            Primitive::Float(Float::F32) => Some(Reg::f32()),
            Primitive::Float(Float::F64) => Some(Reg::f64()),
            _ => None,
        },
        _ => None,
    }
}

fn classify_ret<'a, Ty, C>(cx: &C, ret: &mut ArgAbi<'a, Ty>)
where
    Ty: TyAbiInterface<'a, C> + Copy,
    C: HasDataLayout,
{
    if !ret.layout.is_aggregate() {
        extend_integer_width_mips(ret, 64);
        return;
    }

    let size = ret.layout.size;
    let bits = size.bits();
    if bits <= 128 {
        // Unlike other architectures which return aggregates in registers, MIPS n64 limits the
        // use of float registers to structures (not unions) containing exactly one or two
        // float fields.
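        //
        // For illustration, assuming a `#[repr(C)]` layout:
        // - `struct { x: f64, y: f32 }` is cast to the pair `(f64, f32)` and
        //   comes back in two floating-point registers;
        // - `struct { x: f64, y: i32 }` has a non-float second field, so it
        //   falls through to the uniform case below and comes back as two
        //   64-bit integer chunks.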

        if let FieldsShape::Arbitrary { .. } = ret.layout.fields {
            if ret.layout.fields.count() == 1 {
                if let Some(reg) = float_reg(cx, ret, 0) {
                    ret.cast_to(reg);
                    return;
                }
            } else if ret.layout.fields.count() == 2 {
                if let Some(reg0) = float_reg(cx, ret, 0) {
                    if let Some(reg1) = float_reg(cx, ret, 1) {
                        ret.cast_to(CastTarget::pair(reg0, reg1));
                        return;
                    }
                }
            }
        }

        // Cast to a uniform int structure
        ret.cast_to(Uniform::new(Reg::i64(), size));
    } else {
        ret.make_indirect();
    }
}

fn classify_arg<'a, Ty, C>(cx: &C, arg: &mut ArgAbi<'a, Ty>)
where
    Ty: TyAbiInterface<'a, C> + Copy,
    C: HasDataLayout,
{
    if !arg.layout.is_aggregate() {
        extend_integer_width_mips(arg, 64);
        return;
    }

    let dl = cx.data_layout();
    let size = arg.layout.size;
    let mut prefix = [None; 8];
    let mut prefix_index = 0;

    match arg.layout.fields {
        FieldsShape::Primitive => unreachable!(),
        FieldsShape::Array { .. } => {
            // Arrays are passed indirectly
            arg.make_indirect();
            return;
        }
        FieldsShape::Union(_) => {
            // Unions are always treated as a series of 64-bit integer chunks
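            // (e.g. `union U { x: f64, y: u64 }` is passed as a single i64
            // chunk and never in a float register).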
        }
        FieldsShape::Arbitrary { .. } => {
            // Structures are split up into a series of 64-bit integer chunks, but any aligned
            // doubles not part of another aggregate are passed as floats.
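            //
            // For example, a `#[repr(C)]` `struct { a: f64, b: i32, c: f64 }`
            // (24 bytes) yields the prefix [f64, i64, f64]: the doubles at
            // offsets 0 and 16 go to float registers, while the 8-byte chunk
            // covering `b` is passed as an i64.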
            let mut last_offset = Size::ZERO;

            for i in 0..arg.layout.fields.count() {
                let field = arg.layout.field(cx, i);
                let offset = arg.layout.fields.offset(i);

                // We only care about aligned doubles
                if let BackendRepr::Scalar(scalar) = field.backend_repr {
                    if scalar.primitive() == Primitive::Float(Float::F64) {
                        if offset.is_aligned(dl.f64_align.abi) {
                            // Insert enough integers to cover [last_offset, offset)
                            assert!(last_offset.is_aligned(dl.f64_align.abi));
                            for _ in 0..((offset - last_offset).bits() / 64)
                                .min((prefix.len() - prefix_index) as u64)
                            {
                                prefix[prefix_index] = Some(Reg::i64());
                                prefix_index += 1;
                            }

                            if prefix_index == prefix.len() {
                                break;
                            }

                            prefix[prefix_index] = Some(Reg::f64());
                            prefix_index += 1;
                            last_offset = offset + Reg::f64().size;
                        }
                    }
                }
            }
        }
    };

    // The chunks recorded above (at most 8) form the prefix; any remaining
    // bytes are passed as additional 64-bit integer chunks.
    let rest_size = size - Size::from_bytes(8) * prefix_index as u64;
    arg.cast_to(CastTarget::prefixed(prefix, Uniform::new(Reg::i64(), rest_size)));
}

pub(crate) fn compute_abi_info<'a, Ty, C>(cx: &C, fn_abi: &mut FnAbi<'a, Ty>)
where
    Ty: TyAbiInterface<'a, C> + Copy,
    C: HasDataLayout,
{
    if !fn_abi.ret.is_ignore() {
        classify_ret(cx, &mut fn_abi.ret);
    }

    for arg in fn_abi.args.iter_mut() {
        if arg.is_ignore() {
            continue;
        }
        classify_arg(cx, arg);
    }
}