// rustc_target/callconv/x86_64.rs

1// The classification code for the x86_64 ABI is taken from the clay language
2// https://github.com/jckarter/clay/blob/db0bd2702ab0b6e48965cd85f8859bbd5f60e48e/compiler/externals.cpp
3
4use rustc_abi::{
5    BackendRepr, HasDataLayout, Primitive, Reg, RegKind, Size, TyAbiInterface, TyAndLayout,
6    Variants,
7};
8
9use crate::callconv::{ArgAbi, CastTarget, FnAbi};
10use crate::spec::HasTargetSpec;
11
/// Classification of "eightbyte" components.
// N.B., the order of the variants is from general to specific,
// such that `unify(a, b)` is the "smaller" of `a` and `b` — see the
// `old.min(c)` merge in `classify` below, which relies on this `Ord` derive.
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Debug)]
enum Class {
    /// Passed in a general-purpose (integer) register.
    Int,
    /// Passed in an SSE register (first eightbyte of a float/vector value).
    Sse,
    /// Continuation eightbyte of an SSE value started by a preceding `Sse`.
    SseUp,
}
21
/// Marker "error" type returned by classification when the value cannot be
/// passed in registers and must go in memory (on the stack / via pointer).
#[derive(Clone, Copy, Debug)]
struct Memory;
24
// Currently supported vector size (AVX-512).
const LARGEST_VECTOR_SIZE: usize = 512;
// Number of 8-byte ("eightbyte") components in the largest vector: 512 / 64.
const MAX_EIGHTBYTES: usize = LARGEST_VECTOR_SIZE / 64;
28
/// Classifies `arg` into up to `MAX_EIGHTBYTES` per-eightbyte register
/// classes, or returns `Err(Memory)` when the value must be passed on the
/// stack.
///
/// Each `Some(Class)` entry in the result describes one 8-byte chunk of the
/// value; `None` entries are chunks past the end of the value (or never
/// touched by any field).
fn classify_arg<'a, Ty, C>(
    cx: &C,
    arg: &ArgAbi<'a, Ty>,
) -> Result<[Option<Class>; MAX_EIGHTBYTES], Memory>
where
    Ty: TyAbiInterface<'a, C> + Copy,
    C: HasDataLayout,
{
    // Recursively classifies `layout` (located at byte offset `off` within
    // the outermost value) into `cls`, merging with classes already recorded
    // by earlier fields/variants.
    fn classify<'a, Ty, C>(
        cx: &C,
        layout: TyAndLayout<'a, Ty>,
        cls: &mut [Option<Class>],
        off: Size,
    ) -> Result<(), Memory>
    where
        Ty: TyAbiInterface<'a, C> + Copy,
        C: HasDataLayout,
    {
        // A misaligned field forces the whole value into memory; ZSTs are
        // harmless wherever they land.
        if !off.is_aligned(layout.align.abi) {
            if !layout.is_zst() {
                return Err(Memory);
            }
            return Ok(());
        }

        let mut c = match layout.backend_repr {
            BackendRepr::Scalar(scalar) => match scalar.primitive() {
                Primitive::Int(..) | Primitive::Pointer(_) => Class::Int,
                Primitive::Float(_) => Class::Sse,
            },

            BackendRepr::SimdVector { .. } => Class::Sse,

            BackendRepr::ScalarPair(..) | BackendRepr::Memory { .. } => {
                // Aggregate: classify every field at its own offset.
                for i in 0..layout.fields.count() {
                    let field_off = off + layout.fields.offset(i);
                    classify(cx, layout.field(cx, i), cls, field_off)?;
                }

                match &layout.variants {
                    Variants::Single { .. } | Variants::Empty => {}
                    Variants::Multiple { variants, .. } => {
                        // Treat enum variants like union members.
                        for variant_idx in variants.indices() {
                            classify(cx, layout.for_variant(cx, variant_idx), cls, off)?;
                        }
                    }
                }

                return Ok(());
            }
        };

        // Fill in `cls` for scalars (Int/Sse) and vectors (Sse).
        let first = (off.bytes() / 8) as usize;
        let last = ((off.bytes() + layout.size.bytes() - 1) / 8) as usize;
        for cls in &mut cls[first..=last] {
            // `min` implements `unify`: variants are ordered general -> specific,
            // so the "smaller" (more general) class wins on overlap.
            *cls = Some(cls.map_or(c, |old| old.min(c)));

            // Everything after the first Sse "eightbyte"
            // component is the upper half of a register.
            if c == Class::Sse {
                c = Class::SseUp;
            }
        }

        Ok(())
    }

    // Values wider than MAX_EIGHTBYTES (512 bits) never fit in registers.
    let n = ((arg.layout.size.bytes() + 7) / 8) as usize;
    if n > MAX_EIGHTBYTES {
        return Err(Memory);
    }

    let mut cls = [None; MAX_EIGHTBYTES];
    classify(cx, arg.layout, &mut cls, Size::ZERO)?;
    if n > 2 {
        // A value spanning more than two eightbytes is only register-passable
        // as a single SSE vector: one leading Sse followed purely by SseUp.
        if cls[0] != Some(Class::Sse) {
            return Err(Memory);
        }
        if cls[1..n].iter().any(|&c| c != Some(Class::SseUp)) {
            return Err(Memory);
        }
    } else {
        // Cleanup pass: an SseUp that is not a continuation of a preceding
        // Sse run is demoted to Sse (it starts its own register).
        let mut i = 0;
        while i < n {
            if cls[i] == Some(Class::SseUp) {
                cls[i] = Some(Class::Sse);
            } else if cls[i] == Some(Class::Sse) {
                i += 1;
                while i != n && cls[i] == Some(Class::SseUp) {
                    i += 1;
                }
            } else {
                i += 1;
            }
        }
    }

    Ok(cls)
}
130
131fn reg_component(cls: &[Option<Class>], i: &mut usize, size: Size) -> Option<Reg> {
132    if *i >= cls.len() {
133        return None;
134    }
135
136    match cls[*i] {
137        None => None,
138        Some(Class::Int) => {
139            *i += 1;
140            Some(if size.bytes() < 8 { Reg { kind: RegKind::Integer, size } } else { Reg::i64() })
141        }
142        Some(Class::Sse) => {
143            let vec_len =
144                1 + cls[*i + 1..].iter().take_while(|&&c| c == Some(Class::SseUp)).count();
145            *i += vec_len;
146            Some(if vec_len == 1 {
147                match size.bytes() {
148                    4 => Reg::f32(),
149                    _ => Reg::f64(),
150                }
151            } else {
152                Reg { kind: RegKind::Vector, size: Size::from_bytes(8) * (vec_len as u64) }
153            })
154        }
155        Some(c) => unreachable!("reg_component: unhandled class {:?}", c),
156    }
157}
158
159fn cast_target(cls: &[Option<Class>], size: Size) -> CastTarget {
160    let mut i = 0;
161    let lo = reg_component(cls, &mut i, size).unwrap();
162    let offset = Size::from_bytes(8) * (i as u64);
163    let mut target = CastTarget::from(lo);
164    if size > offset {
165        if let Some(hi) = reg_component(cls, &mut i, size - offset) {
166            target = CastTarget::pair(lo, hi);
167        }
168    }
169    assert_eq!(reg_component(cls, &mut i, Size::ZERO), None);
170    target
171}
172
/// Integer registers available for argument passing.
const MAX_INT_REGS: usize = 6; // RDI, RSI, RDX, RCX, R8, R9
/// SSE registers available for argument passing.
const MAX_SSE_REGS: usize = 8; // XMM0-7
175
/// Computes the x86_64 calling convention for `fn_abi`, assigning the return
/// value and each argument to registers (via `cast_to` / integer extension)
/// or to memory (indirect / byval on the stack).
pub(crate) fn compute_abi_info<'a, Ty, C>(cx: &C, fn_abi: &mut FnAbi<'a, Ty>)
where
    Ty: TyAbiInterface<'a, C> + Copy,
    C: HasDataLayout + HasTargetSpec,
{
    // Registers still available for argument passing; decremented as
    // arguments claim them.
    let mut int_regs = MAX_INT_REGS;
    let mut sse_regs = MAX_SSE_REGS;

    // Shared handler for the return value (`is_arg == false`) and each
    // argument (`is_arg == true`); mutates the register counters above.
    let mut x86_64_arg_or_ret = |arg: &mut ArgAbi<'a, Ty>, is_arg: bool| {
        if !arg.layout.is_sized() {
            // Not touching this...
            return;
        }
        let mut cls_or_mem = classify_arg(cx, arg);

        if is_arg {
            if let Ok(cls) = cls_or_mem {
                // Count how many registers of each kind this argument needs.
                let mut needed_int = 0;
                let mut needed_sse = 0;
                for c in cls {
                    match c {
                        Some(Class::Int) => needed_int += 1,
                        Some(Class::Sse) => needed_sse += 1,
                        _ => {}
                    }
                }
                // Claim the registers only if BOTH kinds are available in
                // sufficient quantity (`checked_sub` fails on underflow).
                match (int_regs.checked_sub(needed_int), sse_regs.checked_sub(needed_sse)) {
                    (Some(left_int), Some(left_sse)) => {
                        int_regs = left_int;
                        sse_regs = left_sse;
                    }
                    _ => {
                        // Not enough registers for this argument, so it will be
                        // passed on the stack, but we only mark aggregates
                        // explicitly as indirect `byval` arguments, as LLVM will
                        // automatically put immediates on the stack itself.
                        if arg.layout.is_aggregate() {
                            cls_or_mem = Err(Memory);
                        }
                    }
                }
            }
        }

        match cls_or_mem {
            Err(Memory) => {
                if is_arg {
                    // The x86_64 ABI doesn't have any special requirements for `byval` alignment,
                    // the type's alignment is always used.
                    arg.pass_by_stack_offset(None);
                } else {
                    // `sret` parameter thus one less integer register available
                    arg.make_indirect();
                    // NOTE(eddyb) return is handled first, so no registers
                    // should've been used yet.
                    assert_eq!(int_regs, MAX_INT_REGS);
                    int_regs -= 1;
                }
            }
            Ok(ref cls) => {
                // split into sized chunks passed individually
                if arg.layout.is_aggregate() {
                    let size = arg.layout.size;
                    arg.cast_to(cast_target(cls, size));
                } else if is_arg || cx.target_spec().is_like_darwin {
                    arg.extend_integer_width_to(32);
                }
            }
        }
    };

    // Process the return value first: the `sret` bookkeeping above asserts
    // that no argument has consumed an integer register yet.
    if !fn_abi.ret.is_ignore() {
        x86_64_arg_or_ret(&mut fn_abi.ret, false);
    }

    for arg in fn_abi.args.iter_mut() {
        if arg.is_ignore() {
            continue;
        }
        x86_64_arg_or_ret(arg, true);
    }
}