use std::assert_matches::assert_matches;

use rustc_abi::{BackendRepr, Float, Integer, Primitive, Scalar};
use rustc_ast::{InlineAsmOptions, InlineAsmTemplatePiece};
use rustc_codegen_ssa::mir::operand::OperandValue;
use rustc_codegen_ssa::traits::*;
use rustc_data_structures::fx::FxHashMap;
use rustc_middle::ty::Instance;
use rustc_middle::ty::layout::TyAndLayout;
use rustc_middle::{bug, span_bug};
use rustc_span::{Pos, Span, Symbol, sym};
use rustc_target::asm::*;
use smallvec::SmallVec;
use tracing::debug;

use crate::builder::Builder;
use crate::common::{AsCCharPtr, Funclet};
use crate::context::CodegenCx;
use crate::type_::Type;
use crate::type_of::LayoutLlvmExt;
use crate::value::Value;
use crate::{attributes, llvm};

impl<'ll, 'tcx> AsmBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> {
    fn codegen_inline_asm(
        &mut self,
        template: &[InlineAsmTemplatePiece],
        operands: &[InlineAsmOperandRef<'tcx, Self>],
        options: InlineAsmOptions,
        line_spans: &[Span],
        instance: Instance<'_>,
        dest: Option<Self::BasicBlock>,
        catch_funclet: Option<(Self::BasicBlock, Option<&Self::Funclet>)>,
    ) {
        let asm_arch = self.tcx.sess.asm_arch.unwrap();

        // Collect the types of output operands
        let mut constraints = vec![];
        let mut clobbers = vec![];
        let mut output_types = vec![];
        let mut op_idx = FxHashMap::default();
        let mut clobbered_x87 = false;
        for (idx, op) in operands.iter().enumerate() {
            match *op {
                InlineAsmOperandRef::Out { reg, late, place } => {
                    let is_target_supported = |reg_class: InlineAsmRegClass| {
                        for &(_, feature) in reg_class.supported_types(asm_arch, true) {
                            if let Some(feature) = feature {
                                if self
                                    .tcx
                                    .asm_target_features(instance.def_id())
                                    .contains(&feature)
                                {
                                    return true;
                                }
                            } else {
                                // Register class is unconditionally supported
                                return true;
                            }
                        }
                        false
                    };

                    let mut layout = None;
                    let ty = if let Some(ref place) = place {
                        layout = Some(&place.layout);
                        llvm_fixup_output_type(self.cx, reg.reg_class(), &place.layout, instance)
                    } else if matches!(
                        reg.reg_class(),
                        InlineAsmRegClass::X86(
                            X86InlineAsmRegClass::mmx_reg | X86InlineAsmRegClass::x87_reg
                        )
                    ) {
                        // Special handling for x87/mmx registers: we always
                        // clobber the whole x87 stack if one of these registers
                        // is marked as clobbered, because of the way LLVM
                        // handles the FP stack in inline assembly.
                        if !clobbered_x87 {
                            clobbered_x87 = true;
                            clobbers.push("~{st}".to_string());
                            for i in 1..=7 {
                                clobbers.push(format!("~{{st({})}}", i));
                            }
                        }
                        continue;
                    } else if !is_target_supported(reg.reg_class())
                        || reg.reg_class().is_clobber_only(asm_arch, true)
                    {
                        // We turn discarded outputs into clobber constraints
                        // if the target feature needed by the register class
                        // is disabled, as LLVM would otherwise try to allocate
                        // a register for the dummy output.
                        assert_matches!(reg, InlineAsmRegOrRegClass::Reg(_));
                        clobbers.push(format!("~{}", reg_to_llvm(reg, None)));
                        continue;
                    } else {
                        // If the output is discarded, we don't really care what
                        // value is produced by the register, so any valid dummy
                        // type will do.
                        dummy_output_type(self.cx, reg.reg_class())
                    };
                    output_types.push(ty);
                    op_idx.insert(idx, constraints.len());
                    let prefix = if late { "=" } else { "=&" };
                    constraints.push(format!("{}{}", prefix, reg_to_llvm(reg, layout)));
                }
                InlineAsmOperandRef::InOut { reg, late, in_value, out_place } => {
                    let layout = if let Some(ref out_place) = out_place {
                        &out_place.layout
                    } else {
                        // LLVM requires tied operands to have the same type,
                        // so we just use the type of the input.
                        &in_value.layout
                    };
                    let ty = llvm_fixup_output_type(self.cx, reg.reg_class(), layout, instance);
                    output_types.push(ty);
                    op_idx.insert(idx, constraints.len());
                    let prefix = if late { "=" } else { "=&" };
                    constraints.push(format!("{}{}", prefix, reg_to_llvm(reg, Some(layout))));
                }
                _ => {}
            }
        }

        // Collect input operands
        let mut inputs = vec![];
        for (idx, op) in operands.iter().enumerate() {
            match *op {
                InlineAsmOperandRef::In { reg, value } => {
                    let llval = llvm_fixup_input(
                        self,
                        value.immediate(),
                        reg.reg_class(),
                        &value.layout,
                        instance,
                    );
                    inputs.push(llval);
                    op_idx.insert(idx, constraints.len());
                    constraints.push(reg_to_llvm(reg, Some(&value.layout)));
                }
                InlineAsmOperandRef::InOut { reg, late, in_value, out_place: _ } => {
                    let value = llvm_fixup_input(
                        self,
                        in_value.immediate(),
                        reg.reg_class(),
                        &in_value.layout,
                        instance,
                    );
                    inputs.push(value);

                    // In the case of fixed registers, we have the choice of
                    // either using a tied operand or duplicating the constraint.
                    // We prefer the latter because it matches the behavior of
                    // Clang.
                    if late && matches!(reg, InlineAsmRegOrRegClass::Reg(_)) {
                        constraints.push(reg_to_llvm(reg, Some(&in_value.layout)));
                    } else {
                        constraints.push(format!("{}", op_idx[&idx]));
                    }
                }
                InlineAsmOperandRef::SymFn { instance } => {
                    inputs.push(self.cx.get_fn(instance));
                    op_idx.insert(idx, constraints.len());
                    constraints.push("s".to_string());
                }
                InlineAsmOperandRef::SymStatic { def_id } => {
                    inputs.push(self.cx.get_static(def_id));
                    op_idx.insert(idx, constraints.len());
                    constraints.push("s".to_string());
                }
                _ => {}
            }
        }

        // Build the template string
        let mut labels = vec![];
        let mut template_str = String::new();
        for piece in template {
            match *piece {
                InlineAsmTemplatePiece::String(ref s) => {
                    if s.contains('$') {
                        for c in s.chars() {
                            if c == '$' {
                                template_str.push_str("$$");
                            } else {
                                template_str.push(c);
                            }
                        }
                    } else {
                        template_str.push_str(s)
                    }
                }
                InlineAsmTemplatePiece::Placeholder { operand_idx, modifier, span: _ } => {
                    match operands[operand_idx] {
                        InlineAsmOperandRef::In { reg, .. }
                        | InlineAsmOperandRef::Out { reg, .. }
                        | InlineAsmOperandRef::InOut { reg, .. } => {
                            let modifier = modifier_to_llvm(asm_arch, reg.reg_class(), modifier);
                            if let Some(modifier) = modifier {
                                template_str.push_str(&format!(
                                    "${{{}:{}}}",
                                    op_idx[&operand_idx], modifier
                                ));
                            } else {
                                template_str.push_str(&format!("${{{}}}", op_idx[&operand_idx]));
                            }
                        }
                        InlineAsmOperandRef::Const { ref string } => {
                            // Const operands get injected directly into the template
                            template_str.push_str(string);
                        }
                        InlineAsmOperandRef::SymFn { .. }
                        | InlineAsmOperandRef::SymStatic { .. } => {
                            // Only emit the raw symbol name
                            template_str.push_str(&format!("${{{}:c}}", op_idx[&operand_idx]));
                        }
                        InlineAsmOperandRef::Label { label } => {
                            template_str.push_str(&format!("${{{}:l}}", constraints.len()));
                            constraints.push("!i".to_owned());
                            labels.push(label);
                        }
                    }
                }
            }
        }

        constraints.append(&mut clobbers);
        if !options.contains(InlineAsmOptions::PRESERVES_FLAGS) {
            match asm_arch {
                InlineAsmArch::AArch64 | InlineAsmArch::Arm64EC | InlineAsmArch::Arm => {
                    constraints.push("~{cc}".to_string());
                }
                InlineAsmArch::X86 | InlineAsmArch::X86_64 => {
                    constraints.extend_from_slice(&[
                        "~{dirflag}".to_string(),
                        "~{fpsr}".to_string(),
                        "~{flags}".to_string(),
                    ]);
                }
                InlineAsmArch::RiscV32 | InlineAsmArch::RiscV64 => {
                    constraints.extend_from_slice(&[
                        "~{vtype}".to_string(),
                        "~{vl}".to_string(),
                        "~{vxsat}".to_string(),
                        "~{vxrm}".to_string(),
                    ]);
                }
                InlineAsmArch::Avr => {
                    constraints.push("~{sreg}".to_string());
                }
                InlineAsmArch::Nvptx64 => {}
                InlineAsmArch::PowerPC | InlineAsmArch::PowerPC64 => {}
                InlineAsmArch::Hexagon => {}
                InlineAsmArch::LoongArch64 => {
                    constraints.extend_from_slice(&[
                        "~{$fcc0}".to_string(),
                        "~{$fcc1}".to_string(),
                        "~{$fcc2}".to_string(),
                        "~{$fcc3}".to_string(),
                        "~{$fcc4}".to_string(),
                        "~{$fcc5}".to_string(),
                        "~{$fcc6}".to_string(),
                        "~{$fcc7}".to_string(),
                    ]);
                }
                InlineAsmArch::Mips | InlineAsmArch::Mips64 => {}
                InlineAsmArch::S390x => {
                    constraints.push("~{cc}".to_string());
                }
                InlineAsmArch::Sparc | InlineAsmArch::Sparc64 => {
                    // Clobber the integer and floating-point condition code
                    // registers.
                    constraints.push("~{icc}".to_string());
                    constraints.push("~{fcc0}".to_string());
                    constraints.push("~{fcc1}".to_string());
                    constraints.push("~{fcc2}".to_string());
                    constraints.push("~{fcc3}".to_string());
                }
                InlineAsmArch::SpirV => {}
                InlineAsmArch::Wasm32 | InlineAsmArch::Wasm64 => {}
                InlineAsmArch::Bpf => {}
                InlineAsmArch::Msp430 => {
                    constraints.push("~{sr}".to_string());
                }
                InlineAsmArch::M68k => {
                    constraints.push("~{ccr}".to_string());
                }
                InlineAsmArch::CSKY => {
                    constraints.push("~{psr}".to_string());
                }
            }
        }
        if !options.contains(InlineAsmOptions::NOMEM) {
            // This is actually ignored by LLVM, but it's probably best to keep
            // it just in case. LLVM instead uses the ReadOnly/ReadNone
            // attributes on the call instruction to optimize.
            constraints.push("~{memory}".to_string());
        }
        let volatile = !options.contains(InlineAsmOptions::PURE);
        let alignstack = !options.contains(InlineAsmOptions::NOSTACK);
        let output_type = match &output_types[..] {
            [] => self.type_void(),
            [ty] => ty,
            tys => self.type_struct(tys, false),
        };
        let dialect = match asm_arch {
            InlineAsmArch::X86 | InlineAsmArch::X86_64
                if !options.contains(InlineAsmOptions::ATT_SYNTAX) =>
            {
                llvm::AsmDialect::Intel
            }
            _ => llvm::AsmDialect::Att,
        };
        let result = inline_asm_call(
            self,
            &template_str,
            &constraints.join(","),
            &inputs,
            output_type,
            &labels,
            volatile,
            alignstack,
            dialect,
            line_spans,
            options.contains(InlineAsmOptions::MAY_UNWIND),
            dest,
            catch_funclet,
        )
        .unwrap_or_else(|| span_bug!(line_spans[0], "LLVM asm constraint validation failed"));

        let mut attrs = SmallVec::<[_; 2]>::new();
        if options.contains(InlineAsmOptions::PURE) {
            if options.contains(InlineAsmOptions::NOMEM) {
                attrs.push(llvm::MemoryEffects::None.create_attr(self.cx.llcx));
            } else if options.contains(InlineAsmOptions::READONLY) {
                attrs.push(llvm::MemoryEffects::ReadOnly.create_attr(self.cx.llcx));
            }
            attrs.push(llvm::AttributeKind::WillReturn.create_attr(self.cx.llcx));
        } else if options.contains(InlineAsmOptions::NOMEM) {
            attrs.push(llvm::MemoryEffects::InaccessibleMemOnly.create_attr(self.cx.llcx));
        } else {
            // LLVM doesn't have an attribute to represent ReadOnly + SideEffect
        }
        attributes::apply_to_callsite(result, llvm::AttributePlace::Function, &{ attrs });

        // Write results to outputs. We need to do this for all possible control flow.
        //
        // Note that `dest` may be populated with unreachable_block when asm goto with outputs
        // is used (because we need to codegen callbr which always needs a destination), so
        // we use the NORETURN option to determine if `dest` should be used.
        for block in (if options.contains(InlineAsmOptions::NORETURN) { None } else { Some(dest) })
            .into_iter()
            .chain(labels.iter().copied().map(Some))
        {
            if let Some(block) = block {
                self.switch_to_block(block);
            }

            for (idx, op) in operands.iter().enumerate() {
                if let InlineAsmOperandRef::Out { reg, place: Some(place), .. }
                | InlineAsmOperandRef::InOut { reg, out_place: Some(place), .. } = *op
                {
                    let value = if output_types.len() == 1 {
                        result
                    } else {
                        self.extract_value(result, op_idx[&idx] as u64)
                    };
                    let value =
                        llvm_fixup_output(self, value, reg.reg_class(), &place.layout, instance);
                    OperandValue::Immediate(value).store(self, place);
                }
            }
        }
    }
}

impl<'tcx> AsmCodegenMethods<'tcx> for CodegenCx<'_, 'tcx> {
    fn codegen_global_asm(
        &self,
        template: &[InlineAsmTemplatePiece],
        operands: &[GlobalAsmOperandRef<'tcx>],
        options: InlineAsmOptions,
        _line_spans: &[Span],
    ) {
        let asm_arch = self.tcx.sess.asm_arch.unwrap();

        // Default to Intel syntax on x86
        let intel_syntax = matches!(asm_arch, InlineAsmArch::X86 | InlineAsmArch::X86_64)
            && !options.contains(InlineAsmOptions::ATT_SYNTAX);

        // Build the template string
        let mut template_str = String::new();
        if intel_syntax {
            template_str.push_str(".intel_syntax\n");
        }
        for piece in template {
            match *piece {
                InlineAsmTemplatePiece::String(ref s) => template_str.push_str(s),
                InlineAsmTemplatePiece::Placeholder { operand_idx, modifier: _, span: _ } => {
                    match operands[operand_idx] {
                        GlobalAsmOperandRef::Const { ref string } => {
                            // Const operands get injected directly into the
                            // template. Note that we don't need to escape $
                            // here unlike normal inline assembly.
                            template_str.push_str(string);
                        }
                        GlobalAsmOperandRef::SymFn { instance } => {
                            let llval = self.get_fn(instance);
                            self.add_compiler_used_global(llval);
                            let symbol = llvm::build_string(|s| unsafe {
                                llvm::LLVMRustGetMangledName(llval, s);
                            })
                            .expect("symbol is not valid UTF-8");
                            template_str.push_str(&symbol);
                        }
                        GlobalAsmOperandRef::SymStatic { def_id } => {
                            let llval = self
                                .renamed_statics
                                .borrow()
                                .get(&def_id)
                                .copied()
                                .unwrap_or_else(|| self.get_static(def_id));
                            self.add_compiler_used_global(llval);
                            let symbol = llvm::build_string(|s| unsafe {
                                llvm::LLVMRustGetMangledName(llval, s);
                            })
                            .expect("symbol is not valid UTF-8");
                            template_str.push_str(&symbol);
                        }
                    }
                }
            }
        }
        if intel_syntax {
            template_str.push_str("\n.att_syntax\n");
        }

        unsafe {
            llvm::LLVMAppendModuleInlineAsm(
                self.llmod,
                template_str.as_c_char_ptr(),
                template_str.len(),
            );
        }
    }

    fn mangled_name(&self, instance: Instance<'tcx>) -> String {
        let llval = self.get_fn(instance);
        llvm::build_string(|s| unsafe {
            llvm::LLVMRustGetMangledName(llval, s);
        })
        .expect("symbol is not valid UTF-8")
    }
}

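/// Emits the LLVM IR for an inline asm expression: asks LLVM to verify the
/// constraint string, materializes the asm value, and lowers it to a plain
/// `call`, an `invoke` (for may_unwind), or a `callbr` (for asm goto label
/// targets), attaching `srcloc` metadata for diagnostics.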
pub(crate) fn inline_asm_call<'ll>(
    bx: &mut Builder<'_, 'll, '_>,
    asm: &str,
    cons: &str,
    inputs: &[&'ll Value],
    output: &'ll llvm::Type,
    labels: &[&'ll llvm::BasicBlock],
    volatile: bool,
    alignstack: bool,
    dia: llvm::AsmDialect,
    line_spans: &[Span],
    unwind: bool,
    dest: Option<&'ll llvm::BasicBlock>,
    catch_funclet: Option<(&'ll llvm::BasicBlock, Option<&Funclet<'ll>>)>,
) -> Option<&'ll Value> {
    let volatile = if volatile { llvm::True } else { llvm::False };
    let alignstack = if alignstack { llvm::True } else { llvm::False };
    let can_throw = if unwind { llvm::True } else { llvm::False };

    let argtys = inputs
        .iter()
        .map(|v| {
            debug!("Asm Input Type: {:?}", *v);
            bx.cx.val_ty(*v)
        })
        .collect::<Vec<_>>();

    debug!("Asm Output Type: {:?}", output);
    let fty = bx.cx.type_func(&argtys, output);

    // Ask LLVM to verify that the constraints are well-formed.
    let constraints_ok =
        unsafe { llvm::LLVMRustInlineAsmVerify(fty, cons.as_c_char_ptr(), cons.len()) };
    debug!("constraint verification result: {:?}", constraints_ok);
    if constraints_ok {
        let v = unsafe {
            llvm::LLVMRustInlineAsm(
                fty,
                asm.as_c_char_ptr(),
                asm.len(),
                cons.as_c_char_ptr(),
                cons.len(),
                volatile,
                alignstack,
                dia,
                can_throw,
            )
        };

        let call = if !labels.is_empty() {
            assert!(catch_funclet.is_none());
            bx.callbr(fty, None, None, v, inputs, dest.unwrap(), labels, None, None)
        } else if let Some((catch, funclet)) = catch_funclet {
            bx.invoke(fty, None, None, v, inputs, dest.unwrap(), catch, funclet, None)
        } else {
            bx.call(fty, None, None, v, inputs, None, None)
        };

        // Store mark in a metadata node so we can map LLVM errors
        // back to source code locations. See #17552.
        let key = "srcloc";
        let kind = bx.get_md_kind_id(key);

        // Each entry packs one line's span into 64 bits: the low 32 bits hold
        // `span.lo` and the high 32 bits hold `span.hi`.
        let mut srcloc = vec![];
        if dia == llvm::AsmDialect::Intel && line_spans.len() > 1 {
            // LLVM inserts an extra line to add the ".intel_syntax", so add
            // a dummy srcloc entry for it.
            //
            // Don't do this if we only have 1 line span since that may be
            // due to the asm template string coming from a macro. LLVM will
            // default to the first srcloc in this case which is usually
            // correct.
            srcloc.push(llvm::LLVMValueAsMetadata(bx.const_u64(0)));
        }
        srcloc.extend(line_spans.iter().map(|span| {
            llvm::LLVMValueAsMetadata(
                bx.const_u64(u64::from(span.lo().to_u32()) | (u64::from(span.hi().to_u32()) << 32)),
            )
        }));
        let md = unsafe { llvm::LLVMMDNodeInContext2(bx.llcx, srcloc.as_ptr(), srcloc.len()) };
        let md = bx.get_metadata_value(md);
        llvm::LLVMSetMetadata(call, kind, md);

        Some(call)
    } else {
        // LLVM has detected an issue with our constraints, bail out
        None
    }
}

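/// If the register is an xmm/ymm/zmm register then return its index.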
fn xmm_reg_index(reg: InlineAsmReg) -> Option<u32> {
    use X86InlineAsmReg::*;
    match reg {
        InlineAsmReg::X86(reg) if reg as u32 >= xmm0 as u32 && reg as u32 <= xmm15 as u32 => {
            Some(reg as u32 - xmm0 as u32)
        }
        InlineAsmReg::X86(reg) if reg as u32 >= ymm0 as u32 && reg as u32 <= ymm15 as u32 => {
            Some(reg as u32 - ymm0 as u32)
        }
        InlineAsmReg::X86(reg) if reg as u32 >= zmm0 as u32 && reg as u32 <= zmm31 as u32 => {
            Some(reg as u32 - zmm0 as u32)
        }
        _ => None,
    }
}

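/// If the register is an AArch64 integer register then return its index.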
fn a64_reg_index(reg: InlineAsmReg) -> Option<u32> {
    match reg {
        InlineAsmReg::AArch64(r) => r.reg_index(),
        _ => None,
    }
}

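/// If the register is an AArch64 vector register then return its index.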
fn a64_vreg_index(reg: InlineAsmReg) -> Option<u32> {
    match reg {
        InlineAsmReg::AArch64(reg) => reg.vreg_index(),
        _ => None,
    }
}

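/// Converts a register class to an LLVM constraint code.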
fn reg_to_llvm(reg: InlineAsmRegOrRegClass, layout: Option<&TyAndLayout<'_>>) -> String {
    use InlineAsmRegClass::*;
    match reg {
        // For vector registers LLVM wants the register name to match the type size.
        InlineAsmRegOrRegClass::Reg(reg) => {
            if let Some(idx) = xmm_reg_index(reg) {
                let class = if let Some(layout) = layout {
                    match layout.size.bytes() {
                        64 => 'z',
                        32 => 'y',
                        _ => 'x',
                    }
                } else {
                    // We use f32 as the type for discarded outputs
                    'x'
                };
                format!("{{{}mm{}}}", class, idx)
            } else if let Some(idx) = a64_reg_index(reg) {
                let class = if let Some(layout) = layout {
                    match layout.size.bytes() {
                        8 => 'x',
                        _ => 'w',
                    }
                } else {
                    // We use i32 as the type for discarded outputs
                    'w'
                };
                if class == 'x' && reg == InlineAsmReg::AArch64(AArch64InlineAsmReg::x30) {
                    // LLVM doesn't recognize x30, use lr instead
                    "{lr}".to_string()
                } else {
                    format!("{{{}{}}}", class, idx)
                }
            } else if let Some(idx) = a64_vreg_index(reg) {
                let class = if let Some(layout) = layout {
                    match layout.size.bytes() {
                        16 => 'q',
                        8 => 'd',
                        4 => 's',
                        2 => 'h',
                        1 => 'd', // We fixup i8 to i8x8
                        _ => unreachable!(),
                    }
                } else {
                    // We use i64x2 as the type for discarded outputs
                    'q'
                };
                format!("{{{}{}}}", class, idx)
            } else if reg == InlineAsmReg::Arm(ArmInlineAsmReg::r14) {
                // LLVM doesn't recognize r14, use lr instead
                "{lr}".to_string()
            } else {
                format!("{{{}}}", reg.name())
            }
        }
        // The constraint codes are documented in the LLVM language reference:
        // https://llvm.org/docs/LangRef.html#supported-constraint-code-list
        InlineAsmRegOrRegClass::RegClass(reg) => match reg {
            AArch64(AArch64InlineAsmRegClass::reg) => "r",
            AArch64(AArch64InlineAsmRegClass::vreg) => "w",
            AArch64(AArch64InlineAsmRegClass::vreg_low16) => "x",
            AArch64(AArch64InlineAsmRegClass::preg) => unreachable!("clobber-only"),
            Arm(ArmInlineAsmRegClass::reg) => "r",
            Arm(ArmInlineAsmRegClass::sreg)
            | Arm(ArmInlineAsmRegClass::dreg_low16)
            | Arm(ArmInlineAsmRegClass::qreg_low8) => "t",
            Arm(ArmInlineAsmRegClass::sreg_low16)
            | Arm(ArmInlineAsmRegClass::dreg_low8)
            | Arm(ArmInlineAsmRegClass::qreg_low4) => "x",
            Arm(ArmInlineAsmRegClass::dreg) | Arm(ArmInlineAsmRegClass::qreg) => "w",
            Hexagon(HexagonInlineAsmRegClass::reg) => "r",
            Hexagon(HexagonInlineAsmRegClass::preg) => unreachable!("clobber-only"),
            LoongArch(LoongArchInlineAsmRegClass::reg) => "r",
            LoongArch(LoongArchInlineAsmRegClass::freg) => "f",
            Mips(MipsInlineAsmRegClass::reg) => "r",
            Mips(MipsInlineAsmRegClass::freg) => "f",
            Nvptx(NvptxInlineAsmRegClass::reg16) => "h",
            Nvptx(NvptxInlineAsmRegClass::reg32) => "r",
            Nvptx(NvptxInlineAsmRegClass::reg64) => "l",
            PowerPC(PowerPCInlineAsmRegClass::reg) => "r",
            PowerPC(PowerPCInlineAsmRegClass::reg_nonzero) => "b",
            PowerPC(PowerPCInlineAsmRegClass::freg) => "f",
            PowerPC(PowerPCInlineAsmRegClass::vreg) => "v",
            PowerPC(PowerPCInlineAsmRegClass::cr) | PowerPC(PowerPCInlineAsmRegClass::xer) => {
                unreachable!("clobber-only")
            }
            RiscV(RiscVInlineAsmRegClass::reg) => "r",
            RiscV(RiscVInlineAsmRegClass::freg) => "f",
            RiscV(RiscVInlineAsmRegClass::vreg) => unreachable!("clobber-only"),
            X86(X86InlineAsmRegClass::reg) => "r",
            X86(X86InlineAsmRegClass::reg_abcd) => "Q",
            X86(X86InlineAsmRegClass::reg_byte) => "q",
            X86(X86InlineAsmRegClass::xmm_reg) | X86(X86InlineAsmRegClass::ymm_reg) => "x",
            X86(X86InlineAsmRegClass::zmm_reg) => "v",
            X86(X86InlineAsmRegClass::kreg) => "^Yk",
            X86(
                X86InlineAsmRegClass::x87_reg
                | X86InlineAsmRegClass::mmx_reg
                | X86InlineAsmRegClass::kreg0
                | X86InlineAsmRegClass::tmm_reg,
            ) => unreachable!("clobber-only"),
            Wasm(WasmInlineAsmRegClass::local) => "r",
            Bpf(BpfInlineAsmRegClass::reg) => "r",
            Bpf(BpfInlineAsmRegClass::wreg) => "w",
            Avr(AvrInlineAsmRegClass::reg) => "r",
            Avr(AvrInlineAsmRegClass::reg_upper) => "d",
            Avr(AvrInlineAsmRegClass::reg_pair) => "r",
            Avr(AvrInlineAsmRegClass::reg_iw) => "w",
            Avr(AvrInlineAsmRegClass::reg_ptr) => "e",
            S390x(S390xInlineAsmRegClass::reg) => "r",
            S390x(S390xInlineAsmRegClass::reg_addr) => "a",
            S390x(S390xInlineAsmRegClass::freg) => "f",
            S390x(S390xInlineAsmRegClass::vreg) => "v",
            S390x(S390xInlineAsmRegClass::areg) => {
                unreachable!("clobber-only")
            }
            Sparc(SparcInlineAsmRegClass::reg) => "r",
            Sparc(SparcInlineAsmRegClass::yreg) => unreachable!("clobber-only"),
            Msp430(Msp430InlineAsmRegClass::reg) => "r",
            M68k(M68kInlineAsmRegClass::reg) => "r",
            M68k(M68kInlineAsmRegClass::reg_addr) => "a",
            M68k(M68kInlineAsmRegClass::reg_data) => "d",
            CSKY(CSKYInlineAsmRegClass::reg) => "r",
            CSKY(CSKYInlineAsmRegClass::freg) => "f",
            SpirV(SpirVInlineAsmRegClass::reg) => bug!("LLVM backend does not support SPIR-V"),
            Err => unreachable!(),
        }
        .to_string(),
    }
}

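/// Converts a modifier into LLVM's equivalent modifier.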
fn modifier_to_llvm(
    arch: InlineAsmArch,
    reg: InlineAsmRegClass,
    modifier: Option<char>,
) -> Option<char> {
    use InlineAsmRegClass::*;
    // The modifiers can be retrieved from
    // https://doc.rust-lang.org/nightly/reference/inline-assembly.html#template-modifiers
    match reg {
        AArch64(AArch64InlineAsmRegClass::reg) => modifier,
        AArch64(AArch64InlineAsmRegClass::vreg) | AArch64(AArch64InlineAsmRegClass::vreg_low16) => {
            if modifier == Some('v') {
                None
            } else {
                modifier
            }
        }
        AArch64(AArch64InlineAsmRegClass::preg) => unreachable!("clobber-only"),
        Arm(ArmInlineAsmRegClass::reg) => None,
        Arm(ArmInlineAsmRegClass::sreg) | Arm(ArmInlineAsmRegClass::sreg_low16) => None,
        Arm(ArmInlineAsmRegClass::dreg)
        | Arm(ArmInlineAsmRegClass::dreg_low16)
        | Arm(ArmInlineAsmRegClass::dreg_low8) => Some('P'),
        Arm(ArmInlineAsmRegClass::qreg)
        | Arm(ArmInlineAsmRegClass::qreg_low8)
        | Arm(ArmInlineAsmRegClass::qreg_low4) => {
            if modifier.is_none() {
                Some('q')
            } else {
                modifier
            }
        }
        Hexagon(_) => None,
        LoongArch(_) => None,
        Mips(_) => None,
        Nvptx(_) => None,
        PowerPC(_) => None,
        RiscV(RiscVInlineAsmRegClass::reg) | RiscV(RiscVInlineAsmRegClass::freg) => None,
        RiscV(RiscVInlineAsmRegClass::vreg) => unreachable!("clobber-only"),
        X86(X86InlineAsmRegClass::reg) | X86(X86InlineAsmRegClass::reg_abcd) => match modifier {
            None if arch == InlineAsmArch::X86_64 => Some('q'),
            None => Some('k'),
            Some('l') => Some('b'),
            Some('h') => Some('h'),
            Some('x') => Some('w'),
            Some('e') => Some('k'),
            Some('r') => Some('q'),
            _ => unreachable!(),
        },
        X86(X86InlineAsmRegClass::reg_byte) => None,
        X86(reg @ X86InlineAsmRegClass::xmm_reg)
        | X86(reg @ X86InlineAsmRegClass::ymm_reg)
        | X86(reg @ X86InlineAsmRegClass::zmm_reg) => match (reg, modifier) {
            (X86InlineAsmRegClass::xmm_reg, None) => Some('x'),
            (X86InlineAsmRegClass::ymm_reg, None) => Some('t'),
            (X86InlineAsmRegClass::zmm_reg, None) => Some('g'),
            (_, Some('x')) => Some('x'),
            (_, Some('y')) => Some('t'),
            (_, Some('z')) => Some('g'),
            _ => unreachable!(),
        },
        X86(X86InlineAsmRegClass::kreg) => None,
        X86(
            X86InlineAsmRegClass::x87_reg
            | X86InlineAsmRegClass::mmx_reg
            | X86InlineAsmRegClass::kreg0
            | X86InlineAsmRegClass::tmm_reg,
        ) => unreachable!("clobber-only"),
        Wasm(WasmInlineAsmRegClass::local) => None,
        Bpf(_) => None,
        Avr(AvrInlineAsmRegClass::reg_pair)
        | Avr(AvrInlineAsmRegClass::reg_iw)
        | Avr(AvrInlineAsmRegClass::reg_ptr) => match modifier {
            Some('h') => Some('B'),
            Some('l') => Some('A'),
            _ => None,
        },
        Avr(_) => None,
        S390x(_) => None,
        Sparc(_) => None,
        Msp430(_) => None,
        SpirV(SpirVInlineAsmRegClass::reg) => bug!("LLVM backend does not support SPIR-V"),
        M68k(_) => None,
        CSKY(_) => None,
        Err => unreachable!(),
    }
}

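/// Type to use for outputs that are discarded. It doesn't really matter what
/// the type is, as long as it is valid for the constraint code.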
fn dummy_output_type<'ll>(cx: &CodegenCx<'ll, '_>, reg: InlineAsmRegClass) -> &'ll Type {
    use InlineAsmRegClass::*;
    match reg {
        AArch64(AArch64InlineAsmRegClass::reg) => cx.type_i32(),
        AArch64(AArch64InlineAsmRegClass::vreg) | AArch64(AArch64InlineAsmRegClass::vreg_low16) => {
            cx.type_vector(cx.type_i64(), 2)
        }
        AArch64(AArch64InlineAsmRegClass::preg) => unreachable!("clobber-only"),
        Arm(ArmInlineAsmRegClass::reg) => cx.type_i32(),
        Arm(ArmInlineAsmRegClass::sreg) | Arm(ArmInlineAsmRegClass::sreg_low16) => cx.type_f32(),
        Arm(ArmInlineAsmRegClass::dreg)
        | Arm(ArmInlineAsmRegClass::dreg_low16)
        | Arm(ArmInlineAsmRegClass::dreg_low8) => cx.type_f64(),
        Arm(ArmInlineAsmRegClass::qreg)
        | Arm(ArmInlineAsmRegClass::qreg_low8)
        | Arm(ArmInlineAsmRegClass::qreg_low4) => cx.type_vector(cx.type_i64(), 2),
        Hexagon(HexagonInlineAsmRegClass::reg) => cx.type_i32(),
        Hexagon(HexagonInlineAsmRegClass::preg) => unreachable!("clobber-only"),
        LoongArch(LoongArchInlineAsmRegClass::reg) => cx.type_i32(),
        LoongArch(LoongArchInlineAsmRegClass::freg) => cx.type_f32(),
        Mips(MipsInlineAsmRegClass::reg) => cx.type_i32(),
        Mips(MipsInlineAsmRegClass::freg) => cx.type_f32(),
        Nvptx(NvptxInlineAsmRegClass::reg16) => cx.type_i16(),
        Nvptx(NvptxInlineAsmRegClass::reg32) => cx.type_i32(),
        Nvptx(NvptxInlineAsmRegClass::reg64) => cx.type_i64(),
        PowerPC(PowerPCInlineAsmRegClass::reg) => cx.type_i32(),
        PowerPC(PowerPCInlineAsmRegClass::reg_nonzero) => cx.type_i32(),
        PowerPC(PowerPCInlineAsmRegClass::freg) => cx.type_f64(),
        PowerPC(PowerPCInlineAsmRegClass::vreg) => cx.type_vector(cx.type_i32(), 4),
        PowerPC(PowerPCInlineAsmRegClass::cr) | PowerPC(PowerPCInlineAsmRegClass::xer) => {
            unreachable!("clobber-only")
        }
        RiscV(RiscVInlineAsmRegClass::reg) => cx.type_i32(),
        RiscV(RiscVInlineAsmRegClass::freg) => cx.type_f32(),
        RiscV(RiscVInlineAsmRegClass::vreg) => unreachable!("clobber-only"),
        X86(X86InlineAsmRegClass::reg) | X86(X86InlineAsmRegClass::reg_abcd) => cx.type_i32(),
        X86(X86InlineAsmRegClass::reg_byte) => cx.type_i8(),
        X86(X86InlineAsmRegClass::xmm_reg)
        | X86(X86InlineAsmRegClass::ymm_reg)
        | X86(X86InlineAsmRegClass::zmm_reg) => cx.type_f32(),
        X86(X86InlineAsmRegClass::kreg) => cx.type_i16(),
        X86(
            X86InlineAsmRegClass::x87_reg
            | X86InlineAsmRegClass::mmx_reg
            | X86InlineAsmRegClass::kreg0
            | X86InlineAsmRegClass::tmm_reg,
        ) => unreachable!("clobber-only"),
        Wasm(WasmInlineAsmRegClass::local) => cx.type_i32(),
        Bpf(BpfInlineAsmRegClass::reg) => cx.type_i64(),
        Bpf(BpfInlineAsmRegClass::wreg) => cx.type_i32(),
        Avr(AvrInlineAsmRegClass::reg) => cx.type_i8(),
        Avr(AvrInlineAsmRegClass::reg_upper) => cx.type_i8(),
        Avr(AvrInlineAsmRegClass::reg_pair) => cx.type_i16(),
        Avr(AvrInlineAsmRegClass::reg_iw) => cx.type_i16(),
        Avr(AvrInlineAsmRegClass::reg_ptr) => cx.type_i16(),
        S390x(S390xInlineAsmRegClass::reg | S390xInlineAsmRegClass::reg_addr) => cx.type_i32(),
        S390x(S390xInlineAsmRegClass::freg) => cx.type_f64(),
        S390x(S390xInlineAsmRegClass::vreg) => cx.type_vector(cx.type_i64(), 2),
        S390x(S390xInlineAsmRegClass::areg) => {
            unreachable!("clobber-only")
        }
        Sparc(SparcInlineAsmRegClass::reg) => cx.type_i32(),
        Sparc(SparcInlineAsmRegClass::yreg) => unreachable!("clobber-only"),
        Msp430(Msp430InlineAsmRegClass::reg) => cx.type_i16(),
        M68k(M68kInlineAsmRegClass::reg) => cx.type_i32(),
        M68k(M68kInlineAsmRegClass::reg_addr) => cx.type_i32(),
        M68k(M68kInlineAsmRegClass::reg_data) => cx.type_i32(),
        CSKY(CSKYInlineAsmRegClass::reg) => cx.type_i32(),
        CSKY(CSKYInlineAsmRegClass::freg) => cx.type_f32(),
        SpirV(SpirVInlineAsmRegClass::reg) => bug!("LLVM backend does not support SPIR-V"),
        Err => unreachable!(),
    }
}

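/// Helper function to get the LLVM type for a Scalar. Pointers are returned as
/// the equivalent integer type.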
fn llvm_asm_scalar_type<'ll>(cx: &CodegenCx<'ll, '_>, scalar: Scalar) -> &'ll Type {
    let dl = &cx.tcx.data_layout;
    match scalar.primitive() {
        Primitive::Int(Integer::I8, _) => cx.type_i8(),
        Primitive::Int(Integer::I16, _) => cx.type_i16(),
        Primitive::Int(Integer::I32, _) => cx.type_i32(),
        Primitive::Int(Integer::I64, _) => cx.type_i64(),
        Primitive::Float(Float::F16) => cx.type_f16(),
        Primitive::Float(Float::F32) => cx.type_f32(),
        Primitive::Float(Float::F64) => cx.type_f64(),
        Primitive::Float(Float::F128) => cx.type_f128(),
        // Pointers are represented by their pointer-sized integer equivalent
        Primitive::Pointer(_) => cx.type_from_integer(dl.ptr_sized_integer()),
        _ => unreachable!(),
    }
}

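/// Returns `true` if any of the given target features is enabled for the
/// function being compiled.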
fn any_target_feature_enabled(
    cx: &CodegenCx<'_, '_>,
    instance: Instance<'_>,
    features: &[Symbol],
) -> bool {
    let enabled = cx.tcx.asm_target_features(instance.def_id());
    features.iter().any(|feat| enabled.contains(feat))
}

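/// Fixes up an input value so that it has the type LLVM expects for the given
/// register class, e.g. widening scalars into vectors or NaN-boxing `f16`.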
fn llvm_fixup_input<'ll, 'tcx>(
    bx: &mut Builder<'_, 'll, 'tcx>,
    mut value: &'ll Value,
    reg: InlineAsmRegClass,
    layout: &TyAndLayout<'tcx>,
    instance: Instance<'_>,
) -> &'ll Value {
    use InlineAsmRegClass::*;
    let dl = &bx.tcx.data_layout;
    match (reg, layout.backend_repr) {
        (AArch64(AArch64InlineAsmRegClass::vreg), BackendRepr::Scalar(s)) => {
            if let Primitive::Int(Integer::I8, _) = s.primitive() {
                let vec_ty = bx.cx.type_vector(bx.cx.type_i8(), 8);
                bx.insert_element(bx.const_undef(vec_ty), value, bx.const_i32(0))
            } else {
                value
            }
        }
        (AArch64(AArch64InlineAsmRegClass::vreg_low16), BackendRepr::Scalar(s))
            if s.primitive() != Primitive::Float(Float::F128) =>
        {
            let elem_ty = llvm_asm_scalar_type(bx.cx, s);
            let count = 16 / layout.size.bytes();
            let vec_ty = bx.cx.type_vector(elem_ty, count);
            if let Primitive::Pointer(_) = s.primitive() {
                // Pointers can't be inserted into a vector directly, so cast
                // them to a pointer-sized integer first.
                let t = bx.type_from_integer(dl.ptr_sized_integer());
                value = bx.ptrtoint(value, t);
            }
            bx.insert_element(bx.const_undef(vec_ty), value, bx.const_i32(0))
        }
        (
            AArch64(AArch64InlineAsmRegClass::vreg_low16),
            BackendRepr::SimdVector { element, count },
        ) if layout.size.bytes() == 8 => {
            let elem_ty = llvm_asm_scalar_type(bx.cx, element);
            let vec_ty = bx.cx.type_vector(elem_ty, count);
            let indices: Vec<_> = (0..count * 2).map(|x| bx.const_i32(x as i32)).collect();
            bx.shuffle_vector(value, bx.const_undef(vec_ty), bx.const_vector(&indices))
        }
        (X86(X86InlineAsmRegClass::reg_abcd), BackendRepr::Scalar(s))
            if s.primitive() == Primitive::Float(Float::F64) =>
        {
            bx.bitcast(value, bx.cx.type_i64())
        }
        (
            X86(X86InlineAsmRegClass::xmm_reg | X86InlineAsmRegClass::zmm_reg),
            BackendRepr::SimdVector { .. },
        ) if layout.size.bytes() == 64 => bx.bitcast(value, bx.cx.type_vector(bx.cx.type_f64(), 8)),
        (
            X86(
                X86InlineAsmRegClass::xmm_reg
                | X86InlineAsmRegClass::ymm_reg
                | X86InlineAsmRegClass::zmm_reg,
            ),
            BackendRepr::Scalar(s),
        ) if bx.sess().asm_arch == Some(InlineAsmArch::X86)
            && s.primitive() == Primitive::Float(Float::F128) =>
        {
            bx.bitcast(value, bx.type_vector(bx.type_i32(), 4))
        }
        (
            X86(
                X86InlineAsmRegClass::xmm_reg
                | X86InlineAsmRegClass::ymm_reg
                | X86InlineAsmRegClass::zmm_reg,
            ),
            BackendRepr::Scalar(s),
        ) if s.primitive() == Primitive::Float(Float::F16) => {
            let value = bx.insert_element(
                bx.const_undef(bx.type_vector(bx.type_f16(), 8)),
                value,
                bx.const_usize(0),
            );
            bx.bitcast(value, bx.type_vector(bx.type_i16(), 8))
        }
        (
            X86(
                X86InlineAsmRegClass::xmm_reg
                | X86InlineAsmRegClass::ymm_reg
                | X86InlineAsmRegClass::zmm_reg,
            ),
            BackendRepr::SimdVector { element, count: count @ (8 | 16) },
        ) if element.primitive() == Primitive::Float(Float::F16) => {
            bx.bitcast(value, bx.type_vector(bx.type_i16(), count))
        }
        (
            Arm(ArmInlineAsmRegClass::sreg | ArmInlineAsmRegClass::sreg_low16),
            BackendRepr::Scalar(s),
        ) => {
            if let Primitive::Int(Integer::I32, _) = s.primitive() {
                bx.bitcast(value, bx.cx.type_f32())
            } else {
                value
            }
        }
        (
            Arm(
                ArmInlineAsmRegClass::dreg
                | ArmInlineAsmRegClass::dreg_low8
                | ArmInlineAsmRegClass::dreg_low16,
            ),
            BackendRepr::Scalar(s),
        ) => {
            if let Primitive::Int(Integer::I64, _) = s.primitive() {
                bx.bitcast(value, bx.cx.type_f64())
            } else {
                value
            }
        }
        (
            Arm(
                ArmInlineAsmRegClass::dreg
                | ArmInlineAsmRegClass::dreg_low8
                | ArmInlineAsmRegClass::dreg_low16
                | ArmInlineAsmRegClass::qreg
                | ArmInlineAsmRegClass::qreg_low4
                | ArmInlineAsmRegClass::qreg_low8,
            ),
            BackendRepr::SimdVector { element, count: count @ (4 | 8) },
        ) if element.primitive() == Primitive::Float(Float::F16) => {
            bx.bitcast(value, bx.type_vector(bx.type_i16(), count))
        }
        (Mips(MipsInlineAsmRegClass::reg), BackendRepr::Scalar(s)) => {
            match s.primitive() {
                // MIPS only supports register-length arithmetics.
                Primitive::Int(Integer::I8 | Integer::I16, _) => bx.zext(value, bx.cx.type_i32()),
                Primitive::Float(Float::F32) => bx.bitcast(value, bx.cx.type_i32()),
                Primitive::Float(Float::F64) => bx.bitcast(value, bx.cx.type_i64()),
                _ => value,
            }
        }
        (RiscV(RiscVInlineAsmRegClass::freg), BackendRepr::Scalar(s))
            if s.primitive() == Primitive::Float(Float::F16)
                && !any_target_feature_enabled(bx, instance, &[sym::zfhmin, sym::zfh]) =>
        {
            // Smaller floats are always "NaN-boxed" inside larger floats on RISC-V.
            let value = bx.bitcast(value, bx.type_i16());
            let value = bx.zext(value, bx.type_i32());
            let value = bx.or(value, bx.const_u32(0xFFFF_0000));
            bx.bitcast(value, bx.type_f32())
        }
        (PowerPC(PowerPCInlineAsmRegClass::vreg), BackendRepr::Scalar(s))
            if s.primitive() == Primitive::Float(Float::F32) =>
        {
            let value = bx.insert_element(
                bx.const_undef(bx.type_vector(bx.type_f32(), 4)),
                value,
                bx.const_usize(0),
            );
            bx.bitcast(value, bx.type_vector(bx.type_f32(), 4))
        }
        (PowerPC(PowerPCInlineAsmRegClass::vreg), BackendRepr::Scalar(s))
            if s.primitive() == Primitive::Float(Float::F64) =>
        {
            let value = bx.insert_element(
                bx.const_undef(bx.type_vector(bx.type_f64(), 2)),
                value,
                bx.const_usize(0),
            );
            bx.bitcast(value, bx.type_vector(bx.type_f64(), 2))
        }
        _ => value,
    }
}

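/// Undoes the fixups performed by `llvm_fixup_input`, converting the raw
/// value LLVM produced for an output back to the type the operand expects.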
fn llvm_fixup_output<'ll, 'tcx>(
    bx: &mut Builder<'_, 'll, 'tcx>,
    mut value: &'ll Value,
    reg: InlineAsmRegClass,
    layout: &TyAndLayout<'tcx>,
    instance: Instance<'_>,
) -> &'ll Value {
    use InlineAsmRegClass::*;
    match (reg, layout.backend_repr) {
        (AArch64(AArch64InlineAsmRegClass::vreg), BackendRepr::Scalar(s)) => {
            if let Primitive::Int(Integer::I8, _) = s.primitive() {
                bx.extract_element(value, bx.const_i32(0))
            } else {
                value
            }
        }
        (AArch64(AArch64InlineAsmRegClass::vreg_low16), BackendRepr::Scalar(s))
            if s.primitive() != Primitive::Float(Float::F128) =>
        {
            value = bx.extract_element(value, bx.const_i32(0));
            if let Primitive::Pointer(_) = s.primitive() {
                value = bx.inttoptr(value, layout.llvm_type(bx.cx));
            }
            value
        }
        (
            AArch64(AArch64InlineAsmRegClass::vreg_low16),
            BackendRepr::SimdVector { element, count },
        ) if layout.size.bytes() == 8 => {
            let elem_ty = llvm_asm_scalar_type(bx.cx, element);
            let vec_ty = bx.cx.type_vector(elem_ty, count * 2);
            let indices: Vec<_> = (0..count).map(|x| bx.const_i32(x as i32)).collect();
            bx.shuffle_vector(value, bx.const_undef(vec_ty), bx.const_vector(&indices))
        }
        (X86(X86InlineAsmRegClass::reg_abcd), BackendRepr::Scalar(s))
            if s.primitive() == Primitive::Float(Float::F64) =>
        {
            bx.bitcast(value, bx.cx.type_f64())
        }
        (
            X86(X86InlineAsmRegClass::xmm_reg | X86InlineAsmRegClass::zmm_reg),
            BackendRepr::SimdVector { .. },
        ) if layout.size.bytes() == 64 => bx.bitcast(value, layout.llvm_type(bx.cx)),
        (
            X86(
                X86InlineAsmRegClass::xmm_reg
                | X86InlineAsmRegClass::ymm_reg
                | X86InlineAsmRegClass::zmm_reg,
            ),
            BackendRepr::Scalar(s),
        ) if bx.sess().asm_arch == Some(InlineAsmArch::X86)
            && s.primitive() == Primitive::Float(Float::F128) =>
        {
            bx.bitcast(value, bx.type_f128())
        }
        (
            X86(
                X86InlineAsmRegClass::xmm_reg
                | X86InlineAsmRegClass::ymm_reg
                | X86InlineAsmRegClass::zmm_reg,
            ),
            BackendRepr::Scalar(s),
        ) if s.primitive() == Primitive::Float(Float::F16) => {
            let value = bx.bitcast(value, bx.type_vector(bx.type_f16(), 8));
            bx.extract_element(value, bx.const_usize(0))
        }
        (
            X86(
                X86InlineAsmRegClass::xmm_reg
                | X86InlineAsmRegClass::ymm_reg
                | X86InlineAsmRegClass::zmm_reg,
            ),
            BackendRepr::SimdVector { element, count: count @ (8 | 16) },
        ) if element.primitive() == Primitive::Float(Float::F16) => {
            bx.bitcast(value, bx.type_vector(bx.type_f16(), count))
        }
        (
            Arm(ArmInlineAsmRegClass::sreg | ArmInlineAsmRegClass::sreg_low16),
            BackendRepr::Scalar(s),
        ) => {
            if let Primitive::Int(Integer::I32, _) = s.primitive() {
                bx.bitcast(value, bx.cx.type_i32())
            } else {
                value
            }
        }
        (
            Arm(
                ArmInlineAsmRegClass::dreg
                | ArmInlineAsmRegClass::dreg_low8
                | ArmInlineAsmRegClass::dreg_low16,
            ),
            BackendRepr::Scalar(s),
        ) => {
            if let Primitive::Int(Integer::I64, _) = s.primitive() {
                bx.bitcast(value, bx.cx.type_i64())
            } else {
                value
            }
        }
        (
            Arm(
                ArmInlineAsmRegClass::dreg
                | ArmInlineAsmRegClass::dreg_low8
                | ArmInlineAsmRegClass::dreg_low16
                | ArmInlineAsmRegClass::qreg
                | ArmInlineAsmRegClass::qreg_low4
                | ArmInlineAsmRegClass::qreg_low8,
            ),
            BackendRepr::SimdVector { element, count: count @ (4 | 8) },
        ) if element.primitive() == Primitive::Float(Float::F16) => {
            bx.bitcast(value, bx.type_vector(bx.type_f16(), count))
        }
        (Mips(MipsInlineAsmRegClass::reg), BackendRepr::Scalar(s)) => {
            match s.primitive() {
                // MIPS only supports register-length arithmetics.
                Primitive::Int(Integer::I8, _) => bx.trunc(value, bx.cx.type_i8()),
                Primitive::Int(Integer::I16, _) => bx.trunc(value, bx.cx.type_i16()),
                Primitive::Float(Float::F32) => bx.bitcast(value, bx.cx.type_f32()),
                Primitive::Float(Float::F64) => bx.bitcast(value, bx.cx.type_f64()),
                _ => value,
            }
        }
        (RiscV(RiscVInlineAsmRegClass::freg), BackendRepr::Scalar(s))
            if s.primitive() == Primitive::Float(Float::F16)
                && !any_target_feature_enabled(bx, instance, &[sym::zfhmin, sym::zfh]) =>
        {
            let value = bx.bitcast(value, bx.type_i32());
            let value = bx.trunc(value, bx.type_i16());
            bx.bitcast(value, bx.type_f16())
        }
        (PowerPC(PowerPCInlineAsmRegClass::vreg), BackendRepr::Scalar(s))
            if s.primitive() == Primitive::Float(Float::F32) =>
        {
            let value = bx.bitcast(value, bx.type_vector(bx.type_f32(), 4));
            bx.extract_element(value, bx.const_usize(0))
        }
        (PowerPC(PowerPCInlineAsmRegClass::vreg), BackendRepr::Scalar(s))
            if s.primitive() == Primitive::Float(Float::F64) =>
        {
            let value = bx.bitcast(value, bx.type_vector(bx.type_f64(), 2));
            bx.extract_element(value, bx.const_usize(0))
        }
        _ => value,
    }
}

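/// The LLVM type corresponding to the fixups performed by `llvm_fixup_input`
/// and `llvm_fixup_output`, i.e. the type to declare for the asm output.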
fn llvm_fixup_output_type<'ll, 'tcx>(
    cx: &CodegenCx<'ll, 'tcx>,
    reg: InlineAsmRegClass,
    layout: &TyAndLayout<'tcx>,
    instance: Instance<'_>,
) -> &'ll Type {
    use InlineAsmRegClass::*;
    match (reg, layout.backend_repr) {
        (AArch64(AArch64InlineAsmRegClass::vreg), BackendRepr::Scalar(s)) => {
            if let Primitive::Int(Integer::I8, _) = s.primitive() {
                cx.type_vector(cx.type_i8(), 8)
            } else {
                layout.llvm_type(cx)
            }
        }
        (AArch64(AArch64InlineAsmRegClass::vreg_low16), BackendRepr::Scalar(s))
            if s.primitive() != Primitive::Float(Float::F128) =>
        {
            let elem_ty = llvm_asm_scalar_type(cx, s);
            let count = 16 / layout.size.bytes();
            cx.type_vector(elem_ty, count)
        }
        (
            AArch64(AArch64InlineAsmRegClass::vreg_low16),
            BackendRepr::SimdVector { element, count },
        ) if layout.size.bytes() == 8 => {
            let elem_ty = llvm_asm_scalar_type(cx, element);
            cx.type_vector(elem_ty, count * 2)
        }
        (X86(X86InlineAsmRegClass::reg_abcd), BackendRepr::Scalar(s))
            if s.primitive() == Primitive::Float(Float::F64) =>
        {
            cx.type_i64()
        }
        (
            X86(X86InlineAsmRegClass::xmm_reg | X86InlineAsmRegClass::zmm_reg),
            BackendRepr::SimdVector { .. },
        ) if layout.size.bytes() == 64 => cx.type_vector(cx.type_f64(), 8),
        (
            X86(
                X86InlineAsmRegClass::xmm_reg
                | X86InlineAsmRegClass::ymm_reg
                | X86InlineAsmRegClass::zmm_reg,
            ),
            BackendRepr::Scalar(s),
        ) if cx.sess().asm_arch == Some(InlineAsmArch::X86)
            && s.primitive() == Primitive::Float(Float::F128) =>
        {
            cx.type_vector(cx.type_i32(), 4)
        }
        (
            X86(
                X86InlineAsmRegClass::xmm_reg
                | X86InlineAsmRegClass::ymm_reg
                | X86InlineAsmRegClass::zmm_reg,
            ),
            BackendRepr::Scalar(s),
        ) if s.primitive() == Primitive::Float(Float::F16) => cx.type_vector(cx.type_i16(), 8),
        (
            X86(
                X86InlineAsmRegClass::xmm_reg
                | X86InlineAsmRegClass::ymm_reg
                | X86InlineAsmRegClass::zmm_reg,
            ),
            BackendRepr::SimdVector { element, count: count @ (8 | 16) },
        ) if element.primitive() == Primitive::Float(Float::F16) => {
            cx.type_vector(cx.type_i16(), count)
        }
        (
            Arm(ArmInlineAsmRegClass::sreg | ArmInlineAsmRegClass::sreg_low16),
            BackendRepr::Scalar(s),
        ) => {
            if let Primitive::Int(Integer::I32, _) = s.primitive() {
                cx.type_f32()
            } else {
                layout.llvm_type(cx)
            }
        }
        (
            Arm(
                ArmInlineAsmRegClass::dreg
                | ArmInlineAsmRegClass::dreg_low8
                | ArmInlineAsmRegClass::dreg_low16,
            ),
            BackendRepr::Scalar(s),
        ) => {
            if let Primitive::Int(Integer::I64, _) = s.primitive() {
                cx.type_f64()
            } else {
                layout.llvm_type(cx)
            }
        }
        (
            Arm(
                ArmInlineAsmRegClass::dreg
                | ArmInlineAsmRegClass::dreg_low8
                | ArmInlineAsmRegClass::dreg_low16
                | ArmInlineAsmRegClass::qreg
                | ArmInlineAsmRegClass::qreg_low4
                | ArmInlineAsmRegClass::qreg_low8,
            ),
            BackendRepr::SimdVector { element, count: count @ (4 | 8) },
        ) if element.primitive() == Primitive::Float(Float::F16) => {
            cx.type_vector(cx.type_i16(), count)
        }
        (Mips(MipsInlineAsmRegClass::reg), BackendRepr::Scalar(s)) => {
            match s.primitive() {
                // MIPS only supports register-length arithmetics.
                Primitive::Int(Integer::I8 | Integer::I16, _) => cx.type_i32(),
                Primitive::Float(Float::F32) => cx.type_i32(),
                Primitive::Float(Float::F64) => cx.type_i64(),
                _ => layout.llvm_type(cx),
            }
        }
        (RiscV(RiscVInlineAsmRegClass::freg), BackendRepr::Scalar(s))
            if s.primitive() == Primitive::Float(Float::F16)
                && !any_target_feature_enabled(cx, instance, &[sym::zfhmin, sym::zfh]) =>
        {
            cx.type_f32()
        }
        (PowerPC(PowerPCInlineAsmRegClass::vreg), BackendRepr::Scalar(s))
            if s.primitive() == Primitive::Float(Float::F32) =>
        {
            cx.type_vector(cx.type_f32(), 4)
        }
        (PowerPC(PowerPCInlineAsmRegClass::vreg), BackendRepr::Scalar(s))
            if s.primitive() == Primitive::Float(Float::F64) =>
        {
            cx.type_vector(cx.type_f64(), 2)
        }
        _ => layout.llvm_type(cx),
    }
}