use std::assert_matches::assert_matches;

use rustc_abi::{self as abi, FIRST_VARIANT};
use rustc_middle::ty::adjustment::PointerCoercion;
use rustc_middle::ty::layout::{HasTyCtxt, HasTypingEnv, LayoutOf, TyAndLayout};
use rustc_middle::ty::{self, Instance, Ty, TyCtxt};
use rustc_middle::{bug, mir, span_bug};
use rustc_session::config::OptLevel;
use rustc_span::{DUMMY_SP, Span};
use tracing::{debug, instrument};

use super::operand::{OperandRef, OperandValue};
use super::place::PlaceRef;
use super::{FunctionCx, LocalRef};
use crate::common::IntPredicate;
use crate::traits::*;
use crate::{MemFlags, base};

impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
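    /// Codegen an [`mir::Rvalue`], storing the result into the place `dest`.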
    #[instrument(level = "trace", skip(self, bx))]
    pub(crate) fn codegen_rvalue(
        &mut self,
        bx: &mut Bx,
        dest: PlaceRef<'tcx, Bx::Value>,
        rvalue: &mir::Rvalue<'tcx>,
    ) {
        match *rvalue {
            mir::Rvalue::Use(ref operand) => {
                let cg_operand = self.codegen_operand(bx, operand);
                cg_operand.val.store(bx, dest);
            }

            mir::Rvalue::Cast(
                mir::CastKind::PointerCoercion(PointerCoercion::Unsize, _),
                ref source,
                _,
            ) => {
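                // If the destination is represented as a scalar pair (e.g. a wide
                // pointer), the unsizing coercion can be built entirely on operands,
                // with no temporary in memory.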
                if bx.cx().is_backend_scalar_pair(dest.layout) {
                    let temp = self.codegen_rvalue_operand(bx, rvalue);
                    temp.val.store(bx, dest);
                    return;
                }

                let operand = self.codegen_operand(bx, source);
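                // `coerce_unsized_into` works on places, so an immediate or pair
                // source is first spilled into a temporary alloca.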
                match operand.val {
                    OperandValue::Pair(..) | OperandValue::Immediate(_) => {
                        debug!("codegen_rvalue: creating ugly alloca");
                        let scratch = PlaceRef::alloca(bx, operand.layout);
                        scratch.storage_live(bx);
                        operand.val.store(bx, scratch);
                        base::coerce_unsized_into(bx, scratch, dest);
                        scratch.storage_dead(bx);
                    }
                    OperandValue::Ref(val) => {
                        if val.llextra.is_some() {
                            bug!("unsized coercion on an unsized rvalue");
                        }
                        base::coerce_unsized_into(bx, val.with_type(operand.layout), dest);
                    }
                    OperandValue::ZeroSized => {
                        bug!("unsized coercion on a ZST rvalue");
                    }
                }
            }

            mir::Rvalue::Cast(mir::CastKind::Transmute, ref operand, _ty) => {
                let src = self.codegen_operand(bx, operand);
                self.codegen_transmute(bx, src, dest);
            }

            mir::Rvalue::Repeat(ref elem, count) => {
                if dest.layout.is_zst() {
                    return;
                }

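                // A constant element whose bytes are all uninit lets the whole
                // destination be filled with a single memset of undef.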
                if let mir::Operand::Constant(const_op) = elem {
                    let val = self.eval_mir_constant(const_op);
                    if val.all_bytes_uninit(self.cx.tcx()) {
                        let size = bx.const_usize(dest.layout.size.bytes());
                        bx.memset(
                            dest.val.llval,
                            bx.const_undef(bx.type_i8()),
                            size,
                            dest.val.align,
                            MemFlags::empty(),
                        );
                        return;
                    }
                }

                let cg_elem = self.codegen_operand(bx, elem);

                let try_init_all_same = |bx: &mut Bx, v| {
                    let start = dest.val.llval;
                    let size = bx.const_usize(dest.layout.size.bytes());

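                    // A known constant whose bytes are all identical can be written
                    // with a single memset of that byte.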
                    if let Some(int) = bx.cx().const_to_opt_u128(v, false) {
                        let bytes = &int.to_le_bytes()[..cg_elem.layout.size.bytes_usize()];
                        let first = bytes[0];
                        if bytes[1..].iter().all(|&b| b == first) {
                            let fill = bx.cx().const_u8(first);
                            bx.memset(start, fill, size, dest.val.align, MemFlags::empty());
                            return true;
                        }
                    }

                    let v = bx.from_immediate(v);
                    if bx.cx().val_ty(v) == bx.cx().type_i8() {
                        bx.memset(start, v, size, dest.val.align, MemFlags::empty());
                        return true;
                    }
                    false
                };

                match cg_elem.val {
                    OperandValue::Immediate(v) => {
                        if try_init_all_same(bx, v) {
                            return;
                        }
                    }
                    _ => (),
                }

                let count = self
                    .monomorphize(count)
                    .try_to_target_usize(bx.tcx())
                    .expect("expected monomorphic const in codegen");

                bx.write_operand_repeatedly(cg_elem, count, dest);
            }

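            // `AggregateKind::RawPtr` always builds an operand, so it is handled by
            // the fallback arm below instead of by field-by-field stores.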
            mir::Rvalue::Aggregate(ref kind, ref operands)
                if !matches!(**kind, mir::AggregateKind::RawPtr(..)) =>
            {
                let (variant_index, variant_dest, active_field_index) = match **kind {
                    mir::AggregateKind::Adt(_, variant_index, _, _, active_field_index) => {
                        let variant_dest = dest.project_downcast(bx, variant_index);
                        (variant_index, variant_dest, active_field_index)
                    }
                    _ => (FIRST_VARIANT, dest, None),
                };
                if active_field_index.is_some() {
                    assert_eq!(operands.len(), 1);
                }
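                // Store each non-ZST operand into its field; ZST fields have no
                // backing storage to write to.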
                for (i, operand) in operands.iter_enumerated() {
                    let op = self.codegen_operand(bx, operand);
                    if !op.layout.is_zst() {
                        let field_index = active_field_index.unwrap_or(i);
                        let field = if let mir::AggregateKind::Array(_) = **kind {
                            let llindex = bx.cx().const_usize(field_index.as_u32().into());
                            variant_dest.project_index(bx, llindex)
                        } else {
                            variant_dest.project_field(bx, field_index.as_usize())
                        };
                        op.val.store(bx, field);
                    }
                }
                dest.codegen_set_discr(bx, variant_index);
            }

            _ => {
                assert!(self.rvalue_creates_operand(rvalue, DUMMY_SP));
                let temp = self.codegen_rvalue_operand(bx, rvalue);
                temp.val.store(bx, dest);
            }
        }
    }

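    /// Transmutes the `src` operand into the memory backing `dst`.
    ///
    /// Both layouts are expected to be sized (see the asserts below).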
    fn codegen_transmute(
        &mut self,
        bx: &mut Bx,
        src: OperandRef<'tcx, Bx::Value>,
        dst: PlaceRef<'tcx, Bx::Value>,
    ) {
        assert!(src.layout.is_sized());
        assert!(dst.layout.is_sized());

        if let Some(val) = self.codegen_transmute_operand(bx, src, dst.layout) {
            val.store(bx, dst);
            return;
        }

        match src.val {
            OperandValue::Ref(..) | OperandValue::ZeroSized => {
                span_bug!(
                    self.mir.span,
                    "Operand path should have handled transmute \
                     from {src:?} to place {dst:?}"
                );
            }
            OperandValue::Immediate(..) | OperandValue::Pair(..) => {
                src.val.store(bx, dst.val.with_type(src.layout));
            }
        }
    }

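    /// Attempts to transmute an `OperandValue` into an `OperandValue` of the
    /// layout `cast` without going through memory.
    ///
    /// Returns `None` for combinations that cannot be handled purely on operands
    /// (for example, an immediate source whose destination layout needs a `Ref`).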
    pub(crate) fn codegen_transmute_operand(
        &mut self,
        bx: &mut Bx,
        operand: OperandRef<'tcx, Bx::Value>,
        cast: TyAndLayout<'tcx>,
    ) -> Option<OperandValue<Bx::Value>> {
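        // A transmute between types of different size, or involving an uninhabited
        // type, is always UB. Trap if this point can actually be reached (i.e. the
        // operand type is inhabited) and return an easy-to-produce poison value.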
        if operand.layout.size != cast.size
            || operand.layout.is_uninhabited()
            || cast.is_uninhabited()
        {
            if !operand.layout.is_uninhabited() {
                bx.abort();
            }

            return Some(OperandValue::poison(bx, cast));
        }

        let operand_kind = self.value_kind(operand.layout);
        let cast_kind = self.value_kind(cast);

        match operand.val {
            OperandValue::Ref(source_place_val) => {
                assert_eq!(source_place_val.llextra, None);
                assert_matches!(operand_kind, OperandValueKind::Ref);
                Some(bx.load_operand(source_place_val.with_type(cast)).val)
            }
            OperandValue::ZeroSized => {
                let OperandValueKind::ZeroSized = operand_kind else {
                    bug!("Found {operand_kind:?} for operand {operand:?}");
                };
                if let OperandValueKind::ZeroSized = cast_kind {
                    Some(OperandValue::ZeroSized)
                } else {
                    None
                }
            }
            OperandValue::Immediate(imm) => {
                let OperandValueKind::Immediate(from_scalar) = operand_kind else {
                    bug!("Found {operand_kind:?} for operand {operand:?}");
                };
                if let OperandValueKind::Immediate(to_scalar) = cast_kind
                    && from_scalar.size(self.cx) == to_scalar.size(self.cx)
                {
                    let from_backend_ty = bx.backend_type(operand.layout);
                    let to_backend_ty = bx.backend_type(cast);
                    Some(OperandValue::Immediate(transmute_immediate(
                        bx,
                        imm,
                        from_scalar,
                        from_backend_ty,
                        to_scalar,
                        to_backend_ty,
                    )))
                } else {
                    None
                }
            }
            OperandValue::Pair(imm_a, imm_b) => {
                let OperandValueKind::Pair(in_a, in_b) = operand_kind else {
                    bug!("Found {operand_kind:?} for operand {operand:?}");
                };
                if let OperandValueKind::Pair(out_a, out_b) = cast_kind
                    && in_a.size(self.cx) == out_a.size(self.cx)
                    && in_b.size(self.cx) == out_b.size(self.cx)
                {
                    let in_a_ibty = bx.scalar_pair_element_backend_type(operand.layout, 0, false);
                    let in_b_ibty = bx.scalar_pair_element_backend_type(operand.layout, 1, false);
                    let out_a_ibty = bx.scalar_pair_element_backend_type(cast, 0, false);
                    let out_b_ibty = bx.scalar_pair_element_backend_type(cast, 1, false);
                    Some(OperandValue::Pair(
                        transmute_immediate(bx, imm_a, in_a, in_a_ibty, out_a, out_a_ibty),
                        transmute_immediate(bx, imm_b, in_b, in_b_ibty, out_b, out_b_ibty),
                    ))
                } else {
                    None
                }
            }
        }
    }

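    /// Casts one of the immediates from an [`OperandValue::Immediate`]
    /// or an [`OperandValue::Pair`] to an immediate of the cast-to type.
    ///
    /// Returns `None` if the requested cast is not supported here.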
    fn cast_immediate(
        &self,
        bx: &mut Bx,
        mut imm: Bx::Value,
        from_scalar: abi::Scalar,
        from_backend_ty: Bx::Type,
        to_scalar: abi::Scalar,
        to_backend_ty: Bx::Type,
    ) -> Option<Bx::Value> {
        use abi::Primitive::*;

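        // Scalars passed by value carry no range metadata in the backend (a `char`,
        // for instance, is just an `i32` there), so tell it the input's valid range
        // before converting.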
        assume_scalar_range(bx, imm, from_scalar, from_backend_ty);

        imm = match (from_scalar.primitive(), to_scalar.primitive()) {
            (Int(_, is_signed), Int(..)) => bx.intcast(imm, to_backend_ty, is_signed),
            (Float(_), Float(_)) => {
                let srcsz = bx.cx().float_width(from_backend_ty);
                let dstsz = bx.cx().float_width(to_backend_ty);
                if dstsz > srcsz {
                    bx.fpext(imm, to_backend_ty)
                } else if srcsz > dstsz {
                    bx.fptrunc(imm, to_backend_ty)
                } else {
                    imm
                }
            }
            (Int(_, is_signed), Float(_)) => {
                if is_signed {
                    bx.sitofp(imm, to_backend_ty)
                } else {
                    bx.uitofp(imm, to_backend_ty)
                }
            }
            (Pointer(..), Pointer(..)) => bx.pointercast(imm, to_backend_ty),
            (Int(_, is_signed), Pointer(..)) => {
                let usize_imm = bx.intcast(imm, bx.cx().type_isize(), is_signed);
                bx.inttoptr(usize_imm, to_backend_ty)
            }
            (Float(_), Int(_, is_signed)) => bx.cast_float_to_int(is_signed, imm, to_backend_ty),
            _ => return None,
        };
        Some(imm)
    }

    pub(crate) fn codegen_rvalue_unsized(
        &mut self,
        bx: &mut Bx,
        indirect_dest: PlaceRef<'tcx, Bx::Value>,
        rvalue: &mir::Rvalue<'tcx>,
    ) {
        debug!(
            "codegen_rvalue_unsized(indirect_dest.llval={:?}, rvalue={:?})",
            indirect_dest.val.llval, rvalue
        );

        match *rvalue {
            mir::Rvalue::Use(ref operand) => {
                let cg_operand = self.codegen_operand(bx, operand);
                cg_operand.val.store_unsized(bx, indirect_dest);
            }

            _ => bug!("unsized assignment other than `Rvalue::Use`"),
        }
    }

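    /// Codegen an `Rvalue` that produces its result as an [`OperandRef`],
    /// without needing a destination place.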
    pub(crate) fn codegen_rvalue_operand(
        &mut self,
        bx: &mut Bx,
        rvalue: &mir::Rvalue<'tcx>,
    ) -> OperandRef<'tcx, Bx::Value> {
        assert!(
            self.rvalue_creates_operand(rvalue, DUMMY_SP),
            "cannot codegen {rvalue:?} to operand",
        );

        match *rvalue {
            mir::Rvalue::Cast(ref kind, ref source, mir_cast_ty) => {
                let operand = self.codegen_operand(bx, source);
                debug!("cast operand is {:?}", operand);
                let cast = bx.cx().layout_of(self.monomorphize(mir_cast_ty));

                let val = match *kind {
                    mir::CastKind::PointerExposeProvenance => {
                        assert!(bx.cx().is_backend_immediate(cast));
                        let llptr = operand.immediate();
                        let llcast_ty = bx.cx().immediate_backend_type(cast);
                        let lladdr = bx.ptrtoint(llptr, llcast_ty);
                        OperandValue::Immediate(lladdr)
                    }
                    mir::CastKind::PointerCoercion(PointerCoercion::ReifyFnPointer, _) => {
                        match *operand.layout.ty.kind() {
                            ty::FnDef(def_id, args) => {
                                let instance = ty::Instance::resolve_for_fn_ptr(
                                    bx.tcx(),
                                    bx.typing_env(),
                                    def_id,
                                    args,
                                )
                                .unwrap();
                                OperandValue::Immediate(bx.get_fn_addr(instance))
                            }
                            _ => bug!("{} cannot be reified to a fn ptr", operand.layout.ty),
                        }
                    }
                    mir::CastKind::PointerCoercion(PointerCoercion::ClosureFnPointer(_), _) => {
                        match *operand.layout.ty.kind() {
                            ty::Closure(def_id, args) => {
                                let instance = Instance::resolve_closure(
                                    bx.cx().tcx(),
                                    def_id,
                                    args,
                                    ty::ClosureKind::FnOnce,
                                );
                                OperandValue::Immediate(bx.cx().get_fn_addr(instance))
                            }
                            _ => bug!("{} cannot be cast to a fn ptr", operand.layout.ty),
                        }
                    }
                    mir::CastKind::PointerCoercion(PointerCoercion::UnsafeFnPointer, _) => {
                        operand.val
                    }
                    mir::CastKind::PointerCoercion(PointerCoercion::Unsize, _) => {
                        assert!(bx.cx().is_backend_scalar_pair(cast));
                        let (lldata, llextra) = operand.val.pointer_parts();
                        let (lldata, llextra) =
                            base::unsize_ptr(bx, lldata, operand.layout.ty, cast.ty, llextra);
                        OperandValue::Pair(lldata, llextra)
                    }
                    mir::CastKind::PointerCoercion(
                        PointerCoercion::MutToConstPointer | PointerCoercion::ArrayToPointer,
                        _,
                    ) => {
                        bug!("{kind:?} is for borrowck, and should never appear in codegen");
                    }
                    mir::CastKind::PtrToPtr
                        if bx.cx().is_backend_scalar_pair(operand.layout) =>
                    {
                        if let OperandValue::Pair(data_ptr, meta) = operand.val {
                            if bx.cx().is_backend_scalar_pair(cast) {
                                OperandValue::Pair(data_ptr, meta)
                            } else {
                                OperandValue::Immediate(data_ptr)
                            }
                        } else {
                            bug!("unexpected non-pair operand");
                        }
                    }
                    mir::CastKind::PointerCoercion(PointerCoercion::DynStar, _) => {
                        let (lldata, llextra) = operand.val.pointer_parts();
                        let (lldata, llextra) =
                            base::cast_to_dyn_star(bx, lldata, operand.layout, cast.ty, llextra);
                        OperandValue::Pair(lldata, llextra)
                    }
                    mir::CastKind::IntToInt
                    | mir::CastKind::FloatToInt
                    | mir::CastKind::FloatToFloat
                    | mir::CastKind::IntToFloat
                    | mir::CastKind::PtrToPtr
                    | mir::CastKind::FnPtrToPtr
                    | mir::CastKind::PointerWithExposedProvenance => {
                        let imm = operand.immediate();
                        let operand_kind = self.value_kind(operand.layout);
                        let OperandValueKind::Immediate(from_scalar) = operand_kind else {
                            bug!("Found {operand_kind:?} for operand {operand:?}");
                        };
                        let from_backend_ty = bx.cx().immediate_backend_type(operand.layout);

                        assert!(bx.cx().is_backend_immediate(cast));
                        let to_backend_ty = bx.cx().immediate_backend_type(cast);
                        if operand.layout.is_uninhabited() {
                            let val = OperandValue::Immediate(bx.cx().const_poison(to_backend_ty));
                            return OperandRef { val, layout: cast };
                        }
                        let cast_kind = self.value_kind(cast);
                        let OperandValueKind::Immediate(to_scalar) = cast_kind else {
                            bug!("Found {cast_kind:?} for operand {cast:?}");
                        };

                        self.cast_immediate(bx, imm, from_scalar, from_backend_ty, to_scalar, to_backend_ty)
                            .map(OperandValue::Immediate)
                            .unwrap_or_else(|| {
                                bug!("Unsupported cast of {operand:?} to {cast:?}");
                            })
                    }
                    mir::CastKind::Transmute => {
                        self.codegen_transmute_operand(bx, operand, cast).unwrap_or_else(|| {
                            bug!("Unsupported transmute-as-operand of {operand:?} to {cast:?}");
                        })
                    }
                };
                OperandRef { val, layout: cast }
            }

            mir::Rvalue::Ref(_, bk, place) => {
                let mk_ref = move |tcx: TyCtxt<'tcx>, ty: Ty<'tcx>| {
                    Ty::new_ref(tcx, tcx.lifetimes.re_erased, ty, bk.to_mutbl_lossy())
                };
                self.codegen_place_to_pointer(bx, place, mk_ref)
            }

            mir::Rvalue::CopyForDeref(place) => {
                self.codegen_operand(bx, &mir::Operand::Copy(place))
            }
            mir::Rvalue::RawPtr(kind, place) => {
                let mk_ptr = move |tcx: TyCtxt<'tcx>, ty: Ty<'tcx>| {
                    Ty::new_ptr(tcx, ty, kind.to_mutbl_lossy())
                };
                self.codegen_place_to_pointer(bx, place, mk_ptr)
            }

            mir::Rvalue::Len(place) => {
                let size = self.evaluate_array_len(bx, place);
                OperandRef {
                    val: OperandValue::Immediate(size),
                    layout: bx.cx().layout_of(bx.tcx().types.usize),
                }
            }

            mir::Rvalue::BinaryOp(op_with_overflow, box (ref lhs, ref rhs))
                if let Some(op) = op_with_overflow.overflowing_to_wrapping() =>
            {
                let lhs = self.codegen_operand(bx, lhs);
                let rhs = self.codegen_operand(bx, rhs);
                let result = self.codegen_scalar_checked_binop(
                    bx,
                    op,
                    lhs.immediate(),
                    rhs.immediate(),
                    lhs.layout.ty,
                );
                let val_ty = op.ty(bx.tcx(), lhs.layout.ty, rhs.layout.ty);
                let operand_ty = Ty::new_tup(bx.tcx(), &[val_ty, bx.tcx().types.bool]);
                OperandRef { val: result, layout: bx.cx().layout_of(operand_ty) }
            }
            mir::Rvalue::BinaryOp(op, box (ref lhs, ref rhs)) => {
                let lhs = self.codegen_operand(bx, lhs);
                let rhs = self.codegen_operand(bx, rhs);
                let llresult = match (lhs.val, rhs.val) {
                    (
                        OperandValue::Pair(lhs_addr, lhs_extra),
                        OperandValue::Pair(rhs_addr, rhs_extra),
                    ) => self.codegen_wide_ptr_binop(
                        bx,
                        op,
                        lhs_addr,
                        lhs_extra,
                        rhs_addr,
                        rhs_extra,
                        lhs.layout.ty,
                    ),

                    (OperandValue::Immediate(lhs_val), OperandValue::Immediate(rhs_val)) => self
                        .codegen_scalar_binop(
                            bx,
                            op,
                            lhs_val,
                            rhs_val,
                            lhs.layout.ty,
                            rhs.layout.ty,
                        ),

                    _ => bug!(),
                };
                OperandRef {
                    val: OperandValue::Immediate(llresult),
                    layout: bx.cx().layout_of(op.ty(bx.tcx(), lhs.layout.ty, rhs.layout.ty)),
                }
            }

            mir::Rvalue::UnaryOp(op, ref operand) => {
                let operand = self.codegen_operand(bx, operand);
                let is_float = operand.layout.ty.is_floating_point();
                let (val, layout) = match op {
                    mir::UnOp::Not => {
                        let llval = bx.not(operand.immediate());
                        (OperandValue::Immediate(llval), operand.layout)
                    }
                    mir::UnOp::Neg => {
                        let llval = if is_float {
                            bx.fneg(operand.immediate())
                        } else {
                            bx.neg(operand.immediate())
                        };
                        (OperandValue::Immediate(llval), operand.layout)
                    }
                    mir::UnOp::PtrMetadata => {
                        assert!(operand.layout.ty.is_raw_ptr() || operand.layout.ty.is_ref());
                        let (_, meta) = operand.val.pointer_parts();
                        assert_eq!(operand.layout.fields.count() > 1, meta.is_some());
                        if let Some(meta) = meta {
                            (OperandValue::Immediate(meta), operand.layout.field(self.cx, 1))
                        } else {
                            (OperandValue::ZeroSized, bx.cx().layout_of(bx.tcx().types.unit))
                        }
                    }
                };
                assert!(
                    val.is_expected_variant_for_type(self.cx, layout),
                    "Made wrong variant {val:?} for type {layout:?}",
                );
                OperandRef { val, layout }
            }

            mir::Rvalue::Discriminant(ref place) => {
                let discr_ty = rvalue.ty(self.mir, bx.tcx());
                let discr_ty = self.monomorphize(discr_ty);
                let operand = self.codegen_consume(bx, place.as_ref());
                let discr = operand.codegen_get_discr(self, bx, discr_ty);
                OperandRef {
                    val: OperandValue::Immediate(discr),
                    layout: self.cx.layout_of(discr_ty),
                }
            }

            mir::Rvalue::NullaryOp(ref null_op, ty) => {
                let ty = self.monomorphize(ty);
                let layout = bx.cx().layout_of(ty);
                let val = match null_op {
                    mir::NullOp::SizeOf => {
                        assert!(bx.cx().type_is_sized(ty));
                        let val = layout.size.bytes();
                        bx.cx().const_usize(val)
                    }
                    mir::NullOp::AlignOf => {
                        assert!(bx.cx().type_is_sized(ty));
                        let val = layout.align.abi.bytes();
                        bx.cx().const_usize(val)
                    }
                    mir::NullOp::OffsetOf(fields) => {
                        let val = bx
                            .tcx()
                            .offset_of_subfield(bx.typing_env(), layout, fields.iter())
                            .bytes();
                        bx.cx().const_usize(val)
                    }
                    mir::NullOp::UbChecks => {
                        let val = bx.tcx().sess.ub_checks();
                        bx.cx().const_bool(val)
                    }
                    mir::NullOp::ContractChecks => {
                        let val = bx.tcx().sess.contract_checks();
                        bx.cx().const_bool(val)
                    }
                };
                let tcx = self.cx.tcx();
                OperandRef {
                    val: OperandValue::Immediate(val),
                    layout: self.cx.layout_of(null_op.ty(tcx)),
                }
            }

            mir::Rvalue::ThreadLocalRef(def_id) => {
                assert!(bx.cx().tcx().is_static(def_id));
                let layout = bx.layout_of(bx.cx().tcx().static_ptr_ty(def_id, bx.typing_env()));
                let static_ = if !def_id.is_local() && bx.cx().tcx().needs_thread_local_shim(def_id)
                {
                    let instance = ty::Instance {
                        def: ty::InstanceKind::ThreadLocalShim(def_id),
                        args: ty::GenericArgs::empty(),
                    };
                    let fn_ptr = bx.get_fn_addr(instance);
                    let fn_abi = bx.fn_abi_of_instance(instance, ty::List::empty());
                    let fn_ty = bx.fn_decl_backend_type(fn_abi);
                    let fn_attrs = if bx.tcx().def_kind(instance.def_id()).has_codegen_attrs() {
                        Some(bx.tcx().codegen_fn_attrs(instance.def_id()))
                    } else {
                        None
                    };
                    bx.call(fn_ty, fn_attrs, Some(fn_abi), fn_ptr, &[], None, Some(instance))
                } else {
                    bx.get_static(def_id)
                };
                OperandRef { val: OperandValue::Immediate(static_), layout }
            }
            mir::Rvalue::Use(ref operand) => self.codegen_operand(bx, operand),
            mir::Rvalue::Repeat(..) => bug!("{rvalue:?} in codegen_rvalue_operand"),
            mir::Rvalue::Aggregate(_, ref fields) => {
                let ty = rvalue.ty(self.mir, self.cx.tcx());
                let ty = self.monomorphize(ty);
                let layout = self.cx.layout_of(ty);

                let Some(mut builder) = OperandRef::builder(layout) else {
                    bug!("Cannot use type in operand builder: {layout:?}")
                };
                for (field_idx, field) in fields.iter_enumerated() {
                    let op = self.codegen_operand(bx, field);
                    builder.insert_field(bx, FIRST_VARIANT, field_idx, op);
                }

                builder.build()
            }
            mir::Rvalue::ShallowInitBox(ref operand, content_ty) => {
                let operand = self.codegen_operand(bx, operand);
                let val = operand.immediate();

                let content_ty = self.monomorphize(content_ty);
                let box_layout = bx.cx().layout_of(Ty::new_box(bx.tcx(), content_ty));

                OperandRef { val: OperandValue::Immediate(val), layout: box_layout }
            }
            mir::Rvalue::WrapUnsafeBinder(ref operand, binder_ty) => {
                let operand = self.codegen_operand(bx, operand);
                let binder_ty = self.monomorphize(binder_ty);
                let layout = bx.cx().layout_of(binder_ty);
                OperandRef { val: operand.val, layout }
            }
        }
    }

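    /// Returns the length of the array at `place`, either as a constant read from
    /// the array type (when the local is kept as an operand and has no backing
    /// memory) or by asking the place itself.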
    fn evaluate_array_len(&mut self, bx: &mut Bx, place: mir::Place<'tcx>) -> Bx::Value {
        if let Some(index) = place.as_local()
            && let LocalRef::Operand(op) = self.locals[index]
            && let ty::Array(_, n) = op.layout.ty.kind()
        {
            let n = n.try_to_target_usize(bx.tcx()).expect("expected monomorphic const in codegen");
            return bx.cx().const_usize(n);
        }
        let cg_value = self.codegen_place(bx, place.as_ref());
        cg_value.len(bx.cx())
    }

    fn codegen_place_to_pointer(
        &mut self,
        bx: &mut Bx,
        place: mir::Place<'tcx>,
        mk_ptr_ty: impl FnOnce(TyCtxt<'tcx>, Ty<'tcx>) -> Ty<'tcx>,
    ) -> OperandRef<'tcx, Bx::Value> {
        let cg_place = self.codegen_place(bx, place.as_ref());
        let val = cg_place.val.address();

        let ty = cg_place.layout.ty;
        assert!(
            if bx.cx().tcx().type_has_metadata(ty, bx.cx().typing_env()) {
                matches!(val, OperandValue::Pair(..))
            } else {
                matches!(val, OperandValue::Immediate(..))
            },
            "Address of place was unexpectedly {val:?} for pointee type {ty:?}",
        );

        OperandRef { val, layout: self.cx.layout_of(mk_ptr_ty(self.cx.tcx(), ty)) }
    }

    fn codegen_scalar_binop(
        &mut self,
        bx: &mut Bx,
        op: mir::BinOp,
        lhs: Bx::Value,
        rhs: Bx::Value,
        lhs_ty: Ty<'tcx>,
        rhs_ty: Ty<'tcx>,
    ) -> Bx::Value {
        let is_float = lhs_ty.is_floating_point();
        let is_signed = lhs_ty.is_signed();
        match op {
            mir::BinOp::Add => {
                if is_float {
                    bx.fadd(lhs, rhs)
                } else {
                    bx.add(lhs, rhs)
                }
            }
            mir::BinOp::AddUnchecked => {
                if is_signed {
                    bx.unchecked_sadd(lhs, rhs)
                } else {
                    bx.unchecked_uadd(lhs, rhs)
                }
            }
            mir::BinOp::Sub => {
                if is_float {
                    bx.fsub(lhs, rhs)
                } else {
                    bx.sub(lhs, rhs)
                }
            }
            mir::BinOp::SubUnchecked => {
                if is_signed {
                    bx.unchecked_ssub(lhs, rhs)
                } else {
                    bx.unchecked_usub(lhs, rhs)
                }
            }
            mir::BinOp::Mul => {
                if is_float {
                    bx.fmul(lhs, rhs)
                } else {
                    bx.mul(lhs, rhs)
                }
            }
            mir::BinOp::MulUnchecked => {
                if is_signed {
                    bx.unchecked_smul(lhs, rhs)
                } else {
                    bx.unchecked_umul(lhs, rhs)
                }
            }
            mir::BinOp::Div => {
                if is_float {
                    bx.fdiv(lhs, rhs)
                } else if is_signed {
                    bx.sdiv(lhs, rhs)
                } else {
                    bx.udiv(lhs, rhs)
                }
            }
            mir::BinOp::Rem => {
                if is_float {
                    bx.frem(lhs, rhs)
                } else if is_signed {
                    bx.srem(lhs, rhs)
                } else {
                    bx.urem(lhs, rhs)
                }
            }
            mir::BinOp::BitOr => bx.or(lhs, rhs),
            mir::BinOp::BitAnd => bx.and(lhs, rhs),
            mir::BinOp::BitXor => bx.xor(lhs, rhs),
            mir::BinOp::Offset => {
                let pointee_type = lhs_ty
                    .builtin_deref(true)
                    .unwrap_or_else(|| bug!("deref of non-pointer {:?}", lhs_ty));
                let pointee_layout = bx.cx().layout_of(pointee_type);
                if pointee_layout.is_zst() {
                    lhs
                } else {
                    let llty = bx.cx().backend_type(pointee_layout);
                    if !rhs_ty.is_signed() {
                        bx.inbounds_nuw_gep(llty, lhs, &[rhs])
                    } else {
                        bx.inbounds_gep(llty, lhs, &[rhs])
                    }
                }
            }
            mir::BinOp::Shl | mir::BinOp::ShlUnchecked => {
                let rhs = base::build_shift_expr_rhs(bx, lhs, rhs, op == mir::BinOp::ShlUnchecked);
                bx.shl(lhs, rhs)
            }
            mir::BinOp::Shr | mir::BinOp::ShrUnchecked => {
                let rhs = base::build_shift_expr_rhs(bx, lhs, rhs, op == mir::BinOp::ShrUnchecked);
                if is_signed { bx.ashr(lhs, rhs) } else { bx.lshr(lhs, rhs) }
            }
            mir::BinOp::Ne
            | mir::BinOp::Lt
            | mir::BinOp::Gt
            | mir::BinOp::Eq
            | mir::BinOp::Le
            | mir::BinOp::Ge => {
                if is_float {
                    bx.fcmp(base::bin_op_to_fcmp_predicate(op), lhs, rhs)
                } else {
                    bx.icmp(base::bin_op_to_icmp_predicate(op, is_signed), lhs, rhs)
                }
            }
            mir::BinOp::Cmp => {
                use std::cmp::Ordering;
                assert!(!is_float);
                if let Some(value) = bx.three_way_compare(lhs_ty, lhs, rhs) {
                    return value;
                }
                let pred = |op| base::bin_op_to_icmp_predicate(op, is_signed);
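                // In debug builds, compute `(lhs > rhs) as i8 - (lhs < rhs) as i8`,
                // which tends to produce better unoptimized code than the
                // select-based form in the `else` branch.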
                if bx.cx().tcx().sess.opts.optimize == OptLevel::No {
                    let is_gt = bx.icmp(pred(mir::BinOp::Gt), lhs, rhs);
                    let gtext = bx.zext(is_gt, bx.type_i8());
                    let is_lt = bx.icmp(pred(mir::BinOp::Lt), lhs, rhs);
                    let ltext = bx.zext(is_lt, bx.type_i8());
                    bx.unchecked_ssub(gtext, ltext)
                } else {
                    let is_lt = bx.icmp(pred(mir::BinOp::Lt), lhs, rhs);
                    let is_ne = bx.icmp(pred(mir::BinOp::Ne), lhs, rhs);
                    let ge = bx.select(
                        is_ne,
                        bx.cx().const_i8(Ordering::Greater as i8),
                        bx.cx().const_i8(Ordering::Equal as i8),
                    );
                    bx.select(is_lt, bx.cx().const_i8(Ordering::Less as i8), ge)
                }
            }
            mir::BinOp::AddWithOverflow
            | mir::BinOp::SubWithOverflow
            | mir::BinOp::MulWithOverflow => {
                bug!("{op:?} needs to return a pair, so call codegen_scalar_checked_binop instead")
            }
        }
    }

    fn codegen_wide_ptr_binop(
        &mut self,
        bx: &mut Bx,
        op: mir::BinOp,
        lhs_addr: Bx::Value,
        lhs_extra: Bx::Value,
        rhs_addr: Bx::Value,
        rhs_extra: Bx::Value,
        _input_ty: Ty<'tcx>,
    ) -> Bx::Value {
        match op {
            mir::BinOp::Eq => {
                let lhs = bx.icmp(IntPredicate::IntEQ, lhs_addr, rhs_addr);
                let rhs = bx.icmp(IntPredicate::IntEQ, lhs_extra, rhs_extra);
                bx.and(lhs, rhs)
            }
            mir::BinOp::Ne => {
                let lhs = bx.icmp(IntPredicate::IntNE, lhs_addr, rhs_addr);
                let rhs = bx.icmp(IntPredicate::IntNE, lhs_extra, rhs_extra);
                bx.or(lhs, rhs)
            }
            mir::BinOp::Le | mir::BinOp::Lt | mir::BinOp::Ge | mir::BinOp::Gt => {
                let (op, strict_op) = match op {
                    mir::BinOp::Lt => (IntPredicate::IntULT, IntPredicate::IntULT),
                    mir::BinOp::Le => (IntPredicate::IntULE, IntPredicate::IntULT),
                    mir::BinOp::Gt => (IntPredicate::IntUGT, IntPredicate::IntUGT),
                    mir::BinOp::Ge => (IntPredicate::IntUGE, IntPredicate::IntUGT),
                    _ => bug!(),
                };
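                // Lexicographic comparison of (address, metadata): strictly compare
                // the addresses, and only when they are equal compare the metadata
                // with the possibly-inclusive predicate.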
                let lhs = bx.icmp(strict_op, lhs_addr, rhs_addr);
                let and_lhs = bx.icmp(IntPredicate::IntEQ, lhs_addr, rhs_addr);
                let and_rhs = bx.icmp(op, lhs_extra, rhs_extra);
                let rhs = bx.and(and_lhs, and_rhs);
                bx.or(lhs, rhs)
            }
            _ => {
                bug!("unexpected wide ptr binop");
            }
        }
    }

    fn codegen_scalar_checked_binop(
        &mut self,
        bx: &mut Bx,
        op: mir::BinOp,
        lhs: Bx::Value,
        rhs: Bx::Value,
        input_ty: Ty<'tcx>,
    ) -> OperandValue<Bx::Value> {
        let (val, of) = match op {
            mir::BinOp::Add | mir::BinOp::Sub | mir::BinOp::Mul => {
                let oop = match op {
                    mir::BinOp::Add => OverflowOp::Add,
                    mir::BinOp::Sub => OverflowOp::Sub,
                    mir::BinOp::Mul => OverflowOp::Mul,
                    _ => unreachable!(),
                };
                bx.checked_binop(oop, input_ty, lhs, rhs)
            }
            _ => bug!("Operator `{:?}` is not a checkable operator", op),
        };

        OperandValue::Pair(val, of)
    }

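    /// Returns `true` if the rvalue can be computed directly as an [`OperandRef`],
    /// rather than needing a destination place to be written into.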
    pub(crate) fn rvalue_creates_operand(&self, rvalue: &mir::Rvalue<'tcx>, span: Span) -> bool {
        match *rvalue {
            mir::Rvalue::Cast(mir::CastKind::Transmute, ref operand, cast_ty) => {
                let operand_ty = operand.ty(self.mir, self.cx.tcx());
                let cast_layout = self.cx.layout_of(self.monomorphize(cast_ty));
                let operand_layout = self.cx.layout_of(self.monomorphize(operand_ty));

                match (self.value_kind(operand_layout), self.value_kind(cast_layout)) {
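                    // A `Ref` operand can always be loaded at the cast layout; the
                    // other kinds require matching shapes and scalar sizes.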
                    (OperandValueKind::Ref, _) => true,

                    (OperandValueKind::ZeroSized, OperandValueKind::ZeroSized) => true,

                    (OperandValueKind::ZeroSized, _) | (_, OperandValueKind::ZeroSized) => false,

                    (
                        OperandValueKind::Immediate(..) | OperandValueKind::Pair(..),
                        OperandValueKind::Ref,
                    ) => false,

                    (OperandValueKind::Immediate(a), OperandValueKind::Immediate(b)) => {
                        a.size(self.cx) == b.size(self.cx)
                    }
                    (OperandValueKind::Pair(a0, a1), OperandValueKind::Pair(b0, b1)) => {
                        a0.size(self.cx) == b0.size(self.cx) && a1.size(self.cx) == b1.size(self.cx)
                    }

                    (OperandValueKind::Immediate(..), OperandValueKind::Pair(..))
                    | (OperandValueKind::Pair(..), OperandValueKind::Immediate(..)) => false,
                }
            }
            mir::Rvalue::Ref(..)
            | mir::Rvalue::CopyForDeref(..)
            | mir::Rvalue::RawPtr(..)
            | mir::Rvalue::Len(..)
            | mir::Rvalue::Cast(..)
            | mir::Rvalue::ShallowInitBox(..)
            | mir::Rvalue::BinaryOp(..)
            | mir::Rvalue::UnaryOp(..)
            | mir::Rvalue::Discriminant(..)
            | mir::Rvalue::NullaryOp(..)
            | mir::Rvalue::ThreadLocalRef(_)
            | mir::Rvalue::Use(..)
            | mir::Rvalue::WrapUnsafeBinder(..) => true,
            mir::Rvalue::Repeat(..) => false,
            mir::Rvalue::Aggregate(ref kind, _) => {
                let allowed_kind = match **kind {
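                    // Raw-pointer aggregates always produce an immediate or a pair,
                    // while arrays and coroutine(-closure) aggregates are always
                    // built by storing into a destination place.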
                    mir::AggregateKind::RawPtr(..) => true,
                    mir::AggregateKind::Array(..) => false,
                    mir::AggregateKind::Tuple => true,
                    mir::AggregateKind::Adt(def_id, ..) => {
                        let adt_def = self.cx.tcx().adt_def(def_id);
                        adt_def.is_struct() && !adt_def.repr().simd()
                    }
                    mir::AggregateKind::Closure(..) => true,
                    mir::AggregateKind::Coroutine(..)
                    | mir::AggregateKind::CoroutineClosure(..) => false,
                };
                allowed_kind && {
                    let ty = rvalue.ty(self.mir, self.cx.tcx());
                    let ty = self.monomorphize(ty);
                    let layout = self.cx.spanned_layout_of(ty, span);
                    OperandRef::<Bx::Value>::builder(layout).is_some()
                }
            }
        }
    }

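    /// Gets which variant of [`OperandValue`] is expected for a particular [`TyAndLayout`].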
    fn value_kind(&self, layout: TyAndLayout<'tcx>) -> OperandValueKind {
        if layout.is_zst() {
            OperandValueKind::ZeroSized
        } else if self.cx.is_backend_immediate(layout) {
            assert!(!self.cx.is_backend_scalar_pair(layout));
            OperandValueKind::Immediate(match layout.backend_repr {
                abi::BackendRepr::Scalar(s) => s,
                abi::BackendRepr::SimdVector { element, .. } => element,
                x => span_bug!(self.mir.span, "Couldn't translate {x:?} as backend immediate"),
            })
        } else if self.cx.is_backend_scalar_pair(layout) {
            let abi::BackendRepr::ScalarPair(s1, s2) = layout.backend_repr else {
                span_bug!(
                    self.mir.span,
                    "Couldn't translate {:?} as backend scalar pair",
                    layout.backend_repr,
                );
            };
            OperandValueKind::Pair(s1, s2)
        } else {
            OperandValueKind::Ref
        }
    }
}

#[derive(Debug, Copy, Clone)]
enum OperandValueKind {
    Ref,
    Immediate(abi::Scalar),
    Pair(abi::Scalar, abi::Scalar),
    ZeroSized,
}

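/// Transmutes a single scalar immediate (from an [`OperandValue::Immediate`]
/// or one half of an [`OperandValue::Pair`]) into an immediate of the target scalar.
///
/// The two scalars must have the same size.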
pub(super) fn transmute_immediate<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
    bx: &mut Bx,
    mut imm: Bx::Value,
    from_scalar: abi::Scalar,
    from_backend_ty: Bx::Type,
    to_scalar: abi::Scalar,
    to_backend_ty: Bx::Type,
) -> Bx::Value {
    assert_eq!(from_scalar.size(bx.cx()), to_scalar.size(bx.cx()));

    if from_scalar == to_scalar {
        return imm;
    }

    use abi::Primitive::*;
    imm = bx.from_immediate(imm);

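    // Scalars passed by value carry no range metadata in the backend, so tell it
    // the valid range of the input before converting it.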
    assume_scalar_range(bx, imm, from_scalar, from_backend_ty);

    imm = match (from_scalar.primitive(), to_scalar.primitive()) {
        (Int(..) | Float(_), Int(..) | Float(_)) => {
            if from_backend_ty == to_backend_ty {
                imm
            } else {
                bx.bitcast(imm, to_backend_ty)
            }
        }
        (Pointer(..), Pointer(..)) => bx.pointercast(imm, to_backend_ty),
        (Int(..), Pointer(..)) => bx.ptradd(bx.const_null(bx.type_ptr()), imm),
        (Pointer(..), Int(..)) => bx.ptrtoint(imm, to_backend_ty),
        (Float(_), Pointer(..)) => {
            let int_imm = bx.bitcast(imm, bx.cx().type_isize());
            bx.ptradd(bx.const_null(bx.type_ptr()), int_imm)
        }
        (Pointer(..), Float(_)) => {
            let int_imm = bx.ptrtoint(imm, bx.cx().type_isize());
            bx.bitcast(int_imm, to_backend_ty)
        }
    };

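    // The result may have a narrower valid range than its backend type suggests
    // (e.g. when transmuting to `char`), so assume the output range as well.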
    assume_scalar_range(bx, imm, to_scalar, to_backend_ty);

    imm = bx.to_immediate_scalar(imm, to_scalar);
    imm
}

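/// Informs the backend that `imm` lies within `scalar`'s valid range, unless
/// optimizations are disabled or every bit-pattern of the type is valid anyway.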
fn assume_scalar_range<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
    bx: &mut Bx,
    imm: Bx::Value,
    scalar: abi::Scalar,
    backend_ty: Bx::Type,
) {
    if matches!(bx.cx().sess().opts.optimize, OptLevel::No) || scalar.is_always_valid(bx.cx()) {
        return;
    }

    match scalar.primitive() {
        abi::Primitive::Int(..) => {
            let range = scalar.valid_range(bx.cx());
            bx.assume_integer_range(imm, backend_ty, range);
        }
        abi::Primitive::Pointer(abi::AddressSpace::DATA)
            if !scalar.valid_range(bx.cx()).contains(0) =>
        {
            bx.assume_nonnull(imm);
        }
        abi::Primitive::Pointer(..) | abi::Primitive::Float(..) => {}
    }
}