use rustc_abi::{self as abi, FIRST_VARIANT};
use rustc_middle::ty::adjustment::PointerCoercion;
use rustc_middle::ty::layout::{HasTyCtxt, HasTypingEnv, LayoutOf, TyAndLayout};
use rustc_middle::ty::{self, Instance, Ty, TyCtxt};
use rustc_middle::{bug, mir};
use rustc_session::config::OptLevel;
use rustc_span::{DUMMY_SP, Span};
use tracing::{debug, instrument};

use super::operand::{OperandRef, OperandValue};
use super::place::{PlaceRef, codegen_tag_value};
use super::{FunctionCx, LocalRef};
use crate::common::{IntPredicate, TypeKind};
use crate::traits::*;
use crate::{MemFlags, base};

impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
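    /// Codegen an `Rvalue` whose result is written directly into the
    /// already-evaluated destination place `dest`.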
    #[instrument(level = "trace", skip(self, bx))]
    pub(crate) fn codegen_rvalue(
        &mut self,
        bx: &mut Bx,
        dest: PlaceRef<'tcx, Bx::Value>,
        rvalue: &mir::Rvalue<'tcx>,
    ) {
        match *rvalue {
            mir::Rvalue::Use(ref operand) => {
                let cg_operand = self.codegen_operand(bx, operand);
                cg_operand.val.store(bx, dest);
            }

            mir::Rvalue::Cast(
                mir::CastKind::PointerCoercion(PointerCoercion::Unsize, _),
                ref source,
                _,
            ) => {
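                // A destination represented as a scalar pair (e.g. a wide
                // pointer) can be built by the operand path directly, with no
                // stack temporary.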
                if bx.cx().is_backend_scalar_pair(dest.layout) {
                    let temp = self.codegen_rvalue_operand(bx, rvalue);
                    temp.val.store(bx, dest);
                    return;
                }

                let operand = self.codegen_operand(bx, source);
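                // `coerce_unsized_into` works place-to-place, so an immediate
                // source operand first has to be spilled to a stack slot.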
                match operand.val {
                    OperandValue::Pair(..) | OperandValue::Immediate(_) => {
                        debug!("codegen_rvalue: creating ugly alloca");
                        let scratch = PlaceRef::alloca(bx, operand.layout);
                        scratch.storage_live(bx);
                        operand.val.store(bx, scratch);
                        base::coerce_unsized_into(bx, scratch, dest);
                        scratch.storage_dead(bx);
                    }
                    OperandValue::Ref(val) => {
                        if val.llextra.is_some() {
                            bug!("unsized coercion on an unsized rvalue");
                        }
                        base::coerce_unsized_into(bx, val.with_type(operand.layout), dest);
                    }
                    OperandValue::ZeroSized => {
                        bug!("unsized coercion on a ZST rvalue");
                    }
                }
            }

            mir::Rvalue::Cast(mir::CastKind::Transmute, ref operand, _ty) => {
                let src = self.codegen_operand(bx, operand);
                self.codegen_transmute(bx, src, dest);
            }

            mir::Rvalue::Repeat(ref elem, count) => {
                if dest.layout.is_zst() {
                    return;
                }

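                // A constant element whose bytes are all uninitialized lets
                // the whole array be filled with a single `memset` of undef.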
                if let mir::Operand::Constant(const_op) = elem {
                    let val = self.eval_mir_constant(const_op);
                    if val.all_bytes_uninit(self.cx.tcx()) {
                        let size = bx.const_usize(dest.layout.size.bytes());
                        bx.memset(
                            dest.val.llval,
                            bx.const_undef(bx.type_i8()),
                            size,
                            dest.val.align,
                            MemFlags::empty(),
                        );
                        return;
                    }
                }

                let cg_elem = self.codegen_operand(bx, elem);

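                // If every byte of the (little-endian) element value is the
                // same, or the element is itself one byte wide, the repeat can
                // be lowered to a `memset` of that byte.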
                let try_init_all_same = |bx: &mut Bx, v| {
                    let start = dest.val.llval;
                    let size = bx.const_usize(dest.layout.size.bytes());

                    if let Some(int) = bx.cx().const_to_opt_u128(v, false) {
                        let bytes = &int.to_le_bytes()[..cg_elem.layout.size.bytes_usize()];
                        let first = bytes[0];
                        if bytes[1..].iter().all(|&b| b == first) {
                            let fill = bx.cx().const_u8(first);
                            bx.memset(start, fill, size, dest.val.align, MemFlags::empty());
                            return true;
                        }
                    }

                    let v = bx.from_immediate(v);
                    if bx.cx().val_ty(v) == bx.cx().type_i8() {
                        bx.memset(start, v, size, dest.val.align, MemFlags::empty());
                        return true;
                    }
                    false
                };

                if let OperandValue::Immediate(v) = cg_elem.val {
                    if try_init_all_same(bx, v) {
                        return;
                    }
                }

                let count = self
                    .monomorphize(count)
                    .try_to_target_usize(bx.tcx())
                    .expect("expected monomorphic const in codegen");

                bx.write_operand_repeatedly(cg_elem, count, dest);
            }

            mir::Rvalue::Aggregate(ref kind, ref operands)
                if !matches!(**kind, mir::AggregateKind::RawPtr(..)) =>
            {
                let (variant_index, variant_dest, active_field_index) = match **kind {
                    mir::AggregateKind::Adt(_, variant_index, _, _, active_field_index) => {
                        let variant_dest = dest.project_downcast(bx, variant_index);
                        (variant_index, variant_dest, active_field_index)
                    }
                    _ => (FIRST_VARIANT, dest, None),
                };
                if active_field_index.is_some() {
                    assert_eq!(operands.len(), 1);
                }
                for (i, operand) in operands.iter_enumerated() {
                    let op = self.codegen_operand(bx, operand);
                    if !op.layout.is_zst() {
                        let field_index = active_field_index.unwrap_or(i);
                        let field = if let mir::AggregateKind::Array(_) = **kind {
                            let llindex = bx.cx().const_usize(field_index.as_u32().into());
                            variant_dest.project_index(bx, llindex)
                        } else {
                            variant_dest.project_field(bx, field_index.as_usize())
                        };
                        op.val.store(bx, field);
                    }
                }
                dest.codegen_set_discr(bx, variant_index);
            }

            _ => {
                assert!(self.rvalue_creates_operand(rvalue, DUMMY_SP));
                let temp = self.codegen_rvalue_operand(bx, rvalue);
                temp.val.store(bx, dest);
            }
        }
    }

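    /// Transmutes `src` and stores the result into `dst`.
    ///
    /// A size mismatch, or an uninhabited type on either side, means this
    /// transmute can never actually be reached, so it is lowered to
    /// `assume(false)` instead of a store.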
    fn codegen_transmute(
        &mut self,
        bx: &mut Bx,
        src: OperandRef<'tcx, Bx::Value>,
        dst: PlaceRef<'tcx, Bx::Value>,
    ) {
        assert!(src.layout.is_sized());
        assert!(dst.layout.is_sized());

        if src.layout.size != dst.layout.size
            || src.layout.is_uninhabited()
            || dst.layout.is_uninhabited()
        {
            bx.assume(bx.cx().const_bool(false));
        } else {
            src.val.store(bx, dst.val.with_type(src.layout));
        }
    }

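    /// Transmutes an operand to the `cast` layout, returning the value in
    /// operand form.
    ///
    /// Impossible transmutes (size mismatch or an uninhabited type) emit an
    /// `abort` and return poison, since any use of the result is UB anyway.
    /// The cast layout must not be `Memory` unless it is a ZST.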
    pub(crate) fn codegen_transmute_operand(
        &mut self,
        bx: &mut Bx,
        operand: OperandRef<'tcx, Bx::Value>,
        cast: TyAndLayout<'tcx>,
    ) -> OperandValue<Bx::Value> {
        if operand.layout.size != cast.size
            || operand.layout.is_uninhabited()
            || cast.is_uninhabited()
        {
            if !operand.layout.is_uninhabited() {
                bx.abort();
            }

            return OperandValue::poison(bx, cast);
        }

        match (operand.val, operand.layout.backend_repr, cast.backend_repr) {
            _ if cast.is_zst() => OperandValue::ZeroSized,
            (_, _, abi::BackendRepr::Memory { .. }) => {
                bug!("Cannot `codegen_transmute_operand` to non-ZST memory-ABI output {cast:?}");
            }
            (OperandValue::Ref(source_place_val), abi::BackendRepr::Memory { .. }, _) => {
                assert_eq!(source_place_val.llextra, None);
                bx.load_operand(source_place_val.with_type(cast)).val
            }
            (
                OperandValue::Immediate(imm),
                abi::BackendRepr::Scalar(from_scalar),
                abi::BackendRepr::Scalar(to_scalar),
            ) => OperandValue::Immediate(transmute_scalar(bx, imm, from_scalar, to_scalar)),
            (
                OperandValue::Pair(imm_a, imm_b),
                abi::BackendRepr::ScalarPair(in_a, in_b),
                abi::BackendRepr::ScalarPair(out_a, out_b),
            ) => OperandValue::Pair(
                transmute_scalar(bx, imm_a, in_a, out_a),
                transmute_scalar(bx, imm_b, in_b, out_b),
            ),
            _ => bug!("Cannot `codegen_transmute_operand` {operand:?} to {cast:?}"),
        }
    }

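    /// Casts the immediate for one of the numeric/pointer `mir::CastKind`s
    /// (`IntToInt`, `FloatToFloat`, `IntToFloat`, `FloatToInt`, `PtrToPtr`,
    /// etc.) to the target scalar, returning `None` for an unsupported
    /// combination of primitives.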
    fn cast_immediate(
        &self,
        bx: &mut Bx,
        mut imm: Bx::Value,
        from_scalar: abi::Scalar,
        from_backend_ty: Bx::Type,
        to_scalar: abi::Scalar,
        to_backend_ty: Bx::Type,
    ) -> Option<Bx::Value> {
        use abi::Primitive::*;

        assume_scalar_range(bx, imm, from_scalar, from_backend_ty);

        imm = match (from_scalar.primitive(), to_scalar.primitive()) {
            (Int(_, is_signed), Int(..)) => bx.intcast(imm, to_backend_ty, is_signed),
            (Float(_), Float(_)) => {
                let srcsz = bx.cx().float_width(from_backend_ty);
                let dstsz = bx.cx().float_width(to_backend_ty);
                if dstsz > srcsz {
                    bx.fpext(imm, to_backend_ty)
                } else if srcsz > dstsz {
                    bx.fptrunc(imm, to_backend_ty)
                } else {
                    imm
                }
            }
            (Int(_, is_signed), Float(_)) => {
                if is_signed {
                    bx.sitofp(imm, to_backend_ty)
                } else {
                    bx.uitofp(imm, to_backend_ty)
                }
            }
            (Pointer(..), Pointer(..)) => bx.pointercast(imm, to_backend_ty),
            (Int(_, is_signed), Pointer(..)) => {
                let usize_imm = bx.intcast(imm, bx.cx().type_isize(), is_signed);
                bx.inttoptr(usize_imm, to_backend_ty)
            }
            (Float(_), Int(_, is_signed)) => bx.cast_float_to_int(is_signed, imm, to_backend_ty),
            _ => return None,
        };
        Some(imm)
    }

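    /// Codegen an `Rvalue` whose result is stored through the (wide) pointer
    /// in `indirect_dest`. Only `Rvalue::Use` can produce an unsized value.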
    pub(crate) fn codegen_rvalue_unsized(
        &mut self,
        bx: &mut Bx,
        indirect_dest: PlaceRef<'tcx, Bx::Value>,
        rvalue: &mir::Rvalue<'tcx>,
    ) {
        debug!(
            "codegen_rvalue_unsized(indirect_dest.llval={:?}, rvalue={:?})",
            indirect_dest.val.llval, rvalue
        );

        match *rvalue {
            mir::Rvalue::Use(ref operand) => {
                let cg_operand = self.codegen_operand(bx, operand);
                cg_operand.val.store_unsized(bx, indirect_dest);
            }

            _ => bug!("unsized assignment other than `Rvalue::Use`"),
        }
    }

    pub(crate) fn codegen_rvalue_operand(
        &mut self,
        bx: &mut Bx,
        rvalue: &mir::Rvalue<'tcx>,
    ) -> OperandRef<'tcx, Bx::Value> {
        assert!(
            self.rvalue_creates_operand(rvalue, DUMMY_SP),
            "cannot codegen {rvalue:?} to operand",
        );

        match *rvalue {
            mir::Rvalue::Cast(ref kind, ref source, mir_cast_ty) => {
                let operand = self.codegen_operand(bx, source);
                debug!("cast operand is {:?}", operand);
                let cast = bx.cx().layout_of(self.monomorphize(mir_cast_ty));

                let val = match *kind {
                    mir::CastKind::PointerExposeProvenance => {
                        assert!(bx.cx().is_backend_immediate(cast));
                        let llptr = operand.immediate();
                        let llcast_ty = bx.cx().immediate_backend_type(cast);
                        let lladdr = bx.ptrtoint(llptr, llcast_ty);
                        OperandValue::Immediate(lladdr)
                    }
                    mir::CastKind::PointerCoercion(PointerCoercion::ReifyFnPointer, _) => {
                        match *operand.layout.ty.kind() {
                            ty::FnDef(def_id, args) => {
                                let instance = ty::Instance::resolve_for_fn_ptr(
                                    bx.tcx(),
                                    bx.typing_env(),
                                    def_id,
                                    args,
                                )
                                .unwrap();
                                OperandValue::Immediate(bx.get_fn_addr(instance))
                            }
                            _ => bug!("{} cannot be reified to a fn ptr", operand.layout.ty),
                        }
                    }
                    mir::CastKind::PointerCoercion(PointerCoercion::ClosureFnPointer(_), _) => {
                        match *operand.layout.ty.kind() {
                            ty::Closure(def_id, args) => {
                                let instance = Instance::resolve_closure(
                                    bx.cx().tcx(),
                                    def_id,
                                    args,
                                    ty::ClosureKind::FnOnce,
                                );
                                OperandValue::Immediate(bx.cx().get_fn_addr(instance))
                            }
                            _ => bug!("{} cannot be cast to a fn ptr", operand.layout.ty),
                        }
                    }
                    mir::CastKind::PointerCoercion(PointerCoercion::UnsafeFnPointer, _) => {
                        operand.val
                    }
                    mir::CastKind::PointerCoercion(PointerCoercion::Unsize, _) => {
                        assert!(bx.cx().is_backend_scalar_pair(cast));
                        let (lldata, llextra) = operand.val.pointer_parts();
                        let (lldata, llextra) =
                            base::unsize_ptr(bx, lldata, operand.layout.ty, cast.ty, llextra);
                        OperandValue::Pair(lldata, llextra)
                    }
                    mir::CastKind::PointerCoercion(
                        PointerCoercion::MutToConstPointer | PointerCoercion::ArrayToPointer,
                        _,
                    ) => {
                        bug!("{kind:?} is for borrowck, and should never appear in codegen");
                    }
                    mir::CastKind::PtrToPtr
                        if bx.cx().is_backend_scalar_pair(operand.layout) =>
                    {
                        if let OperandValue::Pair(data_ptr, meta) = operand.val {
                            if bx.cx().is_backend_scalar_pair(cast) {
                                OperandValue::Pair(data_ptr, meta)
                            } else {
                                OperandValue::Immediate(data_ptr)
                            }
                        } else {
                            bug!("unexpected non-pair operand");
                        }
                    }
                    mir::CastKind::IntToInt
                    | mir::CastKind::FloatToInt
                    | mir::CastKind::FloatToFloat
                    | mir::CastKind::IntToFloat
                    | mir::CastKind::PtrToPtr
                    | mir::CastKind::FnPtrToPtr
                    | mir::CastKind::PointerWithExposedProvenance => {
                        let imm = operand.immediate();
                        let abi::BackendRepr::Scalar(from_scalar) = operand.layout.backend_repr
                        else {
                            bug!("Found non-scalar for operand {operand:?}");
                        };
                        let from_backend_ty = bx.cx().immediate_backend_type(operand.layout);

                        assert!(bx.cx().is_backend_immediate(cast));
                        let to_backend_ty = bx.cx().immediate_backend_type(cast);
                        if operand.layout.is_uninhabited() {
                            let val = OperandValue::Immediate(bx.cx().const_poison(to_backend_ty));
                            return OperandRef { val, layout: cast };
                        }
                        let abi::BackendRepr::Scalar(to_scalar) = cast.layout.backend_repr else {
                            bug!("Found non-scalar for cast {cast:?}");
                        };

                        self.cast_immediate(
                            bx,
                            imm,
                            from_scalar,
                            from_backend_ty,
                            to_scalar,
                            to_backend_ty,
                        )
                        .map(OperandValue::Immediate)
                        .unwrap_or_else(|| {
                            bug!("Unsupported cast of {operand:?} to {cast:?}");
                        })
                    }
                    mir::CastKind::Transmute => {
                        self.codegen_transmute_operand(bx, operand, cast)
                    }
                };
                OperandRef { val, layout: cast }
            }

            mir::Rvalue::Ref(_, bk, place) => {
                let mk_ref = move |tcx: TyCtxt<'tcx>, ty: Ty<'tcx>| {
                    Ty::new_ref(tcx, tcx.lifetimes.re_erased, ty, bk.to_mutbl_lossy())
                };
                self.codegen_place_to_pointer(bx, place, mk_ref)
            }

            mir::Rvalue::CopyForDeref(place) => {
                self.codegen_operand(bx, &mir::Operand::Copy(place))
            }
            mir::Rvalue::RawPtr(kind, place) => {
                let mk_ptr = move |tcx: TyCtxt<'tcx>, ty: Ty<'tcx>| {
                    Ty::new_ptr(tcx, ty, kind.to_mutbl_lossy())
                };
                self.codegen_place_to_pointer(bx, place, mk_ptr)
            }

            mir::Rvalue::Len(place) => {
                let size = self.evaluate_array_len(bx, place);
                OperandRef {
                    val: OperandValue::Immediate(size),
                    layout: bx.cx().layout_of(bx.tcx().types.usize),
                }
            }

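            // `AddWithOverflow` and friends return a `(T, bool)` pair; they
            // are lowered through the checked-binop path using the
            // corresponding wrapping operator.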
            mir::Rvalue::BinaryOp(op_with_overflow, box (ref lhs, ref rhs))
                if let Some(op) = op_with_overflow.overflowing_to_wrapping() =>
            {
                let lhs = self.codegen_operand(bx, lhs);
                let rhs = self.codegen_operand(bx, rhs);
                let result = self.codegen_scalar_checked_binop(
                    bx,
                    op,
                    lhs.immediate(),
                    rhs.immediate(),
                    lhs.layout.ty,
                );
                let val_ty = op.ty(bx.tcx(), lhs.layout.ty, rhs.layout.ty);
                let operand_ty = Ty::new_tup(bx.tcx(), &[val_ty, bx.tcx().types.bool]);
                OperandRef { val: result, layout: bx.cx().layout_of(operand_ty) }
            }
            mir::Rvalue::BinaryOp(op, box (ref lhs, ref rhs)) => {
                let lhs = self.codegen_operand(bx, lhs);
                let rhs = self.codegen_operand(bx, rhs);
                let llresult = match (lhs.val, rhs.val) {
                    (
                        OperandValue::Pair(lhs_addr, lhs_extra),
                        OperandValue::Pair(rhs_addr, rhs_extra),
                    ) => self.codegen_wide_ptr_binop(
                        bx,
                        op,
                        lhs_addr,
                        lhs_extra,
                        rhs_addr,
                        rhs_extra,
                        lhs.layout.ty,
                    ),

                    (OperandValue::Immediate(lhs_val), OperandValue::Immediate(rhs_val)) => self
                        .codegen_scalar_binop(
                            bx,
                            op,
                            lhs_val,
                            rhs_val,
                            lhs.layout.ty,
                            rhs.layout.ty,
                        ),

                    _ => bug!(),
                };
                OperandRef {
                    val: OperandValue::Immediate(llresult),
                    layout: bx.cx().layout_of(op.ty(bx.tcx(), lhs.layout.ty, rhs.layout.ty)),
                }
            }

            mir::Rvalue::UnaryOp(op, ref operand) => {
                let operand = self.codegen_operand(bx, operand);
                let is_float = operand.layout.ty.is_floating_point();
                let (val, layout) = match op {
                    mir::UnOp::Not => {
                        let llval = bx.not(operand.immediate());
                        (OperandValue::Immediate(llval), operand.layout)
                    }
                    mir::UnOp::Neg => {
                        let llval = if is_float {
                            bx.fneg(operand.immediate())
                        } else {
                            bx.neg(operand.immediate())
                        };
                        (OperandValue::Immediate(llval), operand.layout)
                    }
                    mir::UnOp::PtrMetadata => {
                        assert!(operand.layout.ty.is_raw_ptr() || operand.layout.ty.is_ref());
                        let (_, meta) = operand.val.pointer_parts();
                        assert_eq!(operand.layout.fields.count() > 1, meta.is_some());
                        if let Some(meta) = meta {
                            (OperandValue::Immediate(meta), operand.layout.field(self.cx, 1))
                        } else {
                            (OperandValue::ZeroSized, bx.cx().layout_of(bx.tcx().types.unit))
                        }
                    }
                };
                assert!(
                    val.is_expected_variant_for_type(self.cx, layout),
                    "Made wrong variant {val:?} for type {layout:?}",
                );
                OperandRef { val, layout }
            }

            mir::Rvalue::Discriminant(ref place) => {
                let discr_ty = rvalue.ty(self.mir, bx.tcx());
                let discr_ty = self.monomorphize(discr_ty);
                let operand = self.codegen_consume(bx, place.as_ref());
                let discr = operand.codegen_get_discr(self, bx, discr_ty);
                OperandRef {
                    val: OperandValue::Immediate(discr),
                    layout: self.cx.layout_of(discr_ty),
                }
            }

            mir::Rvalue::NullaryOp(ref null_op, ty) => {
                let ty = self.monomorphize(ty);
                let layout = bx.cx().layout_of(ty);
                let val = match null_op {
                    mir::NullOp::SizeOf => {
                        assert!(bx.cx().type_is_sized(ty));
                        let val = layout.size.bytes();
                        bx.cx().const_usize(val)
                    }
                    mir::NullOp::AlignOf => {
                        assert!(bx.cx().type_is_sized(ty));
                        let val = layout.align.abi.bytes();
                        bx.cx().const_usize(val)
                    }
                    mir::NullOp::OffsetOf(fields) => {
                        let val = bx
                            .tcx()
                            .offset_of_subfield(bx.typing_env(), layout, fields.iter())
                            .bytes();
                        bx.cx().const_usize(val)
                    }
                    mir::NullOp::UbChecks => {
                        let val = bx.tcx().sess.ub_checks();
                        bx.cx().const_bool(val)
                    }
                    mir::NullOp::ContractChecks => {
                        let val = bx.tcx().sess.contract_checks();
                        bx.cx().const_bool(val)
                    }
                };
                let tcx = self.cx.tcx();
                OperandRef {
                    val: OperandValue::Immediate(val),
                    layout: self.cx.layout_of(null_op.ty(tcx)),
                }
            }

            mir::Rvalue::ThreadLocalRef(def_id) => {
                assert!(bx.cx().tcx().is_static(def_id));
                let layout = bx.layout_of(bx.cx().tcx().static_ptr_ty(def_id, bx.typing_env()));
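                // A thread-local defined in another crate may have to be
                // accessed through a shim function rather than referenced
                // directly.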
                let static_ = if !def_id.is_local() && bx.cx().tcx().needs_thread_local_shim(def_id)
                {
                    let instance = ty::Instance {
                        def: ty::InstanceKind::ThreadLocalShim(def_id),
                        args: ty::GenericArgs::empty(),
                    };
                    let fn_ptr = bx.get_fn_addr(instance);
                    let fn_abi = bx.fn_abi_of_instance(instance, ty::List::empty());
                    let fn_ty = bx.fn_decl_backend_type(fn_abi);
                    let fn_attrs = if bx.tcx().def_kind(instance.def_id()).has_codegen_attrs() {
                        Some(bx.tcx().codegen_fn_attrs(instance.def_id()))
                    } else {
                        None
                    };
                    bx.call(fn_ty, fn_attrs, Some(fn_abi), fn_ptr, &[], None, Some(instance))
                } else {
                    bx.get_static(def_id)
                };
                OperandRef { val: OperandValue::Immediate(static_), layout }
            }
            mir::Rvalue::Use(ref operand) => self.codegen_operand(bx, operand),
            mir::Rvalue::Repeat(..) => bug!("{rvalue:?} in codegen_rvalue_operand"),
            mir::Rvalue::Aggregate(ref kind, ref fields) => {
                let (variant_index, active_field_index) = match **kind {
                    mir::AggregateKind::Adt(_, variant_index, _, _, active_field_index) => {
                        (variant_index, active_field_index)
                    }
                    _ => (FIRST_VARIANT, None),
                };

                let ty = rvalue.ty(self.mir, self.cx.tcx());
                let ty = self.monomorphize(ty);
                let layout = self.cx.layout_of(ty);

                let Some(mut builder) = OperandRef::builder(layout) else {
                    bug!("Cannot use type in operand builder: {layout:?}")
                };
                for (field_idx, field) in fields.iter_enumerated() {
                    let op = self.codegen_operand(bx, field);
                    let fi = active_field_index.unwrap_or(field_idx);
                    builder.insert_field(bx, variant_index, fi, op);
                }

                let tag_result = codegen_tag_value(self.cx, variant_index, layout);
                match tag_result {
                    Err(super::place::UninhabitedVariantError) => {
                        bx.abort();
                        let val = OperandValue::poison(bx, layout);
                        OperandRef { val, layout }
                    }
                    Ok(maybe_tag_value) => {
                        if let Some((tag_field, tag_imm)) = maybe_tag_value {
                            builder.insert_imm(tag_field, tag_imm);
                        }
                        builder.build(bx.cx())
                    }
                }
            }
            mir::Rvalue::ShallowInitBox(ref operand, content_ty) => {
                let operand = self.codegen_operand(bx, operand);
                let val = operand.immediate();

                let content_ty = self.monomorphize(content_ty);
                let box_layout = bx.cx().layout_of(Ty::new_box(bx.tcx(), content_ty));

                OperandRef { val: OperandValue::Immediate(val), layout: box_layout }
            }
            mir::Rvalue::WrapUnsafeBinder(ref operand, binder_ty) => {
                let operand = self.codegen_operand(bx, operand);
                let binder_ty = self.monomorphize(binder_ty);
                let layout = bx.cx().layout_of(binder_ty);
                OperandRef { val: operand.val, layout }
            }
        }
    }

    fn evaluate_array_len(&mut self, bx: &mut Bx, place: mir::Place<'tcx>) -> Bx::Value {
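        // A local of array type that is passed around as an operand has no
        // backing memory to read a length from, but its length is a
        // monomorphic constant, so read it off the type instead.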
        if let Some(index) = place.as_local()
            && let LocalRef::Operand(op) = self.locals[index]
            && let ty::Array(_, n) = op.layout.ty.kind()
        {
            let n = n
                .try_to_target_usize(bx.tcx())
                .expect("expected monomorphic const in codegen");
            return bx.cx().const_usize(n);
        }
        let cg_value = self.codegen_place(bx, place.as_ref());
        cg_value.len(bx.cx())
    }

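    /// Codegen an `Rvalue::Ref` or `Rvalue::RawPtr`: takes the address of
    /// `place` and wraps it in an operand whose type is produced by
    /// `mk_ptr_ty`.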
    fn codegen_place_to_pointer(
        &mut self,
        bx: &mut Bx,
        place: mir::Place<'tcx>,
        mk_ptr_ty: impl FnOnce(TyCtxt<'tcx>, Ty<'tcx>) -> Ty<'tcx>,
    ) -> OperandRef<'tcx, Bx::Value> {
        let cg_place = self.codegen_place(bx, place.as_ref());
        let val = cg_place.val.address();

        let ty = cg_place.layout.ty;
        assert!(
            if bx.cx().tcx().type_has_metadata(ty, bx.cx().typing_env()) {
                matches!(val, OperandValue::Pair(..))
            } else {
                matches!(val, OperandValue::Immediate(..))
            },
            "Address of place was unexpectedly {val:?} for pointee type {ty:?}",
        );

        OperandRef { val, layout: self.cx.layout_of(mk_ptr_ty(self.cx.tcx(), ty)) }
    }

    fn codegen_scalar_binop(
        &mut self,
        bx: &mut Bx,
        op: mir::BinOp,
        lhs: Bx::Value,
        rhs: Bx::Value,
        lhs_ty: Ty<'tcx>,
        rhs_ty: Ty<'tcx>,
    ) -> Bx::Value {
        let is_float = lhs_ty.is_floating_point();
        let is_signed = lhs_ty.is_signed();
        match op {
            mir::BinOp::Add => {
                if is_float {
                    bx.fadd(lhs, rhs)
                } else {
                    bx.add(lhs, rhs)
                }
            }
            mir::BinOp::AddUnchecked => {
                if is_signed {
                    bx.unchecked_sadd(lhs, rhs)
                } else {
                    bx.unchecked_uadd(lhs, rhs)
                }
            }
            mir::BinOp::Sub => {
                if is_float {
                    bx.fsub(lhs, rhs)
                } else {
                    bx.sub(lhs, rhs)
                }
            }
            mir::BinOp::SubUnchecked => {
                if is_signed {
                    bx.unchecked_ssub(lhs, rhs)
                } else {
                    bx.unchecked_usub(lhs, rhs)
                }
            }
            mir::BinOp::Mul => {
                if is_float {
                    bx.fmul(lhs, rhs)
                } else {
                    bx.mul(lhs, rhs)
                }
            }
            mir::BinOp::MulUnchecked => {
                if is_signed {
                    bx.unchecked_smul(lhs, rhs)
                } else {
                    bx.unchecked_umul(lhs, rhs)
                }
            }
            mir::BinOp::Div => {
                if is_float {
                    bx.fdiv(lhs, rhs)
                } else if is_signed {
                    bx.sdiv(lhs, rhs)
                } else {
                    bx.udiv(lhs, rhs)
                }
            }
            mir::BinOp::Rem => {
                if is_float {
                    bx.frem(lhs, rhs)
                } else if is_signed {
                    bx.srem(lhs, rhs)
                } else {
                    bx.urem(lhs, rhs)
                }
            }
            mir::BinOp::BitOr => bx.or(lhs, rhs),
            mir::BinOp::BitAnd => bx.and(lhs, rhs),
            mir::BinOp::BitXor => bx.xor(lhs, rhs),
            mir::BinOp::Offset => {
                let pointee_type = lhs_ty
                    .builtin_deref(true)
                    .unwrap_or_else(|| bug!("deref of non-pointer {:?}", lhs_ty));
                let pointee_layout = bx.cx().layout_of(pointee_type);
                if pointee_layout.is_zst() {
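                    // `Offset` is scaled by the pointee size, so offsetting a
                    // pointer to a ZST can never change the address.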
                    lhs
                } else {
                    let llty = bx.cx().backend_type(pointee_layout);
                    if !rhs_ty.is_signed() {
                        bx.inbounds_nuw_gep(llty, lhs, &[rhs])
                    } else {
                        bx.inbounds_gep(llty, lhs, &[rhs])
                    }
                }
            }
            mir::BinOp::Shl | mir::BinOp::ShlUnchecked => {
                let rhs = base::build_shift_expr_rhs(bx, lhs, rhs, op == mir::BinOp::ShlUnchecked);
                bx.shl(lhs, rhs)
            }
            mir::BinOp::Shr | mir::BinOp::ShrUnchecked => {
                let rhs = base::build_shift_expr_rhs(bx, lhs, rhs, op == mir::BinOp::ShrUnchecked);
                if is_signed { bx.ashr(lhs, rhs) } else { bx.lshr(lhs, rhs) }
            }
            mir::BinOp::Ne
            | mir::BinOp::Lt
            | mir::BinOp::Gt
            | mir::BinOp::Eq
            | mir::BinOp::Le
            | mir::BinOp::Ge => {
                if is_float {
                    bx.fcmp(base::bin_op_to_fcmp_predicate(op), lhs, rhs)
                } else {
                    bx.icmp(base::bin_op_to_icmp_predicate(op, is_signed), lhs, rhs)
                }
            }
            mir::BinOp::Cmp => {
                use std::cmp::Ordering;
                assert!(!is_float);
                if let Some(value) = bx.three_way_compare(lhs_ty, lhs, rhs) {
                    return value;
                }
                let pred = |op| base::bin_op_to_icmp_predicate(op, is_signed);
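                // At `-O0`, use the classic branchless
                // `(lhs > rhs) as i8 - (lhs < rhs) as i8` form; in optimized
                // builds, a chain of `select`s currently lowers better.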
                if bx.cx().tcx().sess.opts.optimize == OptLevel::No {
                    let is_gt = bx.icmp(pred(mir::BinOp::Gt), lhs, rhs);
                    let gtext = bx.zext(is_gt, bx.type_i8());
                    let is_lt = bx.icmp(pred(mir::BinOp::Lt), lhs, rhs);
                    let ltext = bx.zext(is_lt, bx.type_i8());
                    bx.unchecked_ssub(gtext, ltext)
                } else {
                    let is_lt = bx.icmp(pred(mir::BinOp::Lt), lhs, rhs);
                    let is_ne = bx.icmp(pred(mir::BinOp::Ne), lhs, rhs);
                    let ge = bx.select(
                        is_ne,
                        bx.cx().const_i8(Ordering::Greater as i8),
                        bx.cx().const_i8(Ordering::Equal as i8),
                    );
                    bx.select(is_lt, bx.cx().const_i8(Ordering::Less as i8), ge)
                }
            }
            mir::BinOp::AddWithOverflow
            | mir::BinOp::SubWithOverflow
            | mir::BinOp::MulWithOverflow => {
                bug!("{op:?} needs to return a pair, so call codegen_scalar_checked_binop instead")
            }
        }
    }

    fn codegen_wide_ptr_binop(
        &mut self,
        bx: &mut Bx,
        op: mir::BinOp,
        lhs_addr: Bx::Value,
        lhs_extra: Bx::Value,
        rhs_addr: Bx::Value,
        rhs_extra: Bx::Value,
        _input_ty: Ty<'tcx>,
    ) -> Bx::Value {
        match op {
            mir::BinOp::Eq => {
                let lhs = bx.icmp(IntPredicate::IntEQ, lhs_addr, rhs_addr);
                let rhs = bx.icmp(IntPredicate::IntEQ, lhs_extra, rhs_extra);
                bx.and(lhs, rhs)
            }
            mir::BinOp::Ne => {
                let lhs = bx.icmp(IntPredicate::IntNE, lhs_addr, rhs_addr);
                let rhs = bx.icmp(IntPredicate::IntNE, lhs_extra, rhs_extra);
                bx.or(lhs, rhs)
            }
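            // Wide pointers compare lexicographically:
            //   a OP b  ==  (a.addr STRICT(OP) b.addr)
            //           ||  (a.addr == b.addr && a.extra OP b.extra)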
            mir::BinOp::Le | mir::BinOp::Lt | mir::BinOp::Ge | mir::BinOp::Gt => {
                let (op, strict_op) = match op {
                    mir::BinOp::Lt => (IntPredicate::IntULT, IntPredicate::IntULT),
                    mir::BinOp::Le => (IntPredicate::IntULE, IntPredicate::IntULT),
                    mir::BinOp::Gt => (IntPredicate::IntUGT, IntPredicate::IntUGT),
                    mir::BinOp::Ge => (IntPredicate::IntUGE, IntPredicate::IntUGT),
                    _ => bug!(),
                };
                let lhs = bx.icmp(strict_op, lhs_addr, rhs_addr);
                let and_lhs = bx.icmp(IntPredicate::IntEQ, lhs_addr, rhs_addr);
                let and_rhs = bx.icmp(op, lhs_extra, rhs_extra);
                let rhs = bx.and(and_lhs, and_rhs);
                bx.or(lhs, rhs)
            }
            _ => {
                bug!("unexpected wide ptr binop");
            }
        }
    }

    fn codegen_scalar_checked_binop(
        &mut self,
        bx: &mut Bx,
        op: mir::BinOp,
        lhs: Bx::Value,
        rhs: Bx::Value,
        input_ty: Ty<'tcx>,
    ) -> OperandValue<Bx::Value> {
        let (val, of) = match op {
            mir::BinOp::Add | mir::BinOp::Sub | mir::BinOp::Mul => {
                let oop = match op {
                    mir::BinOp::Add => OverflowOp::Add,
                    mir::BinOp::Sub => OverflowOp::Sub,
                    mir::BinOp::Mul => OverflowOp::Mul,
                    _ => unreachable!(),
                };
                bx.checked_binop(oop, input_ty, lhs, rhs)
            }
            _ => bug!("Operator `{:?}` is not a checkable operator", op),
        };

        OperandValue::Pair(val, of)
    }

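    /// Returns `true` if this rvalue can be codegen'd directly to an
    /// `OperandRef`, without first writing it into an intermediate place.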
    pub(crate) fn rvalue_creates_operand(&self, rvalue: &mir::Rvalue<'tcx>, span: Span) -> bool {
        match *rvalue {
            mir::Rvalue::Cast(mir::CastKind::Transmute, ref operand, cast_ty) => {
                let operand_ty = operand.ty(self.mir, self.cx.tcx());
                let cast_layout = self.cx.layout_of(self.monomorphize(cast_ty));
                let operand_layout = self.cx.layout_of(self.monomorphize(operand_ty));
                match (operand_layout.backend_repr, cast_layout.backend_repr) {
                    (_, abi::BackendRepr::Memory { .. }) => cast_layout.is_zst(),

                    (abi::BackendRepr::Memory { .. }, _) => true,

                    (abi::BackendRepr::Scalar(a), abi::BackendRepr::Scalar(b)) => {
                        a.size(self.cx) == b.size(self.cx)
                    }
                    (
                        abi::BackendRepr::ScalarPair(a0, a1),
                        abi::BackendRepr::ScalarPair(b0, b1),
                    ) => {
                        a0.size(self.cx) == b0.size(self.cx)
                            && a1.size(self.cx) == b1.size(self.cx)
                    }

                    (abi::BackendRepr::Scalar(_), abi::BackendRepr::ScalarPair(_, _))
                    | (abi::BackendRepr::ScalarPair(_, _), abi::BackendRepr::Scalar(_)) => false,

                    (abi::BackendRepr::SimdVector { .. }, _)
                    | (_, abi::BackendRepr::SimdVector { .. }) => false,
                }
            }
            mir::Rvalue::Ref(..)
            | mir::Rvalue::CopyForDeref(..)
            | mir::Rvalue::RawPtr(..)
            | mir::Rvalue::Len(..)
            | mir::Rvalue::Cast(..)
            | mir::Rvalue::ShallowInitBox(..)
            | mir::Rvalue::BinaryOp(..)
            | mir::Rvalue::UnaryOp(..)
            | mir::Rvalue::Discriminant(..)
            | mir::Rvalue::NullaryOp(..)
            | mir::Rvalue::ThreadLocalRef(_)
            | mir::Rvalue::Use(..)
            | mir::Rvalue::WrapUnsafeBinder(..) => true,
            mir::Rvalue::Repeat(..) => false,
            mir::Rvalue::Aggregate(..) => {
                let ty = rvalue.ty(self.mir, self.cx.tcx());
                let ty = self.monomorphize(ty);
                let layout = self.cx.spanned_layout_of(ty, span);
                OperandRef::<Bx::Value>::builder(layout).is_some()
            }
        }
    }
}

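/// Transmutes a single scalar immediate `imm` from `from_scalar` to
/// `to_scalar`, which must have the same size.
///
/// Valid-range `assume`s are emitted on both sides of the conversion so the
/// backend can optimize based on the value's range.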
pub(super) fn transmute_scalar<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
    bx: &mut Bx,
    mut imm: Bx::Value,
    from_scalar: abi::Scalar,
    to_scalar: abi::Scalar,
) -> Bx::Value {
    assert_eq!(from_scalar.size(bx.cx()), to_scalar.size(bx.cx()));
    let imm_ty = bx.cx().val_ty(imm);
    assert_ne!(
        bx.cx().type_kind(imm_ty),
        TypeKind::Vector,
        "Vector type {imm_ty:?} not allowed in transmute_scalar {from_scalar:?} -> {to_scalar:?}"
    );

    if from_scalar == to_scalar {
        return imm;
    }

    use abi::Primitive::*;
    imm = bx.from_immediate(imm);

    let from_backend_ty = bx.cx().type_from_scalar(from_scalar);
    debug_assert_eq!(bx.cx().val_ty(imm), from_backend_ty);
    let to_backend_ty = bx.cx().type_from_scalar(to_scalar);

    assume_scalar_range(bx, imm, from_scalar, from_backend_ty);

    imm = match (from_scalar.primitive(), to_scalar.primitive()) {
        (Int(..) | Float(_), Int(..) | Float(_)) => bx.bitcast(imm, to_backend_ty),
        (Pointer(..), Pointer(..)) => bx.pointercast(imm, to_backend_ty),
        (Int(..), Pointer(..)) => bx.ptradd(bx.const_null(bx.type_ptr()), imm),
        (Pointer(..), Int(..)) => bx.ptrtoint(imm, to_backend_ty),
        (Float(_), Pointer(..)) => {
            let int_imm = bx.bitcast(imm, bx.cx().type_isize());
            bx.ptradd(bx.const_null(bx.type_ptr()), int_imm)
        }
        (Pointer(..), Float(_)) => {
            let int_imm = bx.ptrtoint(imm, bx.cx().type_isize());
            bx.bitcast(int_imm, to_backend_ty)
        }
    };

    debug_assert_eq!(bx.cx().val_ty(imm), to_backend_ty);

    assume_scalar_range(bx, imm, to_scalar, to_backend_ty);

    imm = bx.to_immediate_scalar(imm, to_scalar);
    imm
}

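/// Emits an `assume` constraining `imm` to the valid range of `scalar`.
///
/// Skipped when the range already covers every value, or at `-O0`, where the
/// extra IR would only slow compilation without enabling optimizations.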
fn assume_scalar_range<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
    bx: &mut Bx,
    imm: Bx::Value,
    scalar: abi::Scalar,
    backend_ty: Bx::Type,
) {
    if matches!(bx.cx().sess().opts.optimize, OptLevel::No) || scalar.is_always_valid(bx.cx()) {
        return;
    }

    match scalar.primitive() {
        abi::Primitive::Int(..) => {
            let range = scalar.valid_range(bx.cx());
            bx.assume_integer_range(imm, backend_ty, range);
        }
        abi::Primitive::Pointer(abi::AddressSpace::ZERO)
            if !scalar.valid_range(bx.cx()).contains(0) =>
        {
            bx.assume_nonnull(imm);
        }
        abi::Primitive::Pointer(..) | abi::Primitive::Float(..) => {}
    }
}