use rustc_abi::{self as abi, FIRST_VARIANT};
use rustc_middle::ty::adjustment::PointerCoercion;
use rustc_middle::ty::layout::{HasTyCtxt, HasTypingEnv, LayoutOf, TyAndLayout};
use rustc_middle::ty::{self, Instance, Ty, TyCtxt};
use rustc_middle::{bug, mir};
use rustc_session::config::OptLevel;
use tracing::{debug, instrument};

use super::operand::{OperandRef, OperandRefBuilder, OperandValue};
use super::place::{PlaceRef, codegen_tag_value};
use super::{FunctionCx, LocalRef};
use crate::common::{IntPredicate, TypeKind};
use crate::traits::*;
use crate::{MemFlags, base};

impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
    #[instrument(level = "trace", skip(self, bx))]
    pub(crate) fn codegen_rvalue(
        &mut self,
        bx: &mut Bx,
        dest: PlaceRef<'tcx, Bx::Value>,
        rvalue: &mir::Rvalue<'tcx>,
    ) {
        match *rvalue {
            mir::Rvalue::Use(ref operand) => {
                let cg_operand = self.codegen_operand(bx, operand);
                cg_operand.val.store(bx, dest);
            }

            mir::Rvalue::Cast(
                mir::CastKind::PointerCoercion(PointerCoercion::Unsize, _),
                ref source,
                _,
            ) => {
                if bx.cx().is_backend_scalar_pair(dest.layout) {
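                    // The destination is a scalar pair (e.g. a wide pointer), so
                    // the coercion can be computed directly as an operand and
                    // stored, with no temporary place needed.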
                    let temp = self.codegen_rvalue_operand(bx, rvalue);
                    temp.val.store(bx, dest);
                    return;
                }

                let operand = self.codegen_operand(bx, source);
                match operand.val {
                    OperandValue::Pair(..) | OperandValue::Immediate(_) => {
                        debug!("codegen_rvalue: creating ugly alloca");
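                        // `coerce_unsized_into` works on places, so spill the
                        // immediate into a temporary stack slot first.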
                        let scratch = PlaceRef::alloca(bx, operand.layout);
                        scratch.storage_live(bx);
                        operand.val.store(bx, scratch);
                        base::coerce_unsized_into(bx, scratch, dest);
                        scratch.storage_dead(bx);
                    }
                    OperandValue::Ref(val) => {
                        if val.llextra.is_some() {
                            bug!("unsized coercion on an unsized rvalue");
                        }
                        base::coerce_unsized_into(bx, val.with_type(operand.layout), dest);
                    }
                    OperandValue::ZeroSized => {
                        bug!("unsized coercion on a ZST rvalue");
                    }
                }
            }

            mir::Rvalue::Cast(mir::CastKind::Transmute, ref operand, _ty) => {
                let src = self.codegen_operand(bx, operand);
                self.codegen_transmute(bx, src, dest);
            }

            mir::Rvalue::Repeat(ref elem, count) => {
                if dest.layout.is_zst() {
                    return;
                }

                if let mir::Operand::Constant(const_op) = elem {
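                    // A constant element whose bytes are all uninitialized lets
                    // the whole array be "filled" with a single undef memset.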
                    let val = self.eval_mir_constant(const_op);
                    if val.all_bytes_uninit(self.cx.tcx()) {
                        let size = bx.const_usize(dest.layout.size.bytes());
                        bx.memset(
                            dest.val.llval,
                            bx.const_undef(bx.type_i8()),
                            size,
                            dest.val.align,
                            MemFlags::empty(),
                        );
                        return;
                    }
                }

                let cg_elem = self.codegen_operand(bx, elem);

                let try_init_all_same = |bx: &mut Bx, v| {
                    let start = dest.val.llval;
                    let size = bx.const_usize(dest.layout.size.bytes());

                    if let Some(int) = bx.cx().const_to_opt_u128(v, false) {
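                        // If the constant element is one byte repeated across its
                        // whole width, the array can be filled with `memset`.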
                        let bytes = &int.to_le_bytes()[..cg_elem.layout.size.bytes_usize()];
                        let first = bytes[0];
                        if bytes[1..].iter().all(|&b| b == first) {
                            let fill = bx.cx().const_u8(first);
                            bx.memset(start, fill, size, dest.val.align, MemFlags::empty());
                            return true;
                        }
                    }

                    let v = bx.from_immediate(v);
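                    // An `i8` immediate can be passed to `memset` directly, even
                    // when its value isn't known at compile time.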
                    if bx.cx().val_ty(v) == bx.cx().type_i8() {
                        bx.memset(start, v, size, dest.val.align, MemFlags::empty());
                        return true;
                    }
                    false
                };

                match cg_elem.val {
                    OperandValue::Immediate(v) => {
                        if try_init_all_same(bx, v) {
                            return;
                        }
                    }
                    _ => (),
                }

                let count = self
                    .monomorphize(count)
                    .try_to_target_usize(bx.tcx())
                    .expect("expected monomorphic const in codegen");

                bx.write_operand_repeatedly(cg_elem, count, dest);
            }

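            // Other aggregates (except `RawPtr`, which takes the operand path
            // below) are built by storing each operand into the corresponding
            // field of the destination place.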
            mir::Rvalue::Aggregate(ref kind, ref operands)
                if !matches!(**kind, mir::AggregateKind::RawPtr(..)) =>
            {
                let (variant_index, variant_dest, active_field_index) = match **kind {
                    mir::AggregateKind::Adt(_, variant_index, _, _, active_field_index) => {
                        let variant_dest = dest.project_downcast(bx, variant_index);
                        (variant_index, variant_dest, active_field_index)
                    }
                    _ => (FIRST_VARIANT, dest, None),
                };
                if active_field_index.is_some() {
                    assert_eq!(operands.len(), 1);
                }
                for (i, operand) in operands.iter_enumerated() {
                    let op = self.codegen_operand(bx, operand);
                    if !op.layout.is_zst() {
                        let field_index = active_field_index.unwrap_or(i);
                        let field = if let mir::AggregateKind::Array(_) = **kind {
                            let llindex = bx.cx().const_usize(field_index.as_u32().into());
                            variant_dest.project_index(bx, llindex)
                        } else {
                            variant_dest.project_field(bx, field_index.as_usize())
                        };
                        op.val.store(bx, field);
                    }
                }
                dest.codegen_set_discr(bx, variant_index);
            }

            _ => {
                assert!(self.rvalue_creates_operand(rvalue));
                let temp = self.codegen_rvalue_operand(bx, rvalue);
                temp.val.store(bx, dest);
            }
        }
    }

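    /// Transmutes the `src` value to the destination type by writing it into
    /// the `dst` place.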
    fn codegen_transmute(
        &mut self,
        bx: &mut Bx,
        src: OperandRef<'tcx, Bx::Value>,
        dst: PlaceRef<'tcx, Bx::Value>,
    ) {
        assert!(src.layout.is_sized());
        assert!(dst.layout.is_sized());

        if src.layout.size != dst.layout.size
            || src.layout.is_uninhabited()
            || dst.layout.is_uninhabited()
        {
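            // A transmute between layouts of different sizes, or involving an
            // uninhabited type, can never actually be executed, so mark the
            // spot unreachable rather than emitting a bogus store.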
            bx.unreachable_nonterminator();
        } else {
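            // The sizes match, so the transmute is just a store of the source
            // bits through the destination pointer, using the source's layout.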
            src.val.store(bx, dst.val.with_type(src.layout));
        }
    }

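    /// Transmutes an `OperandValue` to a value of the `cast` layout without
    /// going through memory.
    ///
    /// The `cast` layout must be representable as an operand: a ZST or a
    /// scalar/scalar-pair layout, never a non-ZST `Memory` layout.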
    pub(crate) fn codegen_transmute_operand(
        &mut self,
        bx: &mut Bx,
        operand: OperandRef<'tcx, Bx::Value>,
        cast: TyAndLayout<'tcx>,
    ) -> OperandValue<Bx::Value> {
        if operand.layout.size != cast.size
            || operand.layout.is_uninhabited()
            || cast.is_uninhabited()
        {
            bx.unreachable_nonterminator();

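            // The code path is unreachable, but the caller still expects a
            // value of the cast layout, so hand back poison.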
            return OperandValue::poison(bx, cast);
        }

        match (operand.val, operand.layout.backend_repr, cast.backend_repr) {
            _ if cast.is_zst() => OperandValue::ZeroSized,
            (_, _, abi::BackendRepr::Memory { .. }) => {
                bug!("Cannot `codegen_transmute_operand` to non-ZST memory-ABI output {cast:?}");
            }
            (OperandValue::Ref(source_place_val), abi::BackendRepr::Memory { .. }, _) => {
                assert_eq!(source_place_val.llextra, None);
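                // The cast layout is not `Memory` (the arm above caught that),
                // so loading with the cast layout yields an immediate or pair.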
                bx.load_operand(source_place_val.with_type(cast)).val
            }
            (
                OperandValue::Immediate(imm),
                abi::BackendRepr::Scalar(from_scalar),
                abi::BackendRepr::Scalar(to_scalar),
            ) => OperandValue::Immediate(transmute_scalar(bx, imm, from_scalar, to_scalar)),
            (
                OperandValue::Pair(imm_a, imm_b),
                abi::BackendRepr::ScalarPair(in_a, in_b),
                abi::BackendRepr::ScalarPair(out_a, out_b),
            ) => OperandValue::Pair(
                transmute_scalar(bx, imm_a, in_a, out_a),
                transmute_scalar(bx, imm_b, in_b, out_b),
            ),
            _ => bug!("Cannot `codegen_transmute_operand` {operand:?} to {cast:?}"),
        }
    }

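    /// Casts a scalar immediate to a scalar of another type, choosing the
    /// backend cast instruction from the primitive kinds involved.
    ///
    /// Returns `None` for combinations with no supported cast, which the
    /// caller reports as a bug.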
    fn cast_immediate(
        &self,
        bx: &mut Bx,
        mut imm: Bx::Value,
        from_scalar: abi::Scalar,
        from_backend_ty: Bx::Type,
        to_scalar: abi::Scalar,
        to_backend_ty: Bx::Type,
    ) -> Option<Bx::Value> {
        use abi::Primitive::*;

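        // Let the backend assume the input's valid range, which can enable
        // simplifications of the casts below when optimizing.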
        assume_scalar_range(bx, imm, from_scalar, from_backend_ty);

        imm = match (from_scalar.primitive(), to_scalar.primitive()) {
            (Int(_, is_signed), Int(..)) => bx.intcast(imm, to_backend_ty, is_signed),
            (Float(_), Float(_)) => {
                let srcsz = bx.cx().float_width(from_backend_ty);
                let dstsz = bx.cx().float_width(to_backend_ty);
                if dstsz > srcsz {
                    bx.fpext(imm, to_backend_ty)
                } else if srcsz > dstsz {
                    bx.fptrunc(imm, to_backend_ty)
                } else {
                    imm
                }
            }
            (Int(_, is_signed), Float(_)) => {
                if is_signed {
                    bx.sitofp(imm, to_backend_ty)
                } else {
                    bx.uitofp(imm, to_backend_ty)
                }
            }
            (Pointer(..), Pointer(..)) => bx.pointercast(imm, to_backend_ty),
            (Int(_, is_signed), Pointer(..)) => {
                let usize_imm = bx.intcast(imm, bx.cx().type_isize(), is_signed);
                bx.inttoptr(usize_imm, to_backend_ty)
            }
            (Float(_), Int(_, is_signed)) => bx.cast_float_to_int(is_signed, imm, to_backend_ty),
            _ => return None,
        };
        Some(imm)
    }

    pub(crate) fn codegen_rvalue_operand(
        &mut self,
        bx: &mut Bx,
        rvalue: &mir::Rvalue<'tcx>,
    ) -> OperandRef<'tcx, Bx::Value> {
        assert!(self.rvalue_creates_operand(rvalue), "cannot codegen {rvalue:?} to operand");

        match *rvalue {
            mir::Rvalue::Cast(ref kind, ref source, mir_cast_ty) => {
                let operand = self.codegen_operand(bx, source);
                debug!("cast operand is {:?}", operand);
                let cast = bx.cx().layout_of(self.monomorphize(mir_cast_ty));

                let val = match *kind {
                    mir::CastKind::PointerExposeProvenance => {
                        assert!(bx.cx().is_backend_immediate(cast));
                        let llptr = operand.immediate();
                        let llcast_ty = bx.cx().immediate_backend_type(cast);
                        let lladdr = bx.ptrtoint(llptr, llcast_ty);
                        OperandValue::Immediate(lladdr)
                    }
                    mir::CastKind::PointerCoercion(PointerCoercion::ReifyFnPointer, _) => {
                        match *operand.layout.ty.kind() {
                            ty::FnDef(def_id, args) => {
                                let instance = ty::Instance::resolve_for_fn_ptr(
                                    bx.tcx(),
                                    bx.typing_env(),
                                    def_id,
                                    args,
                                )
                                .unwrap();
                                OperandValue::Immediate(bx.get_fn_addr(instance))
                            }
                            _ => bug!("{} cannot be reified to a fn ptr", operand.layout.ty),
                        }
                    }
                    mir::CastKind::PointerCoercion(PointerCoercion::ClosureFnPointer(_), _) => {
                        match *operand.layout.ty.kind() {
                            ty::Closure(def_id, args) => {
                                let instance = Instance::resolve_closure(
                                    bx.cx().tcx(),
                                    def_id,
                                    args,
                                    ty::ClosureKind::FnOnce,
                                );
                                OperandValue::Immediate(bx.cx().get_fn_addr(instance))
                            }
                            _ => bug!("{} cannot be cast to a fn ptr", operand.layout.ty),
                        }
                    }
                    mir::CastKind::PointerCoercion(PointerCoercion::UnsafeFnPointer, _) => {
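                        // Removing `unsafe` from a fn pointer type doesn't change
                        // its representation, so this cast is a no-op.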
                        operand.val
                    }
                    mir::CastKind::PointerCoercion(PointerCoercion::Unsize, _) => {
                        assert!(bx.cx().is_backend_scalar_pair(cast));
                        let (lldata, llextra) = operand.val.pointer_parts();
                        let (lldata, llextra) =
                            base::unsize_ptr(bx, lldata, operand.layout.ty, cast.ty, llextra);
                        OperandValue::Pair(lldata, llextra)
                    }
                    mir::CastKind::PointerCoercion(
                        PointerCoercion::MutToConstPointer | PointerCoercion::ArrayToPointer,
                        _,
                    ) => {
                        bug!("{kind:?} is for borrowck, and should never appear in codegen");
                    }
                    mir::CastKind::PtrToPtr
                        if bx.cx().is_backend_scalar_pair(operand.layout) =>
                    {
                        if let OperandValue::Pair(data_ptr, meta) = operand.val {
                            if bx.cx().is_backend_scalar_pair(cast) {
                                OperandValue::Pair(data_ptr, meta)
                            } else {
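                                // A wide-to-thin pointer cast just extracts the
                                // data pointer and drops the metadata.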
                                OperandValue::Immediate(data_ptr)
                            }
                        } else {
                            bug!("unexpected non-pair operand");
                        }
                    }
                    mir::CastKind::IntToInt
                    | mir::CastKind::FloatToInt
                    | mir::CastKind::FloatToFloat
                    | mir::CastKind::IntToFloat
                    | mir::CastKind::PtrToPtr
                    | mir::CastKind::FnPtrToPtr
                    | mir::CastKind::PointerWithExposedProvenance => {
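                        // All of these cast kinds produce a scalar immediate from
                        // a scalar immediate, so gather both scalar ABIs and the
                        // corresponding backend types.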
                        let imm = operand.immediate();
                        let abi::BackendRepr::Scalar(from_scalar) = operand.layout.backend_repr
                        else {
                            bug!("Found non-scalar for operand {operand:?}");
                        };
                        let from_backend_ty = bx.cx().immediate_backend_type(operand.layout);

                        assert!(bx.cx().is_backend_immediate(cast));
                        let to_backend_ty = bx.cx().immediate_backend_type(cast);
                        if operand.layout.is_uninhabited() {
                            let val = OperandValue::Immediate(bx.cx().const_poison(to_backend_ty));
                            return OperandRef { val, layout: cast };
                        }
                        let abi::BackendRepr::Scalar(to_scalar) = cast.layout.backend_repr else {
                            bug!("Found non-scalar for cast {cast:?}");
                        };

                        self.cast_immediate(
                            bx,
                            imm,
                            from_scalar,
                            from_backend_ty,
                            to_scalar,
                            to_backend_ty,
                        )
                        .map(OperandValue::Immediate)
                        .unwrap_or_else(|| {
                            bug!("Unsupported cast of {operand:?} to {cast:?}");
                        })
                    }
                    mir::CastKind::Transmute => self.codegen_transmute_operand(bx, operand, cast),
                };
                OperandRef { val, layout: cast }
            }

            mir::Rvalue::Ref(_, bk, place) => {
                let mk_ref = move |tcx: TyCtxt<'tcx>, ty: Ty<'tcx>| {
                    Ty::new_ref(tcx, tcx.lifetimes.re_erased, ty, bk.to_mutbl_lossy())
                };
                self.codegen_place_to_pointer(bx, place, mk_ref)
            }

            mir::Rvalue::CopyForDeref(place) => {
                self.codegen_operand(bx, &mir::Operand::Copy(place))
            }
            mir::Rvalue::RawPtr(kind, place) => {
                let mk_ptr = move |tcx: TyCtxt<'tcx>, ty: Ty<'tcx>| {
                    Ty::new_ptr(tcx, ty, kind.to_mutbl_lossy())
                };
                self.codegen_place_to_pointer(bx, place, mk_ptr)
            }

            mir::Rvalue::Len(place) => {
                let size = self.evaluate_array_len(bx, place);
                OperandRef {
                    val: OperandValue::Immediate(size),
                    layout: bx.cx().layout_of(bx.tcx().types.usize),
                }
            }

            mir::Rvalue::BinaryOp(op_with_overflow, box (ref lhs, ref rhs))
                if let Some(op) = op_with_overflow.overflowing_to_wrapping() =>
            {
                let lhs = self.codegen_operand(bx, lhs);
                let rhs = self.codegen_operand(bx, rhs);
                let result = self.codegen_scalar_checked_binop(
                    bx,
                    op,
                    lhs.immediate(),
                    rhs.immediate(),
                    lhs.layout.ty,
                );
                let val_ty = op.ty(bx.tcx(), lhs.layout.ty, rhs.layout.ty);
                let operand_ty = Ty::new_tup(bx.tcx(), &[val_ty, bx.tcx().types.bool]);
                OperandRef { val: result, layout: bx.cx().layout_of(operand_ty) }
            }
            mir::Rvalue::BinaryOp(op, box (ref lhs, ref rhs)) => {
                let lhs = self.codegen_operand(bx, lhs);
                let rhs = self.codegen_operand(bx, rhs);
                let llresult = match (lhs.val, rhs.val) {
                    (
                        OperandValue::Pair(lhs_addr, lhs_extra),
                        OperandValue::Pair(rhs_addr, rhs_extra),
                    ) => self.codegen_wide_ptr_binop(
                        bx,
                        op,
                        lhs_addr,
                        lhs_extra,
                        rhs_addr,
                        rhs_extra,
                        lhs.layout.ty,
                    ),

                    (OperandValue::Immediate(lhs_val), OperandValue::Immediate(rhs_val)) => self
                        .codegen_scalar_binop(
                            bx,
                            op,
                            lhs_val,
                            rhs_val,
                            lhs.layout.ty,
                            rhs.layout.ty,
                        ),

                    _ => bug!(),
                };
                OperandRef {
                    val: OperandValue::Immediate(llresult),
                    layout: bx.cx().layout_of(op.ty(bx.tcx(), lhs.layout.ty, rhs.layout.ty)),
                }
            }

            mir::Rvalue::UnaryOp(op, ref operand) => {
                let operand = self.codegen_operand(bx, operand);
                let is_float = operand.layout.ty.is_floating_point();
                let (val, layout) = match op {
                    mir::UnOp::Not => {
                        let llval = bx.not(operand.immediate());
                        (OperandValue::Immediate(llval), operand.layout)
                    }
                    mir::UnOp::Neg => {
                        let llval = if is_float {
                            bx.fneg(operand.immediate())
                        } else {
                            bx.neg(operand.immediate())
                        };
                        (OperandValue::Immediate(llval), operand.layout)
                    }
                    mir::UnOp::PtrMetadata => {
                        assert!(operand.layout.ty.is_raw_ptr() || operand.layout.ty.is_ref());
                        let (_, meta) = operand.val.pointer_parts();
                        assert_eq!(operand.layout.fields.count() > 1, meta.is_some());
                        if let Some(meta) = meta {
                            (OperandValue::Immediate(meta), operand.layout.field(self.cx, 1))
                        } else {
                            (OperandValue::ZeroSized, bx.cx().layout_of(bx.tcx().types.unit))
                        }
                    }
                };
                assert!(
                    val.is_expected_variant_for_type(self.cx, layout),
                    "Made wrong variant {val:?} for type {layout:?}",
                );
                OperandRef { val, layout }
            }

            mir::Rvalue::Discriminant(ref place) => {
                let discr_ty = rvalue.ty(self.mir, bx.tcx());
                let discr_ty = self.monomorphize(discr_ty);
                let operand = self.codegen_consume(bx, place.as_ref());
                let discr = operand.codegen_get_discr(self, bx, discr_ty);
                OperandRef {
                    val: OperandValue::Immediate(discr),
                    layout: self.cx.layout_of(discr_ty),
                }
            }

            mir::Rvalue::NullaryOp(ref null_op, ty) => {
                let ty = self.monomorphize(ty);
                let layout = bx.cx().layout_of(ty);
                let val = match null_op {
                    mir::NullOp::SizeOf => {
                        assert!(bx.cx().type_is_sized(ty));
                        let val = layout.size.bytes();
                        bx.cx().const_usize(val)
                    }
                    mir::NullOp::AlignOf => {
                        assert!(bx.cx().type_is_sized(ty));
                        let val = layout.align.abi.bytes();
                        bx.cx().const_usize(val)
                    }
                    mir::NullOp::OffsetOf(fields) => {
                        let val = bx
                            .tcx()
                            .offset_of_subfield(bx.typing_env(), layout, fields.iter())
                            .bytes();
                        bx.cx().const_usize(val)
                    }
                    mir::NullOp::UbChecks => {
                        let val = bx.tcx().sess.ub_checks();
                        bx.cx().const_bool(val)
                    }
                    mir::NullOp::ContractChecks => {
                        let val = bx.tcx().sess.contract_checks();
                        bx.cx().const_bool(val)
                    }
                };
                let tcx = self.cx.tcx();
                OperandRef {
                    val: OperandValue::Immediate(val),
                    layout: self.cx.layout_of(null_op.ty(tcx)),
                }
            }

            mir::Rvalue::ThreadLocalRef(def_id) => {
                assert!(bx.cx().tcx().is_static(def_id));
                let layout = bx.layout_of(bx.cx().tcx().static_ptr_ty(def_id, bx.typing_env()));
                let static_ = if !def_id.is_local() && bx.cx().tcx().needs_thread_local_shim(def_id)
                {
                    let instance = ty::Instance {
                        def: ty::InstanceKind::ThreadLocalShim(def_id),
                        args: ty::GenericArgs::empty(),
                    };
                    let fn_ptr = bx.get_fn_addr(instance);
                    let fn_abi = bx.fn_abi_of_instance(instance, ty::List::empty());
                    let fn_ty = bx.fn_decl_backend_type(fn_abi);
                    let fn_attrs = if bx.tcx().def_kind(instance.def_id()).has_codegen_attrs() {
                        Some(bx.tcx().codegen_instance_attrs(instance.def))
                    } else {
                        None
                    };
                    bx.call(
                        fn_ty,
                        fn_attrs.as_deref(),
                        Some(fn_abi),
                        fn_ptr,
                        &[],
                        None,
                        Some(instance),
                    )
                } else {
                    bx.get_static(def_id)
                };
                OperandRef { val: OperandValue::Immediate(static_), layout }
            }
            mir::Rvalue::Use(ref operand) => self.codegen_operand(bx, operand),
            mir::Rvalue::Repeat(..) => bug!("{rvalue:?} in codegen_rvalue_operand"),
            mir::Rvalue::Aggregate(ref kind, ref fields) => {
                let (variant_index, active_field_index) = match **kind {
                    mir::AggregateKind::Adt(_, variant_index, _, _, active_field_index) => {
                        (variant_index, active_field_index)
                    }
                    _ => (FIRST_VARIANT, None),
                };

                let ty = rvalue.ty(self.mir, self.cx.tcx());
                let ty = self.monomorphize(ty);
                let layout = self.cx.layout_of(ty);

                let mut builder = OperandRefBuilder::new(layout);
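                // Store each provided operand into its field of the variant,
                // remapping through `active_field_index`, which identifies the
                // written field of a union.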
                for (field_idx, field) in fields.iter_enumerated() {
                    let op = self.codegen_operand(bx, field);
                    let fi = active_field_index.unwrap_or(field_idx);
                    builder.insert_field(bx, variant_index, fi, op);
                }

                let tag_result = codegen_tag_value(self.cx, variant_index, layout);
                match tag_result {
                    Err(super::place::UninhabitedVariantError) => {
                        bx.abort();
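                        // Nothing can run after the abort, but the caller still
                        // expects an operand of this layout, so return poison.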
                        let val = OperandValue::poison(bx, layout);
                        OperandRef { val, layout }
                    }
                    Ok(maybe_tag_value) => {
                        if let Some((tag_field, tag_imm)) = maybe_tag_value {
                            builder.insert_imm(tag_field, tag_imm);
                        }
                        builder.build(bx.cx())
                    }
                }
            }
            mir::Rvalue::ShallowInitBox(ref operand, content_ty) => {
                let operand = self.codegen_operand(bx, operand);
                let val = operand.immediate();

                let content_ty = self.monomorphize(content_ty);
                let box_layout = bx.cx().layout_of(Ty::new_box(bx.tcx(), content_ty));

                OperandRef { val: OperandValue::Immediate(val), layout: box_layout }
            }
            mir::Rvalue::WrapUnsafeBinder(ref operand, binder_ty) => {
                let operand = self.codegen_operand(bx, operand);
                let binder_ty = self.monomorphize(binder_ty);
                let layout = bx.cx().layout_of(binder_ty);
                OperandRef { val: operand.val, layout }
            }
        }
    }

    fn evaluate_array_len(&mut self, bx: &mut Bx, place: mir::Place<'tcx>) -> Bx::Value {
        // If the place is a direct local of array type, the length is a
        // compile-time constant and the place never needs to be materialized.
        if let Some(index) = place.as_local()
            && let LocalRef::Operand(op) = self.locals[index]
            && let ty::Array(_, n) = op.layout.ty.kind()
        {
            let n = n.try_to_target_usize(bx.tcx()).expect("expected monomorphic const in codegen");
            return bx.cx().const_usize(n);
        }
        // Otherwise read the length from the codegenned place, which also
        // covers slices via their metadata.
        let cg_value = self.codegen_place(bx, place.as_ref());
        cg_value.len(bx.cx())
    }

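    /// Codegen for `Rvalue::Ref` and `Rvalue::RawPtr`: computes the address of
    /// `place` and types it with the pointer type produced by `mk_ptr_ty`.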
    fn codegen_place_to_pointer(
        &mut self,
        bx: &mut Bx,
        place: mir::Place<'tcx>,
        mk_ptr_ty: impl FnOnce(TyCtxt<'tcx>, Ty<'tcx>) -> Ty<'tcx>,
    ) -> OperandRef<'tcx, Bx::Value> {
        let cg_place = self.codegen_place(bx, place.as_ref());
        let val = cg_place.val.address();

        let ty = cg_place.layout.ty;
        assert!(
            if bx.cx().tcx().type_has_metadata(ty, bx.cx().typing_env()) {
                matches!(val, OperandValue::Pair(..))
            } else {
                matches!(val, OperandValue::Immediate(..))
            },
            "Address of place was unexpectedly {val:?} for pointee type {ty:?}",
        );

        OperandRef { val, layout: self.cx.layout_of(mk_ptr_ty(self.cx.tcx(), ty)) }
    }

    fn codegen_scalar_binop(
        &mut self,
        bx: &mut Bx,
        op: mir::BinOp,
        lhs: Bx::Value,
        rhs: Bx::Value,
        lhs_ty: Ty<'tcx>,
        rhs_ty: Ty<'tcx>,
    ) -> Bx::Value {
        let is_float = lhs_ty.is_floating_point();
        let is_signed = lhs_ty.is_signed();
        match op {
            mir::BinOp::Add => {
                if is_float {
                    bx.fadd(lhs, rhs)
                } else {
                    bx.add(lhs, rhs)
                }
            }
            mir::BinOp::AddUnchecked => {
                if is_signed {
                    bx.unchecked_sadd(lhs, rhs)
                } else {
                    bx.unchecked_uadd(lhs, rhs)
                }
            }
            mir::BinOp::Sub => {
                if is_float {
                    bx.fsub(lhs, rhs)
                } else {
                    bx.sub(lhs, rhs)
                }
            }
            mir::BinOp::SubUnchecked => {
                if is_signed {
                    bx.unchecked_ssub(lhs, rhs)
                } else {
                    bx.unchecked_usub(lhs, rhs)
                }
            }
            mir::BinOp::Mul => {
                if is_float {
                    bx.fmul(lhs, rhs)
                } else {
                    bx.mul(lhs, rhs)
                }
            }
            mir::BinOp::MulUnchecked => {
                if is_signed {
                    bx.unchecked_smul(lhs, rhs)
                } else {
                    bx.unchecked_umul(lhs, rhs)
                }
            }
            mir::BinOp::Div => {
                if is_float {
                    bx.fdiv(lhs, rhs)
                } else if is_signed {
                    bx.sdiv(lhs, rhs)
                } else {
                    bx.udiv(lhs, rhs)
                }
            }
            mir::BinOp::Rem => {
                if is_float {
                    bx.frem(lhs, rhs)
                } else if is_signed {
                    bx.srem(lhs, rhs)
                } else {
                    bx.urem(lhs, rhs)
                }
            }
            mir::BinOp::BitOr => bx.or(lhs, rhs),
            mir::BinOp::BitAnd => bx.and(lhs, rhs),
            mir::BinOp::BitXor => bx.xor(lhs, rhs),
            mir::BinOp::Offset => {
                let pointee_type = lhs_ty
                    .builtin_deref(true)
                    .unwrap_or_else(|| bug!("deref of non-pointer {:?}", lhs_ty));
                let pointee_layout = bx.cx().layout_of(pointee_type);
                if pointee_layout.is_zst() {
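                    // `Offset` counts in units of the pointee size, so offsetting
                    // a pointer to a ZST is a no-op.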
                    lhs
                } else {
                    let llty = bx.cx().backend_type(pointee_layout);
                    if !rhs_ty.is_signed() {
                        bx.inbounds_nuw_gep(llty, lhs, &[rhs])
                    } else {
                        bx.inbounds_gep(llty, lhs, &[rhs])
                    }
                }
            }
            mir::BinOp::Shl | mir::BinOp::ShlUnchecked => {
                let rhs = base::build_shift_expr_rhs(bx, lhs, rhs, op == mir::BinOp::ShlUnchecked);
                bx.shl(lhs, rhs)
            }
            mir::BinOp::Shr | mir::BinOp::ShrUnchecked => {
                let rhs = base::build_shift_expr_rhs(bx, lhs, rhs, op == mir::BinOp::ShrUnchecked);
                if is_signed { bx.ashr(lhs, rhs) } else { bx.lshr(lhs, rhs) }
            }
            mir::BinOp::Ne
            | mir::BinOp::Lt
            | mir::BinOp::Gt
            | mir::BinOp::Eq
            | mir::BinOp::Le
            | mir::BinOp::Ge => {
                if is_float {
                    bx.fcmp(base::bin_op_to_fcmp_predicate(op), lhs, rhs)
                } else {
                    bx.icmp(base::bin_op_to_icmp_predicate(op, is_signed), lhs, rhs)
                }
            }
            mir::BinOp::Cmp => {
                use std::cmp::Ordering;
                assert!(!is_float);
                if let Some(value) = bx.three_way_compare(lhs_ty, lhs, rhs) {
                    return value;
                }
                let pred = |op| base::bin_op_to_icmp_predicate(op, is_signed);
                if bx.cx().tcx().sess.opts.optimize == OptLevel::No {
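                    // Unoptimized lowering: `(lhs > rhs) as i8 - (lhs < rhs) as i8`,
                    // which produces the -1/0/1 values of `Ordering` without branches.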
                    let is_gt = bx.icmp(pred(mir::BinOp::Gt), lhs, rhs);
                    let gtext = bx.zext(is_gt, bx.type_i8());
                    let is_lt = bx.icmp(pred(mir::BinOp::Lt), lhs, rhs);
                    let ltext = bx.zext(is_lt, bx.type_i8());
                    bx.unchecked_ssub(gtext, ltext)
                } else {
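                    // Optimized lowering: `Less` if `lhs < rhs`, otherwise
                    // `Greater` if `lhs != rhs`, otherwise `Equal`.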
                    let is_lt = bx.icmp(pred(mir::BinOp::Lt), lhs, rhs);
                    let is_ne = bx.icmp(pred(mir::BinOp::Ne), lhs, rhs);
                    let ge = bx.select(
                        is_ne,
                        bx.cx().const_i8(Ordering::Greater as i8),
                        bx.cx().const_i8(Ordering::Equal as i8),
                    );
                    bx.select(is_lt, bx.cx().const_i8(Ordering::Less as i8), ge)
                }
            }
            mir::BinOp::AddWithOverflow
            | mir::BinOp::SubWithOverflow
            | mir::BinOp::MulWithOverflow => {
                bug!("{op:?} needs to return a pair, so call codegen_scalar_checked_binop instead")
            }
        }
    }

    fn codegen_wide_ptr_binop(
        &mut self,
        bx: &mut Bx,
        op: mir::BinOp,
        lhs_addr: Bx::Value,
        lhs_extra: Bx::Value,
        rhs_addr: Bx::Value,
        rhs_extra: Bx::Value,
        _input_ty: Ty<'tcx>,
    ) -> Bx::Value {
        match op {
            mir::BinOp::Eq => {
                let lhs = bx.icmp(IntPredicate::IntEQ, lhs_addr, rhs_addr);
                let rhs = bx.icmp(IntPredicate::IntEQ, lhs_extra, rhs_extra);
                bx.and(lhs, rhs)
            }
            mir::BinOp::Ne => {
                let lhs = bx.icmp(IntPredicate::IntNE, lhs_addr, rhs_addr);
                let rhs = bx.icmp(IntPredicate::IntNE, lhs_extra, rhs_extra);
                bx.or(lhs, rhs)
            }
            mir::BinOp::Le | mir::BinOp::Lt | mir::BinOp::Ge | mir::BinOp::Gt => {
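                // Lexicographic comparison of (addr, extra):
                // `a OP b` lowers to `a.0 STRICT(OP) b.0 | (a.0 == b.0 & a.1 OP b.1)`.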
                let (op, strict_op) = match op {
                    mir::BinOp::Lt => (IntPredicate::IntULT, IntPredicate::IntULT),
                    mir::BinOp::Le => (IntPredicate::IntULE, IntPredicate::IntULT),
                    mir::BinOp::Gt => (IntPredicate::IntUGT, IntPredicate::IntUGT),
                    mir::BinOp::Ge => (IntPredicate::IntUGE, IntPredicate::IntUGT),
                    _ => bug!(),
                };
                let lhs = bx.icmp(strict_op, lhs_addr, rhs_addr);
                let and_lhs = bx.icmp(IntPredicate::IntEQ, lhs_addr, rhs_addr);
                let and_rhs = bx.icmp(op, lhs_extra, rhs_extra);
                let rhs = bx.and(and_lhs, and_rhs);
                bx.or(lhs, rhs)
            }
            _ => {
                bug!("unexpected wide ptr binop");
            }
        }
    }

    fn codegen_scalar_checked_binop(
        &mut self,
        bx: &mut Bx,
        op: mir::BinOp,
        lhs: Bx::Value,
        rhs: Bx::Value,
        input_ty: Ty<'tcx>,
    ) -> OperandValue<Bx::Value> {
        let (val, of) = match op {
            mir::BinOp::Add | mir::BinOp::Sub | mir::BinOp::Mul => {
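                // Map the MIR operator to the backend's overflow-checked
                // intrinsic, which yields a (result, overflowed) pair.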
                let oop = match op {
                    mir::BinOp::Add => OverflowOp::Add,
                    mir::BinOp::Sub => OverflowOp::Sub,
                    mir::BinOp::Mul => OverflowOp::Mul,
                    _ => unreachable!(),
                };
                bx.checked_binop(oop, input_ty, lhs, rhs)
            }
            _ => bug!("Operator `{:?}` is not a checkable operator", op),
        };

        OperandValue::Pair(val, of)
    }

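    /// Returns `true` if this `rvalue` can be computed into an `OperandRef`,
    /// rather than needing an allocated place for the result.
    ///
    /// Any rvalue for which this returns `true` must be handled by
    /// `codegen_rvalue_operand`, which asserts exactly that.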
    pub(crate) fn rvalue_creates_operand(&self, rvalue: &mir::Rvalue<'tcx>) -> bool {
        match *rvalue {
            mir::Rvalue::Cast(mir::CastKind::Transmute, ref operand, cast_ty) => {
                let operand_ty = operand.ty(self.mir, self.cx.tcx());
                let cast_layout = self.cx.layout_of(self.monomorphize(cast_ty));
                let operand_layout = self.cx.layout_of(self.monomorphize(operand_ty));
                match (operand_layout.backend_repr, cast_layout.backend_repr) {
                    (_, abi::BackendRepr::Memory { .. }) => cast_layout.is_zst(),

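                    // A source that lives in memory can always be loaded back
                    // with the (non-memory) cast layout.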
                    (abi::BackendRepr::Memory { .. }, _) => true,

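                    // Scalar and pair transmutes are only done as operands when
                    // the component sizes match, avoiding endianness questions.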
                    (abi::BackendRepr::Scalar(a), abi::BackendRepr::Scalar(b)) => {
                        a.size(self.cx) == b.size(self.cx)
                    }
                    (abi::BackendRepr::ScalarPair(a0, a1), abi::BackendRepr::ScalarPair(b0, b1)) => {
                        a0.size(self.cx) == b0.size(self.cx) && a1.size(self.cx) == b1.size(self.cx)
                    }

                    (abi::BackendRepr::Scalar(_), abi::BackendRepr::ScalarPair(_, _))
                    | (abi::BackendRepr::ScalarPair(_, _), abi::BackendRepr::Scalar(_)) => false,

                    (abi::BackendRepr::SimdVector { .. }, _)
                    | (_, abi::BackendRepr::SimdVector { .. }) => false,
                }
            }
            mir::Rvalue::Ref(..)
            | mir::Rvalue::CopyForDeref(..)
            | mir::Rvalue::RawPtr(..)
            | mir::Rvalue::Len(..)
            | mir::Rvalue::Cast(..)
            | mir::Rvalue::ShallowInitBox(..)
            | mir::Rvalue::BinaryOp(..)
            | mir::Rvalue::UnaryOp(..)
            | mir::Rvalue::Discriminant(..)
            | mir::Rvalue::NullaryOp(..)
            | mir::Rvalue::ThreadLocalRef(_)
            | mir::Rvalue::Use(..)
            | mir::Rvalue::Aggregate(..)
            | mir::Rvalue::WrapUnsafeBinder(..) => true,
            mir::Rvalue::Repeat(..) => false,
        }
    }
}

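/// Transmutes a scalar immediate between two scalar layouts of the same size,
/// returning the result in immediate form (e.g. `i1` rather than `i8` for
/// `bool`-like scalars).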
pub(super) fn transmute_scalar<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
    bx: &mut Bx,
    mut imm: Bx::Value,
    from_scalar: abi::Scalar,
    to_scalar: abi::Scalar,
) -> Bx::Value {
    assert_eq!(from_scalar.size(bx.cx()), to_scalar.size(bx.cx()));
    let imm_ty = bx.cx().val_ty(imm);
    assert_ne!(
        bx.cx().type_kind(imm_ty),
        TypeKind::Vector,
        "Vector type {imm_ty:?} not allowed in transmute_scalar {from_scalar:?} -> {to_scalar:?}"
    );

    if from_scalar == to_scalar {
        return imm;
    }

    use abi::Primitive::*;
    imm = bx.from_immediate(imm);

    let from_backend_ty = bx.cx().type_from_scalar(from_scalar);
    debug_assert_eq!(bx.cx().val_ty(imm), from_backend_ty);
    let to_backend_ty = bx.cx().type_from_scalar(to_scalar);

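    // Inform the backend of the input's valid range; this can help it simplify
    // the conversion below and later uses of the result.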
    assume_scalar_range(bx, imm, from_scalar, from_backend_ty);

    imm = match (from_scalar.primitive(), to_scalar.primitive()) {
        (Int(..) | Float(_), Int(..) | Float(_)) => bx.bitcast(imm, to_backend_ty),
        (Pointer(..), Pointer(..)) => bx.pointercast(imm, to_backend_ty),
        (Int(..), Pointer(..)) => bx.ptradd(bx.const_null(bx.type_ptr()), imm),
        (Pointer(..), Int(..)) => {
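            // Note: `ptrtoint` exposes the pointer's provenance, which is more
            // than a plain transmute strictly requires.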
            bx.ptrtoint(imm, to_backend_ty)
        }
        (Float(_), Pointer(..)) => {
            let int_imm = bx.bitcast(imm, bx.cx().type_isize());
            bx.ptradd(bx.const_null(bx.type_ptr()), int_imm)
        }
        (Pointer(..), Float(_)) => {
            let int_imm = bx.ptrtoint(imm, bx.cx().type_isize());
            bx.bitcast(int_imm, to_backend_ty)
        }
    };

    debug_assert_eq!(bx.cx().val_ty(imm), to_backend_ty);

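    // A transmute is only defined when the bits are valid for the target type,
    // so the target scalar's (possibly narrower) valid range can be assumed too.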
    assume_scalar_range(bx, imm, to_scalar, to_backend_ty);

    imm = bx.to_immediate_scalar(imm, to_scalar);
    imm
}

fn assume_scalar_range<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
    bx: &mut Bx,
    imm: Bx::Value,
    scalar: abi::Scalar,
    backend_ty: Bx::Type,
) {
    if matches!(bx.cx().sess().opts.optimize, OptLevel::No) || scalar.is_always_valid(bx.cx()) {
        return;
    }

    match scalar.primitive() {
        abi::Primitive::Int(..) => {
            let range = scalar.valid_range(bx.cx());
            bx.assume_integer_range(imm, backend_ty, range);
        }
        abi::Primitive::Pointer(abi::AddressSpace::ZERO)
            if !scalar.valid_range(bx.cx()).contains(0) =>
        {
            bx.assume_nonnull(imm);
        }
        abi::Primitive::Pointer(..) | abi::Primitive::Float(..) => {}
    }
}