ScalarInt: size mismatches are a bug, do not delay the panic
RalfJung committed Jun 10, 2024
1 parent 13423be commit 3c57ea0
Showing 45 changed files with 229 additions and 295 deletions.
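
The pattern is mechanical across all 45 files: `ScalarInt` accessors that returned a `Result` (forcing an `unwrap()` at each call site, or silently deferring the failure) are replaced by `to_*` variants that panic on the spot when the requested size does not match the value's actual size. A minimal standalone sketch of the idea, using simplified stand-ins for the real `ScalarInt` and `Size` types from `rustc_middle` (the definitions below model the API shape; they are not the actual compiler code):

```rust
#[derive(Clone, Copy, Debug, PartialEq)]
struct Size {
    bytes: u64,
}

#[derive(Clone, Copy, Debug)]
struct ScalarInt {
    data: u128, // raw bits, zero-extended to 128 bits
    size: Size, // the width this value actually has
}

impl ScalarInt {
    fn size(&self) -> Size {
        self.size
    }

    // Old shape: a `Result`, so a size mismatch surfaced only wherever a
    // caller eventually got around to unwrapping it.
    fn try_to_bits(self, target: Size) -> Result<u128, Size> {
        if self.size == target { Ok(self.data) } else { Err(self.size) }
    }

    // New shape: a size mismatch is a compiler bug, so panic immediately,
    // pointing at the offending call site.
    #[track_caller]
    fn to_bits(self, target: Size) -> u128 {
        self.try_to_bits(target)
            .unwrap_or_else(|actual| panic!("expected int of size {target:?}, got {actual:?}"))
    }
}

fn main() {
    let int = ScalarInt { data: 42, size: Size { bytes: 4 } };
    // Before: int.assert_bits(size) or int.try_to_bits(size).unwrap().
    // After: the panic lives inside the accessor itself.
    assert_eq!(int.to_bits(int.size()), 42);
}
```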
6 changes: 3 additions & 3 deletions compiler/rustc_codegen_cranelift/src/constant.rs
@@ -110,7 +110,7 @@ pub(crate) fn codegen_const_value<'tcx>(
if fx.clif_type(layout.ty).is_some() {
return CValue::const_val(fx, layout, int);
} else {
- let raw_val = int.size().truncate(int.assert_bits(int.size()));
+ let raw_val = int.size().truncate(int.to_bits(int.size()));
let val = match int.size().bytes() {
1 => fx.bcx.ins().iconst(types::I8, raw_val as i64),
2 => fx.bcx.ins().iconst(types::I16, raw_val as i64),
@@ -501,12 +501,12 @@ pub(crate) fn mir_operand_get_const_val<'tcx>(
Ordering::Equal => scalar_int,
Ordering::Less => match ty.kind() {
ty::Uint(_) => ScalarInt::try_from_uint(
- scalar_int.assert_uint(scalar_int.size()),
+ scalar_int.to_uint(scalar_int.size()),
fx.layout_of(*ty).size,
)
.unwrap(),
ty::Int(_) => ScalarInt::try_from_int(
- scalar_int.assert_int(scalar_int.size()),
+ scalar_int.to_int(scalar_int.size()),
fx.layout_of(*ty).size,
)
.unwrap(),
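The `Ordering::Less` arm above widens a constant: it reads the value at its current size with `to_uint`/`to_int`, then rebuilds it at the target size with `ScalarInt::try_from_uint`/`try_from_int`, which only fail if the value does not fit. A sketch of the unsigned half of that round-trip, extending the simplified model above (`try_from_uint` here is a stand-in for, not a copy of, the real constructor):

```rust
impl ScalarInt {
    // Unsigned read: once the size is known to match, the raw bits are the value.
    fn to_uint(self, target: Size) -> u128 {
        self.to_bits(target)
    }

    // Rebuild at a possibly different size; `None` if the value overflows it.
    fn try_from_uint(value: u128, size: Size) -> Option<ScalarInt> {
        let max = if size.bytes >= 16 {
            u128::MAX
        } else {
            (1u128 << (8 * size.bytes)) - 1
        };
        (value <= max).then_some(ScalarInt { data: value, size })
    }
}
```

Widening then has exactly the shape of the cranelift code above: `ScalarInt::try_from_uint(old.to_uint(old.size()), new_size).unwrap()`.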
10 changes: 5 additions & 5 deletions compiler/rustc_codegen_cranelift/src/intrinsics/llvm_x86.rs
@@ -902,7 +902,7 @@ pub(crate) fn codegen_x86_llvm_intrinsic_call<'tcx>(
.span_fatal(span, "Index argument for `_mm_cmpestri` is not a constant");
};

- let imm8 = imm8.try_to_u8().unwrap_or_else(|_| panic!("kind not scalar: {:?}", imm8));
+ let imm8 = imm8.to_u8();

codegen_inline_asm_inner(
fx,
@@ -955,7 +955,7 @@ pub(crate) fn codegen_x86_llvm_intrinsic_call<'tcx>(
.span_fatal(span, "Index argument for `_mm_cmpestrm` is not a constant");
};

- let imm8 = imm8.try_to_u8().unwrap_or_else(|_| panic!("kind not scalar: {:?}", imm8));
+ let imm8 = imm8.to_u8();

codegen_inline_asm_inner(
fx,
@@ -1003,7 +1003,7 @@ pub(crate) fn codegen_x86_llvm_intrinsic_call<'tcx>(
);
};

- let imm8 = imm8.try_to_u8().unwrap_or_else(|_| panic!("kind not scalar: {:?}", imm8));
+ let imm8 = imm8.to_u8();

codegen_inline_asm_inner(
fx,
@@ -1040,7 +1040,7 @@ pub(crate) fn codegen_x86_llvm_intrinsic_call<'tcx>(
);
};

- let imm8 = imm8.try_to_u8().unwrap_or_else(|_| panic!("kind not scalar: {:?}", imm8));
+ let imm8 = imm8.to_u8();

codegen_inline_asm_inner(
fx,
@@ -1195,7 +1195,7 @@ pub(crate) fn codegen_x86_llvm_intrinsic_call<'tcx>(
.span_fatal(span, "Func argument for `_mm_sha1rnds4_epu32` is not a constant");
};

- let func = func.try_to_u8().unwrap_or_else(|_| panic!("kind not scalar: {:?}", func));
+ let func = func.to_u8();

codegen_inline_asm_inner(
fx,
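All five sites above collapse the same boilerplate: `try_to_u8().unwrap_or_else(|_| panic!("kind not scalar: {:?}", ...))` becomes `to_u8()`. Besides being shorter, this retires a misleading panic message, since `try_to_u8` failed on a size mismatch, not on a non-scalar kind. In the simplified model, such a fixed-width accessor is a thin wrapper (a sketch, not the real rustc definition):

```rust
impl ScalarInt {
    // Panics unless `self` is exactly one byte wide, mirroring the new
    // fail-fast behavior of the real `ScalarInt::to_u8`.
    #[track_caller]
    fn to_u8(self) -> u8 {
        self.to_bits(Size { bytes: 1 }) as u8
    }
}
```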
11 changes: 3 additions & 8 deletions compiler/rustc_codegen_cranelift/src/intrinsics/simd.rs
@@ -147,8 +147,7 @@ pub(super) fn codegen_simd_intrinsic_call<'tcx>(

let total_len = lane_count * 2;

- let indexes =
-     idx.iter().map(|idx| idx.unwrap_leaf().try_to_u32().unwrap()).collect::<Vec<u32>>();
+ let indexes = idx.iter().map(|idx| idx.unwrap_leaf().to_u32()).collect::<Vec<u32>>();

for &idx in &indexes {
assert!(u64::from(idx) < total_len, "idx {} out of range 0..{}", idx, total_len);
@@ -282,9 +281,7 @@ pub(super) fn codegen_simd_intrinsic_call<'tcx>(
fx.tcx.dcx().span_fatal(span, "Index argument for `simd_insert` is not a constant");
};

- let idx: u32 = idx_const
-     .try_to_u32()
-     .unwrap_or_else(|_| panic!("kind not scalar: {:?}", idx_const));
+ let idx: u32 = idx_const.to_u32();
let (lane_count, _lane_ty) = base.layout().ty.simd_size_and_type(fx.tcx);
if u64::from(idx) >= lane_count {
fx.tcx.dcx().span_fatal(
@@ -330,9 +327,7 @@ pub(super) fn codegen_simd_intrinsic_call<'tcx>(
return;
};

- let idx = idx_const
-     .try_to_u32()
-     .unwrap_or_else(|_| panic!("kind not scalar: {:?}", idx_const));
+ let idx = idx_const.to_u32();
let (lane_count, _lane_ty) = v.layout().ty.simd_size_and_type(fx.tcx);
if u64::from(idx) >= lane_count {
fx.tcx.dcx().span_fatal(
4 changes: 2 additions & 2 deletions compiler/rustc_codegen_cranelift/src/value_and_place.rs
@@ -327,7 +327,7 @@ impl<'tcx> CValue<'tcx> {

let val = match layout.ty.kind() {
ty::Uint(UintTy::U128) | ty::Int(IntTy::I128) => {
- let const_val = const_val.assert_bits(layout.size);
+ let const_val = const_val.to_bits(layout.size);
let lsb = fx.bcx.ins().iconst(types::I64, const_val as u64 as i64);
let msb = fx.bcx.ins().iconst(types::I64, (const_val >> 64) as u64 as i64);
fx.bcx.ins().iconcat(lsb, msb)
@@ -339,7 +339,7 @@
| ty::Ref(..)
| ty::RawPtr(..)
| ty::FnPtr(..) => {
- let raw_val = const_val.size().truncate(const_val.assert_bits(layout.size));
+ let raw_val = const_val.size().truncate(const_val.to_bits(layout.size));
fx.bcx.ins().iconst(clif_ty, raw_val as i64)
}
ty::Float(FloatTy::F32) => {
2 changes: 1 addition & 1 deletion compiler/rustc_codegen_gcc/src/common.rs
@@ -166,7 +166,7 @@ impl<'gcc, 'tcx> ConstMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
let bitsize = if layout.is_bool() { 1 } else { layout.size(self).bits() };
match cv {
Scalar::Int(int) => {
- let data = int.assert_bits(layout.size(self));
+ let data = int.to_bits(layout.size(self));

// FIXME(antoyo): there's some issues with using the u128 code that follows, so hard-code
// the paths for floating-point values.
2 changes: 1 addition & 1 deletion compiler/rustc_codegen_llvm/src/common.rs
@@ -244,7 +244,7 @@ impl<'ll, 'tcx> ConstMethods<'tcx> for CodegenCx<'ll, 'tcx> {
let bitsize = if layout.is_bool() { 1 } else { layout.size(self).bits() };
match cv {
Scalar::Int(int) => {
- let data = int.assert_bits(layout.size(self));
+ let data = int.to_bits(layout.size(self));
let llval = self.const_uint_big(self.type_ix(bitsize), data);
if matches!(layout.primitive(), Pointer(_)) {
unsafe { llvm::LLVMConstIntToPtr(llval, llty) }
2 changes: 1 addition & 1 deletion compiler/rustc_codegen_llvm/src/intrinsic.rs
@@ -1221,7 +1221,7 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
.iter()
.enumerate()
.map(|(arg_idx, val)| {
- let idx = val.unwrap_leaf().try_to_i32().unwrap();
+ let idx = val.unwrap_leaf().to_i32();
if idx >= i32::try_from(total_len).unwrap() {
bx.sess().dcx().emit_err(InvalidMonomorphization::SimdIndexOutOfBounds {
span,
2 changes: 1 addition & 1 deletion compiler/rustc_codegen_ssa/src/common.rs
@@ -163,7 +163,7 @@ pub fn asm_const_to_str<'tcx>(
let mir::ConstValue::Scalar(scalar) = const_value else {
span_bug!(sp, "expected Scalar for promoted asm const, but got {:#?}", const_value)
};
- let value = scalar.assert_bits(ty_and_layout.size);
+ let value = scalar.assert_scalar_int().to_bits(ty_and_layout.size);
match ty_and_layout.ty.kind() {
ty::Uint(_) => value.to_string(),
ty::Int(int_ty) => match int_ty.normalize(tcx.sess.target.pointer_width) {
8 changes: 4 additions & 4 deletions compiler/rustc_const_eval/src/const_eval/valtrees.rs
@@ -95,10 +95,10 @@ fn const_to_valtree_inner<'tcx>(
}
ty::Bool | ty::Int(_) | ty::Uint(_) | ty::Float(_) | ty::Char => {
let val = ecx.read_immediate(place)?;
- let val = val.to_scalar();
+ let val = val.to_scalar_int().unwrap();
*num_nodes += 1;

- Ok(ty::ValTree::Leaf(val.assert_int()))
+ Ok(ty::ValTree::Leaf(val))
}

ty::Pat(base, ..) => {
@@ -125,7 +125,7 @@
let val = val.to_scalar();
// We are in the CTFE machine, so ptr-to-int casts will fail.
// This can only be `Ok` if `val` already is an integer.
- let Ok(val) = val.try_to_int() else {
+ let Ok(val) = val.try_to_scalar_int() else {
return Err(ValTreeCreationError::NonSupportedType);
};
// It's just a ScalarInt!
@@ -411,7 +411,7 @@ fn valtree_into_mplace<'tcx>(
ty::Adt(def, _) if def.is_enum() => {
// First element of valtree corresponds to variant
let scalar_int = branches[0].unwrap_leaf();
- let variant_idx = VariantIdx::from_u32(scalar_int.try_to_u32().unwrap());
+ let variant_idx = VariantIdx::from_u32(scalar_int.to_u32());
let variant = def.variant(variant_idx);
debug!(?variant);

15 changes: 7 additions & 8 deletions compiler/rustc_const_eval/src/interpret/discriminant.rs
@@ -123,14 +123,14 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
// (`tag_bits` itself is only used for error messages below.)
let tag_bits = tag_val
.to_scalar()
-     .try_to_int()
+     .try_to_scalar_int()
.map_err(|dbg_val| err_ub!(InvalidTag(dbg_val)))?
-     .assert_bits(tag_layout.size);
+     .to_bits(tag_layout.size);
// Cast bits from tag layout to discriminant layout.
// After the checks we did above, this cannot fail, as
// discriminants are int-like.
let discr_val = self.int_to_int_or_float(&tag_val, discr_layout).unwrap();
- let discr_bits = discr_val.to_scalar().assert_bits(discr_layout.size);
+ let discr_bits = discr_val.to_scalar().to_bits(discr_layout.size)?;
// Convert discriminant to variant index, and catch invalid discriminants.
let index = match *ty.kind() {
ty::Adt(adt, _) => {
@@ -152,7 +152,7 @@
// discriminant (encoded in niche/tag) and variant index are the same.
let variants_start = niche_variants.start().as_u32();
let variants_end = niche_variants.end().as_u32();
- let variant = match tag_val.try_to_int() {
+ let variant = match tag_val.try_to_scalar_int() {
Err(dbg_val) => {
// So this is a pointer then, and casting to an int failed.
// Can only happen during CTFE.
@@ -167,15 +167,15 @@
untagged_variant
}
Ok(tag_bits) => {
- let tag_bits = tag_bits.assert_bits(tag_layout.size);
+ let tag_bits = tag_bits.to_bits(tag_layout.size);
// We need to use machine arithmetic to get the relative variant idx:
// variant_index_relative = tag_val - niche_start_val
let tag_val = ImmTy::from_uint(tag_bits, tag_layout);
let niche_start_val = ImmTy::from_uint(niche_start, tag_layout);
let variant_index_relative_val =
self.binary_op(mir::BinOp::Sub, &tag_val, &niche_start_val)?;
let variant_index_relative =
-     variant_index_relative_val.to_scalar().assert_bits(tag_val.layout.size);
+     variant_index_relative_val.to_scalar().to_bits(tag_val.layout.size)?;
// Check if this is in the range that indicates an actual discriminant.
if variant_index_relative <= u128::from(variants_end - variants_start) {
let variant_index_relative = u32::try_from(variant_index_relative)
@@ -294,8 +294,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
ImmTy::from_uint(variant_index_relative, tag_layout);
let tag = self
.binary_op(mir::BinOp::Add, &variant_index_relative_val, &niche_start_val)?
-     .to_scalar()
-     .assert_int();
+     .to_scalar_int()?;
Ok(Some((tag, tag_field)))
}
}
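This file shows the two layers of the API side by side. A `Scalar` may still be a pointer, so `Scalar::to_bits` stays fallible and its call sites gain a `?` (e.g. `discr_bits`); once a `ScalarInt` is in hand via `try_to_scalar_int`, the only thing that can go wrong is a size mismatch, which now panics (e.g. `tag_bits`). A sketch of that layering, reusing the simplified `ScalarInt`/`Size` model from the first sketch (the real `Scalar::to_bits` returns an `InterpResult`, modeled here as a plain `Result`):

```rust
enum Scalar {
    Int(ScalarInt),
    Ptr(usize), // provenance elided in this model
}

impl Scalar {
    // Fallible: the scalar may be a pointer, not an integer.
    fn try_to_scalar_int(self) -> Result<ScalarInt, usize> {
        match self {
            Scalar::Int(int) => Ok(int),
            Scalar::Ptr(ptr) => Err(ptr),
        }
    }

    // Fallible on pointers (an interpreter error), panicking on a size
    // mismatch (a compiler bug) via `ScalarInt::to_bits`.
    fn to_bits(self, target: Size) -> Result<u128, &'static str> {
        Ok(self
            .try_to_scalar_int()
            .map_err(|_| "pointer cannot be read as raw bits")?
            .to_bits(target))
    }
}
```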
2 changes: 1 addition & 1 deletion compiler/rustc_const_eval/src/interpret/intrinsics.rs
@@ -519,7 +519,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
// `x % y != 0` or `y == 0` or `x == T::MIN && y == -1`.
// First, check x % y != 0 (or if that computation overflows).
let rem = self.binary_op(BinOp::Rem, a, b)?;
- if rem.to_scalar().assert_bits(a.layout.size) != 0 {
+ if rem.to_scalar().to_bits(a.layout.size)? != 0 {
throw_ub_custom!(
fluent::const_eval_exact_div_has_remainder,
a = format!("{a}"),
2 changes: 1 addition & 1 deletion compiler/rustc_const_eval/src/interpret/memory.rs
@@ -1344,7 +1344,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
/// Test if this value might be null.
/// If the machine does not support ptr-to-int casts, this is conservative.
pub fn scalar_may_be_null(&self, scalar: Scalar<M::Provenance>) -> InterpResult<'tcx, bool> {
- Ok(match scalar.try_to_int() {
+ Ok(match scalar.try_to_scalar_int() {
Ok(int) => int.is_null(),
Err(_) => {
// Can only happen during CTFE.
17 changes: 8 additions & 9 deletions compiler/rustc_const_eval/src/interpret/operand.rs
@@ -87,6 +87,12 @@ impl<Prov: Provenance> Immediate<Prov> {
}
}

+ #[inline]
+ #[cfg_attr(debug_assertions, track_caller)] // only in debug builds due to perf (see #98980)
+ pub fn to_scalar_int(self) -> ScalarInt {
+     self.to_scalar().try_to_scalar_int().unwrap()
+ }
+
#[inline]
#[cfg_attr(debug_assertions, track_caller)] // only in debug builds due to perf (see #98980)
pub fn to_scalar_pair(self) -> (Scalar<Prov>, Scalar<Prov>) {
@@ -219,19 +225,11 @@ impl<'tcx, Prov: Provenance> ImmTy<'tcx, Prov> {
Self::from_scalar(Scalar::from(s), layout)
}

- #[inline]
- pub fn try_from_uint(i: impl Into<u128>, layout: TyAndLayout<'tcx>) -> Option<Self> {
-     Some(Self::from_scalar(Scalar::try_from_uint(i, layout.size)?, layout))
- }
#[inline]
pub fn from_uint(i: impl Into<u128>, layout: TyAndLayout<'tcx>) -> Self {
Self::from_scalar(Scalar::from_uint(i, layout.size), layout)
}

- #[inline]
- pub fn try_from_int(i: impl Into<i128>, layout: TyAndLayout<'tcx>) -> Option<Self> {
-     Some(Self::from_scalar(Scalar::try_from_int(i, layout.size)?, layout))
- }
#[inline]
pub fn from_int(i: impl Into<i128>, layout: TyAndLayout<'tcx>) -> Self {
Self::from_scalar(Scalar::from_int(i, layout.size), layout)
@@ -276,7 +274,8 @@ impl<'tcx, Prov: Provenance> ImmTy<'tcx, Prov> {
#[inline]
pub fn to_const_int(self) -> ConstInt {
assert!(self.layout.ty.is_integral());
- let int = self.to_scalar().assert_int();
+ let int = self.imm.to_scalar_int();
+ assert_eq!(int.size(), self.layout.size);
ConstInt::new(int, self.layout.ty.is_signed(), self.layout.ty.is_ptr_sized_integral())
}

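Two things happen here. First, the new `Immediate::to_scalar_int` helper extracts the integer without knowing the value's layout, which is why `to_const_int` now adds its own `assert_eq!(int.size(), self.layout.size)`. Second, the fallible `ImmTy::try_from_uint`/`try_from_int` constructors are dropped in favor of the panicking `from_uint`/`from_int`, presumably because the remaining callers always expect the value to fit. A sketch of why the size check moves to the caller, using hypothetical `ImmTy`/`Layout` stand-ins that extend the model above (not the real rustc types):

```rust
struct Layout {
    size: Size,
}

struct ImmTy {
    int: ScalarInt, // the real `Immediate` is an enum; a bare int suffices here
    layout: Layout,
}

impl ImmTy {
    #[track_caller]
    fn to_const_int(self) -> ScalarInt {
        let int = self.int;
        // Extracting the int (the model of `to_scalar_int`) checks nothing
        // about the layout, so the caller re-asserts the size itself.
        assert_eq!(int.size, self.layout.size);
        int
    }
}
```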
8 changes: 4 additions & 4 deletions compiler/rustc_const_eval/src/interpret/operator.rs
@@ -95,10 +95,10 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
let l = left.to_scalar_int()?;
let r = right.to_scalar_int()?;
// Prepare to convert the values to signed or unsigned form.
- let l_signed = || l.assert_int(left.layout.size);
- let l_unsigned = || l.assert_uint(left.layout.size);
- let r_signed = || r.assert_int(right.layout.size);
- let r_unsigned = || r.assert_uint(right.layout.size);
+ let l_signed = || l.to_int(left.layout.size);
+ let l_unsigned = || l.to_uint(left.layout.size);
+ let r_signed = || r.to_int(right.layout.size);
+ let r_unsigned = || r.to_uint(right.layout.size);

let throw_ub_on_overflow = match bin_op {
AddUnchecked => Some(sym::unchecked_add),
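The four closures above choose a signed or unsigned reading of the same bits. In `ScalarInt` terms, `to_uint` returns the raw bits zero-extended to `u128`, while `to_int` sign-extends them to `i128`; a self-contained sketch of that sign handling (assumed semantics matching the real accessors):

```rust
// `bits` holds a value that is `8 * size_bytes` wide, already zero-extended.
fn to_uint(bits: u128, _size_bytes: u32) -> u128 {
    bits
}

// Shift the payload to the top of the i128, then arithmetic-shift back down:
// the small value's sign bit now fills the upper bits.
fn to_int(bits: u128, size_bytes: u32) -> i128 {
    let shift = 128 - 8 * size_bytes;
    ((bits as i128) << shift) >> shift
}

fn main() {
    assert_eq!(to_uint(0xff, 1), 255);
    assert_eq!(to_int(0xff, 1), -1); // 0xff read as a 1-byte signed value
}
```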
4 changes: 2 additions & 2 deletions compiler/rustc_const_eval/src/interpret/validity.rs
@@ -653,8 +653,8 @@ impl<'rt, 'tcx, M: Machine<'tcx>> ValidityVisitor<'rt, 'tcx, M> {
let WrappingRange { start, end } = valid_range;
let max_value = size.unsigned_int_max();
assert!(end <= max_value);
- let bits = match scalar.try_to_int() {
-     Ok(int) => int.assert_bits(size),
+ let bits = match scalar.try_to_scalar_int() {
+     Ok(int) => int.to_bits(size),
Err(_) => {
// So this is a pointer then, and casting to an int failed.
// Can only happen during CTFE.
5 changes: 2 additions & 3 deletions compiler/rustc_hir_typeck/src/pat.rs
@@ -2385,11 +2385,10 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
min_len: u64,
) -> (Option<Ty<'tcx>>, Ty<'tcx>) {
let len = match len.eval(self.tcx, self.param_env, span) {
- // FIXME(BoxyUwU): Assert the `Ty` is a `usize`?
Ok((_, val)) => val
.try_to_scalar()
-     .and_then(|scalar| scalar.try_to_int().ok())
-     .and_then(|int| int.try_to_target_usize(self.tcx).ok()),
+     .and_then(|scalar| scalar.try_to_scalar_int().ok())
+     .map(|int| int.to_target_usize(self.tcx)),
Err(ErrorHandled::Reported(..)) => {
let guar = self.error_scrutinee_unfixed_length(span);
return (Some(Ty::new_error(self.tcx, guar)), arr_ty);