@@ -2,12 +2,12 @@ use rustc_abi::{self as abi, FIRST_VARIANT};
 use rustc_middle::ty::adjustment::PointerCoercion;
 use rustc_middle::ty::layout::{HasTyCtxt, HasTypingEnv, LayoutOf, TyAndLayout};
 use rustc_middle::ty::{self, Instance, Ty, TyCtxt};
-use rustc_middle::{bug, mir};
+use rustc_middle::{bug, mir, span_bug};
 use rustc_session::config::OptLevel;
 use tracing::{debug, instrument};
 
 use super::operand::{OperandRef, OperandRefBuilder, OperandValue};
-use super::place::{PlaceRef, codegen_tag_value};
+use super::place::{PlaceRef, PlaceValue, codegen_tag_value};
 use super::{FunctionCx, LocalRef};
 use crate::common::{IntPredicate, TypeKind};
 use crate::traits::*;
@@ -229,6 +229,18 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
         operand: OperandRef<'tcx, Bx::Value>,
         cast: TyAndLayout<'tcx>,
     ) -> OperandValue<Bx::Value> {
+        if let abi::BackendRepr::Memory { .. } = cast.backend_repr
+            && !cast.is_zst()
+        {
+            span_bug!(self.mir.span, "Use `codegen_transmute` to transmute to {cast:?}");
+        }
+
+        // `Layout` is interned, so we can do a cheap check for things that are
+        // exactly the same and thus don't need any handling.
+        if abi::Layout::eq(&operand.layout.layout, &cast.layout) {
+            return operand.val;
+        }
+
         // Check for transmutes that are always UB.
         if operand.layout.size != cast.size
             || operand.layout.is_uninhabited()
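Two early-outs are new here. The `span_bug!` asserts the caller contract: a non-ZST memory-ABI destination must go through `codegen_transmute`, which writes into a place. The `Layout::eq` check exploits that layouts are interned, so a cheap equality test detects a no-op transmute. A rough user-level illustration (the types are my assumption, not taken from the PR; actual representations are target-dependent):

```rust
use std::mem::transmute;

// Two thin raw pointers plausibly share one interned `Layout`, so this
// transmute can hit the cheap `Layout::eq` fast path and return the
// operand value untouched.
fn same_layout(p: *mut u8) -> *mut i32 {
    unsafe { transmute::<*mut u8, *mut i32>(p) }
}

// Large arrays have a memory backend repr, so a transmute *to* one is
// expected to arrive via `codegen_transmute`; reaching
// `codegen_transmute_operand` with such a destination is now a `span_bug!`.
fn to_memory(x: [u8; 32]) -> [u16; 16] {
    unsafe { transmute::<[u8; 32], [u16; 16]>(x) }
}
```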
@@ -241,11 +253,9 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
             return OperandValue::poison(bx, cast);
         }
 
+        let cx = bx.cx();
         match (operand.val, operand.layout.backend_repr, cast.backend_repr) {
             _ if cast.is_zst() => OperandValue::ZeroSized,
-            (_, _, abi::BackendRepr::Memory { .. }) => {
-                bug!("Cannot `codegen_transmute_operand` to non-ZST memory-ABI output {cast:?}");
-            }
             (OperandValue::Ref(source_place_val), abi::BackendRepr::Memory { .. }, _) => {
                 assert_eq!(source_place_val.llextra, None);
                 // The existing alignment is part of `source_place_val`,
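With memory-ABI destinations rejected up front, the old `(_, _, Memory { .. })` bug arm is unreachable and can be dropped; `cx` is hoisted so the size guards added below can use it. The surviving `Ref` arm covers a memory-ABI *source*, roughly (my example, not from the PR):

```rust
// The input array has a memory backend repr while the output is a scalar
// immediate: the `OperandValue::Ref` arm simply reloads the source place
// with the cast layout, keeping the source's alignment.
fn from_memory(x: [u8; 8]) -> u64 {
    unsafe { std::mem::transmute::<[u8; 8], u64>(x) }
}
```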
@@ -256,16 +266,38 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                 OperandValue::Immediate(imm),
                 abi::BackendRepr::Scalar(from_scalar),
                 abi::BackendRepr::Scalar(to_scalar),
-            ) => OperandValue::Immediate(transmute_scalar(bx, imm, from_scalar, to_scalar)),
+            ) if from_scalar.size(cx) == to_scalar.size(cx) => {
+                OperandValue::Immediate(transmute_scalar(bx, imm, from_scalar, to_scalar))
+            }
             (
                 OperandValue::Pair(imm_a, imm_b),
                 abi::BackendRepr::ScalarPair(in_a, in_b),
                 abi::BackendRepr::ScalarPair(out_a, out_b),
-            ) => OperandValue::Pair(
-                transmute_scalar(bx, imm_a, in_a, out_a),
-                transmute_scalar(bx, imm_b, in_b, out_b),
-            ),
-            _ => bug!("Cannot `codegen_transmute_operand` {operand:?} to {cast:?}"),
+            ) if in_a.size(cx) == out_a.size(cx) && in_b.size(cx) == out_b.size(cx) => {
+                OperandValue::Pair(
+                    transmute_scalar(bx, imm_a, in_a, out_a),
+                    transmute_scalar(bx, imm_b, in_b, out_b),
+                )
+            }
+            _ => {
+                // For any other potentially-tricky cases, make a temporary instead.
+                // If anything else wants the target local to be in memory this won't
+                // be hit, as `codegen_transmute` will get called directly. Thus this
+                // is only for places where everything else wants the operand form,
+                // and thus it's not worth making those places get it from memory.
+                //
+                // Notably, Scalar ⇌ ScalarPair cases go here to avoid padding
+                // and endianness issues, as do SimdVector ones to avoid worrying
+                // about things like f32x8 ⇌ ptrx4 that would need multiple steps.
+                let align = Ord::max(operand.layout.align.abi, cast.align.abi);
+                let size = Ord::max(operand.layout.size, cast.size);
+                let temp = PlaceValue::alloca(bx, size, align);
+                bx.lifetime_start(temp.llval, size);
+                operand.val.store(bx, temp.with_type(operand.layout));
+                let val = bx.load_operand(temp.with_type(cast)).val;
+                bx.lifetime_end(temp.llval, size);
+                val
+            }
         }
     }
 
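The new catch-all arm is what makes the size guards above safe: any representation pair not handled in registers now round-trips through a stack temporary sized and aligned for both layouts, bracketed by lifetime markers so the alloca does not pessimize stack usage. At the user level this covers transmutes like the following (my example, not from the PR):

```rust
// Scalar -> ScalarPair: reassembling this in registers would raise
// padding and endianness questions, so the value is stored with the
// source layout and reloaded with the destination layout instead.
fn split(x: u64) -> (u32, u32) {
    unsafe { std::mem::transmute::<u64, (u32, u32)>(x) }
}
```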
@@ -949,37 +981,6 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
     /// layout in this code when the right thing will happen anyway.
     pub(crate) fn rvalue_creates_operand(&self, rvalue: &mir::Rvalue<'tcx>) -> bool {
         match *rvalue {
-            mir::Rvalue::Cast(mir::CastKind::Transmute, ref operand, cast_ty) => {
-                let operand_ty = operand.ty(self.mir, self.cx.tcx());
-                let cast_layout = self.cx.layout_of(self.monomorphize(cast_ty));
-                let operand_layout = self.cx.layout_of(self.monomorphize(operand_ty));
-                match (operand_layout.backend_repr, cast_layout.backend_repr) {
-                    // When the output will be in memory anyway, just use its place
-                    // (instead of the operand path) unless it's the trivial ZST case.
-                    (_, abi::BackendRepr::Memory { .. }) => cast_layout.is_zst(),
-
-                    // Otherwise (for a non-memory output) if the input is memory
-                    // then we can just read the value from the place.
-                    (abi::BackendRepr::Memory { .. }, _) => true,
-
-                    // When we have scalar immediates, we can only convert things
-                    // where the sizes match, to avoid endianness questions.
-                    (abi::BackendRepr::Scalar(a), abi::BackendRepr::Scalar(b)) =>
-                        a.size(self.cx) == b.size(self.cx),
-                    (abi::BackendRepr::ScalarPair(a0, a1), abi::BackendRepr::ScalarPair(b0, b1)) =>
-                        a0.size(self.cx) == b0.size(self.cx) && a1.size(self.cx) == b1.size(self.cx),
-
-                    // Mixing Scalars and ScalarPairs can get quite complicated when
-                    // padding and undef get involved, so leave that to the memory path.
-                    (abi::BackendRepr::Scalar(_), abi::BackendRepr::ScalarPair(_, _)) |
-                    (abi::BackendRepr::ScalarPair(_, _), abi::BackendRepr::Scalar(_)) => false,
-
-                    // SIMD vectors aren't worth the trouble of dealing with complex
-                    // cases like from vectors of f32 to vectors of pointers or
-                    // from fat pointers to vectors of u16. (See #143194 #110021 ...)
-                    (abi::BackendRepr::SimdVector { .. }, _) | (_, abi::BackendRepr::SimdVector { .. }) => false,
-                }
-            }
             mir::Rvalue::Ref(..) |
             mir::Rvalue::CopyForDeref(..) |
             mir::Rvalue::RawPtr(..) |
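Since `codegen_transmute_operand` can now produce a value for every size-matched representation pair, `rvalue_creates_operand` no longer has to predict which transmutes the operand path supports, and the whole deleted match (including the SIMD special cases around #143194 and #110021) is subsumed by the temporary-based fallback. A nightly-only sketch of a vector transmute that this predicate used to steer to the memory path (assumes `portable_simd`; my example, not from the PR):

```rust
#![feature(portable_simd)]
use std::simd::Simd;

// Previously rejected by `rvalue_creates_operand` for having a SimdVector
// repr on either side; the fallback arm can now handle it as an operand
// via the stack temporary.
fn bits(v: Simd<f32, 4>) -> Simd<u32, 4> {
    unsafe { std::mem::transmute::<Simd<f32, 4>, Simd<u32, 4>>(v) }
}
```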