From 66b2aad37706e7f1b5f8985d8e58f370ed597ea0 Mon Sep 17 00:00:00 2001 From: Erik Desjardins Date: Sat, 24 Feb 2024 00:48:20 -0500 Subject: [PATCH 1/2] use [N x i8] for alloca types --- compiler/rustc_codegen_gcc/src/builder.rs | 17 ++++++++++++----- compiler/rustc_codegen_gcc/src/intrinsic/mod.rs | 2 +- .../rustc_codegen_gcc/src/intrinsic/simd.rs | 4 ++-- compiler/rustc_codegen_llvm/src/abi.rs | 2 +- compiler/rustc_codegen_llvm/src/builder.rs | 17 ++++++++++++++--- compiler/rustc_codegen_llvm/src/intrinsic.rs | 9 +++++---- compiler/rustc_codegen_ssa/src/base.rs | 2 +- compiler/rustc_codegen_ssa/src/mir/operand.rs | 4 ++-- compiler/rustc_codegen_ssa/src/mir/place.rs | 2 +- .../rustc_codegen_ssa/src/traits/builder.rs | 8 ++++++-- tests/codegen/align-byval.rs | 12 ++++++------ tests/codegen/align-enum.rs | 4 ++-- tests/codegen/align-struct.rs | 8 ++++---- tests/codegen/array-map.rs | 2 +- tests/codegen/debug-fndef-size.rs | 2 +- tests/codegen/enum/enum-match.rs | 2 +- tests/codegen/i128-x86-align.rs | 9 ++++----- tests/codegen/intrinsics/transmute.rs | 16 ++++++++-------- .../issues/issue-105386-ub-in-debuginfo.rs | 2 +- tests/codegen/issues/issue-111603.rs | 2 +- tests/codegen/overaligned-constant.rs | 9 +++++---- tests/codegen/packed.rs | 4 ++-- tests/codegen/personality_lifetimes.rs | 2 +- tests/codegen/sroa-fragment-debuginfo.rs | 6 +++--- tests/codegen/stores.rs | 8 ++++---- tests/codegen/swap-large-types.rs | 2 +- tests/codegen/swap-small-types.rs | 2 +- 27 files changed, 91 insertions(+), 68 deletions(-) diff --git a/compiler/rustc_codegen_gcc/src/builder.rs b/compiler/rustc_codegen_gcc/src/builder.rs index 71a0a4c2e96f2..14941c43123ca 100644 --- a/compiler/rustc_codegen_gcc/src/builder.rs +++ b/compiler/rustc_codegen_gcc/src/builder.rs @@ -734,7 +734,18 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> { self.gcc_checked_binop(oop, typ, lhs, rhs) } - fn alloca(&mut self, ty: Type<'gcc>, align: Align) -> RValue<'gcc> { + fn alloca(&mut self, size: Size, align: Align) -> RValue<'gcc> { + let ty = self.cx.type_array(self.cx.type_i8(), size.bytes()).get_aligned(align.bytes()); + // TODO(antoyo): It might be better to return a LValue, but fixing the rustc API is non-trivial. + self.stack_var_count.set(self.stack_var_count.get() + 1); + self.current_func().new_local(None, ty, &format!("stack_var_{}", self.stack_var_count.get())).get_address(None) + } + + fn dynamic_alloca(&mut self, _len: RValue<'gcc>, _align: Align) -> RValue<'gcc> { + unimplemented!(); + } + + fn typed_alloca(&mut self, ty: Type<'gcc>, align: Align) -> RValue<'gcc> { // FIXME(antoyo): this check that we don't call get_aligned() a second time on a type. // Ideally, we shouldn't need to do this check. 
let aligned_type = @@ -749,10 +760,6 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> { self.current_func().new_local(None, aligned_type, &format!("stack_var_{}", self.stack_var_count.get())).get_address(None) } - fn byte_array_alloca(&mut self, _len: RValue<'gcc>, _align: Align) -> RValue<'gcc> { - unimplemented!(); - } - fn load(&mut self, pointee_ty: Type<'gcc>, ptr: RValue<'gcc>, align: Align) -> RValue<'gcc> { let block = self.llbb(); let function = block.get_function(); diff --git a/compiler/rustc_codegen_gcc/src/intrinsic/mod.rs b/compiler/rustc_codegen_gcc/src/intrinsic/mod.rs index d43f5d74757ae..d5d4a66b4d09c 100644 --- a/compiler/rustc_codegen_gcc/src/intrinsic/mod.rs +++ b/compiler/rustc_codegen_gcc/src/intrinsic/mod.rs @@ -490,7 +490,7 @@ impl<'gcc, 'tcx> ArgAbiExt<'gcc, 'tcx> for ArgAbi<'tcx, Ty<'tcx>> { // We instead thus allocate some scratch space... let scratch_size = cast.size(bx); let scratch_align = cast.align(bx); - let llscratch = bx.alloca(cast.gcc_type(bx), scratch_align); + let llscratch = bx.alloca(scratch_size, scratch_align); bx.lifetime_start(llscratch, scratch_size); // ... where we first store the value... diff --git a/compiler/rustc_codegen_gcc/src/intrinsic/simd.rs b/compiler/rustc_codegen_gcc/src/intrinsic/simd.rs index d8091724d8647..8e51ba8d8e78c 100644 --- a/compiler/rustc_codegen_gcc/src/intrinsic/simd.rs +++ b/compiler/rustc_codegen_gcc/src/intrinsic/simd.rs @@ -16,7 +16,7 @@ use rustc_middle::span_bug; use rustc_middle::ty::layout::HasTyCtxt; use rustc_middle::ty::{self, Ty}; use rustc_span::{sym, Span, Symbol}; -use rustc_target::abi::Align; +use rustc_target::abi::{Align, Size}; use crate::builder::Builder; #[cfg(feature = "master")] @@ -363,7 +363,7 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>( let ze = bx.zext(result, bx.type_ix(expected_bytes * 8)); // Convert the integer to a byte array - let ptr = bx.alloca(bx.type_ix(expected_bytes * 8), Align::ONE); + let ptr = bx.alloca(Size::from_bytes(expected_bytes), Align::ONE); bx.store(ze, ptr, Align::ONE); let array_ty = bx.type_array(bx.type_i8(), expected_bytes); let ptr = bx.pointercast(ptr, bx.cx.type_ptr_to(array_ty)); diff --git a/compiler/rustc_codegen_llvm/src/abi.rs b/compiler/rustc_codegen_llvm/src/abi.rs index b5b4f894e4d82..de967d6bbbe5c 100644 --- a/compiler/rustc_codegen_llvm/src/abi.rs +++ b/compiler/rustc_codegen_llvm/src/abi.rs @@ -234,7 +234,7 @@ impl<'ll, 'tcx> ArgAbiExt<'ll, 'tcx> for ArgAbi<'tcx, Ty<'tcx>> { // We instead thus allocate some scratch space... let scratch_size = cast.size(bx); let scratch_align = cast.align(bx); - let llscratch = bx.alloca(cast.llvm_type(bx), scratch_align); + let llscratch = bx.alloca(scratch_size, scratch_align); bx.lifetime_start(llscratch, scratch_size); // ... where we first store the value... 
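
Both scratch-space hunks above (GCC and LLVM) now pass only a byte size and an alignment, so neither backend needs the cast's backend type to create the slot. A minimal sketch of how a caller drives the reworked `BuilderMethods` API from `rustc_codegen_ssa` (a hypothetical helper written against the signatures this patch introduces in the `traits/builder.rs` hunk below; not code from the tree):

```rust
use rustc_codegen_ssa::traits::BuilderMethods;
use rustc_target::abi::{Align, Size};

// Hypothetical helper: spill an immediate to a fresh stack slot. After this
// patch the slot is an untyped `[size x i8]` alloca, so only the size and
// alignment of the value matter, never its backend type.
fn spill_immediate<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
    bx: &mut Bx,
    val: Bx::Value,
    size: Size,
    align: Align,
) -> Bx::Value {
    // For size = 16 and align = 8 this emits e.g. `%0 = alloca [16 x i8], align 8`.
    let slot = bx.alloca(size, align);
    bx.lifetime_start(slot, size);
    bx.store(val, slot, align);
    slot
}
```
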
diff --git a/compiler/rustc_codegen_llvm/src/builder.rs b/compiler/rustc_codegen_llvm/src/builder.rs index eaedaec635fb4..bd0f31cd70b13 100644 --- a/compiler/rustc_codegen_llvm/src/builder.rs +++ b/compiler/rustc_codegen_llvm/src/builder.rs @@ -466,9 +466,10 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { val } - fn alloca(&mut self, ty: &'ll Type, align: Align) -> &'ll Value { + fn alloca(&mut self, size: Size, align: Align) -> &'ll Value { let mut bx = Builder::with_cx(self.cx); bx.position_at_start(unsafe { llvm::LLVMGetFirstBasicBlock(self.llfn()) }); + let ty = self.cx().type_array(self.cx().type_i8(), size.bytes()); unsafe { let alloca = llvm::LLVMBuildAlloca(bx.llbuilder, ty, UNNAMED); llvm::LLVMSetAlignment(alloca, align.bytes() as c_uint); @@ -476,10 +477,20 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { } } - fn byte_array_alloca(&mut self, len: &'ll Value, align: Align) -> &'ll Value { + fn dynamic_alloca(&mut self, size: &'ll Value, align: Align) -> &'ll Value { unsafe { let alloca = - llvm::LLVMBuildArrayAlloca(self.llbuilder, self.cx().type_i8(), len, UNNAMED); + llvm::LLVMBuildArrayAlloca(self.llbuilder, self.cx().type_i8(), size, UNNAMED); + llvm::LLVMSetAlignment(alloca, align.bytes() as c_uint); + alloca + } + } + + fn typed_alloca(&mut self, ty: &'ll Type, align: Align) -> &'ll Value { + let mut bx = Builder::with_cx(self.cx); + bx.position_at_start(unsafe { llvm::LLVMGetFirstBasicBlock(self.llfn()) }); + unsafe { + let alloca = llvm::LLVMBuildAlloca(bx.llbuilder, ty, UNNAMED); llvm::LLVMSetAlignment(alloca, align.bytes() as c_uint); alloca } diff --git a/compiler/rustc_codegen_llvm/src/intrinsic.rs b/compiler/rustc_codegen_llvm/src/intrinsic.rs index f33a672aff0d7..30163699232d9 100644 --- a/compiler/rustc_codegen_llvm/src/intrinsic.rs +++ b/compiler/rustc_codegen_llvm/src/intrinsic.rs @@ -18,7 +18,7 @@ use rustc_middle::ty::layout::{FnAbiOf, HasTyCtxt, LayoutOf}; use rustc_middle::ty::{self, GenericArgsRef, Ty}; use rustc_middle::{bug, span_bug}; use rustc_span::{sym, Span, Symbol}; -use rustc_target::abi::{self, Align, HasDataLayout, Primitive}; +use rustc_target::abi::{self, Align, HasDataLayout, Primitive, Size}; use rustc_target::spec::{HasTargetSpec, PanicStrategy}; use std::cmp::Ordering; @@ -635,8 +635,9 @@ fn codegen_msvc_try<'ll>( // } // // More information can be found in libstd's seh.rs implementation. 
+ let ptr_size = bx.tcx().data_layout.pointer_size; let ptr_align = bx.tcx().data_layout.pointer_align.abi; - let slot = bx.alloca(bx.type_ptr(), ptr_align); + let slot = bx.alloca(ptr_size, ptr_align); let try_func_ty = bx.type_func(&[bx.type_ptr()], bx.type_void()); bx.invoke(try_func_ty, None, None, try_func, &[data], normal, catchswitch, None); @@ -909,7 +910,7 @@ fn codegen_emcc_try<'ll>( let ptr_align = bx.tcx().data_layout.pointer_align.abi; let i8_align = bx.tcx().data_layout.i8_align.abi; let catch_data_type = bx.type_struct(&[bx.type_ptr(), bx.type_bool()], false); - let catch_data = bx.alloca(catch_data_type, ptr_align); + let catch_data = bx.typed_alloca(catch_data_type, ptr_align); let catch_data_0 = bx.inbounds_gep(catch_data_type, catch_data, &[bx.const_usize(0), bx.const_usize(0)]); bx.store(ptr, catch_data_0, ptr_align); @@ -1360,7 +1361,7 @@ fn generic_simd_intrinsic<'ll, 'tcx>( let ze = bx.zext(i_, bx.type_ix(expected_bytes * 8)); // Convert the integer to a byte array - let ptr = bx.alloca(bx.type_ix(expected_bytes * 8), Align::ONE); + let ptr = bx.alloca(Size::from_bytes(expected_bytes), Align::ONE); bx.store(ze, ptr, Align::ONE); let array_ty = bx.type_array(bx.type_i8(), expected_bytes); return Ok(bx.load(array_ty, ptr, Align::ONE)); diff --git a/compiler/rustc_codegen_ssa/src/base.rs b/compiler/rustc_codegen_ssa/src/base.rs index f7afd22a48cab..47fb5f476d472 100644 --- a/compiler/rustc_codegen_ssa/src/base.rs +++ b/compiler/rustc_codegen_ssa/src/base.rs @@ -511,7 +511,7 @@ fn get_argc_argv<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>( let param_handle = bx.get_param(0); let param_system_table = bx.get_param(1); let arg_argc = bx.const_int(cx.type_isize(), 2); - let arg_argv = bx.alloca(cx.type_array(cx.type_ptr(), 2), Align::ONE); + let arg_argv = bx.typed_alloca(cx.type_array(cx.type_ptr(), 2), Align::ONE); bx.store(param_handle, arg_argv, Align::ONE); let arg_argv_el1 = bx.gep(cx.type_ptr(), arg_argv, &[bx.const_int(cx.type_int(), 1)]); bx.store(param_system_table, arg_argv_el1, Align::ONE); diff --git a/compiler/rustc_codegen_ssa/src/mir/operand.rs b/compiler/rustc_codegen_ssa/src/mir/operand.rs index 94eb37e78e07d..05464f19b63e3 100644 --- a/compiler/rustc_codegen_ssa/src/mir/operand.rs +++ b/compiler/rustc_codegen_ssa/src/mir/operand.rs @@ -323,7 +323,7 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> { let llfield_ty = bx.cx().backend_type(field); // Can't bitcast an aggregate, so round trip through memory. 
-            let llptr = bx.alloca(llfield_ty, field.align.abi);
+            let llptr = bx.alloca(field.size, field.align.abi);
             bx.store(*llval, llptr, field.align.abi);
             *llval = bx.load(llfield_ty, llptr, field.align.abi);
         }
@@ -471,7 +471,7 @@ impl<'a, 'tcx, V: CodegenObject> OperandValue<V> {
         let align_minus_1 = bx.sub(align, one);
         let size_extra = bx.add(size, align_minus_1);
         let min_align = Align::ONE;
-        let alloca = bx.byte_array_alloca(size_extra, min_align);
+        let alloca = bx.dynamic_alloca(size_extra, min_align);
         let address = bx.ptrtoint(alloca, bx.type_isize());
         let neg_address = bx.neg(address);
         let offset = bx.and(neg_address, align_minus_1);
diff --git a/compiler/rustc_codegen_ssa/src/mir/place.rs b/compiler/rustc_codegen_ssa/src/mir/place.rs
index 725d3bf4431e4..8a571aed7f239 100644
--- a/compiler/rustc_codegen_ssa/src/mir/place.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/place.rs
@@ -57,7 +57,7 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
         align: Align,
     ) -> Self {
         assert!(layout.is_sized(), "tried to statically allocate unsized place");
-        let tmp = bx.alloca(bx.cx().backend_type(layout), align);
+        let tmp = bx.alloca(layout.size, align);
         Self::new_sized_aligned(tmp, layout, align)
     }
diff --git a/compiler/rustc_codegen_ssa/src/traits/builder.rs b/compiler/rustc_codegen_ssa/src/traits/builder.rs
index 36f37e3791bc5..750d807e4f6cf 100644
--- a/compiler/rustc_codegen_ssa/src/traits/builder.rs
+++ b/compiler/rustc_codegen_ssa/src/traits/builder.rs
@@ -141,8 +141,12 @@ pub trait BuilderMethods<'a, 'tcx>:
     }
     fn to_immediate_scalar(&mut self, val: Self::Value, scalar: Scalar) -> Self::Value;
 
-    fn alloca(&mut self, ty: Self::Type, align: Align) -> Self::Value;
-    fn byte_array_alloca(&mut self, len: Self::Value, align: Align) -> Self::Value;
+    /// Used for all fixed-size Rust types.
+    fn alloca(&mut self, size: Size, align: Align) -> Self::Value;
+    /// Used for DSTs and unsized locals.
+    fn dynamic_alloca(&mut self, size: Self::Value, align: Align) -> Self::Value;
+    /// Should only be used for types without a Rust layout, e.g. C++ EH catch data.
+ fn typed_alloca(&mut self, ty: Self::Type, align: Align) -> Self::Value; fn load(&mut self, ty: Self::Type, ptr: Self::Value, align: Align) -> Self::Value; fn volatile_load(&mut self, ty: Self::Type, ptr: Self::Value) -> Self::Value; diff --git a/tests/codegen/align-byval.rs b/tests/codegen/align-byval.rs index 1016c7903eb2d..e9e5ab34e0d78 100644 --- a/tests/codegen/align-byval.rs +++ b/tests/codegen/align-byval.rs @@ -106,20 +106,20 @@ pub struct ForceAlign16 { pub unsafe fn call_na1(x: NaturalAlign1) { // CHECK: start: - // m68k: [[ALLOCA:%[a-z0-9+]]] = alloca %NaturalAlign1, align 1 + // m68k: [[ALLOCA:%[a-z0-9+]]] = alloca [2 x i8], align 1 // m68k: call void @natural_align_1({{.*}}byval(%NaturalAlign1) align 1{{.*}} [[ALLOCA]]) - // wasm: [[ALLOCA:%[a-z0-9+]]] = alloca %NaturalAlign1, align 1 + // wasm: [[ALLOCA:%[a-z0-9+]]] = alloca [2 x i8], align 1 // wasm: call void @natural_align_1({{.*}}byval(%NaturalAlign1) align 1{{.*}} [[ALLOCA]]) // x86_64-linux: call void @natural_align_1(i16 // x86_64-windows: call void @natural_align_1(i16 - // i686-linux: [[ALLOCA:%[a-z0-9+]]] = alloca %NaturalAlign1, align 4 + // i686-linux: [[ALLOCA:%[a-z0-9+]]] = alloca [2 x i8], align 4 // i686-linux: call void @natural_align_1({{.*}}byval(%NaturalAlign1) align 4{{.*}} [[ALLOCA]]) - // i686-windows: [[ALLOCA:%[a-z0-9+]]] = alloca %NaturalAlign1, align 4 + // i686-windows: [[ALLOCA:%[a-z0-9+]]] = alloca [2 x i8], align 4 // i686-windows: call void @natural_align_1({{.*}}byval(%NaturalAlign1) align 4{{.*}} [[ALLOCA]]) natural_align_1(x); } @@ -134,10 +134,10 @@ pub unsafe fn call_na2(x: NaturalAlign2) { // x86_64-linux-NEXT: call void @natural_align_2 // x86_64-windows-NEXT: call void @natural_align_2 - // i686-linux: [[ALLOCA:%[0-9]+]] = alloca %NaturalAlign2, align 4 + // i686-linux: [[ALLOCA:%[0-9]+]] = alloca [34 x i8], align 4 // i686-linux: call void @natural_align_2({{.*}}byval(%NaturalAlign2) align 4{{.*}} [[ALLOCA]]) - // i686-windows: [[ALLOCA:%[0-9]+]] = alloca %NaturalAlign2, align 4 + // i686-windows: [[ALLOCA:%[0-9]+]] = alloca [34 x i8], align 4 // i686-windows: call void @natural_align_2({{.*}}byval(%NaturalAlign2) align 4{{.*}} [[ALLOCA]]) natural_align_2(x); } diff --git a/tests/codegen/align-enum.rs b/tests/codegen/align-enum.rs index 17bf2cf725682..6c5529b84f333 100644 --- a/tests/codegen/align-enum.rs +++ b/tests/codegen/align-enum.rs @@ -19,7 +19,7 @@ pub struct Nested64 { // CHECK-LABEL: @align64 #[no_mangle] pub fn align64(a: u32) -> Align64 { -// CHECK: %a64 = alloca %Align64, align 64 +// CHECK: %a64 = alloca [64 x i8], align 64 // CHECK: call void @llvm.memcpy.{{.*}}(ptr align 64 %{{.*}}, ptr align 64 %{{.*}}, i{{[0-9]+}} 64, i1 false) let a64 = Align64::A(a); a64 @@ -28,7 +28,7 @@ pub fn align64(a: u32) -> Align64 { // CHECK-LABEL: @nested64 #[no_mangle] pub fn nested64(a: u8, b: u32, c: u16) -> Nested64 { -// CHECK: %n64 = alloca %Nested64, align 64 +// CHECK: %n64 = alloca [128 x i8], align 64 let n64 = Nested64 { a, b: Align64::B(b), c }; n64 } diff --git a/tests/codegen/align-struct.rs b/tests/codegen/align-struct.rs index 31859152830a5..29cc45b7d71aa 100644 --- a/tests/codegen/align-struct.rs +++ b/tests/codegen/align-struct.rs @@ -30,7 +30,7 @@ pub enum Enum64 { // CHECK-LABEL: @align64 #[no_mangle] pub fn align64(i : i32) -> Align64 { -// CHECK: %a64 = alloca %Align64, align 64 +// CHECK: %a64 = alloca [64 x i8], align 64 // CHECK: call void @llvm.memcpy.{{.*}}(ptr align 64 %{{.*}}, ptr align 64 %{{.*}}, i{{[0-9]+}} 64, i1 false) let a64 = Align64(i); a64 @@ -48,7 
+48,7 @@ pub fn align64_load(a: Align64) -> i32 { // CHECK-LABEL: @nested64 #[no_mangle] pub fn nested64(a: Align64, b: i32, c: i32, d: i8) -> Nested64 { -// CHECK: %n64 = alloca %Nested64, align 64 +// CHECK: %n64 = alloca [128 x i8], align 64 let n64 = Nested64 { a, b, c, d }; n64 } @@ -56,7 +56,7 @@ pub fn nested64(a: Align64, b: i32, c: i32, d: i8) -> Nested64 { // CHECK-LABEL: @enum4 #[no_mangle] pub fn enum4(a: i32) -> Enum4 { -// CHECK: %e4 = alloca %Enum4, align 4 +// CHECK: %e4 = alloca [8 x i8], align 4 let e4 = Enum4::A(a); e4 } @@ -64,7 +64,7 @@ pub fn enum4(a: i32) -> Enum4 { // CHECK-LABEL: @enum64 #[no_mangle] pub fn enum64(a: Align64) -> Enum64 { -// CHECK: %e64 = alloca %Enum64, align 64 +// CHECK: %e64 = alloca [128 x i8], align 64 let e64 = Enum64::A(a); e64 } diff --git a/tests/codegen/array-map.rs b/tests/codegen/array-map.rs index 743a15989f78e..f49dddcfc207a 100644 --- a/tests/codegen/array-map.rs +++ b/tests/codegen/array-map.rs @@ -27,7 +27,7 @@ pub fn short_integer_map(x: [u32; 8]) -> [u32; 8] { #[no_mangle] pub fn long_integer_map(x: [u32; 512]) -> [u32; 512] { // CHECK: start: - // CHECK-NEXT: alloca [512 x i32] + // CHECK-NEXT: alloca [2048 x i8] // CHECK-NOT: alloca // CHECK: mul <{{[0-9]+}} x i32> // CHECK: add <{{[0-9]+}} x i32> diff --git a/tests/codegen/debug-fndef-size.rs b/tests/codegen/debug-fndef-size.rs index b3cc45614bc5b..5551d2cc39cf8 100644 --- a/tests/codegen/debug-fndef-size.rs +++ b/tests/codegen/debug-fndef-size.rs @@ -12,7 +12,7 @@ pub fn main() { foo(0, 1, i32::cmp); } -// CHECK: %compare.dbg.spill = alloca {}, align 1 +// CHECK: %compare.dbg.spill = alloca [0 x i8], align 1 // CHECK: call void @llvm.dbg.declare(metadata ptr %compare.dbg.spill, metadata ![[VAR:.*]], metadata !DIExpression()), !dbg !{{.*}} // CHECK: ![[TYPE:.*]] = !DIDerivedType(tag: DW_TAG_pointer_type, name: "fn(&i32, &i32) -> core::cmp::Ordering", baseType: !{{.*}}, align: 1, dwarfAddressSpace: {{.*}}) // CHECK: ![[VAR]] = !DILocalVariable(name: "compare", scope: !{{.*}}, file: !{{.*}}, line: {{.*}}, type: ![[TYPE]], align: 1) diff --git a/tests/codegen/enum/enum-match.rs b/tests/codegen/enum/enum-match.rs index 2e6dad8791b28..f1c40f6695be1 100644 --- a/tests/codegen/enum/enum-match.rs +++ b/tests/codegen/enum/enum-match.rs @@ -15,7 +15,7 @@ pub enum Enum0 { // CHECK-NEXT: start: // CHECK-NEXT: %1 = icmp eq i8 %0, 2 // CHECK-NEXT: %2 = and i8 %0, 1 -// CHECK-NEXT: %_0.0 = select i1 %1, i8 13, i8 %2 +// CHECK-NEXT: %{{.+}} = select i1 %1, i8 13, i8 %2 #[no_mangle] pub fn match0(e: Enum0) -> u8 { use Enum0::*; diff --git a/tests/codegen/i128-x86-align.rs b/tests/codegen/i128-x86-align.rs index b2e0c294c39d6..3e6ed2b8e16a2 100644 --- a/tests/codegen/i128-x86-align.rs +++ b/tests/codegen/i128-x86-align.rs @@ -6,7 +6,6 @@ // correctly. 
 // CHECK: %ScalarPair = type { i32, [3 x i32], i128 }
-// CHECK: %Struct = type { i32, i32, [2 x i32], i128 }
 
 #![feature(core_intrinsics)]
@@ -43,7 +42,7 @@ pub fn store(x: &mut ScalarPair) {
 #[no_mangle]
 pub fn alloca() {
     // CHECK-LABEL: @alloca(
-    // CHECK: [[X:%.*]] = alloca %ScalarPair, align 16
+    // CHECK: [[X:%.*]] = alloca [32 x i8], align 16
     // CHECK: store i32 1, ptr %x, align 16
     // CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds i8, ptr %x, i64 16
     // CHECK-NEXT: store i128 2, ptr [[GEP]], align 16
@@ -55,7 +54,7 @@ pub fn alloca() {
 pub fn load_volatile(x: &ScalarPair) -> ScalarPair {
     // CHECK-LABEL: @load_volatile(
     // CHECK-SAME: align 16 dereferenceable(32) %x
-    // CHECK: [[TMP:%.*]] = alloca %ScalarPair, align 16
+    // CHECK: [[TMP:%.*]] = alloca [32 x i8], align 16
     // CHECK: [[LOAD:%.*]] = load volatile %ScalarPair, ptr %x, align 16
     // CHECK-NEXT: store %ScalarPair [[LOAD]], ptr [[TMP]], align 16
     // CHECK-NEXT: [[A:%.*]] = load i32, ptr [[TMP]], align 16
@@ -67,7 +66,7 @@ pub fn load_volatile(x: &ScalarPair) -> ScalarPair {
 #[no_mangle]
 pub fn transmute(x: ScalarPair) -> (std::mem::MaybeUninit<i128>, i128) {
     // CHECK-LABEL: define { i128, i128 } @transmute(i32 noundef %x.0, i128 noundef %x.1)
-    // CHECK: [[TMP:%.*]] = alloca { i128, i128 }, align 16
+    // CHECK: [[TMP:%.*]] = alloca [32 x i8], align 16
     // CHECK-NEXT: store i32 %x.0, ptr [[TMP]], align 16
     // CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds i8, ptr [[TMP]], i64 16
     // CHECK-NEXT: store i128 %x.1, ptr [[GEP]], align 16
@@ -92,7 +91,7 @@ pub struct Struct {
 pub fn store_struct(x: &mut Struct) {
     // CHECK-LABEL: @store_struct(
     // CHECK-SAME: align 16 dereferenceable(32) %x
-    // CHECK: [[TMP:%.*]] = alloca %Struct, align 16
+    // CHECK: [[TMP:%.*]] = alloca [32 x i8], align 16
     // CHECK: store i32 1, ptr [[TMP]], align 16
     // CHECK-NEXT: [[GEP1:%.*]] = getelementptr inbounds i8, ptr [[TMP]], i64 4
     // CHECK-NEXT: store i32 2, ptr [[GEP1]], align 4
diff --git a/tests/codegen/intrinsics/transmute.rs b/tests/codegen/intrinsics/transmute.rs
index 5a503e86010b2..3b99d1ae54661 100644
--- a/tests/codegen/intrinsics/transmute.rs
+++ b/tests/codegen/intrinsics/transmute.rs
@@ -153,7 +153,7 @@ pub unsafe fn check_from_newtype(x: Scalar64) -> u64 {
 // CHECK-LABEL: @check_aggregate_to_bool(
 #[no_mangle]
 pub unsafe fn check_aggregate_to_bool(x: Aggregate8) -> bool {
-    // CHECK: %x = alloca %Aggregate8, align 1
+    // CHECK: %x = alloca [1 x i8], align 1
     // CHECK: %[[BYTE:.+]] = load i8, ptr %x, align 1
     // CHECK: %[[BOOL:.+]] = trunc i8 %[[BYTE]] to i1
     // CHECK: ret i1 %[[BOOL]]
@@ -163,7 +163,7 @@ pub unsafe fn check_aggregate_to_bool(x: Aggregate8) -> bool {
 // CHECK-LABEL: @check_aggregate_from_bool(
 #[no_mangle]
 pub unsafe fn check_aggregate_from_bool(x: bool) -> Aggregate8 {
-    // CHECK: %_0 = alloca %Aggregate8, align 1
+    // CHECK: %_0 = alloca [1 x i8], align 1
     // CHECK: %[[BYTE:.+]] = zext i1 %x to i8
     // CHECK: store i8 %[[BYTE]], ptr %_0, align 1
     transmute(x)
@@ -190,7 +190,7 @@ pub unsafe fn check_byte_from_bool(x: bool) -> u8 {
 // CHECK-LABEL: @check_to_pair(
 #[no_mangle]
 pub unsafe fn check_to_pair(x: u64) -> Option<i32> {
-    // CHECK: %_0 = alloca %"core::option::Option<i32>", align 4
+    // CHECK: %_0 = alloca [8 x i8], align 4
     // CHECK: store i64 %x, ptr %_0, align 4
     transmute(x)
 }
@@ -202,7 +202,7 @@ pub unsafe fn check_from_pair(x: Option<i32>) -> u64 {
     // immediates so we can write using the destination alloca's alignment.
     const { assert!(std::mem::align_of::<Option<i32>>() == 4) };
-    // CHECK: %_0 = alloca i64, align 8
+    // CHECK: %_0 = alloca [8 x i8], align 8
     // CHECK: store i32 %x.0, ptr %_0, align 8
     // CHECK: store i32 %x.1, ptr %0, align 4
     // CHECK: %[[R:.+]] = load i64, ptr %_0, align 8
@@ -248,7 +248,7 @@ pub unsafe fn check_from_bytes(x: [u8; 4]) -> u32 {
 // CHECK-LABEL: @check_to_aggregate(
 #[no_mangle]
 pub unsafe fn check_to_aggregate(x: u64) -> Aggregate64 {
-    // CHECK: %_0 = alloca %Aggregate64, align 4
+    // CHECK: %_0 = alloca [8 x i8], align 4
     // CHECK: store i64 %x, ptr %_0, align 4
     // CHECK: %0 = load i64, ptr %_0, align 4
     // CHECK: ret i64 %0
@@ -258,7 +258,7 @@ pub unsafe fn check_to_aggregate(x: u64) -> Aggregate64 {
 // CHECK-LABEL: @check_from_aggregate(
 #[no_mangle]
 pub unsafe fn check_from_aggregate(x: Aggregate64) -> u64 {
-    // CHECK: %x = alloca %Aggregate64, align 4
+    // CHECK: %x = alloca [8 x i8], align 4
     // CHECK: %[[VAL:.+]] = load i64, ptr %x, align 4
     // CHECK: ret i64 %[[VAL]]
     transmute(x)
@@ -452,7 +452,7 @@ pub struct HighAlignScalar(u8);
 // CHECK-LABEL: @check_to_overalign(
 #[no_mangle]
 pub unsafe fn check_to_overalign(x: u64) -> HighAlignScalar {
-    // CHECK: %_0 = alloca %HighAlignScalar, align 8
+    // CHECK: %_0 = alloca [8 x i8], align 8
     // CHECK: store i64 %x, ptr %_0, align 8
     // CHECK: %0 = load i64, ptr %_0, align 8
     // CHECK: ret i64 %0
@@ -462,7 +462,7 @@ pub unsafe fn check_to_overalign(x: u64) -> HighAlignScalar {
 // CHECK-LABEL: @check_from_overalign(
 #[no_mangle]
 pub unsafe fn check_from_overalign(x: HighAlignScalar) -> u64 {
-    // CHECK: %x = alloca %HighAlignScalar, align 8
+    // CHECK: %x = alloca [8 x i8], align 8
     // CHECK: %[[VAL:.+]] = load i64, ptr %x, align 8
     // CHECK: ret i64 %[[VAL]]
     transmute(x)
diff --git a/tests/codegen/issues/issue-105386-ub-in-debuginfo.rs b/tests/codegen/issues/issue-105386-ub-in-debuginfo.rs
index 0bd43dc50b21a..4d1c0a83b55fa 100644
--- a/tests/codegen/issues/issue-105386-ub-in-debuginfo.rs
+++ b/tests/codegen/issues/issue-105386-ub-in-debuginfo.rs
@@ -15,7 +15,7 @@ pub fn outer_function(x: S, y: S) -> usize {
 // Check that we do not attempt to load from the spilled arg before it is assigned to
 // when generating debuginfo.
 // CHECK-LABEL: @outer_function
-// CHECK: [[spill:%.*]] = alloca %"{closure@{{.*.rs}}:9:23: 9:25}"
+// CHECK: [[spill:%.*]] = alloca [72 x i8]
 // CHECK-NOT: [[ptr_tmp:%.*]] = getelementptr inbounds i8, ptr [[spill]]
 // CHECK-NOT: [[load:%.*]] = load ptr, ptr
 // CHECK: call void @llvm.lifetime.start{{.*}}({{.*}}, ptr [[spill]])
diff --git a/tests/codegen/issues/issue-111603.rs b/tests/codegen/issues/issue-111603.rs
index 3f4c7e7d54232..41bfb493ff580 100644
--- a/tests/codegen/issues/issue-111603.rs
+++ b/tests/codegen/issues/issue-111603.rs
@@ -11,7 +11,7 @@ pub fn new_from_array(x: u64) -> Arc<[u64]> {
     // Ensure that we only generate one alloca for the array.
     // CHECK: alloca
-    // CHECK-SAME: [1000 x i64]
+    // CHECK-SAME: [8000 x i8]
     // CHECK-NOT: alloca
     let array = [x; 1000];
     Arc::new(array)
diff --git a/tests/codegen/overaligned-constant.rs b/tests/codegen/overaligned-constant.rs
index 351c8ea8f4b2b..59b57140ce320 100644
--- a/tests/codegen/overaligned-constant.rs
+++ b/tests/codegen/overaligned-constant.rs
@@ -2,7 +2,7 @@
 // do not ICE during codegen, and that the LLVM constant has the higher alignment.
 //
 //@ compile-flags: -Zmir-opt-level=0 -Zmir-enable-passes=+GVN
-//@ compile-flags: -Cno-prepopulate-passes
+//@ compile-flags: -Cno-prepopulate-passes --crate-type=lib
 //@ only-64bit
 
 struct S(i32);
 struct SmallStruct(f32, Option<S>, &'static [f32]);
 
 // CHECK: @0 = private unnamed_addr constant
 // CHECK-SAME: , align 8
 
-fn main() {
-    // CHECK-LABEL: @_ZN20overaligned_constant4main
-    // CHECK: [[full:%_.*]] = alloca %SmallStruct, align 8
+#[no_mangle]
+pub fn overaligned_constant() {
+    // CHECK-LABEL: @overaligned_constant
+    // CHECK: [[full:%_.*]] = alloca [32 x i8], align 8
     // CHECK: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[full]], ptr align 8 @0, i64 32, i1 false)
     // CHECK: %b.0 = load i32, ptr @0, align 4,
     // CHECK: %b.1 = load i32, ptr getelementptr inbounds ({{.*}}), align 4
diff --git a/tests/codegen/packed.rs b/tests/codegen/packed.rs
index 764476b0aa138..5142df9c48817 100644
--- a/tests/codegen/packed.rs
+++ b/tests/codegen/packed.rs
@@ -51,7 +51,7 @@ pub struct BigPacked2 {
 // CHECK-LABEL: @call_pkd1
 #[no_mangle]
 pub fn call_pkd1(f: fn() -> Array) -> BigPacked1 {
-// CHECK: [[ALLOCA:%[_a-z0-9]+]] = alloca %Array
+// CHECK: [[ALLOCA:%[_a-z0-9]+]] = alloca [32 x i8]
 // CHECK: call void %{{.*}}(ptr noalias nocapture noundef sret{{.*}} dereferenceable(32) [[ALLOCA]])
 // CHECK: call void @llvm.memcpy.{{.*}}(ptr align 1 %{{.*}}, ptr align 4 %{{.*}}, i{{[0-9]+}} 32, i1 false)
 // check that calls whose destination is a field of a packed struct
@@ -63,7 +63,7 @@ pub fn call_pkd1(f: fn() -> Array) -> BigPacked1 {
 // CHECK-LABEL: @call_pkd2
 #[no_mangle]
 pub fn call_pkd2(f: fn() -> Array) -> BigPacked2 {
-// CHECK: [[ALLOCA:%[_a-z0-9]+]] = alloca %Array
+// CHECK: [[ALLOCA:%[_a-z0-9]+]] = alloca [32 x i8]
 // CHECK: call void %{{.*}}(ptr noalias nocapture noundef sret{{.*}} dereferenceable(32) [[ALLOCA]])
 // CHECK: call void @llvm.memcpy.{{.*}}(ptr align 2 %{{.*}}, ptr align 4 %{{.*}}, i{{[0-9]+}} 32, i1 false)
 // check that calls whose destination is a field of a packed struct
diff --git a/tests/codegen/personality_lifetimes.rs b/tests/codegen/personality_lifetimes.rs
index 06389688e0e20..71848256e3a53 100644
--- a/tests/codegen/personality_lifetimes.rs
+++ b/tests/codegen/personality_lifetimes.rs
@@ -24,7 +24,7 @@ pub fn test() {
     let _s = S;
     // Check that the personality slot alloca gets a lifetime start in each cleanup block, not just
     // in the first one.
- // CHECK: [[SLOT:%[0-9]+]] = alloca { ptr, i32{{.*}} } + // CHECK: [[SLOT:%[0-9]+]] = alloca [{{[0-9]+}} x i8] // CHECK-LABEL: cleanup: // CHECK: call void @llvm.lifetime.start.{{.*}}({{.*}}) // CHECK-LABEL: cleanup1: diff --git a/tests/codegen/sroa-fragment-debuginfo.rs b/tests/codegen/sroa-fragment-debuginfo.rs index d8c2d2c6f9ef8..32786d2a76a48 100644 --- a/tests/codegen/sroa-fragment-debuginfo.rs +++ b/tests/codegen/sroa-fragment-debuginfo.rs @@ -14,9 +14,9 @@ pub struct ExtraSlice<'input> { #[no_mangle] pub fn extra(s: &[u8]) { // CHECK: void @extra( -// CHECK: %slice.dbg.spill1 = alloca i32, -// CHECK: %slice.dbg.spill = alloca { ptr, i64 }, -// CHECK: %s.dbg.spill = alloca { ptr, i64 }, +// CHECK: %slice.dbg.spill1 = alloca [4 x i8], +// CHECK: %slice.dbg.spill = alloca [16 x i8], +// CHECK: %s.dbg.spill = alloca [16 x i8], // CHECK: call void @llvm.dbg.declare(metadata ptr %s.dbg.spill, metadata ![[S_EXTRA:.*]], metadata !DIExpression()), // CHECK: call void @llvm.dbg.declare(metadata ptr %slice.dbg.spill, metadata ![[SLICE_EXTRA:.*]], metadata !DIExpression(DW_OP_LLVM_fragment, 0, 128)), // CHECK: call void @llvm.dbg.declare(metadata ptr %slice.dbg.spill1, metadata ![[SLICE_EXTRA]], metadata !DIExpression(DW_OP_LLVM_fragment, 128, 32)), diff --git a/tests/codegen/stores.rs b/tests/codegen/stores.rs index 3fda5aa47eaf8..86ec52fa10159 100644 --- a/tests/codegen/stores.rs +++ b/tests/codegen/stores.rs @@ -15,8 +15,8 @@ pub struct Bytes { // dependent alignment #[no_mangle] pub fn small_array_alignment(x: &mut [i8; 4], y: [i8; 4]) { -// CHECK: [[TMP:%.+]] = alloca i32 -// CHECK: %y = alloca [4 x i8] +// CHECK: [[TMP:%.+]] = alloca [4 x i8], align 4 +// CHECK: %y = alloca [4 x i8], align 1 // CHECK: store i32 %0, ptr [[TMP]] // CHECK: call void @llvm.memcpy.{{.*}}(ptr align 1 {{.+}}, ptr align 4 {{.+}}, i{{[0-9]+}} 4, i1 false) *x = y; @@ -27,8 +27,8 @@ pub fn small_array_alignment(x: &mut [i8; 4], y: [i8; 4]) { // dependent alignment #[no_mangle] pub fn small_struct_alignment(x: &mut Bytes, y: Bytes) { -// CHECK: [[TMP:%.+]] = alloca i32 -// CHECK: %y = alloca %Bytes +// CHECK: [[TMP:%.+]] = alloca [4 x i8], align 4 +// CHECK: %y = alloca [4 x i8], align 1 // CHECK: store i32 %0, ptr [[TMP]] // CHECK: call void @llvm.memcpy.{{.*}}(ptr align 1 {{.+}}, ptr align 4 {{.+}}, i{{[0-9]+}} 4, i1 false) *x = y; diff --git a/tests/codegen/swap-large-types.rs b/tests/codegen/swap-large-types.rs index b182f3ed94798..b976f6fe207bc 100644 --- a/tests/codegen/swap-large-types.rs +++ b/tests/codegen/swap-large-types.rs @@ -15,7 +15,7 @@ type KeccakBuffer = [[u64; 5]; 5]; // CHECK-LABEL: @swap_basic #[no_mangle] pub fn swap_basic(x: &mut KeccakBuffer, y: &mut KeccakBuffer) { -// CHECK: alloca [5 x [5 x i64]] +// CHECK: alloca [200 x i8] // SAFETY: exclusive references are always valid to read/write, // are non-overlapping, and nothing here panics so it's drop-safe. 
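
One detail of the `rustc_codegen_ssa/src/mir/operand.rs` hunk earlier in this patch deserves spelling out: `dynamic_alloca` is only guaranteed `Align::ONE`, so the unsized-local path over-allocates by `align - 1` bytes and rounds the address up to the requested alignment. The same arithmetic replayed on plain integers, with made-up values (an illustrative sketch, not code from the patch):

```rust
// Mirrors the codegen sequence: size_extra = size + (align - 1), and
// offset = (-address) & (align - 1) picks the first `align`-aligned byte
// inside the over-sized allocation.
fn round_up_within(address: usize, size: usize, align: usize) -> (usize, usize) {
    debug_assert!(align.is_power_of_two());
    let size_extra = size + (align - 1);
    let offset = address.wrapping_neg() & (align - 1);
    (size_extra, address + offset)
}

fn main() {
    // A 24-byte value that needs 16-byte alignment, with the alloca at 0x1004:
    let (size_extra, start) = round_up_within(0x1004, 24, 16);
    assert_eq!(size_extra, 39); // bytes actually reserved
    assert_eq!(start, 0x1010); // 16-aligned; start + 24 fits within 0x1004 + 39
}
```
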
diff --git a/tests/codegen/swap-small-types.rs b/tests/codegen/swap-small-types.rs index 5fdf4a5804a9c..56060aff4d927 100644 --- a/tests/codegen/swap-small-types.rs +++ b/tests/codegen/swap-small-types.rs @@ -12,7 +12,7 @@ type RGB48 = [u16; 3]; pub fn swap_rgb48_manually(x: &mut RGB48, y: &mut RGB48) { // FIXME: See #115212 for why this has an alloca again - // CHECK: alloca [3 x i16], align 2 + // CHECK: alloca [6 x i8], align 2 // CHECK: call void @llvm.memcpy.p0.p0.i64({{.+}}, i64 6, i1 false) // CHECK: call void @llvm.memcpy.p0.p0.i64({{.+}}, i64 6, i1 false) // CHECK: call void @llvm.memcpy.p0.p0.i64({{.+}}, i64 6, i1 false) From 26be56992eeff4bee387a32eec19c58c045e6ede Mon Sep 17 00:00:00 2001 From: Erik Desjardins Date: Tue, 5 Mar 2024 20:55:42 -0500 Subject: [PATCH 2/2] adjust stack-protector test (which inappropriately depends on IR types) --- .../stack-protector-heuristics-effect.rs | 55 +++++-------------- 1 file changed, 15 insertions(+), 40 deletions(-) diff --git a/tests/assembly/stack-protector/stack-protector-heuristics-effect.rs b/tests/assembly/stack-protector/stack-protector-heuristics-effect.rs index e63adc88ff501..8e32d170244a7 100644 --- a/tests/assembly/stack-protector/stack-protector-heuristics-effect.rs +++ b/tests/assembly/stack-protector/stack-protector-heuristics-effect.rs @@ -11,6 +11,11 @@ //@ compile-flags: -C opt-level=2 -Z merge-functions=disabled //@ min-llvm-version: 17.0.2 +// NOTE: the heuristics for stack smash protection inappropriately rely on types in LLVM IR, +// despite those types having no semantic meaning. This means that the `basic` and `strong` +// settings do not behave in a coherent way. This is a known issue in LLVM. +// See comments on https://github.com/rust-lang/rust/issues/114903. + #![crate_type = "lib"] #![allow(incomplete_features)] @@ -39,23 +44,9 @@ pub fn array_char(f: fn(*const char)) { f(&b as *const _); f(&c as *const _); - // Any type of local array variable leads to stack protection with the - // "strong" heuristic. The 'basic' heuristic only adds stack protection to - // functions with local array variables of a byte-sized type, however. Since - // 'char' is 4 bytes in Rust, this function is not protected by the 'basic' - // heuristic - // - // (This test *also* takes the address of the local stack variables. We - // cannot know that this isn't what triggers the `strong` heuristic. - // However, the test strategy of passing the address of a stack array to an - // external function is sufficient to trigger the `basic` heuristic (see - // test `array_u8_large()`). Since the `basic` heuristic only checks for the - // presence of stack-local array variables, we can be confident that this - // test also captures this part of the `strong` heuristic specification.) - // all: __stack_chk_fail // strong: __stack_chk_fail - // basic-NOT: __stack_chk_fail + // basic: __stack_chk_fail // none-NOT: __stack_chk_fail // missing-NOT: __stack_chk_fail } @@ -163,26 +154,11 @@ pub fn local_string_addr_taken(f: fn(&String)) { f(&x); // Taking the address of the local variable `x` leads to stack smash - // protection with the `strong` heuristic, but not with the `basic` - // heuristic. It does not matter that the reference is not mut. 
- // - // An interesting note is that a similar function in C++ *would* be - // protected by the `basic` heuristic, because `std::string` has a char - // array internally as a small object optimization: - // ``` - // cat < - // void f(void (*g)(const std::string&)) { - // std::string x; - // g(x); - // } - // EOF - // ``` - // + // protection. It does not matter that the reference is not mut. // all: __stack_chk_fail // strong: __stack_chk_fail - // basic-NOT: __stack_chk_fail + // basic: __stack_chk_fail // none-NOT: __stack_chk_fail // missing-NOT: __stack_chk_fail } @@ -233,8 +209,8 @@ pub fn local_large_var_moved(f: fn(Gigastruct)) { // Even though the local variable conceptually doesn't have its address // taken, it's so large that the "move" is implemented with a reference to a // stack-local variable in the ABI. Consequently, this function *is* - // protected by the `strong` heuristic. This is also the case for - // rvalue-references in C++, regardless of struct size: + // protected. This is also the case for rvalue-references in C++, + // regardless of struct size: // ``` // cat < @@ -248,7 +224,7 @@ pub fn local_large_var_moved(f: fn(Gigastruct)) { // all: __stack_chk_fail // strong: __stack_chk_fail - // basic-NOT: __stack_chk_fail + // basic: __stack_chk_fail // none-NOT: __stack_chk_fail // missing-NOT: __stack_chk_fail } @@ -261,9 +237,9 @@ pub fn local_large_var_cloned(f: fn(Gigastruct)) { // A new instance of `Gigastruct` is passed to `f()`, without any apparent // connection to this stack frame. Still, since instances of `Gigastruct` // are sufficiently large, it is allocated in the caller stack frame and - // passed as a pointer. As such, this function is *also* protected by the - // `strong` heuristic, just like `local_large_var_moved`. This is also the - // case for pass-by-value of sufficiently large structs in C++: + // passed as a pointer. As such, this function is *also* protected, just + // like `local_large_var_moved`. This is also the case for pass-by-value + // of sufficiently large structs in C++: // ``` // cat < @@ -275,10 +251,9 @@ pub fn local_large_var_cloned(f: fn(Gigastruct)) { // EOF // ``` - // all: __stack_chk_fail // strong: __stack_chk_fail - // basic-NOT: __stack_chk_fail + // basic: __stack_chk_fail // none-NOT: __stack_chk_fail // missing-NOT: __stack_chk_fail }
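
For context on why the second patch flips so many `basic-NOT: __stack_chk_fail` expectations to `basic: __stack_chk_fail`: LLVM's `basic` (`ssp`) heuristic only protects frames containing arrays of byte-sized elements, so once every fixed-size local lowers to an `[N x i8]` alloca, the heuristic fires for frames it previously ignored. A hypothetical function illustrating the flip (a sketch assuming the default `ssp-buffer-size`; not part of the test suite):

```rust
// Before this series, `buf` lowered to `alloca [4 x i32]`, which the `basic`
// heuristic does not treat as a character array, so no protector was added.
// It now lowers to `alloca [16 x i8]`, so `-Z stack-protector=basic` emits
// a __stack_chk_fail check for this frame too.
#[no_mangle]
pub fn fills_ints(f: fn(*mut u32)) {
    let mut buf = [0u32; 4];
    f(buf.as_mut_ptr());
}
```
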