https://github.com/yxsamliu updated 
https://github.com/llvm/llvm-project/pull/165159

>From 663fec83dfab731a1f519cd98767e9eb6f285b16 Mon Sep 17 00:00:00 2001
From: "Yaxun (Sam) Liu" <[email protected]>
Date: Tue, 10 Feb 2026 16:44:37 -0500
Subject: [PATCH 1/7] [SROA] Canonicalize homogeneous structs into fixed
 vectors

When SROA splits an alloca into partitions, it now recognizes
homogeneous structs (all fields same type, no padding) and converts
them to fixed vectors. This enables further optimization of
struct-heavy code that previously remained as scalar operations.

The conversion is always enabled with no size limit, as the 2-or-4
element constraint naturally bounds the struct sizes involved.
---
 llvm/lib/Transforms/Scalar/SROA.cpp           |  63 +++
 llvm/test/CodeGen/NVPTX/lower-byval-args.ll   |  77 ++--
 .../assignment-tracking/sroa/user-memcpy.ll   |   8 +-
 .../DebugInfo/Generic/sroa-alloca-offset.ll   |   8 +-
 llvm/test/DebugInfo/X86/sroasplit-4.ll        |  11 +-
 llvm/test/Transforms/SROA/struct-to-vector.ll | 400 ++++++++++++++++++
 llvm/test/Transforms/SROA/tbaa-struct3.ll     |   9 +-
 7 files changed, 508 insertions(+), 68 deletions(-)
 create mode 100644 llvm/test/Transforms/SROA/struct-to-vector.ll

diff --git a/llvm/lib/Transforms/Scalar/SROA.cpp 
b/llvm/lib/Transforms/Scalar/SROA.cpp
index f0a1aa3367f5b..b98541c0de899 100644
--- a/llvm/lib/Transforms/Scalar/SROA.cpp
+++ b/llvm/lib/Transforms/Scalar/SROA.cpp
@@ -5121,6 +5121,64 @@ bool SROA::presplitLoadsAndStores(AllocaInst &AI, 
AllocaSlices &AS) {
   return true;
 }
 
+/// Try to canonicalize a homogeneous, tightly-packed struct to a vector type.
+///
+/// For structs where all elements have the same type and are tightly packed
+/// (no padding), we can represent them as a fixed vector which enables better
+/// optimization (e.g., vector selects instead of memcpy).
+///
+/// \param STy The struct type to try to canonicalize.
+/// \param DL The DataLayout for size/alignment queries.
+/// \returns The equivalent vector type, or nullptr if not applicable.
+static FixedVectorType *tryCanonicalizeStructToVector(StructType *STy,
+                                                      const DataLayout &DL) {
+  unsigned NumElts = STy->getNumElements();
+  if (NumElts != 2 && NumElts != 4)
+    return nullptr;
+
+  // All elements must be the same type.
+  Type *EltTy = STy->getElementType(0);
+  for (unsigned I = 1; I < NumElts; ++I)
+    if (STy->getElementType(I) != EltTy)
+      return nullptr;
+
+  // Element type must be valid for vectors.
+  if (!VectorType::isValidElementType(EltTy))
+    return nullptr;
+
+  // Only allow integer types >= 8 bits or floating point.
+  if (auto *IT = dyn_cast<IntegerType>(EltTy)) {
+    if (IT->getBitWidth() < 8)
+      return nullptr;
+  } else if (!EltTy->isFloatingPointTy()) {
+    return nullptr;
+  }
+
+  // Element size must be fixed and non-zero.
+  TypeSize EltTS = DL.getTypeAllocSize(EltTy);
+  if (!EltTS.isFixed())
+    return nullptr;
+  uint64_t EltSize = EltTS.getFixedValue();
+  if (EltSize < 1)
+    return nullptr;
+
+  const StructLayout *SL = DL.getStructLayout(STy);
+  uint64_t StructSize = SL->getSizeInBytes();
+  if (StructSize == 0)
+    return nullptr;
+
+  // Must be tightly packed: size == NumElts * EltSize.
+  if (StructSize != NumElts * EltSize)
+    return nullptr;
+
+  // Verify each element is at the expected offset (no padding).
+  for (unsigned I = 0; I < NumElts; ++I)
+    if (SL->getElementOffset(I) != I * EltSize)
+      return nullptr;
+
+  return FixedVectorType::get(EltTy, NumElts);
+}
+
 /// Select a partition type for an alloca partition.
 ///
 /// Try to compute a friendly type for this partition of the alloca. This
@@ -5194,6 +5252,11 @@ selectPartitionType(Partition &P, const DataLayout &DL, 
AllocaInst &AI,
         isIntegerWideningViable(P, LargestIntTy, DL))
       return {LargestIntTy, true, nullptr};
 
+    // Try homogeneous struct to vector canonicalization.
+    if (auto *STy = dyn_cast<StructType>(TypePartitionTy))
+      if (auto *VTy = tryCanonicalizeStructToVector(STy, DL))
+        return {VTy, false, nullptr};
+
     // Fallback to TypePartitionTy and we probably won't promote.
     return {TypePartitionTy, false, nullptr};
   }
diff --git a/llvm/test/CodeGen/NVPTX/lower-byval-args.ll 
b/llvm/test/CodeGen/NVPTX/lower-byval-args.ll
index 827097e90e7d3..4f018e15571a3 100644
--- a/llvm/test/CodeGen/NVPTX/lower-byval-args.ll
+++ b/llvm/test/CodeGen/NVPTX/lower-byval-args.ll
@@ -455,64 +455,39 @@ define dso_local ptx_kernel void @memcpy_to_param(ptr 
nocapture noundef readonly
 ; PTX-NEXT:    .local .align 8 .b8 __local_depot9[8];
 ; PTX-NEXT:    .reg .b64 %SP;
 ; PTX-NEXT:    .reg .b64 %SPL;
-; PTX-NEXT:    .reg .b32 %r<3>;
-; PTX-NEXT:    .reg .b64 %rd<47>;
+; PTX-NEXT:    .reg .b32 %r<23>;
+; PTX-NEXT:    .reg .b64 %rd<4>;
 ; PTX-EMPTY:
 ; PTX-NEXT:  // %bb.0: // %entry
 ; PTX-NEXT:    mov.b64 %SPL, __local_depot9;
 ; PTX-NEXT:    cvta.local.u64 %SP, %SPL;
 ; PTX-NEXT:    ld.param.b64 %rd1, [memcpy_to_param_param_0];
-; PTX-NEXT:    add.u64 %rd2, %SPL, 0;
+; PTX-NEXT:    cvta.to.global.u64 %rd2, %rd1;
 ; PTX-NEXT:    ld.param.b32 %r1, [memcpy_to_param_param_1+4];
-; PTX-NEXT:    st.local.b32 [%rd2+4], %r1;
 ; PTX-NEXT:    ld.param.b32 %r2, [memcpy_to_param_param_1];
-; PTX-NEXT:    st.local.b32 [%rd2], %r2;
-; PTX-NEXT:    ld.volatile.b8 %rd3, [%rd1];
-; PTX-NEXT:    ld.volatile.b8 %rd4, [%rd1+1];
-; PTX-NEXT:    shl.b64 %rd5, %rd4, 8;
-; PTX-NEXT:    or.b64 %rd6, %rd5, %rd3;
-; PTX-NEXT:    ld.volatile.b8 %rd7, [%rd1+2];
-; PTX-NEXT:    shl.b64 %rd8, %rd7, 16;
-; PTX-NEXT:    ld.volatile.b8 %rd9, [%rd1+3];
-; PTX-NEXT:    shl.b64 %rd10, %rd9, 24;
-; PTX-NEXT:    or.b64 %rd11, %rd10, %rd8;
-; PTX-NEXT:    or.b64 %rd12, %rd11, %rd6;
-; PTX-NEXT:    ld.volatile.b8 %rd13, [%rd1+4];
-; PTX-NEXT:    ld.volatile.b8 %rd14, [%rd1+5];
-; PTX-NEXT:    shl.b64 %rd15, %rd14, 8;
-; PTX-NEXT:    or.b64 %rd16, %rd15, %rd13;
-; PTX-NEXT:    ld.volatile.b8 %rd17, [%rd1+6];
-; PTX-NEXT:    shl.b64 %rd18, %rd17, 16;
-; PTX-NEXT:    ld.volatile.b8 %rd19, [%rd1+7];
-; PTX-NEXT:    shl.b64 %rd20, %rd19, 24;
-; PTX-NEXT:    or.b64 %rd21, %rd20, %rd18;
-; PTX-NEXT:    or.b64 %rd22, %rd21, %rd16;
-; PTX-NEXT:    shl.b64 %rd23, %rd22, 32;
-; PTX-NEXT:    or.b64 %rd24, %rd23, %rd12;
-; PTX-NEXT:    st.volatile.b64 [%SP], %rd24;
-; PTX-NEXT:    ld.volatile.b8 %rd25, [%rd1+8];
-; PTX-NEXT:    ld.volatile.b8 %rd26, [%rd1+9];
-; PTX-NEXT:    shl.b64 %rd27, %rd26, 8;
-; PTX-NEXT:    or.b64 %rd28, %rd27, %rd25;
-; PTX-NEXT:    ld.volatile.b8 %rd29, [%rd1+10];
-; PTX-NEXT:    shl.b64 %rd30, %rd29, 16;
-; PTX-NEXT:    ld.volatile.b8 %rd31, [%rd1+11];
-; PTX-NEXT:    shl.b64 %rd32, %rd31, 24;
-; PTX-NEXT:    or.b64 %rd33, %rd32, %rd30;
-; PTX-NEXT:    or.b64 %rd34, %rd33, %rd28;
-; PTX-NEXT:    ld.volatile.b8 %rd35, [%rd1+12];
-; PTX-NEXT:    ld.volatile.b8 %rd36, [%rd1+13];
-; PTX-NEXT:    shl.b64 %rd37, %rd36, 8;
-; PTX-NEXT:    or.b64 %rd38, %rd37, %rd35;
-; PTX-NEXT:    ld.volatile.b8 %rd39, [%rd1+14];
-; PTX-NEXT:    shl.b64 %rd40, %rd39, 16;
-; PTX-NEXT:    ld.volatile.b8 %rd41, [%rd1+15];
-; PTX-NEXT:    shl.b64 %rd42, %rd41, 24;
-; PTX-NEXT:    or.b64 %rd43, %rd42, %rd40;
-; PTX-NEXT:    or.b64 %rd44, %rd43, %rd38;
-; PTX-NEXT:    shl.b64 %rd45, %rd44, 32;
-; PTX-NEXT:    or.b64 %rd46, %rd45, %rd34;
-; PTX-NEXT:    st.volatile.b64 [%SP+8], %rd46;
+; PTX-NEXT:    st.v2.b32 [%SP], {%r2, %r1};
+; PTX-NEXT:    ld.volatile.global.b8 %r3, [%rd2+4];
+; PTX-NEXT:    ld.volatile.global.b8 %r4, [%rd2+5];
+; PTX-NEXT:    shl.b32 %r5, %r4, 8;
+; PTX-NEXT:    or.b32 %r6, %r5, %r3;
+; PTX-NEXT:    ld.volatile.global.b8 %r7, [%rd2+6];
+; PTX-NEXT:    shl.b32 %r8, %r7, 16;
+; PTX-NEXT:    ld.volatile.global.b8 %r9, [%rd2+7];
+; PTX-NEXT:    shl.b32 %r10, %r9, 24;
+; PTX-NEXT:    or.b32 %r11, %r10, %r8;
+; PTX-NEXT:    or.b32 %r12, %r11, %r6;
+; PTX-NEXT:    ld.volatile.global.b8 %r13, [%rd2];
+; PTX-NEXT:    ld.volatile.global.b8 %r14, [%rd2+1];
+; PTX-NEXT:    shl.b32 %r15, %r14, 8;
+; PTX-NEXT:    or.b32 %r16, %r15, %r13;
+; PTX-NEXT:    ld.volatile.global.b8 %r17, [%rd2+2];
+; PTX-NEXT:    shl.b32 %r18, %r17, 16;
+; PTX-NEXT:    ld.volatile.global.b8 %r19, [%rd2+3];
+; PTX-NEXT:    shl.b32 %r20, %r19, 24;
+; PTX-NEXT:    or.b32 %r21, %r20, %r18;
+; PTX-NEXT:    or.b32 %r22, %r21, %r16;
+; PTX-NEXT:    add.u64 %rd3, %SPL, 0;
+; PTX-NEXT:    st.local.v2.b32 [%rd3], {%r22, %r12};
 ; PTX-NEXT:    ret;
 entry:
   tail call void @llvm.memcpy.p0.p0.i64(ptr %s, ptr %in, i64 16, i1 true)
diff --git 
a/llvm/test/DebugInfo/Generic/assignment-tracking/sroa/user-memcpy.ll 
b/llvm/test/DebugInfo/Generic/assignment-tracking/sroa/user-memcpy.ll
index ded78f4ff83f4..eff2713c54468 100644
--- a/llvm/test/DebugInfo/Generic/assignment-tracking/sroa/user-memcpy.ll
+++ b/llvm/test/DebugInfo/Generic/assignment-tracking/sroa/user-memcpy.ll
@@ -21,8 +21,8 @@
 ;; Allocas have been promoted - the linked dbg.assigns have been removed.
 
 ;; | V3i point = {0, 0, 0};
-; CHECK-NEXT: #dbg_value(i64 0, ![[point:[0-9]+]], 
!DIExpression(DW_OP_LLVM_fragment, 0, 64),
-; CHECK-NEXT: #dbg_value(i64 0, ![[point]], !DIExpression(DW_OP_LLVM_fragment, 
64, 64),
+;; First two fields vectorized to <2 x i64>.
+; CHECK-NEXT: #dbg_value(<2 x i64> zeroinitializer, ![[point:[0-9]+]], 
!DIExpression(DW_OP_LLVM_fragment, 0, 128),
 
 ;; point.z = 5000;
 ; CHECK-NEXT: #dbg_value(i64 5000, ![[point]], 
!DIExpression(DW_OP_LLVM_fragment, 128, 64),
@@ -40,8 +40,8 @@
 ; CHECK-NEXT: #dbg_value(i64 %other.sroa.3.0.copyload, ![[other]], 
!DIExpression(DW_OP_LLVM_fragment, 128, 64),
 
 ;; | std::memcpy(&point.y, &other.x, sizeof(long) * 2);
-;;   other is now 3 scalars:
-;;     point.y = other.x
+;;   The first partition is <2 x i64>, insertelement updates point.y:
+; CHECK-NEXT: %point.sroa.0.8.vec.insert = insertelement <2 x i64> 
zeroinitializer, i64 %other.sroa.0.0.copyload, i32 1
 ; CHECK-NEXT: #dbg_value(i64 %other.sroa.0.0.copyload, ![[point]], 
!DIExpression(DW_OP_LLVM_fragment, 64, 64),
 ;;
 ;;     point.z = other.y
diff --git a/llvm/test/DebugInfo/Generic/sroa-alloca-offset.ll 
b/llvm/test/DebugInfo/Generic/sroa-alloca-offset.ll
index 6718711f83e04..8f5d2ed7515ed 100644
--- a/llvm/test/DebugInfo/Generic/sroa-alloca-offset.ll
+++ b/llvm/test/DebugInfo/Generic/sroa-alloca-offset.ll
@@ -1,5 +1,5 @@
 ; RUN: opt %s -passes=sroa -S | FileCheck %s --check-prefixes=COMMON,OLD
-; RUN: opt %s -passes=declare-to-assign,sroa -S | FileCheck %s 
--check-prefixes=COMMON,NEW
+; RUN: opt %s -passes='declare-to-assign,sroa' -S | FileCheck %s 
--check-prefixes=COMMON,NEW
 
 ;; C++17 source:
 ;; struct two { int a, b; } gt;
@@ -140,9 +140,7 @@ entry:
 ;; 16 bit variable f (!62): value vgf (lower bits)
 ;; 16 bit variable g (!63): value vgf (upper bits)
 ;;
-;; 16 bit variable h (!64): deref dead_64_128
-; COMMON-NEXT: %[[dead_64_128:.*]] = alloca %struct.two
-; COMMON-NEXT: #dbg_declare(ptr %[[dead_64_128]], ![[h:[0-9]+]], 
!DIExpression(),
+;; 16 bit variable h (!64): promoted to <2 x i32> vector
 ; COMMON-NEXT: %[[ve:.*]] = load i32, ptr @gf
 ;; FIXME: mem2reg bug - offset is incorrect - see comment above.
 ; COMMON-NEXT: #dbg_value(i32 %[[ve]], ![[e:[0-9]+]], 
!DIExpression(DW_OP_plus_uconst, 2),
@@ -150,6 +148,8 @@ entry:
 ; COMMON-NEXT: #dbg_value(i32 %[[vfg]], ![[f:[0-9]+]], !DIExpression(),
 ;; FIXME: mem2reg bug - offset is incorrect - see comment above.
 ; COMMON-NEXT: #dbg_value(i32 %[[vfg]], ![[g:[0-9]+]], 
!DIExpression(DW_OP_plus_uconst, 2),
+; COMMON-NEXT: %[[vh:.*]] = load <2 x i32>, ptr getelementptr inbounds (i8, 
ptr @gf, i64 8)
+; COMMON-NEXT: #dbg_value(<2 x i32> %[[vh]], ![[h:[0-9]+]], !DIExpression(),
 define dso_local noundef i32 @_Z4fun3v() #0 !dbg !55 {
 entry:
   %0 = alloca %struct.four, align 4
diff --git a/llvm/test/DebugInfo/X86/sroasplit-4.ll 
b/llvm/test/DebugInfo/X86/sroasplit-4.ll
index d5ce348e9896e..a44d1a3d913c7 100644
--- a/llvm/test/DebugInfo/X86/sroasplit-4.ll
+++ b/llvm/test/DebugInfo/X86/sroasplit-4.ll
@@ -1,12 +1,13 @@
-; RUN: opt -passes='sroa' < %s -S -o - | FileCheck %s
+; RUN: opt -passes=sroa < %s -S -o - | FileCheck %s
 ;
 ; Test that recursively splitting an alloca updates the debug info correctly.
 ; CHECK: %[[T:.*]] = load i64, ptr @t, align 8
-; CHECK: #dbg_value(i64 %[[T]], ![[Y:.*]], !DIExpression(DW_OP_LLVM_fragment, 
0, 64),
+; CHECK: %[[VI:.*]] = insertelement <2 x i64> {{undef|poison}}, i64 %[[T]], 
i32 0
+; CHECK: #dbg_value(<2 x i64> %[[VI]], ![[Y:.*]], !DIExpression(),
 ; CHECK: %[[T1:.*]] = load i64, ptr @t, align 8
-; CHECK: #dbg_value(i64 %[[T1]], ![[Y]], !DIExpression(DW_OP_LLVM_fragment, 
64, 64),
-; CHECK: #dbg_value(i64 %[[T]], ![[R:.*]], !DIExpression(DW_OP_LLVM_fragment, 
192, 64),
-; CHECK: #dbg_value(i64 %[[T1]], ![[R]], !DIExpression(DW_OP_LLVM_fragment, 
256, 64),
+; CHECK: %[[VI2:.*]] = insertelement <2 x i64> %[[VI]], i64 %[[T1]], i32 1
+; CHECK: #dbg_value(<2 x i64> %[[VI2]], ![[Y]], !DIExpression(),
+; CHECK: #dbg_value(<2 x i64> %[[VI2]], ![[R:.*]], 
!DIExpression(DW_OP_LLVM_fragment, 192, 128),
 ;
 ; struct p {
 ;   __SIZE_TYPE__ s;
diff --git a/llvm/test/Transforms/SROA/struct-to-vector.ll 
b/llvm/test/Transforms/SROA/struct-to-vector.ll
new file mode 100644
index 0000000000000..7bcde59f14b30
--- /dev/null
+++ b/llvm/test/Transforms/SROA/struct-to-vector.ll
@@ -0,0 +1,400 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py 
UTC_ARGS: --version 6
+; RUN: opt -passes='sroa,gvn,instcombine,simplifycfg' -S %s \
+; RUN:   | FileCheck %s \
+; RUN:       --check-prefixes=FLAT,NESTED,PADDED,NONHOMO,I1,PTR
+%struct.myint4 = type { i32, i32, i32, i32 }
+
+define dso_local void @foo_flat(ptr noundef %x, i64 %y.coerce0, i64 
%y.coerce1, i32 noundef %cond) {
+; FLAT-LABEL: define dso_local void @foo_flat(
+; FLAT-SAME: ptr noundef [[X:%.*]], i64 [[Y_COERCE0:%.*]], i64 
[[Y_COERCE1:%.*]], i32 noundef [[COND:%.*]]) {
+; FLAT-NEXT:  [[ENTRY:.*:]]
+; FLAT-NEXT:    [[TOBOOL_NOT:%.*]] = icmp eq i32 [[COND]], 0
+; FLAT-NEXT:    [[Y_SROA_0_0_VEC_INSERT:%.*]] = insertelement <2 x i64> 
poison, i64 [[Y_COERCE0]], i64 0
+; FLAT-NEXT:    [[Y_SROA_0_8_VEC_INSERT:%.*]] = insertelement <2 x i64> 
[[Y_SROA_0_0_VEC_INSERT]], i64 [[Y_COERCE1]], i64 1
+; FLAT-NEXT:    [[TMP0:%.*]] = bitcast <2 x i64> [[Y_SROA_0_8_VEC_INSERT]] to 
<4 x i32>
+; FLAT-NEXT:    [[COND1_SROA_SPECULATED:%.*]] = select i1 [[TOBOOL_NOT]], <4 x 
i32> zeroinitializer, <4 x i32> [[TMP0]]
+; FLAT-NEXT:    store <4 x i32> [[COND1_SROA_SPECULATED]], ptr [[X]], align 16
+; FLAT-NEXT:    ret void
+;
+entry:
+  %y = alloca %struct.myint4, align 16
+  %x.addr = alloca ptr, align 8
+  %cond.addr = alloca i32, align 4
+  %temp = alloca %struct.myint4, align 16
+  %zero = alloca %struct.myint4, align 16
+  %data = alloca %struct.myint4, align 16
+  %0 = getelementptr inbounds nuw { i64, i64 }, ptr %y, i32 0, i32 0
+  store i64 %y.coerce0, ptr %0, align 16
+  %1 = getelementptr inbounds nuw { i64, i64 }, ptr %y, i32 0, i32 1
+  store i64 %y.coerce1, ptr %1, align 8
+  store ptr %x, ptr %x.addr, align 8
+  store i32 %cond, ptr %cond.addr, align 4
+  call void @llvm.lifetime.start.p0(ptr %temp)
+  call void @llvm.memcpy.p0.p0.i64(ptr align 16 %temp, ptr align 16 %y, i64 
16, i1 false)
+  call void @llvm.lifetime.start.p0(ptr %zero)
+  call void @llvm.memset.p0.i64(ptr align 16 %zero, i8 0, i64 16, i1 false)
+  call void @llvm.lifetime.start.p0(ptr %data)
+  %2 = load i32, ptr %cond.addr, align 4
+  %tobool = icmp ne i32 %2, 0
+  br i1 %tobool, label %cond.true, label %cond.false
+
+cond.true:
+  br label %cond.end
+
+cond.false:
+  br label %cond.end
+
+cond.end:
+  %cond1 = phi ptr [ %temp, %cond.true ], [ %zero, %cond.false ]
+  call void @llvm.memcpy.p0.p0.i64(ptr align 16 %data, ptr align 16 %cond1, 
i64 16, i1 false)
+  %3 = load ptr, ptr %x.addr, align 8
+  call void @llvm.memcpy.p0.p0.i64(ptr align 16 %3, ptr align 16 %data, i64 
16, i1 false)
+  call void @llvm.lifetime.end.p0(ptr %data)
+  call void @llvm.lifetime.end.p0(ptr %zero)
+  call void @llvm.lifetime.end.p0(ptr %temp)
+  ret void
+}
+%struct.myint4_base_n = type { i32, i32, i32, i32 }
+%struct.myint4_nested = type { %struct.myint4_base_n }
+
+define dso_local void @foo_nested(ptr noundef %x, i64 %y.coerce0, i64 
%y.coerce1, i32 noundef %cond) {
+; FLAT-LABEL: define dso_local void @foo_nested(
+; FLAT-SAME: ptr noundef [[X:%.*]], i64 [[Y_COERCE0:%.*]], i64 
[[Y_COERCE1:%.*]], i32 noundef [[COND:%.*]]) {
+; FLAT-NEXT:  [[ENTRY:.*:]]
+; FLAT-NEXT:    [[TOBOOL_NOT:%.*]] = icmp eq i32 [[COND]], 0
+; FLAT-NEXT:    [[Y_SROA_0_0_VEC_INSERT:%.*]] = insertelement <2 x i64> 
poison, i64 [[Y_COERCE0]], i64 0
+; FLAT-NEXT:    [[Y_SROA_0_8_VEC_INSERT:%.*]] = insertelement <2 x i64> 
[[Y_SROA_0_0_VEC_INSERT]], i64 [[Y_COERCE1]], i64 1
+; FLAT-NEXT:    [[TMP0:%.*]] = bitcast <2 x i64> [[Y_SROA_0_8_VEC_INSERT]] to 
<4 x i32>
+; FLAT-NEXT:    [[COND1_SROA_SPECULATED:%.*]] = select i1 [[TOBOOL_NOT]], <4 x 
i32> zeroinitializer, <4 x i32> [[TMP0]]
+; FLAT-NEXT:    store <4 x i32> [[COND1_SROA_SPECULATED]], ptr [[X]], align 16
+; FLAT-NEXT:    ret void
+;
+entry:
+  %y = alloca %struct.myint4_nested, align 16
+  %x.addr = alloca ptr, align 8
+  %cond.addr = alloca i32, align 4
+  %temp = alloca %struct.myint4_nested, align 16
+  %zero = alloca %struct.myint4_nested, align 16
+  %data = alloca %struct.myint4_nested, align 16
+  %0 = getelementptr inbounds nuw { i64, i64 }, ptr %y, i32 0, i32 0
+  store i64 %y.coerce0, ptr %0, align 16
+  %1 = getelementptr inbounds nuw { i64, i64 }, ptr %y, i32 0, i32 1
+  store i64 %y.coerce1, ptr %1, align 8
+  store ptr %x, ptr %x.addr, align 8
+  store i32 %cond, ptr %cond.addr, align 4
+  call void @llvm.lifetime.start.p0(ptr %temp)
+  call void @llvm.memcpy.p0.p0.i64(ptr align 16 %temp, ptr align 16 %y, i64 
16, i1 false)
+  call void @llvm.lifetime.start.p0(ptr %zero)
+  call void @llvm.memset.p0.i64(ptr align 16 %zero, i8 0, i64 16, i1 false)
+  call void @llvm.lifetime.start.p0(ptr %data)
+  %2 = load i32, ptr %cond.addr, align 4
+  %tobool = icmp ne i32 %2, 0
+  br i1 %tobool, label %cond.true, label %cond.false
+
+cond.true:
+  br label %cond.end
+
+cond.false:
+  br label %cond.end
+
+cond.end:
+  %cond1 = phi ptr [ %temp, %cond.true ], [ %zero, %cond.false ]
+  call void @llvm.memcpy.p0.p0.i64(ptr align 16 %data, ptr align 16 %cond1, 
i64 16, i1 false)
+  %3 = load ptr, ptr %x.addr, align 8
+  call void @llvm.memcpy.p0.p0.i64(ptr align 16 %3, ptr align 16 %data, i64 
16, i1 false)
+  call void @llvm.lifetime.end.p0(ptr %data)
+  call void @llvm.lifetime.end.p0(ptr %zero)
+  call void @llvm.lifetime.end.p0(ptr %temp)
+  ret void
+}
+
+%struct.padded = type { i32, i8, i32, i8 }
+define dso_local void @foo_padded(ptr noundef %x, i32 %a0, i8 %a1,
+; FLAT-LABEL: define dso_local void @foo_padded(
+; FLAT-SAME: ptr noundef [[X:%.*]], i32 [[A0:%.*]], i8 [[A1:%.*]], i32 
[[A2:%.*]], i8 [[A3:%.*]], i32 noundef [[COND:%.*]]) {
+; FLAT-NEXT:  [[ENTRY:.*:]]
+; FLAT-NEXT:    [[TEMP:%.*]] = alloca [[STRUCT_PADDED:%.*]], align 4
+; FLAT-NEXT:    [[ZERO:%.*]] = alloca [[STRUCT_PADDED]], align 4
+; FLAT-NEXT:    [[DATA:%.*]] = alloca [[STRUCT_PADDED]], align 4
+; FLAT-NEXT:    call void @llvm.lifetime.start.p0(ptr nonnull [[TEMP]])
+; FLAT-NEXT:    store i32 [[A0]], ptr [[TEMP]], align 4
+; FLAT-NEXT:    [[Y_SROA_2_0_TEMP_SROA_IDX:%.*]] = getelementptr inbounds nuw 
i8, ptr [[TEMP]], i64 4
+; FLAT-NEXT:    store i8 [[A1]], ptr [[Y_SROA_2_0_TEMP_SROA_IDX]], align 4
+; FLAT-NEXT:    [[Y_SROA_31_0_TEMP_SROA_IDX:%.*]] = getelementptr inbounds nuw 
i8, ptr [[TEMP]], i64 8
+; FLAT-NEXT:    store i32 [[A2]], ptr [[Y_SROA_31_0_TEMP_SROA_IDX]], align 4
+; FLAT-NEXT:    [[Y_SROA_4_0_TEMP_SROA_IDX:%.*]] = getelementptr inbounds nuw 
i8, ptr [[TEMP]], i64 12
+; FLAT-NEXT:    store i8 [[A3]], ptr [[Y_SROA_4_0_TEMP_SROA_IDX]], align 4
+; FLAT-NEXT:    call void @llvm.lifetime.start.p0(ptr nonnull [[ZERO]])
+; FLAT-NEXT:    call void @llvm.memset.p0.i64(ptr noundef nonnull align 4 
dereferenceable(16) [[ZERO]], i8 0, i64 16, i1 false)
+; FLAT-NEXT:    call void @llvm.lifetime.start.p0(ptr nonnull [[DATA]])
+; FLAT-NEXT:    [[TOBOOL_PAD_NOT:%.*]] = icmp eq i32 [[COND]], 0
+; FLAT-NEXT:    [[ZERO_TEMP:%.*]] = select i1 [[TOBOOL_PAD_NOT]], ptr 
[[ZERO]], ptr [[TEMP]]
+; FLAT-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr noundef nonnull align 4 
dereferenceable(16) [[DATA]], ptr noundef nonnull align 4 dereferenceable(16) 
[[ZERO_TEMP]], i64 16, i1 false)
+; FLAT-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr noundef nonnull align 4 
dereferenceable(16) [[X]], ptr noundef nonnull align 4 dereferenceable(16) 
[[DATA]], i64 16, i1 false)
+; FLAT-NEXT:    call void @llvm.lifetime.end.p0(ptr nonnull [[DATA]])
+; FLAT-NEXT:    call void @llvm.lifetime.end.p0(ptr nonnull [[ZERO]])
+; FLAT-NEXT:    call void @llvm.lifetime.end.p0(ptr nonnull [[TEMP]])
+; FLAT-NEXT:    ret void
+;
+  i32 %a2, i8 %a3,
+  i32 noundef %cond) {
+entry:
+  %y = alloca %struct.padded, align 4
+  %x.addr = alloca ptr, align 8
+  %cond.addr = alloca i32, align 4
+  %temp = alloca %struct.padded, align 4
+  %zero = alloca %struct.padded, align 4
+  %data = alloca %struct.padded, align 4
+  %y_i32_0 = getelementptr inbounds %struct.padded, ptr %y, i32 0, i32 0
+  store i32 %a0, ptr %y_i32_0, align 4
+  %y_i8_1 = getelementptr inbounds %struct.padded, ptr %y, i32 0, i32 1
+  store i8 %a1, ptr %y_i8_1, align 1
+  %y_i32_2 = getelementptr inbounds %struct.padded, ptr %y, i32 0, i32 2
+  store i32 %a2, ptr %y_i32_2, align 4
+  %y_i8_3 = getelementptr inbounds %struct.padded, ptr %y, i32 0, i32 3
+  store i8 %a3, ptr %y_i8_3, align 1
+  store ptr %x, ptr %x.addr, align 8
+  store i32 %cond, ptr %cond.addr, align 4
+  call void @llvm.lifetime.start.p0(ptr %temp)
+  call void @llvm.memcpy.p0.p0.i64(ptr align 4 %temp, ptr align 4 %y,
+  i64 16, i1 false)
+  call void @llvm.lifetime.start.p0(ptr %zero)
+  call void @llvm.memset.p0.i64(ptr align 4 %zero, i8 0, i64 16, i1 false)
+  call void @llvm.lifetime.start.p0(ptr %data)
+  %c.pad = load i32, ptr %cond.addr, align 4
+  %tobool.pad = icmp ne i32 %c.pad, 0
+  br i1 %tobool.pad, label %cond.true.pad, label %cond.false.pad
+
+cond.true.pad:
+  br label %cond.end.pad
+
+cond.false.pad:
+  br label %cond.end.pad
+
+cond.end.pad:
+  %cond1.pad = phi ptr [ %temp, %cond.true.pad ], [ %zero, %cond.false.pad ]
+  call void @llvm.memcpy.p0.p0.i64(ptr align 4 %data, ptr align 4 %cond1.pad,
+  i64 16, i1 false)
+  %xv.pad = load ptr, ptr %x.addr, align 8
+  call void @llvm.memcpy.p0.p0.i64(ptr align 4 %xv.pad, ptr align 4 %data,
+  i64 16, i1 false)
+  call void @llvm.lifetime.end.p0(ptr %data)
+  call void @llvm.lifetime.end.p0(ptr %zero)
+  call void @llvm.lifetime.end.p0(ptr %temp)
+  ret void
+}
+
+%struct.nonhomo = type { i32, i64, i32, i64 }
+define dso_local void @foo_nonhomo(ptr noundef %x, i32 %a0, i64 %a1,
+; FLAT-LABEL: define dso_local void @foo_nonhomo(
+; FLAT-SAME: ptr noundef [[X:%.*]], i32 [[A0:%.*]], i64 [[A1:%.*]], i32 
[[A2:%.*]], i64 [[A3:%.*]], i32 noundef [[COND:%.*]]) {
+; FLAT-NEXT:  [[ENTRY:.*:]]
+; FLAT-NEXT:    [[TEMP:%.*]] = alloca [[STRUCT_NONHOMO:%.*]], align 8
+; FLAT-NEXT:    [[ZERO:%.*]] = alloca [[STRUCT_NONHOMO]], align 8
+; FLAT-NEXT:    [[DATA:%.*]] = alloca [[STRUCT_NONHOMO]], align 8
+; FLAT-NEXT:    call void @llvm.lifetime.start.p0(ptr nonnull [[TEMP]])
+; FLAT-NEXT:    store i32 [[A0]], ptr [[TEMP]], align 8
+; FLAT-NEXT:    [[Y_SROA_2_0_TEMP_SROA_IDX:%.*]] = getelementptr inbounds nuw 
i8, ptr [[TEMP]], i64 4
+; FLAT-NEXT:    store i64 [[A1]], ptr [[Y_SROA_2_0_TEMP_SROA_IDX]], align 4
+; FLAT-NEXT:    [[Y_SROA_3_0_TEMP_SROA_IDX:%.*]] = getelementptr inbounds nuw 
i8, ptr [[TEMP]], i64 12
+; FLAT-NEXT:    store i32 [[A2]], ptr [[Y_SROA_3_0_TEMP_SROA_IDX]], align 4
+; FLAT-NEXT:    [[Y_SROA_4_0_TEMP_SROA_IDX:%.*]] = getelementptr inbounds nuw 
i8, ptr [[TEMP]], i64 16
+; FLAT-NEXT:    store i64 [[A3]], ptr [[Y_SROA_4_0_TEMP_SROA_IDX]], align 8
+; FLAT-NEXT:    call void @llvm.lifetime.start.p0(ptr nonnull [[ZERO]])
+; FLAT-NEXT:    call void @llvm.memset.p0.i64(ptr noundef nonnull align 8 
dereferenceable(32) [[ZERO]], i8 0, i64 32, i1 false)
+; FLAT-NEXT:    call void @llvm.lifetime.start.p0(ptr nonnull [[DATA]])
+; FLAT-NEXT:    [[TOBOOL_NH_NOT:%.*]] = icmp eq i32 [[COND]], 0
+; FLAT-NEXT:    [[ZERO_TEMP:%.*]] = select i1 [[TOBOOL_NH_NOT]], ptr [[ZERO]], 
ptr [[TEMP]]
+; FLAT-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr noundef nonnull align 8 
dereferenceable(32) [[DATA]], ptr noundef nonnull align 8 dereferenceable(32) 
[[ZERO_TEMP]], i64 32, i1 false)
+; FLAT-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr noundef nonnull align 8 
dereferenceable(32) [[X]], ptr noundef nonnull align 8 dereferenceable(32) 
[[DATA]], i64 32, i1 false)
+; FLAT-NEXT:    call void @llvm.lifetime.end.p0(ptr nonnull [[DATA]])
+; FLAT-NEXT:    call void @llvm.lifetime.end.p0(ptr nonnull [[ZERO]])
+; FLAT-NEXT:    call void @llvm.lifetime.end.p0(ptr nonnull [[TEMP]])
+; FLAT-NEXT:    ret void
+;
+  i32 %a2, i64 %a3,
+  i32 noundef %cond) {
+entry:
+  %y = alloca %struct.nonhomo, align 8
+  %x.addr = alloca ptr, align 8
+  %cond.addr = alloca i32, align 4
+  %temp = alloca %struct.nonhomo, align 8
+  %zero = alloca %struct.nonhomo, align 8
+  %data = alloca %struct.nonhomo, align 8
+  %y_i32_0n = getelementptr inbounds %struct.nonhomo, ptr %y, i32 0, i32 0
+  store i32 %a0, ptr %y_i32_0n, align 4
+  %y_i64_1n = getelementptr inbounds %struct.nonhomo, ptr %y, i32 0, i32 1
+  store i64 %a1, ptr %y_i64_1n, align 8
+  %y_i32_2n = getelementptr inbounds %struct.nonhomo, ptr %y, i32 0, i32 2
+  store i32 %a2, ptr %y_i32_2n, align 4
+  %y_i64_3n = getelementptr inbounds %struct.nonhomo, ptr %y, i32 0, i32 3
+  store i64 %a3, ptr %y_i64_3n, align 8
+  store ptr %x, ptr %x.addr, align 8
+  store i32 %cond, ptr %cond.addr, align 4
+  call void @llvm.lifetime.start.p0(ptr %temp)
+  call void @llvm.memcpy.p0.p0.i64(ptr align 8 %temp, ptr align 8 %y,
+  i64 32, i1 false)
+  call void @llvm.lifetime.start.p0(ptr %zero)
+  call void @llvm.memset.p0.i64(ptr align 8 %zero, i8 0, i64 32, i1 false)
+  call void @llvm.lifetime.start.p0(ptr %data)
+  %c.nh = load i32, ptr %cond.addr, align 4
+  %tobool.nh = icmp ne i32 %c.nh, 0
+  br i1 %tobool.nh, label %cond.true.nh, label %cond.false.nh
+
+cond.true.nh:
+  br label %cond.end.nh
+
+cond.false.nh:
+  br label %cond.end.nh
+
+cond.end.nh:
+  %cond1.nh = phi ptr [ %temp, %cond.true.nh ], [ %zero, %cond.false.nh ]
+  call void @llvm.memcpy.p0.p0.i64(ptr align 8 %data, ptr align 8 %cond1.nh,
+  i64 32, i1 false)
+  %xv.nh = load ptr, ptr %x.addr, align 8
+  call void @llvm.memcpy.p0.p0.i64(ptr align 8 %xv.nh, ptr align 8 %data,
+  i64 32, i1 false)
+  call void @llvm.lifetime.end.p0(ptr %data)
+  call void @llvm.lifetime.end.p0(ptr %zero)
+  call void @llvm.lifetime.end.p0(ptr %temp)
+  ret void
+}
+
+%struct.i1x4 = type { i1, i1, i1, i1 }
+define dso_local void @foo_i1(ptr noundef %x, i64 %dummy0, i64 %dummy1,
+; FLAT-LABEL: define dso_local void @foo_i1(
+; FLAT-SAME: ptr noundef [[X:%.*]], i64 [[DUMMY0:%.*]], i64 [[DUMMY1:%.*]], 
i32 noundef [[COND:%.*]]) {
+; FLAT-NEXT:  [[ENTRY:.*:]]
+; FLAT-NEXT:    [[TEMP:%.*]] = alloca [[STRUCT_I1X4:%.*]], align 1
+; FLAT-NEXT:    [[ZERO:%.*]] = alloca [[STRUCT_I1X4]], align 1
+; FLAT-NEXT:    call void @llvm.lifetime.start.p0(ptr nonnull [[TEMP]])
+; FLAT-NEXT:    call void @llvm.lifetime.start.p0(ptr nonnull [[ZERO]])
+; FLAT-NEXT:    store i32 0, ptr [[ZERO]], align 1
+; FLAT-NEXT:    [[TOBOOL_I1_NOT:%.*]] = icmp eq i32 [[COND]], 0
+; FLAT-NEXT:    [[ZERO_TEMP:%.*]] = select i1 [[TOBOOL_I1_NOT]], ptr [[ZERO]], 
ptr [[TEMP]]
+; FLAT-NEXT:    [[TMP0:%.*]] = load i32, ptr [[ZERO_TEMP]], align 1
+; FLAT-NEXT:    store i32 [[TMP0]], ptr [[X]], align 1
+; FLAT-NEXT:    call void @llvm.lifetime.end.p0(ptr nonnull [[ZERO]])
+; FLAT-NEXT:    call void @llvm.lifetime.end.p0(ptr nonnull [[TEMP]])
+; FLAT-NEXT:    ret void
+;
+  i32 noundef %cond) {
+entry:
+  %y = alloca %struct.i1x4, align 1
+  %x.addr = alloca ptr, align 8
+  %cond.addr = alloca i32, align 4
+  %temp = alloca %struct.i1x4, align 1
+  %zero = alloca %struct.i1x4, align 1
+  %data = alloca %struct.i1x4, align 1
+  store ptr %x, ptr %x.addr, align 8
+  store i32 %cond, ptr %cond.addr, align 4
+  call void @llvm.lifetime.start.p0(ptr %temp)
+  call void @llvm.memcpy.p0.p0.i64(ptr align 1 %temp, ptr align 1 %y,
+  i64 4, i1 false)
+  call void @llvm.lifetime.start.p0(ptr %zero)
+  call void @llvm.memset.p0.i64(ptr align 1 %zero, i8 0, i64 4, i1 false)
+  call void @llvm.lifetime.start.p0(ptr %data)
+  %c.i1 = load i32, ptr %cond.addr, align 4
+  %tobool.i1 = icmp ne i32 %c.i1, 0
+  br i1 %tobool.i1, label %cond.true.i1, label %cond.false.i1
+
+cond.true.i1:
+  br label %cond.end.i1
+
+cond.false.i1:
+  br label %cond.end.i1
+
+cond.end.i1:
+  %cond1.i1 = phi ptr [ %temp, %cond.true.i1 ], [ %zero, %cond.false.i1 ]
+  call void @llvm.memcpy.p0.p0.i64(ptr align 1 %data, ptr align 1 %cond1.i1,
+  i64 4, i1 false)
+  %xv.i1 = load ptr, ptr %x.addr, align 8
+  call void @llvm.memcpy.p0.p0.i64(ptr align 1 %xv.i1, ptr align 1 %data,
+  i64 4, i1 false)
+  call void @llvm.lifetime.end.p0(ptr %data)
+  call void @llvm.lifetime.end.p0(ptr %zero)
+  call void @llvm.lifetime.end.p0(ptr %temp)
+  ret void
+}
+
+%struct.ptr4 = type { ptr, ptr, ptr, ptr }
+define dso_local void @foo_ptr(ptr noundef %x, ptr %p0, ptr %p1,
+; FLAT-LABEL: define dso_local void @foo_ptr(
+; FLAT-SAME: ptr noundef [[X:%.*]], ptr [[P0:%.*]], ptr [[P1:%.*]], ptr 
[[P2:%.*]], ptr [[P3:%.*]], i32 noundef [[COND:%.*]]) {
+; FLAT-NEXT:  [[ENTRY:.*:]]
+; FLAT-NEXT:    [[TEMP:%.*]] = alloca [[STRUCT_PTR4:%.*]], align 8
+; FLAT-NEXT:    [[ZERO:%.*]] = alloca [[STRUCT_PTR4]], align 8
+; FLAT-NEXT:    [[DATA:%.*]] = alloca [[STRUCT_PTR4]], align 8
+; FLAT-NEXT:    call void @llvm.lifetime.start.p0(ptr nonnull [[TEMP]])
+; FLAT-NEXT:    store ptr [[P0]], ptr [[TEMP]], align 8
+; FLAT-NEXT:    [[Y_SROA_2_0_TEMP_SROA_IDX:%.*]] = getelementptr inbounds nuw 
i8, ptr [[TEMP]], i64 8
+; FLAT-NEXT:    store ptr [[P1]], ptr [[Y_SROA_2_0_TEMP_SROA_IDX]], align 8
+; FLAT-NEXT:    [[Y_SROA_3_0_TEMP_SROA_IDX:%.*]] = getelementptr inbounds nuw 
i8, ptr [[TEMP]], i64 16
+; FLAT-NEXT:    store ptr [[P2]], ptr [[Y_SROA_3_0_TEMP_SROA_IDX]], align 8
+; FLAT-NEXT:    [[Y_SROA_4_0_TEMP_SROA_IDX:%.*]] = getelementptr inbounds nuw 
i8, ptr [[TEMP]], i64 24
+; FLAT-NEXT:    store ptr [[P3]], ptr [[Y_SROA_4_0_TEMP_SROA_IDX]], align 8
+; FLAT-NEXT:    call void @llvm.lifetime.start.p0(ptr nonnull [[ZERO]])
+; FLAT-NEXT:    call void @llvm.memset.p0.i64(ptr noundef nonnull align 8 
dereferenceable(32) [[ZERO]], i8 0, i64 32, i1 false)
+; FLAT-NEXT:    call void @llvm.lifetime.start.p0(ptr nonnull [[DATA]])
+; FLAT-NEXT:    [[TOBOOL_PTR_NOT:%.*]] = icmp eq i32 [[COND]], 0
+; FLAT-NEXT:    [[ZERO_TEMP:%.*]] = select i1 [[TOBOOL_PTR_NOT]], ptr 
[[ZERO]], ptr [[TEMP]]
+; FLAT-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr noundef nonnull align 8 
dereferenceable(32) [[DATA]], ptr noundef nonnull align 8 dereferenceable(32) 
[[ZERO_TEMP]], i64 32, i1 false)
+; FLAT-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr noundef nonnull align 8 
dereferenceable(32) [[X]], ptr noundef nonnull align 8 dereferenceable(32) 
[[DATA]], i64 32, i1 false)
+; FLAT-NEXT:    call void @llvm.lifetime.end.p0(ptr nonnull [[DATA]])
+; FLAT-NEXT:    call void @llvm.lifetime.end.p0(ptr nonnull [[ZERO]])
+; FLAT-NEXT:    call void @llvm.lifetime.end.p0(ptr nonnull [[TEMP]])
+; FLAT-NEXT:    ret void
+;
+  ptr %p2, ptr %p3,
+  i32 noundef %cond) {
+entry:
+  %y = alloca %struct.ptr4, align 8
+  %x.addr = alloca ptr, align 8
+  %cond.addr = alloca i32, align 4
+  %temp = alloca %struct.ptr4, align 8
+  %zero = alloca %struct.ptr4, align 8
+  %data = alloca %struct.ptr4, align 8
+  %y_p0 = getelementptr inbounds %struct.ptr4, ptr %y, i32 0, i32 0
+  store ptr %p0, ptr %y_p0, align 8
+  %y_p1 = getelementptr inbounds %struct.ptr4, ptr %y, i32 0, i32 1
+  store ptr %p1, ptr %y_p1, align 8
+  %y_p2 = getelementptr inbounds %struct.ptr4, ptr %y, i32 0, i32 2
+  store ptr %p2, ptr %y_p2, align 8
+  %y_p3 = getelementptr inbounds %struct.ptr4, ptr %y, i32 0, i32 3
+  store ptr %p3, ptr %y_p3, align 8
+  store ptr %x, ptr %x.addr, align 8
+  store i32 %cond, ptr %cond.addr, align 4
+  call void @llvm.lifetime.start.p0(ptr %temp)
+  call void @llvm.memcpy.p0.p0.i64(ptr align 8 %temp, ptr align 8 %y,
+  i64 32, i1 false)
+  call void @llvm.lifetime.start.p0(ptr %zero)
+  call void @llvm.memset.p0.i64(ptr align 8 %zero, i8 0, i64 32, i1 false)
+  call void @llvm.lifetime.start.p0(ptr %data)
+  %c.ptr = load i32, ptr %cond.addr, align 4
+  %tobool.ptr = icmp ne i32 %c.ptr, 0
+  br i1 %tobool.ptr, label %cond.true.ptr, label %cond.false.ptr
+
+cond.true.ptr:
+  br label %cond.end.ptr
+
+cond.false.ptr:
+  br label %cond.end.ptr
+
+cond.end.ptr:
+  %cond1.ptr = phi ptr [ %temp, %cond.true.ptr ], [ %zero, %cond.false.ptr ]
+  call void @llvm.memcpy.p0.p0.i64(ptr align 8 %data, ptr align 8 %cond1.ptr,
+  i64 32, i1 false)
+  %xv.ptr = load ptr, ptr %x.addr, align 8
+  call void @llvm.memcpy.p0.p0.i64(ptr align 8 %xv.ptr, ptr align 8 %data,
+  i64 32, i1 false)
+  call void @llvm.lifetime.end.p0(ptr %data)
+  call void @llvm.lifetime.end.p0(ptr %zero)
+  call void @llvm.lifetime.end.p0(ptr %temp)
+  ret void
+}
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add 
tests below this line:
+; I1: {{.*}}
+; NESTED: {{.*}}
+; NONHOMO: {{.*}}
+; PADDED: {{.*}}
+; PTR: {{.*}}
diff --git a/llvm/test/Transforms/SROA/tbaa-struct3.ll 
b/llvm/test/Transforms/SROA/tbaa-struct3.ll
index 6a0cacc7016f7..97e82db27c378 100644
--- a/llvm/test/Transforms/SROA/tbaa-struct3.ll
+++ b/llvm/test/Transforms/SROA/tbaa-struct3.ll
@@ -73,12 +73,13 @@ define void 
@load_store_transfer_split_struct_tbaa_2_i31(ptr dereferenceable(24)
 ; CHECK-LABEL: define void @load_store_transfer_split_struct_tbaa_2_i31(
 ; CHECK-SAME: ptr dereferenceable(24) [[RES:%.*]], i31 [[A:%.*]], i31 
[[B:%.*]]) {
 ; CHECK-NEXT:  [[ENTRY:.*:]]
-; CHECK-NEXT:    [[TMP:%.*]] = alloca { i31, i31 }, align 4
-; CHECK-NEXT:    store i31 [[A]], ptr [[TMP]], align 4
+; CHECK-NEXT:    [[TMP:%.*]] = alloca <2 x i31>, align 8
+; CHECK-NEXT:    store i31 [[A]], ptr [[TMP]], align 8
 ; CHECK-NEXT:    [[TMP_4_TMP_4_SROA_IDX:%.*]] = getelementptr inbounds i8, ptr 
[[TMP]], i64 4
 ; CHECK-NEXT:    store i31 [[B]], ptr [[TMP_4_TMP_4_SROA_IDX]], align 4
-; CHECK-NEXT:    [[TMP_0_L1:%.*]] = load i62, ptr [[TMP]], align 4, 
!tbaa.struct [[TBAA_STRUCT4:![0-9]+]]
-; CHECK-NEXT:    store i62 [[TMP_0_L1]], ptr [[RES]], align 4, !tbaa.struct 
[[TBAA_STRUCT4]]
+; CHECK-NEXT:    [[TMP_SROA_0_0_TMP_SROA_0_0_L1:%.*]] = load <2 x i31>, ptr 
[[TMP]], align 8, !tbaa.struct [[TBAA_STRUCT4:![0-9]+]]
+; CHECK-NEXT:    [[TMP0:%.*]] = bitcast <2 x i31> 
[[TMP_SROA_0_0_TMP_SROA_0_0_L1]] to i62
+; CHECK-NEXT:    store i62 [[TMP0]], ptr [[RES]], align 4, !tbaa.struct 
[[TBAA_STRUCT4]]
 ; CHECK-NEXT:    ret void
 ;
 entry:

>From 9b9f310f7cd8296c7387dc1f5745e41f52fabe60 Mon Sep 17 00:00:00 2001
From: "Yaxun (Sam) Liu" <[email protected]>
Date: Tue, 24 Feb 2026 17:01:15 -0500
Subject: [PATCH 2/7] [SROA] Update test to expect vector store from
 struct-to-vector optimization

The struct-to-vector optimization in SROA converts memset of homogeneous
structs to vector stores. Update the OpenCL nullptr test to expect
<4 x i64> vector store for the 4-long struct initialization.
---
 clang/test/CodeGenOpenCL/nullptr.cl | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/clang/test/CodeGenOpenCL/nullptr.cl 
b/clang/test/CodeGenOpenCL/nullptr.cl
index 976e12c0bef47..f45df110ec243 100644
--- a/clang/test/CodeGenOpenCL/nullptr.cl
+++ b/clang/test/CodeGenOpenCL/nullptr.cl
@@ -597,10 +597,10 @@ typedef struct {
 } StructTy3;
 
 // CHECK-LABEL: test_memset_private
-// SPIR64: call void @llvm.memset.p0.i64(ptr noundef nonnull align 8 
dereferenceable(32) %ptr, i8 0, i64 32, i1 false)
+// SPIR64: store <4 x i64> zeroinitializer, ptr %ptr, align 8
 // SPIR64: [[GEP:%.*]] = getelementptr inbounds nuw i8, ptr %ptr, i64 32
 // SPIR64: store ptr addrspacecast (ptr addrspace(4) null to ptr), ptr 
[[GEP]], align 8
-// AMDGCN: call void @llvm.memset.p5.i64(ptr addrspace(5) noundef align 8 
{{.*}}, i8 0, i64 32, i1 false)
+// AMDGCN: store <4 x i64> zeroinitializer, ptr addrspace(5) %ptr, align 8
 // AMDGCN: [[GEP:%.*]] = getelementptr inbounds nuw i8, ptr addrspace(5) %ptr, 
i32 32
 // AMDGCN: store ptr addrspace(5) addrspacecast (ptr null to ptr 
addrspace(5)), ptr addrspace(5) [[GEP]]
 // AMDGCN: [[GEP1:%.*]] = getelementptr inbounds nuw i8, ptr addrspace(5) 
{{.*}}, i32 36

>From 64c54cd67bb0d5fa88d016ccb16bd7b2c2ff6712 Mon Sep 17 00:00:00 2001
From: "Yaxun (Sam) Liu" <[email protected]>
Date: Fri, 27 Feb 2026 11:21:30 -0500
Subject: [PATCH 3/7] [SROA] Restrict struct-to-vector conversion to avoid code
 size regression

Only apply tryCanonicalizeStructToVector when:
1. The partition type matches the alloca type (not a synthetic sub-struct
   from getTypePartition for a sub-partition), AND
2. The alloca is involved in phi/select patterns or has non-splittable
   typed uses that benefit from vectorization.

Without these guards, converting a sub-partition's struct type (e.g.,
{ i64, i64 } from [16,32) of { ptr, i64, i64, i64 }) to a vector type
like <2 x i64> propagates through memcpy splits to other allocas,
causing insertelement/extractelement overhead and altering inlining
decisions. This was observed as a +10 line regression in the delta-rs
benchmark from llvm-opt-benchmark.

Adds a regression test for the sub-partition case.
---
 llvm/lib/Transforms/Scalar/SROA.cpp           | 35 +++++++++++++--
 .../SROA/struct-to-vector-subpartition.ll     | 45 +++++++++++++++++++
 2 files changed, 76 insertions(+), 4 deletions(-)
 create mode 100644 llvm/test/Transforms/SROA/struct-to-vector-subpartition.ll

diff --git a/llvm/lib/Transforms/Scalar/SROA.cpp 
b/llvm/lib/Transforms/Scalar/SROA.cpp
index b98541c0de899..0c04f5fe268a4 100644
--- a/llvm/lib/Transforms/Scalar/SROA.cpp
+++ b/llvm/lib/Transforms/Scalar/SROA.cpp
@@ -5252,10 +5252,37 @@ selectPartitionType(Partition &P, const DataLayout &DL, 
AllocaInst &AI,
         isIntegerWideningViable(P, LargestIntTy, DL))
       return {LargestIntTy, true, nullptr};
 
-    // Try homogeneous struct to vector canonicalization.
-    if (auto *STy = dyn_cast<StructType>(TypePartitionTy))
-      if (auto *VTy = tryCanonicalizeStructToVector(STy, DL))
-        return {VTy, false, nullptr};
+    // Try homogeneous struct to vector canonicalization, but only when:
+    // 1. The partition type matches the alloca type (not a synthetic
+    //    sub-struct from getTypePartition for a sub-partition), AND
+    // 2. The conversion would actually benefit from vectorization: either
+    //    the alloca is involved in phi/select patterns (enabling
+    //    speculation), or the partition has non-splittable typed uses.
+    //
+    // When all uses are splittable (memcpy/lifetime only) and there's no
+    // phi/select involvement, converting to vector just changes memcpy
+    // split types without enabling promotion, propagating vector types to
+    // other allocas and causing insertelement/extractelement overhead.
+    if (TypePartitionTy == AI.getAllocatedType()) {
+      bool HasNonSplittable =
+          any_of(P, [](const Slice &S) { return !S.isSplittable(); });
+      bool ShouldConvert = HasNonSplittable;
+      if (!ShouldConvert) {
+        ShouldConvert = any_of(AI.users(), [&AI](const User *U) {
+          if (isa<PHINode>(U) || isa<SelectInst>(U))
+            return true;
+          if (const auto *MI = dyn_cast<MemTransferInst>(U))
+            for (const Value *Op : {MI->getRawSource(), MI->getRawDest()})
+              if (Op != &AI && (isa<PHINode>(Op) || isa<SelectInst>(Op)))
+                return true;
+          return false;
+        });
+      }
+      if (ShouldConvert)
+        if (auto *STy = dyn_cast<StructType>(TypePartitionTy))
+          if (auto *VTy = tryCanonicalizeStructToVector(STy, DL))
+            return {VTy, false, nullptr};
+    }
 
     // Fallback to TypePartitionTy and we probably won't promote.
     return {TypePartitionTy, false, nullptr};
diff --git a/llvm/test/Transforms/SROA/struct-to-vector-subpartition.ll 
b/llvm/test/Transforms/SROA/struct-to-vector-subpartition.ll
new file mode 100644
index 0000000000000..52263415844bd
--- /dev/null
+++ b/llvm/test/Transforms/SROA/struct-to-vector-subpartition.ll
@@ -0,0 +1,45 @@
+; RUN: opt -passes=sroa -S %s | FileCheck %s
+
+target datalayout = 
"e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+; When SROA splits { ptr, i64, i64, i64 } into [0,8), [8,16), [16,32),
+; the [16,32) partition type from getTypePartition is { i64, i64 }.
+; tryCanonicalizeStructToVector should NOT convert this to <2 x i64>
+; when the partition is in the non-promotable fallback path, because the
+; <2 x i64> type propagates through memcpy splits to other allocas and
+; causes insertelement overhead in downstream SROA iterations.
+;
+; This test verifies the [16,32) partition type stays as { i64, i64 }
+; (or equivalent scalar representation), not <2 x i64>.
+;
+; Regression test for PR #165159 delta-rs benchmark regression.
+
+; The alloca for the [16,32) partition should NOT use <2 x i64> type.
+; With the fix, SROA should keep { i64, i64 } and use memcpy.
+; CHECK-LABEL: define void @test_subpartition_type(
+; CHECK-NOT: <2 x i64>
+define void @test_subpartition_type(ptr %src, ptr %dst) {
+entry:
+  %a = alloca { ptr, i64, i64, i64 }, align 8
+  call void @llvm.lifetime.start.p0(ptr %a)
+
+  ; Copy all 32 bytes from src into %a (splittable)
+  call void @llvm.memcpy.p0.p0.i64(ptr align 8 %a, ptr align 8 %src, i64 32, 
i1 false)
+
+  ; Load ptr at [0,8) -- forces partition boundary at 8
+  %p = load ptr, ptr %a, align 8
+
+  ; Load i64 at [8,16) -- forces partition boundary at 16
+  %gep.a.8 = getelementptr inbounds i8, ptr %a, i64 8
+  %v1 = load i64, ptr %gep.a.8, align 8
+
+  ; Only splittable memcpy uses touch [16,32), so SROA creates a single
+  ; [16,32) partition. getTypePartition returns { i64, i64 } for this.
+
+  ; Copy all 32 bytes from %a to dst (splittable)
+  call void @llvm.memcpy.p0.p0.i64(ptr align 8 %dst, ptr align 8 %a, i64 32, 
i1 false)
+
+  call void @llvm.lifetime.end.p0(ptr %a)
+  ret void
+}

>From f0f2b0dd4f87e2e5700004cd72a83c97d18df367 Mon Sep 17 00:00:00 2001
From: "Yaxun (Sam) Liu" <[email protected]>
Date: Fri, 27 Feb 2026 14:26:55 -0500
Subject: [PATCH 4/7] [SROA] Simplify struct-to-vector.ll test prefixes

Replace 6 redundant check prefixes (FLAT,NESTED,PADDED,NONHOMO,I1,PTR)
with a single CHECK prefix. All functions shared the same RUN line, so
only FLAT was used; the rest were autogenerated as unused stubs.

Regenerated with update_test_checks.py.
---
 llvm/test/Transforms/SROA/struct-to-vector.ll | 230 +++++++++---------
 1 file changed, 111 insertions(+), 119 deletions(-)

diff --git a/llvm/test/Transforms/SROA/struct-to-vector.ll 
b/llvm/test/Transforms/SROA/struct-to-vector.ll
index 7bcde59f14b30..a4f68c53952ab 100644
--- a/llvm/test/Transforms/SROA/struct-to-vector.ll
+++ b/llvm/test/Transforms/SROA/struct-to-vector.ll
@@ -1,20 +1,18 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py 
UTC_ARGS: --version 6
-; RUN: opt -passes='sroa,gvn,instcombine,simplifycfg' -S %s \
-; RUN:   | FileCheck %s \
-; RUN:       --check-prefixes=FLAT,NESTED,PADDED,NONHOMO,I1,PTR
+; RUN: opt -passes='sroa,gvn,instcombine,simplifycfg' -S %s | FileCheck %s
 %struct.myint4 = type { i32, i32, i32, i32 }
 
 define dso_local void @foo_flat(ptr noundef %x, i64 %y.coerce0, i64 
%y.coerce1, i32 noundef %cond) {
-; FLAT-LABEL: define dso_local void @foo_flat(
-; FLAT-SAME: ptr noundef [[X:%.*]], i64 [[Y_COERCE0:%.*]], i64 
[[Y_COERCE1:%.*]], i32 noundef [[COND:%.*]]) {
-; FLAT-NEXT:  [[ENTRY:.*:]]
-; FLAT-NEXT:    [[TOBOOL_NOT:%.*]] = icmp eq i32 [[COND]], 0
-; FLAT-NEXT:    [[Y_SROA_0_0_VEC_INSERT:%.*]] = insertelement <2 x i64> 
poison, i64 [[Y_COERCE0]], i64 0
-; FLAT-NEXT:    [[Y_SROA_0_8_VEC_INSERT:%.*]] = insertelement <2 x i64> 
[[Y_SROA_0_0_VEC_INSERT]], i64 [[Y_COERCE1]], i64 1
-; FLAT-NEXT:    [[TMP0:%.*]] = bitcast <2 x i64> [[Y_SROA_0_8_VEC_INSERT]] to 
<4 x i32>
-; FLAT-NEXT:    [[COND1_SROA_SPECULATED:%.*]] = select i1 [[TOBOOL_NOT]], <4 x 
i32> zeroinitializer, <4 x i32> [[TMP0]]
-; FLAT-NEXT:    store <4 x i32> [[COND1_SROA_SPECULATED]], ptr [[X]], align 16
-; FLAT-NEXT:    ret void
+; CHECK-LABEL: define dso_local void @foo_flat(
+; CHECK-SAME: ptr noundef [[X:%.*]], i64 [[Y_COERCE0:%.*]], i64 
[[Y_COERCE1:%.*]], i32 noundef [[COND:%.*]]) {
+; CHECK-NEXT:  [[ENTRY:.*:]]
+; CHECK-NEXT:    [[TOBOOL_NOT:%.*]] = icmp eq i32 [[COND]], 0
+; CHECK-NEXT:    [[Y_SROA_0_0_VEC_INSERT:%.*]] = insertelement <2 x i64> 
poison, i64 [[Y_COERCE0]], i64 0
+; CHECK-NEXT:    [[Y_SROA_0_8_VEC_INSERT:%.*]] = insertelement <2 x i64> 
[[Y_SROA_0_0_VEC_INSERT]], i64 [[Y_COERCE1]], i64 1
+; CHECK-NEXT:    [[TMP0:%.*]] = bitcast <2 x i64> [[Y_SROA_0_8_VEC_INSERT]] to 
<4 x i32>
+; CHECK-NEXT:    [[COND1_SROA_SPECULATED:%.*]] = select i1 [[TOBOOL_NOT]], <4 
x i32> zeroinitializer, <4 x i32> [[TMP0]]
+; CHECK-NEXT:    store <4 x i32> [[COND1_SROA_SPECULATED]], ptr [[X]], align 16
+; CHECK-NEXT:    ret void
 ;
 entry:
   %y = alloca %struct.myint4, align 16
@@ -58,16 +56,16 @@ cond.end:
 %struct.myint4_nested = type { %struct.myint4_base_n }
 
 define dso_local void @foo_nested(ptr noundef %x, i64 %y.coerce0, i64 
%y.coerce1, i32 noundef %cond) {
-; FLAT-LABEL: define dso_local void @foo_nested(
-; FLAT-SAME: ptr noundef [[X:%.*]], i64 [[Y_COERCE0:%.*]], i64 
[[Y_COERCE1:%.*]], i32 noundef [[COND:%.*]]) {
-; FLAT-NEXT:  [[ENTRY:.*:]]
-; FLAT-NEXT:    [[TOBOOL_NOT:%.*]] = icmp eq i32 [[COND]], 0
-; FLAT-NEXT:    [[Y_SROA_0_0_VEC_INSERT:%.*]] = insertelement <2 x i64> 
poison, i64 [[Y_COERCE0]], i64 0
-; FLAT-NEXT:    [[Y_SROA_0_8_VEC_INSERT:%.*]] = insertelement <2 x i64> 
[[Y_SROA_0_0_VEC_INSERT]], i64 [[Y_COERCE1]], i64 1
-; FLAT-NEXT:    [[TMP0:%.*]] = bitcast <2 x i64> [[Y_SROA_0_8_VEC_INSERT]] to 
<4 x i32>
-; FLAT-NEXT:    [[COND1_SROA_SPECULATED:%.*]] = select i1 [[TOBOOL_NOT]], <4 x 
i32> zeroinitializer, <4 x i32> [[TMP0]]
-; FLAT-NEXT:    store <4 x i32> [[COND1_SROA_SPECULATED]], ptr [[X]], align 16
-; FLAT-NEXT:    ret void
+; CHECK-LABEL: define dso_local void @foo_nested(
+; CHECK-SAME: ptr noundef [[X:%.*]], i64 [[Y_COERCE0:%.*]], i64 
[[Y_COERCE1:%.*]], i32 noundef [[COND:%.*]]) {
+; CHECK-NEXT:  [[ENTRY:.*:]]
+; CHECK-NEXT:    [[TOBOOL_NOT:%.*]] = icmp eq i32 [[COND]], 0
+; CHECK-NEXT:    [[Y_SROA_0_0_VEC_INSERT:%.*]] = insertelement <2 x i64> 
poison, i64 [[Y_COERCE0]], i64 0
+; CHECK-NEXT:    [[Y_SROA_0_8_VEC_INSERT:%.*]] = insertelement <2 x i64> 
[[Y_SROA_0_0_VEC_INSERT]], i64 [[Y_COERCE1]], i64 1
+; CHECK-NEXT:    [[TMP0:%.*]] = bitcast <2 x i64> [[Y_SROA_0_8_VEC_INSERT]] to 
<4 x i32>
+; CHECK-NEXT:    [[COND1_SROA_SPECULATED:%.*]] = select i1 [[TOBOOL_NOT]], <4 
x i32> zeroinitializer, <4 x i32> [[TMP0]]
+; CHECK-NEXT:    store <4 x i32> [[COND1_SROA_SPECULATED]], ptr [[X]], align 16
+; CHECK-NEXT:    ret void
 ;
 entry:
   %y = alloca %struct.myint4_nested, align 16
@@ -110,31 +108,31 @@ cond.end:
 
 %struct.padded = type { i32, i8, i32, i8 }
 define dso_local void @foo_padded(ptr noundef %x, i32 %a0, i8 %a1,
-; FLAT-LABEL: define dso_local void @foo_padded(
-; FLAT-SAME: ptr noundef [[X:%.*]], i32 [[A0:%.*]], i8 [[A1:%.*]], i32 
[[A2:%.*]], i8 [[A3:%.*]], i32 noundef [[COND:%.*]]) {
-; FLAT-NEXT:  [[ENTRY:.*:]]
-; FLAT-NEXT:    [[TEMP:%.*]] = alloca [[STRUCT_PADDED:%.*]], align 4
-; FLAT-NEXT:    [[ZERO:%.*]] = alloca [[STRUCT_PADDED]], align 4
-; FLAT-NEXT:    [[DATA:%.*]] = alloca [[STRUCT_PADDED]], align 4
-; FLAT-NEXT:    call void @llvm.lifetime.start.p0(ptr nonnull [[TEMP]])
-; FLAT-NEXT:    store i32 [[A0]], ptr [[TEMP]], align 4
-; FLAT-NEXT:    [[Y_SROA_2_0_TEMP_SROA_IDX:%.*]] = getelementptr inbounds nuw 
i8, ptr [[TEMP]], i64 4
-; FLAT-NEXT:    store i8 [[A1]], ptr [[Y_SROA_2_0_TEMP_SROA_IDX]], align 4
-; FLAT-NEXT:    [[Y_SROA_31_0_TEMP_SROA_IDX:%.*]] = getelementptr inbounds nuw 
i8, ptr [[TEMP]], i64 8
-; FLAT-NEXT:    store i32 [[A2]], ptr [[Y_SROA_31_0_TEMP_SROA_IDX]], align 4
-; FLAT-NEXT:    [[Y_SROA_4_0_TEMP_SROA_IDX:%.*]] = getelementptr inbounds nuw 
i8, ptr [[TEMP]], i64 12
-; FLAT-NEXT:    store i8 [[A3]], ptr [[Y_SROA_4_0_TEMP_SROA_IDX]], align 4
-; FLAT-NEXT:    call void @llvm.lifetime.start.p0(ptr nonnull [[ZERO]])
-; FLAT-NEXT:    call void @llvm.memset.p0.i64(ptr noundef nonnull align 4 
dereferenceable(16) [[ZERO]], i8 0, i64 16, i1 false)
-; FLAT-NEXT:    call void @llvm.lifetime.start.p0(ptr nonnull [[DATA]])
-; FLAT-NEXT:    [[TOBOOL_PAD_NOT:%.*]] = icmp eq i32 [[COND]], 0
-; FLAT-NEXT:    [[ZERO_TEMP:%.*]] = select i1 [[TOBOOL_PAD_NOT]], ptr 
[[ZERO]], ptr [[TEMP]]
-; FLAT-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr noundef nonnull align 4 
dereferenceable(16) [[DATA]], ptr noundef nonnull align 4 dereferenceable(16) 
[[ZERO_TEMP]], i64 16, i1 false)
-; FLAT-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr noundef nonnull align 4 
dereferenceable(16) [[X]], ptr noundef nonnull align 4 dereferenceable(16) 
[[DATA]], i64 16, i1 false)
-; FLAT-NEXT:    call void @llvm.lifetime.end.p0(ptr nonnull [[DATA]])
-; FLAT-NEXT:    call void @llvm.lifetime.end.p0(ptr nonnull [[ZERO]])
-; FLAT-NEXT:    call void @llvm.lifetime.end.p0(ptr nonnull [[TEMP]])
-; FLAT-NEXT:    ret void
+; CHECK-LABEL: define dso_local void @foo_padded(
+; CHECK-SAME: ptr noundef [[X:%.*]], i32 [[A0:%.*]], i8 [[A1:%.*]], i32 
[[A2:%.*]], i8 [[A3:%.*]], i32 noundef [[COND:%.*]]) {
+; CHECK-NEXT:  [[ENTRY:.*:]]
+; CHECK-NEXT:    [[TEMP:%.*]] = alloca [[STRUCT_PADDED:%.*]], align 4
+; CHECK-NEXT:    [[ZERO:%.*]] = alloca [[STRUCT_PADDED]], align 4
+; CHECK-NEXT:    [[DATA:%.*]] = alloca [[STRUCT_PADDED]], align 4
+; CHECK-NEXT:    call void @llvm.lifetime.start.p0(ptr nonnull [[TEMP]])
+; CHECK-NEXT:    store i32 [[A0]], ptr [[TEMP]], align 4
+; CHECK-NEXT:    [[Y_SROA_2_0_TEMP_SROA_IDX:%.*]] = getelementptr inbounds nuw 
i8, ptr [[TEMP]], i64 4
+; CHECK-NEXT:    store i8 [[A1]], ptr [[Y_SROA_2_0_TEMP_SROA_IDX]], align 4
+; CHECK-NEXT:    [[Y_SROA_31_0_TEMP_SROA_IDX:%.*]] = getelementptr inbounds 
nuw i8, ptr [[TEMP]], i64 8
+; CHECK-NEXT:    store i32 [[A2]], ptr [[Y_SROA_31_0_TEMP_SROA_IDX]], align 4
+; CHECK-NEXT:    [[Y_SROA_4_0_TEMP_SROA_IDX:%.*]] = getelementptr inbounds nuw 
i8, ptr [[TEMP]], i64 12
+; CHECK-NEXT:    store i8 [[A3]], ptr [[Y_SROA_4_0_TEMP_SROA_IDX]], align 4
+; CHECK-NEXT:    call void @llvm.lifetime.start.p0(ptr nonnull [[ZERO]])
+; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr noundef nonnull align 4 
dereferenceable(16) [[ZERO]], i8 0, i64 16, i1 false)
+; CHECK-NEXT:    call void @llvm.lifetime.start.p0(ptr nonnull [[DATA]])
+; CHECK-NEXT:    [[TOBOOL_PAD_NOT:%.*]] = icmp eq i32 [[COND]], 0
+; CHECK-NEXT:    [[ZERO_TEMP:%.*]] = select i1 [[TOBOOL_PAD_NOT]], ptr 
[[ZERO]], ptr [[TEMP]]
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr noundef nonnull align 4 
dereferenceable(16) [[DATA]], ptr noundef nonnull align 4 dereferenceable(16) 
[[ZERO_TEMP]], i64 16, i1 false)
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr noundef nonnull align 4 
dereferenceable(16) [[X]], ptr noundef nonnull align 4 dereferenceable(16) 
[[DATA]], i64 16, i1 false)
+; CHECK-NEXT:    call void @llvm.lifetime.end.p0(ptr nonnull [[DATA]])
+; CHECK-NEXT:    call void @llvm.lifetime.end.p0(ptr nonnull [[ZERO]])
+; CHECK-NEXT:    call void @llvm.lifetime.end.p0(ptr nonnull [[TEMP]])
+; CHECK-NEXT:    ret void
 ;
   i32 %a2, i8 %a3,
   i32 noundef %cond) {
@@ -186,31 +184,31 @@ cond.end.pad:
 
 %struct.nonhomo = type { i32, i64, i32, i64 }
 define dso_local void @foo_nonhomo(ptr noundef %x, i32 %a0, i64 %a1,
-; FLAT-LABEL: define dso_local void @foo_nonhomo(
-; FLAT-SAME: ptr noundef [[X:%.*]], i32 [[A0:%.*]], i64 [[A1:%.*]], i32 
[[A2:%.*]], i64 [[A3:%.*]], i32 noundef [[COND:%.*]]) {
-; FLAT-NEXT:  [[ENTRY:.*:]]
-; FLAT-NEXT:    [[TEMP:%.*]] = alloca [[STRUCT_NONHOMO:%.*]], align 8
-; FLAT-NEXT:    [[ZERO:%.*]] = alloca [[STRUCT_NONHOMO]], align 8
-; FLAT-NEXT:    [[DATA:%.*]] = alloca [[STRUCT_NONHOMO]], align 8
-; FLAT-NEXT:    call void @llvm.lifetime.start.p0(ptr nonnull [[TEMP]])
-; FLAT-NEXT:    store i32 [[A0]], ptr [[TEMP]], align 8
-; FLAT-NEXT:    [[Y_SROA_2_0_TEMP_SROA_IDX:%.*]] = getelementptr inbounds nuw 
i8, ptr [[TEMP]], i64 4
-; FLAT-NEXT:    store i64 [[A1]], ptr [[Y_SROA_2_0_TEMP_SROA_IDX]], align 4
-; FLAT-NEXT:    [[Y_SROA_3_0_TEMP_SROA_IDX:%.*]] = getelementptr inbounds nuw 
i8, ptr [[TEMP]], i64 12
-; FLAT-NEXT:    store i32 [[A2]], ptr [[Y_SROA_3_0_TEMP_SROA_IDX]], align 4
-; FLAT-NEXT:    [[Y_SROA_4_0_TEMP_SROA_IDX:%.*]] = getelementptr inbounds nuw 
i8, ptr [[TEMP]], i64 16
-; FLAT-NEXT:    store i64 [[A3]], ptr [[Y_SROA_4_0_TEMP_SROA_IDX]], align 8
-; FLAT-NEXT:    call void @llvm.lifetime.start.p0(ptr nonnull [[ZERO]])
-; FLAT-NEXT:    call void @llvm.memset.p0.i64(ptr noundef nonnull align 8 
dereferenceable(32) [[ZERO]], i8 0, i64 32, i1 false)
-; FLAT-NEXT:    call void @llvm.lifetime.start.p0(ptr nonnull [[DATA]])
-; FLAT-NEXT:    [[TOBOOL_NH_NOT:%.*]] = icmp eq i32 [[COND]], 0
-; FLAT-NEXT:    [[ZERO_TEMP:%.*]] = select i1 [[TOBOOL_NH_NOT]], ptr [[ZERO]], 
ptr [[TEMP]]
-; FLAT-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr noundef nonnull align 8 
dereferenceable(32) [[DATA]], ptr noundef nonnull align 8 dereferenceable(32) 
[[ZERO_TEMP]], i64 32, i1 false)
-; FLAT-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr noundef nonnull align 8 
dereferenceable(32) [[X]], ptr noundef nonnull align 8 dereferenceable(32) 
[[DATA]], i64 32, i1 false)
-; FLAT-NEXT:    call void @llvm.lifetime.end.p0(ptr nonnull [[DATA]])
-; FLAT-NEXT:    call void @llvm.lifetime.end.p0(ptr nonnull [[ZERO]])
-; FLAT-NEXT:    call void @llvm.lifetime.end.p0(ptr nonnull [[TEMP]])
-; FLAT-NEXT:    ret void
+; CHECK-LABEL: define dso_local void @foo_nonhomo(
+; CHECK-SAME: ptr noundef [[X:%.*]], i32 [[A0:%.*]], i64 [[A1:%.*]], i32 
[[A2:%.*]], i64 [[A3:%.*]], i32 noundef [[COND:%.*]]) {
+; CHECK-NEXT:  [[ENTRY:.*:]]
+; CHECK-NEXT:    [[TEMP:%.*]] = alloca [[STRUCT_NONHOMO:%.*]], align 8
+; CHECK-NEXT:    [[ZERO:%.*]] = alloca [[STRUCT_NONHOMO]], align 8
+; CHECK-NEXT:    [[DATA:%.*]] = alloca [[STRUCT_NONHOMO]], align 8
+; CHECK-NEXT:    call void @llvm.lifetime.start.p0(ptr nonnull [[TEMP]])
+; CHECK-NEXT:    store i32 [[A0]], ptr [[TEMP]], align 8
+; CHECK-NEXT:    [[Y_SROA_2_0_TEMP_SROA_IDX:%.*]] = getelementptr inbounds nuw 
i8, ptr [[TEMP]], i64 4
+; CHECK-NEXT:    store i64 [[A1]], ptr [[Y_SROA_2_0_TEMP_SROA_IDX]], align 4
+; CHECK-NEXT:    [[Y_SROA_3_0_TEMP_SROA_IDX:%.*]] = getelementptr inbounds nuw 
i8, ptr [[TEMP]], i64 12
+; CHECK-NEXT:    store i32 [[A2]], ptr [[Y_SROA_3_0_TEMP_SROA_IDX]], align 4
+; CHECK-NEXT:    [[Y_SROA_4_0_TEMP_SROA_IDX:%.*]] = getelementptr inbounds nuw 
i8, ptr [[TEMP]], i64 16
+; CHECK-NEXT:    store i64 [[A3]], ptr [[Y_SROA_4_0_TEMP_SROA_IDX]], align 8
+; CHECK-NEXT:    call void @llvm.lifetime.start.p0(ptr nonnull [[ZERO]])
+; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr noundef nonnull align 8 
dereferenceable(32) [[ZERO]], i8 0, i64 32, i1 false)
+; CHECK-NEXT:    call void @llvm.lifetime.start.p0(ptr nonnull [[DATA]])
+; CHECK-NEXT:    [[TOBOOL_NH_NOT:%.*]] = icmp eq i32 [[COND]], 0
+; CHECK-NEXT:    [[ZERO_TEMP:%.*]] = select i1 [[TOBOOL_NH_NOT]], ptr 
[[ZERO]], ptr [[TEMP]]
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr noundef nonnull align 8 
dereferenceable(32) [[DATA]], ptr noundef nonnull align 8 dereferenceable(32) 
[[ZERO_TEMP]], i64 32, i1 false)
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr noundef nonnull align 8 
dereferenceable(32) [[X]], ptr noundef nonnull align 8 dereferenceable(32) 
[[DATA]], i64 32, i1 false)
+; CHECK-NEXT:    call void @llvm.lifetime.end.p0(ptr nonnull [[DATA]])
+; CHECK-NEXT:    call void @llvm.lifetime.end.p0(ptr nonnull [[ZERO]])
+; CHECK-NEXT:    call void @llvm.lifetime.end.p0(ptr nonnull [[TEMP]])
+; CHECK-NEXT:    ret void
 ;
   i32 %a2, i64 %a3,
   i32 noundef %cond) {
@@ -262,21 +260,21 @@ cond.end.nh:
 
 %struct.i1x4 = type { i1, i1, i1, i1 }
 define dso_local void @foo_i1(ptr noundef %x, i64 %dummy0, i64 %dummy1,
-; FLAT-LABEL: define dso_local void @foo_i1(
-; FLAT-SAME: ptr noundef [[X:%.*]], i64 [[DUMMY0:%.*]], i64 [[DUMMY1:%.*]], 
i32 noundef [[COND:%.*]]) {
-; FLAT-NEXT:  [[ENTRY:.*:]]
-; FLAT-NEXT:    [[TEMP:%.*]] = alloca [[STRUCT_I1X4:%.*]], align 1
-; FLAT-NEXT:    [[ZERO:%.*]] = alloca [[STRUCT_I1X4]], align 1
-; FLAT-NEXT:    call void @llvm.lifetime.start.p0(ptr nonnull [[TEMP]])
-; FLAT-NEXT:    call void @llvm.lifetime.start.p0(ptr nonnull [[ZERO]])
-; FLAT-NEXT:    store i32 0, ptr [[ZERO]], align 1
-; FLAT-NEXT:    [[TOBOOL_I1_NOT:%.*]] = icmp eq i32 [[COND]], 0
-; FLAT-NEXT:    [[ZERO_TEMP:%.*]] = select i1 [[TOBOOL_I1_NOT]], ptr [[ZERO]], 
ptr [[TEMP]]
-; FLAT-NEXT:    [[TMP0:%.*]] = load i32, ptr [[ZERO_TEMP]], align 1
-; FLAT-NEXT:    store i32 [[TMP0]], ptr [[X]], align 1
-; FLAT-NEXT:    call void @llvm.lifetime.end.p0(ptr nonnull [[ZERO]])
-; FLAT-NEXT:    call void @llvm.lifetime.end.p0(ptr nonnull [[TEMP]])
-; FLAT-NEXT:    ret void
+; CHECK-LABEL: define dso_local void @foo_i1(
+; CHECK-SAME: ptr noundef [[X:%.*]], i64 [[DUMMY0:%.*]], i64 [[DUMMY1:%.*]], 
i32 noundef [[COND:%.*]]) {
+; CHECK-NEXT:  [[ENTRY:.*:]]
+; CHECK-NEXT:    [[TEMP:%.*]] = alloca [[STRUCT_I1X4:%.*]], align 1
+; CHECK-NEXT:    [[ZERO:%.*]] = alloca [[STRUCT_I1X4]], align 1
+; CHECK-NEXT:    call void @llvm.lifetime.start.p0(ptr nonnull [[TEMP]])
+; CHECK-NEXT:    call void @llvm.lifetime.start.p0(ptr nonnull [[ZERO]])
+; CHECK-NEXT:    store i32 0, ptr [[ZERO]], align 1
+; CHECK-NEXT:    [[TOBOOL_I1_NOT:%.*]] = icmp eq i32 [[COND]], 0
+; CHECK-NEXT:    [[ZERO_TEMP:%.*]] = select i1 [[TOBOOL_I1_NOT]], ptr 
[[ZERO]], ptr [[TEMP]]
+; CHECK-NEXT:    [[TMP0:%.*]] = load i32, ptr [[ZERO_TEMP]], align 1
+; CHECK-NEXT:    store i32 [[TMP0]], ptr [[X]], align 1
+; CHECK-NEXT:    call void @llvm.lifetime.end.p0(ptr nonnull [[ZERO]])
+; CHECK-NEXT:    call void @llvm.lifetime.end.p0(ptr nonnull [[TEMP]])
+; CHECK-NEXT:    ret void
 ;
   i32 noundef %cond) {
 entry:
@@ -319,31 +317,31 @@ cond.end.i1:
 
 %struct.ptr4 = type { ptr, ptr, ptr, ptr }
 define dso_local void @foo_ptr(ptr noundef %x, ptr %p0, ptr %p1,
-; FLAT-LABEL: define dso_local void @foo_ptr(
-; FLAT-SAME: ptr noundef [[X:%.*]], ptr [[P0:%.*]], ptr [[P1:%.*]], ptr 
[[P2:%.*]], ptr [[P3:%.*]], i32 noundef [[COND:%.*]]) {
-; FLAT-NEXT:  [[ENTRY:.*:]]
-; FLAT-NEXT:    [[TEMP:%.*]] = alloca [[STRUCT_PTR4:%.*]], align 8
-; FLAT-NEXT:    [[ZERO:%.*]] = alloca [[STRUCT_PTR4]], align 8
-; FLAT-NEXT:    [[DATA:%.*]] = alloca [[STRUCT_PTR4]], align 8
-; FLAT-NEXT:    call void @llvm.lifetime.start.p0(ptr nonnull [[TEMP]])
-; FLAT-NEXT:    store ptr [[P0]], ptr [[TEMP]], align 8
-; FLAT-NEXT:    [[Y_SROA_2_0_TEMP_SROA_IDX:%.*]] = getelementptr inbounds nuw 
i8, ptr [[TEMP]], i64 8
-; FLAT-NEXT:    store ptr [[P1]], ptr [[Y_SROA_2_0_TEMP_SROA_IDX]], align 8
-; FLAT-NEXT:    [[Y_SROA_3_0_TEMP_SROA_IDX:%.*]] = getelementptr inbounds nuw 
i8, ptr [[TEMP]], i64 16
-; FLAT-NEXT:    store ptr [[P2]], ptr [[Y_SROA_3_0_TEMP_SROA_IDX]], align 8
-; FLAT-NEXT:    [[Y_SROA_4_0_TEMP_SROA_IDX:%.*]] = getelementptr inbounds nuw 
i8, ptr [[TEMP]], i64 24
-; FLAT-NEXT:    store ptr [[P3]], ptr [[Y_SROA_4_0_TEMP_SROA_IDX]], align 8
-; FLAT-NEXT:    call void @llvm.lifetime.start.p0(ptr nonnull [[ZERO]])
-; FLAT-NEXT:    call void @llvm.memset.p0.i64(ptr noundef nonnull align 8 
dereferenceable(32) [[ZERO]], i8 0, i64 32, i1 false)
-; FLAT-NEXT:    call void @llvm.lifetime.start.p0(ptr nonnull [[DATA]])
-; FLAT-NEXT:    [[TOBOOL_PTR_NOT:%.*]] = icmp eq i32 [[COND]], 0
-; FLAT-NEXT:    [[ZERO_TEMP:%.*]] = select i1 [[TOBOOL_PTR_NOT]], ptr 
[[ZERO]], ptr [[TEMP]]
-; FLAT-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr noundef nonnull align 8 
dereferenceable(32) [[DATA]], ptr noundef nonnull align 8 dereferenceable(32) 
[[ZERO_TEMP]], i64 32, i1 false)
-; FLAT-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr noundef nonnull align 8 
dereferenceable(32) [[X]], ptr noundef nonnull align 8 dereferenceable(32) 
[[DATA]], i64 32, i1 false)
-; FLAT-NEXT:    call void @llvm.lifetime.end.p0(ptr nonnull [[DATA]])
-; FLAT-NEXT:    call void @llvm.lifetime.end.p0(ptr nonnull [[ZERO]])
-; FLAT-NEXT:    call void @llvm.lifetime.end.p0(ptr nonnull [[TEMP]])
-; FLAT-NEXT:    ret void
+; CHECK-LABEL: define dso_local void @foo_ptr(
+; CHECK-SAME: ptr noundef [[X:%.*]], ptr [[P0:%.*]], ptr [[P1:%.*]], ptr 
[[P2:%.*]], ptr [[P3:%.*]], i32 noundef [[COND:%.*]]) {
+; CHECK-NEXT:  [[ENTRY:.*:]]
+; CHECK-NEXT:    [[TEMP:%.*]] = alloca [[STRUCT_PTR4:%.*]], align 8
+; CHECK-NEXT:    [[ZERO:%.*]] = alloca [[STRUCT_PTR4]], align 8
+; CHECK-NEXT:    [[DATA:%.*]] = alloca [[STRUCT_PTR4]], align 8
+; CHECK-NEXT:    call void @llvm.lifetime.start.p0(ptr nonnull [[TEMP]])
+; CHECK-NEXT:    store ptr [[P0]], ptr [[TEMP]], align 8
+; CHECK-NEXT:    [[Y_SROA_2_0_TEMP_SROA_IDX:%.*]] = getelementptr inbounds nuw 
i8, ptr [[TEMP]], i64 8
+; CHECK-NEXT:    store ptr [[P1]], ptr [[Y_SROA_2_0_TEMP_SROA_IDX]], align 8
+; CHECK-NEXT:    [[Y_SROA_3_0_TEMP_SROA_IDX:%.*]] = getelementptr inbounds nuw 
i8, ptr [[TEMP]], i64 16
+; CHECK-NEXT:    store ptr [[P2]], ptr [[Y_SROA_3_0_TEMP_SROA_IDX]], align 8
+; CHECK-NEXT:    [[Y_SROA_4_0_TEMP_SROA_IDX:%.*]] = getelementptr inbounds nuw 
i8, ptr [[TEMP]], i64 24
+; CHECK-NEXT:    store ptr [[P3]], ptr [[Y_SROA_4_0_TEMP_SROA_IDX]], align 8
+; CHECK-NEXT:    call void @llvm.lifetime.start.p0(ptr nonnull [[ZERO]])
+; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr noundef nonnull align 8 
dereferenceable(32) [[ZERO]], i8 0, i64 32, i1 false)
+; CHECK-NEXT:    call void @llvm.lifetime.start.p0(ptr nonnull [[DATA]])
+; CHECK-NEXT:    [[TOBOOL_PTR_NOT:%.*]] = icmp eq i32 [[COND]], 0
+; CHECK-NEXT:    [[ZERO_TEMP:%.*]] = select i1 [[TOBOOL_PTR_NOT]], ptr 
[[ZERO]], ptr [[TEMP]]
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr noundef nonnull align 8 
dereferenceable(32) [[DATA]], ptr noundef nonnull align 8 dereferenceable(32) 
[[ZERO_TEMP]], i64 32, i1 false)
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr noundef nonnull align 8 
dereferenceable(32) [[X]], ptr noundef nonnull align 8 dereferenceable(32) 
[[DATA]], i64 32, i1 false)
+; CHECK-NEXT:    call void @llvm.lifetime.end.p0(ptr nonnull [[DATA]])
+; CHECK-NEXT:    call void @llvm.lifetime.end.p0(ptr nonnull [[ZERO]])
+; CHECK-NEXT:    call void @llvm.lifetime.end.p0(ptr nonnull [[TEMP]])
+; CHECK-NEXT:    ret void
 ;
   ptr %p2, ptr %p3,
   i32 noundef %cond) {
@@ -392,9 +390,3 @@ cond.end.ptr:
   call void @llvm.lifetime.end.p0(ptr %temp)
   ret void
 }
-;; NOTE: These prefixes are unused and the list is autogenerated. Do not add 
tests below this line:
-; I1: {{.*}}
-; NESTED: {{.*}}
-; NONHOMO: {{.*}}
-; PADDED: {{.*}}
-; PTR: {{.*}}

>From 37bad35d7d43ae83327d201a069a3306e94e8626 Mon Sep 17 00:00:00 2001
From: "Yaxun (Sam) Liu" <[email protected]>
Date: Fri, 27 Feb 2026 18:25:53 -0500
Subject: [PATCH 5/7] [SROA] Remove alloca type dependency from
 struct-to-vector guard

Replace the TypePartitionTy == AI.getAllocatedType() check with purely
access-pattern-based guards. The conversion is now gated only on whether
the partition has non-splittable typed uses or the alloca is involved in
phi/select patterns.

This avoids introducing a new dependency on the alloca type, which
conflicts with ongoing work to remove such dependencies from SROA.
---
 llvm/lib/Transforms/Scalar/SROA.cpp | 12 +++++-------
 1 file changed, 5 insertions(+), 7 deletions(-)

diff --git a/llvm/lib/Transforms/Scalar/SROA.cpp 
b/llvm/lib/Transforms/Scalar/SROA.cpp
index 0c04f5fe268a4..9855fbb6b915c 100644
--- a/llvm/lib/Transforms/Scalar/SROA.cpp
+++ b/llvm/lib/Transforms/Scalar/SROA.cpp
@@ -5252,18 +5252,16 @@ selectPartitionType(Partition &P, const DataLayout &DL, 
AllocaInst &AI,
         isIntegerWideningViable(P, LargestIntTy, DL))
       return {LargestIntTy, true, nullptr};
 
-    // Try homogeneous struct to vector canonicalization, but only when:
-    // 1. The partition type matches the alloca type (not a synthetic
-    //    sub-struct from getTypePartition for a sub-partition), AND
-    // 2. The conversion would actually benefit from vectorization: either
-    //    the alloca is involved in phi/select patterns (enabling
-    //    speculation), or the partition has non-splittable typed uses.
+    // Try homogeneous struct to vector canonicalization, but only when
+    // the conversion would actually benefit from vectorization: either
+    // the partition has non-splittable typed uses, or the alloca is
+    // involved in phi/select patterns (enabling speculation).
     //
     // When all uses are splittable (memcpy/lifetime only) and there's no
     // phi/select involvement, converting to vector just changes memcpy
     // split types without enabling promotion, propagating vector types to
     // other allocas and causing insertelement/extractelement overhead.
-    if (TypePartitionTy == AI.getAllocatedType()) {
+    {
       bool HasNonSplittable =
           any_of(P, [](const Slice &S) { return !S.isSplittable(); });
       bool ShouldConvert = HasNonSplittable;

>From 980d7bbef2a5b9ad54881f4f25008065f36d78b4 Mon Sep 17 00:00:00 2001
From: "Yaxun (Sam) Liu" <[email protected]>
Date: Fri, 27 Feb 2026 18:31:41 -0500
Subject: [PATCH 6/7] [SROA] Update test expectations after struct-to-vector
 guard

Revert test CHECK lines for cases where struct-to-vector conversion no
longer fires due to the access-pattern guards. These tests involve
sub-partition struct types or memcpy-only allocas where the conversion
was blocked.
---
 llvm/test/CodeGen/NVPTX/lower-byval-args.ll   | 77 ++++++++++++-------
 .../assignment-tracking/sroa/user-memcpy.ll   |  8 +-
 .../DebugInfo/Generic/sroa-alloca-offset.ll   |  8 +-
 llvm/test/DebugInfo/X86/sroasplit-4.ll        | 11 ++-
 4 files changed, 64 insertions(+), 40 deletions(-)

diff --git a/llvm/test/CodeGen/NVPTX/lower-byval-args.ll 
b/llvm/test/CodeGen/NVPTX/lower-byval-args.ll
index 4f018e15571a3..827097e90e7d3 100644
--- a/llvm/test/CodeGen/NVPTX/lower-byval-args.ll
+++ b/llvm/test/CodeGen/NVPTX/lower-byval-args.ll
@@ -455,39 +455,64 @@ define dso_local ptx_kernel void @memcpy_to_param(ptr 
nocapture noundef readonly
 ; PTX-NEXT:    .local .align 8 .b8 __local_depot9[8];
 ; PTX-NEXT:    .reg .b64 %SP;
 ; PTX-NEXT:    .reg .b64 %SPL;
-; PTX-NEXT:    .reg .b32 %r<23>;
-; PTX-NEXT:    .reg .b64 %rd<4>;
+; PTX-NEXT:    .reg .b32 %r<3>;
+; PTX-NEXT:    .reg .b64 %rd<47>;
 ; PTX-EMPTY:
 ; PTX-NEXT:  // %bb.0: // %entry
 ; PTX-NEXT:    mov.b64 %SPL, __local_depot9;
 ; PTX-NEXT:    cvta.local.u64 %SP, %SPL;
 ; PTX-NEXT:    ld.param.b64 %rd1, [memcpy_to_param_param_0];
-; PTX-NEXT:    cvta.to.global.u64 %rd2, %rd1;
+; PTX-NEXT:    add.u64 %rd2, %SPL, 0;
 ; PTX-NEXT:    ld.param.b32 %r1, [memcpy_to_param_param_1+4];
+; PTX-NEXT:    st.local.b32 [%rd2+4], %r1;
 ; PTX-NEXT:    ld.param.b32 %r2, [memcpy_to_param_param_1];
-; PTX-NEXT:    st.v2.b32 [%SP], {%r2, %r1};
-; PTX-NEXT:    ld.volatile.global.b8 %r3, [%rd2+4];
-; PTX-NEXT:    ld.volatile.global.b8 %r4, [%rd2+5];
-; PTX-NEXT:    shl.b32 %r5, %r4, 8;
-; PTX-NEXT:    or.b32 %r6, %r5, %r3;
-; PTX-NEXT:    ld.volatile.global.b8 %r7, [%rd2+6];
-; PTX-NEXT:    shl.b32 %r8, %r7, 16;
-; PTX-NEXT:    ld.volatile.global.b8 %r9, [%rd2+7];
-; PTX-NEXT:    shl.b32 %r10, %r9, 24;
-; PTX-NEXT:    or.b32 %r11, %r10, %r8;
-; PTX-NEXT:    or.b32 %r12, %r11, %r6;
-; PTX-NEXT:    ld.volatile.global.b8 %r13, [%rd2];
-; PTX-NEXT:    ld.volatile.global.b8 %r14, [%rd2+1];
-; PTX-NEXT:    shl.b32 %r15, %r14, 8;
-; PTX-NEXT:    or.b32 %r16, %r15, %r13;
-; PTX-NEXT:    ld.volatile.global.b8 %r17, [%rd2+2];
-; PTX-NEXT:    shl.b32 %r18, %r17, 16;
-; PTX-NEXT:    ld.volatile.global.b8 %r19, [%rd2+3];
-; PTX-NEXT:    shl.b32 %r20, %r19, 24;
-; PTX-NEXT:    or.b32 %r21, %r20, %r18;
-; PTX-NEXT:    or.b32 %r22, %r21, %r16;
-; PTX-NEXT:    add.u64 %rd3, %SPL, 0;
-; PTX-NEXT:    st.local.v2.b32 [%rd3], {%r22, %r12};
+; PTX-NEXT:    st.local.b32 [%rd2], %r2;
+; PTX-NEXT:    ld.volatile.b8 %rd3, [%rd1];
+; PTX-NEXT:    ld.volatile.b8 %rd4, [%rd1+1];
+; PTX-NEXT:    shl.b64 %rd5, %rd4, 8;
+; PTX-NEXT:    or.b64 %rd6, %rd5, %rd3;
+; PTX-NEXT:    ld.volatile.b8 %rd7, [%rd1+2];
+; PTX-NEXT:    shl.b64 %rd8, %rd7, 16;
+; PTX-NEXT:    ld.volatile.b8 %rd9, [%rd1+3];
+; PTX-NEXT:    shl.b64 %rd10, %rd9, 24;
+; PTX-NEXT:    or.b64 %rd11, %rd10, %rd8;
+; PTX-NEXT:    or.b64 %rd12, %rd11, %rd6;
+; PTX-NEXT:    ld.volatile.b8 %rd13, [%rd1+4];
+; PTX-NEXT:    ld.volatile.b8 %rd14, [%rd1+5];
+; PTX-NEXT:    shl.b64 %rd15, %rd14, 8;
+; PTX-NEXT:    or.b64 %rd16, %rd15, %rd13;
+; PTX-NEXT:    ld.volatile.b8 %rd17, [%rd1+6];
+; PTX-NEXT:    shl.b64 %rd18, %rd17, 16;
+; PTX-NEXT:    ld.volatile.b8 %rd19, [%rd1+7];
+; PTX-NEXT:    shl.b64 %rd20, %rd19, 24;
+; PTX-NEXT:    or.b64 %rd21, %rd20, %rd18;
+; PTX-NEXT:    or.b64 %rd22, %rd21, %rd16;
+; PTX-NEXT:    shl.b64 %rd23, %rd22, 32;
+; PTX-NEXT:    or.b64 %rd24, %rd23, %rd12;
+; PTX-NEXT:    st.volatile.b64 [%SP], %rd24;
+; PTX-NEXT:    ld.volatile.b8 %rd25, [%rd1+8];
+; PTX-NEXT:    ld.volatile.b8 %rd26, [%rd1+9];
+; PTX-NEXT:    shl.b64 %rd27, %rd26, 8;
+; PTX-NEXT:    or.b64 %rd28, %rd27, %rd25;
+; PTX-NEXT:    ld.volatile.b8 %rd29, [%rd1+10];
+; PTX-NEXT:    shl.b64 %rd30, %rd29, 16;
+; PTX-NEXT:    ld.volatile.b8 %rd31, [%rd1+11];
+; PTX-NEXT:    shl.b64 %rd32, %rd31, 24;
+; PTX-NEXT:    or.b64 %rd33, %rd32, %rd30;
+; PTX-NEXT:    or.b64 %rd34, %rd33, %rd28;
+; PTX-NEXT:    ld.volatile.b8 %rd35, [%rd1+12];
+; PTX-NEXT:    ld.volatile.b8 %rd36, [%rd1+13];
+; PTX-NEXT:    shl.b64 %rd37, %rd36, 8;
+; PTX-NEXT:    or.b64 %rd38, %rd37, %rd35;
+; PTX-NEXT:    ld.volatile.b8 %rd39, [%rd1+14];
+; PTX-NEXT:    shl.b64 %rd40, %rd39, 16;
+; PTX-NEXT:    ld.volatile.b8 %rd41, [%rd1+15];
+; PTX-NEXT:    shl.b64 %rd42, %rd41, 24;
+; PTX-NEXT:    or.b64 %rd43, %rd42, %rd40;
+; PTX-NEXT:    or.b64 %rd44, %rd43, %rd38;
+; PTX-NEXT:    shl.b64 %rd45, %rd44, 32;
+; PTX-NEXT:    or.b64 %rd46, %rd45, %rd34;
+; PTX-NEXT:    st.volatile.b64 [%SP+8], %rd46;
 ; PTX-NEXT:    ret;
 entry:
   tail call void @llvm.memcpy.p0.p0.i64(ptr %s, ptr %in, i64 16, i1 true)
diff --git 
a/llvm/test/DebugInfo/Generic/assignment-tracking/sroa/user-memcpy.ll 
b/llvm/test/DebugInfo/Generic/assignment-tracking/sroa/user-memcpy.ll
index eff2713c54468..ded78f4ff83f4 100644
--- a/llvm/test/DebugInfo/Generic/assignment-tracking/sroa/user-memcpy.ll
+++ b/llvm/test/DebugInfo/Generic/assignment-tracking/sroa/user-memcpy.ll
@@ -21,8 +21,8 @@
 ;; Allocas have been promoted - the linked dbg.assigns have been removed.
 
 ;; | V3i point = {0, 0, 0};
-;; First two fields vectorized to <2 x i64>.
-; CHECK-NEXT: #dbg_value(<2 x i64> zeroinitializer, ![[point:[0-9]+]], 
!DIExpression(DW_OP_LLVM_fragment, 0, 128),
+; CHECK-NEXT: #dbg_value(i64 0, ![[point:[0-9]+]], 
!DIExpression(DW_OP_LLVM_fragment, 0, 64),
+; CHECK-NEXT: #dbg_value(i64 0, ![[point]], !DIExpression(DW_OP_LLVM_fragment, 
64, 64),
 
 ;; point.z = 5000;
 ; CHECK-NEXT: #dbg_value(i64 5000, ![[point]], 
!DIExpression(DW_OP_LLVM_fragment, 128, 64),
@@ -40,8 +40,8 @@
 ; CHECK-NEXT: #dbg_value(i64 %other.sroa.3.0.copyload, ![[other]], 
!DIExpression(DW_OP_LLVM_fragment, 128, 64),
 
 ;; | std::memcpy(&point.y, &other.x, sizeof(long) * 2);
-;;   The first partition is <2 x i64>, insertelement updates point.y:
-; CHECK-NEXT: %point.sroa.0.8.vec.insert = insertelement <2 x i64> 
zeroinitializer, i64 %other.sroa.0.0.copyload, i32 1
+;;   other is now 3 scalars:
+;;     point.y = other.x
 ; CHECK-NEXT: #dbg_value(i64 %other.sroa.0.0.copyload, ![[point]], 
!DIExpression(DW_OP_LLVM_fragment, 64, 64),
 ;;
 ;;     point.z = other.y
diff --git a/llvm/test/DebugInfo/Generic/sroa-alloca-offset.ll 
b/llvm/test/DebugInfo/Generic/sroa-alloca-offset.ll
index 8f5d2ed7515ed..6718711f83e04 100644
--- a/llvm/test/DebugInfo/Generic/sroa-alloca-offset.ll
+++ b/llvm/test/DebugInfo/Generic/sroa-alloca-offset.ll
@@ -1,5 +1,5 @@
 ; RUN: opt %s -passes=sroa -S | FileCheck %s --check-prefixes=COMMON,OLD
-; RUN: opt %s -passes='declare-to-assign,sroa' -S | FileCheck %s 
--check-prefixes=COMMON,NEW
+; RUN: opt %s -passes=declare-to-assign,sroa -S | FileCheck %s 
--check-prefixes=COMMON,NEW
 
 ;; C++17 source:
 ;; struct two { int a, b; } gt;
@@ -140,7 +140,9 @@ entry:
 ;; 16 bit variable f (!62): value vgf (lower bits)
 ;; 16 bit variable g (!63): value vgf (upper bits)
 ;;
-;; 16 bit variable h (!64): promoted to <2 x i32> vector
+;; 16 bit variable h (!64): deref dead_64_128
+; COMMON-NEXT: %[[dead_64_128:.*]] = alloca %struct.two
+; COMMON-NEXT: #dbg_declare(ptr %[[dead_64_128]], ![[h:[0-9]+]], 
!DIExpression(),
 ; COMMON-NEXT: %[[ve:.*]] = load i32, ptr @gf
 ;; FIXME: mem2reg bug - offset is incorrect - see comment above.
 ; COMMON-NEXT: #dbg_value(i32 %[[ve]], ![[e:[0-9]+]], 
!DIExpression(DW_OP_plus_uconst, 2),
@@ -148,8 +150,6 @@ entry:
 ; COMMON-NEXT: #dbg_value(i32 %[[vfg]], ![[f:[0-9]+]], !DIExpression(),
 ;; FIXME: mem2reg bug - offset is incorrect - see comment above.
 ; COMMON-NEXT: #dbg_value(i32 %[[vfg]], ![[g:[0-9]+]], 
!DIExpression(DW_OP_plus_uconst, 2),
-; COMMON-NEXT: %[[vh:.*]] = load <2 x i32>, ptr getelementptr inbounds (i8, 
ptr @gf, i64 8)
-; COMMON-NEXT: #dbg_value(<2 x i32> %[[vh]], ![[h:[0-9]+]], !DIExpression(),
 define dso_local noundef i32 @_Z4fun3v() #0 !dbg !55 {
 entry:
   %0 = alloca %struct.four, align 4
diff --git a/llvm/test/DebugInfo/X86/sroasplit-4.ll 
b/llvm/test/DebugInfo/X86/sroasplit-4.ll
index a44d1a3d913c7..d5ce348e9896e 100644
--- a/llvm/test/DebugInfo/X86/sroasplit-4.ll
+++ b/llvm/test/DebugInfo/X86/sroasplit-4.ll
@@ -1,13 +1,12 @@
-; RUN: opt -passes=sroa < %s -S -o - | FileCheck %s
+; RUN: opt -passes='sroa' < %s -S -o - | FileCheck %s
 ;
 ; Test that recursively splitting an alloca updates the debug info correctly.
 ; CHECK: %[[T:.*]] = load i64, ptr @t, align 8
-; CHECK: %[[VI:.*]] = insertelement <2 x i64> {{undef|poison}}, i64 %[[T]], 
i32 0
-; CHECK: #dbg_value(<2 x i64> %[[VI]], ![[Y:.*]], !DIExpression(),
+; CHECK: #dbg_value(i64 %[[T]], ![[Y:.*]], !DIExpression(DW_OP_LLVM_fragment, 
0, 64),
 ; CHECK: %[[T1:.*]] = load i64, ptr @t, align 8
-; CHECK: %[[VI2:.*]] = insertelement <2 x i64> %[[VI]], i64 %[[T1]], i32 1
-; CHECK: #dbg_value(<2 x i64> %[[VI2]], ![[Y]], !DIExpression(),
-; CHECK: #dbg_value(<2 x i64> %[[VI2]], ![[R:.*]], 
!DIExpression(DW_OP_LLVM_fragment, 192, 128),
+; CHECK: #dbg_value(i64 %[[T1]], ![[Y]], !DIExpression(DW_OP_LLVM_fragment, 
64, 64),
+; CHECK: #dbg_value(i64 %[[T]], ![[R:.*]], !DIExpression(DW_OP_LLVM_fragment, 
192, 64),
+; CHECK: #dbg_value(i64 %[[T1]], ![[R]], !DIExpression(DW_OP_LLVM_fragment, 
256, 64),
 ;
 ; struct p {
 ;   __SIZE_TYPE__ s;

>From 9e413cd0e753b7ceb0204e1fd14ee6a675c60285 Mon Sep 17 00:00:00 2001
From: "Yaxun (Sam) Liu" <[email protected]>
Date: Sat, 28 Feb 2026 09:46:59 -0500
Subject: [PATCH 7/7] [SROA] Revert nullptr.cl test expectations after
 struct-to-vector guard

The struct-to-vector guard now blocks vector conversion for this case,
so revert the test to expect memset instead of vector store.
---
 clang/test/CodeGenOpenCL/nullptr.cl | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/clang/test/CodeGenOpenCL/nullptr.cl 
b/clang/test/CodeGenOpenCL/nullptr.cl
index f45df110ec243..976e12c0bef47 100644
--- a/clang/test/CodeGenOpenCL/nullptr.cl
+++ b/clang/test/CodeGenOpenCL/nullptr.cl
@@ -597,10 +597,10 @@ typedef struct {
 } StructTy3;
 
 // CHECK-LABEL: test_memset_private
-// SPIR64: store <4 x i64> zeroinitializer, ptr %ptr, align 8
+// SPIR64: call void @llvm.memset.p0.i64(ptr noundef nonnull align 8 
dereferenceable(32) %ptr, i8 0, i64 32, i1 false)
 // SPIR64: [[GEP:%.*]] = getelementptr inbounds nuw i8, ptr %ptr, i64 32
 // SPIR64: store ptr addrspacecast (ptr addrspace(4) null to ptr), ptr 
[[GEP]], align 8
-// AMDGCN: store <4 x i64> zeroinitializer, ptr addrspace(5) %ptr, align 8
+// AMDGCN: call void @llvm.memset.p5.i64(ptr addrspace(5) noundef align 8 
{{.*}}, i8 0, i64 32, i1 false)
 // AMDGCN: [[GEP:%.*]] = getelementptr inbounds nuw i8, ptr addrspace(5) %ptr, 
i32 32
 // AMDGCN: store ptr addrspace(5) addrspacecast (ptr null to ptr 
addrspace(5)), ptr addrspace(5) [[GEP]]
 // AMDGCN: [[GEP1:%.*]] = getelementptr inbounds nuw i8, ptr addrspace(5) 
{{.*}}, i32 36

_______________________________________________
cfe-commits mailing list
[email protected]
https://lists.llvm.org/cgi-bin/mailman/listinfo/cfe-commits

Reply via email to