https://github.com/c-rhodes updated https://github.com/llvm/llvm-project/pull/179117

From 0e8db607e3cbc62b210fc4869b09457337fdee9c Mon Sep 17 00:00:00 2001
From: Simon Pilgrim <[email protected]>
Date: Sun, 1 Feb 2026 10:57:51 +0000
Subject: [PATCH 1/2] [X86] Add test coverage for #179057 (#179092)

The passthrough of the fixupimm scalar intrinsics is folded incorrectly
when the mask is known to be zero.
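
For reference, this is the pattern the new tests cover (a sketch only,
mirroring the tests in the diff below; %x0/%x1 follow the test naming).
With the immediate mask i8 -2 the bottom mask bit is known zero, so the
scalar lane must come from the passthrough operand %x0, while the upper
lanes come from the second source %x1 (the property patch 2 fixes):

  %res = call <4 x float> @llvm.x86.avx512.mask.fixupimm.ss(<4 x float> %x0, <4 x float> %x1, <4 x i32> %x2, i32 5, i8 -2, i32 4)
  ; expected: %res[0] = %x0[0] (passthrough), %res[1..3] = %x1[1..3]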

(cherry picked from commit 618d71dc98df760d0c724cff6fa69b780e8c0372)
---
 llvm/test/CodeGen/X86/avx512-intrinsics.ll | 36 ++++++++++++++++++++++
 1 file changed, 36 insertions(+)

diff --git a/llvm/test/CodeGen/X86/avx512-intrinsics.ll b/llvm/test/CodeGen/X86/avx512-intrinsics.ll
index f9b5994a18d36..21bac9e7bb04d 100644
--- a/llvm/test/CodeGen/X86/avx512-intrinsics.ll
+++ b/llvm/test/CodeGen/X86/avx512-intrinsics.ll
@@ -5518,6 +5518,14 @@ define <4 x float>@test_int_x86_avx512_mask_fixupimm_ss(<4 x float> %x0, <4 x fl
   ret <4 x float> %res4
 }
 
+define <4 x float>@test_int_x86_avx512_mask_fixupimm_ss_passthrough_zero_mask(<4 x float> %x0, <4 x float> %x1, <4 x i32> %x2) {
+; CHECK-LABEL: test_int_x86_avx512_mask_fixupimm_ss_passthrough_zero_mask:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    ret{{[l|q]}}
+  %res = call <4 x float> @llvm.x86.avx512.mask.fixupimm.ss(<4 x float> %x0, <4 x float> %x1, <4 x i32> %x2, i32 5, i8 -2, i32 4)
+  ret <4 x float> %res
+}
+
 declare <4 x float> @llvm.x86.avx512.maskz.fixupimm.ss(<4 x float>, <4 x float>, <4 x i32>, i32, i8, i32)
 
 define <4 x float>@test_int_x86_avx512_maskz_fixupimm_ss(<4 x float> %x0, <4 x float> %x1, <4 x i32> %x2, i8 %x4) {
@@ -5555,6 +5563,16 @@ define <4 x float>@test_int_x86_avx512_maskz_fixupimm_ss(<4 x float> %x0, <4 x f
   ret <4 x float> %res4
 }
 
+define <4 x float>@test_int_x86_avx512_maskz_fixupimm_ss_passthrough_zero_mask(<4 x float> %x0, <4 x float> %x1, <4 x i32> %x2) {
+; CHECK-LABEL: test_int_x86_avx512_maskz_fixupimm_ss_passthrough_zero_mask:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vxorps %xmm1, %xmm1, %xmm1
+; CHECK-NEXT:    vmovss {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
+; CHECK-NEXT:    ret{{[l|q]}}
+  %res = call <4 x float> @llvm.x86.avx512.maskz.fixupimm.ss(<4 x float> %x0, <4 x float> %x1, <4 x i32> %x2, i32 5, i8 -2, i32 4)
+  ret <4 x float> %res
+}
+
 declare <16 x float> @llvm.x86.avx512.mask.fixupimm.ps.512(<16 x float>, <16 x float>, <16 x i32>, i32, i16, i32)
 
 define <16 x float>@test_int_x86_avx512_mask_fixupimm_ps_512(<16 x float> %x0, <16 x float> %x1, <16 x i32> %x2, i16 %x4) {
@@ -5680,6 +5698,14 @@ define <2 x double>@test_int_x86_avx512_mask_fixupimm_sd(<2 x double> %x0, <2 x
   ret <2 x double> %res4
 }
 
+define <2 x double>@test_int_x86_avx512_mask_fixupimm_sd_passthrough_zero_mask(<2 x double> %x0, <2 x double> %x1, <2 x i64> %x2) {
+; CHECK-LABEL: test_int_x86_avx512_mask_fixupimm_sd_passthrough_zero_mask:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    ret{{[l|q]}}
+  %res = call <2 x double> @llvm.x86.avx512.mask.fixupimm.sd(<2 x double> %x0, <2 x double> %x1, <2 x i64> %x2, i32 5, i8 -2, i32 4)
+  ret <2 x double> %res
+}
+
 declare <2 x double> @llvm.x86.avx512.maskz.fixupimm.sd(<2 x double>, <2 x double>, <2 x i64>, i32, i8, i32)
 
 define <2 x double>@test_int_x86_avx512_maskz_fixupimm_sd(<2 x double> %x0, <2 x double> %x1, <2 x i64> %x2, i8 %x4) {
@@ -5717,6 +5743,16 @@ define <2 x double>@test_int_x86_avx512_maskz_fixupimm_sd(<2 x double> %x0, <2 x
   ret <2 x double> %res4
 }
 
+define <2 x double>@test_int_x86_avx512_maskz_fixupimm_sd_passthrough_zero_mask(<2 x double> %x0, <2 x double> %x1, <2 x i64> %x2) {
+; CHECK-LABEL: test_int_x86_avx512_maskz_fixupimm_sd_passthrough_zero_mask:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vxorps %xmm1, %xmm1, %xmm1
+; CHECK-NEXT:    vmovsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
+; CHECK-NEXT:    ret{{[l|q]}}
+  %res = call <2 x double> @llvm.x86.avx512.maskz.fixupimm.sd(<2 x double> %x0, <2 x double> %x1, <2 x i64> %x2, i32 5, i8 -2, i32 4)
+  ret <2 x double> %res
+}
+
 declare double @llvm.fma.f64(double, double, double) #1
 declare double @llvm.x86.avx512.vfmadd.f64(double, double, double, i32) #0
 

From 6e0577f5cf893b9d948b8332b9baeed352ba669c Mon Sep 17 00:00:00 2001
From: Simon Pilgrim <[email protected]>
Date: Sun, 1 Feb 2026 14:00:53 +0000
Subject: [PATCH 2/2] [X86] getScalarMaskingNode - FIXUPIMM scalar ops take
 upper elements from second operand (#179101)

FIXUPIMMSS/SD instructions pass through the upper elements of the SECOND
operand, not the first as most (2-op) instructions do.

Fixes #179057
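
In IR terms, the zero-mask merge case for the ss variant should now fold
to the equivalent of (an illustrative sketch, not part of the patch;
%x0/%x1 follow the test naming):

  %res = shufflevector <4 x float> %x1, <4 x float> %x0, <4 x i32> <i32 4, i32 1, i32 2, i32 3>
  ; lane 0 from the passthrough %x0, upper lanes from the second source %x1

whereas getScalarMaskingNode previously built the shuffle from
Op.getOperand(0), i.e. wrongly took the upper elements from %x0.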

(cherry picked from commit 49d2323447aec77c3d1ae8c941f3f8a126ff1480)
---
 llvm/lib/Target/X86/X86ISelLowering.cpp    | 10 ++++++----
 llvm/test/CodeGen/X86/avx512-intrinsics.ll | 10 ++++++----
 2 files changed, 12 insertions(+), 8 deletions(-)

diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index a354704c5958b..5935f2eb344e1 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -26574,7 +26574,8 @@ static SDValue getVectorMaskingNode(SDValue Op, SDValue Mask,
 static SDValue getScalarMaskingNode(SDValue Op, SDValue Mask,
                                     SDValue PreservedSrc,
                                     const X86Subtarget &Subtarget,
-                                    SelectionDAG &DAG) {
+                                    SelectionDAG &DAG,
+                                    unsigned UpperEltOpSrc = 0) {
   auto *MaskConst = dyn_cast<ConstantSDNode>(Mask);
   if (MaskConst && (MaskConst->getZExtValue() & 0x1))
     return Op;
@@ -26600,8 +26601,8 @@ static SDValue getScalarMaskingNode(SDValue Op, SDValue Mask,
     SmallVector<int, 16> ShuffleMask(VT.getVectorNumElements());
     std::iota(ShuffleMask.begin(), ShuffleMask.end(), 0);
     ShuffleMask[0] = VT.getVectorNumElements();
-    return DAG.getVectorShuffle(VT, dl, Op.getOperand(0), PreservedSrc,
-                                ShuffleMask);
+    return DAG.getVectorShuffle(VT, dl, Op.getOperand(UpperEltOpSrc),
+                                PreservedSrc, ShuffleMask);
   }
 
   return DAG.getNode(X86ISD::SELECTS, dl, VT, IMask, Op, PreservedSrc);
@@ -27262,7 +27263,8 @@ SDValue X86TargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
       if (Opc == X86ISD::VFIXUPIMM || Opc == X86ISD::VFIXUPIMM_SAE)
         return getVectorMaskingNode(FixupImm, Mask, Passthru, Subtarget, DAG);
 
-      return getScalarMaskingNode(FixupImm, Mask, Passthru, Subtarget, DAG);
+      return getScalarMaskingNode(FixupImm, Mask, Passthru, Subtarget, DAG,
+                                  /*UpperEltOpSrc=*/1);
     }
     case ROUNDP: {
       assert(IntrData->Opc0 == X86ISD::VRNDSCALE && "Unexpected opcode");
diff --git a/llvm/test/CodeGen/X86/avx512-intrinsics.ll b/llvm/test/CodeGen/X86/avx512-intrinsics.ll
index 21bac9e7bb04d..b979f7531cd36 100644
--- a/llvm/test/CodeGen/X86/avx512-intrinsics.ll
+++ b/llvm/test/CodeGen/X86/avx512-intrinsics.ll
@@ -5521,6 +5521,7 @@ define <4 x float>@test_int_x86_avx512_mask_fixupimm_ss(<4 x float> %x0, <4 x fl
 define <4 x float>@test_int_x86_avx512_mask_fixupimm_ss_passthrough_zero_mask(<4 x float> %x0, <4 x float> %x1, <4 x i32> %x2) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_fixupimm_ss_passthrough_zero_mask:
 ; CHECK:       # %bb.0:
+; CHECK-NEXT:    vmovss {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
 ; CHECK-NEXT:    ret{{[l|q]}}
   %res = call <4 x float> @llvm.x86.avx512.mask.fixupimm.ss(<4 x float> %x0, <4 x float> %x1, <4 x i32> %x2, i32 5, i8 -2, i32 4)
   ret <4 x float> %res
@@ -5566,8 +5567,8 @@ define <4 x float>@test_int_x86_avx512_maskz_fixupimm_ss(<4 x float> %x0, <4 x f
 define <4 x float>@test_int_x86_avx512_maskz_fixupimm_ss_passthrough_zero_mask(<4 x float> %x0, <4 x float> %x1, <4 x i32> %x2) {
 ; CHECK-LABEL: test_int_x86_avx512_maskz_fixupimm_ss_passthrough_zero_mask:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vxorps %xmm1, %xmm1, %xmm1
-; CHECK-NEXT:    vmovss {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
+; CHECK-NEXT:    vxorps %xmm0, %xmm0, %xmm0
+; CHECK-NEXT:    vmovss {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
 ; CHECK-NEXT:    ret{{[l|q]}}
   %res = call <4 x float> @llvm.x86.avx512.maskz.fixupimm.ss(<4 x float> %x0, <4 x float> %x1, <4 x i32> %x2, i32 5, i8 -2, i32 4)
   ret <4 x float> %res
@@ -5701,6 +5702,7 @@ define <2 x double>@test_int_x86_avx512_mask_fixupimm_sd(<2 x double> %x0, <2 x
 define <2 x double>@test_int_x86_avx512_mask_fixupimm_sd_passthrough_zero_mask(<2 x double> %x0, <2 x double> %x1, <2 x i64> %x2) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_fixupimm_sd_passthrough_zero_mask:
 ; CHECK:       # %bb.0:
+; CHECK-NEXT:    vmovsd {{.*#+}} xmm0 = xmm0[0],xmm1[1]
 ; CHECK-NEXT:    ret{{[l|q]}}
   %res = call <2 x double> @llvm.x86.avx512.mask.fixupimm.sd(<2 x double> %x0, <2 x double> %x1, <2 x i64> %x2, i32 5, i8 -2, i32 4)
   ret <2 x double> %res
@@ -5746,8 +5748,8 @@ define <2 x double>@test_int_x86_avx512_maskz_fixupimm_sd(<2 x double> %x0, <2 x
 define <2 x double>@test_int_x86_avx512_maskz_fixupimm_sd_passthrough_zero_mask(<2 x double> %x0, <2 x double> %x1, <2 x i64> %x2) {
 ; CHECK-LABEL: test_int_x86_avx512_maskz_fixupimm_sd_passthrough_zero_mask:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vxorps %xmm1, %xmm1, %xmm1
-; CHECK-NEXT:    vmovsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
+; CHECK-NEXT:    vxorps %xmm0, %xmm0, %xmm0
+; CHECK-NEXT:    vmovsd {{.*#+}} xmm0 = xmm0[0],xmm1[1]
 ; CHECK-NEXT:    ret{{[l|q]}}
   %res = call <2 x double> @llvm.x86.avx512.maskz.fixupimm.sd(<2 x double> %x0, <2 x double> %x1, <2 x i64> %x2, i32 5, i8 -2, i32 4)
   ret <2 x double> %res
