https://github.com/jhuber6 updated https://github.com/llvm/llvm-project/pull/175076

>From de5bc9915855ca75035bd8cf695d4abb633dbad0 Mon Sep 17 00:00:00 2001
From: Joseph Huber <[email protected]>
Date: Thu, 8 Jan 2026 15:48:20 -0600
Subject: [PATCH] [SPIR-V] Enable variadic function lowering for the SPIR-V
 target

Summary:
We support variadic functions in AMDGPU / NVPTX via an LLVM-IR pass.
This patch applies the same handling to support them on the SPIR-V
target.

I am unsure what the ABI should look like here; I have mostly copied the
one we use for NVPTX, which is essentially a struct layout with natural
alignment. This wastes some space, which is why the AMDGPU lowering does
not pad its arguments.
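
As a rough illustration (the names and exact struct layout here are made up;
the real output is checked in the new variadics-lowering.ll test below), a
call such as varargs_simple(0, 1, 2.0) is rewritten to pass a pointer to a
frozen argument buffer:

  %buffer = alloca { i32, [4 x i8], double }  ; natural alignment adds padding
  store i32 1, ptr %buffer
  %slot = getelementptr inbounds { i32, [4 x i8], double }, ptr %buffer, i32 0, i32 2
  store double 2.000000e+00, ptr %slot
  call spir_func void @varargs_simple(i32 0, ptr %buffer)

The va_arg sequence emitted in the callee then walks that buffer using the
same alignment rules.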

Additionally, this required allowing the SPIR_FUNC calling convention in
the ExpandVariadics pass. I'm assuming it is compatible with the C calling
convention in IR, but I will need someone to confirm that.
---
 clang/lib/CodeGen/Targets/SPIR.cpp            |  15 ++
 ...rargs-with-nonzero-default-address-space.c |  25 ++--
 clang/test/CodeGenSPIRV/Builtins/variadic.c   |  76 ++++++++++
 llvm/lib/Target/SPIRV/SPIRVGlobalRegistry.cpp |   6 -
 llvm/lib/Target/SPIRV/SPIRVTargetMachine.cpp  |   2 +
 llvm/lib/Transforms/IPO/ExpandVariadics.cpp   |  56 +++++++-
 llvm/test/CodeGen/SPIRV/function/vararg.ll    |  10 --
 .../SPIRV/function/variadics-lowering.ll      | 135 ++++++++++++++++++
 llvm/test/CodeGen/SPIRV/llc-pipeline.ll       |   2 +
 9 files changed, 299 insertions(+), 28 deletions(-)
 create mode 100644 clang/test/CodeGenSPIRV/Builtins/variadic.c
 delete mode 100644 llvm/test/CodeGen/SPIRV/function/vararg.ll
 create mode 100644 llvm/test/CodeGen/SPIRV/function/variadics-lowering.ll

diff --git a/clang/lib/CodeGen/Targets/SPIR.cpp b/clang/lib/CodeGen/Targets/SPIR.cpp
index 6c6c4794bba49..ba90ab3e67053 100644
--- a/clang/lib/CodeGen/Targets/SPIR.cpp
+++ b/clang/lib/CodeGen/Targets/SPIR.cpp
@@ -36,6 +36,8 @@ class SPIRVABIInfo : public CommonSPIRABIInfo {
 public:
   SPIRVABIInfo(CodeGenTypes &CGT) : CommonSPIRABIInfo(CGT) {}
   void computeInfo(CGFunctionInfo &FI) const override;
+  RValue EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, QualType Ty,
+                   AggValueSlot Slot) const override;
 
 private:
   ABIArgInfo classifyKernelArgumentType(QualType Ty) const;
@@ -207,6 +209,11 @@ void SPIRVABIInfo::computeInfo(CGFunctionInfo &FI) const {
   // arguments handling.
   llvm::CallingConv::ID CC = FI.getCallingConvention();
 
+  for (auto &&[ArgumentsCount, I] : llvm::enumerate(FI.arguments()))
+    I.info = ArgumentsCount < FI.getNumRequiredArgs()
+                 ? classifyArgumentType(I.type)
+                 : ABIArgInfo::getDirect();
+
   if (!getCXXABI().classifyReturnType(FI))
     FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
 
@@ -219,6 +226,14 @@ void SPIRVABIInfo::computeInfo(CGFunctionInfo &FI) const {
   }
 }
 
+RValue SPIRVABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+                               QualType Ty, AggValueSlot Slot) const {
+  return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*IsIndirect=*/false,
+                          getContext().getTypeInfoInChars(Ty),
+                          CharUnits::fromQuantity(1),
+                          /*AllowHigherAlign=*/true, Slot);
+}
+
 unsigned AMDGCNSPIRVABIInfo::numRegsForType(QualType Ty) const {
   // This duplicates the AMDGPUABI computation.
   unsigned NumRegs = 0;
diff --git a/clang/test/CodeGen/varargs-with-nonzero-default-address-space.c b/clang/test/CodeGen/varargs-with-nonzero-default-address-space.c
index b087da34c3dfb..16840ec7d0bfb 100644
--- a/clang/test/CodeGen/varargs-with-nonzero-default-address-space.c
+++ b/clang/test/CodeGen/varargs-with-nonzero-default-address-space.c
@@ -1,4 +1,4 @@
-// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 6
 // RUN: %clang_cc1 -triple spirv64-unknown-unknown -fcuda-is-device -emit-llvm -o - %s | FileCheck %s
 
 struct x {
@@ -8,32 +8,37 @@ struct x {
 
 // CHECK-LABEL: define spir_func void @testva(
 // CHECK-SAME: i32 noundef [[N:%.*]], ...) #[[ATTR0:[0-9]+]] {
-// CHECK-NEXT:  entry:
+// CHECK-NEXT:  [[ENTRY:.*:]]
 // CHECK-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
 // CHECK-NEXT:    [[AP:%.*]] = alloca ptr addrspace(4), align 8
 // CHECK-NEXT:    [[T:%.*]] = alloca [[STRUCT_X:%.*]], align 8
 // CHECK-NEXT:    [[AP2:%.*]] = alloca ptr addrspace(4), align 8
 // CHECK-NEXT:    [[V:%.*]] = alloca i32, align 4
-// CHECK-NEXT:    [[VARET:%.*]] = alloca i32, align 4
 // CHECK-NEXT:    [[N_ADDR_ASCAST:%.*]] = addrspacecast ptr [[N_ADDR]] to ptr addrspace(4)
 // CHECK-NEXT:    [[AP_ASCAST:%.*]] = addrspacecast ptr [[AP]] to ptr addrspace(4)
 // CHECK-NEXT:    [[T_ASCAST:%.*]] = addrspacecast ptr [[T]] to ptr addrspace(4)
 // CHECK-NEXT:    [[AP2_ASCAST:%.*]] = addrspacecast ptr [[AP2]] to ptr addrspace(4)
 // CHECK-NEXT:    [[V_ASCAST:%.*]] = addrspacecast ptr [[V]] to ptr addrspace(4)
-// CHECK-NEXT:    [[VARET_ASCAST:%.*]] = addrspacecast ptr [[VARET]] to ptr addrspace(4)
 // CHECK-NEXT:    store i32 [[N]], ptr addrspace(4) [[N_ADDR_ASCAST]], align 4
 // CHECK-NEXT:    call void @llvm.va_start.p4(ptr addrspace(4) [[AP_ASCAST]])
-// CHECK-NEXT:    [[TMP0:%.*]] = va_arg ptr addrspace(4) [[AP_ASCAST]], ptr
-// CHECK-NEXT:    call void @llvm.memcpy.p4.p0.i64(ptr addrspace(4) align 8 [[T_ASCAST]], ptr align 8 [[TMP0]], i64 16, i1 false)
+// CHECK-NEXT:    [[ARGP_CUR:%.*]] = load ptr addrspace(4), ptr addrspace(4) [[AP_ASCAST]], align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = getelementptr inbounds i8, ptr addrspace(4) [[ARGP_CUR]], i32 7
+// CHECK-NEXT:    [[ARGP_CUR_ALIGNED:%.*]] = call ptr addrspace(4) @llvm.ptrmask.p4.i64(ptr addrspace(4) [[TMP0]], i64 -8)
+// CHECK-NEXT:    [[ARGP_NEXT:%.*]] = getelementptr inbounds i8, ptr addrspace(4) [[ARGP_CUR_ALIGNED]], i64 16
+// CHECK-NEXT:    store ptr addrspace(4) [[ARGP_NEXT]], ptr addrspace(4) [[AP_ASCAST]], align 8
+// CHECK-NEXT:    call void @llvm.memcpy.p4.p4.i64(ptr addrspace(4) align 8 [[T_ASCAST]], ptr addrspace(4) align 8 [[ARGP_CUR_ALIGNED]], i64 16, i1 false)
 // CHECK-NEXT:    call void @llvm.va_copy.p4(ptr addrspace(4) [[AP2_ASCAST]], ptr addrspace(4) [[AP_ASCAST]])
-// CHECK-NEXT:    [[TMP1:%.*]] = va_arg ptr addrspace(4) [[AP2_ASCAST]], i32
-// CHECK-NEXT:    store i32 [[TMP1]], ptr addrspace(4) [[VARET_ASCAST]], align 4
-// CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr addrspace(4) [[VARET_ASCAST]], align 4
+// CHECK-NEXT:    [[ARGP_CUR1:%.*]] = load ptr addrspace(4), ptr addrspace(4) [[AP2_ASCAST]], align 8
+// CHECK-NEXT:    [[TMP1:%.*]] = getelementptr inbounds i8, ptr addrspace(4) [[ARGP_CUR1]], i32 3
+// CHECK-NEXT:    [[ARGP_CUR1_ALIGNED:%.*]] = call ptr addrspace(4) @llvm.ptrmask.p4.i64(ptr addrspace(4) [[TMP1]], i64 -4)
+// CHECK-NEXT:    [[ARGP_NEXT2:%.*]] = getelementptr inbounds i8, ptr addrspace(4) [[ARGP_CUR1_ALIGNED]], i64 4
+// CHECK-NEXT:    store ptr addrspace(4) [[ARGP_NEXT2]], ptr addrspace(4) [[AP2_ASCAST]], align 8
+// CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr addrspace(4) [[ARGP_CUR1_ALIGNED]], align 4
 // CHECK-NEXT:    store i32 [[TMP2]], ptr addrspace(4) [[V_ASCAST]], align 4
 // CHECK-NEXT:    call void @llvm.va_end.p4(ptr addrspace(4) [[AP2_ASCAST]])
 // CHECK-NEXT:    call void @llvm.va_end.p4(ptr addrspace(4) [[AP_ASCAST]])
 // CHECK-NEXT:    ret void
-
+//
 void testva(int n, ...) {
   __builtin_va_list ap;
   __builtin_va_start(ap, n);
diff --git a/clang/test/CodeGenSPIRV/Builtins/variadic.c b/clang/test/CodeGenSPIRV/Builtins/variadic.c
new file mode 100644
index 0000000000000..adf7b117812eb
--- /dev/null
+++ b/clang/test/CodeGenSPIRV/Builtins/variadic.c
@@ -0,0 +1,76 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 6
+// RUN: %clang_cc1 -triple spirv64 -emit-llvm -o - %s | FileCheck %s
+
+extern void varargs_simple(int, ...);
+
+// CHECK-LABEL: define spir_func void @foo(
+// CHECK-SAME: ) #[[ATTR0:[0-9]+]] {
+// CHECK-NEXT:  [[ENTRY:.*:]]
+// CHECK-NEXT:    [[C:%.*]] = alloca i8, align 1
+// CHECK-NEXT:    [[S:%.*]] = alloca i16, align 2
+// CHECK-NEXT:    [[I:%.*]] = alloca i32, align 4
+// CHECK-NEXT:    [[L:%.*]] = alloca i64, align 8
+// CHECK-NEXT:    [[F:%.*]] = alloca float, align 4
+// CHECK-NEXT:    [[D:%.*]] = alloca double, align 8
+// CHECK-NEXT:    [[A:%.*]] = alloca [[STRUCT_ANON:%.*]], align 4
+// CHECK-NEXT:    [[V:%.*]] = alloca <4 x i32>, align 16
+// CHECK-NEXT:    [[T:%.*]] = alloca [[STRUCT_ANON_0:%.*]], align 1
+// CHECK-NEXT:    store i8 1, ptr [[C]], align 1
+// CHECK-NEXT:    store i16 1, ptr [[S]], align 2
+// CHECK-NEXT:    store i32 1, ptr [[I]], align 4
+// CHECK-NEXT:    store i64 1, ptr [[L]], align 8
+// CHECK-NEXT:    store float 1.000000e+00, ptr [[F]], align 4
+// CHECK-NEXT:    store double 1.000000e+00, ptr [[D]], align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = load i8, ptr [[C]], align 1
+// CHECK-NEXT:    [[CONV:%.*]] = sext i8 [[TMP0]] to i32
+// CHECK-NEXT:    [[TMP1:%.*]] = load i16, ptr [[S]], align 2
+// CHECK-NEXT:    [[CONV1:%.*]] = sext i16 [[TMP1]] to i32
+// CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr [[I]], align 4
+// CHECK-NEXT:    [[TMP3:%.*]] = load i64, ptr [[L]], align 8
+// CHECK-NEXT:    [[TMP4:%.*]] = load float, ptr [[F]], align 4
+// CHECK-NEXT:    [[CONV2:%.*]] = fpext float [[TMP4]] to double
+// CHECK-NEXT:    [[TMP5:%.*]] = load double, ptr [[D]], align 8
+// CHECK-NEXT:    call spir_func void (i32, ...) @varargs_simple(i32 noundef 0, i32 noundef [[CONV]], i32 noundef [[CONV1]], i32 noundef [[TMP2]], i64 noundef [[TMP3]], double noundef [[CONV2]], double noundef [[TMP5]])
+// CHECK-NEXT:    call void @llvm.memcpy.p0.p1.i64(ptr align 4 [[A]], ptr addrspace(1) align 4 @__const.foo.a, i64 12, i1 false)
+// CHECK-NEXT:    call spir_func void (i32, ...) @varargs_simple(i32 noundef 0, ptr noundef byval([[STRUCT_ANON]]) align 4 [[A]])
+// CHECK-NEXT:    store <4 x i32> splat (i32 1), ptr [[V]], align 16
+// CHECK-NEXT:    [[TMP6:%.*]] = load <4 x i32>, ptr [[V]], align 16
+// CHECK-NEXT:    call spir_func void (i32, ...) @varargs_simple(i32 noundef 0, <4 x i32> noundef [[TMP6]])
+// CHECK-NEXT:    call spir_func void (i32, ...) @varargs_simple(i32 noundef 0, ptr noundef byval([[STRUCT_ANON_0]]) align 1 [[T]], ptr noundef byval([[STRUCT_ANON_0]]) align 1 [[T]], i32 noundef 0, ptr noundef byval([[STRUCT_ANON_0]]) align 1 [[T]])
+// CHECK-NEXT:    ret void
+//
+void foo() {
+  char c = '\x1';
+  short s = 1;
+  int i = 1;
+  long l = 1;
+  float f = 1.f;
+  double d = 1.;
+  varargs_simple(0, c, s, i, l, f, d);
+
+  struct {int x; char c; int y;} a = {1, '\x1', 1};
+  varargs_simple(0, a);
+
+  typedef int __attribute__((ext_vector_type(4))) int4;
+  int4 v = {1, 1, 1, 1};
+  varargs_simple(0, v);
+
+  struct {char c, d;} t;
+  varargs_simple(0, t, t, 0, t);
+}
+
+typedef struct {long x; long y;} S;
+extern void varargs_complex(S, S, ...);
+
+// CHECK-LABEL: define spir_func void @bar(
+// CHECK-SAME: ) #[[ATTR0]] {
+// CHECK-NEXT:  [[ENTRY:.*:]]
+// CHECK-NEXT:    [[S:%.*]] = alloca [[STRUCT_S:%.*]], align 8
+// CHECK-NEXT:    call void @llvm.memcpy.p0.p1.i64(ptr align 8 [[S]], ptr addrspace(1) align 8 @__const.bar.s, i64 16, i1 false)
+// CHECK-NEXT:    call spir_func void (ptr, ptr, ...) @varargs_complex(ptr noundef byval([[STRUCT_S]]) align 8 [[S]], ptr noundef byval([[STRUCT_S]]) align 8 [[S]], i32 noundef 1, i64 noundef 1, double noundef 1.000000e+00)
+// CHECK-NEXT:    ret void
+//
+void bar() {
+  S s = {1l, 1l};
+  varargs_complex(s, s, 1, 1l, 1.0);
+}
diff --git a/llvm/lib/Target/SPIRV/SPIRVGlobalRegistry.cpp b/llvm/lib/Target/SPIRV/SPIRVGlobalRegistry.cpp
index 9a2b0771e4dc0..adaf34b897ab3 100644
--- a/llvm/lib/Target/SPIRV/SPIRVGlobalRegistry.cpp
+++ b/llvm/lib/Target/SPIRV/SPIRVGlobalRegistry.cpp
@@ -1020,12 +1020,6 @@ SPIRVType *SPIRVGlobalRegistry::getOpTypeFunction(
     const FunctionType *Ty, SPIRVType *RetType,
     const SmallVectorImpl<SPIRVType *> &ArgTypes,
     MachineIRBuilder &MIRBuilder) {
-  if (Ty->isVarArg()) {
-    Function &Fn = MIRBuilder.getMF().getFunction();
-    Ty->getContext().diagnose(DiagnosticInfoUnsupported(
-        Fn, "SPIR-V does not support variadic functions",
-        MIRBuilder.getDebugLoc()));
-  }
   return createOpType(MIRBuilder, [&](MachineIRBuilder &MIRBuilder) {
     auto MIB = MIRBuilder.buildInstr(SPIRV::OpTypeFunction)
                    .addDef(createTypeVReg(MIRBuilder))
diff --git a/llvm/lib/Target/SPIRV/SPIRVTargetMachine.cpp b/llvm/lib/Target/SPIRV/SPIRVTargetMachine.cpp
index 10038753f4a75..4769d6a92eba4 100644
--- a/llvm/lib/Target/SPIRV/SPIRVTargetMachine.cpp
+++ b/llvm/lib/Target/SPIRV/SPIRVTargetMachine.cpp
@@ -33,6 +33,7 @@
 #include "llvm/Passes/PassBuilder.h"
 #include "llvm/Support/Compiler.h"
 #include "llvm/Target/TargetOptions.h"
+#include "llvm/Transforms/IPO/ExpandVariadics.h"
 #include "llvm/Transforms/Scalar.h"
 #include "llvm/Transforms/Utils.h"
 #include <optional>
@@ -178,6 +179,7 @@ void SPIRVPassConfig::addIRPasses() {
   addPass(createSPIRVRegularizerPass());
   addPass(createSPIRVPrepareFunctionsPass(TM));
   addPass(createSPIRVPrepareGlobalsPass());
+  addPass(createExpandVariadicsPass(ExpandVariadicsMode::Lowering));
 }
 
 void SPIRVPassConfig::addISelPrepare() {
diff --git a/llvm/lib/Transforms/IPO/ExpandVariadics.cpp b/llvm/lib/Transforms/IPO/ExpandVariadics.cpp
index 4863d6ba789a8..36c94e9395c80 100644
--- a/llvm/lib/Transforms/IPO/ExpandVariadics.cpp
+++ b/llvm/lib/Transforms/IPO/ExpandVariadics.cpp
@@ -124,6 +124,9 @@ class VariadicABIInfo {
   };
   virtual VAArgSlotInfo slotInfo(const DataLayout &DL, Type *Parameter) = 0;
 
+  // Per-target overrides of special symbols.
+  virtual bool ignoreFunction(Function *F) { return false; }
+
   // Targets implemented so far all have the same trivial lowering for these
   bool vaEndIsNop() { return true; }
   bool vaCopyIsMemcpy() { return true; }
@@ -153,6 +156,11 @@ class ExpandVariadics : public ModulePass {
 
   bool rewriteABI() { return Mode == ExpandVariadicsMode::Lowering; }
 
+  template <typename T> bool isValidCallingConv(T *F) {
+    return F->getCallingConv() == CallingConv::C ||
+           F->getCallingConv() == CallingConv::SPIR_FUNC;
+  }
+
   bool runOnModule(Module &M) override;
 
   bool runOnFunction(Module &M, IRBuilder<> &Builder, Function *F);
@@ -230,7 +238,10 @@ class ExpandVariadics : public ModulePass {
         F->hasFnAttribute(Attribute::Naked))
       return false;
 
-    if (F->getCallingConv() != CallingConv::C)
+    if (ABI->ignoreFunction(F))
+      return false;
+
+    if (!isValidCallingConv(F))
       return false;
 
     if (rewriteABI())
@@ -249,7 +260,7 @@ class ExpandVariadics : public ModulePass {
         return false;
       }
 
-      if (CI->getCallingConv() != CallingConv::C)
+      if (!isValidCallingConv(CI))
         return false;
 
       return true;
@@ -609,6 +620,9 @@ bool ExpandVariadics::expandCall(Module &M, IRBuilder<> &Builder, CallBase *CB,
   bool Changed = false;
   const DataLayout &DL = M.getDataLayout();
 
+  if (ABI->ignoreFunction(CB->getCalledFunction()))
+    return Changed;
+
   if (!expansionApplicableToFunctionCall(CB)) {
     if (rewriteABI())
       report_fatal_error("Cannot lower callbase instruction");
@@ -940,6 +954,39 @@ struct NVPTX final : public VariadicABIInfo {
   }
 };
 
+struct SPIRV final : public VariadicABIInfo {
+
+  bool enableForTarget() override { return true; }
+
+  bool vaListPassedInSSARegister() override { return true; }
+
+  Type *vaListType(LLVMContext &Ctx) override {
+    return PointerType::getUnqual(Ctx);
+  }
+
+  Type *vaListParameterType(Module &M) override {
+    return PointerType::getUnqual(M.getContext());
+  }
+
+  Value *initializeVaList(Module &M, LLVMContext &Ctx, IRBuilder<> &Builder,
+                          AllocaInst *, Value *Buffer) override {
+    return Builder.CreateAddrSpaceCast(Buffer, vaListParameterType(M));
+  }
+
+  VAArgSlotInfo slotInfo(const DataLayout &DL, Type *Parameter) override {
+    // Expects natural alignment in all cases. The variadic call ABI will handle
+    // promoting types to their appropriate size and alignment.
+    Align A = DL.getABITypeAlign(Parameter);
+    return {A, false};
+  }
+
+  // The SPIR-V backend has special handling for SPIR-V mangled printf
+  // functions.
+  bool ignoreFunction(Function *F) override {
+    return F->getName().starts_with('_') && F->getName().contains("printf");
+  }
+};
+
 struct Wasm final : public VariadicABIInfo {
 
   bool enableForTarget() override {
@@ -995,6 +1042,11 @@ std::unique_ptr<VariadicABIInfo> VariadicABIInfo::create(const Triple &T) {
     return std::make_unique<NVPTX>();
   }
 
+  case Triple::spirv:
+  case Triple::spirv64: {
+    return std::make_unique<SPIRV>();
+  }
+
   default:
     return {};
   }
diff --git a/llvm/test/CodeGen/SPIRV/function/vararg.ll b/llvm/test/CodeGen/SPIRV/function/vararg.ll
deleted file mode 100644
index 7f734834ccf51..0000000000000
--- a/llvm/test/CodeGen/SPIRV/function/vararg.ll
+++ /dev/null
@@ -1,10 +0,0 @@
-; RUN: not llc -verify-machineinstrs -O0 -mtriple=spirv64-unknown-unknown --spirv-ext=+SPV_INTEL_function_pointers < %s 2>&1 | FileCheck %s
-
-define void @bar() {
-entry:
-  call spir_func void (i32, ...) @_Z3fooiz(i32 5, i32 3)
-  ret void
-}
-
-; CHECK:error: {{.*}} in function bar void (): SPIR-V does not support variadic functions
-declare spir_func void @_Z3fooiz(i32, ...)
diff --git a/llvm/test/CodeGen/SPIRV/function/variadics-lowering.ll b/llvm/test/CodeGen/SPIRV/function/variadics-lowering.ll
new file mode 100644
index 0000000000000..5ce4414c65ada
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/function/variadics-lowering.ll
@@ -0,0 +1,135 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6
+; RUN: opt -S -mtriple=spirv64-- --passes=expand-variadics --expand-variadics-override=lowering < %s | FileCheck %s
+
+%struct.S = type { i64, i64 }
+%struct.anon = type { i32, i8, i32 }
+%struct.anon.0 = type { i8, i8 }
+
+@__const.foo.a = private unnamed_addr addrspace(1) constant { i32, i8, [3 x i8], i32 } { i32 1, i8 1, [3 x i8] zeroinitializer, i32 1 }, align 4
+@__const.bar.s = private unnamed_addr addrspace(1) constant %struct.S { i64 1, i64 1 }, align 8
+
+define spir_func void @foo() {
+; CHECK-LABEL: define spir_func void @foo() {
+; CHECK-NEXT:  [[ENTRY:.*:]]
+; CHECK-NEXT:    [[C:%.*]] = alloca i8, align 1
+; CHECK-NEXT:    [[S:%.*]] = alloca i16, align 2
+; CHECK-NEXT:    [[I:%.*]] = alloca i32, align 4
+; CHECK-NEXT:    [[L:%.*]] = alloca i64, align 8
+; CHECK-NEXT:    [[F:%.*]] = alloca float, align 4
+; CHECK-NEXT:    [[D:%.*]] = alloca double, align 8
+; CHECK-NEXT:    [[A:%.*]] = alloca [[STRUCT_ANON:%.*]], align 4
+; CHECK-NEXT:    [[V:%.*]] = alloca <4 x i32>, align 16
+; CHECK-NEXT:    [[T:%.*]] = alloca [[STRUCT_ANON_0:%.*]], align 1
+; CHECK-NEXT:    [[VARARG_BUFFER:%.*]] = alloca [[FOO_VARARG:%.*]], align 4
+; CHECK-NEXT:    [[VARARG_BUFFER1:%.*]] = alloca [[FOO_VARARG_0:%.*]], align 16
+; CHECK-NEXT:    [[VARARG_BUFFER2:%.*]] = alloca [[FOO_VARARG_1:%.*]], align 4
+; CHECK-NEXT:    [[VARARG_BUFFER3:%.*]] = alloca [[FOO_VARARG_2:%.*]], align 8
+; CHECK-NEXT:    [[VARARG_BUFFER4:%.*]] = alloca [[FOO_VARARG_3:%.*]], align 8
+; CHECK-NEXT:    store i8 1, ptr [[C]], align 1
+; CHECK-NEXT:    store i16 1, ptr [[S]], align 2
+; CHECK-NEXT:    store i32 1, ptr [[I]], align 4
+; CHECK-NEXT:    store i64 1, ptr [[L]], align 8
+; CHECK-NEXT:    store float 1.000000e+00, ptr [[F]], align 4
+; CHECK-NEXT:    store double 1.000000e+00, ptr [[D]], align 8
+; CHECK-NEXT:    [[TMP0:%.*]] = load i8, ptr [[C]], align 1
+; CHECK-NEXT:    [[CONV:%.*]] = sext i8 [[TMP0]] to i32
+; CHECK-NEXT:    [[TMP1:%.*]] = load i16, ptr [[S]], align 2
+; CHECK-NEXT:    [[CONV1:%.*]] = sext i16 [[TMP1]] to i32
+; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr [[I]], align 4
+; CHECK-NEXT:    [[TMP3:%.*]] = load i64, ptr [[L]], align 8
+; CHECK-NEXT:    [[TMP4:%.*]] = load float, ptr [[F]], align 4
+; CHECK-NEXT:    [[CONV2:%.*]] = fpext float [[TMP4]] to double
+; CHECK-NEXT:    [[TMP5:%.*]] = load double, ptr [[D]], align 8
+; CHECK-NEXT:    call void @llvm.lifetime.start.p0(ptr [[VARARG_BUFFER3]])
+; CHECK-NEXT:    [[TMP13:%.*]] = getelementptr inbounds nuw [[FOO_VARARG_2]], ptr [[VARARG_BUFFER3]], i32 0, i32 0
+; CHECK-NEXT:    store i32 [[CONV]], ptr [[TMP13]], align 4
+; CHECK-NEXT:    [[TMP7:%.*]] = getelementptr inbounds nuw [[FOO_VARARG_2]], ptr [[VARARG_BUFFER3]], i32 0, i32 1
+; CHECK-NEXT:    store i32 [[CONV1]], ptr [[TMP7]], align 4
+; CHECK-NEXT:    [[TMP8:%.*]] = getelementptr inbounds nuw [[FOO_VARARG_2]], ptr [[VARARG_BUFFER3]], i32 0, i32 2
+; CHECK-NEXT:    store i32 [[TMP2]], ptr [[TMP8]], align 4
+; CHECK-NEXT:    [[TMP9:%.*]] = getelementptr inbounds nuw [[FOO_VARARG_2]], ptr [[VARARG_BUFFER3]], i32 0, i32 4
+; CHECK-NEXT:    store i64 [[TMP3]], ptr [[TMP9]], align 8
+; CHECK-NEXT:    [[TMP10:%.*]] = getelementptr inbounds nuw [[FOO_VARARG_2]], ptr [[VARARG_BUFFER3]], i32 0, i32 5
+; CHECK-NEXT:    store double [[CONV2]], ptr [[TMP10]], align 8
+; CHECK-NEXT:    [[TMP11:%.*]] = getelementptr inbounds nuw [[FOO_VARARG_2]], ptr [[VARARG_BUFFER3]], i32 0, i32 6
+; CHECK-NEXT:    store double [[TMP5]], ptr [[TMP11]], align 8
+; CHECK-NEXT:    call spir_func void @varargs_simple(i32 0, ptr [[VARARG_BUFFER3]])
+; CHECK-NEXT:    call void @llvm.lifetime.end.p0(ptr [[VARARG_BUFFER3]])
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p1.i64(ptr align 4 [[A]], ptr addrspace(1) align 4 @__const.foo.a, i64 12, i1 false)
+; CHECK-NEXT:    call void @llvm.lifetime.start.p0(ptr [[VARARG_BUFFER2]])
+; CHECK-NEXT:    [[TMP12:%.*]] = getelementptr inbounds nuw [[FOO_VARARG_1]], ptr [[VARARG_BUFFER2]], i32 0, i32 0
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr [[TMP12]], ptr [[A]], i64 12, i1 false)
+; CHECK-NEXT:    call spir_func void @varargs_simple(i32 0, ptr [[VARARG_BUFFER2]])
+; CHECK-NEXT:    call void @llvm.lifetime.end.p0(ptr [[VARARG_BUFFER2]])
+; CHECK-NEXT:    store <4 x i32> splat (i32 1), ptr [[V]], align 16
+; CHECK-NEXT:    [[TMP6:%.*]] = load <4 x i32>, ptr [[V]], align 16
+; CHECK-NEXT:    call void @llvm.lifetime.start.p0(ptr [[VARARG_BUFFER1]])
+; CHECK-NEXT:    [[TMP14:%.*]] = getelementptr inbounds nuw [[FOO_VARARG_0]], ptr [[VARARG_BUFFER1]], i32 0, i32 0
+; CHECK-NEXT:    store <4 x i32> [[TMP6]], ptr [[TMP14]], align 16
+; CHECK-NEXT:    call spir_func void @varargs_simple(i32 0, ptr [[VARARG_BUFFER1]])
+; CHECK-NEXT:    call void @llvm.lifetime.end.p0(ptr [[VARARG_BUFFER1]])
+; CHECK-NEXT:    call void @llvm.lifetime.start.p0(ptr [[VARARG_BUFFER]])
+; CHECK-NEXT:    [[TMP15:%.*]] = getelementptr inbounds nuw [[FOO_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 0
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr [[TMP15]], ptr [[T]], i64 2, i1 false)
+; CHECK-NEXT:    [[TMP16:%.*]] = getelementptr inbounds nuw [[FOO_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 1
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr [[TMP16]], ptr [[T]], i64 2, i1 false)
+; CHECK-NEXT:    [[TMP17:%.*]] = getelementptr inbounds nuw [[FOO_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 2
+; CHECK-NEXT:    store i32 0, ptr [[TMP17]], align 4
+; CHECK-NEXT:    [[TMP18:%.*]] = getelementptr inbounds nuw [[FOO_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 3
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr [[TMP18]], ptr [[T]], i64 2, i1 false)
+; CHECK-NEXT:    call spir_func void @varargs_simple(i32 0, ptr [[VARARG_BUFFER]])
+; CHECK-NEXT:    call void @llvm.lifetime.end.p0(ptr [[VARARG_BUFFER]])
+; CHECK-NEXT:    [[R:%.*]] = alloca [[STRUCT_S:%.*]], align 8
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p1.i64(ptr align 8 [[R]], ptr addrspace(1) align 8 @__const.bar.s, i64 16, i1 false)
+; CHECK-NEXT:    call void @llvm.lifetime.start.p0(ptr [[VARARG_BUFFER4]])
+; CHECK-NEXT:    [[TMP19:%.*]] = getelementptr inbounds nuw [[FOO_VARARG_3]], ptr [[VARARG_BUFFER4]], i32 0, i32 0
+; CHECK-NEXT:    store i32 1, ptr [[TMP19]], align 4
+; CHECK-NEXT:    [[TMP20:%.*]] = getelementptr inbounds nuw [[FOO_VARARG_3]], ptr [[VARARG_BUFFER4]], i32 0, i32 2
+; CHECK-NEXT:    store i64 1, ptr [[TMP20]], align 8
+; CHECK-NEXT:    [[TMP21:%.*]] = getelementptr inbounds nuw [[FOO_VARARG_3]], ptr [[VARARG_BUFFER4]], i32 0, i32 3
+; CHECK-NEXT:    store double 1.000000e+00, ptr [[TMP21]], align 8
+; CHECK-NEXT:    call spir_func void @varargs_complex(ptr byval([[STRUCT_S]]) align 8 [[R]], ptr byval([[STRUCT_S]]) align 8 [[S]], ptr [[VARARG_BUFFER4]])
+; CHECK-NEXT:    call void @llvm.lifetime.end.p0(ptr [[VARARG_BUFFER4]])
+; CHECK-NEXT:    ret void
+;
+entry:
+  %c = alloca i8, align 1
+  %s = alloca i16, align 2
+  %i = alloca i32, align 4
+  %l = alloca i64, align 8
+  %f = alloca float, align 4
+  %d = alloca double, align 8
+  %a = alloca %struct.anon, align 4
+  %v = alloca <4 x i32>, align 16
+  %t = alloca %struct.anon.0, align 1
+  store i8 1, ptr %c, align 1
+  store i16 1, ptr %s, align 2
+  store i32 1, ptr %i, align 4
+  store i64 1, ptr %l, align 8
+  store float 1.000000e+00, ptr %f, align 4
+  store double 1.000000e+00, ptr %d, align 8
+  %0 = load i8, ptr %c, align 1
+  %conv = sext i8 %0 to i32
+  %1 = load i16, ptr %s, align 2
+  %conv1 = sext i16 %1 to i32
+  %2 = load i32, ptr %i, align 4
+  %3 = load i64, ptr %l, align 8
+  %4 = load float, ptr %f, align 4
+  %conv2 = fpext float %4 to double
+  %5 = load double, ptr %d, align 8
+  call spir_func void (i32, ...) @varargs_simple(i32 0, i32 %conv, i32 %conv1, i32 %2, i64 %3, double %conv2, double %5)
+  call void @llvm.memcpy.p0.p1.i64(ptr align 4 %a, ptr addrspace(1) align 4 @__const.foo.a, i64 12, i1 false)
+  call spir_func void (i32, ...) @varargs_simple(i32 0, ptr byval(%struct.anon) align 4 %a)
+  store <4 x i32> splat (i32 1), ptr %v, align 16
+  %6 = load <4 x i32>, ptr %v, align 16
+  call spir_func void (i32, ...) @varargs_simple(i32 0, <4 x i32> %6)
+  call spir_func void (i32, ...) @varargs_simple(i32 0, ptr byval(%struct.anon.0) align 1 %t, ptr byval(%struct.anon.0) align 1 %t, i32 0, ptr byval(%struct.anon.0) align 1 %t)
+  %r = alloca %struct.S, align 8
+  call void @llvm.memcpy.p0.p1.i64(ptr align 8 %r, ptr addrspace(1) align 8 @__const.bar.s, i64 16, i1 false)
+  call spir_func void (ptr, ptr, ...) @varargs_complex(ptr byval(%struct.S) align 8 %r, ptr byval(%struct.S) align 8 %s, i32 1, i64 1, double 1.000000e+00)
+  ret void
+}
+
+declare spir_func void @varargs_simple(i32 noundef, ...)
+
+declare spir_func void @varargs_complex(ptr byval(%struct.S) align 8, ptr byval(%struct.S) align 8, ...)
diff --git a/llvm/test/CodeGen/SPIRV/llc-pipeline.ll b/llvm/test/CodeGen/SPIRV/llc-pipeline.ll
index cbd06ae1eec4e..2f8b9decaaba3 100644
--- a/llvm/test/CodeGen/SPIRV/llc-pipeline.ll
+++ b/llvm/test/CodeGen/SPIRV/llc-pipeline.ll
@@ -33,6 +33,7 @@
 ; SPIRV-O0-NEXT:      SPIR-V Regularizer
 ; SPIRV-O0-NEXT:    SPIRV prepare functions
 ; SPIRV-O0-NEXT:    SPIRV prepare global variables
+; SPIRV-O0-NEXT:    Expand variadic functions
 ; SPIRV-O0-NEXT:    FunctionPass Manager
 ; SPIRV-O0-NEXT:      Lower invoke and unwind, for unwindless code generators
 ; SPIRV-O0-NEXT:      Remove unreachable blocks from the CFG
@@ -136,6 +137,7 @@
 ; SPIRV-Opt-NEXT:      SPIR-V Regularizer
 ; SPIRV-Opt-NEXT:    SPIRV prepare functions
 ; SPIRV-Opt-NEXT:    SPIRV prepare global variables
+; SPIRV-Opt-NEXT:    Expand variadic functions
 ; SPIRV-Opt-NEXT:    FunctionPass Manager
 ; SPIRV-Opt-NEXT:      Dominator Tree Construction
 ; SPIRV-Opt-NEXT:      Natural Loop Information
