https://github.com/chichunchen updated https://github.com/llvm/llvm-project/pull/178087
>From 8c9ffd1cf3e046865917ddf2c4eebbc180cf9701 Mon Sep 17 00:00:00 2001 From: cchen <[email protected]> Date: Wed, 21 Jan 2026 23:19:22 -0600 Subject: [PATCH] [mlir][OpenMP] Translate omp.declare_simd to LLVM IR (Only for x86) This patch aims to generate the same vector ABI [1] for declare simd as Clang does, and reuses the function parameter mangling and codegen logic authored by @alexey-bataev in [2]. Codegen for AArch64 is not included in this patch. --- For each `omp.declare_simd`, lowering computes: 1) ParamAttrs: one entry per function argument, classifying it as Vector / Uniform / Linear (+ step or var-stride) / Aligned. 2) Branch kind: Undefined / Inbranch / Notinbranch. 3) VLEN: either from `simdlen(...)` or derived from the CDT size. LLVM then emits x86 declare-simd variants by attaching mangled function attributes of the form: _ZGV <ISA> <Mask> <VLEN> <ParamAttrs> _ <FunctionName> where: - ISA : b (SSE), c (AVX), d (AVX2), e (AVX-512) - Mask : M (inbranch), N (notinbranch), or both if unspecified - VLEN : explicit simdlen or computed from CDT size - ParamAttrs encoding: v = vector, u = uniform, l = linear sN = var-stride using argument index N aN = alignment N [1] https://sourceware.org/glibc/wiki/libmvec?action=AttachFile&do=view&target=VectorABI.txt [2] https://github.com/llvm/llvm-project/commit/c7a82b41a706728ce7c212b5bc40c74d1cce53c7 --- clang/lib/CodeGen/CGOpenMPRuntime.cpp | 114 +++------ .../llvm/Frontend/OpenMP/OMPDeclareSimd.h | 61 +++++ llvm/lib/Frontend/OpenMP/CMakeLists.txt | 1 + llvm/lib/Frontend/OpenMP/OMPDeclareSimd.cpp | 182 +++++++++++++++ mlir/lib/Dialect/OpenMP/IR/OpenMPDialect.cpp | 77 ++++--- .../OpenMP/OpenMPToLLVMIRTranslation.cpp | 174 ++++++++++++++ mlir/test/Dialect/OpenMP/invalid.mlir | 20 ++ .../LLVMIR/openmp-declare-simd-aarch64.mlir | 19 ++ .../LLVMIR/openmp-declare-simd-x86.mlir | 218 ++++++++++++++++++ 9 files changed, 751 insertions(+), 115 deletions(-) create mode 100644 llvm/include/llvm/Frontend/OpenMP/OMPDeclareSimd.h create mode 100644 llvm/lib/Frontend/OpenMP/OMPDeclareSimd.cpp create mode 100644 mlir/test/Target/LLVMIR/openmp-declare-simd-aarch64.mlir create mode 100644 mlir/test/Target/LLVMIR/openmp-declare-simd-x86.mlir diff --git a/clang/lib/CodeGen/CGOpenMPRuntime.cpp b/clang/lib/CodeGen/CGOpenMPRuntime.cpp index ac16ce6b6e768..6c19193e179ef 100644 --- a/clang/lib/CodeGen/CGOpenMPRuntime.cpp +++ b/clang/lib/CodeGen/CGOpenMPRuntime.cpp @@ -33,6 +33,7 @@ #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/StringExtras.h" #include "llvm/Bitcode/BitcodeReader.h" +#include "llvm/Frontend/OpenMP/OMPDeclareSimd.h" #include "llvm/IR/Constants.h" #include "llvm/IR/DerivedTypes.h" #include "llvm/IR/GlobalValue.h" @@ -11812,27 +11813,8 @@ void CGOpenMPRuntime::emitTargetDataStandAloneCall( } } -namespace { - /// Kind of parameter in a function with 'declare simd' directive. -enum ParamKindTy { - Linear, - LinearRef, - LinearUVal, - LinearVal, - Uniform, - Vector, -}; -/// Attribute set of the parameter. -struct ParamAttrTy { - ParamKindTy Kind = Vector; - llvm::APSInt StrideOrArg; - llvm::APSInt Alignment; - bool HasVarStride = false; -}; -} // namespace - static unsigned evaluateCDTSize(const FunctionDecl *FD, - ArrayRef<ParamAttrTy> ParamAttrs) { + ArrayRef<DeclareSimdAttrTy> ParamAttrs) { // Every vector variant of a SIMD-enabled function has a vector length (VLEN). // If OpenMP clause "simdlen" is used, the VLEN is the value of the argument // of that clause. The VLEN value must be power of 2.
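As a concrete illustration of the mangling grammar described in the commit message above, the following standalone C++ sketch assembles one _ZGV attribute string from the described pieces. It is not code from this patch (the patch's real entry points are llvm::omp::mangleVectorParameters and llvm::omp::emitDeclareSimdFunction); the names ParamClass and mangleVariant are made up for the example, and negative linear steps and the R/U/L linear variants are left out.

#include <cstdint>
#include <iostream>
#include <string>
#include <vector>

// Illustrative parameter classification mirroring the encoding above:
// v = vector, u = uniform, l = linear, sN = variable stride taken from
// argument N, aN = alignment N.
struct ParamClass {
  char Code;             // 'v', 'u', or 'l'
  int64_t Step = 1;      // linear step, printed only when != 1
  int VarStrideArg = -1; // >= 0 => emit "s<arg-index>" instead of a step
  uint64_t Align = 0;    // > 0 => emit "a<alignment>"
};

// Assemble "_ZGV<ISA><Mask><VLEN><ParamAttrs>_<name>" for one variant.
static std::string mangleVariant(char ISA, char Mask, unsigned VLEN,
                                 const std::vector<ParamClass> &Params,
                                 const std::string &FnName) {
  std::string Out = "_ZGV";
  Out += ISA;
  Out += Mask;
  Out += std::to_string(VLEN);
  for (const ParamClass &P : Params) {
    Out += P.Code;
    if (P.VarStrideArg >= 0)
      Out += "s" + std::to_string(P.VarStrideArg);
    else if (P.Code == 'l' && P.Step != 1)
      Out += std::to_string(P.Step);
    if (P.Align)
      Out += "a" + std::to_string(P.Align);
  }
  Out += "_" + FnName;
  return Out;
}

int main() {
  // vector, uniform, linear with a variable stride taken from argument 3,
  // vector; AVX2 ('d'), unmasked ('N'), simdlen(8).
  std::vector<ParamClass> Params = {
      {'v'}, {'u'}, {'l', /*Step=*/1, /*VarStrideArg=*/3}, {'v'}};
  std::cout << mangleVariant('d', 'N', 8, Params,
                             "ds_uniform_linear_var_stride")
            << "\n"; // _ZGVdN8vuls3v_ds_uniform_linear_var_stride
  return 0;
}

With simdlen(8) and an unmasked AVX2 variant this prints the same string the x86 test further below expects, _ZGVdN8vuls3v_ds_uniform_linear_var_stride.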
@@ -11862,13 +11844,13 @@ static unsigned evaluateCDTSize(const FunctionDecl *FD, } else { unsigned Offset = 0; if (const auto *MD = dyn_cast<CXXMethodDecl>(FD)) { - if (ParamAttrs[Offset].Kind == Vector) + if (ParamAttrs[Offset].Kind == DeclareSimdKindTy::Vector) CDT = C.getPointerType(C.getCanonicalTagType(MD->getParent())); ++Offset; } if (CDT.isNull()) { for (unsigned I = 0, E = FD->getNumParams(); I < E; ++I) { - if (ParamAttrs[I + Offset].Kind == Vector) { + if (ParamAttrs[I + Offset].Kind == DeclareSimdKindTy::Vector) { CDT = FD->getParamDecl(I)->getType(); break; } @@ -11883,56 +11865,10 @@ static unsigned evaluateCDTSize(const FunctionDecl *FD, return C.getTypeSize(CDT); } -/// Mangle the parameter part of the vector function name according to -/// their OpenMP classification. The mangling function is defined in -/// section 4.5 of the AAVFABI(2021Q1). -static std::string mangleVectorParameters(ArrayRef<ParamAttrTy> ParamAttrs) { - SmallString<256> Buffer; - llvm::raw_svector_ostream Out(Buffer); - for (const auto &ParamAttr : ParamAttrs) { - switch (ParamAttr.Kind) { - case Linear: - Out << 'l'; - break; - case LinearRef: - Out << 'R'; - break; - case LinearUVal: - Out << 'U'; - break; - case LinearVal: - Out << 'L'; - break; - case Uniform: - Out << 'u'; - break; - case Vector: - Out << 'v'; - break; - } - if (ParamAttr.HasVarStride) - Out << "s" << ParamAttr.StrideOrArg; - else if (ParamAttr.Kind == Linear || ParamAttr.Kind == LinearRef || - ParamAttr.Kind == LinearUVal || ParamAttr.Kind == LinearVal) { - // Don't print the step value if it is not present or if it is - // equal to 1. - if (ParamAttr.StrideOrArg < 0) - Out << 'n' << -ParamAttr.StrideOrArg; - else if (ParamAttr.StrideOrArg != 1) - Out << ParamAttr.StrideOrArg; - } - - if (!!ParamAttr.Alignment) - Out << 'a' << ParamAttr.Alignment; - } - - return std::string(Out.str()); -} - static void emitX86DeclareSimdFunction(const FunctionDecl *FD, llvm::Function *Fn, const llvm::APSInt &VLENVal, - ArrayRef<ParamAttrTy> ParamAttrs, + ArrayRef<DeclareSimdAttrTy> ParamAttrs, OMPDeclareSimdDeclAttr::BranchStateTy State) { struct ISADataTy { char ISA; @@ -11977,7 +11913,7 @@ emitX86DeclareSimdFunction(const FunctionDecl *FD, llvm::Function *Fn, } else { Out << VLENVal; } - Out << mangleVectorParameters(ParamAttrs); + Out << llvm::omp::mangleVectorParameters(ParamAttrs); Out << '_' << Fn->getName(); Fn->addFnAttr(Out.str()); } @@ -11991,19 +11927,21 @@ emitX86DeclareSimdFunction(const FunctionDecl *FD, llvm::Function *Fn, // https://developer.arm.com/products/software-development-tools/hpc/arm-compiler-for-hpc/vector-function-abi. /// Maps To Vector (MTV), as defined in 4.1.1 of the AAVFABI (2021Q1). 
-static bool getAArch64MTV(QualType QT, ParamKindTy Kind) { +static bool getAArch64MTV(QualType QT, DeclareSimdKindTy Kind) { QT = QT.getCanonicalType(); if (QT->isVoidType()) return false; - if (Kind == ParamKindTy::Uniform) + if (Kind == DeclareSimdKindTy::Uniform) return false; - if (Kind == ParamKindTy::LinearUVal || Kind == ParamKindTy::LinearRef) + if (Kind == DeclareSimdKindTy::LinearUVal || + Kind == DeclareSimdKindTy::LinearRef) return false; - if ((Kind == ParamKindTy::Linear || Kind == ParamKindTy::LinearVal) && + if ((Kind == DeclareSimdKindTy::Linear || + Kind == DeclareSimdKindTy::LinearVal) && !QT->isReferenceType()) return false; @@ -12036,7 +11974,8 @@ static bool getAArch64PBV(QualType QT, ASTContext &C) { /// Computes the lane size (LS) of a return type or of an input parameter, /// as defined by `LS(P)` in 3.2.1 of the AAVFABI. /// TODO: Add support for references, section 3.2.1, item 1. -static unsigned getAArch64LS(QualType QT, ParamKindTy Kind, ASTContext &C) { +static unsigned getAArch64LS(QualType QT, DeclareSimdKindTy Kind, + ASTContext &C) { if (!getAArch64MTV(QT, Kind) && QT.getCanonicalType()->isPointerType()) { QualType PTy = QT.getCanonicalType()->getPointeeType(); if (getAArch64PBV(PTy, C)) @@ -12052,7 +11991,7 @@ static unsigned getAArch64LS(QualType QT, ParamKindTy Kind, ASTContext &C) { // signature of the scalar function, as defined in 3.2.2 of the // AAVFABI. static std::tuple<unsigned, unsigned, bool> -getNDSWDS(const FunctionDecl *FD, ArrayRef<ParamAttrTy> ParamAttrs) { +getNDSWDS(const FunctionDecl *FD, ArrayRef<DeclareSimdAttrTy> ParamAttrs) { QualType RetType = FD->getReturnType().getCanonicalType(); ASTContext &C = FD->getASTContext(); @@ -12061,7 +12000,7 @@ getNDSWDS(const FunctionDecl *FD, ArrayRef<ParamAttrTy> ParamAttrs) { llvm::SmallVector<unsigned, 8> Sizes; if (!RetType->isVoidType()) { - Sizes.push_back(getAArch64LS(RetType, ParamKindTy::Vector, C)); + Sizes.push_back(getAArch64LS(RetType, DeclareSimdKindTy::Vector, C)); if (!getAArch64PBV(RetType, C) && getAArch64MTV(RetType, {})) OutputBecomesInput = true; } @@ -12140,7 +12079,7 @@ static void addAArch64AdvSIMDNDSNames(unsigned NDS, StringRef Mask, /// Emit vector function attributes for AArch64, as defined in the AAVFABI. static void emitAArch64DeclareSimdFunction( CodeGenModule &CGM, const FunctionDecl *FD, unsigned UserVLEN, - ArrayRef<ParamAttrTy> ParamAttrs, + ArrayRef<DeclareSimdAttrTy> ParamAttrs, OMPDeclareSimdDeclAttr::BranchStateTy State, StringRef MangledName, char ISA, unsigned VecRegSize, llvm::Function *Fn, SourceLocation SLoc) { @@ -12174,7 +12113,7 @@ static void emitAArch64DeclareSimdFunction( } // Sort out parameter sequence. - const std::string ParSeq = mangleVectorParameters(ParamAttrs); + const std::string ParSeq = llvm::omp::mangleVectorParameters(ParamAttrs); StringRef Prefix = "_ZGV"; // Generate simdlen from user input (if any). if (UserVLEN) { @@ -12250,7 +12189,7 @@ void CGOpenMPRuntime::emitDeclareSimdFunction(const FunctionDecl *FD, ++ParamPos; } for (const auto *Attr : FD->specific_attrs<OMPDeclareSimdDeclAttr>()) { - llvm::SmallVector<ParamAttrTy, 8> ParamAttrs(ParamPositions.size()); + llvm::SmallVector<DeclareSimdAttrTy, 8> ParamAttrs(ParamPositions.size()); // Mark uniform parameters. 
for (const Expr *E : Attr->uniforms()) { E = E->IgnoreParenImpCasts(); @@ -12264,7 +12203,7 @@ void CGOpenMPRuntime::emitDeclareSimdFunction(const FunctionDecl *FD, assert(It != ParamPositions.end() && "Function parameter not found"); Pos = It->second; } - ParamAttrs[Pos].Kind = Uniform; + ParamAttrs[Pos].Kind = DeclareSimdKindTy::Uniform; } // Get alignment info. auto *NI = Attr->alignments_begin(); @@ -12325,15 +12264,15 @@ void CGOpenMPRuntime::emitDeclareSimdFunction(const FunctionDecl *FD, .getQuantity(); } } - ParamAttrTy &ParamAttr = ParamAttrs[Pos]; + DeclareSimdAttrTy &ParamAttr = ParamAttrs[Pos]; if (*MI == OMPC_LINEAR_ref) - ParamAttr.Kind = LinearRef; + ParamAttr.Kind = DeclareSimdKindTy::LinearRef; else if (*MI == OMPC_LINEAR_uval) - ParamAttr.Kind = LinearUVal; + ParamAttr.Kind = DeclareSimdKindTy::LinearUVal; else if (IsReferenceType) - ParamAttr.Kind = LinearVal; + ParamAttr.Kind = DeclareSimdKindTy::LinearVal; else - ParamAttr.Kind = Linear; + ParamAttr.Kind = DeclareSimdKindTy::Linear; // Assuming a stride of 1, for `linear` without modifiers. ParamAttr.StrideOrArg = llvm::APSInt::getUnsigned(1); if (*SI) { @@ -12358,7 +12297,8 @@ void CGOpenMPRuntime::emitDeclareSimdFunction(const FunctionDecl *FD, // rescale the value of linear_step with the byte size of the // pointee type. if (!ParamAttr.HasVarStride && - (ParamAttr.Kind == Linear || ParamAttr.Kind == LinearRef)) + (ParamAttr.Kind == DeclareSimdKindTy::Linear || + ParamAttr.Kind == DeclareSimdKindTy::LinearRef)) ParamAttr.StrideOrArg = ParamAttr.StrideOrArg * PtrRescalingFactor; ++SI; ++MI; diff --git a/llvm/include/llvm/Frontend/OpenMP/OMPDeclareSimd.h b/llvm/include/llvm/Frontend/OpenMP/OMPDeclareSimd.h new file mode 100644 index 0000000000000..bbfa63a9cd4ec --- /dev/null +++ b/llvm/include/llvm/Frontend/OpenMP/OMPDeclareSimd.h @@ -0,0 +1,61 @@ +//===- OMPDeclareSimd.h - OpenMP declare simd types and helpers - C++ -*-=====// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +/// \file +/// +/// This file defines types and helpers used when dealing with OpenMP declare +/// simd. +/// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_FRONTEND_OPENMP_OMPDECLARESIMD_H +#define LLVM_FRONTEND_OPENMP_OMPDECLARESIMD_H + +#include "llvm/ADT/APSInt.h" +#include "llvm/IR/Function.h" + +namespace llvm { +namespace omp { + +/// Kind of parameter in a function with 'declare simd' directive. +enum class DeclareSimdKindTy { + Linear, + LinearRef, + LinearUVal, + LinearVal, + Uniform, + Vector, +}; + +/// Attribute set of the `declare simd` parameter. +struct DeclareSimdAttrTy { + DeclareSimdKindTy Kind = DeclareSimdKindTy::Vector; + llvm::APSInt StrideOrArg; + llvm::APSInt Alignment; + bool HasVarStride = false; +}; + +/// Type of branch clause of the `declare simd` directive. 
+enum class DeclareSimdBranch { + Undefined, + Notinbranch, + Inbranch, +}; + +std::string +mangleVectorParameters(llvm::ArrayRef<llvm::omp::DeclareSimdAttrTy> ParamAttrs); + +void emitDeclareSimdFunction( + llvm::Function *Fn, const llvm::APSInt &VLENVal, + llvm::ArrayRef<llvm::omp::DeclareSimdAttrTy> ParamAttrs, + DeclareSimdBranch Branch); + +} // end namespace omp + +} // end namespace llvm + +#endif // LLVM_FRONTEND_OPENMP_OMPDECLARESIMD_H diff --git a/llvm/lib/Frontend/OpenMP/CMakeLists.txt b/llvm/lib/Frontend/OpenMP/CMakeLists.txt index e60b59c1203b9..68db83531a625 100644 --- a/llvm/lib/Frontend/OpenMP/CMakeLists.txt +++ b/llvm/lib/Frontend/OpenMP/CMakeLists.txt @@ -1,6 +1,7 @@ add_llvm_component_library(LLVMFrontendOpenMP OMP.cpp OMPContext.cpp + OMPDeclareSimd.cpp OMPIRBuilder.cpp DirectiveNameParser.cpp diff --git a/llvm/lib/Frontend/OpenMP/OMPDeclareSimd.cpp b/llvm/lib/Frontend/OpenMP/OMPDeclareSimd.cpp new file mode 100644 index 0000000000000..5ed29c890639a --- /dev/null +++ b/llvm/lib/Frontend/OpenMP/OMPDeclareSimd.cpp @@ -0,0 +1,182 @@ +//===- OMPDeclareSimd.cpp --- Helpers for OpenMP DeclareSimd --------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +/// \file +/// +/// This file defines types and helpers used when dealing with OpenMP declare +/// simd. +/// +//===----------------------------------------------------------------------===// + +#include "llvm/Frontend/OpenMP/OMPDeclareSimd.h" +#include "llvm/ADT/SmallString.h" +#include "llvm/IR/Module.h" +#include "llvm/TargetParser/Triple.h" + +std::string +mangleVectorParameters(llvm::ArrayRef<llvm::omp::DeclareSimdAttrTy> ParamAttrs); + +/// Return type size in bits for `Ty` using DL. +/// If scalable, return known-min as a conservative approximation. +static unsigned getTypeSizeInBits(llvm::Type *Ty, const llvm::DataLayout &DL) { + if (!Ty) + return 0; + llvm::TypeSize TS = DL.getTypeSizeInBits(Ty); + + if (TS.isScalable()) + return (unsigned)TS.getKnownMinValue(); + return (unsigned)TS.getFixedValue(); +} + +/// Returns size in *bits* of the Characteristic Data Type (CDT). +static unsigned +evaluateCDTSize(const llvm::Function *Fn, + llvm::ArrayRef<llvm::omp::DeclareSimdAttrTy> ParamAttrs) { + const llvm::DataLayout &DL = Fn->getParent()->getDataLayout(); + + llvm::Type *RetTy = Fn->getReturnType(); + llvm::Type *CDT = nullptr; + + // Non-void return => CDT = return type + if (RetTy && !RetTy->isVoidTy()) { + CDT = RetTy; + } else { + // First "Vector" param (ParamAttrs aligned with function params) + // If ParamAttrs is shorter than the parameter list, treat missing as Vector + // (matches the idea "default Kind is Vector"). + unsigned NumParams = Fn->getFunctionType()->getNumParams(); + for (unsigned I = 0; I < NumParams; ++I) { + bool IsVector = + (I < ParamAttrs.size()) + ? 
ParamAttrs[I].Kind == llvm::omp::DeclareSimdKindTy::Vector + : true; + if (!IsVector) + continue; + CDT = Fn->getFunctionType()->getParamType(I); + break; + } + } + + llvm::Type *IntTy = llvm::Type::getInt32Ty(Fn->getContext()); + if (!CDT || CDT->isStructTy() || CDT->isArrayTy()) + CDT = IntTy; + + return getTypeSizeInBits(CDT, DL); +} + +static void emitX86DeclareSimdFunction( + llvm::Function *Fn, const llvm::APSInt &VLENVal, + llvm::ArrayRef<llvm::omp::DeclareSimdAttrTy> ParamAttrs, + llvm::omp::DeclareSimdBranch Branch) { + struct ISADataTy { + char ISA; + unsigned VecRegSize; + }; + ISADataTy ISAData[] = { + {'b', 128}, // SSE + {'c', 256}, // AVX + {'d', 256}, // AVX2 + {'e', 512}, // AVX512 + }; + llvm::SmallVector<char, 2> Masked; + switch (Branch) { + case llvm::omp::DeclareSimdBranch::Undefined: + Masked.push_back('N'); + Masked.push_back('M'); + break; + case llvm::omp::DeclareSimdBranch::Notinbranch: + Masked.push_back('N'); + break; + case llvm::omp::DeclareSimdBranch::Inbranch: + Masked.push_back('M'); + break; + } + for (char Mask : Masked) { + for (const ISADataTy &Data : ISAData) { + llvm::SmallString<256> Buffer; + llvm::raw_svector_ostream Out(Buffer); + Out << "_ZGV" << Data.ISA << Mask; + if (!VLENVal) { + unsigned NumElts = evaluateCDTSize(Fn, ParamAttrs); + assert(NumElts && "Non-zero simdlen/cdtsize expected"); + Out << llvm::APSInt::getUnsigned(Data.VecRegSize / NumElts); + } else { + Out << VLENVal; + } + Out << llvm::omp::mangleVectorParameters(ParamAttrs); + Out << '_' << Fn->getName(); + Fn->addFnAttr(Out.str()); + } + } +} + +namespace llvm { + +namespace omp { + +/// Mangle the parameter part of the vector function name according to +/// their OpenMP classification. The mangling function is defined in +/// section 4.5 of the AAVFABI(2021Q1). +std::string mangleVectorParameters( + llvm::ArrayRef<llvm::omp::DeclareSimdAttrTy> ParamAttrs) { + llvm::SmallString<256> Buffer; + llvm::raw_svector_ostream Out(Buffer); + for (const auto &ParamAttr : ParamAttrs) { + switch (ParamAttr.Kind) { + case llvm::omp::DeclareSimdKindTy::Linear: + Out << 'l'; + break; + case llvm::omp::DeclareSimdKindTy::LinearRef: + Out << 'R'; + break; + case llvm::omp::DeclareSimdKindTy::LinearUVal: + Out << 'U'; + break; + case llvm::omp::DeclareSimdKindTy::LinearVal: + Out << 'L'; + break; + case llvm::omp::DeclareSimdKindTy::Uniform: + Out << 'u'; + break; + case llvm::omp::DeclareSimdKindTy::Vector: + Out << 'v'; + break; + } + if (ParamAttr.HasVarStride) + Out << "s" << ParamAttr.StrideOrArg; + else if (ParamAttr.Kind == llvm::omp::DeclareSimdKindTy::Linear || + ParamAttr.Kind == llvm::omp::DeclareSimdKindTy::LinearRef || + ParamAttr.Kind == llvm::omp::DeclareSimdKindTy::LinearUVal || + ParamAttr.Kind == llvm::omp::DeclareSimdKindTy::LinearVal) { + // Don't print the step value if it is not present or if it is + // equal to 1. 
+ if (ParamAttr.StrideOrArg < 0) + Out << 'n' << -ParamAttr.StrideOrArg; + else if (ParamAttr.StrideOrArg != 1) + Out << ParamAttr.StrideOrArg; + } + + if (!!ParamAttr.Alignment) + Out << 'a' << ParamAttr.Alignment; + } + + return std::string(Out.str()); +} + +void emitDeclareSimdFunction( + llvm::Function *Fn, const llvm::APSInt &VLENVal, + llvm::ArrayRef<llvm::omp::DeclareSimdAttrTy> ParamAttrs, + llvm::omp::DeclareSimdBranch Branch) { + Module *M = Fn->getParent(); + const llvm::Triple &Triple = M->getTargetTriple(); + + if (Triple.isX86()) + emitX86DeclareSimdFunction(Fn, VLENVal, ParamAttrs, Branch); +} + +} // end namespace omp +} // end namespace llvm diff --git a/mlir/lib/Dialect/OpenMP/IR/OpenMPDialect.cpp b/mlir/lib/Dialect/OpenMP/IR/OpenMPDialect.cpp index 70753b0f2a69a..82b84ef01a24c 100644 --- a/mlir/lib/Dialect/OpenMP/IR/OpenMPDialect.cpp +++ b/mlir/lib/Dialect/OpenMP/IR/OpenMPDialect.cpp @@ -4466,34 +4466,7 @@ LogicalResult WorkdistributeOp::verify() { } //===----------------------------------------------------------------------===// -// Declare simd [7.7] -//===----------------------------------------------------------------------===// - -LogicalResult DeclareSimdOp::verify() { - // Must be nested inside a function-like op - auto func = - dyn_cast_if_present<mlir::FunctionOpInterface>((*this)->getParentOp()); - if (!func) - return emitOpError() << "must be nested inside a function"; - - if (getInbranch() && getNotinbranch()) - return emitOpError("cannot have both 'inbranch' and 'notinbranch'"); - - return verifyAlignedClause(*this, getAlignments(), getAlignedVars()); -} - -void DeclareSimdOp::build(OpBuilder &odsBuilder, OperationState &odsState, - const DeclareSimdOperands &clauses) { - MLIRContext *ctx = odsBuilder.getContext(); - DeclareSimdOp::build(odsBuilder, odsState, clauses.alignedVars, - makeArrayAttr(ctx, clauses.alignments), clauses.inbranch, - clauses.linearVars, clauses.linearStepVars, - clauses.linearVarTypes, clauses.notinbranch, - clauses.simdlen, clauses.uniformVars); -} - -//===----------------------------------------------------------------------===// -// Parser and printer for Uniform Clause +// Parser, printer, and verifier for Uniform Clause //===----------------------------------------------------------------------===// /// uniform ::= `uniform` `(` uniform-list `)` @@ -4521,6 +4494,54 @@ static void printUniformClause(OpAsmPrinter &p, Operation *op, } } +/// Verify Uniform Clauses +static LogicalResult verifyUniformClause(Operation *op, + FunctionOpInterface func, + ValueRange uniformVars) { + Block &entry = func.getFunctionBody().front(); + + for (Value u : uniformVars) { + auto barg = dyn_cast<BlockArgument>(u); + if (!barg || barg.getOwner() != &entry) + return op->emitOpError() << "uniform expects function argument"; + } + + return success(); +} + +//===----------------------------------------------------------------------===// +// Declare simd [7.7] +//===----------------------------------------------------------------------===// + +LogicalResult DeclareSimdOp::verify() { + // Must be nested inside a function-like op + auto func = + dyn_cast_if_present<mlir::FunctionOpInterface>((*this)->getParentOp()); + if (!func) + return emitOpError() << "must be nested inside a function"; + + if (getInbranch() && getNotinbranch()) + return emitOpError("cannot have both 'inbranch' and 'notinbranch'"); + + if (failed(verifyUniformClause(*this, func, getUniformVars()))) + return failure(); + + if (failed(verifyAlignedClause(*this, getAlignments(), 
getAlignedVars()))) + return failure(); + + return success(); +} + +void DeclareSimdOp::build(OpBuilder &odsBuilder, OperationState &odsState, + const DeclareSimdOperands &clauses) { + MLIRContext *ctx = odsBuilder.getContext(); + DeclareSimdOp::build(odsBuilder, odsState, clauses.alignedVars, + makeArrayAttr(ctx, clauses.alignments), clauses.inbranch, + clauses.linearVars, clauses.linearStepVars, + clauses.linearVarTypes, clauses.notinbranch, + clauses.simdlen, clauses.uniformVars); +} + #define GET_ATTRDEF_CLASSES #include "mlir/Dialect/OpenMP/OpenMPOpsAttributes.cpp.inc" diff --git a/mlir/lib/Target/LLVMIR/Dialect/OpenMP/OpenMPToLLVMIRTranslation.cpp b/mlir/lib/Target/LLVMIR/Dialect/OpenMP/OpenMPToLLVMIRTranslation.cpp index 781928e79db35..de68a728474be 100644 --- a/mlir/lib/Target/LLVMIR/Dialect/OpenMP/OpenMPToLLVMIRTranslation.cpp +++ b/mlir/lib/Target/LLVMIR/Dialect/OpenMP/OpenMPToLLVMIRTranslation.cpp @@ -21,10 +21,12 @@ #include "mlir/Target/LLVMIR/Dialect/OpenMPCommon.h" #include "mlir/Target/LLVMIR/ModuleTranslation.h" +#include "llvm/ADT/APSInt.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/TypeSwitch.h" #include "llvm/Frontend/OpenMP/OMPConstants.h" +#include "llvm/Frontend/OpenMP/OMPDeclareSimd.h" #include "llvm/Frontend/OpenMP/OMPIRBuilder.h" #include "llvm/IR/Constants.h" #include "llvm/IR/DebugInfoMetadata.h" @@ -6870,6 +6872,175 @@ convertTargetFreeMemOp(Operation &opInst, llvm::IRBuilderBase &builder, return success(); } +// if `v` is a function block-arg, return its index. +// If `v` is `llvm.load %argN`, return N as well (var-stride common case). +static std::optional<unsigned> getFuncArgIndex(mlir::LLVM::LLVMFuncOp func, + mlir::Value v) { + if (!v) + return std::nullopt; + + // Direct block argument case: %argN + if (auto barg = mlir::dyn_cast<mlir::BlockArgument>(v)) { + // Make sure this block arg belongs to this function. + // For LLVMFuncOp, the body is a Region; its entry block holds the args. 
+ mlir::Block &entry = func.getBody().front(); + if (barg.getOwner() == &entry) + return barg.getArgNumber(); + return std::nullopt; + } + + // Common LLVM dialect pattern: %v = llvm.load %argN + if (auto load = v.getDefiningOp<mlir::LLVM::LoadOp>()) { + mlir::Value addr = load.getAddr(); + if (auto addrBArg = mlir::dyn_cast<mlir::BlockArgument>(addr)) { + mlir::Block &entry = func.getBody().front(); + if (addrBArg.getOwner() == &entry) + return addrBArg.getArgNumber(); + } + } + + return std::nullopt; +} + +static void +applyUniform(LLVM::LLVMFuncOp funcOp, mlir::omp::DeclareSimdOp ds, + llvm::SmallVectorImpl<llvm::omp::DeclareSimdAttrTy> &attrs) { + for (mlir::Value u : ds.getUniformVars()) { + auto idx = getFuncArgIndex(funcOp, u); + assert(idx && "uniform variable must be a function argument"); + attrs[*idx].Kind = llvm::omp::DeclareSimdKindTy::Uniform; + } +} + +static void +applyAligned(LLVM::LLVMFuncOp funcOp, mlir::omp::DeclareSimdOp ds, + llvm::SmallVectorImpl<llvm::omp::DeclareSimdAttrTy> &attrs) { + auto alignedVars = ds.getAlignedVars(); + std::optional<mlir::ArrayAttr> maybeAlignArr = ds.getAlignments(); + if (alignedVars.empty() || !maybeAlignArr || !*maybeAlignArr) + return; + + mlir::ArrayAttr alignArr = *maybeAlignArr; + + unsigned n = std::min<unsigned>(alignedVars.size(), alignArr.size()); + assert(alignedVars.size() == alignArr.size() && + "aligned vars and alignments must have the same length"); + + for (unsigned i = 0; i < n; ++i) { + auto idx = getFuncArgIndex(funcOp, alignedVars[i]); + assert(idx && "aligned variable must be a function argument"); + + auto intAttr = mlir::dyn_cast<mlir::IntegerAttr>(alignArr[i]); + assert(intAttr && "alignment entry must be an IntegerAttr"); + + attrs[*idx].Alignment = + llvm::APSInt(intAttr.getValue(), /*isUnsigned=*/true); + } +} + +/// Helper: fill linear kind + step. +/// linear(%arg2 = %2 : !llvm.ptr) +/// - linear var: %arg2 (must be function arg) +/// - step value: %2 (may be constant) or another function arg (var stride) +static void +applyLinear(LLVM::LLVMFuncOp func, mlir::omp::DeclareSimdOp ds, + llvm::SmallVectorImpl<llvm::omp::DeclareSimdAttrTy> &attrs) { + auto linearVars = ds.getLinearVars(); + auto linearSteps = ds.getLinearStepVars(); + + if (!linearSteps.empty()) { + assert(linearSteps.size() == linearVars.size() && + "linear vars and steps must have the same length when steps exist"); + } + + // Default step=1 + llvm::APSInt one(/*Bits=*/llvm::APInt(32, 1), /*isUnsigned=*/true); + + for (unsigned i = 0; i < linearVars.size(); ++i) { + auto idx = getFuncArgIndex(func, linearVars[i]); + assert(idx && "linear variable must be a function argument"); + + llvm::omp::DeclareSimdAttrTy ¶mAttr = attrs[*idx]; + paramAttr.Kind = llvm::omp::DeclareSimdKindTy::Linear; + paramAttr.HasVarStride = false; + paramAttr.StrideOrArg = one; + + if (i >= linearSteps.size()) + continue; + + mlir::Value stepV = linearSteps[i]; + + // Var-stride: step comes from a function arg (directly or via llvm.load + // %argN). + if (auto stepArgIdx = getFuncArgIndex(func, stepV)) { + paramAttr.HasVarStride = true; + paramAttr.StrideOrArg = llvm::APSInt(llvm::APInt(32, *stepArgIdx), + /*isUnsigned=*/true); + continue; + } + + // Constant step: llvm.constant -> IntegerAttr. 
+ if (auto cst = stepV.getDefiningOp<mlir::LLVM::ConstantOp>()) { + if (auto intAttr = mlir::dyn_cast<mlir::IntegerAttr>(cst.getValue())) { + paramAttr.HasVarStride = false; + paramAttr.StrideOrArg = + llvm::APSInt(intAttr.getValue(), /*isUnsigned=*/false); + continue; + } + } + + // If we get here, we couldn't decode the step. This should not happen in + // well-formed IR; prefer asserting so bugs don't silently change mangling. + assert(false && + "unhandled linear step form (expected const or arg/load-of-arg)"); + } +} + +static llvm::omp::DeclareSimdBranch +getDeclareSimdBranch(mlir::omp::DeclareSimdOp &op) { + if (op.getInbranch()) + return llvm::omp::DeclareSimdBranch::Inbranch; + if (op.getNotinbranch()) + return llvm::omp::DeclareSimdBranch::Notinbranch; + return llvm::omp::DeclareSimdBranch::Undefined; +} + +static LogicalResult +convertDeclareSimdOp(Operation &opInst, llvm::IRBuilderBase &builder, + LLVM::ModuleTranslation &moduleTranslation) { + auto funcOp = opInst.getParentOfType<LLVM::LLVMFuncOp>(); + assert(funcOp && "declare_simd must be defined inside an LLVM function"); + + llvm::Function *fn = moduleTranslation.lookupFunction(funcOp.getName()); + assert(fn && "Failed to find corresponding LLVM function for LLVMFuncOp"); + + const llvm::Triple &T = fn->getParent()->getTargetTriple(); + if (!T.isX86()) + return opInst.emitOpError() + << "to LLVM IR currently only supported on x86 (got " << T.str() + << ")"; + + funcOp.walk([&](mlir::omp::DeclareSimdOp ds) { + llvm::SmallVector<llvm::omp::DeclareSimdAttrTy, 8> paramAttrs( + funcOp.getNumArguments()); + + applyUniform(funcOp, ds, paramAttrs); + applyAligned(funcOp, ds, paramAttrs); + applyLinear(funcOp, ds, paramAttrs); + + llvm::APSInt VLENVal; + if (std::optional<int64_t> simdlen = ds.getSimdlen()) { + VLENVal = llvm::APSInt(llvm::APInt(/*numBits=*/64, *simdlen), + /*isUnsigned=*/false); + } + + llvm::omp::emitDeclareSimdFunction(fn, VLENVal, paramAttrs, + getDeclareSimdBranch(ds)); + }); + + return success(); +} + /// Given an OpenMP MLIR operation, create the corresponding LLVM IR (including /// OpenMP runtime calls). 
static LogicalResult @@ -6992,6 +7163,9 @@ convertHostOrTargetOperation(Operation *op, llvm::IRBuilderBase &builder, .Case([&](omp::TaskwaitOp op) { return convertOmpTaskwaitOp(op, builder, moduleTranslation); }) + .Case([&](omp::DeclareSimdOp op) { + return convertDeclareSimdOp(*op, builder, moduleTranslation); + }) .Case<omp::YieldOp, omp::TerminatorOp, omp::DeclareMapperOp, omp::DeclareMapperInfoOp, omp::DeclareReductionOp, omp::CriticalDeclareOp>([](auto op) { diff --git a/mlir/test/Dialect/OpenMP/invalid.mlir b/mlir/test/Dialect/OpenMP/invalid.mlir index 0d9d1f1663ef9..388e7b18a2e38 100644 --- a/mlir/test/Dialect/OpenMP/invalid.mlir +++ b/mlir/test/Dialect/OpenMP/invalid.mlir @@ -3150,3 +3150,23 @@ func.func @omp_declare_simd_branch() -> () { omp.declare_simd inbranch notinbranch return } + +// ----- +// CHECK-LABEL: @omp_declare_simd_uniform_local_alloca +func.func @omp_declare_simd_uniform_local_alloca(%x: memref<i32>) { + %local = memref.alloca() : memref<i32> + // expected-error @+1 {{uniform expects function argument}} + omp.declare_simd uniform(%local : memref<i32>) + return +} + +// ----- +// CHECK-LABEL: @omp_declare_simd_uniform_blockarg_not_entry +func.func @omp_declare_simd_uniform_blockarg_not_entry(%x: memref<i32>) { + cf.br ^bb1(%x : memref<i32>) + +^bb1(%arg_in_bb1: memref<i32>): + // expected-error @+1 {{uniform expects function argument}} + omp.declare_simd uniform(%arg_in_bb1 : memref<i32>) + return +} diff --git a/mlir/test/Target/LLVMIR/openmp-declare-simd-aarch64.mlir b/mlir/test/Target/LLVMIR/openmp-declare-simd-aarch64.mlir new file mode 100644 index 0000000000000..52f6899e658e7 --- /dev/null +++ b/mlir/test/Target/LLVMIR/openmp-declare-simd-aarch64.mlir @@ -0,0 +1,19 @@ +// RUN: not mlir-translate --mlir-to-llvmir %s 2>&1 | FileCheck %s + +// Remove this test when codegen for aarch64 has been done +module attributes { + llvm.target_triple = "aarch64-unknown-linux-gnu", + llvm.data_layout = "e-m:e-i64:64-n32:64" +} { + llvm.func @omp_declare_simd_nonx86(%x: !llvm.ptr, %y: !llvm.ptr) -> i32 { + omp.declare_simd + %vx = llvm.load %x : !llvm.ptr -> i32 + %vy = llvm.load %y : !llvm.ptr -> i32 + %sum = llvm.add %vx, %vy : i32 + llvm.return %sum : i32 + } +} + +// CHECK: error: 'omp.declare_simd' op to LLVM IR currently only supported on x86 +// CHECK-SAME: (got aarch64-unknown-linux-gnu) + diff --git a/mlir/test/Target/LLVMIR/openmp-declare-simd-x86.mlir b/mlir/test/Target/LLVMIR/openmp-declare-simd-x86.mlir new file mode 100644 index 0000000000000..a8ea612f057d3 --- /dev/null +++ b/mlir/test/Target/LLVMIR/openmp-declare-simd-x86.mlir @@ -0,0 +1,218 @@ +// RUN: mlir-translate --mlir-to-llvmir %s | FileCheck %s +// +// This test exercises translation of `omp.declare_simd` from MLIR LLVM dialect +// to LLVM IR function attributes via llvm. +// +// For each `omp.declare_simd`, lowering computes: +// 1) ParamAttrs: one entry per function argument, classifying it as +// Vector / Uniform / Linear (+ step or var-stride) / Aligned. +// 2) Branch kind: Undefined / Inbranch / Notinbranch. +// 3) VLEN: either from `simdlen(...)` or derived from the CDT size. 
+// +// LLVM then emits x86 declare-simd variants by attaching +// mangled function attributes of the form: +// +// _ZGV <ISA> <Mask> <VLEN> <ParamAttrs> _ <FunctionName> +// +// where: +// - ISA : b (SSE), c (AVX), d (AVX2), e (AVX-512) +// - Mask : M (inbranch), N (notinbranch), or both if unspecified +// - VLEN : explicit simdlen or computed from CDT size +// - ParamAttrs encoding: +// v = vector, u = uniform, l = linear +// sN = var-stride using argument index N +// aN = alignment N +// + +module attributes { + llvm.target_triple = "x86_64-unknown-linux-gnu", + llvm.data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128" +} { + + // - All parameters default to Vector + // - No branch clause => both masked (M) and unmasked (N) variants emitted + // - No simdlen => VLEN derived from CDT + // * CDT = return type i32 => 32 bits + // * VLEN = vector-register-size / 32 + // + // CHECK-LABEL: define i32 @ds_minimal + llvm.func @ds_minimal(%x: !llvm.ptr, %y: !llvm.ptr) -> i32 { + omp.declare_simd + %vx = llvm.load %x : !llvm.ptr -> i32 + %vy = llvm.load %y : !llvm.ptr -> i32 + %sum = llvm.add %vx, %vy : i32 + llvm.return %sum : i32 + } + + // uniform + linear with constant step + simdlen + inbranch + // + // The linear step is the constant %c1 = 1; a step equal to 1 is not + // printed, so the parameter is mangled as plain `l`. + // + // ParamAttrs: + // [0] Vector + // [1] Uniform + // [2] Linear(step = 1) + // + // Inbranch => only masked (M) variants emitted. + // + // CHECK-LABEL: define i32 @ds_uniform_linear_const_step_inbranch + llvm.func @ds_uniform_linear_const_step_inbranch( + %x: !llvm.ptr, %y: !llvm.ptr, %i: !llvm.ptr) -> i32 { + %c1 = llvm.mlir.constant(1 : i32) : i32 + omp.declare_simd simdlen(8) uniform(%y : !llvm.ptr) linear(%i = %c1 : !llvm.ptr) inbranch {linear_var_types = [i32]} + %vx = llvm.load %x : !llvm.ptr -> i32 + %vy = llvm.load %y : !llvm.ptr -> i32 + %sum = llvm.add %vx, %vy : i32 + %vi = llvm.load %i : !llvm.ptr -> i32 + %out = llvm.add %sum, %vi : i32 + llvm.return %out : i32 + } + + // uniform + linear with variable stride + simdlen + // + // The linear step is produced by: + // %stepv = llvm.load %step + // + // This is recognized as a var-stride case: + // - Linear.HasVarStride = true + // - Linear.StrideOrArg = argument index of %step + // + // ParamAttrs: + // [0] Vector + // [1] Uniform + // [2] Linear(var-stride = arg3) + // [3] Vector + // + // No branch clause => both masked (M) and unmasked (N) variants emitted.
+ // + // CHECK-LABEL: define i32 @ds_uniform_linear_var_stride + llvm.func @ds_uniform_linear_var_stride( + %x: !llvm.ptr, %y: !llvm.ptr, %i: !llvm.ptr, %step: !llvm.ptr) -> i32 { + %stepv = llvm.load %step : !llvm.ptr -> i32 + omp.declare_simd simdlen(8) uniform(%y : !llvm.ptr) linear(%i = %stepv : !llvm.ptr) {linear_var_types = [i32]} + %vx = llvm.load %x : !llvm.ptr -> i32 + %vy = llvm.load %y : !llvm.ptr -> i32 + %sum = llvm.add %vx, %vy : i32 + %vi = llvm.load %i : !llvm.ptr -> i32 + %prod = llvm.mul %vi, %stepv : i32 + %out = llvm.add %sum, %prod : i32 + llvm.return %out : i32 + } + + // ------------------------------------------------------------------------- + // aligned + uniform + notinbranch (no simdlen) + // + // ParamAttrs: + // [0] Vector, Alignment = 32 + // [1] Uniform, Alignment = 128 + // [2] Vector + // + // Branch: + // Notinbranch => only unmasked (N) variants emitted + // + // VLEN: + // No simdlen => derived from CDT (i32) + // + // CHECK-LABEL: define i32 @ds_aligned_uniform_notinbranch + llvm.func @ds_aligned_uniform_notinbranch( + %p0: !llvm.ptr, %p1: !llvm.ptr, %i: !llvm.ptr) -> i32 { + omp.declare_simd aligned(%p0 : !llvm.ptr -> 32 : i64, + %p1 : !llvm.ptr -> 128 : i64) + uniform(%p1 : !llvm.ptr) + notinbranch + %v0 = llvm.load %p0 : !llvm.ptr -> i32 + %v1 = llvm.load %p1 : !llvm.ptr -> i32 + %sum = llvm.add %v0, %v1 : i32 + %vi = llvm.load %i : !llvm.ptr -> i32 + %out = llvm.add %sum, %vi : i32 + llvm.return %out : i32 + } + + // Multiple declare_simd ops in the same function body + // + // Each omp.declare_simd independently contributes a set of + // vector-function attributes to the same LLVM function. + // + // CHECK-LABEL: define i32 @ds_multiple_ops_same_function + llvm.func @ds_multiple_ops_same_function(%a: !llvm.ptr, %b: !llvm.ptr, %i: !llvm.ptr) -> i32 { + %c1 = llvm.mlir.constant(1 : i32) : i32 + omp.declare_simd uniform(%b : !llvm.ptr) linear(%i = %c1 : !llvm.ptr) simdlen(4) {linear_var_types = [i32]} + omp.declare_simd uniform(%a : !llvm.ptr) simdlen(8) + + %va = llvm.load %a : !llvm.ptr -> i32 + %vb = llvm.load %b : !llvm.ptr -> i32 + %sum = llvm.add %va, %vb : i32 + %vi = llvm.load %i : !llvm.ptr -> i32 + %out = llvm.add %sum, %vi : i32 + llvm.return %out : i32 + } +} + +// no branch clause => both N and M, VLEN from CDT(i32)=32b +// +// CHECK: attributes #[[ATTR_0:[0-9]+]] = { +// CHECK-SAME: "_ZGVbM4vv_ds_minimal" +// CHECK-SAME: "_ZGVbN4vv_ds_minimal" +// CHECK-SAME: "_ZGVcN8vv_ds_minimal" +// CHECK-SAME: "_ZGVdM8vv_ds_minimal" +// CHECK-SAME: "_ZGVeM16vv_ds_minimal" +// CHECK-SAME: "_ZGVeN16vv_ds_minimal" +// CHECK-SAME: } +// +// uniform + linear with constant step + simdlen + inbranch +// +// CHECK: attributes #[[ATTR_1:[0-9]+]] = { +// CHECK-SAME: "_ZGVbM8vul_ds_uniform_linear_const_step_inbranch" +// CHECK-SAME: "_ZGVcM8vul_ds_uniform_linear_const_step_inbranch" +// CHECK-SAME: "_ZGVdM8vul_ds_uniform_linear_const_step_inbranch" +// CHECK-SAME: "_ZGVeM8vul_ds_uniform_linear_const_step_inbranch" +// CHECK-SAME: } +// +// uniform + linear with var-stride via `llvm.load %step` + simdlen +// +// CHECK: attributes #[[ATTR_2:[0-9]+]] = { +// CHECK-SAME: "_ZGVbM8vuls3v_ds_uniform_linear_var_stride" +// CHECK-SAME: "_ZGVbN8vuls3v_ds_uniform_linear_var_stride" +// CHECK-SAME: "_ZGVcM8vuls3v_ds_uniform_linear_var_stride" +// CHECK-SAME: "_ZGVcN8vuls3v_ds_uniform_linear_var_stride" +// CHECK-SAME: "_ZGVdM8vuls3v_ds_uniform_linear_var_stride" +// CHECK-SAME: "_ZGVdN8vuls3v_ds_uniform_linear_var_stride" +// CHECK-SAME: 
"_ZGVeM8vuls3v_ds_uniform_linear_var_stride" +// CHECK-SAME: "_ZGVeN8vuls3v_ds_uniform_linear_var_stride" +// CHECK-SAME: } +// +// aligned + uniform + notinbranch +// +// CHECK: attributes #[[ATTR_3:[0-9]+]] = { +// CHECK-SAME: "_ZGVbN4va32ua128v_ds_aligned_uniform_notinbranch" +// CHECK-SAME: "_ZGVcN8va32ua128v_ds_aligned_uniform_notinbranch" +// CHECK-SAME: "_ZGVdN8va32ua128v_ds_aligned_uniform_notinbranch" +// CHECK-SAME: "_ZGVeN16va32ua128v_ds_aligned_uniform_notinbranch" +// CHECK-SAME: } +// +// multiple declare_simd ops in the same function body +// +// CHECK: attributes #[[ATTR_4:[0-9]+]] = { +// CHECK-SAME: "_ZGVbM4vul_ds_multiple_ops_same_function" +// CHECK-SAME: "_ZGVbM8uvv_ds_multiple_ops_same_function" +// CHECK-SAME: "_ZGVbN4vul_ds_multiple_ops_same_function" +// CHECK-SAME: "_ZGVbN8uvv_ds_multiple_ops_same_function" +// CHECK-SAME: "_ZGVcM4vul_ds_multiple_ops_same_function" +// CHECK-SAME: "_ZGVcM8uvv_ds_multiple_ops_same_function" +// CHECK-SAME: "_ZGVcN4vul_ds_multiple_ops_same_function" +// CHECK-SAME: "_ZGVcN8uvv_ds_multiple_ops_same_function" +// CHECK-SAME: "_ZGVdM4vul_ds_multiple_ops_same_function" +// CHECK-SAME: "_ZGVdM8uvv_ds_multiple_ops_same_function" +// CHECK-SAME: "_ZGVdN4vul_ds_multiple_ops_same_function" +// CHECK-SAME: "_ZGVdN8uvv_ds_multiple_ops_same_function" +// CHECK-SAME: "_ZGVeM4vul_ds_multiple_ops_same_function" +// CHECK-SAME: "_ZGVeM8uvv_ds_multiple_ops_same_function" +// CHECK-SAME: "_ZGVeN4vul_ds_multiple_ops_same_function" +// CHECK-SAME: "_ZGVeN8uvv_ds_multiple_ops_same_function" +// CHECK-SAME: } _______________________________________________ cfe-commits mailing list [email protected] https://lists.llvm.org/cgi-bin/mailman/listinfo/cfe-commits
