[clang] Turn 'counted_by' into a type attribute and parse it into 'CountAttributedType' (PR #78000)

2024-03-12 Thread Yinying Li via cfe-commits

https://github.com/yinying-lisa-li updated 
https://github.com/llvm/llvm-project/pull/78000

>From 3d2716ad6088f600c14d6ff724aa90453130a1f7 Mon Sep 17 00:00:00 2001
From: Yeoul Na 
Date: Mon, 18 Dec 2023 10:58:16 +0900
Subject: [PATCH 01/13] [BoundsSafety] Introduce CountAttributedType

CountAttributedType is a sugar type that represents a type carrying
a 'counted_by' attribute (and the like), which provides bounds
information for the underlying type. The type stores the attribute's
argument as an expression. Additionally, the type holds metadata
about the declarations referenced by that expression, to make it
easier for Sema to access the declarations on which the type depends.

This also adjusts the CountedBy attribute definition and implements
parsing of CountAttributedType.

__bdos and the array-bounds sanitizer now use CountAttributedType
instead of hasAttr.

Implements special lookup for counted_by argument in structs.

Adjust test/Sema/attr-counted-by.c to match the default diags
generated by the expression parser.
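
For context, here is a minimal C illustration of the attribute that this
patch models as a type; the struct and field names below are ours, not
from the patch:

  #include <stddef.h>

  struct buffer {
    size_t count; /* the field that carries the bound */
    /* 'counted_by' ties the flexible array member's element count
       to 'count' in the same struct: */
    int elems[] __attribute__((counted_by(count)));
  };

  /* With this, __builtin_dynamic_object_size(p->elems, 0) can return
     p->count * sizeof(int), and -fsanitize=array-bounds can flag
     accesses past p->count elements. */
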
---
 clang/include/clang/AST/ASTContext.h  |   7 +
 clang/include/clang/AST/PropertiesBase.td |   1 +
 clang/include/clang/AST/RecursiveASTVisitor.h |   9 +
 clang/include/clang/AST/Type.h| 152 +++
 clang/include/clang/AST/TypeLoc.h |  26 +++
 clang/include/clang/AST/TypeProperties.td |  19 ++
 clang/include/clang/Basic/Attr.td |   7 +-
 .../clang/Basic/DiagnosticSemaKinds.td|  14 +-
 clang/include/clang/Basic/TypeNodes.td|   2 +
 clang/include/clang/Parse/Parser.h|   5 +
 .../clang/Serialization/ASTRecordReader.h |   2 +
 .../clang/Serialization/ASTRecordWriter.h |   5 +
 .../clang/Serialization/TypeBitCodes.def  |   2 +-
 clang/lib/AST/ASTContext.cpp  |  55 ++
 clang/lib/AST/ASTImporter.cpp |  22 +++
 clang/lib/AST/ASTStructuralEquivalence.cpp|   7 +
 clang/lib/AST/ItaniumMangle.cpp   |   1 +
 clang/lib/AST/Type.cpp|  65 +++
 clang/lib/AST/TypeLoc.cpp |   4 +
 clang/lib/AST/TypePrinter.cpp |  30 +++
 clang/lib/CodeGen/CGBuiltin.cpp   |   6 +-
 clang/lib/CodeGen/CGDebugInfo.cpp |   1 +
 clang/lib/CodeGen/CGExpr.cpp  |  37 +---
 clang/lib/CodeGen/CodeGenFunction.cpp |   1 +
 clang/lib/Parse/ParseDecl.cpp |  90 +
 clang/lib/Sema/SemaDecl.cpp   |   6 -
 clang/lib/Sema/SemaDeclAttr.cpp   | 183 --
 clang/lib/Sema/SemaExpr.cpp   |  25 ++-
 clang/lib/Sema/SemaType.cpp   |  12 ++
 clang/lib/Sema/TreeTransform.h|   7 +
 clang/lib/Serialization/ASTReader.cpp |   8 +
 clang/lib/Serialization/ASTWriter.cpp |   4 +
 clang/test/Sema/attr-counted-by.c |  20 +-
 clang/tools/libclang/CIndex.cpp   |   4 +
 34 files changed, 679 insertions(+), 160 deletions(-)

diff --git a/clang/include/clang/AST/ASTContext.h 
b/clang/include/clang/AST/ASTContext.h
index ff6b64c7f72d57..002f36ecbbaa3f 100644
--- a/clang/include/clang/AST/ASTContext.h
+++ b/clang/include/clang/AST/ASTContext.h
@@ -250,6 +250,8 @@ class ASTContext : public RefCountedBase<ASTContext> {
       DependentBitIntTypes;
   llvm::FoldingSet<BTFTagAttributedType> BTFTagAttributedTypes;
 
+  mutable llvm::FoldingSet<CountAttributedType> CountAttributedTypes;
+
   mutable llvm::FoldingSet<QualifiedTemplateName> QualifiedTemplateNames;
   mutable llvm::FoldingSet<DependentTemplateName> DependentTemplateNames;
   mutable llvm::FoldingSet<SubstTemplateTemplateParmStorage>
@@ -1341,6 +1343,11 @@ class ASTContext : public RefCountedBase {
 return CanQualType::CreateUnsafe(getPointerType((QualType) T));
   }
 
+  QualType
+  getCountAttributedType(QualType T, Expr *CountExpr, bool CountInBytes,
+                         bool OrNull,
+                         ArrayRef<TypeCoupledDeclRefInfo> DependentDecls) const;
+
   /// Return the uniqued reference to a type adjusted from the original
   /// type to a new type.
   QualType getAdjustedType(QualType Orig, QualType New) const;
diff --git a/clang/include/clang/AST/PropertiesBase.td 
b/clang/include/clang/AST/PropertiesBase.td
index 0270c086d06b6a..6df1d93a7ba2eb 100644
--- a/clang/include/clang/AST/PropertiesBase.td
+++ b/clang/include/clang/AST/PropertiesBase.td
@@ -143,6 +143,7 @@ def UInt32 : CountPropertyType<"uint32_t">;
 def UInt64 : CountPropertyType<"uint64_t">;
 def UnaryTypeTransformKind : EnumPropertyType<"UnaryTransformType::UTTKind">;
 def VectorKind : EnumPropertyType<"VectorKind">;
+def TypeCoupledDeclRefInfo : PropertyType;
 
 def ExceptionSpecInfo : PropertyType<"FunctionProtoType::ExceptionSpecInfo"> {
   let BufferElementTypes = [ QualType ];
diff --git a/clang/include/clang/AST/RecursiveASTVisitor.h 
b/clang/include/clang/AST/RecursiveASTVisitor.h
index 5080551ada4fc6..4a1ff222ecadcd 100644
--- a/clang/include/clang/AST/RecursiveASTVisitor.h
+++ b/clang/include/clang/AST/RecursiveASTVisitor.h
@@ -1110,6 +1110,12 @@ 

[libcxx] [compiler-rt] [llvm] [libc] [lldb] [lld] [flang] [mlir] [clang] [mlir][sparse] Implement parsing n out of m (PR #79935)

2024-02-06 Thread Yinying Li via cfe-commits

https://github.com/yinying-lisa-li updated 
https://github.com/llvm/llvm-project/pull/79935

>From b4610de041d1fd9c362a4155ee50325c738eebda Mon Sep 17 00:00:00 2001
From: Yinying Li 
Date: Tue, 30 Jan 2024 01:01:52 +
Subject: [PATCH 01/13] [mlir][sparse] Expand LevelType to 64 bit and implement
 n out of m
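
For orientation, a small sketch (ours, inferred from the new enum values
in the diff below) of how the widened 64-bit encoding decomposes: the low
16 bits carry the level properties and the higher bits the level format:

  #include <stdint.h>
  #include <stdio.h>

  int main(void) {
    uint64_t lt = 131073;          /* COMPRESSED_NU = 0x00_00_0002_0001 */
    uint64_t format = lt >> 16;    /* 0x2 -> compressed */
    uint64_t props = lt & 0xFFFF;  /* 0x1 -> nonunique  */
    printf("format=0x%llx props=0x%llx\n",
           (unsigned long long)format, (unsigned long long)props);
    return 0;
  }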

---
 mlir/include/mlir-c/Dialect/SparseTensor.h|  28 +--
 .../mlir/Dialect/SparseTensor/IR/Enums.h  | 209 +++---
 .../SparseTensor/IR/SparseTensorAttrDefs.td   |   4 +-
 .../SparseTensor/IR/SparseTensorType.h|   2 +-
 .../mlir/Dialect/SparseTensor/Utils/Merger.h  |   2 +-
 .../ExecutionEngine/SparseTensor/Storage.h|  14 +-
 .../Bindings/Python/DialectSparseTensor.cpp   |   2 +-
 mlir/lib/CAPI/Dialect/SparseTensor.cpp|  49 ++--
 .../IR/Detail/DimLvlMapParser.cpp |   2 +
 .../SparseTensor/IR/Detail/LvlTypeParser.cpp  |  55 -
 .../SparseTensor/IR/Detail/LvlTypeParser.h|   6 +-
 .../Transforms/SparseGPUCodegen.cpp   |   2 +-
 .../Transforms/SparseTensorCodegen.cpp|   6 +-
 .../Transforms/Sparsification.cpp |   2 +-
 .../Transforms/Utils/SparseTensorLevel.cpp|   2 +-
 .../lib/Dialect/SparseTensor/Utils/Merger.cpp |   4 +-
 .../ExecutionEngine/SparseTensor/Storage.cpp  |   2 +-
 mlir/test/CAPI/sparse_tensor.c|   6 +-
 .../SparseTensor/GPU/gpu_matmul24_lib.mlir|   2 +-
 .../SparseTensor/roundtrip_encoding.mlir  |  12 +-
 .../SparseTensor/sparse_fill_zero.mlir|   2 +-
 .../SparseTensor/CPU/sparse_block_matmul.mlir |   2 +-
 .../Dialect/SparseTensor/CPU/sparse_ds.mlir   |   2 +-
 .../CUDA/sm80-lt/sparse-matmul-2-4-lib.mlir   |   2 +-
 .../CUDA/sm80-lt/sparse-matmul-2-4-prune.mlir |   2 +-
 .../python/dialects/sparse_tensor/dialect.py  | 106 -
 26 files changed, 316 insertions(+), 211 deletions(-)

diff --git a/mlir/include/mlir-c/Dialect/SparseTensor.h 
b/mlir/include/mlir-c/Dialect/SparseTensor.h
index 42d8400cb5e95..947a746b60a65 100644
--- a/mlir/include/mlir-c/Dialect/SparseTensor.h
+++ b/mlir/include/mlir-c/Dialect/SparseTensor.h
@@ -28,20 +28,20 @@ MLIR_DECLARE_CAPI_DIALECT_REGISTRATION(SparseTensor, 
sparse_tensor);
 typedef uint64_t MlirSparseTensorLevelType;
 
 enum MlirBaseSparseTensorLevelType {
-  MLIR_SPARSE_TENSOR_LEVEL_DENSE = 4,   // 0b00001_00
-  MLIR_SPARSE_TENSOR_LEVEL_COMPRESSED = 8,  // 0b00010_00
-  MLIR_SPARSE_TENSOR_LEVEL_COMPRESSED_NU = 9,   // 0b00010_01
-  MLIR_SPARSE_TENSOR_LEVEL_COMPRESSED_NO = 10,  // 0b00010_10
-  MLIR_SPARSE_TENSOR_LEVEL_COMPRESSED_NU_NO = 11,   // 0b00010_11
-  MLIR_SPARSE_TENSOR_LEVEL_SINGLETON = 16,  // 0b00100_00
-  MLIR_SPARSE_TENSOR_LEVEL_SINGLETON_NU = 17,   // 0b00100_01
-  MLIR_SPARSE_TENSOR_LEVEL_SINGLETON_NO = 18,   // 0b00100_10
-  MLIR_SPARSE_TENSOR_LEVEL_SINGLETON_NU_NO = 19,// 0b00100_11
-  MLIR_SPARSE_TENSOR_LEVEL_LOOSE_COMPRESSED = 32,   // 0b01000_00
-  MLIR_SPARSE_TENSOR_LEVEL_LOOSE_COMPRESSED_NU = 33,// 0b01000_01
-  MLIR_SPARSE_TENSOR_LEVEL_LOOSE_COMPRESSED_NO = 34,// 0b01000_10
-  MLIR_SPARSE_TENSOR_LEVEL_LOOSE_COMPRESSED_NU_NO = 35, // 0b01000_11
-  MLIR_SPARSE_TENSOR_LEVEL_TWO_OUT_OF_FOUR = 64,// 0b10000_00
+  MLIR_SPARSE_TENSOR_LEVEL_DENSE = 65536,   // 0x00_00_0001_0000
+  MLIR_SPARSE_TENSOR_LEVEL_COMPRESSED = 131072, // 0x00_00_0002_0000
+  MLIR_SPARSE_TENSOR_LEVEL_COMPRESSED_NU = 131073,  // 0x00_00_0002_0001
+  MLIR_SPARSE_TENSOR_LEVEL_COMPRESSED_NO = 131074,  // 0x00_00_0002_0002
+  MLIR_SPARSE_TENSOR_LEVEL_COMPRESSED_NU_NO = 131075,   // 0x00_00_0002_0003
+  MLIR_SPARSE_TENSOR_LEVEL_SINGLETON = 262144,  // 0x00_00_0004_0000
+  MLIR_SPARSE_TENSOR_LEVEL_SINGLETON_NU = 262145,   // 0x00_00_0004_0001
+  MLIR_SPARSE_TENSOR_LEVEL_SINGLETON_NO = 262146,   // 0x00_00_0004_0002
+  MLIR_SPARSE_TENSOR_LEVEL_SINGLETON_NU_NO = 262147,// 0x00_00_0004_0003
+  MLIR_SPARSE_TENSOR_LEVEL_LOOSE_COMPRESSED = 524288,   // 0x00_00_0008_0000
+  MLIR_SPARSE_TENSOR_LEVEL_LOOSE_COMPRESSED_NU = 524289,// 0x00_00_0008_0001
+  MLIR_SPARSE_TENSOR_LEVEL_LOOSE_COMPRESSED_NO = 524290,// 0x00_00_0008_0002
+  MLIR_SPARSE_TENSOR_LEVEL_LOOSE_COMPRESSED_NU_NO = 524291, // 0x00_00_0008_0003
+  MLIR_SPARSE_TENSOR_LEVEL_N_OUT_OF_M = 1048576,// 0x00_00_0010_0000
 };
 
 
//===--===//
diff --git a/mlir/include/mlir/Dialect/SparseTensor/IR/Enums.h 
b/mlir/include/mlir/Dialect/SparseTensor/IR/Enums.h
index 1f662e2042304..b70ac57dfd00a 100644
--- a/mlir/include/mlir/Dialect/SparseTensor/IR/Enums.h
+++ b/mlir/include/mlir/Dialect/SparseTensor/IR/Enums.h
@@ -154,9 +154,10 @@ enum class Action : uint32_t {
 
 /// This enum defines all the sparse representations supportable by
 /// the SparseTensor dialect. We use a lightweight encoding to encode
-/// both the 

[llvm] [clang-tools-extra] [mlir] [clang] [mlir][sparse] Change LevelType enum to 64 bit (PR #80501)

2024-02-05 Thread Yinying Li via cfe-commits

https://github.com/yinying-lisa-li updated 
https://github.com/llvm/llvm-project/pull/80501

>From 1754868fc5f600ff59fac02d21ce9a37dc9ebcad Mon Sep 17 00:00:00 2001
From: Yinying Li 
Date: Fri, 2 Feb 2024 22:08:10 +
Subject: [PATCH 1/7] [mlir][sparse] Change enum to be 64 bit
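
For a sense of why the underlying type matters, here is our own sketch of
the old encoding's layout (it mirrors the shift/mask in the isValidLT hunk
further down; the variable names are ours): two property bits sit at the
bottom and the format occupies the bits above them.

  #include <stdint.h>
  #include <stdio.h>

  int main(void) {
    uint64_t lt = 9;                /* CompressedNu = 0b00010_01 */
    uint64_t formatBits = lt >> 2;  /* 0b00010 -> compressed     */
    uint64_t propertyBits = lt & 3; /* 0b01    -> nonunique      */
    printf("format=%llu props=%llu\n",
           (unsigned long long)formatBits,
           (unsigned long long)propertyBits);
    /* TwoOutOfFour already sits at 64 = 0b10000_00; a couple of format
       slots later the value reaches 256, which no longer fits uint8_t. */
    return 0;
  }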

---
 mlir/include/mlir-c/Dialect/SparseTensor.h|  6 ++-
 .../mlir/Dialect/SparseTensor/IR/Enums.h  | 38 +--
 .../Bindings/Python/DialectSparseTensor.cpp   |  4 +-
 mlir/lib/CAPI/Dialect/SparseTensor.cpp|  6 +--
 .../Transforms/Utils/CodegenUtils.h   |  2 +-
 mlir/test/CAPI/sparse_tensor.c|  6 +--
 .../test/Dialect/SparseTensor/conversion.mlir | 18 -
 .../SparseTensor/sparse_fill_zero.mlir| 12 +++---
 .../python/dialects/sparse_tensor/dialect.py  |  4 +-
 9 files changed, 49 insertions(+), 47 deletions(-)

diff --git a/mlir/include/mlir-c/Dialect/SparseTensor.h 
b/mlir/include/mlir-c/Dialect/SparseTensor.h
index 41d024db04964..fe3a5d7248350 100644
--- a/mlir/include/mlir-c/Dialect/SparseTensor.h
+++ b/mlir/include/mlir-c/Dialect/SparseTensor.h
@@ -25,6 +25,8 @@ MLIR_DECLARE_CAPI_DIALECT_REGISTRATION(SparseTensor, 
sparse_tensor);
 /// These correspond to SparseTensorEncodingAttr::LevelType in the C++ API.
 /// If updating, keep them in sync and update the static_assert in the impl
 /// file.
+typedef uint64_t level_type;
+
 enum MlirSparseTensorLevelType {
  MLIR_SPARSE_TENSOR_LEVEL_DENSE = 4,   // 0b00001_00
  MLIR_SPARSE_TENSOR_LEVEL_COMPRESSED = 8,  // 0b00010_00
@@ -53,7 +55,7 @@ mlirAttributeIsASparseTensorEncodingAttr(MlirAttribute attr);
 /// Creates a `sparse_tensor.encoding` attribute with the given parameters.
 MLIR_CAPI_EXPORTED MlirAttribute mlirSparseTensorEncodingAttrGet(
 MlirContext ctx, intptr_t lvlRank,
-enum MlirSparseTensorLevelType const *lvlTypes, MlirAffineMap dimToLvl,
+level_type const *lvlTypes, MlirAffineMap dimToLvl,
 MlirAffineMap lvlTodim, int posWidth, int crdWidth);
 
 /// Returns the level-rank of the `sparse_tensor.encoding` attribute.
@@ -61,7 +63,7 @@ MLIR_CAPI_EXPORTED intptr_t
 mlirSparseTensorEncodingGetLvlRank(MlirAttribute attr);
 
 /// Returns a specified level-type of the `sparse_tensor.encoding` attribute.
-MLIR_CAPI_EXPORTED enum MlirSparseTensorLevelType
+MLIR_CAPI_EXPORTED level_type
 mlirSparseTensorEncodingAttrGetLvlType(MlirAttribute attr, intptr_t lvl);
 
 /// Returns the dimension-to-level mapping of the `sparse_tensor.encoding`
diff --git a/mlir/include/mlir/Dialect/SparseTensor/IR/Enums.h 
b/mlir/include/mlir/Dialect/SparseTensor/IR/Enums.h
index ac91bfa5ae622..1f662e2042304 100644
--- a/mlir/include/mlir/Dialect/SparseTensor/IR/Enums.h
+++ b/mlir/include/mlir/Dialect/SparseTensor/IR/Enums.h
@@ -165,7 +165,7 @@ enum class Action : uint32_t {
 /// where we need to store an undefined or indeterminate `LevelType`.
 /// It should not be used externally, since it does not indicate an
 /// actual/representable format.
-enum class LevelType : uint8_t {
+enum class LevelType : uint64_t {
   Undef = 0,// 0b00000_00
   Dense = 4,// 0b00001_00
   Compressed = 8,   // 0b00010_00
@@ -184,7 +184,7 @@ enum class LevelType : uint8_t {
 };
 
 /// This enum defines all supported storage format without the level 
properties.
-enum class LevelFormat : uint8_t {
+enum class LevelFormat : uint64_t {
   Dense = 4,// 0b00001_00
   Compressed = 8,   // 0b00010_00
   Singleton = 16,   // 0b00100_00
@@ -193,7 +193,7 @@ enum class LevelFormat : uint8_t {
 };
 
 /// This enum defines all the nondefault properties for storage formats.
-enum class LevelPropertyNondefault : uint8_t {
+enum class LevelPropertyNondefault : uint64_t {
   Nonunique = 1,  // 0b00000_01
   Nonordered = 2, // 0b00000_10
 };
@@ -237,8 +237,8 @@ constexpr const char *toMLIRString(LevelType lt) {
 
 /// Check that the `LevelType` contains a valid (possibly undefined) value.
 constexpr bool isValidLT(LevelType lt) {
-  const uint8_t formatBits = static_cast<uint8_t>(lt) >> 2;
-  const uint8_t propertyBits = static_cast<uint8_t>(lt) & 3;
+  const uint64_t formatBits = static_cast<uint64_t>(lt) >> 2;
+  const uint64_t propertyBits = static_cast<uint64_t>(lt) & 3;
   // If undefined or dense, then must be unique and ordered.
   // Otherwise, the format must be one of the known ones.
   return (formatBits <= 1 || formatBits == 16)
@@ -251,32 +251,32 @@ constexpr bool isUndefLT(LevelType lt) { return lt == 
LevelType::Undef; }
 
 /// Check if the `LevelType` is dense (regardless of properties).
 constexpr bool isDenseLT(LevelType lt) {
-  return (static_cast<uint8_t>(lt) & ~3) ==
- static_cast<uint8_t>(LevelType::Dense);
+  return (static_cast<uint64_t>(lt) & ~3) ==
+ static_cast<uint64_t>(LevelType::Dense);
 }
 
 /// Check if the `LevelType` is compressed (regardless of properties).
 constexpr bool isCompressedLT(LevelType lt) {
-  return (static_cast<uint8_t>(lt) & ~3) ==
- static_cast<uint8_t>(LevelType::Compressed);
+  return 

[libc] [compiler-rt] [mlir] [clang] [libcxx] [flang] [libcxxabi] [lld] [lldb] [llvm] [mlir][sparse] Expand LevelType to 64 bits and implement n out of m (PR #79935)

2024-01-30 Thread Yinying Li via cfe-commits

https://github.com/yinying-lisa-li updated 
https://github.com/llvm/llvm-project/pull/79935

>From fa5210448dea1f88d8e0a242543ad1be655087e0 Mon Sep 17 00:00:00 2001
From: Yinying Li 
Date: Tue, 30 Jan 2024 01:01:52 +
Subject: [PATCH 1/3] [mlir][sparse] Expand LevelType to 64 bit and implement n
 out of m

---
 mlir/include/mlir-c/Dialect/SparseTensor.h|  28 +--
 .../mlir/Dialect/SparseTensor/IR/Enums.h  | 225 +++---
 .../SparseTensor/IR/SparseTensorAttrDefs.td   |   4 +-
 .../SparseTensor/IR/SparseTensorType.h|   2 +-
 .../mlir/Dialect/SparseTensor/Utils/Merger.h  |   2 +-
 .../ExecutionEngine/SparseTensor/Storage.h|  14 +-
 .../Bindings/Python/DialectSparseTensor.cpp   |   2 +-
 mlir/lib/CAPI/Dialect/SparseTensor.cpp|  49 ++--
 .../IR/Detail/DimLvlMapParser.cpp |   2 +
 .../SparseTensor/IR/Detail/LvlTypeParser.cpp  |  55 -
 .../SparseTensor/IR/Detail/LvlTypeParser.h|   6 +-
 .../Transforms/SparseGPUCodegen.cpp   |   2 +-
 .../Transforms/SparseTensorCodegen.cpp|   6 +-
 .../Transforms/Sparsification.cpp |   2 +-
 .../Transforms/Utils/CodegenUtils.h   |   2 +-
 .../Transforms/Utils/SparseTensorLevel.cpp|   2 +-
 .../lib/Dialect/SparseTensor/Utils/Merger.cpp |   4 +-
 .../ExecutionEngine/SparseTensor/Storage.cpp  |   2 +-
 mlir/test/CAPI/sparse_tensor.c|   6 +-
 .../SparseTensor/GPU/gpu_matmul24_lib.mlir|   2 +-
 .../test/Dialect/SparseTensor/conversion.mlir |  16 +-
 .../SparseTensor/roundtrip_encoding.mlir  |  12 +-
 .../SparseTensor/sparse_fill_zero.mlir|  12 +-
 .../SparseTensor/CPU/sparse_block_matmul.mlir |   2 +-
 .../Dialect/SparseTensor/CPU/sparse_ds.mlir   |   2 +-
 .../CUDA/sm80-lt/sparse-matmul-2-4-lib.mlir   |   2 +-
 .../CUDA/sm80-lt/sparse-matmul-2-4-prune.mlir |   2 +-
 .../python/dialects/sparse_tensor/dialect.py  | 148 ++--
 28 files changed, 358 insertions(+), 255 deletions(-)

diff --git a/mlir/include/mlir-c/Dialect/SparseTensor.h 
b/mlir/include/mlir-c/Dialect/SparseTensor.h
index 41d024db04964..5fc1f51452482 100644
--- a/mlir/include/mlir-c/Dialect/SparseTensor.h
+++ b/mlir/include/mlir-c/Dialect/SparseTensor.h
@@ -26,20 +26,20 @@ MLIR_DECLARE_CAPI_DIALECT_REGISTRATION(SparseTensor, 
sparse_tensor);
 /// If updating, keep them in sync and update the static_assert in the impl
 /// file.
 enum MlirSparseTensorLevelType {
-  MLIR_SPARSE_TENSOR_LEVEL_DENSE = 4,   // 0b00001_00
-  MLIR_SPARSE_TENSOR_LEVEL_COMPRESSED = 8,  // 0b00010_00
-  MLIR_SPARSE_TENSOR_LEVEL_COMPRESSED_NU = 9,   // 0b00010_01
-  MLIR_SPARSE_TENSOR_LEVEL_COMPRESSED_NO = 10,  // 0b00010_10
-  MLIR_SPARSE_TENSOR_LEVEL_COMPRESSED_NU_NO = 11,   // 0b00010_11
-  MLIR_SPARSE_TENSOR_LEVEL_SINGLETON = 16,  // 0b00100_00
-  MLIR_SPARSE_TENSOR_LEVEL_SINGLETON_NU = 17,   // 0b00100_01
-  MLIR_SPARSE_TENSOR_LEVEL_SINGLETON_NO = 18,   // 0b00100_10
-  MLIR_SPARSE_TENSOR_LEVEL_SINGLETON_NU_NO = 19,// 0b00100_11
-  MLIR_SPARSE_TENSOR_LEVEL_LOOSE_COMPRESSED = 32,   // 0b01000_00
-  MLIR_SPARSE_TENSOR_LEVEL_LOOSE_COMPRESSED_NU = 33,// 0b01000_01
-  MLIR_SPARSE_TENSOR_LEVEL_LOOSE_COMPRESSED_NO = 34,// 0b01000_10
-  MLIR_SPARSE_TENSOR_LEVEL_LOOSE_COMPRESSED_NU_NO = 35, // 0b01000_11
-  MLIR_SPARSE_TENSOR_LEVEL_TWO_OUT_OF_FOUR = 64,// 0b10000_00
+  MLIR_SPARSE_TENSOR_LEVEL_DENSE = 65536,   // 0x00_00_0001_0000
+  MLIR_SPARSE_TENSOR_LEVEL_COMPRESSED = 131072, // 0x00_00_0002_0000
+  MLIR_SPARSE_TENSOR_LEVEL_COMPRESSED_NU = 131073,  // 0x00_00_0002_0001
+  MLIR_SPARSE_TENSOR_LEVEL_COMPRESSED_NO = 131074,  // 0x00_00_0002_0002
+  MLIR_SPARSE_TENSOR_LEVEL_COMPRESSED_NU_NO = 131075,   // 0x00_00_0002_0003
+  MLIR_SPARSE_TENSOR_LEVEL_SINGLETON = 262144,  // 0x00_00_0004_0000
+  MLIR_SPARSE_TENSOR_LEVEL_SINGLETON_NU = 262145,   // 0x00_00_0004_0001
+  MLIR_SPARSE_TENSOR_LEVEL_SINGLETON_NO = 262146,   // 0x00_00_0004_0002
+  MLIR_SPARSE_TENSOR_LEVEL_SINGLETON_NU_NO = 262147,// 0x00_00_0004_0003
+  MLIR_SPARSE_TENSOR_LEVEL_LOOSE_COMPRESSED = 524288,   // 0x00_00_0008_0000
+  MLIR_SPARSE_TENSOR_LEVEL_LOOSE_COMPRESSED_NU = 524289,// 0x00_00_0008_0001
+  MLIR_SPARSE_TENSOR_LEVEL_LOOSE_COMPRESSED_NO = 524290,// 0x00_00_0008_0002
+  MLIR_SPARSE_TENSOR_LEVEL_LOOSE_COMPRESSED_NU_NO = 524291, // 0x00_00_0008_0003
+  MLIR_SPARSE_TENSOR_LEVEL_N_OUT_OF_M = 1048576,// 0x00_00_0010_0000
 };
 
 
//===--===//
diff --git a/mlir/include/mlir/Dialect/SparseTensor/IR/Enums.h 
b/mlir/include/mlir/Dialect/SparseTensor/IR/Enums.h
index ac91bfa5ae622..6ddc9326179fe 100644
--- a/mlir/include/mlir/Dialect/SparseTensor/IR/Enums.h
+++ b/mlir/include/mlir/Dialect/SparseTensor/IR/Enums.h
@@ -154,9 +154,10 @@ enum class Action : uint32_t {
 
 /// 

[clang] [mlir] [llvm] [mlir][verifyMemref] Fix bug and support more types for verifyMemref (PR #77682)

2024-01-10 Thread Yinying Li via cfe-commits

https://github.com/yinying-lisa-li updated 
https://github.com/llvm/llvm-project/pull/77682

>From abb4cfff1b74a34f074e3feccb5c03598ba48e64 Mon Sep 17 00:00:00 2001
From: Yinying Li 
Date: Wed, 10 Jan 2024 20:00:32 +
Subject: [PATCH] [mlir][verifyMemref] Fix bug and support more types for
 verifyMemref in RunnerUtils

1. Fix a bug in verifyMemref to pass in data instead of bareptr, which didn't 
verify data correctly.
2. Add == for f16 and bf16.
3. Add comprehensive tests of verifyMemref for all supported types.
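
The new equality operators compare raw bit patterns. A self-contained C
sketch of the same idea (the type and function names are ours):

  #include <stdbool.h>
  #include <stdint.h>

  /* A half-precision value stored as its 16 raw bits. */
  typedef struct { uint16_t bits; } half;

  /* Bit-pattern equality, as in the operator== added below: unlike IEEE
     comparison, identical NaN payloads compare equal here, and +0.0
     differs from -0.0. */
  static bool half_eq(half a, half b) { return a.bits == b.bits; }
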
---
 .../mlir/ExecutionEngine/Float16bits.h|   3 +
 .../mlir/ExecutionEngine/RunnerUtils.h|  12 +-
 mlir/lib/ExecutionEngine/Float16bits.cpp  |   8 +
 mlir/lib/ExecutionEngine/RunnerUtils.cpp  |  30 
 .../Dialect/Memref/verify-memref.mlir | 167 ++
 5 files changed, 219 insertions(+), 1 deletion(-)
 create mode 100644 mlir/test/Integration/Dialect/Memref/verify-memref.mlir

diff --git a/mlir/include/mlir/ExecutionEngine/Float16bits.h 
b/mlir/include/mlir/ExecutionEngine/Float16bits.h
index 5eb1f2ce07639d..ad409841b2a927 100644
--- a/mlir/include/mlir/ExecutionEngine/Float16bits.h
+++ b/mlir/include/mlir/ExecutionEngine/Float16bits.h
@@ -48,6 +48,9 @@ MLIR_FLOAT16_EXPORT std::ostream &operator<<(std::ostream &os, const f16 &f);
 // Outputs a bfloat value.
 MLIR_FLOAT16_EXPORT std::ostream &operator<<(std::ostream &os, const bf16 &d);
 
+MLIR_FLOAT16_EXPORT bool operator==(const f16 &f1, const f16 &f2);
+MLIR_FLOAT16_EXPORT bool operator==(const bf16 &f1, const bf16 &f2);
+
 extern "C" MLIR_FLOAT16_EXPORT void printF16(uint16_t bits);
 extern "C" MLIR_FLOAT16_EXPORT void printBF16(uint16_t bits);
 
diff --git a/mlir/include/mlir/ExecutionEngine/RunnerUtils.h 
b/mlir/include/mlir/ExecutionEngine/RunnerUtils.h
index ebf95f90f37450..965dff83816dbf 100644
--- a/mlir/include/mlir/ExecutionEngine/RunnerUtils.h
+++ b/mlir/include/mlir/ExecutionEngine/RunnerUtils.h
@@ -332,7 +332,7 @@ int64_t verifyMemRef(const DynamicMemRefType<T> &actual,
   // Return the number of errors.
   int64_t printCounter = 0;
   return MemRefDataVerifier<T>::verify(
-  std::cerr, actual.basePtr, expected.basePtr, actual.rank, actual.offset,
+  std::cerr, actual.data, expected.data, actual.rank, actual.offset,
   actual.sizes, actual.strides, printCounter);
 }
 
@@ -429,8 +429,18 @@ _mlir_ciface_printMemref1dC64(StridedMemRefType<impl::complex64, 1> *m);
 extern "C" MLIR_RUNNERUTILS_EXPORT void _mlir_ciface_printMemrefVector4x4xf32(
     StridedMemRefType<Vector2D<4, 4, float>, 2> *m);
 
+extern "C" MLIR_RUNNERUTILS_EXPORT int64_t _mlir_ciface_verifyMemRefI8(
+    UnrankedMemRefType<int8_t> *actual, UnrankedMemRefType<int8_t> *expected);
+extern "C" MLIR_RUNNERUTILS_EXPORT int64_t _mlir_ciface_verifyMemRefI16(
+    UnrankedMemRefType<int16_t> *actual, UnrankedMemRefType<int16_t> *expected);
 extern "C" MLIR_RUNNERUTILS_EXPORT int64_t _mlir_ciface_verifyMemRefI32(
     UnrankedMemRefType<int32_t> *actual, UnrankedMemRefType<int32_t> *expected);
+extern "C" MLIR_RUNNERUTILS_EXPORT int64_t _mlir_ciface_verifyMemRefI64(
+    UnrankedMemRefType<int64_t> *actual, UnrankedMemRefType<int64_t> *expected);
+extern "C" MLIR_RUNNERUTILS_EXPORT int64_t _mlir_ciface_verifyMemRefBF16(
+    UnrankedMemRefType<bf16> *actual, UnrankedMemRefType<bf16> *expected);
+extern "C" MLIR_RUNNERUTILS_EXPORT int64_t _mlir_ciface_verifyMemRefF16(
+    UnrankedMemRefType<f16> *actual, UnrankedMemRefType<f16> *expected);
 extern "C" MLIR_RUNNERUTILS_EXPORT int64_t _mlir_ciface_verifyMemRefF32(
     UnrankedMemRefType<float> *actual, UnrankedMemRefType<float> *expected);
 extern "C" MLIR_RUNNERUTILS_EXPORT int64_t _mlir_ciface_verifyMemRefF64(
diff --git a/mlir/lib/ExecutionEngine/Float16bits.cpp 
b/mlir/lib/ExecutionEngine/Float16bits.cpp
index 38a05fe86bbdda..2ac9c18b6821ac 100644
--- a/mlir/lib/ExecutionEngine/Float16bits.cpp
+++ b/mlir/lib/ExecutionEngine/Float16bits.cpp
@@ -150,6 +150,14 @@ std::ostream &operator<<(std::ostream &os, const bf16 &d) {
   return os;
 }
 
+bool operator==(const f16 &f1, const f16 &f2) {
+  return f1.bits == f2.bits;
+}
+
+bool operator==(const bf16 &f1, const bf16 &f2) {
+  return f1.bits == f2.bits;
+}
+
 // Mark these symbols as weak so they don't conflict when compiler-rt also
 // defines them.
 #define ATTR_WEAK
diff --git a/mlir/lib/ExecutionEngine/RunnerUtils.cpp 
b/mlir/lib/ExecutionEngine/RunnerUtils.cpp
index 378aa7ce35ef16..d93ec2b7af0c0d 100644
--- a/mlir/lib/ExecutionEngine/RunnerUtils.cpp
+++ b/mlir/lib/ExecutionEngine/RunnerUtils.cpp
@@ -219,12 +219,42 @@ _mlir_ciface_printMemref1dC64(StridedMemRefType<impl::complex64, 1> *M) {
   impl::printMemRef(*M);
 }
 
+extern "C" int64_t
+_mlir_ciface_verifyMemRefI8(UnrankedMemRefType *actual,
+ UnrankedMemRefType *expected) {
+  return impl::verifyMemRef(*actual, *expected);
+}
+
+extern "C" int64_t
+_mlir_ciface_verifyMemRefI16(UnrankedMemRefType *actual,
+ UnrankedMemRefType *expected) {
+  return impl::verifyMemRef(*actual, *expected);
+}
+
 extern "C" int64_t
 _mlir_ciface_verifyMemRefI32(UnrankedMemRefType *actual,
  UnrankedMemRefType *expected) {
   

[libclc] [libcxxabi] [compiler-rt] [mlir] [flang] [clang] [lld] [clang-tools-extra] [llvm] [libcxx] [libc] [libunwind] [lldb] [mlir][sparse][CRunnerUtils] Add shuffle in CRunnerUtils (PR #77124)

2024-01-09 Thread Yinying Li via cfe-commits

https://github.com/yinying-lisa-li updated 
https://github.com/llvm/llvm-project/pull/77124

>From 1c774e6c6ae3c5c7be9291677651d20c8979c7f5 Mon Sep 17 00:00:00 2001
From: Yinying Li 
Date: Fri, 5 Jan 2024 01:17:39 +
Subject: [PATCH 1/6] [mlir][sparse][CRunnerUtils] Add shuffle and shuffleFree
 in CRunnerUtils to generate unique and random numbers

It's helpful for generating a tensor with a specified sparsity level.
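
A hypothetical C caller showing the intended use (rtsrand/rtdrand already
exist in CRunnerUtils; shuffle/shuffleFree are the entry points added
here; taking the first k shuffled values yields k unique random
coordinates in [0, s)):

  #include <stdint.h>
  #include <stdio.h>

  void *rtsrand(uint64_t s);          /* create seeded generator      */
  void rtdrand(void *g);              /* delete generator             */
  void *shuffle(uint64_t s, void *g); /* random permutation of 0..s-1 */
  void shuffleFree(void *a);          /* delete the permutation       */

  int main(void) {
    void *gen = rtsrand(42);
    uint64_t s = 100, k = 10;
    uint64_t *perm = (uint64_t *)shuffle(s, gen);
    for (uint64_t i = 0; i < k; ++i) /* k unique indices in [0, s) */
      printf("%llu\n", (unsigned long long)perm[i]);
    shuffleFree(perm);
    rtdrand(gen);
    return 0;
  }
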
---
 .../mlir/ExecutionEngine/CRunnerUtils.h   |   4 +
 mlir/lib/ExecutionEngine/CRunnerUtils.cpp |  16 +++
 .../SparseTensor/CPU/sparse_generate.mlir | 108 ++
 3 files changed, 128 insertions(+)
 create mode 100644 
mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_generate.mlir

diff --git a/mlir/include/mlir/ExecutionEngine/CRunnerUtils.h 
b/mlir/include/mlir/ExecutionEngine/CRunnerUtils.h
index 76b04145b482e4..747e5ca40ca6f6 100644
--- a/mlir/include/mlir/ExecutionEngine/CRunnerUtils.h
+++ b/mlir/include/mlir/ExecutionEngine/CRunnerUtils.h
@@ -486,6 +486,10 @@ extern "C" MLIR_CRUNNERUTILS_EXPORT void *rtsrand(uint64_t 
s);
 extern "C" MLIR_CRUNNERUTILS_EXPORT uint64_t rtrand(void *, uint64_t m);
 // Deletes the random number generator.
 extern "C" MLIR_CRUNNERUTILS_EXPORT void rtdrand(void *);
+// Returns a pointer to an array of random numbers in the range of [0, s).
+extern "C" MLIR_CRUNNERUTILS_EXPORT void *shuffle(uint64_t s, void *g);
+// Deletes the array of random numbers.
+extern "C" MLIR_CRUNNERUTILS_EXPORT void shuffleFree(void *a);
 
 
//===--===//
 // Runtime support library to allow the use of std::sort in MLIR program.
diff --git a/mlir/lib/ExecutionEngine/CRunnerUtils.cpp 
b/mlir/lib/ExecutionEngine/CRunnerUtils.cpp
index e28e75eb110303..3a3261d1ad4e03 100644
--- a/mlir/lib/ExecutionEngine/CRunnerUtils.cpp
+++ b/mlir/lib/ExecutionEngine/CRunnerUtils.cpp
@@ -160,6 +160,22 @@ extern "C" void mlirAlignedFree(void *ptr) {
 #endif
 }
 
+/// Generates an array with unique and random numbers from 0 to s-1.
+extern "C" void *shuffle(uint64_t s, void *g) {
+  std::mt19937 *generator = static_cast<std::mt19937 *>(g);
+  uint64_t *output = new uint64_t[s];
+  std::vector<uint64_t> arr(s);
+  std::iota(arr.begin(), arr.end(), 0);
+  std::shuffle(arr.begin(), arr.end(), *generator);
+  std::copy(arr.begin(), arr.end(), output);
+  return output;
+}
+
+extern "C" void shuffleFree(void *a) {
+  uint64_t *arr = static_cast<uint64_t *>(a);
+  delete[] arr;
+}
+
 extern "C" void *rtsrand(uint64_t s) {
   // Standard mersenne_twister_engine seeded with s.
   return new std::mt19937(s);
diff --git 
a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_generate.mlir 
b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_generate.mlir
new file mode 100644
index 00..250993d874b370
--- /dev/null
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_generate.mlir
@@ -0,0 +1,108 @@
+//--
+// WHEN CREATING A NEW TEST, PLEASE JUST COPY & PASTE WITHOUT EDITS.
+//
+// Set-up that's shared across all tests in this directory. In principle, this
+// config could be moved to lit.local.cfg. However, there are downstream users that
+//  do not use these LIT config files. Hence why this is kept inline.
+//
+// DEFINE: %{sparsifier_opts} = enable-runtime-library=true
+// DEFINE: %{sparsifier_opts_sve} = enable-arm-sve=true %{sparsifier_opts}
+// DEFINE: %{compile} = mlir-opt %s --sparsifier="%{sparsifier_opts}"
+// DEFINE: %{compile_sve} = mlir-opt %s --sparsifier="%{sparsifier_opts_sve}"
+// DEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils
+// DEFINE: %{run_opts} = -e entry -entry-point-result=void
+// DEFINE: %{run} = mlir-cpu-runner %{run_opts} %{run_libs}
+// DEFINE: %{run_sve} = %mcr_aarch64_cmd --march=aarch64 --mattr="+sve" 
%{run_opts} %{run_libs}
+//
+// DEFINE: %{env} =
+//--
+
+// RUN: %{compile} | %{run} | FileCheck %s
+//
+// Do the same run, but now with direct IR generation.
+// REDEFINE: %{sparsifier_opts} = enable-runtime-library=false
+// RUN: %{compile} | %{run} | FileCheck %s
+//
+// Do the same run, but now with direct IR generation and vectorization.
+// REDEFINE: %{sparsifier_opts} = enable-runtime-library=false vl=2 
reassociate-fp-reductions=true enable-index-optimizations=true
+// RUN: %{compile} | %{run} | FileCheck %s
+//
+// Do the same run, but now with direct IR generation and VLA vectorization.
+// RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | %{run_sve} | FileCheck %s %}
+
+//
+// Integration test that generates a tensor with specified sparsity level.
+//
+
+!Generator = !llvm.ptr
+!Array = !llvm.ptr
+
+#SparseVector = #sparse_tensor.encoding<{
+  map = (d0) -> (d0 : compressed)
+}>
+
+module {
+  func.func private @rtsrand(index) -> (!Generator)
+  func.func private 

[clang] [libcxx] [llvm] [mlir] [mlir][sparse][CRunnerUtils] Add shuffle in CRunnerUtils (PR #77124)

2024-01-05 Thread Yinying Li via cfe-commits

https://github.com/yinying-lisa-li ready_for_review 
https://github.com/llvm/llvm-project/pull/77124


[clang] [libcxx] [llvm] [mlir] [mlir][sparse][CRunnerUtils] Add shuffle in CRunnerUtils (PR #77124)

2024-01-05 Thread Yinying Li via cfe-commits

https://github.com/yinying-lisa-li updated 
https://github.com/llvm/llvm-project/pull/77124

>From 9baa732a2ab5d26b975efe9ed093d8e56ff6129c Mon Sep 17 00:00:00 2001
From: Yinying Li 
Date: Fri, 5 Jan 2024 01:17:39 +
Subject: [PATCH 1/2] [mlir][sparse][CRunnerUtils] Add shuffle and shuffleFree
 in CRunnerUtils to generate unique and random numbers

It's helpful for generating a tensor with a specified sparsity level.
---
 .../mlir/ExecutionEngine/CRunnerUtils.h   |   4 +
 mlir/lib/ExecutionEngine/CRunnerUtils.cpp |  16 +++
 .../SparseTensor/CPU/sparse_generate.mlir | 108 ++
 3 files changed, 128 insertions(+)
 create mode 100644 
mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_generate.mlir

diff --git a/mlir/include/mlir/ExecutionEngine/CRunnerUtils.h 
b/mlir/include/mlir/ExecutionEngine/CRunnerUtils.h
index 76b04145b482e4..747e5ca40ca6f6 100644
--- a/mlir/include/mlir/ExecutionEngine/CRunnerUtils.h
+++ b/mlir/include/mlir/ExecutionEngine/CRunnerUtils.h
@@ -486,6 +486,10 @@ extern "C" MLIR_CRUNNERUTILS_EXPORT void *rtsrand(uint64_t 
s);
 extern "C" MLIR_CRUNNERUTILS_EXPORT uint64_t rtrand(void *, uint64_t m);
 // Deletes the random number generator.
 extern "C" MLIR_CRUNNERUTILS_EXPORT void rtdrand(void *);
+// Returns a pointer to an array of random numbers in the range of [0, s).
+extern "C" MLIR_CRUNNERUTILS_EXPORT void *shuffle(uint64_t s, void *g);
+// Deletes the array of random numbers.
+extern "C" MLIR_CRUNNERUTILS_EXPORT void shuffleFree(void *a);
 
 
//===--===//
 // Runtime support library to allow the use of std::sort in MLIR program.
diff --git a/mlir/lib/ExecutionEngine/CRunnerUtils.cpp 
b/mlir/lib/ExecutionEngine/CRunnerUtils.cpp
index e28e75eb110303..3a3261d1ad4e03 100644
--- a/mlir/lib/ExecutionEngine/CRunnerUtils.cpp
+++ b/mlir/lib/ExecutionEngine/CRunnerUtils.cpp
@@ -160,6 +160,22 @@ extern "C" void mlirAlignedFree(void *ptr) {
 #endif
 }
 
+/// Generates an array with unique and random numbers from 0 to s-1.
+extern "C" void *shuffle(uint64_t s, void *g) {
+  std::mt19937 *generator = static_cast<std::mt19937 *>(g);
+  uint64_t *output = new uint64_t[s];
+  std::vector<uint64_t> arr(s);
+  std::iota(arr.begin(), arr.end(), 0);
+  std::shuffle(arr.begin(), arr.end(), *generator);
+  std::copy(arr.begin(), arr.end(), output);
+  return output;
+}
+
+extern "C" void shuffleFree(void *a) {
+  uint64_t *arr = static_cast<uint64_t *>(a);
+  delete[] arr;
+}
+
 extern "C" void *rtsrand(uint64_t s) {
   // Standard mersenne_twister_engine seeded with s.
   return new std::mt19937(s);
diff --git 
a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_generate.mlir 
b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_generate.mlir
new file mode 100644
index 00..250993d874b370
--- /dev/null
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_generate.mlir
@@ -0,0 +1,108 @@
+//--
+// WHEN CREATING A NEW TEST, PLEASE JUST COPY & PASTE WITHOUT EDITS.
+//
+// Set-up that's shared across all tests in this directory. In principle, this
+// config could be moved to lit.local.cfg. However, there are downstream users that
+//  do not use these LIT config files. Hence why this is kept inline.
+//
+// DEFINE: %{sparsifier_opts} = enable-runtime-library=true
+// DEFINE: %{sparsifier_opts_sve} = enable-arm-sve=true %{sparsifier_opts}
+// DEFINE: %{compile} = mlir-opt %s --sparsifier="%{sparsifier_opts}"
+// DEFINE: %{compile_sve} = mlir-opt %s --sparsifier="%{sparsifier_opts_sve}"
+// DEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils
+// DEFINE: %{run_opts} = -e entry -entry-point-result=void
+// DEFINE: %{run} = mlir-cpu-runner %{run_opts} %{run_libs}
+// DEFINE: %{run_sve} = %mcr_aarch64_cmd --march=aarch64 --mattr="+sve" 
%{run_opts} %{run_libs}
+//
+// DEFINE: %{env} =
+//--
+
+// RUN: %{compile} | %{run} | FileCheck %s
+//
+// Do the same run, but now with direct IR generation.
+// REDEFINE: %{sparsifier_opts} = enable-runtime-library=false
+// RUN: %{compile} | %{run} | FileCheck %s
+//
+// Do the same run, but now with direct IR generation and vectorization.
+// REDEFINE: %{sparsifier_opts} = enable-runtime-library=false vl=2 
reassociate-fp-reductions=true enable-index-optimizations=true
+// RUN: %{compile} | %{run} | FileCheck %s
+//
+// Do the same run, but now with direct IR generation and VLA vectorization.
+// RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | %{run_sve} | FileCheck %s %}
+
+//
+// Integration test that generates a tensor with specified sparsity level.
+//
+
+!Generator = !llvm.ptr
+!Array = !llvm.ptr
+
+#SparseVector = #sparse_tensor.encoding<{
+  map = (d0) -> (d0 : compressed)
+}>
+
+module {
+  func.func private @rtsrand(index) -> (!Generator)
+  func.func private 

[clang] [mlir][sparse] introduce MapRef, unify conversion/codegen for reader (PR #68360)

2023-10-06 Thread Yinying Li via cfe-commits

https://github.com/yinying-lisa-li approved this pull request.


https://github.com/llvm/llvm-project/pull/68360


[clang] [mlir][sparse] introduce MapRef, unify conversion/codegen for reader (PR #68360)

2023-10-06 Thread Yinying Li via cfe-commits


@@ -729,3 +729,92 @@ Value sparse_tensor::createOrFoldSliceStrideOp(OpBuilder &builder, Location loc,
 return constantIndex(builder, loc, *stride);
   return builder.create(loc, tensor, APInt(64, dim));
 }
+
+void sparse_tensor::fillDimShape(OpBuilder &builder, Location loc,
+                                 SparseTensorType stt,
+                                 SmallVectorImpl<Value> &out) {
+  out.clear();
+  out.reserve(stt.getDimRank());
+  for (const DynSize sh : stt.getDimShape()) {
+const auto s = ShapedType::isDynamic(sh) ? 0 : sh;
+out.push_back(constantIndex(builder, loc, s));
+  }
+}
+
+Value sparse_tensor::genReader(OpBuilder &builder, Location loc,
+                               SparseTensorType stt, Value tensor,
+                               /*out*/ SmallVectorImpl<Value> &dimShapesValues,
+                               /*out*/ Value &dimSizesBuffer) {
+  // Construct the dimShapes buffer. The buffer contains the static size
+  // per dimension, or otherwise a zero for a dynamic size.
+  fillDimShape(builder, loc, stt, dimShapesValues);
+  Value dimShapesBuffer = allocaBuffer(builder, loc, dimShapesValues);
+  // Create the `CheckedSparseTensorReader`. This reader performs a
+  // consistency check on the static sizes, but accepts any size
+  // of each dimension with a dynamic size.
+  Type opaqueTp = getOpaquePointerType(builder);
+  Type eltTp = stt.getElementType();
+  Value valTp = constantPrimaryTypeEncoding(builder, loc, eltTp);
+  Value reader =
+  createFuncCall(builder, loc, "createCheckedSparseTensorReader", opaqueTp,
+ {tensor, dimShapesBuffer, valTp}, EmitCInterface::On)
+  .getResult(0);
+  // For static shapes, the shape buffer can be used right away. For dynamic
+  // shapes, use the information from the reader to construct a buffer that
+  // supplies the actual size for each dynamic dimension.
+  dimSizesBuffer = dimShapesBuffer;
+  if (stt.hasDynamicDimShape()) {
+Type indexTp = builder.getIndexType();
+auto memTp = MemRefType::get({ShapedType::kDynamic}, indexTp);
+dimSizesBuffer =
+createFuncCall(builder, loc, "getSparseTensorReaderDimSizes", memTp,
+   reader, EmitCInterface::On)
+.getResult(0);
+  }
+  return reader;
+}
+
+Value sparse_tensor::genReaderBuffers(OpBuilder &builder, Location loc,
+                                      SparseTensorType stt,
+                                      ArrayRef<Value> dimShapesValues,
+                                      Value dimSizesBuffer,
+                                      /*out*/ Value &dim2lvlBuffer,
+                                      /*out*/ Value &lvl2dimBuffer) {
+  const Dimension dimRank = stt.getDimRank();
+  const Level lvlRank = stt.getLvlRank();
+  // For an identify mapping, the dim2lvl and lvl2dim mappings are

yinying-lisa-li wrote:

identity?

https://github.com/llvm/llvm-project/pull/68360


[clang-tools-extra] [mlir][sparse] Print new syntax (PR #68130)

2023-10-04 Thread Yinying Li via cfe-commits

https://github.com/yinying-lisa-li updated 
https://github.com/llvm/llvm-project/pull/68130

>From 47b34bb327e1078678d3ba0c96ebce3fc89cf2ae Mon Sep 17 00:00:00 2001
From: Yinying Li 
Date: Tue, 3 Oct 2023 16:43:50 +
Subject: [PATCH 1/5] [mlir][sparse] Print new syntax

Printing changes from #sparse_tensor.encoding<{ lvlTypes = [ "compressed" ] }> 
to map = (d0) -> (d0 : compressed). Level properties, ELL and slice are also 
supported.
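
Concretely (our own example, assembled from the strings this patch emits):
a 2-D encoding with a dense outer level and a nonunique compressed inner
level used to print as

  #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed_nu" ] }>

and now prints as

  #sparse_tensor.encoding<{ map = (d0, d1) -> (d0 : dense, d1 : compressed(nonunique)) }>
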
---
 .../mlir/Dialect/SparseTensor/IR/Enums.h  |  20 +--
 .../SparseTensor/IR/SparseTensorDialect.cpp   |  64 ---
 mlir/test/Dialect/SparseTensor/codegen.mlir   |   8 +-
 .../SparseTensor/roundtrip_encoding.mlir  |  32 ++--
 .../Dialect/SparseTensor/sparse_reshape.mlir  |   8 +-
 .../SparseTensor/sparse_tensor_reshape.mlir   |   2 +-
 .../python/dialects/sparse_tensor/dialect.py  | 160 +-
 7 files changed, 159 insertions(+), 135 deletions(-)

diff --git a/mlir/include/mlir/Dialect/SparseTensor/IR/Enums.h 
b/mlir/include/mlir/Dialect/SparseTensor/IR/Enums.h
index bc351ec52c0946b..2920ef79f461c6a 100644
--- a/mlir/include/mlir/Dialect/SparseTensor/IR/Enums.h
+++ b/mlir/include/mlir/Dialect/SparseTensor/IR/Enums.h
@@ -215,29 +215,29 @@ constexpr const char *toMLIRString(DimLevelType dlt) {
   case DimLevelType::Compressed:
 return "compressed";
   case DimLevelType::CompressedNu:
-return "compressed_nu";
+return "compressed(nonunique)";
   case DimLevelType::CompressedNo:
-return "compressed_no";
+return "compressed(nonordered)";
   case DimLevelType::CompressedNuNo:
-return "compressed_nu_no";
+return "compressed(nonunique, nonordered)";
   case DimLevelType::Singleton:
 return "singleton";
   case DimLevelType::SingletonNu:
-return "singleton_nu";
+return "singleton(nonunique)";
   case DimLevelType::SingletonNo:
-return "singleton_no";
+return "singleton(nonordered)";
   case DimLevelType::SingletonNuNo:
-return "singleton_nu_no";
+return "singleton(nonunique, nonordered)";
   case DimLevelType::LooseCompressed:
 return "loose_compressed";
   case DimLevelType::LooseCompressedNu:
-return "loose_compressed_nu";
+return "loose_compressed(nonunique)";
   case DimLevelType::LooseCompressedNo:
-return "loose_compressed_no";
+return "loose_compressed(nonordered)";
   case DimLevelType::LooseCompressedNuNo:
-return "loose_compressed_nu_no";
+return "loose_compressed(nonunique, nonordered)";
   case DimLevelType::TwoOutOfFour:
-return "compressed24";
+return "block2_4";
   }
   return "";
 }
diff --git a/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp 
b/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp
index 3897e1b9ea3597c..4c8dccdda6c0c7c 100644
--- a/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp
+++ b/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp
@@ -586,30 +586,56 @@ Attribute SparseTensorEncodingAttr::parse(AsmParser &parser, Type type) {
 }
 
 void SparseTensorEncodingAttr::print(AsmPrinter &printer) const {
-  // Print the struct-like storage in dictionary fashion.
-  printer << "<{ lvlTypes = [ ";
-  llvm::interleaveComma(getLvlTypes(), printer, [&](DimLevelType dlt) {
-printer << "\"" << toMLIRString(dlt) << "\"";
-  });
-  printer << " ]";
+  auto map = static_cast<AffineMap>(getDimToLvl());
+  auto lvlTypes = getLvlTypes();
+  // Empty affine map indicates identity map
+  if (!map) {
+map = AffineMap::getMultiDimIdentityMap(getLvlTypes().size(), getContext());
+  }
+  // Modified version of AsmPrinter::Impl::printAffineMap.
+  printer << "<{ map = ";
+  // Symbolic identifiers.
+  if (map.getNumSymbols() != 0) {
+printer << '[';
+for (unsigned i = 0; i < map.getNumSymbols() - 1; ++i)
+  printer << 's' << i << ", ";
+if (map.getNumSymbols() >= 1)
+  printer << 's' << map.getNumSymbols() - 1;
+printer << ']';
+  }
+  // Dimension identifiers.
+  printer << '(';
+  auto dimSlices = getDimSlices();
+  if (!dimSlices.empty()) {
+for (unsigned i = 0; i < map.getNumDims() - 1; ++i)
+  printer << 'd' << i << " : " << dimSlices[i] << ", ";
+if (map.getNumDims() >= 1)
+  printer << 'd' << map.getNumDims() - 1 << " : "
+  << dimSlices[map.getNumDims() - 1];
+  } else {
+for (unsigned i = 0; i < map.getNumDims() - 1; ++i)
+  printer << 'd' << i << ", ";
+if (map.getNumDims() >= 1)
+  printer << 'd' << map.getNumDims() - 1;
+  }
+  printer << ')';
+  // Level format and properties.
+  printer << " -> (";
+  for (unsigned i = 0; i < map.getNumResults() - 1; ++i) {
+map.getResult(i).print(printer.getStream());
+printer << " : " << toMLIRString(lvlTypes[i]) << ", ";
+  }
+  if (map.getNumResults() >= 1) {
+auto lastIndex = map.getNumResults() - 1;
+map.getResult(lastIndex).print(printer.getStream());
+printer << " : " << toMLIRString(lvlTypes[lastIndex]);
+  }
+  printer << ')';
   // Print remaining members only for non-default values.
-  if 

[clang] [mlir][sparse] Print new syntax (PR #68130)

2023-10-03 Thread Yinying Li via cfe-commits


@@ -533,7 +533,7 @@ func.func @sparse_compression(%tensor: tensor<8x8xf64, #CSR>,
 //   CHECK: %[[A13:.*]]:4 = scf.for %[[A14:.*]] = %[[A11]] to %[[A7]] step %[[A12]] iter_args(%[[A15:.*]] = %[[A0]], %[[A16:.*]] = %[[A1]], %[[A17:.*]] = %[[A2]], %[[A18:.*]] = %[[A3]]) -> (memref, memref, memref, !sparse_tensor.storage_specifier
 //   CHECK:   %[[A19:.*]] = memref.load %[[A6]]{{\[}}%[[A14]]] : memref
 //   CHECK:   %[[A20:.*]] = memref.load %[[A4]]{{\[}}%[[A19]]] : memref
-//   CHECK:   %[[A21:.*]]:4 = func.call @_insert_dense_compressed_no_8_8_f64_0_0(%[[A15]], %[[A16]], %[[A17]], %[[A18]], %[[A8]], %[[A19]], %[[A20]]) : (memref, memref, memref, !sparse_tensor.storage_specifier
+//   CHECK:   %[[A21:.*]]:4 = func.call @"_insert_dense_compressed(nonordered)_8_8_f64_0_0"(%[[A15]], %[[A16]], %[[A17]], %[[A18]], %[[A8]], %[[A19]], %[[A20]]) : (memref, memref, memref, !sparse_tensor.storage_specifier

yinying-lisa-li wrote:

Done!

https://github.com/llvm/llvm-project/pull/68130
___
cfe-commits mailing list
cfe-commits@lists.llvm.org
https://lists.llvm.org/cgi-bin/mailman/listinfo/cfe-commits
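
The quoting in the updated CHECK line is the substance of this exchange: once level-type names such as compressed(nonordered) appear inside mangled function names, the symbol is no longer a bare MLIR identifier and must be printed as a quoted string. A self-contained C++ sketch of that rule (printSymbolRef is an invented helper for illustration, not the actual MLIR printer):

#include <cctype>
#include <iostream>
#include <string>

// Sketch: bare symbol names may only use [A-Za-z0-9_$.]; anything else,
// such as '(' or ')', forces the quoted form @"...".
static std::string printSymbolRef(const std::string &name) {
  bool bare = !name.empty();
  for (char c : name)
    if (!(std::isalnum(static_cast<unsigned char>(c)) || c == '_' ||
          c == '$' || c == '.'))
      bare = false;
  return bare ? "@" + name : "@\"" + name + "\"";
}

int main() {
  std::cout << printSymbolRef("_insert_dense_compressed_no_8_8_f64_0_0") << '\n';
  // Prints: @_insert_dense_compressed_no_8_8_f64_0_0
  std::cout << printSymbolRef("_insert_dense_compressed(nonordered)_8_8_f64_0_0") << '\n';
  // Prints: @"_insert_dense_compressed(nonordered)_8_8_f64_0_0"
}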


[clang] [mlir][sparse] Print new syntax (PR #68130)

2023-10-03 Thread Yinying Li via cfe-commits

https://github.com/yinying-lisa-li edited 
https://github.com/llvm/llvm-project/pull/68130
___
cfe-commits mailing list
cfe-commits@lists.llvm.org
https://lists.llvm.org/cgi-bin/mailman/listinfo/cfe-commits


[clang-tools-extra] [mlir][sparse] Update Enum name for CompressedWithHigh (PR #67845)

2023-10-02 Thread Yinying Li via cfe-commits

https://github.com/yinying-lisa-li closed 
https://github.com/llvm/llvm-project/pull/67845
___
cfe-commits mailing list
cfe-commits@lists.llvm.org
https://lists.llvm.org/cgi-bin/mailman/listinfo/cfe-commits


[clang] [mlir][sparse] Update Enum name for CompressedWithHigh (PR #67845)

2023-09-29 Thread Yinying Li via cfe-commits

https://github.com/yinying-lisa-li updated 
https://github.com/llvm/llvm-project/pull/67845

>From ab46cf0f76aeaa78f2eb8865d647400f810e35ec Mon Sep 17 00:00:00 2001
From: Yinying Li 
Date: Fri, 29 Sep 2023 19:09:21 +
Subject: [PATCH] [mlir][sparse] Update Enum name for CompressedWithHigh

Change CompressedWithHigh to LooseCompressed.
---
 mlir/include/mlir-c/Dialect/SparseTensor.h|  28 ++---
 .../mlir/Dialect/SparseTensor/IR/Enums.h  | 106 +-
 .../Dialect/SparseTensor/IR/SparseTensor.h|   4 +-
 .../SparseTensor/IR/SparseTensorAttrDefs.td   |   2 +-
 .../Bindings/Python/DialectSparseTensor.cpp   |  14 +--
 .../SparseTensor/IR/Detail/LvlTypeParser.cpp  |   2 +-
 .../SparseTensor/IR/SparseTensorDialect.cpp   |  10 +-
 .../SparseTensor/Transforms/LoopEmitter.cpp   |  16 +--
 .../Transforms/SparseTensorCodegen.cpp|   8 +-
 .../Transforms/Sparsification.cpp |   6 +-
 .../lib/Dialect/SparseTensor/Utils/Merger.cpp |   4 +-
 .../SparseTensor/roundtrip_encoding.mlir  |   2 +-
 .../SparseTensor/CPU/sparse_pack_libgen.mlir  |   2 +-
 13 files changed, 102 insertions(+), 102 deletions(-)

diff --git a/mlir/include/mlir-c/Dialect/SparseTensor.h b/mlir/include/mlir-c/Dialect/SparseTensor.h
index fecbeaf6b0f9d6c..7e47e54e7361d54 100644
--- a/mlir/include/mlir-c/Dialect/SparseTensor.h
+++ b/mlir/include/mlir-c/Dialect/SparseTensor.h
@@ -26,20 +26,20 @@ MLIR_DECLARE_CAPI_DIALECT_REGISTRATION(SparseTensor, sparse_tensor);
 /// If updating, keep them in sync and update the static_assert in the impl
 /// file.
 enum MlirSparseTensorDimLevelType {
-  MLIR_SPARSE_TENSOR_DIM_LEVEL_DENSE = 4,                     // 0b00001_00
-  MLIR_SPARSE_TENSOR_DIM_LEVEL_COMPRESSED = 8,                // 0b00010_00
-  MLIR_SPARSE_TENSOR_DIM_LEVEL_COMPRESSED_NU = 9,             // 0b00010_01
-  MLIR_SPARSE_TENSOR_DIM_LEVEL_COMPRESSED_NO = 10,            // 0b00010_10
-  MLIR_SPARSE_TENSOR_DIM_LEVEL_COMPRESSED_NU_NO = 11,         // 0b00010_11
-  MLIR_SPARSE_TENSOR_DIM_LEVEL_SINGLETON = 16,                // 0b00100_00
-  MLIR_SPARSE_TENSOR_DIM_LEVEL_SINGLETON_NU = 17,             // 0b00100_01
-  MLIR_SPARSE_TENSOR_DIM_LEVEL_SINGLETON_NO = 18,             // 0b00100_10
-  MLIR_SPARSE_TENSOR_DIM_LEVEL_SINGLETON_NU_NO = 19,          // 0b00100_11
-  MLIR_SPARSE_TENSOR_DIM_LEVEL_COMPRESSED_WITH_HI = 32,       // 0b01000_00
-  MLIR_SPARSE_TENSOR_DIM_LEVEL_COMPRESSED_WITH_HI_NU = 33,    // 0b01000_01
-  MLIR_SPARSE_TENSOR_DIM_LEVEL_COMPRESSED_WITH_HI_NO = 34,    // 0b01000_10
-  MLIR_SPARSE_TENSOR_DIM_LEVEL_COMPRESSED_WITH_HI_NU_NO = 35, // 0b01000_11
-  MLIR_SPARSE_TENSOR_DIM_LEVEL_TWO_OUT_OF_FOUR = 64,          // 0b10000_00
+  MLIR_SPARSE_TENSOR_DIM_LEVEL_DENSE = 4,                    // 0b00001_00
+  MLIR_SPARSE_TENSOR_DIM_LEVEL_COMPRESSED = 8,               // 0b00010_00
+  MLIR_SPARSE_TENSOR_DIM_LEVEL_COMPRESSED_NU = 9,            // 0b00010_01
+  MLIR_SPARSE_TENSOR_DIM_LEVEL_COMPRESSED_NO = 10,           // 0b00010_10
+  MLIR_SPARSE_TENSOR_DIM_LEVEL_COMPRESSED_NU_NO = 11,        // 0b00010_11
+  MLIR_SPARSE_TENSOR_DIM_LEVEL_SINGLETON = 16,               // 0b00100_00
+  MLIR_SPARSE_TENSOR_DIM_LEVEL_SINGLETON_NU = 17,            // 0b00100_01
+  MLIR_SPARSE_TENSOR_DIM_LEVEL_SINGLETON_NO = 18,            // 0b00100_10
+  MLIR_SPARSE_TENSOR_DIM_LEVEL_SINGLETON_NU_NO = 19,         // 0b00100_11
+  MLIR_SPARSE_TENSOR_DIM_LEVEL_LOOSE_COMPRESSED = 32,        // 0b01000_00
+  MLIR_SPARSE_TENSOR_DIM_LEVEL_LOOSE_COMPRESSED_NU = 33,     // 0b01000_01
+  MLIR_SPARSE_TENSOR_DIM_LEVEL_LOOSE_COMPRESSED_NO = 34,     // 0b01000_10
+  MLIR_SPARSE_TENSOR_DIM_LEVEL_LOOSE_COMPRESSED_NU_NO = 35,  // 0b01000_11
+  MLIR_SPARSE_TENSOR_DIM_LEVEL_TWO_OUT_OF_FOUR = 64,         // 0b10000_00
 };
 
 //===----------------------------------------------------------------===//
diff --git a/mlir/include/mlir/Dialect/SparseTensor/IR/Enums.h b/mlir/include/mlir/Dialect/SparseTensor/IR/Enums.h
index 7a1aed509c2a360..bc351ec52c0946b 100644
--- a/mlir/include/mlir/Dialect/SparseTensor/IR/Enums.h
+++ b/mlir/include/mlir/Dialect/SparseTensor/IR/Enums.h
@@ -170,33 +170,33 @@ enum class Action : uint32_t {
 // TODO: We should generalize TwoOutOfFour to N out of M and use property to
 // encode the value of N and M.
 // TODO: Update DimLevelType to use lower 8 bits for storage formats and the
-// higher 4 bits to store level properties. Consider CompressedWithHi and
+// higher 4 bits to store level properties. Consider LooseCompressed and
 // TwoOutOfFour as properties instead of formats.
 enum class DimLevelType : uint8_t {
-  Undef = 0,             // 0b00000_00
-  Dense = 4,             // 0b00001_00
-  Compressed = 8,        // 0b00010_00
-  CompressedNu = 9,      // 0b00010_01
-  CompressedNo = 10,     // 0b00010_10
-  CompressedNuNo = 11,   // 0b00010_11
-  Singleton = 16,        // 0b00100_00
-  SingletonNu = 17,      // 0b00100_01
-  SingletonNo = 18,      // 0b00100_10
-  SingletonNuNo = 
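
The comment columns in the enums above encode each level type with the two lowest bits carrying the per-level properties and the remaining bits selecting the storage format: bit 0 is set for nonunique (_NU) and bit 1 for nonordered (_NO), which is why, for example, LOOSE_COMPRESSED_NU_NO = 35 is 32 | 3. A small self-contained C++ sketch decoding such a value (the helper names are invented for illustration, not the actual MLIR predicates):

#include <cstdint>
#include <cstdio>

// Sketch: decode a DimLevelType-style value into its format bits and the
// two property bits documented in the enum comments above.
static bool isNonUnique(uint8_t lt) { return lt & 1; }   // *_NU sets bit 0
static bool isNonOrdered(uint8_t lt) { return lt & 2; }  // *_NO sets bit 1
static uint8_t formatBits(uint8_t lt) { return lt & 0xFC; }

int main() {
  const uint8_t looseCompressedNuNo = 35; // 0b01000_11 in the table above
  std::printf("format=%u nonunique=%d nonordered=%d\n",
              formatBits(looseCompressedNuNo),
              isNonUnique(looseCompressedNuNo),
              isNonOrdered(looseCompressedNuNo));
  // Prints: format=32 nonunique=1 nonordered=1
}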

[clang] [mlir][sparse] Change tests to use new syntax for ELL and slice (PR #67569)

2023-09-27 Thread Yinying Li via cfe-commits

https://github.com/yinying-lisa-li closed 
https://github.com/llvm/llvm-project/pull/67569
___
cfe-commits mailing list
cfe-commits@lists.llvm.org
https://lists.llvm.org/cgi-bin/mailman/listinfo/cfe-commits


[clang-tools-extra] [mlir][sparse] Change tests to use new syntax for ELL and slice (PR #67569)

2023-09-27 Thread Yinying Li via cfe-commits


@@ -240,8 +240,9 @@ def SparseTensorEncodingAttr : 
SparseTensor_Attr<"SparseTensorEncoding",
 // CSR slice (offset = 0, size = 4, stride = 1 on the first dimension;
 // offset = 0, size = 8, and a dynamic stride on the second dimension).
 #CSR_SLICE = #sparse_tensor.encoding<{
-  lvlTypes = [ "dense", "compressed" ],
-  dimSlices = [ (0, 4, 1), (0, 8, ?) ]
+  map = (d0 : #sparse_tensor,

yinying-lisa-li wrote:

Yeah, it's a bit hard to comprehend. But for now, let's keep it this way, and we
could optimize it later once the migration is done.

https://github.com/llvm/llvm-project/pull/67569
___
cfe-commits mailing list
cfe-commits@lists.llvm.org
https://lists.llvm.org/cgi-bin/mailman/listinfo/cfe-commits


[clang-tools-extra] [mlir][sparse] Change tests to use new syntax for ELL and slice (PR #67569)

2023-09-27 Thread Yinying Li via cfe-commits


@@ -240,8 +240,9 @@ def SparseTensorEncodingAttr : 
SparseTensor_Attr<"SparseTensorEncoding",
 // CSR slice (offset = 0, size = 4, stride = 1 on the first dimension;

yinying-lisa-li wrote:

Done!

https://github.com/llvm/llvm-project/pull/67569
___
cfe-commits mailing list
cfe-commits@lists.llvm.org
https://lists.llvm.org/cgi-bin/mailman/listinfo/cfe-commits


[clang-tools-extra] [mlir][sparse] Change tests to use new syntax for ELL and slice (PR #67569)

2023-09-27 Thread Yinying Li via cfe-commits

https://github.com/yinying-lisa-li updated 
https://github.com/llvm/llvm-project/pull/67569

>From 0f1db4000d971192e0b2b027746cdd37083bf87f Mon Sep 17 00:00:00 2001
From: Yinying Li 
Date: Wed, 27 Sep 2023 14:51:34 +
Subject: [PATCH 1/2] [mlir][sparse] Change tests to use new syntax for ELL and
 slice

Examples:

#ELL = #sparse_tensor.encoding<{
  lvlTypes = [ "dense", "dense", "compressed" ],
  dimToLvl = affine_map<(i,j)[c] -> (c*4*i, i, j)>
}>
to
#ELL = #sparse_tensor.encoding<{
  map = [s0](d0, d1) -> (d0 * (s0 * 4) : dense, d0 : dense, d1 : compressed)
}>

#CSR_SLICE = #sparse_tensor.encoding<{
  lvlTypes = [ "dense", "compressed" ],
  dimSlices = [ (1, 4, 1), (1, 4, 2) ]
}>
to
#CSR_SLICE = #sparse_tensor.encoding<{
  map = (d0 : #sparse_tensor<slice(1, 4, 1)>, d1 : #sparse_tensor<slice(1, 4, 2)>) -> (d0 : dense, d1 : compressed)
}>
---
 .../SparseTensor/IR/SparseTensorAttrDefs.td   |  5 ++--
 mlir/test/CAPI/sparse_tensor.c|  3 +--
 .../SparseTensor/convert_sparse2sparse.mlir   |  3 +--
 mlir/test/Dialect/SparseTensor/invalid.mlir   |  9 +++
 .../SparseTensor/invalid_encoding.mlir|  3 +--
 .../Dialect/SparseTensor/pre_rewriting.mlir   |  3 +--
 mlir/test/Dialect/SparseTensor/roundtrip.mlir | 15 
 .../SparseTensor/roundtrip_encoding.mlir  | 20 +++-
 .../SparseTensor/sparse_extract_slice.mlir|  3 +--
 .../Dialect/SparseTensor/sparse_foreach.mlir  |  6 ++---
 .../CPU/sparse_foreach_slices.mlir| 12 --
 .../SparseTensor/CPU/sparse_matmul_slice.mlir | 24 +++
 12 files changed, 33 insertions(+), 73 deletions(-)

diff --git a/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorAttrDefs.td b/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorAttrDefs.td
index e6577aed063ca7f..58e09f0d5e1803b 100644
--- a/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorAttrDefs.td
+++ b/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorAttrDefs.td
@@ -228,8 +228,9 @@ def SparseTensorEncodingAttr : SparseTensor_Attr<"SparseTensorEncoding",
 // CSR slice (offset = 0, size = 4, stride = 1 on the first dimension;
 // offset = 0, size = 8, and a dynamic stride on the second dimension).
 #CSR_SLICE = #sparse_tensor.encoding<{
-  lvlTypes = [ "dense", "compressed" ],
-  dimSlices = [ (0, 4, 1), (0, 8, ?) ]
+  map = (d0 : #sparse_tensor<slice(0, 4, 1)>,
+         d1 : #sparse_tensor<slice(0, 8, ?)>) ->
+        (d0 : dense, d1 : compressed)
 }>
 ... tensor ...
 
diff --git a/mlir/test/CAPI/sparse_tensor.c b/mlir/test/CAPI/sparse_tensor.c
index 30ef1557e73302f..33ee8e784096a18 100644
--- a/mlir/test/CAPI/sparse_tensor.c
+++ b/mlir/test/CAPI/sparse_tensor.c
@@ -25,8 +25,7 @@ static int testRoundtripEncoding(MlirContext ctx) {
   // clang-format off
   const char *originalAsm =
 "#sparse_tensor.encoding<{ "
-"lvlTypes = [ \"dense\", \"compressed\", \"compressed\"], "
-"dimToLvl = affine_map<(d0, d1)[s0] -> (s0, d0, d1)>, "
+"map = [s0](d0, d1) -> (s0 : dense, d0 : compressed, d1 : compressed), "
 "posWidth = 32, crdWidth = 64 }>";
   // clang-format on
   MlirAttribute originalAttr =
diff --git a/mlir/test/Dialect/SparseTensor/convert_sparse2sparse.mlir 
b/mlir/test/Dialect/SparseTensor/convert_sparse2sparse.mlir
index 54cdfc690952d9a..2a2619daf493654 100644
--- a/mlir/test/Dialect/SparseTensor/convert_sparse2sparse.mlir
+++ b/mlir/test/Dialect/SparseTensor/convert_sparse2sparse.mlir
@@ -39,8 +39,7 @@
 }>
 
 #COOSlice = #sparse_tensor.encoding<{
-  lvlTypes = [ "compressed_nu", "singleton" ],
-  dimSlices = [ (2, 2, 1), (12, 13, 1) ]
+  map = (d0 : #sparse_tensor<slice(2, 2, 1)>, d1 : #sparse_tensor<slice(12, 13, 1)>) -> (d0 : compressed(nonunique), d1 : singleton)
 }>
 
 // CHECK-LABEL: func @sparse_nop_convert(
diff --git a/mlir/test/Dialect/SparseTensor/invalid.mlir 
b/mlir/test/Dialect/SparseTensor/invalid.mlir
index c0e813dcde7c57e..2a13f208fa225d3 100644
--- a/mlir/test/Dialect/SparseTensor/invalid.mlir
+++ b/mlir/test/Dialect/SparseTensor/invalid.mlir
@@ -201,8 +201,7 @@ func.func @mismatch_values_types(%arg0: tensor) -> memref<
 // -
 
 #CSR_SLICE = #sparse_tensor.encoding<{
-  lvlTypes = [ "dense", "compressed" ],
-  dimSlices = [ (1, 4, 1), (1, 4, 2) ]
+  map = (d0 : #sparse_tensor<slice(1, 4, 1)>, d1 : #sparse_tensor<slice(1, 4, 2)>) -> (d0 : dense, d1 : compressed)
 }>
 
 func.func @sparse_slice_offset(%arg0: tensor<2x8xf64, #CSR_SLICE>) -> index {
@@ -214,8 +213,7 @@ func.func @sparse_slice_offset(%arg0: tensor<2x8xf64, 
#CSR_SLICE>) -> index {
 // -
 
 #CSR_SLICE = #sparse_tensor.encoding<{
-  lvlTypes = [ "dense", "compressed" ],
-  dimSlices = [ (1, 4, 1), (1, 4, 2) ]
+  map = (d0 : #sparse_tensor<slice(1, 4, 1)>, d1 : #sparse_tensor<slice(1, 4, 2)>) -> (d0 : dense, d1 : compressed)
 }>
 
 func.func @sparse_slice_stride(%arg0: tensor<2x8xf64, #CSR_SLICE>) -> index {
@@ -400,8 +398,7 @@ func.func @invalid_out_dense(%arg0: tensor<10xf64>, %arg1: 
!llvm.ptr) {
 // -
 
 #CSR = #sparse_tensor.encoding<{
-  lvlTypes = ["dense", "compressed"],
-  dimSlices = [ (1, 4, 1), (1, 4, 2) ]
+  map = (d0 : 
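
The rewrite this patch applies to the tests is mechanical: each old dimSlices triple (offset, size, stride) moves onto the corresponding dimension identifier of the map as a #sparse_tensor<slice(offset, size, stride)> annotation. A standalone C++ sketch of that translation (toSliceSyntax is invented for illustration and only builds the textual form; it is not part of the patch):

#include <iostream>
#include <sstream>
#include <string>
#include <vector>

// Sketch: turn old-style dimSlices triples into the per-dimension slice
// annotations used by the new map syntax.
static std::string toSliceSyntax(const std::vector<std::string> &triples) {
  std::ostringstream os;
  os << '(';
  for (size_t i = 0; i < triples.size(); ++i) {
    if (i > 0)
      os << ", ";
    os << 'd' << i << " : #sparse_tensor<slice" << triples[i] << '>';
  }
  os << ')';
  return os.str();
}

int main() {
  // Old form: dimSlices = [ (1, 4, 1), (1, 4, 2) ]
  std::cout << toSliceSyntax({"(1, 4, 1)", "(1, 4, 2)"}) << '\n';
  // Prints: (d0 : #sparse_tensor<slice(1, 4, 1)>, d1 : #sparse_tensor<slice(1, 4, 2)>)
}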

[clang] [mlir][sparse] Change tests to use new syntax for ELL and slice (PR #67569)

2023-09-27 Thread Yinying Li via cfe-commits

https://github.com/yinying-lisa-li updated 
https://github.com/llvm/llvm-project/pull/67569

>From 0f1db4000d971192e0b2b027746cdd37083bf87f Mon Sep 17 00:00:00 2001
From: Yinying Li 
Date: Wed, 27 Sep 2023 14:51:34 +
Subject: [PATCH] [mlir][sparse] Change tests to use new syntax for ELL and
 slice

Examples:

#ELL = #sparse_tensor.encoding<{
  lvlTypes = [ "dense", "dense", "compressed" ],
  dimToLvl = affine_map<(i,j)[c] -> (c*4*i, i, j)>
}>
to
#ELL = #sparse_tensor.encoding<{
  map = [s0](d0, d1) -> (d0 * (s0 * 4) : dense, d0 : dense, d1 : compressed)
}>

#CSR_SLICE = #sparse_tensor.encoding<{
  lvlTypes = [ "dense", "compressed" ],
  dimSlices = [ (1, 4, 1), (1, 4, 2) ]
}>
to
#CSR_SLICE = #sparse_tensor.encoding<{
  map = (d0 : #sparse_tensor, d1 : #sparse_tensor) -> (d0 : dense, d1 : compressed)
}>
---
 .../SparseTensor/IR/SparseTensorAttrDefs.td   |  5 ++--
 mlir/test/CAPI/sparse_tensor.c|  3 +--
 .../SparseTensor/convert_sparse2sparse.mlir   |  3 +--
 mlir/test/Dialect/SparseTensor/invalid.mlir   |  9 +++
 .../SparseTensor/invalid_encoding.mlir|  3 +--
 .../Dialect/SparseTensor/pre_rewriting.mlir   |  3 +--
 mlir/test/Dialect/SparseTensor/roundtrip.mlir | 15 
 .../SparseTensor/roundtrip_encoding.mlir  | 20 +++-
 .../SparseTensor/sparse_extract_slice.mlir|  3 +--
 .../Dialect/SparseTensor/sparse_foreach.mlir  |  6 ++---
 .../CPU/sparse_foreach_slices.mlir| 12 --
 .../SparseTensor/CPU/sparse_matmul_slice.mlir | 24 +++
 12 files changed, 33 insertions(+), 73 deletions(-)

diff --git a/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorAttrDefs.td 
b/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorAttrDefs.td
index e6577aed063ca7f..58e09f0d5e1803b 100644
--- a/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorAttrDefs.td
+++ b/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorAttrDefs.td
@@ -228,8 +228,9 @@ def SparseTensorEncodingAttr : 
SparseTensor_Attr<"SparseTensorEncoding",
 // CSR slice (offset = 0, size = 4, stride = 1 on the first dimension;
 // offset = 0, size = 8, and a dynamic stride on the second dimension).
 #CSR_SLICE = #sparse_tensor.encoding<{
-  lvlTypes = [ "dense", "compressed" ],
-  dimSlices = [ (0, 4, 1), (0, 8, ?) ]
+  map = (d0 : #sparse_tensor,
+ d1 : #sparse_tensor) ->
+(d0 : dense, d1 : compressed)
 }>
 ... tensor ...
 
diff --git a/mlir/test/CAPI/sparse_tensor.c b/mlir/test/CAPI/sparse_tensor.c
index 30ef1557e73302f..33ee8e784096a18 100644
--- a/mlir/test/CAPI/sparse_tensor.c
+++ b/mlir/test/CAPI/sparse_tensor.c
@@ -25,8 +25,7 @@ static int testRoundtripEncoding(MlirContext ctx) {
   // clang-format off
   const char *originalAsm =
 "#sparse_tensor.encoding<{ "
-"lvlTypes = [ \"dense\", \"compressed\", \"compressed\"], "
-"dimToLvl = affine_map<(d0, d1)[s0] -> (s0, d0, d1)>, "
+"map = [s0](d0, d1) -> (s0 : dense, d0 : compressed, d1 : compressed), "
 "posWidth = 32, crdWidth = 64 }>";
   // clang-format on
   MlirAttribute originalAttr =
diff --git a/mlir/test/Dialect/SparseTensor/convert_sparse2sparse.mlir 
b/mlir/test/Dialect/SparseTensor/convert_sparse2sparse.mlir
index 54cdfc690952d9a..2a2619daf493654 100644
--- a/mlir/test/Dialect/SparseTensor/convert_sparse2sparse.mlir
+++ b/mlir/test/Dialect/SparseTensor/convert_sparse2sparse.mlir
@@ -39,8 +39,7 @@
 }>
 
 #COOSlice = #sparse_tensor.encoding<{
-  lvlTypes = [ "compressed_nu", "singleton" ],
-  dimSlices = [ (2, 2, 1), (12, 13, 1) ]
+  map = (d0 : #sparse_tensor, d1 : #sparse_tensor) -> (d0 : compressed(nonunique), d1 : singleton)
 }>
 
 // CHECK-LABEL: func @sparse_nop_convert(
diff --git a/mlir/test/Dialect/SparseTensor/invalid.mlir 
b/mlir/test/Dialect/SparseTensor/invalid.mlir
index c0e813dcde7c57e..2a13f208fa225d3 100644
--- a/mlir/test/Dialect/SparseTensor/invalid.mlir
+++ b/mlir/test/Dialect/SparseTensor/invalid.mlir
@@ -201,8 +201,7 @@ func.func @mismatch_values_types(%arg0: tensor) -> memref<
 // -
 
 #CSR_SLICE = #sparse_tensor.encoding<{
-  lvlTypes = [ "dense", "compressed" ],
-  dimSlices = [ (1, 4, 1), (1, 4, 2) ]
+  map = (d0 : #sparse_tensor, d1 : #sparse_tensor) -> (d0 : dense, d1 : compressed)
 }>
 
 func.func @sparse_slice_offset(%arg0: tensor<2x8xf64, #CSR_SLICE>) -> index {
@@ -214,8 +213,7 @@ func.func @sparse_slice_offset(%arg0: tensor<2x8xf64, 
#CSR_SLICE>) -> index {
 // -
 
 #CSR_SLICE = #sparse_tensor.encoding<{
-  lvlTypes = [ "dense", "compressed" ],
-  dimSlices = [ (1, 4, 1), (1, 4, 2) ]
+  map = (d0 : #sparse_tensor, d1 : #sparse_tensor) -> (d0 : dense, d1 : compressed)
 }>
 
 func.func @sparse_slice_stride(%arg0: tensor<2x8xf64, #CSR_SLICE>) -> index {
@@ -400,8 +398,7 @@ func.func @invalid_out_dense(%arg0: tensor<10xf64>, %arg1: 
!llvm.ptr) {
 // -
 
 #CSR = #sparse_tensor.encoding<{
-  lvlTypes = ["dense", "compressed"],
-  dimSlices = [ (1, 4, 1), (1, 4, 2) ]
+  map = (d0 :