https://github.com/yinying-lisa-li updated
https://github.com/llvm/llvm-project/pull/67569
>From 0f1db4000d971192e0b2b027746cdd37083bf87f Mon Sep 17 00:00:00 2001
From: Yinying Li
Date: Wed, 27 Sep 2023 14:51:34 +0000
Subject: [PATCH] [mlir][sparse] Change tests to use new syntax for ELL and
slice
Examples:
#ELL = #sparse_tensor.encoding<{
lvlTypes = [ "dense", "dense", "compressed" ],
dimToLvl = affine_map<(i,j)[c] -> (c*4*i, i, j)>
}>
to
#ELL = #sparse_tensor.encoding<{
map = [s0](d0, d1) -> (d0 * (s0 * 4) : dense, d0 : dense, d1 : compressed)
}>
#CSR_SLICE = #sparse_tensor.encoding<{
lvlTypes = [ "dense", "compressed" ],
dimSlices = [ (1, 4, 1), (1, 4, 2) ]
}>
to
#CSR_SLICE = #sparse_tensor.encoding<{
map = (d0 : #sparse_tensor<slice(1, 4, 1)>, d1 : #sparse_tensor<slice(1, 4, 2)>) -> (d0 : dense, d1 : compressed)
}>
---
.../SparseTensor/IR/SparseTensorAttrDefs.td | 5 ++--
mlir/test/CAPI/sparse_tensor.c| 3 +--
.../SparseTensor/convert_sparse2sparse.mlir | 3 +--
mlir/test/Dialect/SparseTensor/invalid.mlir | 9 +++
.../SparseTensor/invalid_encoding.mlir| 3 +--
.../Dialect/SparseTensor/pre_rewriting.mlir | 3 +--
mlir/test/Dialect/SparseTensor/roundtrip.mlir | 15
.../SparseTensor/roundtrip_encoding.mlir | 20 +++-
.../SparseTensor/sparse_extract_slice.mlir| 3 +--
.../Dialect/SparseTensor/sparse_foreach.mlir | 6 ++---
.../CPU/sparse_foreach_slices.mlir| 12 --
.../SparseTensor/CPU/sparse_matmul_slice.mlir | 24 +++
12 files changed, 33 insertions(+), 73 deletions(-)
diff --git a/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorAttrDefs.td
b/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorAttrDefs.td
index e6577aed063ca7f..58e09f0d5e1803b 100644
--- a/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorAttrDefs.td
+++ b/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorAttrDefs.td
@@ -228,8 +228,9 @@ def SparseTensorEncodingAttr :
SparseTensor_Attr<"SparseTensorEncoding",
// CSR slice (offset = 0, size = 4, stride = 1 on the first dimension;
// offset = 0, size = 8, and a dynamic stride on the second dimension).
#CSR_SLICE = #sparse_tensor.encoding<{
- lvlTypes = [ "dense", "compressed" ],
- dimSlices = [ (0, 4, 1), (0, 8, ?) ]
+ map = (d0 : #sparse_tensor<slice(0, 4, 1)>,
+ d1 : #sparse_tensor<slice(0, 8, ?)>) ->
+(d0 : dense, d1 : compressed)
}>
... tensor ...
diff --git a/mlir/test/CAPI/sparse_tensor.c b/mlir/test/CAPI/sparse_tensor.c
index 30ef1557e73302f..33ee8e784096a18 100644
--- a/mlir/test/CAPI/sparse_tensor.c
+++ b/mlir/test/CAPI/sparse_tensor.c
@@ -25,8 +25,7 @@ static int testRoundtripEncoding(MlirContext ctx) {
// clang-format off
const char *originalAsm =
"#sparse_tensor.encoding<{ "
-"lvlTypes = [ \"dense\", \"compressed\", \"compressed\"], "
-"dimToLvl = affine_map<(d0, d1)[s0] -> (s0, d0, d1)>, "
+"map = [s0](d0, d1) -> (s0 : dense, d0 : compressed, d1 : compressed), "
"posWidth = 32, crdWidth = 64 }>";
// clang-format on
MlirAttribute originalAttr =
diff --git a/mlir/test/Dialect/SparseTensor/convert_sparse2sparse.mlir
b/mlir/test/Dialect/SparseTensor/convert_sparse2sparse.mlir
index 54cdfc690952d9a..2a2619daf493654 100644
--- a/mlir/test/Dialect/SparseTensor/convert_sparse2sparse.mlir
+++ b/mlir/test/Dialect/SparseTensor/convert_sparse2sparse.mlir
@@ -39,8 +39,7 @@
}>
#COOSlice = #sparse_tensor.encoding<{
- lvlTypes = [ "compressed_nu", "singleton" ],
- dimSlices = [ (2, 2, 1), (12, 13, 1) ]
+ map = (d0 : #sparse_tensor<slice(2, 2, 1)>, d1 : #sparse_tensor<slice(12, 13, 1)>) -> (d0 : compressed(nonunique), d1 : singleton)
}>
// CHECK-LABEL: func @sparse_nop_convert(
diff --git a/mlir/test/Dialect/SparseTensor/invalid.mlir
b/mlir/test/Dialect/SparseTensor/invalid.mlir
index c0e813dcde7c57e..2a13f208fa225d3 100644
--- a/mlir/test/Dialect/SparseTensor/invalid.mlir
+++ b/mlir/test/Dialect/SparseTensor/invalid.mlir
@@ -201,8 +201,7 @@ func.func @mismatch_values_types(%arg0: tensor) -> memref<
// -----
#CSR_SLICE = #sparse_tensor.encoding<{
- lvlTypes = [ "dense", "compressed" ],
- dimSlices = [ (1, 4, 1), (1, 4, 2) ]
+ map = (d0 : #sparse_tensor<slice(1, 4, 1)>, d1 : #sparse_tensor<slice(1, 4, 2)>) -> (d0 : dense, d1 : compressed)
}>
func.func @sparse_slice_offset(%arg0: tensor<2x8xf64, #CSR_SLICE>) -> index {
@@ -214,8 +213,7 @@ func.func @sparse_slice_offset(%arg0: tensor<2x8xf64,
#CSR_SLICE>) -> index {
// -----
#CSR_SLICE = #sparse_tensor.encoding<{
- lvlTypes = [ "dense", "compressed" ],
- dimSlices = [ (1, 4, 1), (1, 4, 2) ]
+ map = (d0 : #sparse_tensor<slice(1, 4, 1)>, d1 : #sparse_tensor<slice(1, 4, 2)>) -> (d0 : dense, d1 : compressed)
}>
func.func @sparse_slice_stride(%arg0: tensor<2x8xf64, #CSR_SLICE>) -> index {
@@ -400,8 +398,7 @@ func.func @invalid_out_dense(%arg0: tensor<10xf64>, %arg1:
!llvm.ptr) {
// -----
#CSR = #sparse_tensor.encoding<{
- lvlTypes = ["dense", "compressed"],
- dimSlices = [ (1, 4, 1), (1, 4, 2) ]
+ map = (d0 :