jinhongyii commented on a change in pull request #8467:
URL: https://github.com/apache/tvm/pull/8467#discussion_r670127252



##########
File path: python/tvm/tir/schedule/schedule.py
##########
@@ -257,6 +257,133 @@ def get_loops(self, block: BlockRV) -> List[LoopRV]:
         return _ffi_api_schedule.ScheduleGetLoops(self, block)  # type: ignore 
# pylint: disable=no-member
 
     ########## Schedule: loops manipulation ##########
+    def fuse(self, *loops: List[LoopRV]) -> LoopRV:
+        """Fuse a list of consecutive loops into one. It requires:
+        1) The loops can't have annotations or thread bindings.
+        2) The (i+1)-th loop must be the only child of the i-th loop.
+        3) All loops must start with 0.
+
+        Parameters
+        ----------
+        *loops : List[LoopRV]
+            The loops to be fused
+
+        Returns
+        -------
+        fused_loop : LoopRV
+            The new loop after fusion
+
+        Examples
+        --------
+
+        Before fuse, in TensorIR, the IR is:
+
+        .. code-block:: python
+
+            @tvm.script.tir
+            def before_fuse(a: ty.handle, b: ty.handle) -> None:
+                A = tir.match_buffer(a, (128, 128))
+                B = tir.match_buffer(b, (128, 128))
+                with tir.block([128, 128], "B") as [vi, vj]:
+                    B[vi, vj] = A[vi, vj] * 2.0
+
+        Create the schedule and do fuse:
+
+        .. code-block:: python
+
+            sch = tir.Schedule(before_fuse, debug_mode=True)
+            i, j = sch.get_loops(sch.get_block("B"))
+            sch.fuse(i, j)
+            print(tvm.script.asscript(sch.mod["main"]))
+
+        After applying fuse, the IR becomes:
+
+        .. code-block:: python
+
+            @tvm.script.tir
+            def after_fuse(a: ty.handle, b: ty.handle) -> None:
+                A = tir.match_buffer(a, (128, 128))
+                B = tir.match_buffer(b, [128, 128])
+                for i0_i1_fused in tir.serial(0, 16384):
+                    with tir.block([128, 128], "B") as [vi, vj]:
+                        tir.bind(vi, tir.floordiv(i0_i1_fused, 128))
+                        tir.bind(vj, tir.floormod(i0_i1_fused, 128))
+                        tir.reads([A[vi, vj]])
+                        tir.writes([B[vi, vj]])
+                        B[vi, vj] = A[vi, vj] * 2.0
+
+        """
+        return _ffi_api_schedule.ScheduleFuse(self, loops)  # type: ignore # 
pylint: disable=no-member
+
+    def split(
+        self,
+        loop: LoopRV,
+        factors: List[Optional[ExprRV]],
+    ) -> List[LoopRV]:
+        """Split a loop into a list of consecutive loops. It requires:
+        1) The loop can't have annotation or thread binding.
+        2) The loop must start with 0.
+        Predicates may be added to ensure that the total number of
+        iterations remains unchanged.
+        In `factors`, at most one of the factors can be None or -1,
+        which will be automatically inferred.
+        Parameters
+        ----------
+        loop : LoopRV
+            The loop to be split
+
+        factors: List[Optional[ExprRV]]
+            The splitting factors
+
+        Returns
+        -------
+        split_loops : List[LoopRV]
+            The new loops after split
+
+        Examples
+        --------
+
+        Before split, in TensorIR, the IR is:
+
+        .. code-block:: python
+
+            @tvm.script.tir
+            def before_split(a: ty.handle, b: ty.handle) -> None:
+                A = tir.match_buffer(a, (128, 128))
+                B = tir.match_buffer(b, (128, 128))
+                with tir.block([128, 128], "B") as [vi, vj]:
+                    B[vi, vj] = A[vi, vj] * 2.0
+
+        Create the schedule and do split:
+
+        .. code-block:: python
+
+            sch = tir.Schedule(before_split, debug_mode=True)
+            i, j = sch.get_loops(sch.get_block("B"))
+            sch.split(i, factors=[2, 64])
+            print(tvm.script.asscript(sch.mod["main"]))
+
+        After applying split, the IR becomes:
+
+        .. code-block:: python
+
+            @tvm.script.tir
+            def after_split(a: ty.handle, b: ty.handle) -> None:
+                A = tir.match_buffer(a, (128, 128))
+                B = tir.match_buffer(b, [128, 128])
+                for i0_outer, i0_inner, i1 in tir.grid(2, 64, 128):
+                    with tir.block([128, 128], "B") as [vi, vj]:
+                        tir.bind(vi, ((i0_outer*64) + i0_inner))
+                        tir.bind(vj, i1)
+                        tir.reads([A[vi, vj]])
+                        tir.writes([B[vi, vj]])
+                        B[vi, vj] = A[vi, vj] * 2.0
+
+        """
+        for i, factor in enumerate(factors):
+            if factor is None:
+                factors[i] = -1

Review comment:
   1. `None` will be converted to `-1` in the C++ implementation. For
      convenience, I chose to support `-1` as well.
   2. I want `ConcreteScheduleNode::Split` to accept factors of type
      `const Array<ExprRV>&` instead of `const Array<Optional<ExprRV>>&`
      to make it clearer.




-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]


Reply via email to