masahi opened a new pull request, #13093:
URL: https://github.com/apache/tvm/pull/13093
Currently, trying to tune the following simple module with MetaSchedule results in an error:
```
import tvm
from tvm import relay

data_shape = weight_shape = (128, 128)  # shapes matching the trace below
data = relay.var("data", shape=data_shape, dtype="float32")
weight = relay.var("weight", shape=weight_shape, dtype="float32")
dense = relay.nn.dense(data, weight)
mod = tvm.IRModule.from_expr(dense + data)
```
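For context, a rough sketch of the kind of tuning call that hits this. The exact entry point has moved around between TVM releases; this sketch assumes the `ms.relay_integration` API, and the target and trial count are placeholders, not values from this PR:
```
import tempfile
from tvm import meta_schedule as ms

target = tvm.target.Target("cuda")  # placeholder target
with tempfile.TemporaryDirectory() as work_dir:
    # Tuning the fused dense + add task is where the error below shows up.
    database = ms.relay_integration.tune_relay(
        mod=mod,
        params={},
        target=target,
        work_dir=work_dir,
        max_trials_global=64,
    )
    lib = ms.relay_integration.compile_relay(database, mod, target, params={})
```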
```
...
for ax0, ax1 in T.grid(128, 128):
    with T.block("p0_shared"):
        v0, v1 = T.axis.remap("SS", [ax0, ax1])
        T.reads(p0[v0, v1])
        T.writes(p0_shared[v0, v1])
        p0_shared[v0, v1] = p0[v0, v1]
for i0_0_i1_0_fused in T.thread_binding(4, thread="blockIdx.x"):
    for i0_1_i1_1_fused in T.thread_binding(16, thread="vthread.x"):
        for i0_2_i1_2_fused in T.thread_binding(8, thread="threadIdx.x"):
            for i2_0, i2_1, i0_3, i1_3, i2_2, i0_4, i1_4 in T.grid(1, 32, 1, 32, 4, 1, 1):
                # tir.Block#0
                with T.block("T_matmul_NT"):
                ^^^^^^^^^^^^^^^^^^^^^^^^^^^^
                    i = T.axis.spatial(128, i0_1_i1_1_fused * 8 + i0_2_i1_2_fused + i0_3 + i0_4)
                    j = T.axis.spatial(128, i1_4 + i0_0_i1_0_fused * 32 + i1_3)
                    k = T.axis.reduce(128, i2_0 * 128 + i2_1 * 4 + i2_2)
                    T.reads(p0_shared[i, k], p1[j, k])
                    T.writes(T_matmul_NT_local[i, j])
                    T.block_attr({
                        "layout_free_placeholders": [],
                        "meta_schedule.thread_extent_high_inclusive": 1024,
                        "meta_schedule.thread_extent_low_inclusive": 32,
                        "meta_schedule.tiling_structure": "SSSRRSRS",
                        "workload": ["dense_small_batch.gpu", ["TENSOR", [128, 128], "float32"], ["TENSOR", [128, 128], "float32"], None, "float32"],
                    })
                    with T.init():
                        T_matmul_NT_local[i, j] = T.float32(0)
                    T_matmul_NT_local[i, j] = T_matmul_NT_local[i, j] + p0_shared[i, k] * p1[j, k]
            for ax0, ax1 in T.grid(1, 32):
                with T.block("T_matmul_NT_local"):
                    v0 = T.axis.spatial(128, i0_1_i1_1_fused * 8 + i0_2_i1_2_fused + ax0)
                    v1 = T.axis.spatial(128, i0_0_i1_0_fused * 32 + ax1)
                    T.reads(T_matmul_NT_local[v0, v1])
                    T.writes(T_matmul_NT[v0, v1])
                    T_matmul_NT[v0, v1] = T_matmul_NT_local[v0, v1]
for i0, i1 in T.grid(128, 128):
    # tir.Block#1
    with T.block("T_add"):
    ^^^^^^^^^^^^^^^^^^^^^^
        ax0, ax1 = T.axis.remap("SS", [i0, i1])
        T.reads(T_matmul_NT[ax0, ax1], p0_shared[ax0, ax1])
        T.writes(T_add[ax0, ax1])
        T_add[ax0, ax1] = T_matmul_NT[ax0, ax1] + p0_shared[ax0, ax1]

Error message: The primitive requires all the consumer(s) of the given block to be
present under the target loop. However, there are 1 consumer(s) not satisfying the
constraint. List of the consumer(s): tir.Block#0 tir.Block#1
```
The cache-read block created by `AddReadReuse` rewrites every consumer of `p0`, so the elementwise `T_add` block ends up reading `p0_shared` as well (see its `T.reads` above), and moving `p0_shared` under the tiled loops then fails because `T_add` is not under the target loop. This is fixed by using the cache-read buffer only in the block that `AddReadReuse` is operating on.
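A minimal TIR-level sketch of the idea, not the code changed in this PR: it rebuilds the same 128x128 dense + add pattern with `te` and assumes the `consumer_blocks` argument of `tir.Schedule.cache_read`, which restricts the buffer rewrite to the listed consumers.
```
import tvm
from tvm import te, tir

# Rebuild the dense (NT matmul) + add pattern from the trace above.
A = te.placeholder((128, 128), name="A")
B = te.placeholder((128, 128), name="B")
k = te.reduce_axis((0, 128), name="k")
matmul = te.compute((128, 128), lambda i, j: te.sum(A[i, k] * B[j, k], axis=k), name="T_matmul_NT")
out = te.compute((128, 128), lambda i, j: matmul[i, j] + A[i, j], name="T_add")

sch = tir.Schedule(te.create_prim_func([A, B, out]))
block = sch.get_block("T_matmul_NT")

# Restrict the shared-memory reuse buffer to the matmul block only: T_add keeps
# reading A directly, so moving the cache block under the tiled loops no longer
# trips over a consumer outside the target loop.
sch.cache_read(block, 0, "shared", consumer_blocks=[block])
print(sch.mod.script())
```
With `consumer_blocks=[block]`, only `T_matmul_NT` reads the shared-memory copy while `T_add` keeps reading the original buffer, which matches the behaviour described above.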