tlopex commented on code in PR #18609:
URL: https://github.com/apache/tvm/pull/18609#discussion_r2646896423
##########
python/tvm/topi/nn/batch_norm.py:
##########
@@ -111,26 +111,22 @@ def batch_norm(
     shape = [1] * len(data.shape)
     shape[axis] = data.shape[axis]
-    reduce_axes = list(range(len(data.shape)))
-    reduce_axes.remove(axis)
-    shape_prod = reduce(lambda x, y: x * y, [data.shape[ax] for ax in reduce_axes], 1)
-
-    data_mean = topi.sum(data, axis=reduce_axes) / shape_prod
-    data_mean_rs = topi.reshape(data_mean, shape)
-    data_var = (
-        topi.sum((data - data_mean_rs) * (data - data_mean_rs), axis=reduce_axes) / shape_prod
-    )
-    data_var_rs = topi.reshape(data_var, shape)
-
     if training:
+        reduce_axes = list(range(len(data.shape)))
+        reduce_axes.remove(axis)
+        shape_prod = reduce(lambda x, y: x * y, [data.shape[ax] for ax in reduce_axes], 1)
+        data_mean = topi.sum(data, axis=reduce_axes) / shape_prod
+        data_mean_rs = topi.reshape(data_mean, shape)
+        data_var = (
+            topi.sum((data - data_mean_rs) * (data - data_mean_rs), axis=reduce_axes) / shape_prod
+        )
+        data_var_rs = topi.reshape(data_var, shape)
+        out = (data - data_mean_rs) / topi.math.sqrt(data_var_rs + epsilon)
+    else:
         moving_mean_rs = topi.reshape(moving_mean, shape)
         moving_var_rs = topi.reshape(moving_var, shape)
-
         out = (data - moving_mean_rs) / topi.math.sqrt(moving_var_rs + epsilon)
Review Comment:
I think this review makes sense, could you make an update?
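
For reference, here is a rough NumPy sketch of the semantics I'd expect each branch to preserve after this move (my own illustration, not the TOPI code; `batch_norm_ref` is a hypothetical helper name):

```python
import numpy as np

def batch_norm_ref(data, moving_mean, moving_var, axis=1, epsilon=1e-5, training=False):
    shape = [1] * data.ndim
    shape[axis] = data.shape[axis]
    if training:
        # Batch statistics are only consumed on the training path, so
        # computing them unconditionally (as before) was dead work at inference.
        reduce_axes = tuple(ax for ax in range(data.ndim) if ax != axis)
        mean = data.mean(axis=reduce_axes).reshape(shape)
        var = ((data - mean) ** 2).mean(axis=reduce_axes).reshape(shape)
        return (data - mean) / np.sqrt(var + epsilon)
    # Inference path: normalize with the running (moving) statistics.
    return (data - np.reshape(moving_mean, shape)) / np.sqrt(np.reshape(moving_var, shape) + epsilon)
```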
##########
tests/python/relax/test_transform_legalize_ops_nn.py:
##########
@@ -2091,6 +2110,62 @@ def batch_norm(var_x: T.handle, var_gamma: T.handle, var_beta: T.handle, var_mov
                             T.reads(x[v_ax0, v_ax1, v_ax2, v_ax3], T_reshape[T.int64(0), v_ax1, T.int64(0), T.int64(0)])
                             T.writes(T_subtract[v_ax0, v_ax1, v_ax2, v_ax3])
                             T_subtract[v_ax0, v_ax1, v_ax2, v_ax3] = x[v_ax0, v_ax1, v_ax2, v_ax3] - T_reshape[T.int64(0), v_ax1, T.int64(0), T.int64(0)]
+        for ax0 in range(T.int64(2)):
+            for ax1 in range(T.int64(3)):
+                for ax2 in range(T.int64(28)):
+                    for ax3 in range(T.int64(28)):
+                        with T.block("T_subtract_1"):
+                            v_ax0 = T.axis.spatial(T.int64(2), ax0)
+                            v_ax1 = T.axis.spatial(T.int64(3), ax1)
+                            v_ax2 = T.axis.spatial(T.int64(28), ax2)
+                            v_ax3 = T.axis.spatial(T.int64(28), ax3)
+                            T.reads(x[v_ax0, v_ax1, v_ax2, v_ax3], T_reshape[T.int64(0), v_ax1, T.int64(0), T.int64(0)])
+                            T.writes(T_subtract_1[v_ax0, v_ax1, v_ax2, v_ax3])
+                            T_subtract_1[v_ax0, v_ax1, v_ax2, v_ax3] = x[v_ax0, v_ax1, v_ax2, v_ax3] - T_reshape[T.int64(0), v_ax1, T.int64(0), T.int64(0)]
+        for ax0 in range(T.int64(2)):
+            for ax1 in range(T.int64(3)):
+                for ax2 in range(T.int64(28)):
+                    for ax3 in range(T.int64(28)):
+                        with T.block("T_subtract_2"):
+                            v_ax0 = T.axis.spatial(T.int64(2), ax0)
+                            v_ax1 = T.axis.spatial(T.int64(3), ax1)
+                            v_ax2 = T.axis.spatial(T.int64(28), ax2)
+                            v_ax3 = T.axis.spatial(T.int64(28), ax3)
+                            T.reads(x[v_ax0, v_ax1, v_ax2, v_ax3], T_reshape[T.int64(0), v_ax1, T.int64(0), T.int64(0)])
+                            T.writes(T_subtract_2[v_ax0, v_ax1, v_ax2, v_ax3])
+                            T_subtract_2[v_ax0, v_ax1, v_ax2, v_ax3] = x[v_ax0, v_ax1, v_ax2, v_ax3] - T_reshape[T.int64(0), v_ax1, T.int64(0), T.int64(0)]
+        for ax0 in range(T.int64(2)):
+            for ax1 in range(T.int64(3)):
+                for ax2 in range(T.int64(28)):
+                    for ax3 in range(T.int64(28)):
+                        with T.block("T_multiply"):
+                            v_ax0 = T.axis.spatial(T.int64(2), ax0)
+                            v_ax1 = T.axis.spatial(T.int64(3), ax1)
+                            v_ax2 = T.axis.spatial(T.int64(28), ax2)
+                            v_ax3 = T.axis.spatial(T.int64(28), ax3)
+                            T.reads(T_subtract_1[v_ax0, v_ax1, v_ax2, v_ax3], T_subtract_2[v_ax0, v_ax1, v_ax2, v_ax3])
+                            T.writes(T_multiply[v_ax0, v_ax1, v_ax2, v_ax3])
+                            T_multiply[v_ax0, v_ax1, v_ax2, v_ax3] = T_subtract_1[v_ax0, v_ax1, v_ax2, v_ax3] * T_subtract_2[v_ax0, v_ax1, v_ax2, v_ax3]
+        for ax0 in range(T.int64(3)):
+            for k0 in range(T.int64(2)):
+                for k2 in range(T.int64(28)):
+                    for k3 in range(T.int64(28)):
+                        with T.block("T_multiply_red"):
+                            v_ax0 = T.axis.spatial(T.int64(3), ax0)
+                            v_k0 = T.axis.reduce(T.int64(2), k0)
+                            v_k2 = T.axis.reduce(T.int64(28), k2)
+                            v_k3 = T.axis.reduce(T.int64(28), k3)
+                            T.reads(T_multiply[v_k0, v_ax0, v_k2, v_k3])
+                            T.writes(T_multiply_red[v_ax0])
+                            with T.init():
+                                T_multiply_red[v_ax0] = T.float32(0.0)
+                            T_multiply_red[v_ax0] = T_multiply_red[v_ax0] + T_multiply[v_k0, v_ax0, v_k2, v_k3]
+        for ax0 in range(T.int64(3)):
+            with T.block("T_divide_1"):
+                v_ax0 = T.axis.spatial(T.int64(3), ax0)
+                T.reads(T_multiply_red[v_ax0])
+                T.writes(T_divide_1[v_ax0])
+                T_divide_1[v_ax0] = T_multiply_red[v_ax0] / T.float32(1568.0)
Review Comment:
Could you briefly explain the modification you made to the test? I didn't quite catch it.
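
For what it's worth, here is my current reading of the new blocks as a NumPy sketch (illustration only, please correct me if I've misread): `T_subtract_1` and `T_subtract_2` are both `x` minus the reshaped batch mean, `T_multiply` is their elementwise product, `T_multiply_red` sums it over axes (0, 2, 3), and `T_divide_1` divides by 2 * 28 * 28 = 1568, i.e. the per-channel batch variance.

```python
import numpy as np

x = np.random.rand(2, 3, 28, 28)                  # same shape as the test input
mean = x.mean(axis=(0, 2, 3), keepdims=True)      # the reshaped batch mean
diff = x - mean                                   # T_subtract_1 / T_subtract_2
var = (diff * diff).sum(axis=(0, 2, 3)) / 1568.0  # T_multiply -> T_multiply_red -> T_divide_1
assert np.allclose(var, x.var(axis=(0, 2, 3)))    # biased per-channel variance
```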
##########
python/tvm/relax/frontend/torch/exported_program_translator.py:
##########
@@ -199,7 +199,7 @@ def _batch_norm_legit_no_stats(self, node: fx.Node) -> relax.Var:
         # Determine axes for instance norm (all spatial dimensions after channel)
         dim = len(self.shape_of(x))
-        axes = list(range(2, dim))
+        axes = [0] + list(range(2, dim))
Review Comment:
Could you update the comment above to match the new axes?
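
To make the mismatch concrete: with `[0]` prepended, the statistics are computed over the batch dimension plus all spatial dimensions (per-channel, batch-norm style), whereas the comment still describes instance-norm axes (per-sample, spatial only). A small NumPy illustration (mine, not translator code):

```python
import numpy as np

x = np.random.rand(2, 3, 28, 28)  # (N, C, H, W)
dim = x.ndim

old_axes = tuple(range(2, dim))              # (2, 3): per-sample stats, instance-norm style
new_axes = tuple([0] + list(range(2, dim)))  # (0, 2, 3): per-channel stats over the batch

print(x.mean(axis=old_axes).shape)  # (2, 3)
print(x.mean(axis=new_axes).shape)  # (3,)
```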
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]