delldu commented on a change in pull request #8443:
URL: https://github.com/apache/tvm/pull/8443#discussion_r670228541
##########
File path: tests/python/frontend/pytorch/test_forward.py
##########
@@ -3912,6 +3911,98 @@ def forward(self, x):
verify_model(Flip(axis=-1), input_data=input)
@tvm.testing.uses_gpu
def test_forward_im2col():
    """Check that aten::im2col converts correctly for several configurations."""
    torch.set_grad_enabled(False)

    class Im2col(Module):
        def __init__(self, kernel_size, dilation, padding, stride):
            super(Im2col, self).__init__()
            # Expand each scalar hyper-parameter into an (h, w) pair.
            self.kernel_size = (kernel_size, kernel_size)
            self.dilation = (dilation, dilation)
            self.padding = (padding, padding)
            self.stride = (stride, stride)

        def forward(self, x):
            # NOTE: do NOT use F.unfold(x, kernel_size=3, dilation=1,
            # padding=1, stride=1) here — it breaks TVM's
            # "if conditional expression" handling in torch-script mode,
            # so the underlying ATen op is called directly instead.
            return torch._C._nn.im2col(
                x, self.kernel_size, self.dilation, self.padding, self.stride
            )

    data = torch.randn(2, 3, 32, 32)
    # (kernel_size, dilation, padding, stride) configurations to exercise.
    for config in [(5, 1, 1, 2), (3, 1, 2, 1), (5, 1, 2, 2)]:
        verify_model(Im2col(*config), input_data=data)
+
+
@tvm.testing.uses_gpu
def test_forward_grid_sampler():
    """Check that aten::grid_sampler converts correctly in both traced and script mode."""
    torch.set_grad_enabled(False)

    class GridSampler(Module):
        def __init__(self, output_h, output_w):
            super(GridSampler, self).__init__()
            self.output_h = output_h
            self.output_w = output_w

            # Precompute a sampling grid with coordinates normalized
            # to [-1.0, 1.0] along both axes.
            ys = torch.arange(0, output_h) / (output_h - 1.0) * 2.0 - 1.0
            xs = torch.arange(0, output_w) / (output_w - 1.0) * 2.0 - 1.0
            coords = torch.zeros(output_h, output_w, 2)
            coords[:, :, 0] = xs.unsqueeze(0).repeat(output_h, 1)
            coords[:, :, 1] = ys.unsqueeze(0).repeat(output_w, 1).transpose(0, 1)
            self.grid = coords.unsqueeze(0)

        def forward(self, input):
            batch = input.size(0)
            sample_grid = self.grid.repeat(batch, 1, 1, 1).to(input.device)

            # Torch's grid_sample defaults are mode='bilinear',
            # padding_mode='zeros', align_corners=False; TVM seems to
            # align corners as True.
            #
            # NOTE: do NOT use F.grid_sample(input, grid, mode='bilinear',
            # padding_mode='zeros', align_corners=True) here — it breaks
            # TVM's "if conditional expression" handling in torch-script
            # mode, so call the underlying ATen op directly
            # (interpolation_mode=0, padding_mode=0, align_corners=True).
            return torch.grid_sampler(input, sample_grid, 0, 0, True)

    model = GridSampler(16, 32)
    data = torch.randn(2, 3, 32, 32)

    verify_model(model, input_data=data)
    verify_script_model(model.eval(), [(2, 3, 32, 32)], _get_default_vm_targets())
+
+
[email protected]_gpu
+def test_forward_float():
+ torch.set_grad_enabled(False)
+
+ def convert_i(i: int) -> float:
+ return float(i)
+
+ class FloatModel(Module):
+ def __init__(self):
+ super(FloatModel, self).__init__()
+
+ def forward(self, x):
+ f = convert_i(10)
Review comment:
@masahi, I have a slightly different opinion.
Of course, if you only use traced mode, the previous aten::Float handling is OK — no bug,
no fix needed.
If you want script mode, convert_i(10) and float(10) are not the same thing; you
can explore the difference with a test.
As a matter of fact, users in some cases cannot avoid script mode.
Should we remove all verify_script_model in test_forward ?
You are the TVM expert — feel free to give us your comments: 1) remove it, or 2) keep it?
Thanks a lot.
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]