This is an automated email from the ASF dual-hosted git repository.

masahi pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/tvm.git


The following commit(s) were added to refs/heads/main by this push:
     new 375566b  Fix default pytorch divide behaviour (#10727)
375566b is described below

commit 375566b6e2d3c4a01844efbc7e6bff35bf28822c
Author: AleksKnezevic <[email protected]>
AuthorDate: Wed Mar 23 19:46:07 2022 -0400

    Fix default pytorch divide behaviour (#10727)
    
    Co-authored-by: Aleks Knezevic <[email protected]>
---
 python/tvm/relay/frontend/pytorch.py | 14 ++++++++++++++
 1 file changed, 14 insertions(+)

diff --git a/python/tvm/relay/frontend/pytorch.py b/python/tvm/relay/frontend/pytorch.py
index 37a8e45..8d37d48 100644
--- a/python/tvm/relay/frontend/pytorch.py
+++ b/python/tvm/relay/frontend/pytorch.py
@@ -254,6 +254,20 @@ class PyTorchOpConverter:
     # Operator implementations
     def make_elemwise(self, name):
         def elemwise(inputs, input_types):
+            if name == "divide":
+                # https://pytorch.org/docs/stable/generated/torch.div.html#torch.div
+                # None - default behavior. Performs no rounding and, if both input and
+                # other are integer types, promotes the inputs to the default scalar type.
+                if all(["int" in input_type for input_type in input_types[:2]]):
+                    input_types[:2] = ["float32"] * 2
+                    cast_inputs = []
+                    for inp in inputs[:2]:
+                        if np.isscalar(inp):
+                            cast_inputs.append(_expr.const(inp, dtype="float32"))
+                        else:
+                            cast_inputs.append(_op.cast(inp, "float32"))
+                    inputs[:2] = cast_inputs
+
             data0, data1 = self.pytorch_promote_types(inputs[:2], input_types[:2])
             return get_relay_op(name)(data0, data1)
 

Reply via email to