heliqi commented on code in PR #14172:
URL: https://github.com/apache/tvm/pull/14172#discussion_r1132013613
##########
python/tvm/relay/frontend/paddlepaddle.py:
##########
@@ -400,6 +400,30 @@ def convert_conv2d_transpose(g, op, block):
g.add_node(op.output("Output")[0], out)
+def convert_dist(g, op, block):
+ """Operator converter for dist."""
+
+ x = g.get_node(op.input("X")[0])
+ y = g.get_node(op.input("Y")[0])
+ z = _op.abs(_op.subtract(x, y))
Review Comment:
`z` should be `x - y`; it can't use `_op.abs`, otherwise `inv_p = _expr.const(1.0 / p,
dtype=dtype)` is miscalculated.
##########
python/tvm/relay/frontend/paddlepaddle.py:
##########
@@ -475,6 +499,50 @@ def convert_elementwise_op(g, op, block):
g.add_node(op.output("Out")[0], out)
+def convert_linspace(g, op, block):
+ """Operator converter for linspace."""
+
+ start = g.get_node(op.input("Start")[0])
+ stop = g.get_node(op.input("Stop")[0])
+ num = g.get_node(op.input("Num")[0])
+ dtype = _convert_dtype_value(op.attr("dtype"))
+ start, infered = try_infer_value(start, parameters=g.get_params())
+ if infered:
+ start = start.tolist()[0]
+ else:
+ msg = 'Value {} in attribute "start" of operator Linspace is not
"valid."'
+ raise tvm.error.OpAttributeInvalid(msg.format(start))
+
+ stop, infered = try_infer_value(stop, parameters=g.get_params())
+ if infered:
+ stop = stop.tolist()[0]
+ else:
+ msg = 'Value {} in attribute "stop" of operator Linspace is not
"valid."'
+ raise tvm.error.OpAttributeInvalid(msg.format(stop))
+
+ num, infered = try_infer_value(num, parameters=g.get_params())
+ if infered:
+ num = num.tolist()[0]
+ else:
+ msg = 'Value {} in attribute "num" of operator Linspace is not
"valid."'
+ raise tvm.error.OpAttributeInvalid(msg.format(num))
+
+ if num == 1:
+ out = _op.full(_expr.const(start, dtype), shape=(1))
+ else:
+ if dtype in ["int32", "int64"]:
Review Comment:
dtype should support float32, float64, int32, and int64.
##########
python/tvm/relay/frontend/paddlepaddle.py:
##########
@@ -475,6 +499,50 @@ def convert_elementwise_op(g, op, block):
g.add_node(op.output("Out")[0], out)
+def convert_linspace(g, op, block):
+ """Operator converter for linspace."""
+
+ start = g.get_node(op.input("Start")[0])
+ stop = g.get_node(op.input("Stop")[0])
+ num = g.get_node(op.input("Num")[0])
+ dtype = _convert_dtype_value(op.attr("dtype"))
+ start, infered = try_infer_value(start, parameters=g.get_params())
Review Comment:
Dynamic shapes should also be supported.
##########
python/tvm/relay/frontend/paddlepaddle.py:
##########
@@ -514,6 +582,27 @@ def convert_expand_as(g, op, block):
g.add_node(op.output("Out")[0], out)
+def convert_eye(g, op, block):
+ """Operator converter for eye."""
+
+ num_rows = op.attr("num_rows")
+ num_columns = op.attr("num_columns")
Review Comment:
num_columns might equal -1, in which case it should be treated as equal to num_rows.
##########
tests/python/frontend/paddlepaddle/test_forward.py:
##########
@@ -1969,5 +1969,142 @@ def forward(self, inputs):
verify_model(Mish(), input_data=input_data)
[email protected]_gpu
+def test_forward_thresholded_relu():
+ class ThresholdedRelu(nn.Layer):
+ @paddle.jit.to_static
+ def forward(self, inputs):
+ return nn.functional.thresholded_relu(inputs)
+
+ input_shapes = [[10], [2, 3], [5, 10, 11], [3, 4, 5, 6]]
+ for input_shape in input_shapes:
+ input_data = paddle.randn(shape=input_shape, dtype="float32")
+ verify_model(ThresholdedRelu(), input_data=input_data)
+
+
[email protected]_gpu
+def test_forward_index_select():
+ class IndexSelect1(nn.Layer):
+ @paddle.jit.to_static
+ def forward(self, x, index):
+ return paddle.index_select(x, index, axis=0)
+
+ class IndexSelect2(nn.Layer):
+ @paddle.jit.to_static
+ def forward(self, x, index):
+ return paddle.index_select(x, index, axis=-1)
+
+ input_shapes = [[10], [2, 3], [5, 10, 11], [3, 4, 5, 6]]
+ for input_shape in input_shapes:
+ input_data = paddle.randn(shape=input_shape, dtype="float32")
+ index = paddle.to_tensor([0, 1, 1], dtype="int32")
+ verify_model(IndexSelect1(), input_data=[input_data, index])
+ verify_model(IndexSelect2(), input_data=[input_data, index])
+
+
[email protected]_gpu
+def test_forward_eye():
+ class Eye1(nn.Layer):
+ @paddle.jit.to_static
+ def forward(self, inputs):
+ return paddle.eye(3, 5, dtype="int32"), paddle.eye(3, 5,
dtype="float32"), inputs
+
+ class Eye2(nn.Layer):
Review Comment:
Add a test case where num_columns is None.
##########
python/tvm/relay/frontend/paddlepaddle.py:
##########
@@ -475,6 +499,50 @@ def convert_elementwise_op(g, op, block):
g.add_node(op.output("Out")[0], out)
+def convert_linspace(g, op, block):
+ """Operator converter for linspace."""
+
+ start = g.get_node(op.input("Start")[0])
+ stop = g.get_node(op.input("Stop")[0])
+ num = g.get_node(op.input("Num")[0])
+ dtype = _convert_dtype_value(op.attr("dtype"))
+ start, infered = try_infer_value(start, parameters=g.get_params())
+ if infered:
+ start = start.tolist()[0]
+ else:
+ msg = 'Value {} in attribute "start" of operator Linspace is not
"valid."'
+ raise tvm.error.OpAttributeInvalid(msg.format(start))
+
+ stop, infered = try_infer_value(stop, parameters=g.get_params())
+ if infered:
+ stop = stop.tolist()[0]
+ else:
+ msg = 'Value {} in attribute "stop" of operator Linspace is not
"valid."'
+ raise tvm.error.OpAttributeInvalid(msg.format(stop))
+
+ num, infered = try_infer_value(num, parameters=g.get_params())
+ if infered:
+ num = num.tolist()[0]
+ else:
+ msg = 'Value {} in attribute "num" of operator Linspace is not
"valid."'
+ raise tvm.error.OpAttributeInvalid(msg.format(num))
+
+ if num == 1:
+ out = _op.full(_expr.const(start, dtype), shape=(1))
+ else:
+ if dtype in ["int32", "int64"]:
+ start = int(start)
+ stop = int(stop)
+ step = (stop - start) / (num - 1)
+ stop = stop + step
+ start = _expr.const(start, "float32")
+ stop = _expr.const(stop, "float32")
+ step = _expr.const(step, "float32")
+ out = _op.transform.arange(start=start, stop=stop, step=step,
dtype="float32")
Review Comment:
Why is dtype fixed to "float32"?
##########
tests/python/frontend/paddlepaddle/test_forward.py:
##########
@@ -1969,5 +1969,142 @@ def forward(self, inputs):
verify_model(Mish(), input_data=input_data)
[email protected]_gpu
+def test_forward_thresholded_relu():
+ class ThresholdedRelu(nn.Layer):
+ @paddle.jit.to_static
+ def forward(self, inputs):
+ return nn.functional.thresholded_relu(inputs)
Review Comment:
Add some cases where the threshold is 0.5 and the input data (generated via paddle.randn) ranges from 0 to 1.
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]