jiangjiajun commented on a change in pull request #9126:
URL: https://github.com/apache/tvm/pull/9126#discussion_r717355007
##########
File path: python/tvm/relay/frontend/paddlepaddle.py
##########
@@ -40,28 +41,108 @@
__all__ = ["from_paddle"]
+def _get_pad_size(in_size, dilated_kernel_size, stride_size):
+ """calculate the paddings size"""
+
+ if stride_size == 1 or in_size % stride_size == 0:
+ pad = max(dilated_kernel_size - stride_size, 0)
+ else:
+ pad = max(dilated_kernel_size - (in_size % stride_size), 0)
+
+ pad_before = pad // 2
+ pad_after = pad - pad_before
+
+ return [pad_before, pad_after]
+
+
def _dtype_shape_promotion(inputs):
    """Promote a list of tensors to a common rank and a common dtype.

    When the list mixes rank-0 scalars with rank-1 tensors, each scalar is
    expanded to rank 1. If more than one dtype is present, every tensor is
    cast up to the widest dtype found, using the order
    bool < int8 < int16 < int32 < int64 < float32 < float64.
    Mutates ``inputs`` in place and returns it.
    """
    dtype_order = ["bool", "int8", "int16", "int32", "int64", "float32", "float64"]

    ranks = [len(infer_shape(tensor)) for tensor in inputs]
    # Only the scalar/vector mix needs rank alignment.
    if set(ranks) == {0, 1}:
        for idx, rank in enumerate(ranks):
            if rank == 0:
                inputs[idx] = _op.expand_dims(inputs[idx], axis=0)

    dtype_indices = {dtype_order.index(infer_type(tensor).checked_type.dtype) for tensor in inputs}
    if len(dtype_indices) > 1:
        target = dtype_order[max(dtype_indices)]
        for idx, tensor in enumerate(inputs):
            if infer_type(tensor).checked_type.dtype != target:
                inputs[idx] = tensor.astype(target)
    return inputs
+
+
def shape_of(x, dtype="int32"):
Review comment:
We referred to the ONNX frontend, and this function also comes from there:
https://github.com/apache/tvm/blob/main/python/tvm/relay/frontend/onnx.py#L1411
It is slightly different from `common::infer_shape`.
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]