wyc-ruiker commented on a change in pull request #8443:
URL: https://github.com/apache/tvm/pull/8443#discussion_r667702856
##########
File path: python/tvm/relay/frontend/pytorch.py
##########
@@ -1875,7 +1875,9 @@ def Bool(self, inputs, input_types):
def Float(self, inputs, input_types):
assert len(inputs) == 1
- return _op.cast(inputs[0], "float32")
+ if isinstance(inputs[0], _expr.Expr):
+ return inputs[0]
+ return float(inputs[0])
Review comment:
Could you add some comments? I'm not sure this change is correct.
##########
File path: include/tvm/topi/nn.h
##########
@@ -690,6 +690,130 @@ inline Tensor nll_loss(const Tensor& predictions, const
Tensor& targets, const T
return T;
}
}
+
+/*!
+ * \brief Creates an operation that performs im2col with an NCHW-layout
+ *
+ * \param data The 4-D input tensor
+ * \param kernel_size A static tuple for kernel size, such as (3,3)
+ * \param dilation A static tuple for dilation, default is (1,1)
+ * \param padding A static tuple for padding, padding value is zero
+ * \param stride A static tuple for strides, default is (1,1)
+ * \param name The name of the operation
+ * \param tag The tag to mark the operation
+ *
+ * \return A Tensor whose op member is the im2col operation (NCHW layout)
+
+ Pseudo code:
+ input_b, input_c, input_h, input_w = data_shape
+ dilation_h, dilation_w = dilation
+ padding_h, padding_w = padding
+
+ dilated_kernel_h = (kernel_h - 1) * dilation_h + 1
+ dilated_kernel_w = (kernel_w - 1) * dilation_w + 1
+
+ output_h = (input_h + 2 * padding_h - dilated_kernel_h)//stride_h + 1
+ output_w = (input_w + 2 * padding_w - dilated_kernel_w)//stride_w + 1
+
+ h_offset = (k // input_w) % kernel_h
+ w_offset = k % kernel_w
+
+ im2col_data = te.compute(
+ (N, K, L),
+ lambda n, k, l: data[
+ n,
+ k / kernel_h / kernel_w,
+ stride_h * (l / output_w) + dilation_h * h_offset - padding_h,
+ stride_w * (l % output_w) + dilation_w * w_offset - padding_w,
+ ],
+ name="im2col_data",
+ )
+*/
+inline tvm::te::Tensor im2col(const tvm::te::Tensor& data,
+ const tvm::Array<tvm::PrimExpr>& kernel_size,
+ const tvm::Array<tvm::PrimExpr>& dilation,
+ const tvm::Array<tvm::PrimExpr>& padding,
+ const tvm::Array<tvm::PrimExpr>& stride,
+ std::string name = "T_im2col", std::string tag =
kElementWise) {
+ ICHECK_EQ(4, data->shape.size());
+ ICHECK_EQ(2, kernel_size.size());
+ ICHECK_EQ(2, dilation.size());
+ ICHECK_EQ(2, padding.size());
+ ICHECK_EQ(2, stride.size());
+
+ auto input_b = data->shape[0];
+ auto input_c = data->shape[1];
+ auto input_h = data->shape[2];
+ auto input_w = data->shape[3];
+
+ auto kernel_h = tvm::cast(tvm::DataType::Int(32), kernel_size[0]);
+ auto kernel_w = tvm::cast(tvm::DataType::Int(32), kernel_size[1]);
+
+ auto dilation_h = tvm::cast(tvm::DataType::Int(32), dilation[0]);
+ auto dilation_w = tvm::cast(tvm::DataType::Int(32), dilation[1]);
+
+ auto padding_h = tvm::cast(tvm::DataType::Int(32), padding[0]);
+ auto padding_w = tvm::cast(tvm::DataType::Int(32), padding[1]);
+
+ auto stride_h = tvm::cast(tvm::DataType::Int(32), stride[0]);
+ auto stride_w = tvm::cast(tvm::DataType::Int(32), stride[1]);
Review comment:
Maybe we can try `tir::as_const_int` here instead of `tvm::cast`, so the kernel/dilation/padding/stride values are extracted as compile-time integer constants.
##########
File path: include/tvm/topi/nn.h
##########
@@ -690,6 +690,130 @@ inline Tensor nll_loss(const Tensor& predictions, const
Tensor& targets, const T
return T;
}
}
+
+/*!
+ * \brief Creates an operation that performs im2col with an NCHW-layout
+ *
+ * \param data The 4-D input tensor
+ * \param kernel_size A static tuple for kernel size, such as (3,3)
+ * \param dilation A static tuple for dilation, default is (1,1)
+ * \param padding A static tuple for padding, padding value is zero
+ * \param stride A static tuple for strides, default is (1,1)
Review comment:
There is a space at the front of this line.
##########
File path: src/relay/op/nn/pad.cc
##########
@@ -272,5 +272,86 @@ RELAY_REGISTER_OP("nn.mirror_pad")
.add_type_rel("MirrorPad", MirrorPadRel)
.set_attr<TOpPattern>("TOpPattern", kInjective);
+Array<te::Tensor> Im2colCompute(const Attrs& attrs, const Array<te::Tensor>&
inputs,
+ const Type& out_type) {
+ const auto* param = attrs.as<Im2colAttrs>();
+ ICHECK(param != nullptr);
+
+ return Array<te::Tensor>{
+ topi::im2col(inputs[0], param->kernel_size, param->dilation,
param->padding, param->stride)};
+}
+
+bool Im2colRel(const Array<Type>& types, int num_inputs, const Attrs& attrs,
+ const TypeReporter& reporter) {
+ // types: [input, output]
+ ICHECK_EQ(types.size(), 2) << "Expects two types, one for the input and
another for the output";
+
Review comment:
We should add more `ICHECK`s on the attribute sizes, e.g. `param->padding.size()`,
`param->dilation.size()`, `param->kernel_size.size()`, and `param->stride.size()`.
##########
File path: src/relay/op/nn/pad.cc
##########
@@ -272,5 +272,86 @@ RELAY_REGISTER_OP("nn.mirror_pad")
.add_type_rel("MirrorPad", MirrorPadRel)
.set_attr<TOpPattern>("TOpPattern", kInjective);
+Array<te::Tensor> Im2colCompute(const Attrs& attrs, const Array<te::Tensor>&
inputs,
+ const Type& out_type) {
+ const auto* param = attrs.as<Im2colAttrs>();
+ ICHECK(param != nullptr);
+
+ return Array<te::Tensor>{
+ topi::im2col(inputs[0], param->kernel_size, param->dilation,
param->padding, param->stride)};
+}
+
+bool Im2colRel(const Array<Type>& types, int num_inputs, const Attrs& attrs,
+ const TypeReporter& reporter) {
+ // types: [input, output]
+ ICHECK_EQ(types.size(), 2) << "Expects two types, one for the input and
another for the output";
+
+ const auto* input = types[0].as<TensorTypeNode>();
+ if (input == nullptr) return false;
+
+ if (input->shape.size() != 4) {
+ reporter->GetDiagCtx().EmitFatal(Diagnostic::Error(reporter->GetSpan())
+ << "Im2lossRel: input data should be 4
dimensions, NxCxHxW.");
+ return false;
+ }
+
+ const Im2colAttrs* param = attrs.as<Im2colAttrs>();
+ if (param == nullptr) return false;
+
+ // Calculate output shape
+ auto kernel_h = tvm::cast(tvm::DataType::Int(32), param->kernel_size[0]);
+ auto kernel_w = tvm::cast(tvm::DataType::Int(32), param->kernel_size[1]);
+ auto dilation_h = tvm::cast(tvm::DataType::Int(32), param->dilation[0]);
+ auto dilation_w = tvm::cast(tvm::DataType::Int(32), param->dilation[1]);
+ auto padding_h = tvm::cast(tvm::DataType::Int(32), param->padding[0]);
+ auto padding_w = tvm::cast(tvm::DataType::Int(32), param->padding[1]);
+ auto stride_h = tvm::cast(tvm::DataType::Int(32), param->stride[0]);
+ auto stride_w = tvm::cast(tvm::DataType::Int(32), param->stride[1]);
Review comment:
It seems we could use `tir::as_const_int` here as well?
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]