azai91 commented on a change in pull request #11778: [MXNET-483] C++ tests for mkldnn convolution operator
URL: https://github.com/apache/incubator-mxnet/pull/11778#discussion_r207374254
##########
File path: tests/cpp/operator/mkldnn.cc
##########
@@ -1204,6 +1271,155 @@ void TestPoolingOp(const OpAttrs &forward_attrs, const OpAttrs &backwards_attrs)
}
}
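+// Standard conv output width: floor((W - K + 2P) / S) + 1; the integer
+// division provides the floor, e.g. W=8, K=3, P=1, S=1 -> 8.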
+int CalculateWidthConvOutput(int width, int kernel, int padding, int stride) {
+ return (width - kernel + 2 * padding) / stride + 1;
+}
+
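+// Transposed-conv (deconv) output width inverts the formula above:
+// S * (W - 1) + K - 2P, e.g. W=8, K=3, P=1, S=1 -> 8.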
+int CalculateWidthDeconvOutput(int width, int kernel, int padding, int stride) {
+ return stride * (width - 1) + kernel - 2 * padding;
+}
+
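+// Conv weights are shaped (num_filter, in_channels, kH, kW); deconv swaps
+// the first two axes to (in_channels, num_filter, kH, kW), hence is_deconv.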
+NDArray CreateKernelNDArray(TShape kernel, int num_filters, TShape input, bool is_deconv = false) {
+ CHECK(kernel.ndim() == 2) << "mkldnn only supports 2d filters on 4d inputs";
+ TShape target_shape(4);
+ target_shape[0] = is_deconv ? input[1] : num_filters;
+ target_shape[1] = is_deconv ? num_filters : input[1];
+ target_shape[2] = kernel[0];
+ target_shape[3] = kernel[1];
+ int dtype = mshadow::DataType<mshadow::default_real_t>::kFlag;
+ NDArray arr(target_shape, Context());
+ auto pd = GetMemPD(target_shape, dtype, mkldnn::memory::format::nchw);
+ InitMKLDNNArray(&arr, pd);
+ return arr;
+}
+
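+// Bias is a 1D array with one element per output filter (mkldnn format x).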
+NDArray CreateBiasNDArray(int num_filters) {
+ TShape target_shape = {num_filters};
+ int dtype = mshadow::DataType<mshadow::default_real_t>::kFlag;
+ NDArray arr(target_shape, Context());
+ auto pd = GetMemPD(target_shape, dtype, mkldnn::memory::format::x);
+ InitMKLDNNArray(&arr, pd);
+ return arr;
+}
+
+void TestConvOp(const OpAttrs &forward_attrs, const OpAttrs &backwards_attrs,
+ bool is_deconv = false) {
+ std::vector<NDArray*> inputs(forward_attrs.num_inputs);
+ std::vector<NDArray*> outputs(forward_attrs.num_outputs);
+ std::vector<NDArray*> ex_outputs(forward_attrs.num_outputs);
+
+ std::vector<NDArray*> backwards_input(backwards_attrs.num_inputs);
+ std::vector<NDArray*> backwards_outputs(backwards_attrs.num_outputs);
+ std::vector<NDArray*> backwards_ex_outputs(backwards_attrs.num_outputs);
+
+ std::vector<OpReqType> req(forward_attrs.num_outputs);
+ std::vector<OpReqType> back_req(backwards_attrs.num_outputs);
+ std::vector<DispatchMode> dispatches = forward_attrs.dispatches;
+
+ TestArrayShapes tas = GetTestArrayShapes();
+ std::vector<mkldnn::memory::primitive_desc> pds = tas.pds;
+
+ mxnet::op::ConvolutionParam param;
+ param.Init(forward_attrs.attrs.dict);
+ TShape kernel = param.kernel;
+ TShape padding = param.pad;
+ TShape stride = param.stride;
+ int num_filter = param.num_filter;
+
+ std::vector<NDArrayAttrs> in_arrs = GetTestInputArrays(true);
+ std::vector<std::vector<NDArrayAttrs>> out_arrs(forward_attrs.num_outputs);
+ std::vector<std::vector<NDArrayAttrs>> ex_out_arrs(forward_attrs.num_outputs);
+
+ for (size_t i1 = 0; i1 < in_arrs.size(); i1++) {
+ auto in_arr = in_arrs[i1];
+
+ // can only convolve 4D inputs
+ TShape input_shape = in_arr.arr.shape();
+ if (input_shape.ndim() != kernel.ndim() + 2)
+ continue;
+
+ // cannot convolve if ndarray and mkldnn memory have different ndim
+ if (in_arr.arr.IsView() ||
+     in_arr.arr.GetMKLDNNData()->get_primitive_desc().desc().data.ndims != in_arr.arr.shape().ndim())
+ continue;
+
+ float scale = CalculateWidthConvOutput(input_shape[2], kernel[0], padding[0], stride[0])
+     / static_cast<float>(input_shape[2]);
+
+ if (is_deconv) {
+ scale = CalculateWidthDeconvOutput(input_shape[2], kernel[0], padding[0], stride[0])
+     / static_cast<float>(input_shape[2]);
+ }
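+ // expected output dims relative to input: batch unchanged, channels scaled
+ // by num_filter / in_channels, spatial dims by the conv/deconv width ratio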
+ std::vector<float> scale_vector(in_arr.arr.shape().ndim());
+ scale_vector[0] = 1;
+ scale_vector[1] = static_cast<float>(num_filter) / input_shape[1];
+ scale_vector[2] = scale;
+ scale_vector[3] = scale;
+
+ for (int i = 0; i < forward_attrs.num_outputs; i++) {
+ out_arrs[i] = GetTestOutputArrays(in_arr.arr.shape(), pds, scale_vector);
+ ex_out_arrs[i] = GetTestOutputArrays(in_arr.arr.shape(), pds, scale_vector);
+ }
+ NDArray ndkernel = CreateKernelNDArray(kernel, num_filter, in_arr.arr.shape(), is_deconv);
+ NDArray ndbias = CreateBiasNDArray(num_filter);
+ inputs[0] = &in_arr.arr;
+ inputs[1] = &ndkernel;
+ inputs[2] = &ndbias;
+ for (size_t output_i = 0; output_i < out_arrs[0].size(); output_i++) {
+ for (int i = 0; i < forward_attrs.num_outputs; i++) {
+ req[i] = kWriteTo;
+ outputs[i] = &out_arrs[i][output_i].arr;
+ ex_outputs[i] = &ex_out_arrs[i][output_i].arr;
+ }
+ Imperative::Get()->set_is_training(true);
+
+ PrintVerifyMsg(in_arr, out_arrs[0][output_i]);
+ Imperative::Get()->InvokeOp(Context(), forward_attrs.attrs, inputs,
+                             outputs, req, DispatchMode::kFCompute, mxnet::OpStatePtr());
+ Imperative::Get()->InvokeOp(Context(), forward_attrs.attrs, inputs,
+                             ex_outputs, req, DispatchMode::kFComputeEx, mxnet::OpStatePtr());
+ Engine::Get()->WaitForAll();
+ VerifyCopyResult(outputs, ex_outputs);
+
+ // backward pass is tested at the same time since it needs the forward outputs
+ backwards_input[0] = outputs[0]; // output grad
+ backwards_input[1] = inputs[0]; // input
+ backwards_input[2] = inputs[1]; // kernel
+ backwards_input[3] = inputs[2]; // bias
+
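+ // separate output arrays for the regular and the FComputeEx backward paths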
+ auto tmp_output = GetTestInputArrays(true)[i1];
+ NDArray tmp_kernel = CreateKernelNDArray(kernel, num_filter, in_arr.arr.shape(), is_deconv);
+ NDArray tmp_bias = CreateBiasNDArray(num_filter);
+
+ backwards_outputs[0] = &tmp_output.arr;
+ backwards_outputs[1] = &tmp_kernel;
+ backwards_outputs[2] = &tmp_bias;
+
+ auto tmp_output2 = GetTestInputArrays(true)[i1];
+ NDArray tmp_kernel2 = CreateKernelNDArray(kernel, num_filter, in_arr.arr.shape(), is_deconv);
+ NDArray tmp_bias2 = CreateBiasNDArray(num_filter);
+ backwards_ex_outputs[0] = &tmp_output2.arr;
+ backwards_ex_outputs[1] = &tmp_kernel2;
+ backwards_ex_outputs[2] = &tmp_bias2;
+
+ for (int i = 0; i < backwards_attrs.num_outputs; i++)
+ back_req[i] = kWriteTo;
Review comment:
The regular FCompute path for convolution/deconvolution does not currently
support kAddTo, which is why only kWriteTo is set here:
https://github.com/azai91/incubator-mxnet/blob/d79e1ad3294837cac653478045023fd312ceed78/src/operator/nn/convolution-inl.h#L178
https://github.com/azai91/incubator-mxnet/blob/46e47cbc6183d2812a2e405851f0b209383e72ad/src/operator/nn/deconvolution-inl.h#L223
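
For illustration only (a hedged sketch, not MXNet source; ApplyGrad and the
reduced enum are made up for the example): the difference between the two
OpReqType write semantics referenced above.

#include <cassert>
#include <vector>

// Toy stand-in for mxnet::OpReqType, reduced to the two modes discussed.
enum OpReqType { kWriteTo, kAddTo };

// kWriteTo overwrites the gradient buffer; kAddTo accumulates into it.
// Per the comment above, conv/deconv FCompute backward handles only kWriteTo.
void ApplyGrad(OpReqType req, std::vector<float>* out,
               const std::vector<float>& grad) {
  for (size_t i = 0; i < out->size(); ++i) {
    if (req == kWriteTo)
      (*out)[i] = grad[i];   // overwrite
    else
      (*out)[i] += grad[i];  // accumulate
  }
}

int main() {
  std::vector<float> out(2, 1.0f), grad(2, 0.5f);
  ApplyGrad(kWriteTo, &out, grad);  // out == {0.5, 0.5}
  assert(out[0] == 0.5f);
  ApplyGrad(kAddTo, &out, grad);    // out == {1.0, 1.0}
  assert(out[0] == 1.0f);
  return 0;
}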
----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
For queries about this service, please contact Infrastructure at:
[email protected]
With regards,
Apache Git Services