haojin2 commented on a change in pull request #11587: [MXNET-378] Adding
depth_to_space and space_to_depth operator (Updated)
URL: https://github.com/apache/incubator-mxnet/pull/11587#discussion_r204856166
##########
File path: src/operator/tensor/matrix_op-inl.h
##########
@@ -2171,6 +2171,328 @@ inline bool SqueezeShape(const nnvm::NodeAttrs& attrs,
return true;
}
+struct DepthToSpaceParam : public dmlc::Parameter<DepthToSpaceParam> {
+ int block_size;
+ DMLC_DECLARE_PARAMETER(DepthToSpaceParam) {
+ DMLC_DECLARE_FIELD(block_size)
+ .describe("Blocks of [block_size. block_size] are moved");
+ }
+};
+
+inline bool DepthToSpaceOpShape(const nnvm::NodeAttrs& attrs,
+ std::vector<TShape>* in_attrs,
+ std::vector<TShape>* out_attrs) {
+ const DepthToSpaceParam& param = nnvm::get<DepthToSpaceParam>(attrs.parsed);
+ CHECK_EQ(in_attrs->size(), 1U);
+ CHECK_EQ(out_attrs->size(), 1U);
+ CHECK_EQ(in_attrs->at(0).ndim(), 4) << "Operation Depth To Space requires
exactly 4D tensor";
+
+ TShape expected_out(4);
+
+ TShape& in_shape = in_attrs->at(0);
+ int block = param.block_size;
+ CHECK_NE(block, 0) << "block_size must be a positive integer value";
+ CHECK_NE(in_shape[1], 0) << "Depth dimension:1 cannot be 0";
+ CHECK_EQ(in_shape[1] % (block * block), 0)
+ << "Cannot perform Depth To Space operation on the specified tensor."
+ " Dimension:1(depth dimension) should be a multiple of 'block^2'";
+ CHECK_NE(in_shape[0], 0)
+ << "Operation requires a 4D tensor. Size of dimension:0 cannot be 0";
+ CHECK_NE(in_shape[2], 0)
+ << "Operation requires a 4D tensor. Size of dimension:2 cannot be 0";
+ CHECK_NE(in_shape[3], 0)
+ << "Operation requires a 4D tensor. Size of dimension:3 cannot be 0";
+
+ expected_out[0] = in_shape[0];
+ expected_out[1] = in_shape[1] / (block * block);
+ uint32_t i = 2;
+ while (i < expected_out.ndim()) {
+ expected_out[i] = in_shape[i] * block;
+ ++i;
+ }
+
+ SHAPE_ASSIGN_CHECK(*out_attrs, 0, expected_out);
+ return true;
+}
+
+inline bool DepthToSpaceOpType(const nnvm::NodeAttrs& attrs,
+ std::vector<int>* in_attrs,
+ std::vector<int>* out_attrs) {
+ CHECK_EQ(in_attrs->size(), 1U);
+ CHECK_EQ(out_attrs->size(), 1U);
+
+ TYPE_ASSIGN_CHECK(*out_attrs, 0, in_attrs->at(0));
+ TYPE_ASSIGN_CHECK(*in_attrs, 0, out_attrs->at(0));
+ return out_attrs->at(0) != -1;
+}
+
+/*!
+ * \brief This function updates the value of input index from where the data
element
+ * needs to be fetched and written out to the ith location in output tensor
+ * \param index_position index within offset array to get offset of given
dimension
+ * \param dim_size size of current dimension
+ * \param idx output tensor index
+ * \param inp_index index within input tensor from where value is
retrieved
+ * \param offset_arr array containing the linear offset of input tensor
+ */
+MSHADOW_XINLINE void update_index(int index_position, int dim_size, int *idx,
+ int *inp_index, const int* offset_arr) {
+ int next_idx_val = *idx / dim_size;
+ *inp_index += (*idx - next_idx_val * dim_size) * offset_arr[index_position];
+ *idx = next_idx_val;
+}
+
+/*!
+ * \brief This function preforms the tensor transpose (0, 1, 2, 3, 4, 5) ->
+ * (0, 3, 4, 1, 5, 2) by computing linear index within input tensor to be
mapped
+ * to the ith index of output tensor
+ * \param i tensor index
+ * \param out_data output tensor
+ * \param in_data input tensor
+ * \param block size of chunks to be moved out of depth dimension
+ * \param size array containing the size of each dimension of input
tensor
+ * \param offset_arr array containing the linear offset of input tensor
+ */
+template<int req>
+struct depth_to_space_forward {
+ template<typename DType>
+ MSHADOW_XINLINE static void Map(int i, DType* out_data, const DType* in_data,
+ const int block, const int* size, const int*
offset_arr) {
+ int inp_index = 0, idx = i, dim_size;
+ dim_size = block;
+ update_index(2, dim_size, &idx, &inp_index, offset_arr);
+ dim_size = size[3];
+ update_index(5, dim_size, &idx, &inp_index, offset_arr);
+ dim_size = block;
+ update_index(1, dim_size, &idx, &inp_index, offset_arr);
+ dim_size = size[2];
+ update_index(4, dim_size, &idx, &inp_index, offset_arr);
+ dim_size = size[1] / (block * block);
+ update_index(3, dim_size, &idx, &inp_index, offset_arr);
+ dim_size = size[0];
+ update_index(0, dim_size, &idx, &inp_index, offset_arr);
+ KERNEL_ASSIGN(out_data[i], req, in_data[inp_index]);
+ }
+};
+
+/*!
+ * \brief This function calculates the linear offset for each dimension of
+ * input tensor and stores them in an array, which is later used in
+ * performing depth_to_space operation
+ * \param i global thread id
+ * \param offset_arr array to be populated with offset values
+ * \param size array to be populated with size of each dimension of
input tensor
+ * \param block size of chunks to be moved out of depth dimension
+ * \param size0 size of Dim 0 of input tensor
+ * \param size1 size of Dim 1 of input tensor
+ * \param size2 size of Dim 2 of input tensor
+ * \param size3 size of Dim 3 of input tensor
+ */
+template<int req>
+struct compute_offset_for_depth_to_space {
+ template<typename DType>
+ MSHADOW_XINLINE static void Map(int i, DType* offset_arr, DType* size, const
int block,
+ const int32_t size0, const int32_t size1,
const int32_t size2,
+ const int32_t size3) {
+ size[0] = size0;
+ size[1] = size1;
+ size[2] = size2;
+ size[3] = size3;
+
+ offset_arr[5] = 1;
+ offset_arr[4] = offset_arr[5] * size[3];
+ offset_arr[3] = offset_arr[4] * size[2];
+ offset_arr[2] = offset_arr[3] * size[1] / (block * block);
+ offset_arr[1] = offset_arr[2] * block;
+ offset_arr[0] = offset_arr[1] * block;
+ }
+};
+
/*!
 * \brief Forward pass of depth_to_space. Allocates a small scratch workspace,
 *        fills it with the input's dimension sizes and 6D-view strides via a
 *        one-thread kernel, then launches one thread per output element to
 *        perform the gather implementing the (0,3,4,1,5,2) transpose.
 * \param attrs   operator attributes carrying the parsed DepthToSpaceParam
 * \param ctx     operator context providing the stream and requested workspace
 * \param inputs  exactly one input TBlob (4D tensor)
 * \param req     write request type for the single output
 * \param outputs exactly one output TBlob
 */
template<typename xpu>
void DepthToSpaceOpForward(const nnvm::NodeAttrs& attrs,
                           const OpContext& ctx,
                           const std::vector<TBlob>& inputs,
                           const std::vector<OpReqType>& req,
                           const std::vector<TBlob>& outputs) {
  CHECK_EQ(inputs.size(), 1U);
  CHECK_EQ(outputs.size(), 1U);
  CHECK_EQ(req.size(), 1U);
  mshadow::Stream<xpu> *s = ctx.get_stream<xpu>();
  const TBlob& in_data = inputs[0];
  const TBlob& out_data = outputs[0];
  const DepthToSpaceParam& param = nnvm::get<DepthToSpaceParam>(attrs.parsed);
  using namespace mxnet_op;
  int block = param.block_size;

  // Workspace layout: 10 int32 values — offset_arr[0..5] (6D-view strides)
  // followed by size[0..3] (input extents). Kept in xpu memory so both
  // kernels below can read it on the device.
  mshadow::Tensor<xpu, 1, char> workspace =
    ctx.requested[0].get_space_typed<xpu, 1, char>(mshadow::Shape1(sizeof(int32_t) * 10), s);
  char* workspace_curr_ptr = workspace.dptr_;
  int32_t* offset_arr = reinterpret_cast<int32_t*>(workspace_curr_ptr);
  int32_t* size = reinterpret_cast<int32_t*>(workspace_curr_ptr + sizeof(int32_t) * 6);

  MSHADOW_TYPE_SWITCH(out_data.type_flag_, DType, {
    MXNET_ASSIGN_REQ_SWITCH(req[0], req_type, {
      // Single-thread launch: populates offset_arr and size on the device.
      Kernel<compute_offset_for_depth_to_space<req_type>, xpu>::Launch(
          s, 1, offset_arr, size, block, in_data.shape_[0], in_data.shape_[1],
          in_data.shape_[2], in_data.shape_[3]);

      // One thread per output element performs the transpose gather.
      Kernel<depth_to_space_forward<req_type>, xpu>::Launch(
          s, out_data.Size(), out_data.dptr<DType>(), in_data.dptr<DType>(),
          block, size, offset_arr);
    });
  });
}
+
+inline bool SpaceToDepthOpShape(const nnvm::NodeAttrs& attrs,
+ std::vector<TShape>* in_attrs,
+ std::vector<TShape>* out_attrs) {
+ const DepthToSpaceParam& param = nnvm::get<DepthToSpaceParam>(attrs.parsed);
+ CHECK_EQ(in_attrs->size(), 1U);
+ CHECK_EQ(out_attrs->size(), 1U);
+ CHECK_EQ(in_attrs->at(0).ndim(), 4) << "Operation Space To Depth requires
exactly 4D tensor";
+
+ TShape expected_out(in_attrs->at(0).ndim());
+
+ TShape& in_shape = in_attrs->at(0);
+ int block = param.block_size;
+ CHECK_NE(block, 0) << "block_size must be a positive integer value";
+ CHECK_NE(in_shape[0], 0)
+ << "Operation requires a 4D tensor. Size of dimension:0 cannot be 0";
+ CHECK_NE(in_shape[1], 0) << "Depth dimension:1 cannot be 0";
+ CHECK_NE(in_shape[2], 0)
+ << "Operation requires a 4D tensor. Size of dimension:2 cannot be 0";
+ CHECK_EQ(in_shape[2]%block, 0)
+ << "Cannot perform Depth To Space operation on the specified tensor."
+ " Dimension:2(1st Space dimension) should be a multiple of 'block' ";
+ CHECK_NE(in_shape[3], 0)
+ << "Operation requires a 4D tensor. Size of dimension:3 cannot be 0";
+ CHECK_EQ(in_shape[3]%block, 0)
Review comment:
nit: add spaces around `%`
----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
For queries about this service, please contact Infrastructure at:
[email protected]
With regards,
Apache Git Services