eric-haibin-lin commented on a change in pull request #13591: Add a DGL operator to compute vertex Ids in layers
URL: https://github.com/apache/incubator-mxnet/pull/13591#discussion_r242375690
 
 

 ##########
 File path: src/operator/contrib/dgl_graph.cc
 ##########
 @@ -1620,5 +1620,153 @@ empty rows and empty columns.
 .add_argument("graph_data", "NDArray-or-Symbol[]", "Input graphs and input 
vertex Ids.")
 .add_arguments(SubgraphCompactParam::__FIELDS__());
 
+///////////////////////// Layer vid ///////////////////////////
+
+struct LayerVidParam : public dmlc::Parameter<LayerVidParam> {
+  int num_args;
+  nnvm::Tuple<nnvm::dim_t> num_layers;
+  DMLC_DECLARE_PARAMETER(LayerVidParam) {
+    DMLC_DECLARE_FIELD(num_args).set_lower_bound(1)
+    .describe("Number of input arrays.");
+    DMLC_DECLARE_FIELD(num_layers)
+    .describe("The number of layers we want to get.");
+  }
+};  // struct LayerVidParam
+
+DMLC_REGISTER_PARAMETER(LayerVidParam);
+
+static void ComputeLayerVid(const TBlob &layer_ids, const TBlob &out,
+                            const TBlob &layer_sizes, size_t num_layers) {
+  CHECK_EQ(out.shape_[0], num_layers);
+  CHECK_EQ(layer_sizes.shape_[0], num_layers);
+  const int64_t *data = layer_ids.dptr<int64_t>();
+  int64_t *out_data = out.dptr<int64_t>();
+  int64_t *num_vs = layer_sizes.dptr<int64_t>();
+  for (size_t i = 0; i < num_layers; i++)
+    num_vs[i] = 0;
+  // Initialize output data.
+  for (size_t i = 0; i < out.shape_.Size(); i++)
+    out_data[i] = -1;
+
+  size_t max_size = out.shape_[1];
+  size_t size = layer_ids.shape_.Size();
+  for (size_t i = 0; i < size; i++) {
+    size_t layer = data[i];
+    // We only look for the vertices within a certain layer.
+    if (layer >= num_layers)
+      continue;
+    int64_t &num = num_vs[layer];
+    out_data[layer * max_size + num] = i;
+    num++;
+  }
+}
+
+static void LayerVidComputeCPU(const nnvm::NodeAttrs& attrs,
+                               const OpContext& ctx,
+                               const std::vector<TBlob>& inputs,
+                               const std::vector<OpReqType>& req,
+                               const std::vector<TBlob>& outputs) {
+  const LayerVidParam& params = nnvm::get<LayerVidParam>(attrs.parsed);
+  int num_arrs = inputs.size();
+#pragma omp parallel for num_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount())
+  for (int i = 0; i < num_arrs; i++) {
+    ComputeLayerVid(inputs[i], outputs[i], outputs[i + num_arrs],
+                    params.num_layers[i]);
+  }
+}
+
+static bool LayerVidStorageType(const nnvm::NodeAttrs& attrs,
 
 Review comment:
   No need to register the storage type if both inputs and outputs are dense and FCompute is used.
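   For context, a minimal sketch (not the actual registration in this PR) of the point above: when every input and output is a dense NDArray and an FCompute function is registered, MXNet's default storage-type inference already resolves to the dense path, so no explicit FInferStorageType attribute is needed. The op name and the LayerVidShape/LayerVidType helpers below are placeholders, and the snippet assumes the LayerVidParam and LayerVidComputeCPU definitions from the patch above.

     NNVM_REGISTER_OP(_contrib_dgl_layer_vid)  // hypothetical op name
     .set_attr_parser(ParamParser<LayerVidParam>)
     .set_num_inputs([](const nnvm::NodeAttrs& attrs) {
       const LayerVidParam& params = nnvm::get<LayerVidParam>(attrs.parsed);
       return static_cast<uint32_t>(params.num_args);
     })
     // Two outputs per input array (layer vertex Ids and layer sizes), as consumed
     // by LayerVidComputeCPU above.
     .set_num_outputs([](const nnvm::NodeAttrs& attrs) {
       const LayerVidParam& params = nnvm::get<LayerVidParam>(attrs.parsed);
       return static_cast<uint32_t>(params.num_args * 2);
     })
     .set_attr<nnvm::FInferShape>("FInferShape", LayerVidShape)  // assumed shape helper
     .set_attr<nnvm::FInferType>("FInferType", LayerVidType)     // assumed type helper
     .set_attr<FCompute>("FCompute<cpu>", LayerVidComputeCPU)
     // Note: no FInferStorageType attribute; the dense default applies.
     .add_argument("data", "NDArray-or-Symbol[]", "Input layer Ids.")
     .add_arguments(LayerVidParam::__FIELDS__());

   A storage-type function typically only becomes necessary when an operator accepts or produces sparse storage (row_sparse/csr) or dispatches through FComputeEx.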

----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
[email protected]


With regards,
Apache Git Services