Posted to commits@mxnet.apache.org by GitBox <gi...@apache.org> on 2018/12/18 01:12:01 UTC

[GitHub] eric-haibin-lin commented on a change in pull request #13591: Add a DGL operator to compute vertex Ids in layers

URL: https://github.com/apache/incubator-mxnet/pull/13591#discussion_r242375882
 
 

 ##########
 File path: src/operator/contrib/dgl_graph.cc
 ##########
 @@ -1620,5 +1620,153 @@ empty rows and empty columns.
 .add_argument("graph_data", "NDArray-or-Symbol[]", "Input graphs and input vertex Ids.")
 .add_arguments(SubgraphCompactParam::__FIELDS__());
 
+///////////////////////// Layer vid ///////////////////////////
+
+struct LayerVidParam : public dmlc::Parameter<LayerVidParam> {
+  int num_args;
+  nnvm::Tuple<nnvm::dim_t> num_layers;
+  DMLC_DECLARE_PARAMETER(LayerVidParam) {
+    DMLC_DECLARE_FIELD(num_args).set_lower_bound(1)
+    .describe("Number of input arrays.");
+    DMLC_DECLARE_FIELD(num_layers)
 +    .describe("The number of layers to extract from each input array.");
+  }
+};  // struct LayerVidParam
+
+DMLC_REGISTER_PARAMETER(LayerVidParam);
+
+static void ComputeLayerVid(const TBlob &layer_ids, const TBlob &out,
+                            const TBlob &layer_sizes, size_t num_layers) {
+  CHECK_EQ(out.shape_[0], num_layers);
+  CHECK_EQ(layer_sizes.shape_[0], num_layers);
+  const int64_t *data = layer_ids.dptr<int64_t>();
+  int64_t *out_data = out.dptr<int64_t>();
+  int64_t *num_vs = layer_sizes.dptr<int64_t>();
+  for (size_t i = 0; i < num_layers; i++)
+    num_vs[i] = 0;
+  // Initialize output data.
+  for (size_t i = 0; i < out.shape_.Size(); i++)
+    out_data[i] = -1;
+
+  size_t max_size = out.shape_[1];
+  size_t size = layer_ids.shape_.Size();
+  for (size_t i = 0; i < size; i++) {
+    size_t layer = data[i];
 +    // Only record vertices whose layer id is within the requested number of layers.
+    if (layer >= num_layers)
+      continue;
+    int64_t &num = num_vs[layer];
+    out_data[layer * max_size + num] = i;
+    num++;
+  }
+}
+
+static void LayerVidComputeCPU(const nnvm::NodeAttrs& attrs,
+                               const OpContext& ctx,
+                               const std::vector<TBlob>& inputs,
+                               const std::vector<OpReqType>& req,
+                               const std::vector<TBlob>& outputs) {
+  const LayerVidParam& params = nnvm::get<LayerVidParam>(attrs.parsed);
+  int num_arrs = inputs.size();
+#pragma omp parallel for num_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount())
+  for (int i = 0; i < num_arrs; i++) {
+    ComputeLayerVid(inputs[i], outputs[i], outputs[i + num_arrs],
+                    params.num_layers[i]);
+  }
+}
+
+static bool LayerVidStorageType(const nnvm::NodeAttrs& attrs,
+                                const int dev_mask,
+                                DispatchMode* dispatch_mode,
+                                std::vector<int> *in_attrs,
+                                std::vector<int> *out_attrs) {
+  for (size_t i = 0; i < in_attrs->size(); i++) {
+    CHECK_EQ(in_attrs->at(i), kDefaultStorage);
+  }
+  bool success = true;
+  *dispatch_mode = DispatchMode::kFCompute;
+  for (size_t i = 0; i < out_attrs->size(); i++) {
+    if (!type_assign(&(*out_attrs)[i], mxnet::kDefaultStorage))
+      success = false;
+  }
+
+  return success;
+}
+
+static bool LayerVidShape(const nnvm::NodeAttrs& attrs,
+                          std::vector<TShape> *in_attrs,
+                          std::vector<TShape> *out_attrs) {
+  const LayerVidParam& params = nnvm::get<LayerVidParam>(attrs.parsed);
+  CHECK_EQ(params.num_layers.ndim(), in_attrs->size());
+  CHECK_EQ(params.num_layers.ndim() * 2, out_attrs->size());
+  size_t num_arrs = in_attrs->size();
+  for (size_t i = 0; i < num_arrs; i++) {
+    CHECK_EQ(in_attrs->at(i).ndim(), 1U);
+  }
+
+  for (size_t i = 0; i < num_arrs; i++) {
+    TShape shape(2);
+    shape[0] = params.num_layers[i];
+    shape[1] = in_attrs->at(i)[0];
+    out_attrs->at(i) = shape;
+    TShape shape2(1);
+    shape2[0] = params.num_layers[i];
+    out_attrs->at(i + num_arrs) = shape2;
+  }
+  return true;
+}
+
+static bool LayerVidType(const nnvm::NodeAttrs& attrs,
+                         std::vector<int> *in_attrs,
+                         std::vector<int> *out_attrs) {
+  for (size_t i = 0; i < in_attrs->size(); i++) {
+    CHECK_EQ(in_attrs->at(i), mshadow::kInt64);
+  }
+  for (size_t i = 0; i < out_attrs->size(); i++) {
+    out_attrs->at(i) = mshadow::kInt64;
 
 Review comment:
   Use TYPE_ASSIGN in case the output attributes have a pre-defined value other than int64.
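
   For context, here is a minimal sketch of the suggested change (an
   illustration, not code from the PR), assuming "TYPE_ASSIGN" refers to the
   TYPE_ASSIGN_CHECK macro in src/operator/operator_common.h, which wraps
   the same type_assign helper that LayerVidStorageType above already uses:

       static bool LayerVidType(const nnvm::NodeAttrs& attrs,
                                std::vector<int> *in_attrs,
                                std::vector<int> *out_attrs) {
         for (size_t i = 0; i < in_attrs->size(); i++) {
           CHECK_EQ(in_attrs->at(i), mshadow::kInt64);
         }
         for (size_t i = 0; i < out_attrs->size(); i++) {
           // Assign kInt64 only when the output type is still unknown (-1);
           // if a different type was already inferred, the macro reports the
           // inconsistency instead of silently overwriting it.
           TYPE_ASSIGN_CHECK(*out_attrs, i, mshadow::kInt64);
         }
         return true;
       }

   With that, a pre-defined output type other than int64 fails type
   inference explicitly rather than being clobbered, mirroring how the
   storage-type pass above handles out_attrs with type_assign.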

----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
users@infra.apache.org


With regards,
Apache Git Services