Posted to commits@mxnet.apache.org by li...@apache.org on 2020/01/27 18:28:04 UTC

[incubator-mxnet] branch tvm_sync created (now 2ef7de0)

This is an automated email from the ASF dual-hosted git repository.

liuyizhi pushed a change to branch tvm_sync
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git.


      at 2ef7de0  upgrade enum according to updated tvm

This branch includes the following new commits:

     new dde46f5  sync latest tvm
     new 2ef7de0  upgrade enum according to updated tvm

The 2 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.



[incubator-mxnet] 02/02: upgrade enum according to updated tvm

Posted by li...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

liuyizhi pushed a commit to branch tvm_sync
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git

commit 2ef7de0ec0072828e788976d1ec44e9438b96383
Author: Yizhi Liu <li...@apache.org>
AuthorDate: Fri Jan 24 22:17:50 2020 -0800

    upgrade enum according to updated tvm
---
 src/nnvm/plan_memory.cc                              | 2 --
 src/nnvm/tvm_bridge.cc                               | 4 ++--
 src/operator/numpy/np_elemwise_broadcast_logic_op.cc | 6 +++---
 src/operator/tensor/elemwise_unary_op_pow.cc         | 4 ++--
 src/operator/tvmop/op_module.cc                      | 2 +-
 5 files changed, 8 insertions(+), 10 deletions(-)
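
The hunks below swap MXNet's uses of the old TVM argument type codes for the
renamed ones: kArrayHandle becomes kTVMDLTensorHandle and kTVMType becomes
kTVMDataType. A minimal sketch of the resulting argument-packing pattern,
assuming the TVM runtime headers are on the include path; the helper name
MakeTensorArgs is illustrative only and is not part of the commit:

    #include <vector>
    #include <tvm/runtime/c_runtime_api.h>
    #include <tvm/runtime/packed_func.h>

    // Pack a list of DLTensor* into the TVMValue/type-code arrays that back a
    // TVMArgs view, using the renamed enum values from the updated TVM.
    tvm::runtime::TVMArgs MakeTensorArgs(const std::vector<DLTensor*>& tensors,
                                         std::vector<TVMValue>* values,
                                         std::vector<int>* type_codes) {
      values->resize(tensors.size());
      type_codes->resize(tensors.size());
      for (size_t i = 0; i < tensors.size(); ++i) {
        (*type_codes)[i] = kTVMDLTensorHandle;   // previously kArrayHandle
        (*values)[i].v_handle = tensors[i];
      }
      // Plain POD arguments are still range-checked against kTVMDataType
      // (previously kTVMType), as in the tvm_bridge.cc hunk below.
      return tvm::runtime::TVMArgs(values->data(), type_codes->data(),
                                   static_cast<int>(tensors.size()));
    }

The rename appears to be name-only, so existing call sites only need the new
spellings.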

diff --git a/src/nnvm/plan_memory.cc b/src/nnvm/plan_memory.cc
index c89eefc..e061dab 100644
--- a/src/nnvm/plan_memory.cc
+++ b/src/nnvm/plan_memory.cc
@@ -26,7 +26,6 @@
 #include <nnvm/pass.h>
 #include <nnvm/graph_attr_types.h>
 #include <nnvm/op_attr_types.h>
-#include <nnvm/top/tensor.h>
 #include <mxnet/base.h>
 #include <memory>
 #include "graph_algorithm.h"
@@ -36,7 +35,6 @@ namespace nnvm {
 namespace pass {
 
 namespace {
-  using namespace nnvm::top;
 // Return bytes of data flag.
 static int MXGetDTypeSize(int type_flag) {
   switch (type_flag) {
diff --git a/src/nnvm/tvm_bridge.cc b/src/nnvm/tvm_bridge.cc
index 0692998..17e05e3 100644
--- a/src/nnvm/tvm_bridge.cc
+++ b/src/nnvm/tvm_bridge.cc
@@ -73,7 +73,7 @@ class TVMFunctor {
         const NDArray& nd =
             static_cast<NDArray*>(args.values[i].v_handle)[0];
         // We cannot set the value until
-        type_codes_[i] = kArrayHandle;
+        type_codes_[i] = kTVMDLTensorHandle;
         array_data_.push_back(nd);
         array_loc_.push_back(i);
         // check if there is read or mutate
@@ -86,7 +86,7 @@ class TVMFunctor {
           mutate_vars->push_back(nd.var());
         }
       } else {
-        CHECK_LT(args.type_codes[i], kTVMType)
+        CHECK_LT(args.type_codes[i], kTVMDataType)
             << "Only allow POD type in mxnet async call";
       }
     }
diff --git a/src/operator/numpy/np_elemwise_broadcast_logic_op.cc b/src/operator/numpy/np_elemwise_broadcast_logic_op.cc
index 7e8951a..8395caf 100644
--- a/src/operator/numpy/np_elemwise_broadcast_logic_op.cc
+++ b/src/operator/numpy/np_elemwise_broadcast_logic_op.cc
@@ -95,7 +95,7 @@ struct TVMBinaryBroadcastCompute {
     values.resize(num_args);
     for (size_t i = 0; i < num_args; ++i) {
       tblobs[i] = PrependAxes(tblobs[i], ondim);
-      type_codes[i] = kArrayHandle;
+      type_codes[i] = kTVMDLTensorHandle;
       values[i].v_handle = const_cast<DLTensor*>(&(tblobs[i].dltensor()));
     }
     tvm::runtime::TVMArgs tvm_args(&values[0], &type_codes[0], tblobs.size());
@@ -200,7 +200,7 @@ struct TVMBinaryBroadcastScalarCompute {
     values.resize(num_args);
 
     // input tensor setup
-    type_codes[0] = kArrayHandle;
+    type_codes[0] = kTVMDLTensorHandle;
     values[0].v_handle = const_cast<DLTensor*>(&(tblobs[0].dltensor()));
 
     // scalar param
@@ -208,7 +208,7 @@ struct TVMBinaryBroadcastScalarCompute {
     values[1].v_float64 = nnvm::get<double>(attrs.parsed);
 
     // output tensor
-    type_codes[2] = kArrayHandle;
+    type_codes[2] = kTVMDLTensorHandle;
     values[2].v_handle = const_cast<DLTensor*>(&(tblobs[1].dltensor()));
 
     tvm::runtime::TVMArgs tvm_args(&values[0], &type_codes[0], 3);
diff --git a/src/operator/tensor/elemwise_unary_op_pow.cc b/src/operator/tensor/elemwise_unary_op_pow.cc
index b4d3a4a..914cb820 100644
--- a/src/operator/tensor/elemwise_unary_op_pow.cc
+++ b/src/operator/tensor/elemwise_unary_op_pow.cc
@@ -224,7 +224,7 @@ The storage type of ``rsqrt`` output is always dense
 MXNET_OPERATOR_REGISTER_BINARY_WITH_SPARSE_CPU_DR(
   _backward_rsqrt, unary_bwd<mshadow_op::reciprocal_square_root_grad>)
 .set_attr<nnvm::FGradient>("FGradient",
-  [](const nnvm::NodePtr& n, const std::vector<nnvm::NodeEntry>& ograds) {
+  [](const nnvm::ObjectPtr& n, const std::vector<nnvm::NodeEntry>& ograds) {
       // NodeEntry{n} : y_grad * f'(x)
       // n->inputs[0] : y_grad
       // n->inputs[1] : x
@@ -329,7 +329,7 @@ MXNET_OPERATOR_REGISTER_BINARY(_backward_rcbrt)
                     ElemwiseBinaryOp::Compute<cpu,
                       unary_bwd<mshadow_op::reciprocal_cube_root_grad>>)
 .set_attr<nnvm::FGradient>("FGradient",
-  [](const nnvm::NodePtr& n, const std::vector<nnvm::NodeEntry>& ograds) {
+  [](const nnvm::ObjectPtr& n, const std::vector<nnvm::NodeEntry>& ograds) {
       // NodeEntry{n} : y_grad * f'(x)
       // n->inputs[0] : y_grad
       // n->inputs[1] : x
diff --git a/src/operator/tvmop/op_module.cc b/src/operator/tvmop/op_module.cc
index b45df5d..cdd7321 100644
--- a/src/operator/tvmop/op_module.cc
+++ b/src/operator/tvmop/op_module.cc
@@ -94,7 +94,7 @@ void TVMOpModule::Call(const std::string &func_name,
   type_codes.resize(args.size());
   values.resize(args.size());
   for (size_t i = 0; i < args.size(); ++i) {
-    type_codes[i] = kArrayHandle;
+    type_codes[i] = kTVMDLTensorHandle;
     values[i].v_handle = const_cast<DLTensor *>(&(args[i].dltensor()));
   }
 


[incubator-mxnet] 01/02: sync latest tvm

Posted by li...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

liuyizhi pushed a commit to branch tvm_sync
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git

commit dde46f52c99bcbd77d81ab9547172aeb8df5f8a2
Author: Yizhi Liu <li...@apache.org>
AuthorDate: Fri Jan 24 18:34:17 2020 -0800

    sync latest tvm
---
 3rdparty/tvm                                       |  2 +-
 include/mxnet/imperative.h                         | 10 +--
 include/mxnet/op_attr_types.h                      |  2 +-
 src/c_api/c_api.cc                                 |  2 +-
 src/c_api/c_api_function.cc                        |  4 +-
 src/c_api/c_api_symbolic.cc                        | 10 +--
 src/common/exec_utils.cc                           | 10 +--
 src/executor/eliminate_common_expr_pass.cc         | 28 ++++----
 src/executor/exec_pass.h                           |  4 +-
 src/executor/graph_executor.cc                     | 16 ++---
 src/executor/infer_graph_attr_pass.cc              |  8 +--
 src/executor/pointwise_fusion_pass.cc              | 18 ++---
 src/executor/simple_partition_pass.h               | 14 ++--
 src/imperative/cached_op.cc                        |  8 +--
 src/imperative/cached_op.h                         |  2 +-
 src/imperative/imperative.cc                       | 14 ++--
 src/nnvm/amp_infer_unknown.cc                      | 12 ++--
 src/nnvm/gradient.cc                               | 20 +++---
 src/nnvm/graph_editor.cc                           |  6 +-
 src/nnvm/legacy_json_util.cc                       |  2 +-
 src/nnvm/legacy_op_util.cc                         |  6 +-
 src/nnvm/low_precision_pass.cc                     | 42 ++++++------
 src/nnvm/node_op_util.h                            |  4 +-
 src/operator/batch_norm_v1.cc                      |  2 +-
 src/operator/contrib/amp_graph_pass.cc             |  4 +-
 src/operator/contrib/roi_align.cc                  |  2 +-
 src/operator/contrib/sync_batch_norm.cc            |  2 +-
 src/operator/control_flow.cc                       |  6 +-
 src/operator/custom/custom.cc                      |  6 +-
 src/operator/elemwise_op_common.h                  | 10 +--
 src/operator/fusion/fused_op.cc                    | 16 ++---
 src/operator/fusion/fused_op.h                     | 12 ++--
 src/operator/identity_attach_KL_sparse_reg.cc      |  2 +-
 src/operator/leaky_relu.cc                         |  2 +-
 src/operator/nn/activation.cc                      |  2 +-
 src/operator/nn/batch_norm.cc                      |  8 +--
 src/operator/nn/concat.cc                          |  2 +-
 src/operator/nn/convolution.cc                     |  2 +-
 src/operator/nn/cudnn/cudnn_batch_norm.cc          |  2 +-
 src/operator/nn/deconvolution.cc                   |  2 +-
 src/operator/nn/dropout.cc                         |  2 +-
 src/operator/nn/fully_connected.cc                 |  4 +-
 src/operator/nn/group_norm.cc                      |  2 +-
 src/operator/nn/layer_norm.cc                      |  2 +-
 src/operator/nn/lrn.cc                             |  2 +-
 src/operator/nn/softmax-inl.h                      |  2 +-
 src/operator/nn/upsampling.cc                      |  4 +-
 src/operator/numpy/np_broadcast_reduce_op_value.cc |  2 +-
 src/operator/numpy/np_matrix_op.cc                 | 10 +--
 src/operator/numpy/np_where_op.cc                  |  2 +-
 src/operator/operator_common.h                     | 16 ++---
 src/operator/quantization/quantize_graph_pass.cc   | 76 +++++++++++-----------
 src/operator/quantization/quantized_activation.cc  |  2 +-
 src/operator/quantization/quantized_batch_norm.cc  |  2 +-
 src/operator/quantization/quantized_concat.cc      |  2 +-
 src/operator/quantization/quantized_conv.cc        |  2 +-
 .../quantization/quantized_elemwise_add.cc         |  2 +-
 .../quantization/quantized_elemwise_mul.cc         |  2 +-
 src/operator/quantization/quantized_flatten.cc     |  2 +-
 .../quantization/quantized_fully_connected.cc      |  2 +-
 src/operator/quantization/quantized_indexing_op.cc |  2 +-
 src/operator/quantization/quantized_pooling.cc     |  2 +-
 src/operator/random/sample_multinomial_op.cc       |  2 +-
 src/operator/regression_output-inl.h               |  2 +-
 src/operator/rnn.cc                                |  2 +-
 src/operator/softmax_output.cc                     |  4 +-
 src/operator/subgraph/build_subgraph.cc            |  8 +--
 src/operator/subgraph/common.h                     |  2 +-
 src/operator/subgraph/default_subgraph_property.cc |  4 +-
 .../subgraph/default_subgraph_property_v2.cc       |  4 +-
 src/operator/subgraph/mkldnn/mkldnn_conv.cc        |  6 +-
 .../subgraph/mkldnn/mkldnn_conv_property.h         | 12 ++--
 .../mkldnn_elemwisemul_post_quantize_property.h    | 12 ++--
 src/operator/subgraph/mkldnn/mkldnn_fc.cc          |  6 +-
 .../mkldnn/mkldnn_fc_post_quantize_property.h      | 12 ++--
 src/operator/subgraph/mkldnn/mkldnn_fc_property.h  |  8 +--
 .../mkldnn/mkldnn_post_quantize_property.h         | 10 +--
 .../partitioner/custom_subgraph_property.h         |  4 +-
 src/operator/subgraph/subgraph_property.h          | 10 +--
 src/operator/subgraph/tensorrt/tensorrt-inl.h      |  8 +--
 src/operator/tensor/broadcast_reduce_op.h          |  4 +-
 src/operator/tensor/broadcast_reduce_op_index.cc   |  2 +-
 src/operator/tensor/broadcast_reduce_op_value.cc   |  2 +-
 src/operator/tensor/control_flow_op.cc             |  2 +-
 src/operator/tensor/dot.cc                         |  6 +-
 src/operator/tensor/elemwise_sum.cc                |  4 +-
 src/operator/tensor/elemwise_unary_op_basic.cc     | 12 ++--
 src/operator/tensor/elemwise_unary_op_logexp.cc    | 10 +--
 src/operator/tensor/elemwise_unary_op_pow.cc       |  8 +--
 src/operator/tensor/elemwise_unary_op_trig.cc      | 24 +++----
 src/operator/tensor/indexing_op.cc                 | 12 ++--
 src/operator/tensor/la_op.h                        |  2 +-
 src/operator/tensor/matrix_op.cc                   |  2 +-
 src/operator/tensor/ordering_op.cc                 |  4 +-
 src/operator/tensor/sparse_retain.cc               |  2 +-
 tests/cpp/include/test_core_op.h                   | 14 ++--
 96 files changed, 348 insertions(+), 348 deletions(-)
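
The bulk of this commit is a mechanical rename that follows the updated nnvm
bundled with TVM: nnvm::NodePtr becomes nnvm::ObjectPtr in function
signatures, DFSVisit lambdas, and local declarations. A minimal sketch of an
FGradient-style function in the new spelling, assuming only the nnvm headers;
the operator name "_backward_example" is hypothetical and the snippet is not
part of the commit:

    #include <vector>
    #include <nnvm/node.h>
    #include <nnvm/op.h>

    // FGradient-style gradient builder after the rename: ObjectPtr wherever
    // NodePtr used to appear.
    std::vector<nnvm::NodeEntry> ExampleGradient(
        const nnvm::ObjectPtr& n,                         // was const nnvm::NodePtr&
        const std::vector<nnvm::NodeEntry>& ograds) {
      nnvm::ObjectPtr g = nnvm::Node::Create();           // Create() now yields ObjectPtr
      g->attrs.op = nnvm::Op::Get("_backward_example");   // hypothetical backward op
      g->attrs.name = n->attrs.name + "_backward";
      g->inputs = ograds;
      g->control_deps.emplace_back(n);
      return {nnvm::NodeEntry{g, 0, 0}};
    }

ObjectPtr replaces NodePtr as the shared-pointer alias for Node, so the hunks
that follow are type-name substitutions at call sites rather than behavioral
changes.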

diff --git a/3rdparty/tvm b/3rdparty/tvm
index f8f4ceb..9bd2c7b 160000
--- a/3rdparty/tvm
+++ b/3rdparty/tvm
@@ -1 +1 @@
-Subproject commit f8f4ceb253a6c39ac2a3e282ef89e9d558aa1c73
+Subproject commit 9bd2c7b44208ed992061f8c2688e1137357f1db1
diff --git a/include/mxnet/imperative.h b/include/mxnet/imperative.h
index dbd81e5..6a367b3 100644
--- a/include/mxnet/imperative.h
+++ b/include/mxnet/imperative.h
@@ -62,18 +62,18 @@ class Imperative {
     AGInfo() :
       grad_req(kNullOp), fresh_out_grad(false) {}
 
-    static void Clear(const nnvm::NodePtr& node) {
+    static void Clear(const nnvm::ObjectPtr& node) {
       if (node == nullptr || node->info.empty()) return;
       AGInfo& info = Get(node);
       if (info.grad_req != kNullOp) return;
       node->info.clear();
     }
 
-    static AGInfo& Get(const nnvm::NodePtr& node) {
+    static AGInfo& Get(const nnvm::ObjectPtr& node) {
       return dmlc::get<AGInfo>(node->info);
     }
 
-    static AGInfo& Create(const nnvm::NodePtr& node) {
+    static AGInfo& Create(const nnvm::ObjectPtr& node) {
       node->info.construct<AGInfo>();
       return Get(node);
     }
@@ -82,7 +82,7 @@ class Imperative {
       return arr.entry_.node == nullptr || arr.entry_.node->info.empty();
     }
 
-    static bool IsVariable(const nnvm::NodePtr& node) {
+    static bool IsVariable(const nnvm::ObjectPtr& node) {
       AGInfo& info = Get(node);
       return info.grad_req != kNullOp && info.outputs.size() == 1
              && info.out_grads.size() == 1;
@@ -196,7 +196,7 @@ class Imperative {
   }
   /*! \brief find the input/output ndarrays that are needed for backward */
   void GetBackwardDependency(
-      const nnvm::NodePtr& node,
+      const nnvm::ObjectPtr& node,
       uint32_t num_inputs, uint32_t num_outputs,
       std::vector<bool> *p_save_inputs,
       std::vector<bool> *p_save_outputs);
diff --git a/include/mxnet/op_attr_types.h b/include/mxnet/op_attr_types.h
index 237c595..a0ac301 100644
--- a/include/mxnet/op_attr_types.h
+++ b/include/mxnet/op_attr_types.h
@@ -329,7 +329,7 @@ using FQuantizable = std::function<QuantizeType (const NodeAttrs& attrs)>;
  * \brief Register a quantized node creation function based on the attrs of the node
  * \note Register under "FQuantizedOp" for non-quantized operators
  */
-using FQuantizedOp = std::function<nnvm::NodePtr (const NodeAttrs& attrs)>;
+using FQuantizedOp = std::function<nnvm::ObjectPtr (const NodeAttrs& attrs)>;
 
 /*!
  * \brief Register a function to determine if the output of a quantized operator
diff --git a/src/c_api/c_api.cc b/src/c_api/c_api.cc
index 54c544a..f3e1eb2 100644
--- a/src/c_api/c_api.cc
+++ b/src/c_api/c_api.cc
@@ -519,7 +519,7 @@ int MXLoadLib(const char *path) {
     };
 
     // FGradient register lambda
-    auto grad_reg = [=](const nnvm::NodePtr& n, const std::vector<nnvm::NodeEntry>& ograds) {
+    auto grad_reg = [=](const nnvm::ObjectPtr& n, const std::vector<nnvm::NodeEntry>& ograds) {
         // copy gradients first
         std::vector<nnvm::NodeEntry> heads(ograds.begin(), ograds.end());
         // copy inputs second
diff --git a/src/c_api/c_api_function.cc b/src/c_api/c_api_function.cc
index 3cd7037..f1dd8d9 100644
--- a/src/c_api/c_api_function.cc
+++ b/src/c_api/c_api_function.cc
@@ -42,11 +42,11 @@ struct CustomFunctionParam {
 };
 
 std::vector<nnvm::NodeEntry> Gradient(
-    const nnvm::NodePtr& n,
+    const nnvm::ObjectPtr& n,
     const std::vector<nnvm::NodeEntry>& out_grads) {
   const CustomFunctionParam& params = nnvm::get<CustomFunctionParam>(n->attrs.parsed);
 
-  nnvm::NodePtr g = nnvm::Node::Create();
+  nnvm::ObjectPtr g = nnvm::Node::Create();
   g->attrs.op = nnvm::Op::Get("_backward_CustomFunction");
   g->attrs.name = n->attrs.name + "_backward";
   g->attrs.parsed = params;
diff --git a/src/c_api/c_api_symbolic.cc b/src/c_api/c_api_symbolic.cc
index cfc0d5f..0776bc7 100644
--- a/src/c_api/c_api_symbolic.cc
+++ b/src/c_api/c_api_symbolic.cc
@@ -386,7 +386,7 @@ int MXSymbolCutSubgraph(SymbolHandle sym, SymbolHandle **input_symbols,
     const std::string &subg_name = it->second;
     std::vector<nnvm::NodeEntry *> input_entries;
     DFSVisit(s->outputs, [&subg_attr, &subg_name, &input_entries]
-             (nnvm::NodePtr n) {
+             (nnvm::ObjectPtr n) {
       // If the node itself isn't in the subgraph, we ignore it.
       auto it = n->attrs.dict.find(subg_attr);
       if (it == n->attrs.dict.end() || it->second != subg_name)
@@ -431,7 +431,7 @@ int MXSymbolCutSubgraph(SymbolHandle sym, SymbolHandle **input_symbols,
 void ConvertShapeAttrToNumPyCompatible(nnvm::Graph* g) {
   if (Imperative::Get()->is_np_shape()
     && (!g->HasAttr("is_np_shape") || !g->GetAttr<int>("is_np_shape"))) {
-    DFSVisit(g->outputs, [](nnvm::NodePtr n) {
+    DFSVisit(g->outputs, [](nnvm::ObjectPtr n) {
       if (n->is_variable()) {
         auto it = n->attrs.dict.find("__shape__");
         if (it != n->attrs.dict.end()) {
@@ -1094,13 +1094,13 @@ static void _SetInputDTypes(
 // if model_params is provided the function will dtype of only model params.
 // if model_params is empty, the function will dtype of all nodes which had
 // a prior dtype set.
-// args is a const_reference vector of NodePtrs. NodePtrs are immutable but
+// args is a const_reference vector of ObjectPtrs. ObjectPtrs are immutable but
 // the Nodes they are pointing will be mutated in this function
 static void _UpdateSymDTypeAttrs(
     const std::unordered_map<std::string, int>& node_name_dtype_map,
     const std::unordered_map<std::string, int>& node_without_dtype_map,
     const std::unordered_set<std::string>& model_params,
-    const std::vector<nnvm::NodePtr>& args) {
+    const std::vector<nnvm::ObjectPtr>& args) {
   const std::string dtype_keyword = "__dtype__";
 
   // Update args to have the right dtype attrs
@@ -1250,7 +1250,7 @@ int MXReducePrecisionSymbol(SymbolHandle sym_handle,
   result_sym->outputs = g.outputs;
   *ret_sym_handle = result_sym;
   nnvm::Symbol *ret_sym = static_cast<nnvm::Symbol *>(*ret_sym_handle);
-  const std::vector<nnvm::NodePtr>& args = ret_sym->ListInputs(nnvm::Symbol::kAll);
+  const std::vector<nnvm::ObjectPtr>& args = ret_sym->ListInputs(nnvm::Symbol::kAll);
 
   // update symbol dtype attrs using the node name -> dtype mapping, if dtype is already set
   // in the symbol, else set dtype for the model_params
diff --git a/src/common/exec_utils.cc b/src/common/exec_utils.cc
index 6782abd..601d1c0 100644
--- a/src/common/exec_utils.cc
+++ b/src/common/exec_utils.cc
@@ -32,12 +32,12 @@ namespace common {
 
 void CopyGraph(nnvm::Graph *dst, const nnvm::Graph &src, bool copy_variables) {
   using nnvm::Node;
-  using nnvm::NodePtr;
+  using nnvm::ObjectPtr;
   using nnvm::NodeEntry;
-  std::unordered_map<Node*, NodePtr> old_new;
+  std::unordered_map<Node*, ObjectPtr> old_new;
   // use DFSVisit to copy all the nodes
-  DFSVisit(src.outputs, [&old_new, copy_variables](const NodePtr& node) {
-      NodePtr np;
+  DFSVisit(src.outputs, [&old_new, copy_variables](const ObjectPtr& node) {
+      ObjectPtr np;
       if (copy_variables || !node->is_variable()) {
         np = Node::Create();
         np->attrs = node->attrs;
@@ -52,7 +52,7 @@ void CopyGraph(nnvm::Graph *dst, const nnvm::Graph &src, bool copy_variables) {
       Node *ptr = e.node.get();
       kv.second->inputs.emplace_back(NodeEntry{old_new[ptr], e.index, e.version});
     }
-    for (const NodePtr& p : kv.first->control_deps) {
+    for (const ObjectPtr& p : kv.first->control_deps) {
       kv.second->control_deps.emplace_back(old_new[p.get()]);
     }
   }
diff --git a/src/executor/eliminate_common_expr_pass.cc b/src/executor/eliminate_common_expr_pass.cc
index 5c77ec2..e6cc35b 100644
--- a/src/executor/eliminate_common_expr_pass.cc
+++ b/src/executor/eliminate_common_expr_pass.cc
@@ -38,7 +38,7 @@ namespace exec {
 namespace {
 
 using nnvm::Node;
-using nnvm::NodePtr;
+using nnvm::ObjectPtr;
 using nnvm::Graph;
 using nnvm::IndexedGraph;
 
@@ -94,12 +94,12 @@ bool NodeEqual(const Node* n, const Node* m) {
 }
 
 // Graph traversal to create a list of pairs of identical-function nodes that can be combined.
-std::vector<std::pair<NodePtr, NodePtr> > GetCommonNodes(const Graph& g) {
-  std::vector<std::pair<NodePtr, NodePtr> > ret;
+std::vector<std::pair<ObjectPtr, ObjectPtr> > GetCommonNodes(const Graph& g) {
+  std::vector<std::pair<ObjectPtr, ObjectPtr> > ret;
   // A map between a vector of inputs and those nodes that have those inputs
-  std::map<std::vector<NodeInput>, std::vector<const NodePtr*> > grouped_nodes;
+  std::map<std::vector<NodeInput>, std::vector<const ObjectPtr*> > grouped_nodes;
   // Traverse the graph and group the nodes by their vector of inputs
-  nnvm::DFSVisit(g.outputs, [&grouped_nodes](const NodePtr& n) {
+  nnvm::DFSVisit(g.outputs, [&grouped_nodes](const ObjectPtr& n) {
     if (n->inputs.size() != 0) {
       grouped_nodes[ConvertInputs(n->inputs)].push_back(&n);
     }
@@ -116,8 +116,8 @@ std::vector<std::pair<NodePtr, NodePtr> > GetCommonNodes(const Graph& g) {
           // be eliminated in favor of the other Node (the 'src').
           if (NodeEqual(node_group[i]->get(), node_group[j]->get())) {
             visited.insert(j);
-            NodePtr src = *node_group[i];
-            NodePtr replaced = *node_group[j];
+            ObjectPtr src = *node_group[i];
+            ObjectPtr replaced = *node_group[j];
             ret.emplace_back(src, replaced);
           }
         }
@@ -131,14 +131,14 @@ std::vector<std::pair<NodePtr, NodePtr> > GetCommonNodes(const Graph& g) {
  * \brief Do a single pass of Node elimination given pairs of identical Nodes.
  */
 void EliminateCommonNodes(Graph* g,
-                          const std::vector<std::pair<NodePtr, NodePtr> >& common_nodes) {
+                          const std::vector<std::pair<ObjectPtr, ObjectPtr> >& common_nodes) {
   for (const auto &p : common_nodes) {
-    std::vector <NodePtr> nodes_to_change;
-    const NodePtr &src = p.first;
-    const NodePtr &replaced = p.second;
+    std::vector <ObjectPtr> nodes_to_change;
+    const ObjectPtr &src = p.first;
+    const ObjectPtr &replaced = p.second;
     // Create a `nodes_to_change` list containing the Nodes that refer to the `replaced` Node
     // that is targeted for elimination.
-    DFSVisit(g->outputs, [replaced, &nodes_to_change](const NodePtr &n) {
+    DFSVisit(g->outputs, [replaced, &nodes_to_change](const ObjectPtr &n) {
       for (const auto &dep : n->control_deps) {
         if (dep == replaced) {
           nodes_to_change.push_back(n);
@@ -189,7 +189,7 @@ void EliminateCommonNodes(Graph* g,
     if (kv == unique_outputs.end()) {
       unique_outputs.emplace(g->outputs[i], 0);
     } else {
-      NodePtr copy_node = Node::Create();
+      ObjectPtr copy_node = Node::Create();
       std::ostringstream os;
       os << kv->first.node->attrs.name << "_" << kv->second << "_copy";
       kv->second++;
@@ -207,7 +207,7 @@ void EliminateCommonNodes(Graph* g,
  * \brief Simplify a graph by iteratively eliminating Nodes with identical inputs and function.
  */
 nnvm::Graph EliminateCommonExpr(nnvm::Graph&& g) {
-  using nnvm::NodePtr;
+  using nnvm::ObjectPtr;
   bool keep_running = true;
   while (keep_running) {
     const auto& common_nodes = GetCommonNodes(g);
diff --git a/src/executor/exec_pass.h b/src/executor/exec_pass.h
index 55d431c..e3d2fa4 100644
--- a/src/executor/exec_pass.h
+++ b/src/executor/exec_pass.h
@@ -41,7 +41,7 @@ namespace mxnet {
 namespace exec {
 
 template <typename Attr>
-using FAccessSubgraphAttr = std::function<std::tuple<const nnvm::NodePtr,
+using FAccessSubgraphAttr = std::function<std::tuple<const nnvm::ObjectPtr,
                                           std::vector<Attr>,
                                           std::vector<Attr>>
                               (const NodeAttrs& attrs)>;
@@ -52,7 +52,7 @@ using FAccessSubgraphStorageType = FAccessSubgraphAttr<int>;
 
 template <typename Attr>
 using FProvideSubgraphAttr = std::function<void (const NodeAttrs& attrs,
-                                                 const std::vector<nnvm::NodePtr> &nodes,
+                                                 const std::vector<nnvm::ObjectPtr> &nodes,
                                                  const std::vector<std::vector<Attr>> &in_attrs,
                                                  const std::vector<std::vector<Attr>> &out_attrs)>;
 using FProvideSubgraphShape = FProvideSubgraphAttr<mxnet::TShape>;
diff --git a/src/executor/graph_executor.cc b/src/executor/graph_executor.cc
index a57d6c2..49ae3b5 100644
--- a/src/executor/graph_executor.cc
+++ b/src/executor/graph_executor.cc
@@ -216,7 +216,7 @@ const std::unordered_map<std::string, NDArray>& GraphExecutor::aux_state_map() c
 
 static nnvm::NodeEntry AttrHint(nnvm::NodeEntry src, nnvm::NodeEntry like) {
   static const Op* id_like = Op::Get("_identity_with_attr_like_rhs");
-  nnvm::NodePtr n = nnvm::Node::Create();
+  nnvm::ObjectPtr n = nnvm::Node::Create();
   n->attrs.op = id_like;
   n->attrs.name = src.node->attrs.name + "_id";
   n->inputs = {src, like};
@@ -233,7 +233,7 @@ nnvm::NodeEntry AggregateGradient(std::vector<nnvm::NodeEntry>&& v) {
   static const Op* zeros_like_op = Op::Get("zeros_like");
 
   if (v.empty()) {
-    nnvm::NodePtr ng = nnvm::Node::Create();
+    nnvm::ObjectPtr ng = nnvm::Node::Create();
     ng->attrs.op = Op::Get("_zeros_without_dtype");
     ng->attrs.name = "zeros_without_dtype";
     ng->attrs.op->attr_parser(&(ng->attrs));
@@ -253,7 +253,7 @@ nnvm::NodeEntry AggregateGradient(std::vector<nnvm::NodeEntry>&& v) {
     return std::move(v[0]);
   } else {
     if (v.size() < inplace_sum_cap) {
-      nnvm::NodePtr sum_node = nnvm::Node::Create();
+      nnvm::ObjectPtr sum_node = nnvm::Node::Create();
       sum_node->attrs.op = ewise_sum_op;
       sum_node->attrs.name = "sum_grad";
       sum_node->attrs.dict["num_args"] = std::to_string(v.size());
@@ -285,7 +285,7 @@ nnvm::NodeEntry AggregateGradient(std::vector<nnvm::NodeEntry>&& v) {
 
         std::ostringstream os;
         os << "sum_grad_" << i;
-        nnvm::NodePtr x = nnvm::Node::Create();
+        nnvm::ObjectPtr x = nnvm::Node::Create();
         x->attrs.op = ewise_plus_op;
         x->attrs.name = os.str();
         x->inputs = {ret, v[i]};
@@ -293,7 +293,7 @@ nnvm::NodeEntry AggregateGradient(std::vector<nnvm::NodeEntry>&& v) {
       }
       // identity node is used to avoid exposure of dummy plus node
       // when its output get assigned to another space.
-      nnvm::NodePtr id_node = nnvm::Node::Create();
+      nnvm::ObjectPtr id_node = nnvm::Node::Create();
       id_node->attrs.op = identity_op;
       id_node->attrs.name = "sum_grad_final";
       id_node->inputs = {ret};
@@ -324,7 +324,7 @@ inline ValueType get_node_attr(
  */
 nnvm::Graph GraphExecutor::InitFullGraph(nnvm::Symbol symbol,
                                          const std::vector<OpReqType>& grad_req_types) {
-  using nnvm::NodePtr;
+  using nnvm::ObjectPtr;
   using nnvm::NodeEntry;
   // initial information
   num_forward_outputs_ = symbol.outputs.size();
@@ -346,7 +346,7 @@ nnvm::Graph GraphExecutor::InitFullGraph(nnvm::Symbol symbol,
     head_grad_entry_.emplace_back(AttrHint(ngrad, g.outputs[i]));
     head_grad_map_[ngrad.node.get()] = i;
   }
-  std::vector<NodePtr> args = symbol.ListInputs(nnvm::Symbol::kReadOnlyArgs);
+  std::vector<ObjectPtr> args = symbol.ListInputs(nnvm::Symbol::kReadOnlyArgs);
   std::vector<NodeEntry> xs;
   for (size_t i = 0; i < grad_req_types.size(); ++i) {
     if (grad_req_types[i] != kNullOp) {
@@ -1421,7 +1421,7 @@ void GraphExecutor::ExecuteMonOutputCallback(size_t nid) {
   const auto& node = idx[nid].source;
   for (size_t i = 0; i < opnode.exec->out_array.size(); ++i) {
     NDArray *cpy = new NDArray(opnode.exec->out_array[i]);
-    nnvm::NodePtr node_ptr = std::make_shared<nnvm::Node>(*node);
+    nnvm::ObjectPtr node_ptr = std::make_shared<nnvm::Node>(*node);
     std::string name = GetOutputName({node_ptr, static_cast<uint32_t >(i), 0});
     this->monitor_callback_(name.c_str(), reinterpret_cast<void*>(cpy));
   }
diff --git a/src/executor/infer_graph_attr_pass.cc b/src/executor/infer_graph_attr_pass.cc
index 4b6ee2e..6819fbd 100644
--- a/src/executor/infer_graph_attr_pass.cc
+++ b/src/executor/infer_graph_attr_pass.cc
@@ -74,7 +74,7 @@ inline void GetAttrFromForwardNode(const uint32_t nid,
   // gradient function, used to get node correspondence.
   static auto& fgrad =
       Op::GetAttr<nnvm::FGradient>("FGradient");
-  nnvm::NodePtr fwd_ptr = inode.source->control_deps[0];
+  nnvm::ObjectPtr fwd_ptr = inode.source->control_deps[0];
   const nnvm::IndexedGraph::Node& fnode = idx[inode.control_deps[0]];
   // use gradient function to find out the correspondence.
   std::vector<nnvm::NodeEntry> ograd(fwd_ptr->num_outputs());
@@ -140,7 +140,7 @@ void GetAttrFromFusedNode(uint32_t nid,
   // gradient function, used to get node correspondence.
   static auto& fgrad =
       Op::GetAttr<nnvm::FGradient>("FGradient");
-  nnvm::NodePtr fused_fwd_ptr = inode.source->control_deps[0];
+  nnvm::ObjectPtr fused_fwd_ptr = inode.source->control_deps[0];
   static auto& finfer_fused_shape =
     Op::GetAttr<FAccessSubgraphType>(infer_fusion_name);
   auto finfer = finfer_fused_shape.get(fused_fwd_ptr->op(), nullptr);
@@ -394,7 +394,7 @@ nnvm::Graph InferAttr(nnvm::Graph &&ret,
         << "Backward inference for node attributes is not available";
       CHECK_GE(inode.source->control_deps.size(), 1U)
         << "BackwardOp need to have control_deps to its forward op";
-      nnvm::NodePtr fwd_ptr = inode.source->control_deps[0];
+      nnvm::ObjectPtr fwd_ptr = inode.source->control_deps[0];
       CHECK(fwd_ptr->op() != nullptr) << "Forward op cannot be a variable";
 
       static auto& is_fusion_helper = Op::GetAttr<exec::TIsFusionHelper>("TIsFusionHelper");
@@ -690,7 +690,7 @@ nnvm::Graph InferShapeAttr(nnvm::Graph &&ret,
         << "Backward inference for node attributes is not available";
       CHECK_GE(inode.source->control_deps.size(), 1U)
         << "BackwardOp need to have control_deps to its forward op";
-      nnvm::NodePtr fwd_ptr = inode.source->control_deps[0];
+      nnvm::ObjectPtr fwd_ptr = inode.source->control_deps[0];
       CHECK(fwd_ptr->op() != nullptr) << "Forward op cannot be a variable";
 
       static auto& is_fusion_helper = Op::GetAttr<exec::TIsFusionHelper>("TIsFusionHelper");
diff --git a/src/executor/pointwise_fusion_pass.cc b/src/executor/pointwise_fusion_pass.cc
index 6a0d5f4..5db9706 100644
--- a/src/executor/pointwise_fusion_pass.cc
+++ b/src/executor/pointwise_fusion_pass.cc
@@ -94,7 +94,7 @@ namespace {
     return false;
   }
 
-  nnvm::NodePtr CreateSubgraphNode(const Graph& subgraph, size_t inputs_size) {
+  nnvm::ObjectPtr CreateSubgraphNode(const Graph& subgraph, size_t inputs_size) {
     nnvm::Symbol subgraph_sym;
     auto node = nnvm::Node::Create();
     subgraph_sym.outputs = subgraph.outputs;
@@ -133,7 +133,7 @@ Graph ReplaceSubgraphsPointwise(Graph&& g, const std::vector<NodeRawPtrSet>& sub
     // replug inputs of node out of subgraph to be output of the subgraph node
     // if it was a node in the subgraph
     DFSVisit(g.outputs,
-        [&subgraph_node, &subgraph_set, &sub_outputs_in_main](const nnvm::NodePtr node) {
+        [&subgraph_node, &subgraph_set, &sub_outputs_in_main](const nnvm::ObjectPtr node) {
       if (!subgraph_set.count(node.get())) {
         for (auto &e : node->inputs) {
           auto it = sub_outputs_in_main.find(e);
@@ -155,11 +155,11 @@ Graph ReplaceSubgraphsPointwise(Graph&& g, const std::vector<NodeRawPtrSet>& sub
     }
     // move control dependencies between nodes of the subgraph and out of the subgraph
     // to a dependencies between the subgraph node and the nodes out of the subgraph
-    DFSVisit(subgraph.outputs, [&subgraph_node, &subgraph_set](const nnvm::NodePtr& node) {
+    DFSVisit(subgraph.outputs, [&subgraph_node, &subgraph_set](const nnvm::ObjectPtr& node) {
       if (subgraph_set.count(node.get())) {
         auto it = node->control_deps.begin();
         static auto& is_fusion = Op::GetAttr<exec::TIsFusionHelper>("TIsFusionHelper");
-        std::vector<nnvm::NodePtr> new_control_deps;
+        std::vector<nnvm::ObjectPtr> new_control_deps;
         // Use the first control dependency to get the inferattr helper
         if (it != node->control_deps.end()) {
           if (subgraph_set.count(it->get())) {
@@ -190,7 +190,7 @@ Graph ReplaceSubgraphsPointwise(Graph&& g, const std::vector<NodeRawPtrSet>& sub
 
     std::ostringstream name_oss;
     // the name of the new node will be the concatenation of all the node names in the subgraph
-    DFSVisit(subgraph.outputs, [&name_oss](const nnvm::NodePtr n) {
+    DFSVisit(subgraph.outputs, [&name_oss](const nnvm::ObjectPtr n) {
       if (n->op() != nullptr) {
         name_oss << n->op()->name << "_";
       }
@@ -200,7 +200,7 @@ Graph ReplaceSubgraphsPointwise(Graph&& g, const std::vector<NodeRawPtrSet>& sub
     subgraph_node->attrs.name = subgraph_name;
 
     const auto& index = subgraph.indexed_graph();
-    DFSVisit(g.outputs, [&subgraph_node, &subgraph_set, &index](const nnvm::NodePtr& node) {
+    DFSVisit(g.outputs, [&subgraph_node, &subgraph_set, &index](const nnvm::ObjectPtr& node) {
       for (auto &e : node->control_deps) {
         if (subgraph_set.count(e.get())) {
           uint32_t node_id = index.node_id(e.get());
@@ -244,7 +244,7 @@ void AddInputsOnlyCompatible(const Graph &g,
     }
   }
   std::vector<std::vector<nnvm::Node*> > to_add(subsets->size());
-  DFSVisit(g.outputs, [&is_compatible, &node2setidx, &to_add](const nnvm::NodePtr& n) {
+  DFSVisit(g.outputs, [&is_compatible, &node2setidx, &to_add](const nnvm::ObjectPtr& n) {
     const auto& it = node2setidx.find(n.get());
     if (it != node2setidx.end()) {
       for (auto& e : n->inputs) {
@@ -273,7 +273,7 @@ void AddInputsOnlyCompatible(const Graph &g,
                      [&node](const nnvm::NodeEntry& n) {
                        return n.node.get() != node;
                      });
-        DFSVisit(_heads, [&make_cycle, &node](const nnvm::NodePtr& n) {
+        DFSVisit(_heads, [&make_cycle, &node](const nnvm::ObjectPtr& n) {
           if (n.get() == node)
             make_cycle = true;
         });
@@ -308,7 +308,7 @@ Graph FusePointwiseBackward(Graph &&g) {
   fg.outputs.insert(fg.outputs.begin(), g.outputs.begin(),
                     g.outputs.begin() + num_forward_outputs);
   std::unordered_set<nnvm::Node*> exclusion_set;
-  DFSVisit(fg.outputs, [&exclusion_set](const nnvm::NodePtr& n) {
+  DFSVisit(fg.outputs, [&exclusion_set](const nnvm::ObjectPtr& n) {
     exclusion_set.insert(n.get());
   });
   auto subsets = GetCompatibleSubsets(g, [&exclusion_set](nnvm::Node* n) {
diff --git a/src/executor/simple_partition_pass.h b/src/executor/simple_partition_pass.h
index ea1dcf3..1ca0086 100644
--- a/src/executor/simple_partition_pass.h
+++ b/src/executor/simple_partition_pass.h
@@ -64,7 +64,7 @@ class BidirectionalGraph {
     // Create all the nodes in a new graph from
     // nodes in the NNVM graph and store them
     // in nodes array
-    DFSVisit(g.outputs, [this](const nnvm::NodePtr& n) {
+    DFSVisit(g.outputs, [this](const nnvm::ObjectPtr& n) {
       Node new_node;
       new_node.nnvmptr = n.get();
       nnvm2nid[n.get()] = static_cast<uint32_t>(nodes.size());
@@ -298,7 +298,7 @@ nnvm::NodeEntryMap<uint32_t> GetSubgraphOutputs(Graph g, NodeRawPtrSet subgraph_
       outputs.insert({e, count++});
     }
   }
-  DFSVisit(g.outputs, [&subgraph_set, &outputs, &count](const nnvm::NodePtr &node){
+  DFSVisit(g.outputs, [&subgraph_set, &outputs, &count](const nnvm::ObjectPtr &node){
     if (!subgraph_set.count(node.get())) {
       for (auto& e : node->inputs) {
         if (subgraph_set.count(e.node.get()) && !outputs.count(e)) {
@@ -317,7 +317,7 @@ nnvm::NodeEntryMap<uint32_t> GetSubgraphOutputs(Graph g, NodeRawPtrSet subgraph_
 std::vector<nnvm::NodeEntry> GetSubgraphInputs(Graph g, NodeRawPtrSet subgraph_set) {
   std::vector<nnvm::NodeEntry> inputs;
   nnvm::NodeEntryMap<nnvm::NodeEntry> entry_map;
-  DFSVisit(g.outputs, [&subgraph_set, &inputs, &entry_map](const nnvm::NodePtr &node){
+  DFSVisit(g.outputs, [&subgraph_set, &inputs, &entry_map](const nnvm::ObjectPtr &node){
     if (subgraph_set.count(node.get())) {
       for (auto &e : node->inputs) {
         if (!subgraph_set.count(e.node.get())) {
@@ -361,7 +361,7 @@ std::unordered_map<uint32_t, uint32_t> GetGraphInputsMap(const Graph& g) {
  * \brief Helper function to display what nodes are in a specific subset.
  */
 void dispNodesSet(Graph g, NodeRawPtrSet s) {
-  DFSVisit(g.outputs, [&s](const nnvm::NodePtr n){
+  DFSVisit(g.outputs, [&s](const nnvm::ObjectPtr n){
     if (s.count(n.get())) {
       std::cout << "  Y " << n->attrs.name << std::endl;
     } else {
@@ -394,7 +394,7 @@ Graph ReplaceSubgraphs(Graph&& g, const std::vector<NodeRawPtrSet>& subgraph_set
     // replug inputs of node out of subgraph to be output of the subgraph node
     // if it was a node in the subgraph
     DFSVisit(g.outputs,
-        [&subgraph_node, &subgraph_set, &sub_outputs_in_main](const nnvm::NodePtr node) {
+        [&subgraph_node, &subgraph_set, &sub_outputs_in_main](const nnvm::ObjectPtr node) {
       if (!subgraph_set.count(node.get())) {
         for (auto &e : node->inputs) {
           auto it = sub_outputs_in_main.find(e);
@@ -416,13 +416,13 @@ Graph ReplaceSubgraphs(Graph&& g, const std::vector<NodeRawPtrSet>& subgraph_set
     }
     // move control dependencies between nodes of the subgraph and out of the subgraph
     // to a dependencies between the subgraph node and the nodes out of the subgraph
-    DFSVisit(g.outputs, [&subgraph_node, &subgraph_set](const nnvm::NodePtr& node) {
+    DFSVisit(g.outputs, [&subgraph_node, &subgraph_set](const nnvm::ObjectPtr& node) {
       for (auto &e : node->control_deps) {
         if (subgraph_set.count(e.get()))
           e = subgraph_node;
       }
     });
-    DFSVisit(subgraph.outputs, [&subgraph_node, &subgraph_set](const nnvm::NodePtr& node) {
+    DFSVisit(subgraph.outputs, [&subgraph_node, &subgraph_set](const nnvm::ObjectPtr& node) {
       auto it = node->control_deps.begin();
       while (it != node->control_deps.end()) {
         if (subgraph_set.count(it->get())) {
diff --git a/src/imperative/cached_op.cc b/src/imperative/cached_op.cc
index 1edd989..054426e 100644
--- a/src/imperative/cached_op.cc
+++ b/src/imperative/cached_op.cc
@@ -77,7 +77,7 @@ void CreateFullGraph(const nnvm::Symbol& sym,
     NodeEntryMap<size_t> dedup_out;
     for (const NodeEntry& nodeEntry : sym.outputs) {
       if (dedup_out.find(nodeEntry) != dedup_out.end()) {
-        NodePtr copy_node = Node::Create();
+        ObjectPtr copy_node = Node::Create();
         copy_node->attrs.op = _copy_op;
         copy_node->attrs.name =
             nodeEntry.node->attrs.name + "_copy" + std::to_string(dedup_out[nodeEntry]++);
@@ -101,7 +101,7 @@ void CreateFullGraph(const nnvm::Symbol& sym,
   {
     ograd_entries->reserve(fwd_graph->outputs.size());
     for (size_t i = 0; i < fwd_graph->outputs.size(); ++i) {
-      nnvm::NodePtr np = Node::Create();
+      nnvm::ObjectPtr np = Node::Create();
       np->attrs.name = "_head_grad_" + std::to_string(i);
       ograd_entries->emplace_back(np);
     }
@@ -339,7 +339,7 @@ CachedOp::~CachedOp() {
 }
 
 std::vector<nnvm::NodeEntry> CachedOp::Gradient(
-    const nnvm::NodePtr& node,
+    const nnvm::ObjectPtr& node,
     const std::vector<nnvm::NodeEntry>& ograds) const {
   using namespace nnvm;
   static const auto _backward_CachedOp = Op::Get("_backward_CachedOp");
@@ -1523,7 +1523,7 @@ NNVM_REGISTER_OP(_CachedOp)
   })
 .set_attr_parser(CachedOpParamParser)
 .set_attr<nnvm::FGradient>("FGradient",
-  [](const nnvm::NodePtr& n, const std::vector<nnvm::NodeEntry>& ograds) {
+  [](const nnvm::ObjectPtr& n, const std::vector<nnvm::NodeEntry>& ograds) {
     const CachedOpPtr& op = nnvm::get<CachedOpPtr>(n->attrs.parsed);
     return op->Gradient(n, ograds);
   })
diff --git a/src/imperative/cached_op.h b/src/imperative/cached_op.h
index 0134715..8f5bda3 100644
--- a/src/imperative/cached_op.h
+++ b/src/imperative/cached_op.h
@@ -105,7 +105,7 @@ class CachedOp {
     return fwd_graph_.indexed_graph().mutable_input_nodes();
   }
   std::vector<nnvm::NodeEntry> Gradient(
-      const nnvm::NodePtr& node,
+      const nnvm::ObjectPtr& node,
       const std::vector<nnvm::NodeEntry>& ograds) const;
   OpStatePtr Forward(
       const std::shared_ptr<CachedOp>& op_ptr,
diff --git a/src/imperative/imperative.cc b/src/imperative/imperative.cc
index 6f70d6a..97a09fd 100644
--- a/src/imperative/imperative.cc
+++ b/src/imperative/imperative.cc
@@ -145,7 +145,7 @@ void Imperative::MarkVariables(
 
 
 void Imperative::GetBackwardDependency(
-    const nnvm::NodePtr& node,
+    const nnvm::ObjectPtr& node,
     uint32_t num_inputs, uint32_t num_outputs,
     std::vector<bool> *p_save_inputs,
     std::vector<bool> *p_save_outputs) {
@@ -177,7 +177,7 @@ void Imperative::GetBackwardDependency(
         save_outputs[i.index] = true;
       }
     }
-    DFSVisit(igrad_entries, [&](const nnvm::NodePtr& gnode) {
+    DFSVisit(igrad_entries, [&](const nnvm::ObjectPtr& gnode) {
         if (!gnode || gnode == node) return;
         for (const auto& i : gnode->inputs) {
           if (i.node == nullptr && i.version == 0) {
@@ -216,7 +216,7 @@ void Imperative::RecordOp(
   }
   if (!need_grad) return;
 
-  nnvm::NodePtr node = nnvm::Node::Create();
+  nnvm::ObjectPtr node = nnvm::Node::Create();
   node->attrs = std::move(attrs);
   node->attrs.name = "node_" + std::to_string(node_count_++);
   AGInfo& info = AGInfo::Create(node);
@@ -305,7 +305,7 @@ std::vector<NDArray*> Imperative::Backward(
   std::vector<NodeEntry> ograd_entries;
   ograd_entries.reserve(ograds.size());
   for (size_t i = 0; i < outputs.size(); ++i) {
-    nnvm::NodePtr np = Node::Create();
+    nnvm::ObjectPtr np = Node::Create();
     np->attrs.name = "_head_grad_" + std::to_string(i);
     ograd_entries.emplace_back(NodeEntry{np, 0, 0});
     AGInfo& info = AGInfo::Create(ograd_entries.back().node);
@@ -341,7 +341,7 @@ std::vector<NDArray*> Imperative::Backward(
       x_reqs.push_back(kWriteTo);
     }
   } else {
-    std::vector<NodePtr> args = sym.ListInputs(Symbol::kReadOnlyArgs);
+    std::vector<ObjectPtr> args = sym.ListInputs(Symbol::kReadOnlyArgs);
     xs.reserve(args.size());
     x_grads.reserve(args.size());
     x_reqs.reserve(args.size());
@@ -394,7 +394,7 @@ std::vector<NDArray*> Imperative::Backward(
   }
   if (create_graph) {
     states.resize(num_forward_nodes);
-    nnvm::DFSVisit(sym.outputs, [&](const nnvm::NodePtr& n) {
+    nnvm::DFSVisit(sym.outputs, [&](const nnvm::ObjectPtr& n) {
       AGInfo& info = AGInfo::Get(n);
       states[idx.node_id(n.get())] = info.state;
       for (uint32_t i = 0; i < info.outputs.size(); ++i) {
@@ -532,7 +532,7 @@ std::vector<NDArray*> Imperative::Backward(
 
   // Clear history
   if (!retain_graph) {
-    nnvm::DFSVisit(sym.outputs, [&](const nnvm::NodePtr& n) {
+    nnvm::DFSVisit(sym.outputs, [&](const nnvm::ObjectPtr& n) {
       AGInfo::Clear(n);
       n->inputs.clear();
     });
diff --git a/src/nnvm/amp_infer_unknown.cc b/src/nnvm/amp_infer_unknown.cc
index 1de3104..1815dc4 100644
--- a/src/nnvm/amp_infer_unknown.cc
+++ b/src/nnvm/amp_infer_unknown.cc
@@ -37,7 +37,7 @@
 
 namespace mxnet {
 using nnvm::Graph;
-using nnvm::NodePtr;
+using nnvm::ObjectPtr;
 using nnvm::NodeEntry;
 using dmlc::any;
 using mxnet::op::AMPCastParam;
@@ -50,7 +50,7 @@ static void CheckAndUpdateInferredDtypes(
     mshadow::TypeFlag result_dtype,
     std::unordered_map<std::string, mshadow::TypeFlag> *visited_vars,
     nnvm::DTypeVector *inferred_dtype_result) {
-  const NodePtr &input_node = node_entry.node;
+  const ObjectPtr &input_node = node_entry.node;
   if (!visited_vars->count(input_node->attrs.name)) {
     if ((*inferred_dtype_result)[idx.entry_id(node_entry)] == -1) {
       (*visited_vars)[input_node->attrs.name] = result_dtype;
@@ -84,7 +84,7 @@ Graph AMPInferUnknown(Graph &&src) {
   // and check if inputs to these nodes are variables.
   // If input nodes are variables, set dtype for these inputs
   // and check for conflicts if an input node goes to two cast nodes
-  DFSVisit(src.outputs, [&](const NodePtr &node) {
+  DFSVisit(src.outputs, [&](const ObjectPtr &node) {
     if (!node->is_variable()) {
       std::string op_name = node->op()->name;
 
@@ -94,7 +94,7 @@ Graph AMPInferUnknown(Graph &&src) {
         // and already has dtype set, make sure the dtype inferred again
         // is same, otherwise reset dtype to original dtype
         for (const NodeEntry &node_entry : node->inputs) {
-          const NodePtr &input_node = node_entry.node;
+          const ObjectPtr &input_node = node_entry.node;
           if (input_node->is_variable() &&
               (node->attrs.dict.find("dtype") != node->attrs.dict.end())) {
             const AMPCastParam &param =
@@ -112,7 +112,7 @@ Graph AMPInferUnknown(Graph &&src) {
         // if it is not already set
         mshadow::TypeFlag max_dtype = static_cast<mshadow::TypeFlag>(target_dtype);
         for (const NodeEntry& node_entry : node->inputs) {
-          const NodePtr& input_node = node_entry.node;
+          const ObjectPtr& input_node = node_entry.node;
           if (!input_node->is_variable()) {
             // if one input is not a variable then don't infer the dtype of other
             // input node dtypes
@@ -121,7 +121,7 @@ Graph AMPInferUnknown(Graph &&src) {
         }
         if (max_dtype == target_dtype) {
           for (const NodeEntry &node_entry : node->inputs) {
-            const NodePtr &input_node = node_entry.node;
+            const ObjectPtr &input_node = node_entry.node;
             if (input_node->is_variable()) {
               CheckAndUpdateInferredDtypes(inferred_dtypes, idx, node_entry,
                                            max_dtype, &visited_vars,
diff --git a/src/nnvm/gradient.cc b/src/nnvm/gradient.cc
index 5860271..74cec16 100644
--- a/src/nnvm/gradient.cc
+++ b/src/nnvm/gradient.cc
@@ -39,13 +39,13 @@ NodeEntry DefaultAggregateGradient(std::vector<NodeEntry>&& v) {
   if (v.size() == 1) {
     return std::move(v[0]);
   } else if (v.size() == 0) {
-    NodePtr zero_node = Node::Create();
+    ObjectPtr zero_node = Node::Create();
     zero_node->attrs.op = Op::Get("zeros");
     zero_node->attrs.name = "zero_grad";
     zero_node->attrs.op->attr_parser(&(zero_node->attrs));
     return NodeEntry{zero_node, 0, 0};
   } else {
-    NodePtr sum_node = Node::Create();
+    ObjectPtr sum_node = Node::Create();
     sum_node->attrs.op = Op::Get("elemwise_sum");
     sum_node->inputs = std::move(v);
     sum_node->attrs.name = "grad_sum";
@@ -121,10 +121,10 @@ Graph Gradient(Graph src) {
       nullptr;
 
   // topo sort
-  std::vector<NodePtr> topo_order;
+  std::vector<ObjectPtr> topo_order;
   std::unordered_map<Node*, std::vector<GradEntry> > output_grads;
 
-  DFSVisit(ys, [&](const NodePtr& node) {
+  DFSVisit(ys, [&](const ObjectPtr& node) {
       if (output_grads.count(node.get()) == 0) {
         output_grads[node.get()].resize(node->num_outputs());
       }
@@ -145,11 +145,11 @@ Graph Gradient(Graph src) {
   }
 
   // construct mirror as memory reduction strategy if needed
-  std::unordered_map<Node*, NodePtr> mirror_map;
+  std::unordered_map<Node*, ObjectPtr> mirror_map;
   if (mirror_fun != nullptr) {
-    for (const NodePtr& node_ptr : topo_order) {
+    for (const ObjectPtr& node_ptr : topo_order) {
       if (mirror_fun(*node_ptr)) {
-        NodePtr new_node = Node::Create();
+        ObjectPtr new_node = Node::Create();
         *new_node = *node_ptr;
         new_node->attrs.name += "_mirror";
         for (auto& e : new_node->inputs) {
@@ -171,7 +171,7 @@ Graph Gradient(Graph src) {
 
   std::vector<NodeEntry> out_agg_grads;
   for (auto rit = topo_order.rbegin(); rit != topo_order.rend(); ++rit) {
-    const NodePtr& ptr = *rit;
+    const ObjectPtr& ptr = *rit;
     if (ptr->is_variable()) continue;
     out_agg_grads.clear();
     auto& out_grad_vec = output_grads.at(ptr.get());
@@ -184,7 +184,7 @@ Graph Gradient(Graph src) {
       out_agg_grads.push_back(e.sum);
     }
     if ((*rit)->inputs.size() != 0) {
-      NodePtr fwd_node = (mirror_map.size() == 0 ? ptr : mirror_map.at(ptr.get()));
+      ObjectPtr fwd_node = (mirror_map.size() == 0 ? ptr : mirror_map.at(ptr.get()));
       std::vector<NodeEntry> input_grads;
       // Check for FGradient
       if (grad_fun_map.contains(ptr->op())) {
@@ -246,7 +246,7 @@ Graph Gradient(Graph src) {
       if (kv == unique_grads.end()) {
         unique_grads.emplace(std::move(entry.sum), std::make_pair(1, counter));
       } else {
-        NodePtr copy_node = Node::Create();
+        ObjectPtr copy_node = Node::Create();
         std::ostringstream os;
         os << entry.sum.node->attrs.name << "_" << kv->second.first << "_copy";
         kv->second.first++;
diff --git a/src/nnvm/graph_editor.cc b/src/nnvm/graph_editor.cc
index e3ff3f7..2d2053c 100644
--- a/src/nnvm/graph_editor.cc
+++ b/src/nnvm/graph_editor.cc
@@ -28,7 +28,7 @@
 #include <nnvm/node.h>
 
 namespace nnvm {
-NodePtr CreateVariableNode(const std::string& name);
+ObjectPtr CreateVariableNode(const std::string& name);
 }
 
 namespace mxnet {
@@ -73,7 +73,7 @@ bool CutGraphInputs(const std::vector<nnvm::NodeEntry *> &input_entries,
     }
   };
 
-  std::vector<nnvm::NodePtr> var_nodes;
+  std::vector<nnvm::ObjectPtr> var_nodes;
   orig_entries->clear();
   orig_entries->reserve(input_entries.size());
   for (auto input_entry : input_entries) {
@@ -85,7 +85,7 @@ bool CutGraphInputs(const std::vector<nnvm::NodeEntry *> &input_entries,
                            pred_entry(*input_entry));
     bool exist = (it != orig_entries->end());
     orig_entries->push_back(*input_entry);
-    nnvm::NodePtr n;
+    nnvm::ObjectPtr n;
     // If we haven't seen the entry before, we need to create a new var node
     // for the node entry.
     if (!exist) {
diff --git a/src/nnvm/legacy_json_util.cc b/src/nnvm/legacy_json_util.cc
index a2d14c2..4741d6e 100644
--- a/src/nnvm/legacy_json_util.cc
+++ b/src/nnvm/legacy_json_util.cc
@@ -39,7 +39,7 @@ namespace mxnet {
 using nnvm::Graph;
 using nnvm::Op;
 using nnvm::Node;
-using nnvm::NodePtr;
+using nnvm::ObjectPtr;
 using nnvm::NodeAttrs;
 using nnvm::NodeEntry;
 using nnvm::Symbol;
diff --git a/src/nnvm/legacy_op_util.cc b/src/nnvm/legacy_op_util.cc
index 3e03b6b..851552a 100644
--- a/src/nnvm/legacy_op_util.cc
+++ b/src/nnvm/legacy_op_util.cc
@@ -36,7 +36,7 @@ namespace op {
 
 using nnvm::Op;
 using nnvm::Node;
-using nnvm::NodePtr;
+using nnvm::ObjectPtr;
 using nnvm::NodeAttrs;
 using nnvm::NodeEntry;
 
@@ -309,7 +309,7 @@ OpStatePtr OpPropCreateLayerOp(const NodeAttrs& attrs,
 
 inline std::vector<NodeEntry> OpPropGradient(
     const Op* back_op,
-    const NodePtr& ptr,
+    const ObjectPtr& ptr,
     const std::vector<NodeEntry>& out_grads) {
   auto& prop = nnvm::get<ParsedOpProp>(ptr->attrs.parsed);
   std::vector<NodeEntry> out_data;
@@ -326,7 +326,7 @@ inline std::vector<NodeEntry> OpPropGradient(
   for (size_t i = 0; i < prop.aux_states.size(); ++i) {
     inputs.emplace_back(ptr->inputs[i + prop.arguments.size()]);
   }
-  NodePtr gnode = Node::Create();
+  ObjectPtr gnode = Node::Create();
   gnode->inputs = std::move(inputs);
   gnode->control_deps.emplace_back(ptr);
   gnode->attrs = ptr->attrs;
diff --git a/src/nnvm/low_precision_pass.cc b/src/nnvm/low_precision_pass.cc
index 7cd0178..6faa5c4c 100644
--- a/src/nnvm/low_precision_pass.cc
+++ b/src/nnvm/low_precision_pass.cc
@@ -34,13 +34,13 @@
 namespace mxnet {
 using nnvm::Symbol;
 using nnvm::Node;
-using nnvm::NodePtr;
+using nnvm::ObjectPtr;
 using nnvm::NodeEntry;
 using nnvm::Graph;
 
 // create a node for operator : op_name with name : node_name
-static NodePtr CreateNode(std::string op_name, std::string node_name) {
-  NodePtr node = Node::Create();
+static ObjectPtr CreateNode(std::string op_name, std::string node_name) {
+  ObjectPtr node = Node::Create();
   node->attrs.name = node_name;
   if (op_name == "nullptr") {
     node->attrs.op = nullptr;
@@ -54,9 +54,9 @@ static NodePtr CreateNode(std::string op_name, std::string node_name) {
   return node;
 }
 
-static NodePtr InsertNode(std::string op_name, std::string node_name, NodePtr current,
+static ObjectPtr InsertNode(std::string op_name, std::string node_name, ObjectPtr current,
                           NodeEntry previous) {
-    NodePtr node = CreateNode(op_name, node_name);
+    ObjectPtr node = CreateNode(op_name, node_name);
     node->inputs.emplace_back(previous);
     current->inputs.emplace_back(NodeEntry{node, 0, 0});
     return node;
@@ -64,11 +64,11 @@ static NodePtr InsertNode(std::string op_name, std::string node_name, NodePtr cu
 
 // get suffix for a node entry so that it can be used for amp_cast/amp_multicast node name
 static std::string GetSuffix(const nnvm::NodeEntry &node_entry,
-                             const std::unordered_map<Node*, NodePtr> &mirror_map) {
+                             const std::unordered_map<Node*, ObjectPtr> &mirror_map) {
   static const auto &flist_outputs =
       nnvm::Op::GetAttr<nnvm::FListOutputNames>("FListOutputNames");
   std::string suffix = "";
-  NodePtr mirror_node = mirror_map.at(node_entry.node.get());
+  ObjectPtr mirror_node = mirror_map.at(node_entry.node.get());
   if (mirror_node->op() != nullptr) {
       auto list_output_names_func = flist_outputs.get(node_entry.node->op(), nullptr);
       if (list_output_names_func != nullptr) {
@@ -85,8 +85,8 @@ static std::string GetSuffix(const nnvm::NodeEntry &node_entry,
 static void AddCastNode(const nnvm::NodeEntry &e, const std::string &suffix,
                         const nnvm::NodeEntry &input, const std::string dtype,
                         nnvm::NodeEntryMap<NodeEntry> *mirror_entry_map,
-                        NodePtr curr_node) {
-  NodePtr cast_node =
+                        ObjectPtr curr_node) {
+  ObjectPtr cast_node =
       InsertNode("amp_cast", e.node->attrs.name + suffix + "_amp_cast_" + dtype,
                  curr_node, input);
   cast_node->attrs.dict["dtype"] = dtype;
@@ -98,13 +98,13 @@ static void AddCastNode(const nnvm::NodeEntry &e, const std::string &suffix,
 // add amp_multicast node between curr_node and inputs
 static void AddMultiCastNode(const std::vector<NodeEntry> &inputs,
                              const std::string &node_name,
-                             const std::unordered_map<Node *, NodePtr> &mirror_map,
-                             NodePtr curr_node) {
-  NodePtr node =
+                             const std::unordered_map<Node *, ObjectPtr> &mirror_map,
+                             ObjectPtr curr_node) {
+  ObjectPtr node =
       CreateNode("amp_multicast",
                  inputs[0].node->attrs.name + node_name + "_amp_multicast");
   for (const auto &node_entry : inputs) {
-    NodePtr mirror_node = mirror_map.at(node_entry.node.get());
+    ObjectPtr mirror_node = mirror_map.at(node_entry.node.get());
     NodeEntry mirror_entry = NodeEntry{std::move(mirror_node), node_entry.index,
                                        node_entry.version};
     node->inputs.emplace_back(mirror_entry);
@@ -123,7 +123,7 @@ static bool CheckConditionalFP32(
     const std::unordered_map<
         std::string, std::unordered_map<std::string, std::vector<std::string>>>
         &conditional_fp32_ops,
-    const std::unordered_set<std::string> &excluded_syms, NodePtr node) {
+    const std::unordered_set<std::string> &excluded_syms, ObjectPtr node) {
   if (node->is_variable() || (excluded_syms.count(node->attrs.name) > 0) ||
       conditional_fp32_ops.count(node->op()->name) == 0) {
     return false;
@@ -167,13 +167,13 @@ Graph ReducePrecision(Graph &&src) {
       << "Only float16 target_dtype is supported yet";
 
   // Additional data structures to share common cast node inputs among different nodes
-  std::unordered_map<Node *, NodePtr> mirror_map;
+  std::unordered_map<Node *, ObjectPtr> mirror_map;
   nnvm::NodeEntryMap<NodeEntry> mirror_fp32_map;
   nnvm::NodeEntryMap<NodeEntry> mirror_target_dtype_map;
 
   // Visit nodes in a topologically sorted order
-  DFSVisit(src.outputs, [&](const NodePtr &node) {
-    NodePtr new_node = Node::Create(*node);
+  DFSVisit(src.outputs, [&](const ObjectPtr &node) {
+    ObjectPtr new_node = Node::Create(*node);
     new_node->inputs.clear();
 
     /* 1. for node which needs to run in FP32 mode, add amp_cast operators
@@ -192,7 +192,7 @@ Graph ReducePrecision(Graph &&src) {
         if (mirror_fp32_map.count(node_entry)) {
           new_node->inputs.emplace_back(mirror_fp32_map[node_entry]);
         } else {
-          NodePtr mirror_node = mirror_map.at(node_entry.node.get());
+          ObjectPtr mirror_node = mirror_map.at(node_entry.node.get());
           NodeEntry mirror_entry = NodeEntry{mirror_node, node_entry.index, node_entry.version};
           std::string suffix = GetSuffix(node_entry, mirror_map);
           AddCastNode(node_entry, suffix, mirror_entry, "float32", &mirror_fp32_map,
@@ -206,7 +206,7 @@ Graph ReducePrecision(Graph &&src) {
         if (mirror_target_dtype_map.count(node_entry)) {
           new_node->inputs.emplace_back(mirror_target_dtype_map[node_entry]);
         } else {
-          NodePtr mirror_node = mirror_map.at(node_entry.node.get());
+          ObjectPtr mirror_node = mirror_map.at(node_entry.node.get());
           NodeEntry mirror_entry = NodeEntry{mirror_node, node_entry.index, node_entry.version};
           std::string suffix = GetSuffix(node_entry, mirror_map);
           AddCastNode(node_entry, suffix, mirror_entry, "float16",
@@ -228,7 +228,7 @@ Graph ReducePrecision(Graph &&src) {
         if (mirror_fp32_map.count(node_entry)) {
           new_node->inputs.emplace_back(mirror_fp32_map[node_entry]);
         } else {
-          NodePtr mirror_node = mirror_map.at(node_entry.node.get());
+          ObjectPtr mirror_node = mirror_map.at(node_entry.node.get());
           NodeEntry mirror_entry = NodeEntry{mirror_node, node_entry.index, node_entry.version};
           std::string suffix = GetSuffix(node_entry, mirror_map);
           AddCastNode(node_entry, suffix, mirror_entry, "float32", &mirror_fp32_map,
@@ -237,7 +237,7 @@ Graph ReducePrecision(Graph &&src) {
       }
     } else {
       for (const auto& node_entry : node->inputs) {
-        NodePtr mirror_node = mirror_map.at(node_entry.node.get());
+        ObjectPtr mirror_node = mirror_map.at(node_entry.node.get());
         new_node->inputs.emplace_back(mirror_node, node_entry.index, node_entry.version);
       }
     }
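
The ReducePrecision pass above changes only the handle type; the traversal itself is untouched. As a reference for that DFSVisit/mirror_map idiom, here is a minimal sketch under the assumption that nnvm::ObjectPtr is simply the renamed shared Node handle (CopyGraph and the header list are illustrative, not part of this change):

    #include <nnvm/graph.h>
    #include <nnvm/node.h>
    #include <unordered_map>

    // Sketch: duplicate a graph node-by-node, rewiring inputs through a mirror map.
    nnvm::Graph CopyGraph(const nnvm::Graph& src) {
      std::unordered_map<nnvm::Node*, nnvm::ObjectPtr> mirror_map;
      nnvm::DFSVisit(src.outputs, [&](const nnvm::ObjectPtr& node) {
        // DFSVisit reaches producers before consumers, so inputs are already mirrored.
        nnvm::ObjectPtr new_node = nnvm::Node::Create(*node);  // copies op/attrs
        new_node->inputs.clear();
        for (const auto& e : node->inputs) {
          nnvm::ObjectPtr mirror = mirror_map.at(e.node.get());
          new_node->inputs.emplace_back(mirror, e.index, e.version);
        }
        mirror_map[node.get()] = std::move(new_node);
      });
      nnvm::Graph ret;
      for (const auto& e : src.outputs) {
        ret.outputs.emplace_back(mirror_map.at(e.node.get()), e.index, e.version);
      }
      return ret;
    }
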
diff --git a/src/nnvm/node_op_util.h b/src/nnvm/node_op_util.h
index 54a9633..a110553 100644
--- a/src/nnvm/node_op_util.h
+++ b/src/nnvm/node_op_util.h
@@ -34,10 +34,10 @@ namespace util {
 
 class NodeOpGen {
  private:
-    const nnvm::NodePtr &dependent_node;
+    const nnvm::ObjectPtr &dependent_node;
 
  public:
-    explicit NodeOpGen(const nnvm::NodePtr &dependent_node) : dependent_node{dependent_node} {}
+    explicit NodeOpGen(const nnvm::ObjectPtr &dependent_node) : dependent_node{dependent_node} {}
 
     nnvm::NodeEntry mul(const nnvm::NodeEntry &lhs, const nnvm::NodeEntry &rhs) {
         return nnvm::NodeEntry{mxnet::op::MakeNode("elemwise_mul",
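
NodeOpGen changes only the handle type in its constructor. A short usage sketch, assuming the util namespace shown in the hunk and mxnet::op are already in scope inside an FGradient implementation; `n` stands for the node being differentiated:

    // Build ograd * x as a new elemwise_mul node that depends on n.
    util::NodeOpGen gen{n};                                   // n: const nnvm::ObjectPtr&
    nnvm::NodeEntry prod = gen.mul(ograds[0], n->inputs[0]);  // new elemwise_mul entry
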
diff --git a/src/operator/batch_norm_v1.cc b/src/operator/batch_norm_v1.cc
index dc9f724..c837a5e 100644
--- a/src/operator/batch_norm_v1.cc
+++ b/src/operator/batch_norm_v1.cc
@@ -100,7 +100,7 @@ sparse tensors.
 
 NNVM_REGISTER_OP(BatchNorm_v1)
 .set_attr<nnvm::FSetInputVarAttrOnCompose>("FSetInputVarAttrOnCompose",
-    [](const nnvm::NodeAttrs& attrs, nnvm::NodePtr var, const int index) {
+    [](const nnvm::NodeAttrs& attrs, nnvm::ObjectPtr var, const int index) {
       if (var->attrs.dict.find("__init__") != var->attrs.dict.end()) return;
       if (index == 3) {
         var->attrs.dict["__init__"] = "[\"zero\", {}]";
diff --git a/src/operator/contrib/amp_graph_pass.cc b/src/operator/contrib/amp_graph_pass.cc
index abecc4a..b3c90e7 100644
--- a/src/operator/contrib/amp_graph_pass.cc
+++ b/src/operator/contrib/amp_graph_pass.cc
@@ -31,7 +31,7 @@ namespace mxnet {
 namespace op {
 
 using nnvm::Node;
-using nnvm::NodePtr;
+using nnvm::ObjectPtr;
 using nnvm::Graph;
 
 
@@ -39,7 +39,7 @@ using nnvm::Graph;
  * \brief Remove amp_cast and amp_multicast and replug the fp32 weights
  */
 Graph RemoveAmpCast(Graph&& g) {
-  DFSVisit(g.outputs, [](const NodePtr& n) {
+  DFSVisit(g.outputs, [](const ObjectPtr& n) {
     for (size_t i = 0; i < n->inputs.size(); ++i) {
       auto e = n->inputs[i];
       if (e.node->op() == Op::Get("amp_cast")) {
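
The hunk above only shows the start of the loop RemoveAmpCast uses to find amp_cast inputs. For orientation, one generic way such a bypass can be written (this sketch is not the pass's actual body, just the replugging idea the file comment describes):

    // If input i of n is produced by an amp_cast, point it at the cast's own
    // input instead, so the cast node drops out of the rebuilt graph.
    if (e.node->op() == Op::Get("amp_cast")) {
      n->inputs[i] = e.node->inputs[0];
    }
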
diff --git a/src/operator/contrib/roi_align.cc b/src/operator/contrib/roi_align.cc
index 38b889b..e3785c2 100644
--- a/src/operator/contrib/roi_align.cc
+++ b/src/operator/contrib/roi_align.cc
@@ -608,7 +608,7 @@ He, Kaiming, et al. "Mask R-CNN." ICCV, 2017
 })
 .set_attr<FCompute>("FCompute<cpu>", ROIAlignForwardCompute<cpu>)
 .set_attr<nnvm::FGradient>("FGradient",
-  [](const nnvm::NodePtr& n, const std::vector<nnvm::NodeEntry>& ograds) {
+  [](const nnvm::ObjectPtr& n, const std::vector<nnvm::NodeEntry>& ograds) {
     std::vector<nnvm::NodeEntry> heads;
     heads.push_back(ograds[roialign::kOut]);
     heads.push_back(n->inputs[roialign::kBox]);
diff --git a/src/operator/contrib/sync_batch_norm.cc b/src/operator/contrib/sync_batch_norm.cc
index 418688e..f737ef5 100644
--- a/src/operator/contrib/sync_batch_norm.cc
+++ b/src/operator/contrib/sync_batch_norm.cc
@@ -104,7 +104,7 @@ Reference:
 
 NNVM_REGISTER_OP(_contrib_SyncBatchNorm)
 .set_attr<nnvm::FSetInputVarAttrOnCompose>("FSetInputVarAttrOnCompose",
-    [](const nnvm::NodeAttrs& attrs, nnvm::NodePtr var, const int index) {
+    [](const nnvm::NodeAttrs& attrs, nnvm::ObjectPtr var, const int index) {
       if (var->attrs.dict.find("__init__") != var->attrs.dict.end()) return;
       if (index == 3) {
         var->attrs.dict["__init__"] = "[\"zero\", {}]";
diff --git a/src/operator/control_flow.cc b/src/operator/control_flow.cc
index a9e9038..474241c 100644
--- a/src/operator/control_flow.cc
+++ b/src/operator/control_flow.cc
@@ -471,7 +471,7 @@ static OpStatePtr CreateForeachState(const NodeAttrs& attrs,
 }
 
 static std::vector<nnvm::NodeEntry>
-ForeachGradient(const nnvm::NodePtr& n, const std::vector<nnvm::NodeEntry>& ograds) {
+ForeachGradient(const nnvm::ObjectPtr& n, const std::vector<nnvm::NodeEntry>& ograds) {
   ElemwiseGradUseInOut fgrad{"_backward_foreach"};
   std::vector<nnvm::NodeEntry> entries = fgrad(n, ograds);
   entries[0].node->attrs.subgraphs = n->attrs.subgraphs;
@@ -826,7 +826,7 @@ static OpStatePtr CreateWhileLoopState(const NodeAttrs& attrs,
 }
 
 static std::vector<nnvm::NodeEntry>
-WhileLoopGradient(const nnvm::NodePtr& n, const std::vector<nnvm::NodeEntry>& ograds) {
+WhileLoopGradient(const nnvm::ObjectPtr& n, const std::vector<nnvm::NodeEntry>& ograds) {
   ElemwiseGradUseInOut fgrad{"_backward_while_loop"};
   std::vector<nnvm::NodeEntry> entries = fgrad(n, ograds);
   entries[0].node->attrs.subgraphs = n->attrs.subgraphs;
@@ -1079,7 +1079,7 @@ static OpStatePtr CreateCondState(const NodeAttrs& attrs,
 }
 
 static std::vector<nnvm::NodeEntry>
-CondGradient(const nnvm::NodePtr& n, const std::vector<nnvm::NodeEntry>& ograds) {
+CondGradient(const nnvm::ObjectPtr& n, const std::vector<nnvm::NodeEntry>& ograds) {
   ElemwiseGradUseInOut fgrad{"_backward_cond"};
   std::vector<nnvm::NodeEntry> entries = fgrad(n, ograds);
   entries[0].node->attrs.subgraphs = n->attrs.subgraphs;
diff --git a/src/operator/custom/custom.cc b/src/operator/custom/custom.cc
index 3c4843c..49497a2 100644
--- a/src/operator/custom/custom.cc
+++ b/src/operator/custom/custom.cc
@@ -209,11 +209,11 @@ bool InferType(const NodeAttrs& attrs,
 }
 
 std::vector<nnvm::NodeEntry> Gradient(
-    const nnvm::NodePtr& n,
+    const nnvm::ObjectPtr& n,
     const std::vector<nnvm::NodeEntry>& out_grads) {
   const CustomParam& params = nnvm::get<CustomParam>(n->attrs.parsed);
 
-  nnvm::NodePtr g = nnvm::Node::Create();
+  nnvm::ObjectPtr g = nnvm::Node::Create();
   g->attrs.op = nnvm::Op::Get("_backward_Custom");
   g->attrs.name = n->attrs.name;
   g->attrs.parsed = params;
@@ -241,7 +241,7 @@ std::vector<nnvm::NodeEntry> Gradient(
     ret.emplace_back(g, static_cast<uint32_t>(i), 0);
   }
   if (params.num_auxs) {
-    nnvm::NodePtr ng = nnvm::Node::Create();
+    nnvm::ObjectPtr ng = nnvm::Node::Create();
     ng->attrs.op = nnvm::Op::Get("_NoGradient");
     ng->attrs.name = "NoGradient";
     for (size_t i = 0; i < params.num_auxs; ++i) {
diff --git a/src/operator/elemwise_op_common.h b/src/operator/elemwise_op_common.h
index 2cdd73a..bc2e1d7 100644
--- a/src/operator/elemwise_op_common.h
+++ b/src/operator/elemwise_op_common.h
@@ -224,7 +224,7 @@ inline bool ElemwiseIntType(const nnvm::NodeAttrs& attrs,
 // Transfer gradient and input to FGradient function
 struct ElemwiseGradUseIn {
   const char *op_name;
-  std::vector<nnvm::NodeEntry> operator()(const nnvm::NodePtr& n,
+  std::vector<nnvm::NodeEntry> operator()(const nnvm::ObjectPtr& n,
                                           const std::vector<nnvm::NodeEntry>& ograds) const {
     return MakeNonlossGradNode(op_name, n, ograds, n->inputs, n->attrs.dict);
   }
@@ -233,7 +233,7 @@ struct ElemwiseGradUseIn {
 // Transfer gradient and output to FGradient function
 struct ElemwiseGradUseOut {
   const char *op_name;
-  std::vector<nnvm::NodeEntry> operator()(const nnvm::NodePtr& n,
+  std::vector<nnvm::NodeEntry> operator()(const nnvm::ObjectPtr& n,
                                           const std::vector<nnvm::NodeEntry>& ograds) const {
     std::vector<nnvm::NodeEntry> heads;
     uint32_t n_out = n->num_outputs();
@@ -247,7 +247,7 @@ struct ElemwiseGradUseOut {
 // Transfer gradient and input and output to FGradient function
 struct ElemwiseGradUseInOut {
   const char *op_name;
-  std::vector<nnvm::NodeEntry> operator()(const nnvm::NodePtr& n,
+  std::vector<nnvm::NodeEntry> operator()(const nnvm::ObjectPtr& n,
                                           const std::vector<nnvm::NodeEntry>& ograds) const {
     std::vector<nnvm::NodeEntry> heads(ograds.begin(), ograds.end());
     for (auto& h : n->inputs) {
@@ -264,7 +264,7 @@ struct ElemwiseGradUseInOut {
 // Transfer only gradient to FGradient function
 struct ElemwiseGradUseNone {
   const char *op_name;
-  std::vector<nnvm::NodeEntry> operator()(const nnvm::NodePtr& n,
+  std::vector<nnvm::NodeEntry> operator()(const nnvm::ObjectPtr& n,
                                           const std::vector<nnvm::NodeEntry>& ograds) const {
     return MakeNonlossGradNode(op_name, n, ograds, {}, n->attrs.dict);
   }
@@ -272,7 +272,7 @@ struct ElemwiseGradUseNone {
 
 struct CloneGradient {
   const char *op_name;
-  std::vector<nnvm::NodeEntry> operator()(const nnvm::NodePtr& n,
+  std::vector<nnvm::NodeEntry> operator()(const nnvm::ObjectPtr& n,
                                           const std::vector<nnvm::NodeEntry>& ograds) const {
     std::vector<nnvm::NodeEntry> ret;
     const size_t input_count = n->inputs.size();
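
All of these functors keep their call shape; only the handle type in operator() changes. A sketch of how an op registration typically plugs one in (the op and backward-op names here are placeholders, not ops touched by this commit):

    // The functor is copy-initialized with the backward op's name and is later
    // invoked by the pass machinery as fgrad(n, ograds), with n an ObjectPtr.
    NNVM_REGISTER_OP(my_unary_op)
    .set_num_inputs(1)
    .set_num_outputs(1)
    .set_attr<nnvm::FGradient>("FGradient", ElemwiseGradUseIn{"_backward_my_unary_op"});

The explicit-lambda form seen in files below (e.g. np_broadcast_reduce_op_value.cc) is interchangeable with these functors; both must match the updated nnvm::FGradient signature.
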
diff --git a/src/operator/fusion/fused_op.cc b/src/operator/fusion/fused_op.cc
index 5e2d782..2ac0b53 100644
--- a/src/operator/fusion/fused_op.cc
+++ b/src/operator/fusion/fused_op.cc
@@ -145,7 +145,7 @@ bool FusedOp::InferType(const nnvm::NodeAttrs &attrs,
 }
 
 template <typename Attr>
-std::tuple<const nnvm::NodePtr,
+std::tuple<const nnvm::ObjectPtr,
            std::vector<Attr>,
            std::vector<Attr>>
 FusedOp::GetAttrs(const std::string& attr_name, const uint32_t node_id) {
@@ -192,7 +192,7 @@ bool FusedOpInferType(const nnvm::NodeAttrs& attrs,
 }
 
 void FusedOpProvideShape(const nnvm::NodeAttrs& attrs,
-                         const std::vector<nnvm::NodePtr>& nodes,
+                         const std::vector<nnvm::ObjectPtr>& nodes,
                          const std::vector<std::vector<mxnet::TShape>> &in_attrs,
                          const std::vector<std::vector<mxnet::TShape>> &out_attrs) {
   const FusedOpPtr& op = nnvm::get<FusedOpPtr>(attrs.parsed);
@@ -200,7 +200,7 @@ void FusedOpProvideShape(const nnvm::NodeAttrs& attrs,
 }
 
 void FusedOpProvideType(const nnvm::NodeAttrs& attrs,
-                        const std::vector<nnvm::NodePtr>& nodes,
+                        const std::vector<nnvm::ObjectPtr>& nodes,
                         const std::vector<std::vector<int>> &in_attrs,
                         const std::vector<std::vector<int>> &out_attrs) {
   const FusedOpPtr& op = nnvm::get<FusedOpPtr>(attrs.parsed);
@@ -208,7 +208,7 @@ void FusedOpProvideType(const nnvm::NodeAttrs& attrs,
 }
 
 void FusedOpProvideStorageType(const nnvm::NodeAttrs& attrs,
-                               const std::vector<nnvm::NodePtr>& nodes,
+                               const std::vector<nnvm::ObjectPtr>& nodes,
                                const std::vector<std::vector<int>> &in_attrs,
                                const std::vector<std::vector<int>> &out_attrs) {}
 
@@ -243,7 +243,7 @@ NNVM_REGISTER_OP(_FusedOp)
 .set_attr_parser(FusedOpParamParser)
 .add_argument("data", "NDArray-or-Symbol[]", "Data");
 
-std::tuple<const nnvm::NodePtr,
+std::tuple<const nnvm::ObjectPtr,
            std::vector<mxnet::TShape>,
            std::vector<mxnet::TShape>>
 FusedOpHelperShape(const NodeAttrs& attrs) {
@@ -253,7 +253,7 @@ FusedOpHelperShape(const NodeAttrs& attrs) {
   return op->GetAttrs<mxnet::TShape>("shape", node_id);
 }
 
-std::tuple<const nnvm::NodePtr,
+std::tuple<const nnvm::ObjectPtr,
            std::vector<int>,
            std::vector<int>>
 FusedOpHelperType(const NodeAttrs& attrs) {
@@ -272,7 +272,7 @@ NNVM_REGISTER_OP(_FusedOpHelper)
 .set_attr<exec::FAccessSubgraphType>("FAccessSubgraphType", FusedOpHelperType);
 
 
-std::tuple<const nnvm::NodePtr,
+std::tuple<const nnvm::ObjectPtr,
            std::vector<mxnet::TShape>,
            std::vector<mxnet::TShape>>
 FusedOpOutHelperShape(const NodeAttrs& attrs) {
@@ -282,7 +282,7 @@ FusedOpOutHelperShape(const NodeAttrs& attrs) {
   return op->GetAuxShape(node_id);
 }
 
-std::tuple<const nnvm::NodePtr,
+std::tuple<const nnvm::ObjectPtr,
            std::vector<int>,
            std::vector<int>>
 FusedOpOutHelperType(const NodeAttrs& attrs) {
diff --git a/src/operator/fusion/fused_op.h b/src/operator/fusion/fused_op.h
index 1296472..3a1db4e 100644
--- a/src/operator/fusion/fused_op.h
+++ b/src/operator/fusion/fused_op.h
@@ -85,13 +85,13 @@ class FusedOp {
                  std::vector<int> *out_attrs);
 
   template <typename Attr>
-  std::tuple<const nnvm::NodePtr,
+  std::tuple<const nnvm::ObjectPtr,
              std::vector<Attr>,
              std::vector<Attr>>
     GetAttrs(const std::string& attr_name,
              const uint32_t node_id);
 
-  void ProvideShape(const std::vector<nnvm::NodePtr>& nodes,
+  void ProvideShape(const std::vector<nnvm::ObjectPtr>& nodes,
                     const std::vector<std::vector<mxnet::TShape>> &in_attrs,
                     const std::vector<std::vector<mxnet::TShape>> &out_attrs) {
     aux_nodes_ = nodes;
@@ -99,7 +99,7 @@ class FusedOp {
     aux_out_shapes_ = out_attrs;
   }
 
-  void ProvideType(const std::vector<nnvm::NodePtr>& nodes,
+  void ProvideType(const std::vector<nnvm::ObjectPtr>& nodes,
                    const std::vector<std::vector<int>> &in_attrs,
                    const std::vector<std::vector<int>> &out_attrs) {
     aux_nodes_ = nodes;
@@ -107,7 +107,7 @@ class FusedOp {
     aux_out_types_ = out_attrs;
   }
 
-  std::tuple<const nnvm::NodePtr,
+  std::tuple<const nnvm::ObjectPtr,
              std::vector<mxnet::TShape>,
              std::vector<mxnet::TShape>>
     GetAuxShape(const int node_id) const {
@@ -116,7 +116,7 @@ class FusedOp {
                            aux_out_shapes_[node_id]);
   }
 
-  std::tuple<const nnvm::NodePtr,
+  std::tuple<const nnvm::ObjectPtr,
              std::vector<int>,
              std::vector<int>>
     GetAuxType(const int node_id) const {
@@ -169,7 +169,7 @@ class FusedOp {
   std::vector<IntermediateAttr<mxnet::TShape> > intermediate_shapes_;
   std::vector<IntermediateAttr<int> > intermediate_dtypes_;
 
-  std::vector<nnvm::NodePtr> aux_nodes_;
+  std::vector<nnvm::ObjectPtr> aux_nodes_;
   std::vector<std::vector<mxnet::TShape>> aux_in_shapes_;
   std::vector<std::vector<mxnet::TShape>> aux_out_shapes_;
   std::vector<std::vector<int>> aux_in_types_;
diff --git a/src/operator/identity_attach_KL_sparse_reg.cc b/src/operator/identity_attach_KL_sparse_reg.cc
index df0919d..09e1973 100644
--- a/src/operator/identity_attach_KL_sparse_reg.cc
+++ b/src/operator/identity_attach_KL_sparse_reg.cc
@@ -45,7 +45,7 @@ MXNET_REGISTER_OP_PROPERTY(IdentityAttachKLSparseReg, IdentityAttachKLSparseRegP
 
 NNVM_REGISTER_OP(IdentityAttachKLSparseReg)
 .set_attr<nnvm::FSetInputVarAttrOnCompose>("FSetInputVarAttrOnCompose",
-    [](const nnvm::NodeAttrs& attrs, nnvm::NodePtr var, const int index) {
+    [](const nnvm::NodeAttrs& attrs, nnvm::ObjectPtr var, const int index) {
       if (var->attrs.dict.find("__init__") != var->attrs.dict.end()) return;
       if (index == 1) {
         var->attrs.dict["__init__"] = "[\"zero\", {}]";
diff --git a/src/operator/leaky_relu.cc b/src/operator/leaky_relu.cc
index c2414ad..d3ed234 100644
--- a/src/operator/leaky_relu.cc
+++ b/src/operator/leaky_relu.cc
@@ -199,7 +199,7 @@ The following modified ReLU Activation functions are supported:
 .add_argument("gamma", "NDArray-or-Symbol", "Input data to activation function.")
 .add_arguments(LeakyReLUParam::__FIELDS__())
 .set_attr<nnvm::FSetInputVarAttrOnCompose>("FSetInputVarAttrOnCompose",
-    [](const nnvm::NodeAttrs& attrs, nnvm::NodePtr var, const int index) {
+    [](const nnvm::NodeAttrs& attrs, nnvm::ObjectPtr var, const int index) {
       if (index == 1 && var->attrs.dict.find("__init__") == var->attrs.dict.end()) {
         var->attrs.dict["__init__"] = "[\"Constant\", {\"value\": 0.25}]";
       }
diff --git a/src/operator/nn/activation.cc b/src/operator/nn/activation.cc
index ce5fb3e..1259ceb 100644
--- a/src/operator/nn/activation.cc
+++ b/src/operator/nn/activation.cc
@@ -63,7 +63,7 @@ DMLC_REGISTER_PARAMETER(ActivationParam);
 // This will determine the order of the inputs for backward computation.
 struct ActivationGrad {
   const char *op_name;
-  std::vector<nnvm::NodeEntry> operator()(const nnvm::NodePtr& n,
+  std::vector<nnvm::NodeEntry> operator()(const nnvm::ObjectPtr& n,
                                           const std::vector<nnvm::NodeEntry>& ograds) const {
     // ograds, output...
     std::vector<nnvm::NodeEntry> heads(ograds.begin(), ograds.end());
diff --git a/src/operator/nn/batch_norm.cc b/src/operator/nn/batch_norm.cc
index ea1c769..0baf365 100644
--- a/src/operator/nn/batch_norm.cc
+++ b/src/operator/nn/batch_norm.cc
@@ -452,7 +452,7 @@ static inline bool BatchNormStorageType(const nnvm::NodeAttrs &attrs,
   return dispatched;
 }
 
-std::vector<nnvm::NodeEntry> BatchNormGrad(const nnvm::NodePtr& n,
+std::vector<nnvm::NodeEntry> BatchNormGrad(const nnvm::ObjectPtr& n,
                                            const std::vector<nnvm::NodeEntry>& ograds) {
   std::vector<nnvm::NodeEntry> out_data;
   out_data.reserve(n->num_outputs());
@@ -469,7 +469,7 @@ std::vector<nnvm::NodeEntry> BatchNormGrad(const nnvm::NodePtr& n,
   heads.emplace_back(n->inputs.at(batchnorm::kInMovingMean));
   heads.emplace_back(n->inputs.at(batchnorm::kInMovingVar));
 
-  nnvm::NodePtr gnode = nnvm::Node::Create();
+  nnvm::ObjectPtr gnode = nnvm::Node::Create();
   gnode->inputs = std::move(heads);
   gnode->control_deps.emplace_back(n);
   gnode->attrs = n->attrs;
@@ -481,7 +481,7 @@ std::vector<nnvm::NodeEntry> BatchNormGrad(const nnvm::NodePtr& n,
   for (size_t i = 0; i < 3; ++i)
     in_grad.emplace_back(gnode, i, 0);
   // attach no gradient node to forbid gradient on aux_state
-  nnvm::NodePtr ng = nnvm::Node::Create();
+  nnvm::ObjectPtr ng = nnvm::Node::Create();
   ng->attrs.op = Op::Get("_NoGradient");
   ng->attrs.name = "NoGradient";
   // the aux state of batchnorm
@@ -583,7 +583,7 @@ then set ``gamma`` to 1 and its gradient to 0.
 .add_arguments(BatchNormParam::__FIELDS__())
 .set_attr<nnvm::FSetInputVarAttrOnCompose>(
   "FSetInputVarAttrOnCompose",
-  [](const nnvm::NodeAttrs& attrs, nnvm::NodePtr var, const int index) {
+  [](const nnvm::NodeAttrs& attrs, nnvm::ObjectPtr var, const int index) {
     if (var->attrs.dict.find("__init__") != var->attrs.dict.end()) return;
     if (index == 3) {
       var->attrs.dict["__init__"] = "[\"zero\", {}]";
diff --git a/src/operator/nn/concat.cc b/src/operator/nn/concat.cc
index 081ffde..4b2d0bf 100644
--- a/src/operator/nn/concat.cc
+++ b/src/operator/nn/concat.cc
@@ -300,7 +300,7 @@ static void ConcatGradComputeExCPU(const nnvm::NodeAttrs& attrs,
 
 struct ConcatGrad {
   const char *op_name;
-  std::vector<nnvm::NodeEntry> operator()(const nnvm::NodePtr& n,
+  std::vector<nnvm::NodeEntry> operator()(const nnvm::ObjectPtr& n,
                                           const std::vector<nnvm::NodeEntry>& ograds) const {
     CHECK_EQ(ograds.size(), 1);
     std::vector<nnvm::NodeEntry> heads(ograds.begin(), ograds.end());
diff --git a/src/operator/nn/convolution.cc b/src/operator/nn/convolution.cc
index 36ee4e0..a07c1ad 100644
--- a/src/operator/nn/convolution.cc
+++ b/src/operator/nn/convolution.cc
@@ -384,7 +384,7 @@ void ConvolutionParamParser(nnvm::NodeAttrs* attrs) {
 
 struct ConvolutionGrad {
   const char *op_name;
-  std::vector<nnvm::NodeEntry> operator()(const nnvm::NodePtr& n,
+  std::vector<nnvm::NodeEntry> operator()(const nnvm::ObjectPtr& n,
                                           const std::vector<nnvm::NodeEntry>& ograds) const {
     const ConvolutionParam& param = nnvm::get<ConvolutionParam>(n->attrs.parsed);
     std::vector<nnvm::NodeEntry> heads(ograds.begin(), ograds.end());
diff --git a/src/operator/nn/cudnn/cudnn_batch_norm.cc b/src/operator/nn/cudnn/cudnn_batch_norm.cc
index d691b78..1290ba8 100644
--- a/src/operator/nn/cudnn/cudnn_batch_norm.cc
+++ b/src/operator/nn/cudnn/cudnn_batch_norm.cc
@@ -96,7 +96,7 @@ NNVM_REGISTER_OP(CuDNNBatchNorm)
 .add_arguments(BatchNormParam::__FIELDS__())
 .set_attr<nnvm::FSetInputVarAttrOnCompose>(
   "FSetInputVarAttrOnCompose",
-  [](const nnvm::NodeAttrs& attrs, nnvm::NodePtr var, const int index) {
+  [](const nnvm::NodeAttrs& attrs, nnvm::ObjectPtr var, const int index) {
     if (var->attrs.dict.find("__init__") != var->attrs.dict.end()) return;
     if (index == 3) {
       var->attrs.dict["__init__"] = "[\"zero\", {}]";
diff --git a/src/operator/nn/deconvolution.cc b/src/operator/nn/deconvolution.cc
index f0a6f88..cd22ace 100644
--- a/src/operator/nn/deconvolution.cc
+++ b/src/operator/nn/deconvolution.cc
@@ -390,7 +390,7 @@ static void DeconvolutionParamParser(nnvm::NodeAttrs* attrs) {
 
 struct DeconvolutionGrad {
   const char *op_name;
-  std::vector<nnvm::NodeEntry> operator()(const nnvm::NodePtr& n,
+  std::vector<nnvm::NodeEntry> operator()(const nnvm::ObjectPtr& n,
                                           const std::vector<nnvm::NodeEntry>& ograds) const {
     std::vector<nnvm::NodeEntry> heads(ograds.begin(), ograds.end());
     heads.push_back(n->inputs[deconv::kData]);
diff --git a/src/operator/nn/dropout.cc b/src/operator/nn/dropout.cc
index 745bba1..163e8c0 100644
--- a/src/operator/nn/dropout.cc
+++ b/src/operator/nn/dropout.cc
@@ -51,7 +51,7 @@ OpStatePtr CreateDropoutState(const nnvm::NodeAttrs &attrs,
 
 struct DropoutGrad {
   const char *op_name;
-  std::vector<nnvm::NodeEntry> operator()(const nnvm::NodePtr& n,
+  std::vector<nnvm::NodeEntry> operator()(const nnvm::ObjectPtr& n,
                                           const std::vector<nnvm::NodeEntry>& ograds) const {
     std::vector<nnvm::NodeEntry> heads;
     heads.push_back(ograds[0]);
diff --git a/src/operator/nn/fully_connected.cc b/src/operator/nn/fully_connected.cc
index 1632486..06685c8 100644
--- a/src/operator/nn/fully_connected.cc
+++ b/src/operator/nn/fully_connected.cc
@@ -170,7 +170,7 @@ static bool FullyConnectedType(const nnvm::NodeAttrs& attrs,
 
 struct FullyConnectedGrad {
   const char *op_name;
-  std::vector<nnvm::NodeEntry> operator()(const nnvm::NodePtr& n,
+  std::vector<nnvm::NodeEntry> operator()(const nnvm::ObjectPtr& n,
                                           const std::vector<nnvm::NodeEntry>& ograds) const {
     std::vector<nnvm::NodeEntry> heads(ograds.begin(), ograds.end());
     heads.push_back(n->inputs[fullc::kData]);
@@ -181,7 +181,7 @@ struct FullyConnectedGrad {
 
 struct FullyConnectedGradGrad {
   const char *op_name;
-  std::vector<nnvm::NodeEntry> operator()(const nnvm::NodePtr& n,
+  std::vector<nnvm::NodeEntry> operator()(const nnvm::ObjectPtr& n,
                                           const std::vector<nnvm::NodeEntry>& ograds) const {
     std::vector<nnvm::NodeEntry> heads(ograds.begin(), ograds.end());
     heads.push_back(n->inputs[0]);  // o_y : head gradient of the output y
diff --git a/src/operator/nn/group_norm.cc b/src/operator/nn/group_norm.cc
index 06430c2..6b8fe9b 100644
--- a/src/operator/nn/group_norm.cc
+++ b/src/operator/nn/group_norm.cc
@@ -94,7 +94,7 @@ Both ``gamma`` and ``beta`` are learnable parameters.
 .set_attr<mxnet::FInferShape>("FInferShape", GroupNormShape)
 .set_attr<nnvm::FInferType>("FInferType", ElemwiseType<3, 3>)
 .set_attr<FCompute>("FCompute<cpu>", GroupNormCompute<cpu>)
-.set_attr<nnvm::FGradient>("FGradient", [](const nnvm::NodePtr& n,
+.set_attr<nnvm::FGradient>("FGradient", [](const nnvm::ObjectPtr& n,
                                            const std::vector<nnvm::NodeEntry>& ograds) {
   std::vector<nnvm::NodeEntry> heads;
   heads.push_back(ograds[0]);  // ograd
diff --git a/src/operator/nn/layer_norm.cc b/src/operator/nn/layer_norm.cc
index 1b2a43b..21ec525 100644
--- a/src/operator/nn/layer_norm.cc
+++ b/src/operator/nn/layer_norm.cc
@@ -177,7 +177,7 @@ axis to be the last item in the input shape.
 #else
 .set_attr<FCompute>("FCompute<cpu>", LayerNormCompute<cpu>)
 #endif
-.set_attr<nnvm::FGradient>("FGradient", [](const nnvm::NodePtr& n,
+.set_attr<nnvm::FGradient>("FGradient", [](const nnvm::ObjectPtr& n,
                                            const std::vector<nnvm::NodeEntry>& ograds) {
   std::vector<nnvm::NodeEntry> heads;
   heads.push_back(ograds[0]);  // ograd
diff --git a/src/operator/nn/lrn.cc b/src/operator/nn/lrn.cc
index 1496791..1ac3840 100644
--- a/src/operator/nn/lrn.cc
+++ b/src/operator/nn/lrn.cc
@@ -72,7 +72,7 @@ bool LRNType(const nnvm::NodeAttrs& attrs,
 
 struct LRNGrad {
   const char *op_name;
-  std::vector<nnvm::NodeEntry> operator()(const nnvm::NodePtr& n,
+  std::vector<nnvm::NodeEntry> operator()(const nnvm::ObjectPtr& n,
                 const std::vector<nnvm::NodeEntry>& ograds) const {
     std::vector<nnvm::NodeEntry> heads;
     heads.push_back(ograds[0]);  // out_grad
diff --git a/src/operator/nn/softmax-inl.h b/src/operator/nn/softmax-inl.h
index 2dbdbe1..f8a3fe4 100644
--- a/src/operator/nn/softmax-inl.h
+++ b/src/operator/nn/softmax-inl.h
@@ -758,7 +758,7 @@ static inline std::vector<std::string> SoftmaxGradOpInputNames(const nnvm::NodeA
 
 struct SoftmaxFGradient {
   const char *op_name;
-  std::vector<nnvm::NodeEntry> operator()(const nnvm::NodePtr& n,
+  std::vector<nnvm::NodeEntry> operator()(const nnvm::ObjectPtr& n,
                                           const std::vector<nnvm::NodeEntry>& ograds) const {
     if (softmax_has_dtype_override(n->attrs) || softmax_use_length(n->attrs)) {
       return ElemwiseGradUseInOut {op_name}(n, ograds);
diff --git a/src/operator/nn/upsampling.cc b/src/operator/nn/upsampling.cc
index 8000106..d471eba 100644
--- a/src/operator/nn/upsampling.cc
+++ b/src/operator/nn/upsampling.cc
@@ -106,7 +106,7 @@ static bool UpSamplingType(const nnvm::NodeAttrs& attrs,
 
 struct UpSamplingGrad {
   const char *op_name;
-  std::vector<nnvm::NodeEntry> operator()(const nnvm::NodePtr& n,
+  std::vector<nnvm::NodeEntry> operator()(const nnvm::ObjectPtr& n,
                                           const std::vector<nnvm::NodeEntry>& ograds) const {
     const UpSamplingParam& param_ = nnvm::get<UpSamplingParam>(n->attrs.parsed);
     std::vector<nnvm::NodeEntry> heads(ograds.begin(), ograds.end());
@@ -203,7 +203,7 @@ Example::
               "For bilinear upsampling, there should be 2 inputs - 1 data and 1 weight.")
 .add_arguments(UpSamplingParam::__FIELDS__())
 .set_attr<nnvm::FSetInputVarAttrOnCompose>("FSetInputVarAttrOnCompose",
-    [](const nnvm::NodeAttrs& attrs, nnvm::NodePtr var, const int index) {
+    [](const nnvm::NodeAttrs& attrs, nnvm::ObjectPtr var, const int index) {
       if (var->attrs.dict.find("__init__") != var->attrs.dict.end()) return;
       if (index == 1) {
         var->attrs.dict["__init__"] = "[\"bilinear\", {}]";
diff --git a/src/operator/numpy/np_broadcast_reduce_op_value.cc b/src/operator/numpy/np_broadcast_reduce_op_value.cc
index 8b6e8b7..1a74099 100644
--- a/src/operator/numpy/np_broadcast_reduce_op_value.cc
+++ b/src/operator/numpy/np_broadcast_reduce_op_value.cc
@@ -494,7 +494,7 @@ NNVM_REGISTER_OP(_npi_broadcast_to)
   })
 .set_attr<nnvm::FInferType>("FInferType", ElemwiseType<1, 1>)
 .set_attr<nnvm::FGradient>("FGradient",
-  [](const nnvm::NodePtr& n,
+  [](const nnvm::ObjectPtr& n,
     const std::vector<nnvm::NodeEntry>& ograds) {
     return MakeNonlossGradNode("_backward_np_broadcast_to", n, ograds, {}, n->attrs.dict);
   })
diff --git a/src/operator/numpy/np_matrix_op.cc b/src/operator/numpy/np_matrix_op.cc
index 3dcd410..7c48152 100644
--- a/src/operator/numpy/np_matrix_op.cc
+++ b/src/operator/numpy/np_matrix_op.cc
@@ -111,7 +111,7 @@ NNVM_REGISTER_OP(_np_transpose)
 .set_attr<mxnet::FInferShape>("FInferShape", NumpyTransposeShape)
 .set_attr<nnvm::FInferType>("FInferType", ElemwiseType<1, 1>)
 .set_attr<nnvm::FGradient>("FGradient",
-  [](const nnvm::NodePtr& n, const std::vector<nnvm::NodeEntry>& ograds) {
+  [](const nnvm::ObjectPtr& n, const std::vector<nnvm::NodeEntry>& ograds) {
     const NumpyTransposeParam& param = nnvm::get<NumpyTransposeParam>(n->attrs.parsed);
     if (ndim_is_known(param.axes)) {
       mxnet::TShape axes = mxnet::TShape(param.axes.ndim(), -1);
@@ -664,7 +664,7 @@ bool NumpyConcatenateShape(const nnvm::NodeAttrs& attrs,
 
 struct NumpyConcatGrad {
   const char *op_name;
-  std::vector<nnvm::NodeEntry> operator()(const nnvm::NodePtr& n,
+  std::vector<nnvm::NodeEntry> operator()(const nnvm::ObjectPtr& n,
                                           const std::vector<nnvm::NodeEntry>& ograds) const {
     CHECK_EQ(ograds.size(), 1);
     std::vector<nnvm::NodeEntry> heads(ograds.begin(), ograds.end());
@@ -1167,7 +1167,7 @@ NNVM_REGISTER_OP(_np_roll)
 .set_attr<nnvm::FInferType>("FInferType", ElemwiseType<1, 1>)
 .set_attr<mxnet::FCompute>("FCompute<cpu>", NumpyRollCompute<cpu>)
 .set_attr<nnvm::FGradient>("FGradient",
-  [](const nnvm::NodePtr& n, const std::vector<nnvm::NodeEntry>& ograds) {
+  [](const nnvm::ObjectPtr& n, const std::vector<nnvm::NodeEntry>& ograds) {
      const NumpyRollParam& param = nnvm::get<NumpyRollParam>(n->attrs.parsed);
      if (!param.shift.has_value()) {
        LOG(FATAL) << "roll missing 1 required positional argument: 'shift'.";
@@ -1268,7 +1268,7 @@ Other axes remain in their original order.
 .set_attr<mxnet::FInferShape>("FInferShape", NumpyMoveaxisShape)
 .set_attr<nnvm::FInferType>("FInferType", ElemwiseType<1, 1>)
 .set_attr<nnvm::FGradient>("FGradient",
-  [](const nnvm::NodePtr& n, const std::vector<nnvm::NodeEntry>& ograds) {
+  [](const nnvm::ObjectPtr& n, const std::vector<nnvm::NodeEntry>& ograds) {
      const NumpyMoveaxisParam& param = nnvm::get<NumpyMoveaxisParam>(n->attrs.parsed);
      std::ostringstream os1;
      os1 << param.source;
@@ -1341,7 +1341,7 @@ NNVM_REGISTER_OP(_npi_rot90)
 .set_attr<nnvm::FInferType>("FInferType", ElemwiseType<1, 1>)
 .set_attr<mxnet::FCompute>("FCompute<cpu>", NumpyRot90Compute<cpu>)
 .set_attr<nnvm::FGradient>("FGradient",
-  [](const nnvm::NodePtr& n, const std::vector<nnvm::NodeEntry>& ograds) {
+  [](const nnvm::ObjectPtr& n, const std::vector<nnvm::NodeEntry>& ograds) {
      const NumpyRot90Param& param = nnvm::get<NumpyRot90Param>(n->attrs.parsed);
      std::ostringstream os1;
      os1 << param.k;
diff --git a/src/operator/numpy/np_where_op.cc b/src/operator/numpy/np_where_op.cc
index 6cca0c5..ba7cb14 100644
--- a/src/operator/numpy/np_where_op.cc
+++ b/src/operator/numpy/np_where_op.cc
@@ -92,7 +92,7 @@ NNVM_REGISTER_OP(_npi_where)
   // Use the following lambda function instead of ElemwiseGradUseIn
   // for best efficiency. grad[condition] = 0; to calculate grad[x] and grad[y]
   // we need only condition from input.
-  [](const nnvm::NodePtr& n, const std::vector<nnvm::NodeEntry>& ograds) {
+  [](const nnvm::ObjectPtr& n, const std::vector<nnvm::NodeEntry>& ograds) {
     std::vector<nnvm::NodeEntry> ret;
     // make zero grad node for grad[condition]
     auto p = MakeNode("zeros_like", n->attrs.name + "_cond_backward",
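
The comment in this hunk carries the key reasoning: grad[condition] is identically zero, and grad[x]/grad[y] can be computed from the head gradient plus condition alone, so only n->inputs[0] needs to be captured. A hedged sketch of that overall structure ("_backward_where_sketch" is a hypothetical backward op used purely for illustration; the helpers come from operator_common.h below):

    nnvm::FGradient where_style_grad =
      [](const nnvm::ObjectPtr& n, const std::vector<nnvm::NodeEntry>& ograds) {
        std::vector<nnvm::NodeEntry> ret;
        // grad[condition] == 0: emit a zeros_like node tied to the condition input.
        auto zero = MakeNode("zeros_like", n->attrs.name + "_cond_backward",
                             {n->inputs[0]}, nullptr, &n);
        ret.emplace_back(zero);
        // grad[x], grad[y]: backward node that consumes only ograd and condition.
        auto xy_grads = MakeNonlossGradNode("_backward_where_sketch", n, ograds,
                                            {n->inputs[0]}, n->attrs.dict);
        ret.insert(ret.end(), xy_grads.begin(), xy_grads.end());
        return ret;
      };
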
diff --git a/src/operator/operator_common.h b/src/operator/operator_common.h
index 9291826..bdc6793 100644
--- a/src/operator/operator_common.h
+++ b/src/operator/operator_common.h
@@ -360,7 +360,7 @@ inline bool dispatch_fallback(StorageTypeVector* stypes, DispatchMode* dispatch)
 }
 
 inline std::vector<nnvm::NodeEntry>CreateNodeEntries(
-  nnvm::NodePtr pNode,
+  nnvm::ObjectPtr pNode,
   const std::vector<nnvm::NodeEntry>* pOgrads = nullptr,
   const std::vector<nnvm::NodeEntry>* pInputs = nullptr) {
   if (pOgrads)
@@ -384,11 +384,11 @@ inline std::vector<nnvm::NodeEntry>CreateNodeEntries(
 }
 
 // make a new node with operator op_name. Inputs are not filled.
-inline nnvm::NodePtr MakeNode(
+inline nnvm::ObjectPtr MakeNode(
     const char* op_name, const std::string& name,
     std::vector<nnvm::NodeEntry> const * inputs = nullptr,
     std::unordered_map<std::string, std::string> const * dict = nullptr,
-    nnvm::NodePtr const * fwd_node = nullptr) {
+    nnvm::ObjectPtr const * fwd_node = nullptr) {
   auto p = nnvm::Node::Create();
   p->attrs.op = nnvm::Op::Get(op_name);
   p->attrs.name = name;
@@ -409,18 +409,18 @@ inline nnvm::NodePtr MakeNode(
   return p;
 }
 
-inline nnvm::NodePtr MakeNode(
+inline nnvm::ObjectPtr MakeNode(
     const char* op_name, const std::string& name,
     const std::vector<nnvm::NodeEntry>& inputs,
     std::unordered_map<std::string, std::string> const * dict,
-    nnvm::NodePtr const * fwd_node) {
+    nnvm::ObjectPtr const * fwd_node) {
   return MakeNode(op_name, name, &inputs, dict, fwd_node);
 }
 
 
 // quick helper to make node
 inline std::vector<nnvm::NodeEntry> MakeGradNode(
-    const char* op_name, const nnvm::NodePtr& n,
+    const char* op_name, const nnvm::ObjectPtr& n,
     const std::vector<nnvm::NodeEntry>& inputs,
     const std::unordered_map<std::string, std::string>& dict) {
   auto p = MakeNode(op_name, n->attrs.name + "_backward",
@@ -431,7 +431,7 @@ inline std::vector<nnvm::NodeEntry> MakeGradNode(
 
 // quick helper to make gradient nodes that simply pass back zero. could be used in output ops.
 inline std::vector<nnvm::NodeEntry> MakeZeroGradNodes(
-    const nnvm::NodePtr& n,
+    const nnvm::ObjectPtr& n,
     const std::vector<nnvm::NodeEntry>& ograds) {
   std::vector<nnvm::NodeEntry> ret;
   for (uint32_t i = 0; i < n->num_inputs(); ++i) {
@@ -465,7 +465,7 @@ inline bool CheckGradAllZero(const std::vector<nnvm::NodeEntry>& ograds) {
 // make gradient node that doesn't add to objective.
 // i.e. igrads are always zero when ograds are zero.
 inline std::vector<nnvm::NodeEntry> MakeNonlossGradNode(
-    const char* op_name, const nnvm::NodePtr& n,
+    const char* op_name, const nnvm::ObjectPtr& n,
     const std::vector<nnvm::NodeEntry>& ograds,
     const std::vector<nnvm::NodeEntry>& inputs,
     const std::unordered_map<std::string, std::string>& dict) {
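
These helpers are the main producers of the renamed handle, so nearly every FGradient above funnels through them. A tiny call-pattern sketch, assuming only what the signatures above show (the wrapper name zero_like_first_input and the node-name suffix are illustrative):

    // Create a standalone zeros_like node for the first input of n, in the
    // spirit of MakeZeroGradNodes above, and wrap it as a NodeEntry.
    std::vector<nnvm::NodeEntry> zero_like_first_input(const nnvm::ObjectPtr& n) {
      std::vector<nnvm::NodeEntry> inputs{n->inputs[0]};
      nnvm::ObjectPtr p = MakeNode("zeros_like", n->attrs.name + "_in0_backward",
                                   &inputs, nullptr, &n);
      return {nnvm::NodeEntry{p, 0, 0}};
    }
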
diff --git a/src/operator/quantization/quantize_graph_pass.cc b/src/operator/quantization/quantize_graph_pass.cc
index 229793f..5c43e13 100644
--- a/src/operator/quantization/quantize_graph_pass.cc
+++ b/src/operator/quantization/quantize_graph_pass.cc
@@ -38,11 +38,11 @@ namespace op {
 
 using nnvm::Symbol;
 using nnvm::Node;
-using nnvm::NodePtr;
+using nnvm::ObjectPtr;
 using nnvm::NodeEntry;
 using nnvm::Graph;
 
-static inline size_t GetNumOutputs(NodePtr node) {
+static inline size_t GetNumOutputs(ObjectPtr node) {
   // Get NumOutputs, check if current node has NumVisibleOutputs function, if yes, return
   // num_visible_outputs
   size_t num_outputs = node->num_outputs();
@@ -55,8 +55,8 @@ static inline size_t GetNumOutputs(NodePtr node) {
   return num_outputs;
 }
 
-NodePtr CreateNode(std::string op_name, std::string node_name) {
-  NodePtr node = Node::Create();
+ObjectPtr CreateNode(std::string op_name, std::string node_name) {
+  ObjectPtr node = Node::Create();
   node->attrs.name = node_name;
   if (op_name == "nullptr") {
     node->attrs.op = nullptr;
@@ -73,9 +73,9 @@ NodePtr CreateNode(std::string op_name, std::string node_name) {
  * \brief Insert a node named with node_name holding the op of op_name
  * before the node current and after the node previous.
  */
-NodePtr InsertNode(std::string op_name,
-    std::string node_name, NodePtr current, NodeEntry previous) {
-  NodePtr node = CreateNode(op_name, node_name);
+ObjectPtr InsertNode(std::string op_name,
+    std::string node_name, ObjectPtr current, NodeEntry previous) {
+  ObjectPtr node = CreateNode(op_name, node_name);
   node->inputs.emplace_back(previous);
   current->inputs.emplace_back(node);
   return node;
@@ -84,14 +84,14 @@ NodePtr InsertNode(std::string op_name,
 std::vector<NodeEntry> OfflineParams(std::vector<NodeEntry>&& outputs,
                                      const std::unordered_set<std::string>& offline_params) {
   std::string node_suffixs[3] = {"", "_min", "_max"};
-  std::unordered_map<Node*, NodePtr> mirror_map;
-  nnvm::NodeEntryMap<NodePtr> entry_var;
-  auto need_offline = [&](NodePtr n) {
+  std::unordered_map<Node*, ObjectPtr> mirror_map;
+  nnvm::NodeEntryMap<ObjectPtr> entry_var;
+  auto need_offline = [&](ObjectPtr n) {
     return (n->op() == Op::Get("_contrib_quantize_v2")) &&
            n->inputs[0].node->is_variable() &&
            offline_params.count(n->inputs[0].node->attrs.name);
   };
-  DFSVisit(outputs, [&](const NodePtr& node) {
+  DFSVisit(outputs, [&](const ObjectPtr& node) {
     for (NodeEntry& e : node->inputs) {
       if (need_offline(e.node)) {
         std::string node_name = e.node->attrs.name;
@@ -108,7 +108,7 @@ std::vector<NodeEntry> OfflineParams(std::vector<NodeEntry>&& outputs,
 }
 
 // To check if a node is registered with a computation function on a target device.
-bool isRegistered(NodePtr node, const int& dev_type) {
+bool isRegistered(ObjectPtr node, const int& dev_type) {
   const auto& op = node->op();
   Context ctx = Context::Create(static_cast<Context::DeviceType>(dev_type), 0);
   FCompute fcompute = common::GetFCompute<FCompute>(op, "FCompute", ctx);
@@ -121,13 +121,13 @@ bool isRegistered(NodePtr node, const int& dev_type) {
           fcomputestateful != nullptr || fcomputestateful_ex != nullptr);
 }
 
-inline QuantizeType NeedQuantize(NodePtr node,
+inline QuantizeType NeedQuantize(ObjectPtr node,
                                  const std::unordered_set<std::string>& excluded_nodes,
                                  const std::unordered_set<std::string>& excluded_ops,
                                  const int& dev_type,
-                                 std::unordered_map<NodePtr, NodePtr>* quantized_node_map,
+                                 std::unordered_map<ObjectPtr, ObjectPtr>* quantized_node_map,
                                  const std::string quantize_granularity) {
-  std::unordered_map<NodePtr, NodePtr> quantized_node;
+  std::unordered_map<ObjectPtr, ObjectPtr> quantized_node;
   static auto& quantizable_map = Op::GetAttr<mxnet::FQuantizable>("FQuantizable");
   static auto& quantized_op_map = Op::GetAttr<mxnet::FQuantizedOp>("FQuantizedOp");
   static auto& fexec_type = nnvm::Op::GetAttr<FExecType>("FExecType");
@@ -153,7 +153,7 @@ inline QuantizeType NeedQuantize(NodePtr node,
           // This is a fused subgraph node, try to match inner node.
           CHECK_EQ(node->attrs.subgraphs.size(), 1);
           auto subgraph_sym = node->attrs.subgraphs[0];
-          DFSVisit(subgraph_sym->outputs, [&](const nnvm::NodePtr& n) {
+          DFSVisit(subgraph_sym->outputs, [&](const nnvm::ObjectPtr& n) {
             if (n->is_variable()) return;
             if (excluded_nodes.count(n->attrs.name)) {
               need = false;
@@ -189,18 +189,18 @@ enum quantize_bit {
 };
 
 static void MarkQuantizedNodes(const Graph& src,
-                               std::unordered_map<NodePtr, NodePtr>* quantized_node_map) {
+                               std::unordered_map<ObjectPtr, ObjectPtr>* quantized_node_map) {
   const auto excluded_nodes = src.GetAttr<std::unordered_set<std::string>>("excluded_nodes");
   const auto excluded_ops = src.GetAttr<std::unordered_set<std::string>>("excluded_ops");
   const auto quantize_mode = src.GetAttr<std::string>("quantize_mode");
   const auto dev_type = src.GetAttr<int>("target_ctx");
   const auto quantize_granularity = src.GetAttr<std::string>("quantize_granularity");
 
-  std::unordered_map<NodePtr, std::vector<NodePtr>> node_output_map;
-  std::unordered_set<NodePtr> must_quantize_nodes;
-  std::unordered_map<NodePtr, int> support_quantize_nodes;
+  std::unordered_map<ObjectPtr, std::vector<ObjectPtr>> node_output_map;
+  std::unordered_set<ObjectPtr> must_quantize_nodes;
+  std::unordered_map<ObjectPtr, int> support_quantize_nodes;
   // Build node_output_map, must_quantize_nodes and support_quantize_nodes;
-  DFSVisit(src.outputs, [&](const NodePtr& node) {
+  DFSVisit(src.outputs, [&](const ObjectPtr& node) {
     auto quantize_type =
         NeedQuantize(node, excluded_nodes, excluded_ops, dev_type,
                      quantized_node_map, quantize_granularity);
@@ -218,7 +218,7 @@ static void MarkQuantizedNodes(const Graph& src,
     return;
   } else if (quantize_mode == "smart") {
     // Mark quantized nodes from input
-    std::queue<NodePtr> task_queue;
+    std::queue<ObjectPtr> task_queue;
     for (const auto& node : must_quantize_nodes) {
       task_queue.push(node);
     }
@@ -280,18 +280,18 @@ Graph QuantizeGraph(Graph &&src) {
                << " please set quantize_granularity to `tensor-wise` when quantizing model.";
   }
 
-  std::unordered_map<NodePtr, NodePtr> quantized_node_map;
+  std::unordered_map<ObjectPtr, ObjectPtr> quantized_node_map;
   MarkQuantizedNodes(src, &quantized_node_map);
 
   // mirror_map stores the mapping from the currently visited graph to the newly created quantized
   // graph. Key is the currently visited graph's node pointer, and value is a copied node of the key
   // node. The existing key's value may be updated with the newly created quantize/dequantize op.
-  std::unordered_map<Node*, NodePtr> mirror_map;
-  std::unordered_map<NodePtr, NodePtr> reverse_mirror_map;
+  std::unordered_map<Node*, ObjectPtr> mirror_map;
+  std::unordered_map<ObjectPtr, ObjectPtr> reverse_mirror_map;
   nnvm::NodeEntryMap<NodeEntry> mirror_entry_map;
   static int verbose = dmlc::GetEnv("MXNET_QUANTIZATION_VERBOSE", 0);
-  DFSVisit(src.outputs, [&](const NodePtr& node) {
-    NodePtr new_node = Node::Create();
+  DFSVisit(src.outputs, [&](const ObjectPtr& node) {
+    ObjectPtr new_node = Node::Create();
     // If the currently visited node needs quantization, insert a quantize op node before the
     // current node and replace the current node with the quantized version in the new graph.
     if (quantized_node_map.count(node)) {
@@ -303,7 +303,7 @@ Graph QuantizeGraph(Graph &&src) {
       // add data into quantized op input
       for (size_t i = 0; i < node->inputs.size(); ++i) {
         const auto& e = node->inputs[i];
-        NodePtr mirror_node = mirror_map.at(e.node.get());
+        ObjectPtr mirror_node = mirror_map.at(e.node.get());
         NodeEntry mirror_entry = NodeEntry{
           mirror_node, e.index, e.version};
         // If the NodeEntry e's node does not need quantization, and (the mirror_node is a variable,
@@ -333,7 +333,7 @@ Graph QuantizeGraph(Graph &&src) {
               }
             }
 
-            NodePtr quantize_node = InsertNode("_contrib_quantize_v2",
+            ObjectPtr quantize_node = InsertNode("_contrib_quantize_v2",
               e.node->attrs.name + suffix + "_quantize", new_node, mirror_entry);
             quantize_node->attrs.dict["out_type"] = quantized_dtype;
             quantize_node->op()->attr_parser(&(quantize_node->attrs));
@@ -353,7 +353,7 @@ Graph QuantizeGraph(Graph &&src) {
       // data1, data2, ..., min1, max1, min2, max2, ...
       for (size_t i = 0; i < node->inputs.size(); ++i) {
         const auto& e = node->inputs[i];
-        NodePtr mirror_node = mirror_map.at(e.node.get());
+        ObjectPtr mirror_node = mirror_map.at(e.node.get());
         if (mirror_node->op() == Op::Get("_contrib_dequantize")) {
           mirror_node = mirror_node->inputs[0].node;
         }
@@ -394,7 +394,7 @@ Graph QuantizeGraph(Graph &&src) {
       // out_data, min_range, and max_range.
       if (need_requantize_map.count(new_node->op()) > 0 &&
           need_requantize_map[new_node->op()](new_node->attrs)) {
-        NodePtr requantize_node = Node::Create();
+        ObjectPtr requantize_node = Node::Create();
         requantize_node->attrs.op = Op::Get("_contrib_requantize");
         requantize_node->attrs.name = "requantize_" + node->attrs.name;
         requantize_node->attrs.dict["out_type"] = quantized_dtype;
@@ -417,7 +417,7 @@ Graph QuantizeGraph(Graph &&src) {
       *new_node = *node;
       new_node->inputs.clear();
       for (const auto& e : node->inputs) {
-        NodePtr mirror_node = mirror_map.at(e.node.get());
+        ObjectPtr mirror_node = mirror_map.at(e.node.get());
         NodeEntry mirror_entry = NodeEntry{
           mirror_node, e.index, e.version};
         // if input node is quantized operator, add dequantize node
@@ -430,7 +430,7 @@ Graph QuantizeGraph(Graph &&src) {
           size_t num_outputs = GetNumOutputs(mirror_node) - 2;
           uint32_t min_index = num_outputs + 2 * e.index;
           uint32_t max_index = num_outputs + 2 * e.index + 1;
-          NodePtr dequantize_node = CreateNode("_contrib_dequantize",
+          ObjectPtr dequantize_node = CreateNode("_contrib_dequantize",
             e.node->attrs.name + "_dequantize");
           dequantize_node->inputs.emplace_back(mirror_entry);
           dequantize_node->inputs.emplace_back(mirror_node, min_index, 0);
@@ -456,7 +456,7 @@ Graph QuantizeGraph(Graph &&src) {
   for (const auto& e : src.outputs) {
     if (quantized_node_map.count(e.node)) {
       // Only insert dequantize for those Ops supports quantize and not excluded.
-      NodePtr mirror_node = mirror_map.at(e.node.get());
+      ObjectPtr mirror_node = mirror_map.at(e.node.get());
       NodeEntry mirror_entry = NodeEntry{mirror_node, e.index, e.version};
       // here we calculate the output number (exclude min/max, in order to
       // calculate min/max index from mirror node) based on assumption that
@@ -466,7 +466,7 @@ Graph QuantizeGraph(Graph &&src) {
       uint32_t min_index = num_outputs + 2 * e.index;
       uint32_t max_index = num_outputs + 2 * e.index + 1;
 
-      NodePtr dequantize_node = CreateNode("_contrib_dequantize",
+      ObjectPtr dequantize_node = CreateNode("_contrib_dequantize",
           e.node->attrs.name + "_dequantize");
       dequantize_node->inputs.emplace_back(mirror_entry);
       dequantize_node->inputs.emplace_back(mirror_node, min_index, 0);
@@ -488,7 +488,7 @@ Graph QuantizeGraph(Graph &&src) {
   static const auto& need_calib_output_map =
       Op::GetAttr<mxnet::FNeedCalibrateOutput>("FNeedCalibrateOutput");
   std::vector<std::string> calib_nodes;
-  DFSVisit(ret.outputs, [&](const NodePtr& node) {
+  DFSVisit(ret.outputs, [&](const ObjectPtr& node) {
     if (need_calib_input_map.count(node->op())) {
       const auto calib_idx = need_calib_input_map[node->op()](node->attrs);
       for (const auto &idx : calib_idx) {
@@ -526,7 +526,7 @@ Graph QuantizeGraph(Graph &&src) {
 }
 
 static inline void SetCalibTableForEntry(
-    const NodeEntry& e, const NodePtr& node,
+    const NodeEntry& e, const ObjectPtr& node,
     const std::unordered_map<std::string, std::pair<float, float>>& calib_table) {
   std::string out_data_name = common::GetOutputName(e);
   const std::string prefix = "quantized_";
@@ -562,7 +562,7 @@ Graph SetCalibTableToQuantizedGraph(Graph&& g) {
   if (verbose) {
     LOG(INFO) << "Set calibration result to quantized symbol.";
   }
-  DFSVisit(g.outputs, [&](const NodePtr& node) {
+  DFSVisit(g.outputs, [&](const ObjectPtr& node) {
     if (need_calib_input_map.count(node->op())) {
       const auto calib_idx = need_calib_input_map[node->op()](node->attrs);
       CHECK_EQ(calib_idx.size(), 1);
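
Most of the renamed handles in this commit sit in the pass above. The core splice it performs, per the hunks, is CreateNode/InsertNode plus an attribute dict and a call to the op's attr_parser; a compact sketch of that idiom (the wrapper function and its name are illustrative, while the op name and dict key are taken from the hunks above):

    // Insert a _contrib_quantize_v2 node between producer_entry and consumer,
    // mirroring the pattern QuantizeGraph uses when a consumer needs int8 input.
    void SpliceQuantize(const nnvm::NodeEntry& producer_entry,
                        const nnvm::ObjectPtr& consumer,
                        const std::string& quantized_dtype) {
      nnvm::ObjectPtr quantize_node = InsertNode(
          "_contrib_quantize_v2",
          producer_entry.node->attrs.name + "_quantize",
          consumer, producer_entry);
      quantize_node->attrs.dict["out_type"] = quantized_dtype;
      // Let the op's parser validate and normalize the freshly set attributes.
      quantize_node->op()->attr_parser(&(quantize_node->attrs));
    }
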
diff --git a/src/operator/quantization/quantized_activation.cc b/src/operator/quantization/quantized_activation.cc
index 40a28d6..b4ef03b 100644
--- a/src/operator/quantization/quantized_activation.cc
+++ b/src/operator/quantization/quantized_activation.cc
@@ -119,7 +119,7 @@ the float32 data into int8.
 NNVM_REGISTER_OP(Activation)
 .set_attr<FQuantizedOp>("FQuantizedOp", [](const NodeAttrs& attrs) {
   const ActivationParam& param = nnvm::get<ActivationParam>(attrs.parsed);
-  nnvm::NodePtr node = nnvm::Node::Create();
+  nnvm::ObjectPtr node = nnvm::Node::Create();
   if (param.act_type == activation::kReLU) {
     node->attrs.op = Op::Get("_contrib_quantized_act");
     node->attrs.name = "quantized_" + attrs.name;
diff --git a/src/operator/quantization/quantized_batch_norm.cc b/src/operator/quantization/quantized_batch_norm.cc
index 91baf43..f197ebd 100644
--- a/src/operator/quantization/quantized_batch_norm.cc
+++ b/src/operator/quantization/quantized_batch_norm.cc
@@ -126,7 +126,7 @@ the float32 data into int8.
 
 NNVM_REGISTER_OP(BatchNorm)
 .set_attr<FQuantizedOp>("FQuantizedOp", [](const NodeAttrs& attrs) {
-    nnvm::NodePtr node = nnvm::Node::Create();
+    nnvm::ObjectPtr node = nnvm::Node::Create();
     node->attrs.op = Op::Get("_contrib_quantized_batch_norm");
     node->attrs.name = "quantized_" + attrs.name;
     node->attrs.dict = attrs.dict;
diff --git a/src/operator/quantization/quantized_concat.cc b/src/operator/quantization/quantized_concat.cc
index 5835701..2cd219c 100644
--- a/src/operator/quantization/quantized_concat.cc
+++ b/src/operator/quantization/quantized_concat.cc
@@ -139,7 +139,7 @@ If any input holds int8, then the output will be int8. Otherwise output will be
 NNVM_REGISTER_OP(Concat)
 .set_attr<FQuantizedOp>("FQuantizedOp", [](const NodeAttrs& attrs) {
   const ConcatParam& param = nnvm::get<ConcatParam>(attrs.parsed);
-  nnvm::NodePtr node = nnvm::Node::Create();
+  nnvm::ObjectPtr node = nnvm::Node::Create();
   if (param.dim > 0) {
     node->attrs.op = Op::Get("_contrib_quantized_concat");
     node->attrs.name = "quantized_" + attrs.name;
diff --git a/src/operator/quantization/quantized_conv.cc b/src/operator/quantization/quantized_conv.cc
index 9d774dd..a4c3ab7 100644
--- a/src/operator/quantization/quantized_conv.cc
+++ b/src/operator/quantization/quantized_conv.cc
@@ -184,7 +184,7 @@ NNVM_REGISTER_OP(Convolution)
     return QuantizeType::kMust;
 })
 .set_attr<FQuantizedOp>("FQuantizedOp", [](const NodeAttrs& attrs) {
-    nnvm::NodePtr node = nnvm::Node::Create();
+    nnvm::ObjectPtr node = nnvm::Node::Create();
     node->attrs.op = Op::Get("_contrib_quantized_conv");
     node->attrs.name = "quantized_" + attrs.name;
     node->attrs.dict = attrs.dict;
diff --git a/src/operator/quantization/quantized_elemwise_add.cc b/src/operator/quantization/quantized_elemwise_add.cc
index f821e65..f335a07 100644
--- a/src/operator/quantization/quantized_elemwise_add.cc
+++ b/src/operator/quantization/quantized_elemwise_add.cc
@@ -127,7 +127,7 @@ and max thresholds representing the threholds for quantizing the float32 output
 
 NNVM_REGISTER_OP(elemwise_add)
 .set_attr<FQuantizedOp>("FQuantizedOp", [](const NodeAttrs& attrs) {
-  nnvm::NodePtr node = nnvm::Node::Create();
+  nnvm::ObjectPtr node = nnvm::Node::Create();
   node->attrs.op = Op::Get("_contrib_quantized_elemwise_add");
   node->attrs.name = "quantized_" + attrs.name;
   node->attrs.dict = attrs.dict;
diff --git a/src/operator/quantization/quantized_elemwise_mul.cc b/src/operator/quantization/quantized_elemwise_mul.cc
index a752c14..fb0df3c 100644
--- a/src/operator/quantization/quantized_elemwise_mul.cc
+++ b/src/operator/quantization/quantized_elemwise_mul.cc
@@ -253,7 +253,7 @@ NNVM_REGISTER_OP(_contrib_quantized_elemwise_mul)
 
 NNVM_REGISTER_OP(elemwise_mul)
 .set_attr<FQuantizedOp>("FQuantizedOp", [](const NodeAttrs& attrs) {
-  nnvm::NodePtr node = nnvm::Node::Create();
+  nnvm::ObjectPtr node = nnvm::Node::Create();
   node->attrs.op = Op::Get("_contrib_quantized_elemwise_mul");
   node->attrs.name = "quantized_" + attrs.name;
   node->attrs.dict = attrs.dict;
diff --git a/src/operator/quantization/quantized_flatten.cc b/src/operator/quantization/quantized_flatten.cc
index 7e6d27b..c8bf3e2 100644
--- a/src/operator/quantization/quantized_flatten.cc
+++ b/src/operator/quantization/quantized_flatten.cc
@@ -57,7 +57,7 @@ NNVM_REGISTER_OP(_contrib_quantized_flatten)
 
 NNVM_REGISTER_OP(Flatten)
 .set_attr<FQuantizedOp>("FQuantizedOp", [](const NodeAttrs& attrs) {
-    nnvm::NodePtr node = nnvm::Node::Create();
+    nnvm::ObjectPtr node = nnvm::Node::Create();
     node->attrs.op = Op::Get("_contrib_quantized_flatten");
     node->attrs.name = "quantized_" + attrs.name;
     node->attrs.dict = attrs.dict;
diff --git a/src/operator/quantization/quantized_fully_connected.cc b/src/operator/quantization/quantized_fully_connected.cc
index 4c9d9d2..d88aac8 100644
--- a/src/operator/quantization/quantized_fully_connected.cc
+++ b/src/operator/quantization/quantized_fully_connected.cc
@@ -365,7 +365,7 @@ NNVM_REGISTER_OP(FullyConnected)
     return QuantizeType::kMust;
 })
 .set_attr<FQuantizedOp>("FQuantizedOp", [](const NodeAttrs& attrs) {
-    nnvm::NodePtr node = nnvm::Node::Create();
+    nnvm::ObjectPtr node = nnvm::Node::Create();
     node->attrs.op = Op::Get("_contrib_quantized_fully_connected");
     node->attrs.name = "quantized_" + attrs.name;
     node->attrs.dict = attrs.dict;
diff --git a/src/operator/quantization/quantized_indexing_op.cc b/src/operator/quantization/quantized_indexing_op.cc
index 66f6936..7f5af2e 100644
--- a/src/operator/quantization/quantized_indexing_op.cc
+++ b/src/operator/quantization/quantized_indexing_op.cc
@@ -167,7 +167,7 @@ NNVM_REGISTER_OP(Embedding)
 .set_attr<FQuantizedOp>("FQuantizedOp", [](const NodeAttrs& attrs) {
     EmbeddingParam param;
     param.Init(attrs.dict);
-    nnvm::NodePtr node = nnvm::Node::Create();
+    nnvm::ObjectPtr node = nnvm::Node::Create();
     if (param.dtype == mshadow::kFloat32) {
       node->attrs.op = Op::Get("_contrib_quantized_embedding");
       node->attrs.name = "quantized_" + attrs.name;
diff --git a/src/operator/quantization/quantized_pooling.cc b/src/operator/quantization/quantized_pooling.cc
index eeb2ac4..1a32ba1 100644
--- a/src/operator/quantization/quantized_pooling.cc
+++ b/src/operator/quantization/quantized_pooling.cc
@@ -179,7 +179,7 @@ NNVM_REGISTER_OP(Pooling)
     // TODO(junwu): Uncomment the following line and remove the above lines
     // after pooling op is refactored
     // const PoolingParam& param = nnvm::get<PoolingParam>(attrs.parsed);
-    nnvm::NodePtr node = nnvm::Node::Create();
+    nnvm::ObjectPtr node = nnvm::Node::Create();
     if (param.pool_type == pool_enum::kMaxPooling || param.pool_type == pool_enum::kAvgPooling) {
       node->attrs.op = Op::Get("_contrib_quantized_pooling");
       node->attrs.name = "quantized_" + attrs.name;
diff --git a/src/operator/random/sample_multinomial_op.cc b/src/operator/random/sample_multinomial_op.cc
index 7858b03..bba76ce 100644
--- a/src/operator/random/sample_multinomial_op.cc
+++ b/src/operator/random/sample_multinomial_op.cc
@@ -75,7 +75,7 @@ Examples::
         ResourceRequest::kRandom, ResourceRequest::kTempSpace};
     })
 .set_attr<nnvm::FGradient>("FGradient",
-  [](const nnvm::NodePtr& n, const std::vector<nnvm::NodeEntry>& ograds) {
+  [](const nnvm::ObjectPtr& n, const std::vector<nnvm::NodeEntry>& ograds) {
     const SampleMultinomialParam& param = nnvm::get<SampleMultinomialParam>(n->attrs.parsed);
     if (param.get_prob) {
       return MakeGradNode("_backward_sample_multinomial", n,
diff --git a/src/operator/regression_output-inl.h b/src/operator/regression_output-inl.h
index dcee802..671e3c5 100644
--- a/src/operator/regression_output-inl.h
+++ b/src/operator/regression_output-inl.h
@@ -269,7 +269,7 @@ void RegressionBackwardEx(const nnvm::NodeAttrs& attrs,
 
 struct RegressionOpGrad {
   const char *op_name;
-  std::vector<nnvm::NodeEntry> operator()(const nnvm::NodePtr& n,
+  std::vector<nnvm::NodeEntry> operator()(const nnvm::ObjectPtr& n,
                                           const std::vector<nnvm::NodeEntry>& ograds) const {
     std::vector<nnvm::NodeEntry> heads;
     heads.push_back(n->inputs[reg_enum::kLabel]);
diff --git a/src/operator/rnn.cc b/src/operator/rnn.cc
index 204e792..f468b60 100644
--- a/src/operator/rnn.cc
+++ b/src/operator/rnn.cc
@@ -207,7 +207,7 @@ inline static bool RNNStorageType(const nnvm::NodeAttrs& attrs,
 
 struct RNNGrad {
   const char *op_name;
-  std::vector<nnvm::NodeEntry> operator()(const nnvm::NodePtr &n,
+  std::vector<nnvm::NodeEntry> operator()(const nnvm::ObjectPtr &n,
           const std::vector<nnvm::NodeEntry> &ograd) const {
     const RNNParam& params = nnvm::get<RNNParam>(n->attrs.parsed);
     std::vector<nnvm::NodeEntry> heads{ n->inputs[rnn_enum::kData],
diff --git a/src/operator/softmax_output.cc b/src/operator/softmax_output.cc
index 194930f..13bb647 100644
--- a/src/operator/softmax_output.cc
+++ b/src/operator/softmax_output.cc
@@ -34,7 +34,7 @@ namespace op {
 DMLC_REGISTER_PARAMETER(SoftmaxOutputParam);
 struct SoftmaxOutputGrad {
   const char *op_name;
-  std::vector<nnvm::NodeEntry> operator()(const nnvm::NodePtr& n,
+  std::vector<nnvm::NodeEntry> operator()(const nnvm::ObjectPtr& n,
                                           const std::vector<nnvm::NodeEntry>& ograds) const {
   std::vector<nnvm::NodeEntry> out_data(n->num_outputs());
   for (uint32_t i = 0; i < out_data.size(); ++i) {
@@ -44,7 +44,7 @@ struct SoftmaxOutputGrad {
   heads.push_back(out_data[softmaxout_enum::kOut]);
   heads.push_back(n->inputs[softmaxout_enum::kLabel]);
 
-  nnvm::NodePtr gnode = nnvm::Node::Create();
+  nnvm::ObjectPtr gnode = nnvm::Node::Create();
   gnode->inputs = std::move(heads);
   gnode->control_deps.emplace_back(n);
   gnode->attrs = n->attrs;
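
The same mechanical change applies to gradient functors such as SoftmaxOutputGrad above: only the smart-pointer alias differs from the pre-rename form. A hedged sketch of the pattern, with a hypothetical backward op name and input layout:

// Sketch only; "_backward_my_op" and the chosen input are hypothetical.
std::vector<nnvm::NodeEntry> MyOpGrad(const nnvm::ObjectPtr& n,
                                      const std::vector<nnvm::NodeEntry>& ograds) {
  std::vector<nnvm::NodeEntry> heads(ograds);
  heads.push_back(n->inputs[0]);                  // forward input needed by the backward pass
  nnvm::ObjectPtr gnode = nnvm::Node::Create();   // was nnvm::NodePtr before this change
  gnode->attrs = n->attrs;
  gnode->attrs.op = nnvm::Op::Get("_backward_my_op");
  gnode->attrs.name = n->attrs.name + "_backward";
  gnode->inputs = std::move(heads);
  gnode->control_deps.emplace_back(n);
  return {nnvm::NodeEntry{gnode, 0, 0}};
}
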
diff --git a/src/operator/subgraph/build_subgraph.cc b/src/operator/subgraph/build_subgraph.cc
index b5380b7..2e90723 100644
--- a/src/operator/subgraph/build_subgraph.cc
+++ b/src/operator/subgraph/build_subgraph.cc
@@ -33,7 +33,7 @@
 #define DEBUG_SUBGRAPH 0
 
 namespace nnvm {
-NodePtr CreateVariableNode(const std::string& name);
+ObjectPtr CreateVariableNode(const std::string& name);
 }
 
 namespace mxnet {
@@ -71,7 +71,7 @@ void CreateSimpleGraph(const nnvm::Graph& g,
                        std::vector<BiDirectedNodePtr>* simple_nodes) {
   const auto& indexed_graph = g.indexed_graph();
   simple_nodes->reserve(indexed_graph.num_nodes());
-  DFSVisit(g.outputs, [&](const nnvm::NodePtr& node) {
+  DFSVisit(g.outputs, [&](const nnvm::ObjectPtr& node) {
     BiDirectedNodePtr sn = BiDirectedNode::Create();
     sn->node = node.get();
     for (size_t i = 0; i < sn->node->inputs.size(); ++i) {
@@ -558,7 +558,7 @@ void CutGraphInputs(const std::vector<nnvm::NodeEntry*> &input_entries,
     } else {
       ++(it->second);
     }
-    nnvm::NodePtr n = nnvm::CreateVariableNode(var_name + std::to_string(name_count_map[var_name]));
+    nnvm::ObjectPtr n = nnvm::CreateVariableNode(var_name + std::to_string(name_count_map[var_name]));
    // set attribute for subgraph input to indicate whether it comes from an arg/param of the model
Note: the line above is kept as in the source; the comment means the attribute marks whether the subgraph input comes from an arg/param of the model.
     if (e->node->is_variable())
       n->attrs.dict["isArg"] = "True";
@@ -612,7 +612,7 @@ void CreateSubgraphNode(nnvm::Graph* g,
     sym.outputs[i] = *output_entries[i];
   }
   const SubgraphPropertyPtr& subg_prop = g->GetAttr<SubgraphPropertyPtr>("subgraph_property");
-  nnvm::NodePtr n = subg_prop->CreateSubgraphNode(sym, subgraph_selector, subgraph_id);
+  nnvm::ObjectPtr n = subg_prop->CreateSubgraphNode(sym, subgraph_selector, subgraph_id);
   // CreateSubgraphNode returns NULL if the subgraph property determines that the subgraph is sub-optimal
   // In that case, the subgraph node is not created and the graph is not modified
   if (n) {
diff --git a/src/operator/subgraph/common.h b/src/operator/subgraph/common.h
index 814e837..740c8d4 100644
--- a/src/operator/subgraph/common.h
+++ b/src/operator/subgraph/common.h
@@ -243,7 +243,7 @@ inline std::vector<ResourceRequest> DefaultSubgraphOpResourceRequestHelper(
     const nnvm::Symbol& subgraph_sym) {
   static auto& fresource = Op::GetAttr<FResourceRequest>("FResourceRequest");
   std::set<ResourceRequest::Type> resource_types;
-  DFSVisit(subgraph_sym.outputs, [&](const nnvm::NodePtr& node) {
+  DFSVisit(subgraph_sym.outputs, [&](const nnvm::ObjectPtr& node) {
     if (!node->is_variable() && fresource.count(node->op())) {
       for (ResourceRequest& r : fresource[node->op()](node->attrs)){
         resource_types.insert(r.type);
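
DFSVisit callbacks change in the same way: the visitor now takes const nnvm::ObjectPtr& rather than const nnvm::NodePtr&. A small hypothetical walk over a symbol, assuming the usual nnvm headers, just to show the shape of the call:

// Sketch only; counts operators reachable from a symbol's outputs.
#include <string>
#include <unordered_map>
#include <nnvm/graph.h>
#include <nnvm/symbolic.h>

std::unordered_map<std::string, int> CountOps(const nnvm::Symbol& sym) {
  std::unordered_map<std::string, int> counts;
  nnvm::DFSVisit(sym.outputs, [&](const nnvm::ObjectPtr& node) {
    if (node->is_variable()) return;   // skip variable (input) nodes
    ++counts[node->op()->name];        // tally by operator name
  });
  return counts;
}
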
diff --git a/src/operator/subgraph/default_subgraph_property.cc b/src/operator/subgraph/default_subgraph_property.cc
index 246b294..dd3bfd1 100644
--- a/src/operator/subgraph/default_subgraph_property.cc
+++ b/src/operator/subgraph/default_subgraph_property.cc
@@ -55,9 +55,9 @@ class ContainOpSelector: public SubgraphSelector {
 class DefaultSubgraphProperty: public SubgraphProperty {
  public:
   static SubgraphPropertyPtr Create() { return std::make_shared<DefaultSubgraphProperty>(); }
-  virtual nnvm::NodePtr CreateSubgraphNode(const nnvm::Symbol &sym,
+  virtual nnvm::ObjectPtr CreateSubgraphNode(const nnvm::Symbol &sym,
                                            const int subgraph_id = 0) const {
-    nnvm::NodePtr n = nnvm::Node::Create();
+    nnvm::ObjectPtr n = nnvm::Node::Create();
     n->attrs.op = Op::Get("_CachedOp");
     n->attrs.name = "_CachedOp" + std::to_string(subgraph_id);
     n->attrs.subgraphs.push_back(std::make_shared<nnvm::Symbol>(sym));
diff --git a/src/operator/subgraph/default_subgraph_property_v2.cc b/src/operator/subgraph/default_subgraph_property_v2.cc
index c8cc3b1..65aaeb1 100644
--- a/src/operator/subgraph/default_subgraph_property_v2.cc
+++ b/src/operator/subgraph/default_subgraph_property_v2.cc
@@ -59,10 +59,10 @@ class ContainOpSelectorV2: public SubgraphSelectorV2 {
 class DefaultSubgraphProperty: public SubgraphProperty {
  public:
   static SubgraphPropertyPtr Create() { return std::make_shared<DefaultSubgraphProperty>(); }
-  nnvm::NodePtr CreateSubgraphNode(const nnvm::Symbol &sym,
+  nnvm::ObjectPtr CreateSubgraphNode(const nnvm::Symbol &sym,
                                            const SubgraphSelectorPtr& subgraph_selector,
                                            const int subgraph_id = 0) const override {
-    nnvm::NodePtr n = nnvm::Node::Create();
+    nnvm::ObjectPtr n = nnvm::Node::Create();
     n->attrs.op = Op::Get("_CachedOp");
     n->attrs.name = "_CachedOp" + std::to_string(subgraph_id);
     n->attrs.subgraphs.push_back(std::make_shared<nnvm::Symbol>(sym));
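
For subgraph properties, the virtuals now return and accept nnvm::ObjectPtr, as in DefaultSubgraphProperty above. A sketch of a custom property overriding the selector-aware variant; the class name is hypothetical and the body simply mirrors the _CachedOp wrapping shown in this diff (assumes the mxnet::op namespace context of the files above):

// Sketch only; class name hypothetical, body mirrors DefaultSubgraphProperty.
class MySubgraphProperty : public SubgraphProperty {
 public:
  nnvm::ObjectPtr CreateSubgraphNode(const nnvm::Symbol &sym,
                                     const SubgraphSelectorPtr &subgraph_selector,
                                     const int subgraph_id = 0) const override {
    nnvm::ObjectPtr n = nnvm::Node::Create();
    n->attrs.op = Op::Get("_CachedOp");
    n->attrs.name = "_CachedOp" + std::to_string(subgraph_id);
    n->attrs.subgraphs.push_back(std::make_shared<nnvm::Symbol>(sym));
    return n;
  }
};
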
diff --git a/src/operator/subgraph/mkldnn/mkldnn_conv.cc b/src/operator/subgraph/mkldnn/mkldnn_conv.cc
index df44022..f1bb597 100644
--- a/src/operator/subgraph/mkldnn/mkldnn_conv.cc
+++ b/src/operator/subgraph/mkldnn/mkldnn_conv.cc
@@ -413,7 +413,7 @@ static void SgMKLDNNConvParamParser(nnvm::NodeAttrs *attrs) {
   CHECK_EQ(attrs->subgraphs.size(), 1);
   auto subgraph_sym = attrs->subgraphs[0];
   bool with_act = false;
-  DFSVisit(subgraph_sym->outputs, [&](const nnvm::NodePtr &node) {
+  DFSVisit(subgraph_sym->outputs, [&](const nnvm::ObjectPtr &node) {
     if (node->is_variable()) return;
     auto &node_name = node->op()->name;
     if (node_name == "BatchNorm") {
@@ -644,9 +644,9 @@ std::vector<std::pair<int, int>> SgMKLDNNConvInplaceOption(
   }
 }
 
-nnvm::NodePtr SgMKLDNNConvQuantizedOp(const NodeAttrs& attrs) {
+nnvm::ObjectPtr SgMKLDNNConvQuantizedOp(const NodeAttrs& attrs) {
   auto const &param = nnvm::get<MKLDNNConvFusionParam>(attrs.parsed);
-  nnvm::NodePtr node = nnvm::Node::Create();
+  nnvm::ObjectPtr node = nnvm::Node::Create();
   node->attrs.op = Op::Get("_sg_mkldnn_conv");
   CHECK_EQ(param.full_conv_param.conv_param.kernel.ndim(), 2U)
       << "Quantized Convolution of MKL-DNN only supports 2D kernel currently."
diff --git a/src/operator/subgraph/mkldnn/mkldnn_conv_property.h b/src/operator/subgraph/mkldnn/mkldnn_conv_property.h
index 4406913..dcd35d5 100644
--- a/src/operator/subgraph/mkldnn/mkldnn_conv_property.h
+++ b/src/operator/subgraph/mkldnn/mkldnn_conv_property.h
@@ -196,9 +196,9 @@ class SgMKLDNNConvProperty : public SubgraphProperty {
     }
     return property;
   }
-  nnvm::NodePtr CreateSubgraphNode(const nnvm::Symbol &sym,
+  nnvm::ObjectPtr CreateSubgraphNode(const nnvm::Symbol &sym,
                                    const int subgraph_id = 0) const override {
-    nnvm::NodePtr n = nnvm::Node::Create();
+    nnvm::ObjectPtr n = nnvm::Node::Create();
     // This op has a single output; remove duplicates.
     auto last_node = sym.outputs[0].node;
     nnvm::Symbol new_sym;
@@ -206,7 +206,7 @@ class SgMKLDNNConvProperty : public SubgraphProperty {
     std::ostringstream node_name;
     node_name << "sg_mkldnn_";
     bool _with_sum = false;
-    DFSVisit(new_sym.outputs, [&](const nnvm::NodePtr &node) {
+    DFSVisit(new_sym.outputs, [&](const nnvm::ObjectPtr &node) {
       if (node->is_variable()) return;
       auto &sub_name = node->op()->name;
       if (sub_name == "Convolution") {
@@ -245,7 +245,7 @@ class SgMKLDNNConvProperty : public SubgraphProperty {
   }
 
   void ConnectSubgraphOutputs(
-      const nnvm::NodePtr n,
+      const nnvm::ObjectPtr n,
       std::vector<nnvm::NodeEntry *> *output_entries) const override {
     // Connect all extern output entries to output[0]
     for (size_t i = 0; i < output_entries->size(); ++i) {
@@ -254,11 +254,11 @@ class SgMKLDNNConvProperty : public SubgraphProperty {
   }
 
   void ConnectSubgraphInputs(
-      const nnvm::NodePtr n, std::vector<nnvm::NodeEntry *> *input_entries,
+      const nnvm::ObjectPtr n, std::vector<nnvm::NodeEntry *> *input_entries,
       std::vector<nnvm::NodeEntry> *orig_input_entries) const override {
     auto sym = n->attrs.subgraphs[0];
     std::unordered_set<const nnvm::Node *> node_sets;
-    DFSVisit(sym->outputs, [&](const nnvm::NodePtr &node) {
+    DFSVisit(sym->outputs, [&](const nnvm::ObjectPtr &node) {
       if (node->is_variable()) return;
       node_sets.insert(node.get());
       if (node->op()->name == "elemwise_add") {
diff --git a/src/operator/subgraph/mkldnn/mkldnn_elemwisemul_post_quantize_property.h b/src/operator/subgraph/mkldnn/mkldnn_elemwisemul_post_quantize_property.h
index 1469395..21b29a6 100644
--- a/src/operator/subgraph/mkldnn/mkldnn_elemwisemul_post_quantize_property.h
+++ b/src/operator/subgraph/mkldnn/mkldnn_elemwisemul_post_quantize_property.h
@@ -156,13 +156,13 @@ class ElemwiseMulPostQuantizeProperty : public SubgraphProperty {
     return property;
   }
 
-  nnvm::NodePtr CreateSubgraphNode(const nnvm::Symbol &sym,
+  nnvm::ObjectPtr CreateSubgraphNode(const nnvm::Symbol &sym,
                                    const int subgraph_id = 0) const override {
-    nnvm::NodePtr em_node = nullptr;
-    nnvm::NodePtr requantize_node = nullptr;
-    nnvm::NodePtr dequantize_node = nullptr;
+    nnvm::ObjectPtr em_node = nullptr;
+    nnvm::ObjectPtr requantize_node = nullptr;
+    nnvm::ObjectPtr dequantize_node = nullptr;
 
-    DFSVisit(sym.outputs, [&](const nnvm::NodePtr &node) {
+    DFSVisit(sym.outputs, [&](const nnvm::ObjectPtr &node) {
       if (node->is_variable()) return;
       if (node->op() == Op::Get(QUANTIZED_ElemwiseMul_NAME)) {
         em_node = node;
@@ -202,7 +202,7 @@ class ElemwiseMulPostQuantizeProperty : public SubgraphProperty {
   }
 
   void ConnectSubgraphOutputs(
-      const nnvm::NodePtr n,
+      const nnvm::ObjectPtr n,
       std::vector<nnvm::NodeEntry *> *output_entries) const override {
     for (size_t i = 0; i < output_entries->size(); ++i) {
       auto entry_ptr = output_entries->at(i);
diff --git a/src/operator/subgraph/mkldnn/mkldnn_fc.cc b/src/operator/subgraph/mkldnn/mkldnn_fc.cc
index 4d5233d..ec8ba64 100644
--- a/src/operator/subgraph/mkldnn/mkldnn_fc.cc
+++ b/src/operator/subgraph/mkldnn/mkldnn_fc.cc
@@ -387,7 +387,7 @@ static void SgMKLDNNFCParamParser(nnvm::NodeAttrs *attrs) {
     throw dmlc::ParamError(os.str());
   }
   auto subgraph_sym = attrs->subgraphs[0];
-  DFSVisit(subgraph_sym->outputs, [&](const nnvm::NodePtr &node) {
+  DFSVisit(subgraph_sym->outputs, [&](const nnvm::ObjectPtr &node) {
     if (node->is_variable()) return;
     auto &op_name = node->op()->name;
     if (op_name == "FullyConnected") {
@@ -585,8 +585,8 @@ static void SgMKLDNNFCForward(const OpStatePtr &state_pointer,
   op.Forward(ctx, inputs, req, outputs);
 }
 
-nnvm::NodePtr SgMKLDNNFCQuantizedOp(const NodeAttrs& attrs) {
-  nnvm::NodePtr node = nnvm::Node::Create();
+nnvm::ObjectPtr SgMKLDNNFCQuantizedOp(const NodeAttrs& attrs) {
+  nnvm::ObjectPtr node = nnvm::Node::Create();
   node->attrs.op = Op::Get("_sg_mkldnn_fully_connected");
   node->attrs.name = "quantized_" + attrs.name;
   node->attrs.dict = attrs.dict;
diff --git a/src/operator/subgraph/mkldnn/mkldnn_fc_post_quantize_property.h b/src/operator/subgraph/mkldnn/mkldnn_fc_post_quantize_property.h
index f4f252b..aaa613c 100644
--- a/src/operator/subgraph/mkldnn/mkldnn_fc_post_quantize_property.h
+++ b/src/operator/subgraph/mkldnn/mkldnn_fc_post_quantize_property.h
@@ -156,13 +156,13 @@ class SgMKLDNNFCPostQuantizeProperty : public SubgraphProperty {
     return property;
   }
 
-  nnvm::NodePtr CreateSubgraphNode(const nnvm::Symbol &sym,
+  nnvm::ObjectPtr CreateSubgraphNode(const nnvm::Symbol &sym,
                                    const int subgraph_id = 0) const override {
-    nnvm::NodePtr fc_node = nullptr;
-    nnvm::NodePtr requantize_node = nullptr;
-    nnvm::NodePtr dequantize_node = nullptr;
+    nnvm::ObjectPtr fc_node = nullptr;
+    nnvm::ObjectPtr requantize_node = nullptr;
+    nnvm::ObjectPtr dequantize_node = nullptr;
 
-    DFSVisit(sym.outputs, [&](const nnvm::NodePtr &node) {
+    DFSVisit(sym.outputs, [&](const nnvm::ObjectPtr &node) {
       if (node->is_variable()) return;
       if (node->op() == Op::Get(QUANTIZED_FC_NAME)) {
         fc_node = node;
@@ -202,7 +202,7 @@ class SgMKLDNNFCPostQuantizeProperty : public SubgraphProperty {
   }
 
   void ConnectSubgraphOutputs(
-      const nnvm::NodePtr n,
+      const nnvm::ObjectPtr n,
       std::vector<nnvm::NodeEntry *> *output_entries) const override {
     for (size_t i = 0; i < output_entries->size(); ++i) {
       auto entry_ptr = output_entries->at(i);
diff --git a/src/operator/subgraph/mkldnn/mkldnn_fc_property.h b/src/operator/subgraph/mkldnn/mkldnn_fc_property.h
index 6dcd114..aecb3a7 100644
--- a/src/operator/subgraph/mkldnn/mkldnn_fc_property.h
+++ b/src/operator/subgraph/mkldnn/mkldnn_fc_property.h
@@ -172,16 +172,16 @@ class SgMKLDNNFCProperty : public SubgraphProperty {
     return property;
   }
 
-  nnvm::NodePtr CreateSubgraphNode(const nnvm::Symbol &sym,
+  nnvm::ObjectPtr CreateSubgraphNode(const nnvm::Symbol &sym,
                                    const int subgraph_id = 0) const override {
-    nnvm::NodePtr n = nnvm::Node::Create();
+    nnvm::ObjectPtr n = nnvm::Node::Create();
     // This op has a single output; remove duplicates.
     auto last_node = sym.outputs[0].node;
     nnvm::Symbol new_sym;
     new_sym.outputs.emplace_back(last_node);
     std::ostringstream node_name;
     node_name << "sg_mkldnn_";
-    DFSVisit(new_sym.outputs, [&](const nnvm::NodePtr &node) {
+    DFSVisit(new_sym.outputs, [&](const nnvm::ObjectPtr &node) {
       if (node->is_variable()) return;
       auto &sub_name = node->op()->name;
       if (sub_name == "FullyConnected") {
@@ -207,7 +207,7 @@ class SgMKLDNNFCProperty : public SubgraphProperty {
   }
 
   void ConnectSubgraphOutputs(
-      const nnvm::NodePtr n,
+      const nnvm::ObjectPtr n,
       std::vector<nnvm::NodeEntry *> *output_entries) const override {
     // Connect all extern output entries to output[0]
     for (size_t i = 0; i < output_entries->size(); ++i) {
diff --git a/src/operator/subgraph/mkldnn/mkldnn_post_quantize_property.h b/src/operator/subgraph/mkldnn/mkldnn_post_quantize_property.h
index 38b0896..085dd49 100644
--- a/src/operator/subgraph/mkldnn/mkldnn_post_quantize_property.h
+++ b/src/operator/subgraph/mkldnn/mkldnn_post_quantize_property.h
@@ -129,11 +129,11 @@ class SgMKLDNNPostQuantizeProperty : public SubgraphProperty {
     property->SetAttr<bool>("inference_only", true);
     return property;
   }
-  nnvm::NodePtr CreateSubgraphNode(const nnvm::Symbol &sym,
+  nnvm::ObjectPtr CreateSubgraphNode(const nnvm::Symbol &sym,
                                    const int subgraph_id = 0) const override {
-    nnvm::NodePtr fuse_node = nullptr;
-    nnvm::NodePtr requantize_node = nullptr;
-    DFSVisit(sym.outputs, [&](const nnvm::NodePtr &node) {
+    nnvm::ObjectPtr fuse_node = nullptr;
+    nnvm::ObjectPtr requantize_node = nullptr;
+    DFSVisit(sym.outputs, [&](const nnvm::ObjectPtr &node) {
       if (node->is_variable()) return;
       auto &op_name = node->op()->name;
       if (support_requantize_fusion_op_name.count(op_name)) {
@@ -162,7 +162,7 @@ class SgMKLDNNPostQuantizeProperty : public SubgraphProperty {
   }
 
   void ConnectSubgraphOutputs(
-      const nnvm::NodePtr n,
+      const nnvm::ObjectPtr n,
       std::vector<nnvm::NodeEntry *> *output_entries) const override {
     for (size_t i = 0; i < output_entries->size(); ++i) {
       auto entry_ptr = output_entries->at(i);
diff --git a/src/operator/subgraph/partitioner/custom_subgraph_property.h b/src/operator/subgraph/partitioner/custom_subgraph_property.h
index b4ea1a0..6f382d4 100644
--- a/src/operator/subgraph/partitioner/custom_subgraph_property.h
+++ b/src/operator/subgraph/partitioner/custom_subgraph_property.h
@@ -161,7 +161,7 @@ class  CustomSubgraphProperty: public SubgraphProperty {
     }
   }
   // override CreateSubgraphNode
-  virtual nnvm::NodePtr CreateSubgraphNode(const nnvm::Symbol &sym,
+  virtual nnvm::ObjectPtr CreateSubgraphNode(const nnvm::Symbol &sym,
                                            const int subgraph_id = 0) const {
     int accept = 1;
     int num_attr = 0;
@@ -195,7 +195,7 @@ class  CustomSubgraphProperty: public SubgraphProperty {
         << "Error calling accept_subgraph for '" << subgraph_prop << "'";
     }
     if (accept) {
-      nnvm::NodePtr n = nnvm::Node::Create();
+      nnvm::ObjectPtr n = nnvm::Node::Create();
       n->attrs.op = Op::Get(subgraph_op_name);
       n->attrs.name = "_op" + std::to_string(subgraph_id);
       n->attrs.subgraphs.push_back(std::make_shared<nnvm::Symbol>(sym));
diff --git a/src/operator/subgraph/subgraph_property.h b/src/operator/subgraph/subgraph_property.h
index 643c02a..f765aba 100644
--- a/src/operator/subgraph/subgraph_property.h
+++ b/src/operator/subgraph/subgraph_property.h
@@ -283,7 +283,7 @@ class SubgraphProperty {
    * \param sym the symbol to create subgraph node
    * \param subgraph_id subgraph id
    */
-  virtual nnvm::NodePtr CreateSubgraphNode(const nnvm::Symbol& sym,
+  virtual nnvm::ObjectPtr CreateSubgraphNode(const nnvm::Symbol& sym,
                                            const int subgraph_id = 0) const {
     CHECK_EQ(GetPropertyType(), kCreate);
    LOG(FATAL) << "CreateSubgraphNode() is not implemented for this subgraph property.";
@@ -297,7 +297,7 @@ class SubgraphProperty {
    * \param subgraph_selector the selector used for creating this subgraph
    * \param subgraph_id subgraph id
    */
-  virtual nnvm::NodePtr CreateSubgraphNode(const nnvm::Symbol& sym,
+  virtual nnvm::ObjectPtr CreateSubgraphNode(const nnvm::Symbol& sym,
                                            const SubgraphSelectorPtr& subgraph_selector,
                                            const int subgraph_id = 0) const {
     return CreateSubgraphNode(sym, subgraph_id);
@@ -310,7 +310,7 @@ class SubgraphProperty {
    * \param subgraph_selector The selector used for selecting this node set
    * \param subgraph_id subgraph id
    */
-  virtual nnvm::NodePtr CreateSubgraphNode(const nnvm::Symbol& sym,
+  virtual nnvm::ObjectPtr CreateSubgraphNode(const nnvm::Symbol& sym,
                                            const SubgraphSelectorV2Ptr& subgraph_selector,
                                            const int subgraph_id = 0) const {
     CHECK_EQ(GetPropertyType(), kCreate);
@@ -339,7 +339,7 @@ class SubgraphProperty {
    * \param subgraph_node the subgraph node to connect output
    * \param output_entries external output entries depending on this subgraph node
    */
-  virtual void ConnectSubgraphOutputs(const nnvm::NodePtr subgraph_node,
+  virtual void ConnectSubgraphOutputs(const nnvm::ObjectPtr subgraph_node,
                                       std::vector<nnvm::NodeEntry*>* output_entries) const {
     for (size_t i = 0; i < output_entries->size(); ++i) {
       *output_entries->at(i) = nnvm::NodeEntry{subgraph_node, static_cast<uint32_t>(i), 0};
@@ -352,7 +352,7 @@ class SubgraphProperty {
    * \param input_entries input entries inside subgraph
    * \param orig_input_entries input entries outside subgraph
    */
-  virtual void ConnectSubgraphInputs(const nnvm::NodePtr subgraph_node,
+  virtual void ConnectSubgraphInputs(const nnvm::ObjectPtr subgraph_node,
                                      std::vector<nnvm::NodeEntry*>* input_entries,
                                      std::vector<nnvm::NodeEntry>* orig_input_entries) const {
     subgraph_node->inputs = *orig_input_entries;
diff --git a/src/operator/subgraph/tensorrt/tensorrt-inl.h b/src/operator/subgraph/tensorrt/tensorrt-inl.h
index fac927e..dcafba5 100644
--- a/src/operator/subgraph/tensorrt/tensorrt-inl.h
+++ b/src/operator/subgraph/tensorrt/tensorrt-inl.h
@@ -268,9 +268,9 @@ class TensorrtProperty : public SubgraphProperty {
     return std::make_shared<TensorrtProperty>();
   }
 
-  nnvm::NodePtr CreateSubgraphNode(const nnvm::Symbol &sym,
+  nnvm::ObjectPtr CreateSubgraphNode(const nnvm::Symbol &sym,
                                    const int subgraph_id) const override {
-    nnvm::NodePtr n = nnvm::Node::Create();
+    nnvm::ObjectPtr n = nnvm::Node::Create();
     nnvm::Symbol new_sym;
     std::unique_copy(sym.outputs.begin(), sym.outputs.end(),
         std::back_inserter(new_sym.outputs), [](
@@ -298,7 +298,7 @@ class TensorrtProperty : public SubgraphProperty {
     return std::make_shared<TensorrtSelector>();
   }
 
-  void ConnectSubgraphOutputs(const nnvm::NodePtr subgraph_node, \
+  void ConnectSubgraphOutputs(const nnvm::ObjectPtr subgraph_node, \
                               std::vector<nnvm::NodeEntry*>* output_entries) const override {
     std::vector<nnvm::NodeEntry>& outputs = subgraph_node->attrs.subgraphs[0]->outputs;
     TRTParam& _params = nnvm::get<TRTParam>(subgraph_node->attrs.parsed);
@@ -317,7 +317,7 @@ class TensorrtProperty : public SubgraphProperty {
     subgraph_node->attrs.parsed = std::move(_params);
   }
 
-  void ConnectSubgraphInputs(const nnvm::NodePtr subgraph_node,
+  void ConnectSubgraphInputs(const nnvm::ObjectPtr subgraph_node,
                              std::vector<nnvm::NodeEntry*>* input_entries,
                              std::vector<nnvm::NodeEntry>* orig_input_entries) const override {
     TRTParam& _params = nnvm::get<TRTParam>(subgraph_node->attrs.parsed);
diff --git a/src/operator/tensor/broadcast_reduce_op.h b/src/operator/tensor/broadcast_reduce_op.h
index 00e113d..5eb0c41 100644
--- a/src/operator/tensor/broadcast_reduce_op.h
+++ b/src/operator/tensor/broadcast_reduce_op.h
@@ -1155,7 +1155,7 @@ inline void AxesParamParser(nnvm::NodeAttrs* attrs) {
 
 struct ReduceGrad {
   const char *op_name;
-  std::vector<nnvm::NodeEntry> operator()(const nnvm::NodePtr& n,
+  std::vector<nnvm::NodeEntry> operator()(const nnvm::ObjectPtr& n,
                                           const std::vector<nnvm::NodeEntry>& ograds) {
     return MakeNonlossGradNode(
         op_name, n,
@@ -1670,7 +1670,7 @@ Defined in )code";
   .set_num_outputs(1)                                           \
   .set_attr<nnvm::FInferType>("FInferType", ElemwiseType<1, 1>) \
   .set_attr<nnvm::FGradient>("FGradient",                       \
-    [](const nnvm::NodePtr& n,                                  \
+    [](const nnvm::ObjectPtr& n,                                  \
        const std::vector<nnvm::NodeEntry>& ograds) {            \
       return MakeNonlossGradNode("_broadcast_backward", n, ograds, {},    \
                                  {{"keepdims", "true"}});              \
diff --git a/src/operator/tensor/broadcast_reduce_op_index.cc b/src/operator/tensor/broadcast_reduce_op_index.cc
index 52082f7..060eb5a 100644
--- a/src/operator/tensor/broadcast_reduce_op_index.cc
+++ b/src/operator/tensor/broadcast_reduce_op_index.cc
@@ -164,7 +164,7 @@ Examples::
 .set_attr<nnvm::FInferType>("FInferType", PickOpType)
 .set_attr<FCompute>("FCompute<cpu>", PickOpForward<cpu>)
 .set_attr<nnvm::FGradient>("FGradient",
-  [](const nnvm::NodePtr& n, const std::vector<nnvm::NodeEntry>& ograds) {
+  [](const nnvm::ObjectPtr& n, const std::vector<nnvm::NodeEntry>& ograds) {
     if (CheckGradAllZero(ograds)) return MakeZeroGradNodes(n, ograds);
     auto ret = MakeGradNode("_backward_pick", n, {ograds[0], n->inputs[1]},
                             n->attrs.dict);
diff --git a/src/operator/tensor/broadcast_reduce_op_value.cc b/src/operator/tensor/broadcast_reduce_op_value.cc
index 31e0dd0..daea4b2 100644
--- a/src/operator/tensor/broadcast_reduce_op_value.cc
+++ b/src/operator/tensor/broadcast_reduce_op_value.cc
@@ -139,7 +139,7 @@ NNVM_REGISTER_OP(broadcast_like)
     })
 .set_attr<nnvm::FInferType>("FInferType", ElemwiseType<2, 1>)
 .set_attr<nnvm::FGradient>("FGradient",
-  [](const nnvm::NodePtr& n,
+  [](const nnvm::ObjectPtr& n,
     const std::vector<nnvm::NodeEntry>& ograds) {
       if (CheckGradAllZero(ograds))
         return MakeZeroGradNodes(n, ograds);
diff --git a/src/operator/tensor/control_flow_op.cc b/src/operator/tensor/control_flow_op.cc
index b0394d0..8f54aba 100644
--- a/src/operator/tensor/control_flow_op.cc
+++ b/src/operator/tensor/control_flow_op.cc
@@ -70,7 +70,7 @@ Examples::
   // Use the following lambda function instead of ElemwiseGradUseIn
   // for best efficiency. grad[condition] = 0; to calculate grad[x] and grad[y]
  // we only need the condition input.
-  [](const nnvm::NodePtr& n, const std::vector<nnvm::NodeEntry>& ograds) {
+  [](const nnvm::ObjectPtr& n, const std::vector<nnvm::NodeEntry>& ograds) {
     std::vector<nnvm::NodeEntry> ret;
     // make zero grad node for grad[condition]
     auto p = MakeNode("zeros_like", n->attrs.name + "_cond_backward",
diff --git a/src/operator/tensor/dot.cc b/src/operator/tensor/dot.cc
index 32d1c81..b3f6331 100644
--- a/src/operator/tensor/dot.cc
+++ b/src/operator/tensor/dot.cc
@@ -141,11 +141,11 @@ which is computed by::
 .set_attr<THasDeterministicOutput>("THasDeterministicOutput", true)
 .set_attr<FCompute>("FCompute<cpu>", BatchDotForward_<cpu>)
 .set_attr<nnvm::FGradient>("FGradient",
-    [](const nnvm::NodePtr& n,
+    [](const nnvm::ObjectPtr& n,
        const std::vector<nnvm::NodeEntry>& ograds) {
   const DotParam& param = nnvm::get<DotParam>(n->attrs.parsed);
-  nnvm::NodePtr lhs_grad;
-  nnvm::NodePtr rhs_grad;
+  nnvm::ObjectPtr lhs_grad;
+  nnvm::ObjectPtr rhs_grad;
   std::string lhs_gnode_name = n->attrs.name + "_backward_lhs";
   std::string rhs_gnode_name = n->attrs.name + "_backward_rhs";
   if (param.transpose_a && param.transpose_b) {
diff --git a/src/operator/tensor/elemwise_sum.cc b/src/operator/tensor/elemwise_sum.cc
index b07c959..5885d73 100644
--- a/src/operator/tensor/elemwise_sum.cc
+++ b/src/operator/tensor/elemwise_sum.cc
@@ -42,7 +42,7 @@ struct ElementWiseSumParam : public dmlc::Parameter<ElementWiseSumParam> {
 DMLC_REGISTER_PARAMETER(ElementWiseSumParam);
 
 std::vector<nnvm::NodeEntry> ElementWiseSumGrad(
-    const nnvm::NodePtr& n,
+    const nnvm::ObjectPtr& n,
     const std::vector<nnvm::NodeEntry>& ograds) {
   // identity constraints in the beginning for easier shape inference.
   const nnvm::Op* copy_op =
@@ -50,7 +50,7 @@ std::vector<nnvm::NodeEntry> ElementWiseSumGrad(
   CHECK_EQ(ograds.size(), 1);
   std::vector<nnvm::NodeEntry> ret;
   for (size_t i = 0; i < n->inputs.size(); ++i) {
-    nnvm::NodePtr node = nnvm::Node::Create();
+    nnvm::ObjectPtr node = nnvm::Node::Create();
     node->attrs.op = copy_op;
     node->inputs = {ograds[0]};
     ret.emplace_back(std::move(node));
diff --git a/src/operator/tensor/elemwise_unary_op_basic.cc b/src/operator/tensor/elemwise_unary_op_basic.cc
index c8f5973..227596d 100644
--- a/src/operator/tensor/elemwise_unary_op_basic.cc
+++ b/src/operator/tensor/elemwise_unary_op_basic.cc
@@ -87,7 +87,7 @@ The storage type of ``relu`` output depends upon the input storage type:
 
 MXNET_OPERATOR_REGISTER_BINARY_WITH_SPARSE_CPU(_backward_relu, unary_bwd<mshadow_op::relu_grad>)
 .set_attr<nnvm::FGradient>("FGradient",
-    [](const nnvm::NodePtr& n, const std::vector<nnvm::NodeEntry>& ograds) {
+    [](const nnvm::ObjectPtr& n, const std::vector<nnvm::NodeEntry>& ograds) {
       std::vector<nnvm::NodeEntry> ret;
       // ograds[0]: dL/dxgrad
       // inputs[0]: dL/dy
@@ -123,7 +123,7 @@ The storage type of ``sigmoid`` output is always dense
 MXNET_OPERATOR_REGISTER_BINARY_WITH_SPARSE_CPU(_backward_sigmoid,
                                                unary_bwd<mshadow_op::sigmoid_grad>)
 .set_attr<nnvm::FGradient>("FGradient",
-    [](const nnvm::NodePtr& n, const std::vector<nnvm::NodeEntry>& ograds) {
+    [](const nnvm::ObjectPtr& n, const std::vector<nnvm::NodeEntry>& ograds) {
       // n->inputs[0] : y_grad
       // n->inputs[1] : f(x) = sigmoid(x)
       // ograds[0] : head_grads
@@ -368,7 +368,7 @@ The storage type of ``make_loss`` output depends upon the input storage type:
     return std::vector<bool>{true};
   })
 .set_attr<nnvm::FGradient>("FGradient",
-  [](const nnvm::NodePtr& n, const std::vector<nnvm::NodeEntry>& ograds) {
+  [](const nnvm::ObjectPtr& n, const std::vector<nnvm::NodeEntry>& ograds) {
     std::vector<nnvm::NodeEntry> ret;
     ret.emplace_back(MakeNode("ones_like", n->attrs.name + "_backward",
                      &(n->inputs), nullptr, &n));
@@ -396,7 +396,7 @@ NNVM_REGISTER_OP(_identity_with_attr_like_rhs)
 .set_attr<nnvm::FInferType>("FInferType", ElemwiseType<2, 1>)
 .set_attr<FInferStorageType>("FInferStorageType", IdentityAttrLikeRhsStorageType)
 .set_attr<nnvm::FGradient>(
-    "FGradient",  [](const nnvm::NodePtr& n,
+    "FGradient",  [](const nnvm::ObjectPtr& n,
                      const std::vector<nnvm::NodeEntry>& ograds) {
       if (CheckGradAllZero(ograds)) return MakeZeroGradNodes(n, ograds);
       std::vector<nnvm::NodeEntry> lhs = MakeGradNode("_backward_copy", n, ograds,
@@ -535,7 +535,7 @@ Negative indices are supported, and `None` can be used for either `lhs_end` or `
     return ret;
   })
 .set_attr<nnvm::FGradient>(
-    "FGradient",  [](const nnvm::NodePtr& n,
+    "FGradient",  [](const nnvm::ObjectPtr& n,
                      const std::vector<nnvm::NodeEntry>& ograds) {
       if (CheckGradAllZero(ograds)) return MakeZeroGradNodes(n, ograds);
       std::vector<nnvm::NodeEntry> lhs = MakeGradNode("_backward_copy", n, ograds,
@@ -722,7 +722,7 @@ The storage type of ``abs`` output depends upon the input storage type:
 
 MXNET_OPERATOR_REGISTER_BINARY_WITH_SPARSE_CPU(_backward_abs, unary_bwd<mshadow_op::sign>)
 .set_attr<nnvm::FGradient>("FGradient",
-    [](const nnvm::NodePtr& n, const std::vector<nnvm::NodeEntry>& ograds) {
+    [](const nnvm::ObjectPtr& n, const std::vector<nnvm::NodeEntry>& ograds) {
       // ograds[0]: dL/dxgrad
       // inputs[0]: dL/dy
       // inputs[1]: x
diff --git a/src/operator/tensor/elemwise_unary_op_logexp.cc b/src/operator/tensor/elemwise_unary_op_logexp.cc
index 7bc7421..609877b 100644
--- a/src/operator/tensor/elemwise_unary_op_logexp.cc
+++ b/src/operator/tensor/elemwise_unary_op_logexp.cc
@@ -109,7 +109,7 @@ The storage type of ``log2`` output is always dense
 MXNET_OPERATOR_REGISTER_BINARY_WITH_SPARSE_CPU_DR(_backward_log,
                                                   unary_bwd<mshadow_op::log_grad>)
 .set_attr<nnvm::FGradient>("FGradient",
-  [](const nnvm::NodePtr& n, const std::vector<nnvm::NodeEntry>& ograds) {
+  [](const nnvm::ObjectPtr& n, const std::vector<nnvm::NodeEntry>& ograds) {
     // ograds[0]: dL/dxgrad
     // inputs[0]: dL/dy (ygrad)
    // inputs[1]: x (ElemwiseGradUseIn)
@@ -134,7 +134,7 @@ MXNET_OPERATOR_REGISTER_BINARY_WITH_SPARSE_CPU_DR(_backward_log,
 MXNET_OPERATOR_REGISTER_BINARY_WITH_SPARSE_CPU_DR(_backward_log10,
                                                   unary_bwd<mshadow_op::log10_grad>)
 .set_attr<nnvm::FGradient>("FGradient",
-  [](const nnvm::NodePtr& n, const std::vector<nnvm::NodeEntry>& ograds) {
+  [](const nnvm::ObjectPtr& n, const std::vector<nnvm::NodeEntry>& ograds) {
     // ograds[0]: dL/dxgrad
     // inputs[0]: dL/dy (ygrad)
    // inputs[1]: x (ElemwiseGradUseIn)
@@ -160,7 +160,7 @@ MXNET_OPERATOR_REGISTER_BINARY_WITH_SPARSE_CPU_DR(_backward_log10,
 MXNET_OPERATOR_REGISTER_BINARY_WITH_SPARSE_CPU_DR(_backward_log2,
                                                   unary_bwd<mshadow_op::log2_grad>)
 .set_attr<nnvm::FGradient>("FGradient",
-  [](const nnvm::NodePtr& n, const std::vector<nnvm::NodeEntry>& ograds) {
+  [](const nnvm::ObjectPtr& n, const std::vector<nnvm::NodeEntry>& ograds) {
     // ograds[0]: dL/dxgrad
     // inputs[0]: dL/dy (ygrad)
    // inputs[1]: x (ElemwiseGradUseIn)
@@ -202,7 +202,7 @@ The storage type of ``log1p`` output depends upon the input storage type:
 MXNET_OPERATOR_REGISTER_BINARY_WITH_SPARSE_CPU_DR(_backward_log1p,
                                                   unary_bwd<mshadow_op::log1p_grad>)
 .set_attr<nnvm::FGradient>("FGradient",
-  [](const nnvm::NodePtr& n, const std::vector<nnvm::NodeEntry>& ograds) {
+  [](const nnvm::ObjectPtr& n, const std::vector<nnvm::NodeEntry>& ograds) {
     // ograds[0]: head_grad_grads (dL/dxgrad)
     // inputs[0]: dL/dy
     // inputs[1]: x (ElemwiseGradUseIn)
@@ -244,7 +244,7 @@ The storage type of ``expm1`` output depends upon the input storage type:
 
 MXNET_OPERATOR_REGISTER_BINARY_WITH_SPARSE_CPU_DR(_backward_expm1, unary_bwd<mshadow_op::exp>)
 .set_attr<nnvm::FGradient>("FGradient",
-  [](const nnvm::NodePtr& n, const std::vector<nnvm::NodeEntry>& ograds) {
+  [](const nnvm::ObjectPtr& n, const std::vector<nnvm::NodeEntry>& ograds) {
     // ograds[0]: head_grad_grads (dL/dxgrad)
     // inputs[0]: dL/dy
     // inputs[1]: x (ElemwiseGradUseIn)
diff --git a/src/operator/tensor/elemwise_unary_op_pow.cc b/src/operator/tensor/elemwise_unary_op_pow.cc
index 6702625..b4d3a4a 100644
--- a/src/operator/tensor/elemwise_unary_op_pow.cc
+++ b/src/operator/tensor/elemwise_unary_op_pow.cc
@@ -48,7 +48,7 @@ MXNET_OPERATOR_REGISTER_BINARY(_backward_reciprocal)
 .set_attr<FCompute>("FCompute<cpu>",
   ElemwiseBinaryOp::Compute<cpu, unary_bwd<mshadow_op::reciprocal_grad> >)
 .set_attr<nnvm::FGradient>("FGradient",
-  [](const nnvm::NodePtr& n, const std::vector<nnvm::NodeEntry>& ograds) {
+  [](const nnvm::ObjectPtr& n, const std::vector<nnvm::NodeEntry>& ograds) {
     // ograds[0]: dL/dxgrad
     // inputs[0]: dL/dy
     // inputs[1]: x
@@ -123,7 +123,7 @@ The storage type of ``square`` output depends upon the input storage type:
 MXNET_OPERATOR_REGISTER_BINARY_WITH_SPARSE_CPU(_backward_square,
                                                unary_bwd<mshadow_op::square_grad>)
 .set_attr<nnvm::FGradient>("FGradient",
-  [](const nnvm::NodePtr& n, const std::vector<nnvm::NodeEntry>& ograds) {
+  [](const nnvm::ObjectPtr& n, const std::vector<nnvm::NodeEntry>& ograds) {
     // ograds[0]: head_grad_grads (dL/dxgrad)
     // inputs[0]: dL/dy
     // inputs[1]: x (ElemwiseGradUseIn)
@@ -173,7 +173,7 @@ The storage type of ``sqrt`` output depends upon the input storage type:
 MXNET_OPERATOR_REGISTER_BINARY_WITH_SPARSE_CPU_DR(_backward_sqrt,
                                                   unary_bwd<mshadow_op::square_root_grad>)
 .set_attr<nnvm::FGradient>("FGradient",
-  [](const nnvm::NodePtr& n, const std::vector<nnvm::NodeEntry>& ograds) {
+  [](const nnvm::ObjectPtr& n, const std::vector<nnvm::NodeEntry>& ograds) {
       // NodeEntry{n} : y_grad * f'(x)
       // n->inputs[0] : y_grad
       // n->inputs[1] : f(x) = x^1/2
@@ -273,7 +273,7 @@ The storage type of ``cbrt`` output depends upon the input storage type:
 MXNET_OPERATOR_REGISTER_BINARY_WITH_SPARSE_CPU_DR(_backward_cbrt,
                                                   unary_bwd<mshadow_op::cube_root_grad>)
 .set_attr<nnvm::FGradient>("FGradient",
-  [](const nnvm::NodePtr& n, const std::vector<nnvm::NodeEntry>& ograds) {
+  [](const nnvm::ObjectPtr& n, const std::vector<nnvm::NodeEntry>& ograds) {
       // NodeEntry{n} : y_grad * f'(x)
       // n->inputs[0] : y_grad
       // n->inputs[1] : f(x) = x^1/3
diff --git a/src/operator/tensor/elemwise_unary_op_trig.cc b/src/operator/tensor/elemwise_unary_op_trig.cc
index e5d662a..03eb6fb 100644
--- a/src/operator/tensor/elemwise_unary_op_trig.cc
+++ b/src/operator/tensor/elemwise_unary_op_trig.cc
@@ -49,7 +49,7 @@ The storage type of ``sin`` output depends upon the input storage type:
 
 MXNET_OPERATOR_REGISTER_BINARY_WITH_SPARSE_CPU_DR(_backward_sin, unary_bwd<mshadow_op::sin_grad>)
 .set_attr<nnvm::FGradient>("FGradient",
-    [](const nnvm::NodePtr& n, const std::vector<nnvm::NodeEntry>& ograds) {
+    [](const nnvm::ObjectPtr& n, const std::vector<nnvm::NodeEntry>& ograds) {
       // ograds[0]: head_grad_grads (dL/dxgrad)
       // inputs[0]: dL/dy
      // inputs[1]: x (ElemwiseGradUseIn)
@@ -92,7 +92,7 @@ The storage type of ``cos`` output is always dense
 
 MXNET_OPERATOR_REGISTER_BINARY_WITH_SPARSE_CPU(_backward_cos, unary_bwd<mshadow_op::cos_grad>)
 .set_attr<nnvm::FGradient>("FGradient",
-    [](const nnvm::NodePtr& n, const std::vector<nnvm::NodeEntry>& ograds) {
+    [](const nnvm::ObjectPtr& n, const std::vector<nnvm::NodeEntry>& ograds) {
       // ograds[0]: head_grad_grads (dL/dx_grad)
       // inputs[0]: dL/dy
      // inputs[1]: x (ElemwiseGradUseIn)
@@ -142,7 +142,7 @@ The storage type of ``tan`` output depends upon the input storage type:
 
 MXNET_OPERATOR_REGISTER_BINARY_WITH_SPARSE_CPU_DR(_backward_tan, unary_bwd<mshadow_op::tan_grad>)
 .set_attr<nnvm::FGradient>("FGradient",
-  [](const nnvm::NodePtr& n, const std::vector<nnvm::NodeEntry>& ograds) {
+  [](const nnvm::ObjectPtr& n, const std::vector<nnvm::NodeEntry>& ograds) {
       // NodeEntry{n} : y_grad * f'(x)
       // n->inputs[0] : y_grad (dL/dy)
       // n->inputs[1] : y = f(x) = tan(x) (ElemwiseGradUseOut)
@@ -190,7 +190,7 @@ The storage type of ``arcsin`` output depends upon the input storage type:
 MXNET_OPERATOR_REGISTER_BINARY_WITH_SPARSE_CPU_DR(_backward_arcsin,
                                                   unary_bwd<mshadow_op::arcsin_grad>)
 .set_attr<nnvm::FGradient>("FGradient",
-    [](const nnvm::NodePtr& n, const std::vector<nnvm::NodeEntry>& ograds) {
+    [](const nnvm::ObjectPtr& n, const std::vector<nnvm::NodeEntry>& ograds) {
       // ograds[0]: head_grad_grads (dL/dxgrad)
       // inputs[0]: dL/dy
       // inputs[1]: x (ElemwiseGradUseIn)
@@ -233,7 +233,7 @@ The storage type of ``arccos`` output is always dense
 MXNET_OPERATOR_REGISTER_BINARY_WITH_SPARSE_CPU_DR(_backward_arccos,
                                                   unary_bwd<mshadow_op::arccos_grad>)
 .set_attr<nnvm::FGradient>("FGradient",
-    [](const nnvm::NodePtr& n, const std::vector<nnvm::NodeEntry>& ograds) {
+    [](const nnvm::ObjectPtr& n, const std::vector<nnvm::NodeEntry>& ograds) {
       // ograds[0]: head_grad_grads (dL/dxgrad)
       // inputs[0]: dL/dy
       // inputs[1]: x (ElemwiseGradUseIn)
@@ -279,7 +279,7 @@ The storage type of ``arctan`` output depends upon the input storage type:
 MXNET_OPERATOR_REGISTER_BINARY_WITH_SPARSE_CPU_DR(_backward_arctan,
                                                   unary_bwd<mshadow_op::arctan_grad>)
 .set_attr<nnvm::FGradient>("FGradient",
-    [](const nnvm::NodePtr& n, const std::vector<nnvm::NodeEntry>& ograds) {
+    [](const nnvm::ObjectPtr& n, const std::vector<nnvm::NodeEntry>& ograds) {
       // ograds[0]: head_grad_grads (dL/dxgrad)
       // inputs[0]: dL/dy
       // inputs[1]: x (ElemwiseGradUseIn)
@@ -364,7 +364,7 @@ The storage type of ``sinh`` output depends upon the input storage type:
 
 MXNET_OPERATOR_REGISTER_BINARY_WITH_SPARSE_CPU_DR(_backward_sinh, unary_bwd<mshadow_op::sinh_grad>)
 .set_attr<nnvm::FGradient>("FGradient",
-    [](const nnvm::NodePtr& n, const std::vector<nnvm::NodeEntry>& ograds) {
+    [](const nnvm::ObjectPtr& n, const std::vector<nnvm::NodeEntry>& ograds) {
       // ograds[0]: head_grad_grads (dL/dxgrad)
       // inputs[0]: dL/dy
      // inputs[1]: x (ElemwiseGradUseIn)
@@ -402,7 +402,7 @@ The storage type of ``cosh`` output is always dense
 
 MXNET_OPERATOR_REGISTER_BINARY_WITH_SPARSE_CPU(_backward_cosh, unary_bwd<mshadow_op::cosh_grad>)
 .set_attr<nnvm::FGradient>("FGradient",
-    [](const nnvm::NodePtr& n, const std::vector<nnvm::NodeEntry>& ograds) {
+    [](const nnvm::ObjectPtr& n, const std::vector<nnvm::NodeEntry>& ograds) {
       // ograds[0]: head_grad_grads (dL/dxgrad)
       // inputs[0]: dL/dy
      // inputs[1]: x (ElemwiseGradUseIn)
@@ -444,7 +444,7 @@ The storage type of ``tanh`` output depends upon the input storage type:
 
 MXNET_OPERATOR_REGISTER_BINARY_WITH_SPARSE_CPU_DR(_backward_tanh, unary_bwd<mshadow_op::tanh_grad>)
 .set_attr<nnvm::FGradient>("FGradient",
-  [](const nnvm::NodePtr& n, const std::vector<nnvm::NodeEntry>& ograds) {
+  [](const nnvm::ObjectPtr& n, const std::vector<nnvm::NodeEntry>& ograds) {
       // NodeEntry{n} : y_grad * f'(x)
       // n->inputs[0] : y_grad (dL/dy)
       // n->inputs[1] : y = f(x) = tanh(x) (ElemwiseGradUseOut)
@@ -488,7 +488,7 @@ The storage type of ``arcsinh`` output depends upon the input storage type:
 MXNET_OPERATOR_REGISTER_BINARY_WITH_SPARSE_CPU_DR(_backward_arcsinh,
                                                   unary_bwd<mshadow_op::arcsinh_grad>)
 .set_attr<nnvm::FGradient>("FGradient",
-    [](const nnvm::NodePtr& n, const std::vector<nnvm::NodeEntry>& ograds) {
+    [](const nnvm::ObjectPtr& n, const std::vector<nnvm::NodeEntry>& ograds) {
       // ograds[0]: head_grad_grads (dL/dxgrad)
       // inputs[0]: dL/dy
       // inputs[1]: x (ElemwiseGradUseIn)
@@ -526,7 +526,7 @@ The storage type of ``arccosh`` output is always dense
 MXNET_OPERATOR_REGISTER_BINARY_WITH_SPARSE_CPU_DR(_backward_arccosh,
                                                   unary_bwd<mshadow_op::arccosh_grad>)
 .set_attr<nnvm::FGradient>("FGradient",
-    [](const nnvm::NodePtr& n, const std::vector<nnvm::NodeEntry>& ograds) {
+    [](const nnvm::ObjectPtr& n, const std::vector<nnvm::NodeEntry>& ograds) {
       // ograds[0]: head_grad_grads (dL/dxgrad)
       // inputs[0]: dL/dy
       // inputs[1]: x (ElemwiseGradUseIn)
@@ -567,7 +567,7 @@ The storage type of ``arctanh`` output depends upon the input storage type:
 MXNET_OPERATOR_REGISTER_BINARY_WITH_SPARSE_CPU_DR(_backward_arctanh,
                                                   unary_bwd<mshadow_op::arctanh_grad>)
 .set_attr<nnvm::FGradient>("FGradient",
-    [](const nnvm::NodePtr& n, const std::vector<nnvm::NodeEntry>& ograds) {
+    [](const nnvm::ObjectPtr& n, const std::vector<nnvm::NodeEntry>& ograds) {
       // ograds[0]: head_grad_grads (dL/dxgrad)
       // inputs[0]: dL/dy
       // inputs[1]: x (ElemwiseGradUseIn)
diff --git a/src/operator/tensor/indexing_op.cc b/src/operator/tensor/indexing_op.cc
index b802bf2..9f988e1 100644
--- a/src/operator/tensor/indexing_op.cc
+++ b/src/operator/tensor/indexing_op.cc
@@ -614,7 +614,7 @@ The storage type of weight can be either row_sparse or default.
 .set_attr<FCompute>("FCompute<cpu>", EmbeddingOpForward<cpu>)
 .set_attr<FComputeEx>("FComputeEx<cpu>", SparseEmbeddingOpForwardEx<cpu>)
 .set_attr<nnvm::FGradient>("FGradient",
-  [](const nnvm::NodePtr& n, const std::vector<nnvm::NodeEntry>& ograds) {
+  [](const nnvm::ObjectPtr& n, const std::vector<nnvm::NodeEntry>& ograds) {
     return MakeNonlossGradNode("_backward_Embedding", n, ograds,
                                {n->inputs[0]}, n->attrs.dict);
   })
@@ -690,7 +690,7 @@ Examples::
 .set_attr<FInferStorageType>("FInferStorageType", SparseEmbeddingOpForwardStorageType)
 .set_attr<FComputeEx>("FComputeEx<cpu>", SparseEmbeddingOpForwardEx<cpu>)
 .set_attr<nnvm::FGradient>("FGradient",
-  [](const nnvm::NodePtr& n, const std::vector<nnvm::NodeEntry>& ograds) {
+  [](const nnvm::ObjectPtr& n, const std::vector<nnvm::NodeEntry>& ograds) {
     return MakeNonlossGradNode("_backward_SparseEmbedding", n, ograds,
                                {n->inputs[0]}, n->attrs.dict);
   })
@@ -793,7 +793,7 @@ The storage type of ``take`` output depends upon the input storage type:
 .set_attr<FCompute>("FCompute<cpu>", TakeOpForward<cpu>)
 .set_attr<FComputeEx>("FComputeEx<cpu>", TakeOpForwardEx<cpu>)
 .set_attr<nnvm::FGradient>("FGradient",
-  [](const nnvm::NodePtr& n,  const std::vector<nnvm::NodeEntry>& ograds) {
+  [](const nnvm::ObjectPtr& n,  const std::vector<nnvm::NodeEntry>& ograds) {
     return MakeNonlossGradNode("_backward_take", n, ograds,
                                {n->inputs[1]}, n->attrs.dict);
   })
@@ -937,7 +937,7 @@ Examples::
   })
 .set_attr<FCompute>("FCompute<cpu>", GatherNDForwardCPU)
 .set_attr<nnvm::FGradient>("FGradient",
-  [](const nnvm::NodePtr& n, const std::vector<nnvm::NodeEntry>& ograds) {
+  [](const nnvm::ObjectPtr& n, const std::vector<nnvm::NodeEntry>& ograds) {
     auto p = nnvm::Node::Create();
     p->attrs.op = nnvm::Op::Get("_backward_gather_nd");
     p->attrs.name = n->attrs.name + "_backward";
@@ -1012,7 +1012,7 @@ Examples::
 .set_attr<nnvm::FInferType>("FInferType", ScatterNDType)
 .set_attr<FCompute>("FCompute<cpu>", ScatterNDForward<cpu>)
 .set_attr<nnvm::FGradient>("FGradient",
-  [](const nnvm::NodePtr& n, const std::vector<nnvm::NodeEntry>& ograds) {
+  [](const nnvm::ObjectPtr& n, const std::vector<nnvm::NodeEntry>& ograds) {
     auto p = nnvm::Node::Create();
     p->attrs.op = nnvm::Op::Get("gather_nd");
     p->attrs.name = n->attrs.name + "_backward";
@@ -1075,7 +1075,7 @@ Examples::
 .set_attr<nnvm::FInferType>("FInferType", ScatterNDType)
 .set_attr<FCompute>("FCompute<cpu>", GatherNDBackward<cpu>)
 .set_attr<nnvm::FGradient>("FGradient",
-  [](const nnvm::NodePtr& n, const std::vector<nnvm::NodeEntry>& ograds) {
+  [](const nnvm::ObjectPtr& n, const std::vector<nnvm::NodeEntry>& ograds) {
     auto p = nnvm::Node::Create();
     p->attrs.op = nnvm::Op::Get("gather_nd");
     p->attrs.name = n->attrs.name + "_backward";
diff --git a/src/operator/tensor/la_op.h b/src/operator/tensor/la_op.h
index 5fe7a92..e15390e 100644
--- a/src/operator/tensor/la_op.h
+++ b/src/operator/tensor/la_op.h
@@ -929,7 +929,7 @@ void LaOpDetBackward(const nnvm::NodeAttrs& attrs,
 template<int onum>
 struct ReduceDetGrad {
   const char *op_name;
-  std::vector<nnvm::NodeEntry> operator()(const nnvm::NodePtr& n,
+  std::vector<nnvm::NodeEntry> operator()(const nnvm::ObjectPtr& n,
                                           const std::vector<nnvm::NodeEntry>& ograds) {
     std::vector<nnvm::NodeEntry> heads;
     heads.push_back(ograds[onum - 1]);
diff --git a/src/operator/tensor/matrix_op.cc b/src/operator/tensor/matrix_op.cc
index 15b954f..f00caf3 100644
--- a/src/operator/tensor/matrix_op.cc
+++ b/src/operator/tensor/matrix_op.cc
@@ -332,7 +332,7 @@ Examples::
 .set_attr<mxnet::FInferShape>("FInferShape", TransposeShape)
 .set_attr<nnvm::FInferType>("FInferType", ElemwiseType<1, 1>)
 .set_attr<nnvm::FGradient>("FGradient",
-  [](const nnvm::NodePtr& n, const std::vector<nnvm::NodeEntry>& ograds) {
+  [](const nnvm::ObjectPtr& n, const std::vector<nnvm::NodeEntry>& ograds) {
     const TransposeParam& param = nnvm::get<TransposeParam>(n->attrs.parsed);
     if (param.axes.ndim() == 0) {
       return MakeNonlossGradNode(
diff --git a/src/operator/tensor/ordering_op.cc b/src/operator/tensor/ordering_op.cc
index b54986f..69af70b 100644
--- a/src/operator/tensor/ordering_op.cc
+++ b/src/operator/tensor/ordering_op.cc
@@ -74,7 +74,7 @@ Examples::
 .set_attr<nnvm::FNumVisibleOutputs>("FNumVisibleOutputs", TopKNumVisibleOutputs)
 .set_attr<FCompute>("FCompute<cpu>", TopK<cpu>)
 .set_attr<nnvm::FGradient>("FGradient",
-  [](const nnvm::NodePtr& n, const std::vector<nnvm::NodeEntry>& ograds) {
+  [](const nnvm::ObjectPtr& n, const std::vector<nnvm::NodeEntry>& ograds) {
     const TopKParam& param = nnvm::get<TopKParam>(n->attrs.parsed);
     if (param.ret_typ == topk_enum::kReturnValue || param.ret_typ == topk_enum::kReturnBoth) {
       std::vector<nnvm::NodeEntry> inputs;
@@ -138,7 +138,7 @@ Examples::
 .set_attr<nnvm::FNumVisibleOutputs>("FNumVisibleOutputs", [](const NodeAttrs& attrs) { return 1; })
 .set_attr<FCompute>("FCompute<cpu>", Sort<cpu>)
 .set_attr<nnvm::FGradient>("FGradient",
-  [](const nnvm::NodePtr& n, const std::vector<nnvm::NodeEntry>& ograds) {
+  [](const nnvm::ObjectPtr& n, const std::vector<nnvm::NodeEntry>& ograds) {
     const SortParam& param = nnvm::get<SortParam>(n->attrs.parsed);
     std::vector<nnvm::NodeEntry> inputs;
     uint32_t n_out = n->num_outputs();
diff --git a/src/operator/tensor/sparse_retain.cc b/src/operator/tensor/sparse_retain.cc
index b1f38c6..d0db53b 100644
--- a/src/operator/tensor/sparse_retain.cc
+++ b/src/operator/tensor/sparse_retain.cc
@@ -62,7 +62,7 @@ The storage type of ``retain`` output depends on storage types of inputs
 .set_attr<FInferStorageType>("FInferStorageType", SparseRetainForwardInferStorageType)
 .set_attr<FComputeEx>("FComputeEx<cpu>", SparseRetainOpForwardEx<cpu>)
 .set_attr<nnvm::FGradient>("FGradient",
-  [](const nnvm::NodePtr& n, const std::vector<nnvm::NodeEntry>& ograds) {
+  [](const nnvm::ObjectPtr& n, const std::vector<nnvm::NodeEntry>& ograds) {
     return MakeNonlossGradNode("_backward_sparse_retain", n, ograds,
                                {n->inputs[sr::kIdx]}, n->attrs.dict);
   })
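
Most of the operator registrations in this commit touch only the FGradient lambda signature. As a final illustration, a hedged sketch of the common MakeNonlossGradNode form with the new alias; the op names and shape/type setters here are placeholders, not a real registration:

// Sketch only; "my_op" / "_backward_my_op" are placeholders.
NNVM_REGISTER_OP(my_op)
.set_num_inputs(1)
.set_num_outputs(1)
.set_attr<nnvm::FGradient>("FGradient",
  [](const nnvm::ObjectPtr& n, const std::vector<nnvm::NodeEntry>& ograds) {
    return MakeNonlossGradNode("_backward_my_op", n, ograds,
                               {n->inputs[0]}, n->attrs.dict);
  });
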
diff --git a/tests/cpp/include/test_core_op.h b/tests/cpp/include/test_core_op.h
index 2864961..bdf25ed 100644
--- a/tests/cpp/include/test_core_op.h
+++ b/tests/cpp/include/test_core_op.h
@@ -126,8 +126,8 @@ class CoreOpExecutor : public test::op::OperatorDataInitializer<DType>
     return array;
   }
 
-  nnvm::NodePtr MakeNode() const {
-    nnvm::NodePtr node = nnvm::Node::Create();
+  nnvm::ObjectPtr MakeNode() const {
+    nnvm::ObjectPtr node = nnvm::Node::Create();
     node->attrs = attrs_;
     return node;
   }
@@ -299,7 +299,7 @@ class CoreOpExecutor : public test::op::OperatorDataInitializer<DType>
     return foo::kFlag;
   }
 
-  nnvm::NodePtr GetBackwardDependency(const nnvm::NodePtr& node,
+  nnvm::ObjectPtr GetBackwardDependency(const nnvm::ObjectPtr& node,
                                       std::map<int, const NDArray *>* index2array) const {
     index2array->clear();
     static auto& fgradient = nnvm::Op::GetAttr<nnvm::FGradient>("FGradient");
@@ -331,8 +331,8 @@ class CoreOpExecutor : public test::op::OperatorDataInitializer<DType>
     return nullptr;
   }
 
-  nnvm::NodePtr CalcBackwardPass(std::map<int, const NDArray *> *index2array) const {
-    nnvm::NodePtr node = nnvm::Node::Create();
+  nnvm::ObjectPtr CalcBackwardPass(std::map<int, const NDArray *> *index2array) const {
+    nnvm::ObjectPtr node = nnvm::Node::Create();
     node->attrs = attrs_;
     return GetBackwardDependency(node, index2array);
   }
@@ -346,7 +346,7 @@ class CoreOpExecutor : public test::op::OperatorDataInitializer<DType>
             const std::vector<NDArray>& inputs = {},
             const std::vector<NDArray>& outputs = {},
             const CoreOpExecutor *backward_for_op = nullptr,
-            nnvm::NodePtr bwd_node_ptr = nullptr
+            nnvm::ObjectPtr bwd_node_ptr = nullptr
   ) {
     if (!initialized_) {
       initialized_ = true;
@@ -366,7 +366,7 @@ class CoreOpExecutor : public test::op::OperatorDataInitializer<DType>
       CHECK_NOTNULL(op_);
 
       std::map<int, const NDArray *> index2array;
-      nnvm::NodePtr bwd_node_ptr;
+      nnvm::ObjectPtr bwd_node_ptr;
       if (backward_for_op) {
         bwd_node_ptr = backward_for_op->CalcBackwardPass(&index2array);
       }