You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@tvm.apache.org by GitBox <gi...@apache.org> on 2021/02/18 02:52:29 UTC

[GitHub] [tvm] tkonolige commented on a change in pull request #7435: [TOPI] Sparse Add Op added

tkonolige commented on a change in pull request #7435:
URL: https://github.com/apache/tvm/pull/7435#discussion_r578089073



##########
File path: python/tvm/relay/op/nn/nn.py
##########
@@ -2148,6 +2148,39 @@ def sparse_transpose(x):
     return expr.TupleWrapper(_make.sparse_transpose(x[0], x[1], x[2]), 3)
 
 
+# pylint: disable=no-else-return,inconsistent-return-statements
+def sparse_add(dense_mat, sparse_mat):
+    r"""
+    Computes the matrix addition of `dense_mat` and `sparse_mat`, where `dense_mat` is
+    a dense matrix and `sparse_mat` is a sparse (either BSR or CSR) namedtuple with
+    fields `data`, `indices`, and `indptr`.
+
+    .. math::
+
+        \mbox{sparse\_add}(D, S)[m, n] = \mbox{as\_dense}(S)[m, n] + D[m, n]
+
+    where `D` is the dense matrix `dense_mat`, `S` is the sparse matrix `sparse_mat`,
+    and `as_dense` returns the dense equivalent of the given sparse matrix.
+
+    Parameters
+    ----------
+    dense_mat : tvm.relay.Expr
+        The input dense matrix for the matrix addition
+
+    sparse_mat : Union[namedtuple, Tuple[ndarray, ndarray, ndarray]]
+        The input sparse matrix for the matrix addition.
+
+    Returns
+    -------
+    result: tvm.relay.Expr
+        The computed result.

Review comment:
       Good documentation! Could you add an example?

##########
File path: python/tvm/relay/frontend/tensorflow.py
##########
@@ -1020,6 +1020,38 @@ def _impl(inputs, attr, params, mod):
     return _impl
 
 
+def _sparse_tensor_dense_add():
+    # Sparse utility from scipy
+    from scipy.sparse import csr_matrix
+
+    def _impl(inputs, attr, params, mod):
+        assert len(inputs) == 4, "There should be 4 input tensors"

Review comment:
       What are the input tensors? (Please document them in the assert message.)

##########
File path: python/tvm/relay/op/nn/_nn.py
##########
@@ -142,6 +142,11 @@ def alter_op_layout_sparse_dense(attrs, inputs, tinfos, out_type):
     return topi.nn.sparse_dense_alter_layout(attrs, inputs, tinfos, out_type)
 
 
+# sparse_add
+reg.register_strategy("nn.sparse_add", strategy.sparse_add_strategy)
+reg.register_pattern("nn.sparse_add", reg.OpPattern.ELEMWISE)

Review comment:
       I think this should be `reg.OpPattern.OPAQUE` instead of `reg.OpPattern.ELEMWISE`. I don't think te.extern is fusable.

##########
File path: python/tvm/topi/nn/sparse.py
##########
@@ -356,3 +356,67 @@ def sparse_dense_alter_layout(_attrs, _inputs, _tinfos, _out_type):
     Unlike other TOPI functions, this function operates on both graph level and operator level.
     """
     return None
+
+
+def sparse_add(dense_data, sparse_data, sparse_indices, sparse_indptr):
+    """
+    Computes sparse-dense addition
+
+    Parameters
+    ----------
+    dense_data : tvm.te.Tensor
+        2-D with shape [M, N], float32
+
+    sparse_data : tvm.te.Tensor
+        1-D with shape [nnz] (CSR)
+
+    sparse_indices : tvm.te.Tensor
+        1-D with shape [nnz] (CSR)
+
+    sparse_indptr : tvm.te.Tensor
+        1-D with shape [M + 1] (CSR)
+
+    Returns
+    -------
+    output : tvm.te.Tensor
+        2-D with shape [M, N]
+    """
+    # TODO(ANSHUMAN87): support BSR format too
+    assert len(sparse_data.shape) == 1

Review comment:
       Could you add an error message to this assert?

##########
File path: src/relay/op/nn/sparse.cc
##########
@@ -196,5 +196,43 @@ RELAY_REGISTER_OP("nn.sparse_transpose")
     .set_support_level(1)
     .add_type_rel("SparseTranspose", SparseTransposeRel);
 
+// relay.nn.sparse_add
+bool SparseAddRel(const Array<Type>& types, int num_inputs, const Attrs& attrs,
+                  const TypeReporter& reporter) {
+  ICHECK_EQ(types.size(), 5);
+  const auto* dense_data = types[0].as<TensorTypeNode>();
+  const auto* sparse_data = types[1].as<TensorTypeNode>();
+  ICHECK_EQ(sparse_data->shape.size(), 1);
+  const auto* sparse_indices = types[2].as<TensorTypeNode>();
+  ICHECK_EQ(sparse_indices->shape.size(), 1);

Review comment:
       I think this could also be user-facing. (see above).

##########
File path: src/relay/op/nn/sparse.cc
##########
@@ -196,5 +196,43 @@ RELAY_REGISTER_OP("nn.sparse_transpose")
     .set_support_level(1)
     .add_type_rel("SparseTranspose", SparseTransposeRel);
 
+// relay.nn.sparse_add
+bool SparseAddRel(const Array<Type>& types, int num_inputs, const Attrs& attrs,
+                  const TypeReporter& reporter) {
+  ICHECK_EQ(types.size(), 5);
+  const auto* dense_data = types[0].as<TensorTypeNode>();
+  const auto* sparse_data = types[1].as<TensorTypeNode>();
+  ICHECK_EQ(sparse_data->shape.size(), 1);
+  const auto* sparse_indices = types[2].as<TensorTypeNode>();
+  ICHECK_EQ(sparse_indices->shape.size(), 1);
+
+  reporter->Assign(types[4], TensorType(dense_data->shape, dense_data->dtype));
+  return true;
+}
+
+Expr MakeSparseAdd(Expr dense_data, Expr sparse_data, Expr sparse_indices, Expr sparse_indptr) {
+  static const Op& op = Op::Get("nn.sparse_add");
+  return Call(op, {dense_data, sparse_data, sparse_indices, sparse_indptr}, Attrs(), {});
+}
+
+TVM_REGISTER_GLOBAL("relay.op.nn._make.sparse_add").set_body_typed(MakeSparseAdd);
+
+RELAY_REGISTER_OP("nn.sparse_add")
+    .describe(R"code(Add a dense matrix X with sparse matrix Y.
+
+- **dense**: `(M, N)`
+- **sparse**: `(M, N)`
+
+- **out**: `(M, N)`.
+
+)code" TVM_ADD_FILELINE)
+    .set_num_inputs(4)
+    .add_argument("dense_data", "2D Tensor", "Dense data matrix.")
+    .add_argument("sparse_data", "1D Tensor", "Sparse data matrix.")
+    .add_argument("sparse_indices", "1D Tensor", "Sparse indices matrix.")
+    .add_argument("sparse_indptr", "1D Tensor", "Sparse index pointer matrix.")

Review comment:
       ```suggestion
       .add_argument("sparse_indptr", "1D Tensor", "Sparse index pointer vector.")
   ```

##########
File path: src/relay/op/nn/sparse.cc
##########
@@ -196,5 +196,43 @@ RELAY_REGISTER_OP("nn.sparse_transpose")
     .set_support_level(1)
     .add_type_rel("SparseTranspose", SparseTransposeRel);
 
+// relay.nn.sparse_add
+bool SparseAddRel(const Array<Type>& types, int num_inputs, const Attrs& attrs,
+                  const TypeReporter& reporter) {
+  ICHECK_EQ(types.size(), 5);
+  const auto* dense_data = types[0].as<TensorTypeNode>();
+  const auto* sparse_data = types[1].as<TensorTypeNode>();
+  ICHECK_EQ(sparse_data->shape.size(), 1);
+  const auto* sparse_indices = types[2].as<TensorTypeNode>();
+  ICHECK_EQ(sparse_indices->shape.size(), 1);
+
+  reporter->Assign(types[4], TensorType(dense_data->shape, dense_data->dtype));
+  return true;
+}
+
+Expr MakeSparseAdd(Expr dense_data, Expr sparse_data, Expr sparse_indices, Expr sparse_indptr) {
+  static const Op& op = Op::Get("nn.sparse_add");
+  return Call(op, {dense_data, sparse_data, sparse_indices, sparse_indptr}, Attrs(), {});
+}
+
+TVM_REGISTER_GLOBAL("relay.op.nn._make.sparse_add").set_body_typed(MakeSparseAdd);
+
+RELAY_REGISTER_OP("nn.sparse_add")
+    .describe(R"code(Add a dense matrix X with sparse matrix Y.
+
+- **dense**: `(M, N)`
+- **sparse**: `(M, N)`
+
+- **out**: `(M, N)`.
+
+)code" TVM_ADD_FILELINE)
+    .set_num_inputs(4)
+    .add_argument("dense_data", "2D Tensor", "Dense data matrix.")
+    .add_argument("sparse_data", "1D Tensor", "Sparse data matrix.")

Review comment:
       ```suggestion
       .add_argument("sparse_data", "1D Tensor", "Sparse data vector.")
   ```

##########
File path: python/tvm/relay/op/strategy/generic.py
##########
@@ -799,6 +799,36 @@ def sparse_dense_padded_strategy(attrs, inputs, out_type, target):
     raise NotImplementedError("sparse_dense_padded is only implemented for cuda")
 
 
+# sparse_add
+def wrap_compute_sparse_add(topi_compute):
+    """wrap sparse add topi compute"""
+
+    def _compute_sparse_add(attrs, inputs, out_type):
+        return [topi_compute(inputs[0], inputs[1], inputs[2], inputs[3])]
+
+    return _compute_sparse_add
+
+
+@override_native_generic_func("sparse_add_strategy")
+def sparse_add_strategy(attrs, inputs, out_type, target):
+    """sparse add generic strategy"""
+    logger.warning("sparse add is not optimized for this platform.")
+    strategy = _op.OpStrategy()
+    strategy.add_implementation(
+        wrap_compute_sparse_add(topi.nn.sparse_add),
+        wrap_topi_schedule(topi.generic.schedule_sparse_add),

Review comment:
       ```suggestion
           wrap_topi_schedule(topi.generic.schedule_extern),
   ```
   Then you can delete all the unnecessary scheduling functions.

##########
File path: python/tvm/topi/nn/sparse.py
##########
@@ -356,3 +356,67 @@ def sparse_dense_alter_layout(_attrs, _inputs, _tinfos, _out_type):
     Unlike other TOPI functions, this function operates on both graph level and operator level.
     """
     return None
+
+
+def sparse_add(dense_data, sparse_data, sparse_indices, sparse_indptr):
+    """
+    Computes sparse-dense addition
+
+    Parameters
+    ----------
+    dense_data : tvm.te.Tensor
+        2-D with shape [M, N], float32
+
+    sparse_data : tvm.te.Tensor
+        1-D with shape [nnz] (CSR)
+
+    sparse_indices : tvm.te.Tensor
+        1-D with shape [nnz] (CSR)
+
+    sparse_indptr : tvm.te.Tensor
+        1-D with shape [M + 1] (CSR)
+
+    Returns
+    -------
+    output : tvm.te.Tensor
+        2-D with shape [M, N]
+    """
+    # TODO(ANSHUMAN87): support BSR format too
+    assert len(sparse_data.shape) == 1
+    return _sparse_add_csr(dense_data, sparse_data, sparse_indices, sparse_indptr)
+
+
+def _sparse_add_csr(dense_data_inp, sparse_data_inp, sparse_indices_inp, sparse_indptr_inp):
+    oshape = get_const_tuple(dense_data_inp.shape)
+
+    def _csr_add_ir(dense_data, sparse_data, sparse_indices, sparse_indptr, out_data):
+        irb = tvm.tir.ir_builder.create()
+        dense_data_ptr = irb.buffer_ptr(dense_data)
+        sparse_data_ptr = irb.buffer_ptr(sparse_data)
+        sparse_indices_ptr = irb.buffer_ptr(sparse_indices)
+        sparse_indptr_ptr = irb.buffer_ptr(sparse_indptr)
+
+        out_data_ptr = irb.buffer_ptr(out_data)
+
+        with irb.for_range(0, oshape[0], kind="serial", name="row") as row:
+            with irb.for_range(0, oshape[1], kind="serial", name="col") as col:
+                out_data_ptr[row, col] = dense_data_ptr[row, col]
+
+        with irb.for_range(0, oshape[0], kind="serial", name="row") as row:
+            offset = sparse_indptr_ptr[row]
+            diff = sparse_indptr_ptr[row + 1] - sparse_indptr_ptr[row]
+            with irb.for_range(0, diff, kind="serial", name="idx") as idx:
+                real_idx = offset + idx
+                col = sparse_indices_ptr[real_idx]
+                out_data_ptr[row, col] = sparse_data_ptr[real_idx] + out_data_ptr[row, col]
+
+        return irb.get()
+
+    return te.extern(
+        shape=oshape,
+        inputs=[dense_data_inp, sparse_data_inp, sparse_indices_inp, sparse_indptr_inp],
+        fcompute=lambda ins, outs: _csr_add_ir(ins[0], ins[1], ins[2], ins[3], outs[0]),
+        tag="sparse_add_csr",
+        dtype=["float32", "float32", "int32", "int32"],

Review comment:
       I think these dtypes should be inferred from the input.

##########
File path: src/relay/op/nn/sparse.cc
##########
@@ -196,5 +196,43 @@ RELAY_REGISTER_OP("nn.sparse_transpose")
     .set_support_level(1)
     .add_type_rel("SparseTranspose", SparseTransposeRel);
 
+// relay.nn.sparse_add
+bool SparseAddRel(const Array<Type>& types, int num_inputs, const Attrs& attrs,

Review comment:
       I think you also need to check if the sparse_data dtype is the same as the dense dtype.

##########
File path: src/relay/op/nn/sparse.cc
##########
@@ -196,5 +196,43 @@ RELAY_REGISTER_OP("nn.sparse_transpose")
     .set_support_level(1)
     .add_type_rel("SparseTranspose", SparseTransposeRel);
 
+// relay.nn.sparse_add
+bool SparseAddRel(const Array<Type>& types, int num_inputs, const Attrs& attrs,
+                  const TypeReporter& reporter) {
+  ICHECK_EQ(types.size(), 5);
+  const auto* dense_data = types[0].as<TensorTypeNode>();
+  const auto* sparse_data = types[1].as<TensorTypeNode>();
+  ICHECK_EQ(sparse_data->shape.size(), 1);
+  const auto* sparse_indices = types[2].as<TensorTypeNode>();
+  ICHECK_EQ(sparse_indices->shape.size(), 1);
+
+  reporter->Assign(types[4], TensorType(dense_data->shape, dense_data->dtype));
+  return true;
+}
+
+Expr MakeSparseAdd(Expr dense_data, Expr sparse_data, Expr sparse_indices, Expr sparse_indptr) {
+  static const Op& op = Op::Get("nn.sparse_add");
+  return Call(op, {dense_data, sparse_data, sparse_indices, sparse_indptr}, Attrs(), {});
+}
+
+TVM_REGISTER_GLOBAL("relay.op.nn._make.sparse_add").set_body_typed(MakeSparseAdd);
+
+RELAY_REGISTER_OP("nn.sparse_add")
+    .describe(R"code(Add a dense matrix X with sparse matrix Y.
+
+- **dense**: `(M, N)`
+- **sparse**: `(M, N)`
+
+- **out**: `(M, N)`.
+
+)code" TVM_ADD_FILELINE)
+    .set_num_inputs(4)
+    .add_argument("dense_data", "2D Tensor", "Dense data matrix.")
+    .add_argument("sparse_data", "1D Tensor", "Sparse data matrix.")
+    .add_argument("sparse_indices", "1D Tensor", "Sparse indices matrix.")

Review comment:
       ```suggestion
       .add_argument("sparse_indices", "1D Tensor", "Sparse indices vector.")
   ```

##########
File path: src/relay/op/nn/sparse.cc
##########
@@ -196,5 +196,43 @@ RELAY_REGISTER_OP("nn.sparse_transpose")
     .set_support_level(1)
     .add_type_rel("SparseTranspose", SparseTransposeRel);
 
+// relay.nn.sparse_add
+bool SparseAddRel(const Array<Type>& types, int num_inputs, const Attrs& attrs,
+                  const TypeReporter& reporter) {
+  ICHECK_EQ(types.size(), 5);
+  const auto* dense_data = types[0].as<TensorTypeNode>();
+  const auto* sparse_data = types[1].as<TensorTypeNode>();
+  ICHECK_EQ(sparse_data->shape.size(), 1);

Review comment:
       This should be a user-facing error. Can you use `reporter->AssertEq`?




----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
users@infra.apache.org