Posted to commits@mxnet.apache.org by la...@apache.org on 2020/04/16 03:20:08 UTC

[incubator-mxnet] branch revert-17831-colstk_ffi created (now a766853)

This is an automated email from the ASF dual-hosted git repository.

lausen pushed a change to branch revert-17831-colstk_ffi
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git.


      at a766853  Revert "[numpy] add new ffi for column_stack and hstack (#17831)"

This branch includes the following new commits:

     new a766853  Revert "[numpy] add new ffi for column_stack and hstack (#17831)"

The 1 revision listed above as "new" is entirely new to this
repository and will be described in a separate email.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.



[incubator-mxnet] 01/01: Revert "[numpy] add new ffi for column_stack and hstack (#17831)"

Posted by la...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

lausen pushed a commit to branch revert-17831-colstk_ffi
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git

commit a766853e87e9115c7fb9b5f89c39c583e83035a9
Author: Leonard Lausen <le...@lausen.nl>
AuthorDate: Wed Apr 15 20:19:30 2020 -0700

    Revert "[numpy] add new ffi for column_stack and hstack (#17831)"
    
    This reverts commit 94f235d5385773ce8a4f21957d9946deed15b7f3.
---
 benchmark/python/ffi/benchmark_ffi.py  |  2 --
 python/mxnet/ndarray/numpy/_op.py      |  4 ++--
 src/api/operator/numpy/np_matrix_op.cc | 41 ----------------------------------
 src/operator/numpy/np_matrix_op-inl.h  |  5 -----
 4 files changed, 2 insertions(+), 50 deletions(-)

diff --git a/benchmark/python/ffi/benchmark_ffi.py b/benchmark/python/ffi/benchmark_ffi.py
index c8255fe..8f8da67 100644
--- a/benchmark/python/ffi/benchmark_ffi.py
+++ b/benchmark/python/ffi/benchmark_ffi.py
@@ -137,8 +137,6 @@ def prepare_workloads():
                            out=dnp.array([False, False], dtype=bool), keepdims=False)
     OpArgMngr.add_workload("roll", pool["2x2"], 1, axis=0)
     OpArgMngr.add_workload("rot90", pool["2x2"], 2)
-    OpArgMngr.add_workload("column_stack", (pool['3x3'], pool['3x3'], pool['3x3']))
-    OpArgMngr.add_workload("hstack", (pool['3x3'], pool['3x3'], pool['3x3']))
     OpArgMngr.add_workload("triu", pool['3x3'])
     OpArgMngr.add_workload("array_split", pool['2x2'], 2, axis=1)
     OpArgMngr.add_workload("vsplit", pool['2x2'], 2)
diff --git a/python/mxnet/ndarray/numpy/_op.py b/python/mxnet/ndarray/numpy/_op.py
index f1341f6..d1b80ca 100644
--- a/python/mxnet/ndarray/numpy/_op.py
+++ b/python/mxnet/ndarray/numpy/_op.py
@@ -4341,7 +4341,7 @@ def column_stack(tup):
            [2., 3.],
            [3., 4.]])
     """
-    return _api_internal.column_stack(*tup)
+    return _npi.column_stack(*tup)
 
 
 @set_module('mxnet.ndarray.numpy')
@@ -4380,7 +4380,7 @@ def hstack(arrays):
            [2., 3.],
            [3., 4.]])
     """
-    return _api_internal.hstack(*arrays)
+    return _npi.hstack(*arrays)
 
 
 @set_module('mxnet.ndarray.numpy')
diff --git a/src/api/operator/numpy/np_matrix_op.cc b/src/api/operator/numpy/np_matrix_op.cc
index 998823d..c8870e3 100644
--- a/src/api/operator/numpy/np_matrix_op.cc
+++ b/src/api/operator/numpy/np_matrix_op.cc
@@ -23,7 +23,6 @@
  */
 #include <mxnet/api_registry.h>
 #include <mxnet/runtime/packed_func.h>
-#include <vector>
 #include "../utils.h"
 #include "../../../operator/nn/concat-inl.h"
 #include "../../../operator/tensor/matrix_op-inl.h"
@@ -201,46 +200,6 @@ MXNET_REGISTER_API("_npi.rot90")
   *ret = ndoutputs[0];
 });
 
-MXNET_REGISTER_API("_npi.column_stack")
-.set_body([](runtime::MXNetArgs args, runtime::MXNetRetValue* ret) {
-  using namespace runtime;
-  const nnvm::Op* op = Op::Get("_npi_column_stack");
-  nnvm::NodeAttrs attrs;
-  op::NumpyColumnStackParam param;
-  param.num_args = args.size();
-
-  attrs.parsed = param;
-  attrs.op = op;
-  SetAttrDict<op::NumpyColumnStackParam>(&attrs);
-  int num_outputs = 0;
-  std::vector<NDArray*> inputs;
-  for (int i = 0; i < param.num_args; ++i) {
-    inputs.push_back(args[i].operator mxnet::NDArray*());
-  }
-  auto ndoutputs = Invoke(op, &attrs, param.num_args, &inputs[0], &num_outputs, nullptr);
-  *ret = ndoutputs[0];
-});
-
-MXNET_REGISTER_API("_npi.hstack")
-.set_body([](runtime::MXNetArgs args, runtime::MXNetRetValue* ret) {
-  using namespace runtime;
-  const nnvm::Op* op = Op::Get("_npi_hstack");
-  nnvm::NodeAttrs attrs;
-  op::ConcatParam param;
-  param.num_args = args.size();
-
-  attrs.parsed = param;
-  attrs.op = op;
-  SetAttrDict<op::ConcatParam>(&attrs);
-  int num_outputs = 0;
-  std::vector<NDArray*> inputs;
-  for (int i = 0; i < param.num_args; ++i) {
-    inputs.push_back(args[i].operator mxnet::NDArray*());
-  }
-  auto ndoutputs = Invoke(op, &attrs, param.num_args, &inputs[0], &num_outputs, nullptr);
-  *ret = ndoutputs[0];
-});
-
 MXNET_REGISTER_API("_npi.array_split")
 .set_body([](runtime::MXNetArgs args, runtime::MXNetRetValue* ret) {
   using namespace runtime;
diff --git a/src/operator/numpy/np_matrix_op-inl.h b/src/operator/numpy/np_matrix_op-inl.h
index 09eb10c..57b1c33 100644
--- a/src/operator/numpy/np_matrix_op-inl.h
+++ b/src/operator/numpy/np_matrix_op-inl.h
@@ -63,11 +63,6 @@ struct NumpyColumnStackParam : public dmlc::Parameter<NumpyColumnStackParam> {
     DMLC_DECLARE_FIELD(num_args).set_lower_bound(1)
     .describe("Number of inputs to be column stacked");
   }
-  void SetAttrDict(std::unordered_map<std::string, std::string>* dict) {
-    std::ostringstream ss;
-    ss << num_args;
-    (*dict)["num_args"] = ss.str();
-  }
 };
 
 struct NumpyReshapeParam : public dmlc::Parameter<NumpyReshapeParam> {
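
For context on the user-facing effect, here is a minimal usage sketch (not part of the patch, and assuming an MXNet build that includes this revert): the public mxnet.numpy API for column_stack and hstack is unchanged; the revert only moves the internal dispatch path back from the new FFI (_api_internal) to the legacy _npi frontend and drops the corresponding FFI benchmark workloads.

    # Minimal sketch; values mirror the docstring examples touched in _op.py.
    from mxnet import numpy as dnp  # MXNet's NumPy-compatible module

    a = dnp.array([1., 2., 3.])
    b = dnp.array([2., 3., 4.])

    # Stacks 1-D arrays as columns of a 2-D array -> shape (3, 2)
    print(dnp.column_stack((a, b)))

    # Concatenates 1-D arrays along the first axis -> shape (6,)
    print(dnp.hstack((a, b)))

Either call now routes through _npi.column_stack / _npi.hstack on this branch instead of the reverted _api_internal entry points registered in np_matrix_op.cc.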