You are viewing a plain text version of this content; the canonical (HTML) version, including its hyperlink, is available in the original mailing-list archive.
Posted to commits@mxnet.apache.org by ha...@apache.org on 2020/03/18 18:38:08 UTC
[incubator-mxnet] branch master updated: * FFI for np.argmax and
np.argmin (#17843)
This is an automated email from the ASF dual-hosted git repository.
haoj pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git
The following commit(s) were added to refs/heads/master by this push:
new ea2320a * FFI for np.argmax and np.argmin (#17843)
ea2320a is described below
commit ea2320a897bdadfe0127e487f7ac776a3667e771
Author: dw_sjtu <46...@users.noreply.github.com>
AuthorDate: Thu Mar 19 02:37:29 2020 +0800
* FFI for np.argmax and np.argmin (#17843)
* impl - FFI for np_indices
* fix - use MXNetTypeWithBool2String
Co-authored-by: Ubuntu <ub...@ip-172-31-10-214.us-east-2.compute.internal>
---
benchmark/python/ffi/benchmark_ffi.py | 3 +
python/mxnet/ndarray/numpy/_op.py | 10 +--
..._init_op.cc => np_broadcast_reduce_op_index.cc} | 81 ++++++++++++----------
src/api/operator/numpy/np_init_op.cc | 32 +++++++++
src/operator/numpy/np_init_op.h | 8 +++
src/operator/tensor/broadcast_reduce_op.h | 7 ++
6 files changed, 100 insertions(+), 41 deletions(-)
diff --git a/benchmark/python/ffi/benchmark_ffi.py b/benchmark/python/ffi/benchmark_ffi.py
index 2587358..1983de5 100644
--- a/benchmark/python/ffi/benchmark_ffi.py
+++ b/benchmark/python/ffi/benchmark_ffi.py
@@ -59,6 +59,9 @@ def prepare_workloads():
OpArgMngr.add_workload("add", pool['2x2'], pool['2x2'])
OpArgMngr.add_workload("linalg.svd", pool['3x3'])
OpArgMngr.add_workload("split", pool['3x3'], (0, 1, 2), axis=1)
+ OpArgMngr.add_workload("argmax", pool['3x2'], axis=-1)
+ OpArgMngr.add_workload("argmin", pool['3x2'], axis=-1)
+ OpArgMngr.add_workload("indices", dimensions=(1, 2, 3))
OpArgMngr.add_workload("subtract", pool['2x2'], pool['2x2'])
OpArgMngr.add_workload("multiply", pool['2x2'], pool['2x2'])
OpArgMngr.add_workload("mod", pool['2x2'], pool['2x2'])
diff --git a/python/mxnet/ndarray/numpy/_op.py b/python/mxnet/ndarray/numpy/_op.py
index 1ce1bcd..32519d1 100644
--- a/python/mxnet/ndarray/numpy/_op.py
+++ b/python/mxnet/ndarray/numpy/_op.py
@@ -4529,7 +4529,7 @@ def argmax(a, axis=None, out=None):
>>> b
array([2., 2.])
"""
- return _npi.argmax(a, axis=axis, keepdims=False, out=out)
+ return _api_internal.argmax(a, axis, False, out)
@set_module('mxnet.ndarray.numpy')
@@ -4597,7 +4597,7 @@ def argmin(a, axis=None, out=None):
>>> b
array([0., 0.])
"""
- return _npi.argmin(a, axis=axis, keepdims=False, out=out)
+ return _api_internal.argmin(a, axis, False, out)
@set_module('mxnet.ndarray.numpy')
@@ -4945,8 +4945,10 @@ def indices(dimensions, dtype=_np.int32, ctx=None):
"""
if isinstance(dimensions, (tuple, list)):
if ctx is None:
- ctx = current_context()
- return _npi.indices(dimensions=dimensions, dtype=dtype, ctx=ctx)
+ ctx = str(current_context())
+ else:
+ ctx = str(ctx)
+ return _api_internal.indices(dimensions, dtype, ctx)
else:
raise ValueError("The dimensions must be sequence of ints")
# pylint: enable=redefined-outer-name
diff --git a/src/api/operator/numpy/np_init_op.cc b/src/api/operator/numpy/np_broadcast_reduce_op_index.cc
similarity index 53%
copy from src/api/operator/numpy/np_init_op.cc
copy to src/api/operator/numpy/np_broadcast_reduce_op_index.cc
index 4f7c6e4..aa24246 100644
--- a/src/api/operator/numpy/np_init_op.cc
+++ b/src/api/operator/numpy/np_broadcast_reduce_op_index.cc
@@ -18,74 +18,81 @@
*/
/*!
- * \file np_init_op.cc
- * \brief Implementation of the API of functions in src/operator/numpy/np_init_op.cc
+ * \file np_broadcast_reduce_op_index.cc
+ * \brief Implementation of the API of functions in
+ src/operator/numpy/np_broadcast_reduce_op_index.cc
*/
-#include <dmlc/optional.h>
#include <mxnet/api_registry.h>
#include <mxnet/runtime/packed_func.h>
#include "../utils.h"
-#include "../../../operator/tensor/init_op.h"
+#include "../../../operator/tensor/broadcast_reduce_op.h"
namespace mxnet {
-MXNET_REGISTER_API("_npi.zeros")
+MXNET_REGISTER_API("_npi.argmax")
.set_body([](runtime::MXNetArgs args, runtime::MXNetRetValue* ret) {
using namespace runtime;
- const nnvm::Op* op = Op::Get("_npi_zeros");
+ const nnvm::Op* op = Op::Get("_npi_argmax");
nnvm::NodeAttrs attrs;
- op::InitOpParam param;
- if (args[0].type_code() == kDLInt) {
- param.shape = TShape(1, args[0].operator int64_t());
- } else {
- param.shape = TShape(args[0].operator ObjectRef());
- }
+ op::ReduceAxisParam param;
+ // param.axis
if (args[1].type_code() == kNull) {
- param.dtype = mshadow::kFloat32;
+ param.axis = dmlc::nullopt;
} else {
- param.dtype = String2MXNetTypeWithBool(args[1].operator std::string());
+ param.axis = args[1].operator int();
}
+ // param.keepdims
+ param.keepdims = args[2].operator bool();
+
attrs.parsed = std::move(param);
attrs.op = op;
- SetAttrDict<op::InitOpParam>(&attrs);
- if (args[2].type_code() != kNull) {
- attrs.dict["ctx"] = args[2].operator std::string();
+ SetAttrDict<op::ReduceAxisParam>(&attrs);
+ // inputs
+ NDArray* inputs[] = {args[0].operator mxnet::NDArray*()};
+ int num_inputs = 1;
+ // outputs
+ NDArray* out = args[3].operator mxnet::NDArray*();
+ NDArray** outputs = out == nullptr ? nullptr : &out;
+ int num_outputs = out != nullptr;
+ auto ndoutputs = Invoke(op, &attrs, num_inputs, inputs, &num_outputs, outputs);
+ if (out) {
+ *ret = PythonArg(3);
+ } else {
+ *ret = reinterpret_cast<mxnet::NDArray*>(ndoutputs[0]);
}
- int num_outputs = 0;
- auto ndoutputs = Invoke(op, &attrs, 0, nullptr, &num_outputs, nullptr);
- *ret = ndoutputs[0];
});
-MXNET_REGISTER_API("_npi.full_like")
+MXNET_REGISTER_API("_npi.argmin")
.set_body([](runtime::MXNetArgs args, runtime::MXNetRetValue* ret) {
using namespace runtime;
- const nnvm::Op* op = Op::Get("_npi_full_like");
+ const nnvm::Op* op = Op::Get("_npi_argmin");
nnvm::NodeAttrs attrs;
- op::FullLikeOpParam param;
- param.fill_value = args[1].operator double();
- if (args[2].type_code() == kNull) {
- param.dtype = dmlc::nullopt;
+ op::ReduceAxisParam param;
+ // param.axis
+ if (args[1].type_code() == kNull) {
+ param.axis = dmlc::nullopt;
} else {
- param.dtype = String2MXNetTypeWithBool(args[2].operator std::string());
+ param.axis = args[1].operator int();
}
+ // param.keepdims
+ param.keepdims = args[2].operator bool();
+
attrs.parsed = std::move(param);
attrs.op = op;
- if (args[3].type_code() != kNull) {
- attrs.dict["ctx"] = args[3].operator std::string();
- }
- SetAttrDict<op::FullLikeOpParam>(&attrs);
- NDArray* out = args[4].operator mxnet::NDArray*();
- NDArray** outputs = out == nullptr ? nullptr : &out;
- int num_outputs = out != nullptr;
+ SetAttrDict<op::ReduceAxisParam>(&attrs);
+ // inputs
NDArray* inputs[] = {args[0].operator mxnet::NDArray*()};
int num_inputs = 1;
+ // outputs
+ NDArray* out = args[3].operator mxnet::NDArray*();
+ NDArray** outputs = out == nullptr ? nullptr : &out;
+ int num_outputs = out != nullptr;
auto ndoutputs = Invoke(op, &attrs, num_inputs, inputs, &num_outputs, outputs);
if (out) {
- *ret = PythonArg(4);
+ *ret = PythonArg(3);
} else {
- *ret = ndoutputs[0];
+ *ret = reinterpret_cast<mxnet::NDArray*>(ndoutputs[0]);
}
- *ret = ndoutputs[0];
});
} // namespace mxnet
diff --git a/src/api/operator/numpy/np_init_op.cc b/src/api/operator/numpy/np_init_op.cc
index 4f7c6e4..c339fb5 100644
--- a/src/api/operator/numpy/np_init_op.cc
+++ b/src/api/operator/numpy/np_init_op.cc
@@ -26,6 +26,7 @@
#include <mxnet/runtime/packed_func.h>
#include "../utils.h"
#include "../../../operator/tensor/init_op.h"
+#include "../../../operator/numpy/np_init_op.h"
namespace mxnet {
@@ -88,4 +89,35 @@ MXNET_REGISTER_API("_npi.full_like")
*ret = ndoutputs[0];
});
+MXNET_REGISTER_API("_npi.indices")
+.set_body([](runtime::MXNetArgs args, runtime::MXNetRetValue* ret) {
+ using namespace runtime;
+ const nnvm::Op* op = Op::Get("_npi_indices");
+ nnvm::NodeAttrs attrs;
+ op::IndicesOpParam param;
+ // param.dimensions
+ if (args[0].type_code() == kDLInt) {
+ param.dimensions = TShape(1, args[0].operator int64_t());
+ } else {
+ param.dimensions = TShape(args[0].operator ObjectRef());
+ }
+ // param.dtype
+ if (args[1].type_code() == kNull) {
+ param.dtype = mshadow::kInt32;
+ } else {
+ param.dtype = String2MXNetTypeWithBool(args[1].operator std::string());
+ }
+ attrs.parsed = std::move(param);
+ attrs.op = op;
+ SetAttrDict<op::IndicesOpParam>(&attrs);
+ // param.ctx
+ if (args[2].type_code() != kNull) {
+ attrs.dict["ctx"] = args[2].operator std::string();
+ }
+ int num_inputs = 0;
+ int num_outputs = 0;
+ auto ndoutputs = Invoke(op, &attrs, num_inputs, nullptr, &num_outputs, nullptr);
+ *ret = ndoutputs[0];
+});
+
} // namespace mxnet
diff --git a/src/operator/numpy/np_init_op.h b/src/operator/numpy/np_init_op.h
index cfc2941..e92af5f 100644
--- a/src/operator/numpy/np_init_op.h
+++ b/src/operator/numpy/np_init_op.h
@@ -31,6 +31,7 @@
#include <string>
#include "../tensor/init_op.h"
#include "../tensor/elemwise_unary_op.h"
+#include "../../api/operator/op_utils.h"
namespace mxnet {
@@ -79,6 +80,13 @@ struct IndicesOpParam : public dmlc::Parameter<IndicesOpParam> {
.describe("Context of output, in format [cpu|gpu|cpu_pinned](n)."
"Only used for imperative calls.");
}
+ void SetAttrDict(std::unordered_map<std::string, std::string>* dict) {
+ std::ostringstream dimensions_s, dtype_s;
+ dimensions_s << dimensions;
+ dtype_s << dtype;
+ (*dict)["dimensions"] = dimensions_s.str();
+ (*dict)["dtype"] = MXNetTypeWithBool2String(dtype);
+ }
};
inline bool NumpyRangeShape(const nnvm::NodeAttrs& attrs,
diff --git a/src/operator/tensor/broadcast_reduce_op.h b/src/operator/tensor/broadcast_reduce_op.h
index b064429..03aa8b9 100644
--- a/src/operator/tensor/broadcast_reduce_op.h
+++ b/src/operator/tensor/broadcast_reduce_op.h
@@ -108,6 +108,13 @@ struct ReduceAxisParam : public dmlc::Parameter<ReduceAxisParam> {
.describe("If this is set to `True`, the reduced axis is left "
"in the result as dimension with size one.");
}
+ void SetAttrDict(std::unordered_map<std::string, std::string>* dict) {
+ std::ostringstream axis_s, keepdims_s;
+ axis_s << axis;
+ keepdims_s << keepdims;
+ (*dict)["axis"] = axis_s.str();
+ (*dict)["keepdims"] = keepdims_s.str();
+ }
};
enum PickOpMode {kWrap, kClip};