You are viewing a plain-text version of this content; the canonical link to the original message was omitted in this plain-text export.
Posted to commits@mxnet.apache.org by zh...@apache.org on 2018/11/14 00:31:37 UTC
[incubator-mxnet] branch master updated: Add gauss err function
operator (#13229)
This is an automated email from the ASF dual-hosted git repository.
zhasheng pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git
The following commit(s) were added to refs/heads/master by this push:
new f5ba267 Add gauss err function operator (#13229)
f5ba267 is described below
commit f5ba2678f077b58d31d31029f680f93b313e1cea
Author: Haibin Lin <li...@gmail.com>
AuthorDate: Tue Nov 13 16:31:12 2018 -0800
Add gauss err function operator (#13229)
* erf
register gpu
* add doc
---
docs/api/python/ndarray/ndarray.md | 1 +
docs/api/python/symbol/symbol.md | 1 +
src/operator/math_functions-inl.h | 2 ++
src/operator/mshadow_op.h | 4 ++++
src/operator/operator_tune.cc | 2 ++
src/operator/tensor/elemwise_unary_op_basic.cc | 16 ++++++++++++++++
src/operator/tensor/elemwise_unary_op_basic.cu | 8 ++++++++
tests/python/unittest/test_operator.py | 4 ++++
8 files changed, 38 insertions(+)
diff --git a/docs/api/python/ndarray/ndarray.md b/docs/api/python/ndarray/ndarray.md
index 37965e9..6fcf1d4 100644
--- a/docs/api/python/ndarray/ndarray.md
+++ b/docs/api/python/ndarray/ndarray.md
@@ -656,6 +656,7 @@ The `ndarray` package provides several classes:
log_softmax
relu
sigmoid
+ erf
```
### More
diff --git a/docs/api/python/symbol/symbol.md b/docs/api/python/symbol/symbol.md
index 583f174..a4038d7 100644
--- a/docs/api/python/symbol/symbol.md
+++ b/docs/api/python/symbol/symbol.md
@@ -657,6 +657,7 @@ Composite multiple symbols into a new one by an operator.
log_softmax
relu
sigmoid
+ erf
```
### More
diff --git a/src/operator/math_functions-inl.h b/src/operator/math_functions-inl.h
index a5b83ea..be5bbe2 100644
--- a/src/operator/math_functions-inl.h
+++ b/src/operator/math_functions-inl.h
@@ -60,6 +60,8 @@ double name(double a, double b) { \
return ::name(a, b); \
}
+MXNET_UNARY_MATH_FUNC(erf)
+
MXNET_UNARY_MATH_FUNC(exp)
MXNET_UNARY_MATH_FUNC(expm1)
diff --git a/src/operator/mshadow_op.h b/src/operator/mshadow_op.h
index 06a223d..0b20a02 100644
--- a/src/operator/mshadow_op.h
+++ b/src/operator/mshadow_op.h
@@ -169,6 +169,10 @@ struct softrelu : public mxnet_op::tunable {
MXNET_UNARY_MATH_OP(softrelu_grad, -math::expm1(-a));
+MXNET_UNARY_MATH_OP(erf_grad, 2.0 / math::sqrt(PI) * math::exp(-(a * a)));
+
+MXNET_SIMPLE_UNARY_MATH_OP(erf);
+
MXNET_SIMPLE_UNARY_MATH_OP(exp);
MXNET_SIMPLE_UNARY_MATH_OP(expm1);
diff --git a/src/operator/operator_tune.cc b/src/operator/operator_tune.cc
index cf5412f..2018e80 100644
--- a/src/operator/operator_tune.cc
+++ b/src/operator/operator_tune.cc
@@ -235,6 +235,8 @@ IMPLEMENT_UNARY_WORKLOAD_BWD(mxnet::op::mshadow_op::log2_grad); // NOLINT()
IMPLEMENT_UNARY_WORKLOAD_FWD(mxnet::op::mshadow_op::log10); // NOLINT()
IMPLEMENT_UNARY_WORKLOAD_BWD(mxnet::op::mshadow_op::log10_grad); // NOLINT()
IMPLEMENT_UNARY_WORKLOAD_FWD(mxnet::op::mshadow_op::sin); // NOLINT()
+IMPLEMENT_UNARY_WORKLOAD_FWD(mxnet::op::mshadow_op::erf); // NOLINT()
+IMPLEMENT_UNARY_WORKLOAD_BWD(mxnet::op::mshadow_op::erf_grad); // NOLINT()
IMPLEMENT_UNARY_WORKLOAD_BWD(mxnet::op::mshadow_op::sin_grad); // NOLINT()
IMPLEMENT_UNARY_WORKLOAD_FWD(mxnet::op::mshadow_op::sinh); // NOLINT()
IMPLEMENT_UNARY_WORKLOAD_BWD(mxnet::op::mshadow_op::sinh_grad); // NOLINT()
diff --git a/src/operator/tensor/elemwise_unary_op_basic.cc b/src/operator/tensor/elemwise_unary_op_basic.cc
index d1f1e08..301fc48 100644
--- a/src/operator/tensor/elemwise_unary_op_basic.cc
+++ b/src/operator/tensor/elemwise_unary_op_basic.cc
@@ -886,6 +886,22 @@ The storage type of ``cbrt`` output depends upon the input storage type:
MXNET_OPERATOR_REGISTER_BINARY_WITH_SPARSE_CPU_DR(_backward_cbrt,
unary_bwd<mshadow_op::cube_root_grad>);
+// erf
+MXNET_OPERATOR_REGISTER_UNARY(erf)
+.describe(R"code(Returns element-wise gauss error function of the input.
+
+Example::
+
+ erf([0, -1., 10.]) = [0., -0.8427, 1.]
+
+)code" ADD_FILELINE)
+.set_attr<FCompute>("FCompute<cpu>", UnaryOp::Compute<cpu, mshadow_op::erf>)
+.set_attr<nnvm::FGradient>("FGradient", ElemwiseGradUseIn{"_backward_erf"});
+
+MXNET_OPERATOR_REGISTER_BINARY(_backward_erf)
+.set_attr<FCompute>("FCompute<cpu>",
+ ElemwiseBinaryOp::Compute<cpu, unary_bwd<mshadow_op::erf_grad>>);
+
// rcbrt
MXNET_OPERATOR_REGISTER_UNARY(rcbrt)
.describe(R"code(Returns element-wise inverse cube-root value of the input.
diff --git a/src/operator/tensor/elemwise_unary_op_basic.cu b/src/operator/tensor/elemwise_unary_op_basic.cu
index 19b8d3e..c28934e 100644
--- a/src/operator/tensor/elemwise_unary_op_basic.cu
+++ b/src/operator/tensor/elemwise_unary_op_basic.cu
@@ -54,6 +54,14 @@ NNVM_REGISTER_OP(_backward_softsign)
.set_attr<FCompute>("FCompute<gpu>", ElemwiseBinaryOp::Compute<
gpu, unary_bwd<mshadow_op::softsign_grad>>);
+// erf
+NNVM_REGISTER_OP(erf)
+.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::erf>);
+
+NNVM_REGISTER_OP(_backward_erf)
+.set_attr<FCompute>("FCompute<gpu>",
+ ElemwiseBinaryOp::Compute<gpu, unary_bwd<mshadow_op::erf_grad>>);
+
// copy
NNVM_REGISTER_OP(_copy)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::IdentityCompute<gpu>)
diff --git a/tests/python/unittest/test_operator.py b/tests/python/unittest/test_operator.py
index 7ff4228..5fe9e3e 100644
--- a/tests/python/unittest/test_operator.py
+++ b/tests/python/unittest/test_operator.py
@@ -3496,6 +3496,10 @@ def test_special_functions_using_scipy():
mathematical_core("gammaln", lambda x: mx.sym.gammaln(x), lambda x: scipy_special.gammaln(x),
lambda x: scipy_special.psi(x), 0.5, 0.5)
+ # erf
+ mathematical_core("erf", lambda x: mx.sym.erf(x), lambda x: scipy_special.erf(x),
+ lambda x: 2.0 / math.sqrt(math.pi) * math.exp(-(x ** 2)), 0.5, 0.5)
+
def rounding(name, forward_mxnet_call, forward_numpy_call, data_init=5., grad_init=2.):
data = mx.symbol.Variable('data')