You are viewing a plain text version of this content. The canonical link to the original message is not preserved in this plain-text extraction; see the Apache mailing list archives for commits@mxnet.apache.org.
Posted to commits@mxnet.apache.org by an...@apache.org on 2018/06/13 21:32:38 UTC
[incubator-mxnet] 08/12: fix a bug in cudnn softmax activation.
(#10918)
This is an automated email from the ASF dual-hosted git repository.
anirudh2290 pushed a commit to branch v1.2.0
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git
commit be7f358e4f64bc27b2ebe93866bcf3040953e5fe
Author: Da Zheng <zh...@gmail.com>
AuthorDate: Sat May 12 22:48:34 2018 -0700
fix a bug in cudnn softmax activation. (#10918)
---
.../nn/cudnn/cudnn_softmax_activation-inl.h | 13 ++++++++++---
tests/python/gpu/test_operator_gpu.py | 21 +++++++++++++++++++++
2 files changed, 31 insertions(+), 3 deletions(-)
diff --git a/src/operator/nn/cudnn/cudnn_softmax_activation-inl.h b/src/operator/nn/cudnn/cudnn_softmax_activation-inl.h
index 239da02..0845eb7 100644
--- a/src/operator/nn/cudnn/cudnn_softmax_activation-inl.h
+++ b/src/operator/nn/cudnn/cudnn_softmax_activation-inl.h
@@ -48,7 +48,7 @@ class CuDNNSoftmaxActivationOp {
}
void Forward(const OpContext &ctx, const TBlob &in_data,
- const OpReqType &req, const TBlob &out_data) {
+ const OpReqType &req, const TBlob &out_data) {
using namespace mshadow;
using namespace mshadow::expr;
Stream<gpu> *s = ctx.get_stream<gpu>();
@@ -102,14 +102,14 @@ class CuDNNSoftmaxActivationOp {
}
void Backward(const OpContext &ctx, const TBlob &out_grad,
- const TBlob &out_data, const OpReqType &req, const TBlob &in_grad) {
+ const TBlob &out_data, const OpReqType &req,
+ const TBlob &in_grad) {
using namespace mshadow;
using namespace mshadow::expr;
float alpha = 1.0f;
float beta = 0.0f;
Stream<gpu> *s = ctx.get_stream<gpu>();
Tensor<gpu, 4> grad;
- Tensor<gpu, 4> data;
Tensor<gpu, 4> output_data;
Tensor<gpu, 4> input_grad;
cudnnSoftmaxMode_t softmax_mode;
@@ -141,6 +141,13 @@ class CuDNNSoftmaxActivationOp {
softmax_mode = CUDNN_SOFTMAX_MODE_CHANNEL;
}
CHECK_EQ(s->dnn_handle_ownership_, mshadow::Stream<gpu>::OwnHandle);
+ CUDNN_CALL(cudnnSetTensor4dDescriptor(shape_desc_,
+ CUDNN_TENSOR_NCHW,
+ dtype_,
+ input_grad.shape_[0],
+ input_grad.shape_[1],
+ input_grad.shape_[2],
+ input_grad.shape_[3]));
CUDNN_CALL(cudnnSoftmaxBackward(s->dnn_handle_,
CUDNN_SOFTMAX_ACCURATE,
softmax_mode,
diff --git a/tests/python/gpu/test_operator_gpu.py b/tests/python/gpu/test_operator_gpu.py
index 83dfc42..7c18027 100644
--- a/tests/python/gpu/test_operator_gpu.py
+++ b/tests/python/gpu/test_operator_gpu.py
@@ -1837,6 +1837,27 @@ def test_batchnorm_backwards_notrain():
loss=y.square().sum()
loss.backward(train_mode=False)
+
+@with_seed()
+def test_softmax_activation():
+ gpu_a = mx.nd.array([[3., 0.5, -0.5, 2., 7.],
+ [2., -.4, 7., 3., 0.2]], ctx=mx.gpu(0))
+ cpu_a = mx.nd.array([[3., 0.5, -0.5, 2., 7.],
+ [2., -.4, 7., 3., 0.2]], ctx=mx.cpu())
+
+ cpu_a.attach_grad()
+ gpu_a.attach_grad()
+ with mx.autograd.record():
+ gpu_y = mx.nd.SoftmaxActivation(data = gpu_a)
+ cpu_y = mx.nd.SoftmaxActivation(data = cpu_a)
+ assert_almost_equal(cpu_y.asnumpy(), gpu_y.asnumpy(), atol = 1e-3, rtol = 1e-3)
+
+ gpu_y.backward()
+ cpu_y.backward()
+ assert_almost_equal(cpu_a.grad.asnumpy(), gpu_a.grad.asnumpy(),
+ atol = 1e-3, rtol = 1e-3)
+
+
if __name__ == '__main__':
import nose
nose.runmodule()
--
To stop receiving notification emails like this one, please contact
anirudh2290@apache.org.