sxjscience opened a new issue #16576: [Numpy][Bug] einsum bug
URL: https://github.com/apache/incubator-mxnet/issues/16576
Example:
```python
import mxnet as mx
import numpy as np
import numpy.testing as npt
from mxnet.gluon import HybridBlock


class MultiNDimBatchDot1(HybridBlock):
    def hybrid_forward(self, F, lhs, rhs):
        """
        Parameters
        ----------
        F
        lhs :
            Shape (N0, N1, T0, C)
        rhs :
            Shape (N0, N1, T1, C)

        Returns
        -------
        ret :
            Shape (N0, N1, T0, T1)
        """
        return F.batch_dot(F.reshape(lhs, (-3, 0, 0)),
                           F.reshape(rhs, (-3, 0, 0)),
                           transpose_b=True).reshape((-4, -1, 8, 0, 0))


class MultiNDimBatchDot2(HybridBlock):
    def hybrid_forward(self, F, lhs, rhs):
        """
        Parameters
        ----------
        F
        lhs :
            Shape (N0, N1, T0, C)
        rhs :
            Shape (N0, N1, T1, C)

        Returns
        -------
        ret :
            Shape (N0, N1, T0, T1)
        """
        return F.np.einsum('abiz,abjz->abij', lhs, rhs)


batch_dot1 = MultiNDimBatchDot1()
batch_dot1.hybridize()
lhs = mx.np.array(np.random.normal(0, 1, (64, 8, 128, 512)), dtype=np.float32, ctx=mx.cpu())
rhs = mx.np.array(np.random.normal(0, 1, (64, 8, 128, 512)), dtype=np.float32, ctx=mx.cpu())
mx.npx.waitall()

# Ground truth from official NumPy.
gt = np.einsum('abiz,abjz->abij', lhs.asnumpy(), rhs.asnumpy())

# Reference path: legacy batch_dot on the flattened batch axes.
out = batch_dot1(lhs.as_nd_ndarray(), rhs.as_nd_ndarray())
npt.assert_allclose(gt, out.asnumpy(), rtol=1E-3, atol=1E-3)

# einsum path: produces incorrect results on CPU.
mx.npx.set_np()
batch_dot2 = MultiNDimBatchDot2()
batch_dot2.hybridize()
out2 = batch_dot2(lhs, rhs)
npt.assert_allclose(gt, out2.asnumpy(), rtol=1E-3, atol=1E-3)
```
On CPU, the einsum output from `batch_dot2` does not match the NumPy ground truth, so the final `assert_allclose` fails.
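For reference, here is a NumPy-only cross-check (my addition, not part of the original repro): the contraction `'abiz,abjz->abij'` is just a batched matmul with the last two axes of `rhs` swapped, so either MXNet path should agree with this result.

```python
import numpy as np

lhs_np = np.random.normal(0, 1, (64, 8, 128, 512)).astype(np.float32)
rhs_np = np.random.normal(0, 1, (64, 8, 128, 512)).astype(np.float32)

# 'abiz,abjz->abij' is equivalent to a batched matmul against rhs transposed
# on its last two axes.
gt_einsum = np.einsum('abiz,abjz->abij', lhs_np, rhs_np)
gt_matmul = np.matmul(lhs_np, np.swapaxes(rhs_np, -1, -2))
np.testing.assert_allclose(gt_einsum, gt_matmul, rtol=1E-3, atol=1E-3)
```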
Also, running the same script on GPU triggers a segfault:
```python
import mxnet as mx
import numpy as np
import numpy.testing as npt
from mxnet.gluon import HybridBlock


class MultiNDimBatchDot1(HybridBlock):
    def hybrid_forward(self, F, lhs, rhs):
        """
        Parameters
        ----------
        F
        lhs :
            Shape (N0, N1, T0, C)
        rhs :
            Shape (N0, N1, T1, C)

        Returns
        -------
        ret :
            Shape (N0, N1, T0, T1)
        """
        return F.batch_dot(F.reshape(lhs, (-3, 0, 0)),
                           F.reshape(rhs, (-3, 0, 0)),
                           transpose_b=True).reshape((-4, -1, 8, 0, 0))


class MultiNDimBatchDot2(HybridBlock):
    def hybrid_forward(self, F, lhs, rhs):
        """
        Parameters
        ----------
        F
        lhs :
            Shape (N0, N1, T0, C)
        rhs :
            Shape (N0, N1, T1, C)

        Returns
        -------
        ret :
            Shape (N0, N1, T0, T1)
        """
        return F.np.einsum('abiz,abjz->abij', lhs, rhs)


batch_dot1 = MultiNDimBatchDot1()
batch_dot1.hybridize()
lhs = mx.np.array(np.random.normal(0, 1, (64, 8, 128, 512)), dtype=np.float32, ctx=mx.gpu())
rhs = mx.np.array(np.random.normal(0, 1, (64, 8, 128, 512)), dtype=np.float32, ctx=mx.gpu())
mx.npx.waitall()

# Ground truth from official NumPy.
gt = np.einsum('abiz,abjz->abij', lhs.asnumpy(), rhs.asnumpy())

# Reference path: legacy batch_dot on the flattened batch axes.
out = batch_dot1(lhs.as_nd_ndarray(), rhs.as_nd_ndarray())
npt.assert_allclose(gt, out.asnumpy(), rtol=1E-3, atol=1E-3)

# einsum path: segfaults on GPU.
mx.npx.set_np()
batch_dot2 = MultiNDimBatchDot2()
batch_dot2.hybridize()
out2 = batch_dot2(lhs, rhs)
npt.assert_allclose(gt, out2.asnumpy(), rtol=1E-3, atol=1E-3)
```
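To help localize the GPU segfault, here is a minimal sketch (my addition, assuming `mx.np.einsum` is also usable imperatively outside a HybridBlock) that calls the einsum operator directly on a small GPU array; if this alone crashes, the problem is in the einsum kernel itself rather than in hybridization.

```python
import mxnet as mx
import numpy as np

mx.npx.set_np()
# Small inputs with the same axis layout as the original repro.
lhs = mx.np.array(np.random.normal(0, 1, (2, 3, 4, 5)), dtype=np.float32, ctx=mx.gpu())
rhs = mx.np.array(np.random.normal(0, 1, (2, 3, 4, 5)), dtype=np.float32, ctx=mx.gpu())

# Imperative einsum call on GPU; asnumpy() forces the computation to run.
out = mx.np.einsum('abiz,abjz->abij', lhs, rhs)
print(out.asnumpy().shape)  # expected: (2, 3, 4, 4)
```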