Posted to commits@mxnet.apache.org by GitBox <gi...@apache.org> on 2018/11/27 02:13:53 UTC

[GitHub] wkcn closed pull request #12972: [MXNET-1174]Support np.longlong on Linux and Fix issue #12843

URL: https://github.com/apache/incubator-mxnet/pull/12972
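
For context on the fix: MXNet keyed its dtype lookup tables by NumPy scalar
type objects, and on 64-bit Linux np.longlong (C long long, dtype char 'q')
is a distinct type object from np.int64 (C long, char 'l'), even though both
describe a 64-bit integer whose canonical dtype name is 'int64'. Keying the
table by np.dtype(...).name makes the two interchangeable. A minimal sketch
of the idea, not part of the PR itself (the type-keyed dict mirrors
_DTYPE_NP_TO_MX from ndarray.py; the commented results assume 64-bit Linux):

    import numpy as np

    # Type-keyed table, as in python/mxnet/ndarray/ndarray.py before this PR.
    _DTYPE_NP_TO_MX = {
        None: -1,
        np.float32: 0, np.float64: 1, np.float16: 2,
        np.uint8: 3, np.int32: 4, np.int8: 5, np.int64: 6,
    }

    # np.longlong is not the same type object as np.int64 on 64-bit Linux,
    # so the old type-based lookup misses (and raised KeyError in MXNet).
    print(np.dtype(np.longlong).type in _DTYPE_NP_TO_MX)    # False

    # Both types share the canonical name 'int64', so a name-keyed table
    # (what this PR adds as _DTYPE_NAME_NP_TO_MX) accepts either one.
    # None is special-cased because np.dtype(None) defaults to float64.
    _DTYPE_NAME_NP_TO_MX = {
        np.dtype(k).name if k is not None else None: v
        for k, v in _DTYPE_NP_TO_MX.items()
    }
    print(_DTYPE_NAME_NP_TO_MX[np.dtype(np.longlong).name])  # 6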

This is a PR merged from a forked repository.
As GitHub hides the original diff on merge, it is displayed below for
the sake of provenance:

diff --git a/python/mxnet/ndarray/__init__.py b/python/mxnet/ndarray/__init__.py
index f09908e894d..40a03d87a73 100644
--- a/python/mxnet/ndarray/__init__.py
+++ b/python/mxnet/ndarray/__init__.py
@@ -29,7 +29,8 @@
 # pylint: enable=wildcard-import
 from .utils import load, load_frombuffer, save, zeros, empty, array
 from .sparse import _ndarray_cls
-from .ndarray import _GRAD_REQ_MAP, _DTYPE_MX_TO_NP, _DTYPE_NP_TO_MX, _new_empty_handle
+from .ndarray import _GRAD_REQ_MAP, _DTYPE_MX_TO_NP
+from .ndarray import _DTYPE_NP_TO_MX, _DTYPE_NAME_NP_TO_MX, _new_empty_handle
 
 __all__ = op.__all__ + ndarray.__all__ + utils.__all__ + \
           ['contrib', 'linalg', 'random', 'sparse', 'image']
diff --git a/python/mxnet/ndarray/ndarray.py b/python/mxnet/ndarray/ndarray.py
index bf1140d2071..a1f883ece3f 100644
--- a/python/mxnet/ndarray/ndarray.py
+++ b/python/mxnet/ndarray/ndarray.py
@@ -42,7 +42,7 @@
 from . import op
 from ._internal import NDArrayBase
 
-__all__ = ["NDArray", "concatenate", "_DTYPE_NP_TO_MX", "_DTYPE_MX_TO_NP", "_GRAD_REQ_MAP",
+__all__ = ["NDArray", "concatenate", "_DTYPE_NAME_NP_TO_MX", "_DTYPE_MX_TO_NP", "_GRAD_REQ_MAP",
            "ones", "add", "arange", "eye", "divide", "equal", "full", "greater", "greater_equal",
            "imdecode", "lesser", "lesser_equal", "logical_and", "logical_or", "logical_xor",
            "maximum", "minimum", "moveaxis", "modulo", "multiply", "not_equal", "onehot_encode",
@@ -66,6 +66,10 @@
     np.int64: 6,
 }
 
+_DTYPE_NAME_NP_TO_MX = dict(
+    [(np.dtype(_dtype).name, _value) if _dtype is not None else (None, _value) \
+    for _dtype, _value in _DTYPE_NP_TO_MX.items()])
+
 _DTYPE_MX_TO_NP = {
     -1: None,
     0: np.float32,
@@ -136,7 +140,7 @@ def _new_alloc_handle(shape, ctx, delay_alloc, dtype=mx_real_t):
         ctypes.c_int(ctx.device_typeid),
         ctypes.c_int(ctx.device_id),
         ctypes.c_int(int(delay_alloc)),
-        ctypes.c_int(int(_DTYPE_NP_TO_MX[np.dtype(dtype).type])),
+        ctypes.c_int(int(_DTYPE_NAME_NP_TO_MX[np.dtype(dtype).name])),
         ctypes.byref(hdl)))
     return hdl
 
@@ -148,7 +152,7 @@ def _new_from_shared_mem(shared_pid, shared_id, shape, dtype):
         ctypes.c_int(shared_id),
         c_array(mx_uint, shape),
         mx_uint(len(shape)),
-        ctypes.c_int(int(_DTYPE_NP_TO_MX[np.dtype(dtype).type])),
+        ctypes.c_int(int(_DTYPE_NAME_NP_TO_MX[np.dtype(dtype).name])),
         ctypes.byref(hdl)))
     return hdl
 
diff --git a/python/mxnet/ndarray/sparse.py b/python/mxnet/ndarray/sparse.py
index 7b4cc90648c..21677d7e20c 100644
--- a/python/mxnet/ndarray/sparse.py
+++ b/python/mxnet/ndarray/sparse.py
@@ -50,7 +50,7 @@
 except ImportError:
     gs_retain = None
 from ._internal import _set_ndarray_class
-from .ndarray import NDArray, _storage_type, _DTYPE_NP_TO_MX, _DTYPE_MX_TO_NP
+from .ndarray import NDArray, _storage_type, _DTYPE_NAME_NP_TO_MX, _DTYPE_MX_TO_NP
 from .ndarray import _STORAGE_TYPE_STR_TO_ID, _STORAGE_TYPE_ROW_SPARSE, _STORAGE_TYPE_CSR
 from .ndarray import _STORAGE_TYPE_UNDEFINED, _STORAGE_TYPE_DEFAULT
 from .ndarray import zeros as _zeros_ndarray
@@ -83,7 +83,7 @@ def _new_alloc_handle(stype, shape, ctx, delay_alloc, dtype, aux_types, aux_shap
     for aux_t in aux_types:
         if np.dtype(aux_t) != np.dtype("int64"):
             raise NotImplementedError("only int64 is supported for aux types")
-    aux_type_ids = [int(_DTYPE_NP_TO_MX[np.dtype(aux_t).type]) for aux_t in aux_types]
+    aux_type_ids = [int(_DTYPE_NAME_NP_TO_MX[np.dtype(aux_t).name]) for aux_t in aux_types]
     aux_shapes = [(0,) for aux_t in aux_types] if aux_shapes is None else aux_shapes
     aux_shape_lens = [len(aux_shape) for aux_shape in aux_shapes]
     aux_shapes = py_sum(aux_shapes, ())
@@ -95,7 +95,7 @@ def _new_alloc_handle(stype, shape, ctx, delay_alloc, dtype, aux_types, aux_shap
         ctypes.c_int(ctx.device_typeid),
         ctypes.c_int(ctx.device_id),
         ctypes.c_int(int(delay_alloc)),
-        ctypes.c_int(int(_DTYPE_NP_TO_MX[np.dtype(dtype).type])),
+        ctypes.c_int(int(_DTYPE_NAME_NP_TO_MX[np.dtype(dtype).name])),
         num_aux,
         c_array_buf(ctypes.c_int, native_array('i', aux_type_ids)),
         c_array_buf(mx_uint, native_array('I', aux_shape_lens)),
diff --git a/python/mxnet/operator.py b/python/mxnet/operator.py
index e8fa571d44d..e949d0d4ca1 100644
--- a/python/mxnet/operator.py
+++ b/python/mxnet/operator.py
@@ -27,11 +27,12 @@
 from threading import Lock
 from ctypes import CFUNCTYPE, POINTER, Structure, pointer
 from ctypes import c_void_p, c_int, c_char, c_char_p, cast, c_bool
+import numpy as np
 
 from .base import _LIB, check_call, MXCallbackList, c_array, c_array_buf
 from .base import c_str, mx_uint, mx_float, ctypes2numpy_shared, NDArrayHandle, py_str
 from . import symbol, context
-from .ndarray import NDArray, _DTYPE_NP_TO_MX, _DTYPE_MX_TO_NP
+from .ndarray import NDArray, _DTYPE_NAME_NP_TO_MX, _DTYPE_MX_TO_NP
 from .ndarray.ndarray import _STORAGE_TYPE_STR_TO_ID, _STORAGE_TYPE_ID_TO_STR
 from .ndarray.ndarray import _STORAGE_TYPE_UNDEFINED, _STORAGE_TYPE_DEFAULT
 from .ndarray.ndarray import _STORAGE_TYPE_CSR, _STORAGE_TYPE_ROW_SPARSE
@@ -884,7 +885,7 @@ def infer_type_entry(num_tensor, tensor_types, _):
                         "types, got %d."%(n_aux, len(atype))
                     rtype = list(itype) + list(otype) + list(atype)
                     for i, dtype in enumerate(rtype):
-                        tensor_types[i] = _DTYPE_NP_TO_MX[dtype]
+                        tensor_types[i] = _DTYPE_NAME_NP_TO_MX[np.dtype(dtype).name]
 
                     infer_type_entry._ref_holder = [tensor_types]
                 except Exception:
diff --git a/python/mxnet/rtc.py b/python/mxnet/rtc.py
index 4dea0e656b7..5238022b112 100644
--- a/python/mxnet/rtc.py
+++ b/python/mxnet/rtc.py
@@ -25,7 +25,7 @@
 
 from .base import _LIB, mx_uint, c_array, c_array_buf, c_str_array, check_call
 from .base import c_str, CudaModuleHandle, CudaKernelHandle, numeric_types, string_types
-from .ndarray import _DTYPE_NP_TO_MX, _DTYPE_MX_TO_NP, NDArray
+from .ndarray import _DTYPE_NAME_NP_TO_MX, _DTYPE_MX_TO_NP, NDArray
 
 _DTYPE_CPP_TO_NP = {
     'float': np.float32,
@@ -39,6 +39,10 @@
     'int64_t': np.int64,
 }
 
+_DTYPE_CPP_TO_MX = dict(
+    [(_ctype, _DTYPE_NAME_NP_TO_MX[np.dtype(_dtype).name]) \
+    for _ctype, _dtype in _DTYPE_CPP_TO_NP.items()])
+
 class CudaModule(object):
     r"""Compile and run CUDA code from Python.
 
@@ -153,11 +157,11 @@ def get_kernel(self, name, signature):
             is_const.append(bool(match.groups()[0]))
             dtype = match.groups()[1]
             is_ndarray.append(bool(match.groups()[2]))
-            if dtype not in _DTYPE_CPP_TO_NP:
+            if dtype not in _DTYPE_CPP_TO_MX:
                 raise TypeError(
                     "Unsupported kernel argument type %s. Supported types are: %s."%(
-                        arg, ','.join(_DTYPE_CPP_TO_NP.keys())))
-            dtypes.append(_DTYPE_NP_TO_MX[_DTYPE_CPP_TO_NP[dtype]])
+                        arg, ','.join(_DTYPE_CPP_TO_MX.keys())))
+            dtypes.append(_DTYPE_CPP_TO_MX[dtype])
 
         check_call(_LIB.MXRtcCudaKernelCreate(
             self.handle,
diff --git a/python/mxnet/symbol/symbol.py b/python/mxnet/symbol/symbol.py
index c6575072cc7..359859582cc 100644
--- a/python/mxnet/symbol/symbol.py
+++ b/python/mxnet/symbol/symbol.py
@@ -38,7 +38,8 @@
 from ..base import NDArrayHandle, ExecutorHandle, SymbolHandle
 from ..base import check_call, MXNetError, NotImplementedForSymbol
 from ..context import Context, current_context
-from ..ndarray import NDArray, _DTYPE_NP_TO_MX, _DTYPE_MX_TO_NP, _GRAD_REQ_MAP
+from ..ndarray import NDArray, _DTYPE_NP_TO_MX, _DTYPE_NAME_NP_TO_MX
+from ..ndarray import _DTYPE_MX_TO_NP, _GRAD_REQ_MAP
 from ..ndarray.ndarray import _STORAGE_TYPE_STR_TO_ID
 from ..ndarray import _ndarray_cls
 from ..executor import Executor
@@ -891,19 +892,19 @@ def infer_type(self, *args, **kwargs):
             keys = c_array(ctypes.c_char_p, [])
             for s in args:
                 if s is not None:
-                    s = _numpy.dtype(s).type
-                    if s not in _DTYPE_NP_TO_MX:
+                    s_name = _numpy.dtype(s).name
+                    if s_name not in _DTYPE_NAME_NP_TO_MX:
                         raise TypeError('Argument need to be one of ' + str(_DTYPE_NP_TO_MX))
-                    sdata.append(_DTYPE_NP_TO_MX[s])
+                    sdata.append(_DTYPE_NAME_NP_TO_MX[s_name])
                 else:
                     sdata.append(-1)
         else:
             str_keys = []
             for k, v in kwargs.items():
-                v = _numpy.dtype(v).type
-                if v in _DTYPE_NP_TO_MX:
+                v_name = _numpy.dtype(v).name
+                if v_name in _DTYPE_NAME_NP_TO_MX:
                     str_keys.append(k)
-                    sdata.append(_DTYPE_NP_TO_MX[v])
+                    sdata.append(_DTYPE_NAME_NP_TO_MX[v_name])
             keys = c_str_array(str_keys)
         arg_type_size = mx_uint()
         arg_type_data = ctypes.POINTER(ctypes.c_int)()
@@ -1366,10 +1367,10 @@ def simple_bind(self, ctx, grad_req='write', type_dict=None, stype_dict=None,
             provided_arg_type_names = []
             provided_arg_type_data = []
             for k, v in type_dict.items():
-                v = _numpy.dtype(v).type
-                if v in _DTYPE_NP_TO_MX:
+                v_name = _numpy.dtype(v).name
+                if v_name in _DTYPE_NAME_NP_TO_MX:
                     provided_arg_type_names.append(k)
-                    provided_arg_type_data.append(_DTYPE_NP_TO_MX[v])
+                    provided_arg_type_data.append(_DTYPE_NAME_NP_TO_MX[v_name])
             num_provided_arg_types = mx_uint(len(provided_arg_type_names))
             provided_arg_type_names = c_str_array(provided_arg_type_names)
             provided_arg_type_data = c_array_buf(ctypes.c_int, array('i', provided_arg_type_data))
@@ -2537,7 +2538,7 @@ def var(name, attr=None, shape=None, lr_mult=None, wd_mult=None, dtype=None,
     if wd_mult is not None:
         attr['__wd_mult__'] = str(wd_mult)
     if dtype is not None:
-        attr['__dtype__'] = str(_DTYPE_NP_TO_MX[_numpy.dtype(dtype).type])
+        attr['__dtype__'] = str(_DTYPE_NAME_NP_TO_MX[_numpy.dtype(dtype).name])
     if init is not None:
         if not isinstance(init, string_types):
             init = init.dumps()
diff --git a/tests/python/unittest/test_ndarray.py b/tests/python/unittest/test_ndarray.py
index 0aa48553901..cf4f4564b80 100644
--- a/tests/python/unittest/test_ndarray.py
+++ b/tests/python/unittest/test_ndarray.py
@@ -1506,6 +1506,31 @@ def test_dlpack():
             mx.test_utils.assert_almost_equal(a_np, d_np)
             mx.test_utils.assert_almost_equal(a_np, e_np)
 
+
+@with_seed()
+def test_dtype():
+    dtypes = [np.int8, np.uint8,
+              np.int32, np.int64,
+              np.float16, np.float32, np.float64,
+              np.long, np.longlong]
+    for dtype in dtypes:
+        values = np.array([1, 2, 3], dtype=dtype)
+        data = mx.nd.array(values, dtype=dtype)
+        data_np = data.asnumpy()
+        assert np.dtype(dtype).name == np.dtype(data.dtype).name
+        assert np.dtype(dtype).name == np.dtype(data_np.dtype).name
+        mx.test_utils.assert_almost_equal(data_np, values)
+    """Test case from https://github.com/apache/incubator-mxnet/issues/12843"""
+    try:
+        import scipy as sp
+        one = np.ones((2, 2), dtype='int64')
+        sp_one = sp.sparse.csr_matrix(one).data
+        mx.nd.array(sp_one, dtype=sp_one.dtype)
+        assert np.dtype(dtype).name == 'int64'
+    except ImportError:
+        pass
+
+
 @with_seed()
 def test_ndarray_is_inf():
     random_dimensions = np.random.randint(2, 5)
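
For reference, a minimal reproduction of issue #12843, which the new test
above encodes. It assumes SciPy is installed; on Linux the data array of a
csr_matrix built from int64 input can come back with the longlong ('q')
flavor of int64, which the old type-keyed lookup rejected:

    import numpy as np
    import scipy.sparse
    import mxnet as mx

    one = np.ones((2, 2), dtype='int64')
    sp_data = scipy.sparse.csr_matrix(one).data

    # Before this change the next line raised KeyError: <class 'numpy.longlong'>;
    # with the name-keyed lookup it succeeds, since the dtype's name is 'int64'.
    arr = mx.nd.array(sp_data, dtype=sp_data.dtype)
    assert np.dtype(arr.dtype).name == 'int64'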


----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
users@infra.apache.org


With regards,
Apache Git Services