You are viewing a plain text version of this content. A canonical HTML version is available from the original mailing-list archive (the hyperlink was removed in this plain-text rendering).
Posted to commits@mxnet.apache.org by ha...@apache.org on 2019/07/18 00:01:08 UTC
[incubator-mxnet] 32/42: [numpy] fix cython (#15418)
This is an automated email from the ASF dual-hosted git repository.
haoj pushed a commit to branch numpy
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git
commit a12353f1a941fc06e990cf6a95397874f87cf6ad
Author: Haozheng Fan <fh...@gmail.com>
AuthorDate: Wed Jul 3 02:08:41 2019 +0800
[numpy] fix cython (#15418)
* add cython support for numpy
* stay with original API for backward compatibility
---
ci/jenkins/Jenkins_steps.groovy | 18 ++++++------------
ci/jenkins/Jenkinsfile_unix_cpu | 4 ++--
python/mxnet/cython/ndarray.pyx | 27 +++++++++++++++++++--------
python/mxnet/cython/symbol.pyx | 16 ++++++++++++----
4 files changed, 39 insertions(+), 26 deletions(-)
diff --git a/ci/jenkins/Jenkins_steps.groovy b/ci/jenkins/Jenkins_steps.groovy
index 8fd52f6..40700ad 100644
--- a/ci/jenkins/Jenkins_steps.groovy
+++ b/ci/jenkins/Jenkins_steps.groovy
@@ -112,8 +112,7 @@ def compile_unix_cpu_openblas() {
timeout(time: max_time, unit: 'MINUTES') {
utils.init_git()
utils.docker_run('ubuntu_cpu', 'build_ubuntu_cpu_openblas', false)
- // utils.pack_lib('cpu', mx_lib_cython, true)
- utils.pack_lib('cpu', mx_lib, true)
+ utils.pack_lib('cpu', mx_lib_cython, true)
}
}
}
@@ -267,8 +266,7 @@ def compile_unix_cmake_gpu() {
timeout(time: max_time, unit: 'MINUTES') {
utils.init_git()
utils.docker_run('ubuntu_gpu_cu100', 'build_ubuntu_gpu_cmake', false)
- // utils.pack_lib('cmake_gpu', mx_cmake_lib_cython, true)
- utils.pack_lib('cmake_gpu', mx_cmake_lib, true)
+ utils.pack_lib('cmake_gpu', mx_cmake_lib_cython, true)
}
}
}
@@ -645,10 +643,8 @@ def test_unix_python2_cpu() {
node(NODE_LINUX_CPU) {
ws('workspace/ut-python2-cpu') {
try {
- // utils.unpack_and_init('cpu', mx_lib_cython, true)
- // python2_ut_cython('ubuntu_cpu')
- utils.unpack_and_init('cpu', mx_lib, true)
- python2_ut('ubuntu_cpu')
+ utils.unpack_and_init('cpu', mx_lib_cython, true)
+ python2_ut_cython('ubuntu_cpu')
utils.publish_test_coverage()
} finally {
utils.collect_test_results_unix('nosetests_unittest.xml', 'nosetests_python2_cpu_unittest.xml')
@@ -749,10 +745,8 @@ def test_unix_python3_gpu() {
node(NODE_LINUX_GPU) {
ws('workspace/ut-python3-gpu') {
try {
- // utils.unpack_and_init('gpu', mx_lib_cython, true)
- // python3_gpu_ut_cython('ubuntu_gpu_cu100')
- utils.unpack_and_init('gpu', mx_lib, true)
- python3_gpu_ut('ubuntu_gpu_cu100')
+ utils.unpack_and_init('gpu', mx_lib_cython, true)
+ python3_gpu_ut_cython('ubuntu_gpu_cu100')
utils.publish_test_coverage()
} finally {
utils.collect_test_results_unix('nosetests_gpu.xml', 'nosetests_python3_gpu.xml')
diff --git a/ci/jenkins/Jenkinsfile_unix_cpu b/ci/jenkins/Jenkinsfile_unix_cpu
index c3a1481..fa09429 100644
--- a/ci/jenkins/Jenkinsfile_unix_cpu
+++ b/ci/jenkins/Jenkinsfile_unix_cpu
@@ -52,8 +52,8 @@ core_logic: {
custom_steps.test_unix_python3_mkldnn_mkl_cpu(),
custom_steps.test_unix_scala_cpu(),
custom_steps.test_unix_scala_mkldnn_cpu(),
- // custom_steps.test_unix_clojure_cpu(),
- // custom_steps.test_unix_clojure_integration_cpu(),
+ custom_steps.test_unix_clojure_cpu(),
+ custom_steps.test_unix_clojure_integration_cpu(),
custom_steps.test_unix_perl_cpu(),
custom_steps.test_unix_r_cpu(),
custom_steps.test_unix_r_mkldnn_cpu(),
diff --git a/python/mxnet/cython/ndarray.pyx b/python/mxnet/cython/ndarray.pyx
index f927988..50791e9 100644
--- a/python/mxnet/cython/ndarray.pyx
+++ b/python/mxnet/cython/ndarray.pyx
@@ -64,21 +64,27 @@ cdef class NDArrayBase:
_ndarray_cls = None
+_np_ndarray_cls = None
def _set_ndarray_class(cls):
global _ndarray_cls
_ndarray_cls = cls
-cdef NewArray(NDArrayHandle handle, int stype=-1):
+def _set_np_ndarray_class(cls):
+ global _np_ndarray_cls
+ _np_ndarray_cls = cls
+
+
+cdef NewArray(NDArrayHandle handle, int stype=-1, int is_np_array=0):
"""Create a new array given handle"""
- return _ndarray_cls(_ctypes.cast(<unsigned long long>handle, _ctypes.c_void_p), stype=stype)
+ create_array_fn = _np_ndarray_cls if is_np_array else _ndarray_cls
+ return create_array_fn(_ctypes.cast(<unsigned long long>handle, _ctypes.c_void_p), stype=stype)
cdef class CachedOp:
"""Cached operator handle."""
cdef CachedOpHandle chandle
-
cdef _set_handle(self, handle):
cdef unsigned long long ptr
if handle is None:
@@ -96,6 +102,8 @@ cdef class CachedOp:
def __set__(self, value):
self._set_handle(value)
+ cdef int is_np_sym
+
def __init__(self, sym, flags=()):
cdef vector[string] s_flag_keys
cdef vector[string] s_flag_vals
@@ -106,6 +114,9 @@ cdef class CachedOp:
cdef vector[const char*] c_flag_keys = SVec2Ptr(s_flag_keys)
cdef vector[const char*] c_flag_vals = SVec2Ptr(s_flag_vals)
+ from ..symbol.numpy._symbol import _Symbol
+ self.is_np_sym = bool(isinstance(sym, _Symbol))
+
CALL(MXCreateCachedOpEx(
<SymbolHandle>(<unsigned long long>sym.handle.value),
len(flags),
@@ -154,12 +165,12 @@ cdef class CachedOp:
if original_output is not None:
return original_output
if num_output == 1:
- return NewArray(p_output_vars[0], p_output_stypes[0])
+ return NewArray(p_output_vars[0], p_output_stypes[0], self.is_np_sym)
else:
- return [NewArray(p_output_vars[i], p_output_stypes[i]) for i in range(num_output)]
+ return [NewArray(p_output_vars[i], p_output_stypes[i], self.is_np_sym) for i in range(num_output)]
-def _imperative_invoke(handle, ndargs, keys, vals, out):
+def _imperative_invoke(handle, ndargs, keys, vals, out, is_np_op=0):
"""cython implementation of imperative invoke wrapper"""
cdef unsigned long long ihandle = handle
cdef OpHandle chandle = <OpHandle>ihandle
@@ -211,6 +222,6 @@ def _imperative_invoke(handle, ndargs, keys, vals, out):
if original_output is not None:
return original_output
if num_output == 1:
- return NewArray(p_output_vars[0], p_output_stypes[0])
+ return NewArray(p_output_vars[0], p_output_stypes[0], is_np_op)
else:
- return [NewArray(p_output_vars[i], p_output_stypes[i]) for i in range(num_output)]
+ return [NewArray(p_output_vars[i], p_output_stypes[i], is_np_op) for i in range(num_output)]
diff --git a/python/mxnet/cython/symbol.pyx b/python/mxnet/cython/symbol.pyx
index 1bdea6c..86fe8ae 100644
--- a/python/mxnet/cython/symbol.pyx
+++ b/python/mxnet/cython/symbol.pyx
@@ -84,19 +84,27 @@ cdef SymbolSetAttr(SymbolHandle handle, dict kwargs):
_symbol_cls = SymbolBase
+_np_symbol_cls = None
def _set_symbol_class(cls):
global _symbol_cls
_symbol_cls = cls
-cdef NewSymbol(SymbolHandle handle):
+
+def _set_np_symbol_class(cls):
+ global _np_symbol_cls
+ _np_symbol_cls = cls
+
+
+cdef NewSymbol(SymbolHandle handle, int is_np_sym=0):
"""Create a new symbol given handle"""
- sym = _symbol_cls(None)
+ create_symbol_fn = _np_symbol_cls if is_np_sym else _symbol_cls
+ sym = create_symbol_fn(None)
(<SymbolBase>sym).chandle = handle
return sym
-def _symbol_creator(handle, args, kwargs, keys, vals, name):
+def _symbol_creator(handle, args, kwargs, keys, vals, name, is_np_op=0):
cdef unsigned long long ihandle = handle
cdef OpHandle chandle = <OpHandle>ihandle
cdef vector[string] ckeys
@@ -143,4 +151,4 @@ def _symbol_creator(handle, args, kwargs, keys, vals, name):
&csym_keys[0] if csym_keys.size() != 0 else NULL,
&sym_args[0] if sym_args.size() != 0 else NULL))
- return NewSymbol(ret_handle)
+ return NewSymbol(ret_handle, is_np_op)