You are viewing a plain text version of this content. The canonical link was a hyperlink in the original message and is not preserved in this plain-text export.
Posted to commits@mxnet.apache.org by GitBox <gi...@apache.org> on 2017/11/16 23:19:14 UTC

[GitHub] piiswrong closed pull request #8642: ctypes speed improvement

piiswrong closed pull request #8642: ctypes speed improvement
URL: https://github.com/apache/incubator-mxnet/pull/8642
 
 
   

This is a PR merged from a forked repository. Because GitHub hides the
original diff of a foreign (fork-based) pull request once it is merged,
the full diff is reproduced below for the sake of provenance:

diff --git a/python/mxnet/_ctypes/ndarray.py b/python/mxnet/_ctypes/ndarray.py
index 0d02c049e3..a0c01a6e06 100644
--- a/python/mxnet/_ctypes/ndarray.py
+++ b/python/mxnet/_ctypes/ndarray.py
@@ -24,7 +24,7 @@
 import ctypes
 
 from ..base import _LIB
-from ..base import c_array, c_str
+from ..base import c_str_array, c_handle_array
 from ..base import NDArrayHandle, CachedOpHandle
 from ..base import check_call
 
@@ -69,7 +69,7 @@ def _imperative_invoke(handle, ndargs, keys, vals, out):
         if isinstance(out, NDArrayBase):
             out = (out,)
         num_output = ctypes.c_int(len(out))
-        output_vars = c_array(NDArrayHandle, [i.handle for i in out])
+        output_vars = c_handle_array(out)
         output_vars = ctypes.cast(output_vars, ctypes.POINTER(NDArrayHandle))
     else:
         original_output = None
@@ -83,12 +83,12 @@ def _imperative_invoke(handle, ndargs, keys, vals, out):
     check_call(_LIB.MXImperativeInvokeEx(
         ctypes.c_void_p(handle),
         ctypes.c_int(len(ndargs)),
-        c_array(NDArrayHandle, [arr.handle for arr in ndargs]),
+        c_handle_array(ndargs),
         ctypes.byref(num_output),
         ctypes.byref(output_vars),
         ctypes.c_int(len(keys)),
-        c_array(ctypes.c_char_p, [c_str(key) for key in keys]),
-        c_array(ctypes.c_char_p, [c_str(str(val)) for val in vals]),
+        c_str_array(keys),
+        c_str_array([str(s) for s in vals]),
         ctypes.byref(out_stypes)))
 
     if original_output is not None:
@@ -122,7 +122,7 @@ def __call__(self, *args, **kwargs):
             if isinstance(out, NDArrayBase):
                 out = (out,)
             num_output = ctypes.c_int(len(out))
-            output_vars = c_array(NDArrayHandle, [i.handle for i in out])
+            output_vars = c_handle_array(out)
             output_vars = ctypes.cast(output_vars, ctypes.POINTER(NDArrayHandle))
         else:
             original_output = None
@@ -140,7 +140,7 @@ def __call__(self, *args, **kwargs):
         check_call(_LIB.MXInvokeCachedOpEx(
             self.handle,
             ctypes.c_int(len(args)),
-            c_array(NDArrayHandle, [arr.handle for arr in args]),
+            c_handle_array(args),
             ctypes.byref(num_output),
             ctypes.byref(output_vars),
             ctypes.byref(out_stypes)))
diff --git a/python/mxnet/_ctypes/symbol.py b/python/mxnet/_ctypes/symbol.py
index 3ec2ddcdc5..fe4cb950ed 100644
--- a/python/mxnet/_ctypes/symbol.py
+++ b/python/mxnet/_ctypes/symbol.py
@@ -22,7 +22,7 @@
 
 import ctypes
 from ..base import _LIB
-from ..base import c_array, c_str, mx_uint
+from ..base import c_str_array, c_handle_array, c_str, mx_uint
 from ..base import SymbolHandle
 from ..base import check_call
 
@@ -79,11 +79,11 @@ def _compose(self, *args, **kwargs):
 
         num_args = len(args) + len(kwargs)
         if len(kwargs) != 0:
-            keys = c_array(ctypes.c_char_p, [c_str(key) for key in kwargs])
-            args = c_array(SymbolHandle, [s.handle for s in kwargs.values()])
+            keys = c_str_array(kwargs.keys())
+            args = c_handle_array(kwargs.values())
         else:
             keys = None
-            args = c_array(SymbolHandle, [s.handle for s in args])
+            args = c_handle_array(kwargs.values())
         check_call(_LIB.NNSymbolCompose(
             self.handle, name, num_args, keys, args))
 
@@ -95,10 +95,8 @@ def _set_attr(self, **kwargs):
         **kwargs
             The attributes to set
         """
-        keys = c_array(ctypes.c_char_p,
-                       [c_str(key) for key in kwargs])
-        vals = c_array(ctypes.c_char_p,
-                       [c_str(str(val)) for val in kwargs.values()])
+        keys = c_str_array(kwargs.keys())
+        vals = c_str_array([str(s) for s in kwargs.values()])
         num_args = mx_uint(len(kwargs))
         check_call(_LIB.MXSymbolSetAttrs(
             self.handle, num_args, keys, vals))
@@ -122,8 +120,8 @@ def _symbol_creator(handle, args, kwargs, keys, vals, name):
     check_call(_LIB.MXSymbolCreateAtomicSymbol(
         ctypes.c_void_p(handle),
         mx_uint(len(keys)),
-        c_array(ctypes.c_char_p, [c_str(i) for i in keys]),
-        c_array(ctypes.c_char_p, [c_str(str(i)) for i in vals]),
+        c_str_array(keys),
+        c_str_array([str(v) for v in vals]),
         ctypes.byref(sym_handle)))
 
     if args and kwargs:
diff --git a/python/mxnet/autograd.py b/python/mxnet/autograd.py
index dc81fbedbf..340a9e66f4 100644
--- a/python/mxnet/autograd.py
+++ b/python/mxnet/autograd.py
@@ -20,12 +20,13 @@
 from __future__ import absolute_import
 from __future__ import division
 
+from array import array
 from threading import Lock
 import traceback
 import ctypes
 from ctypes import c_int, c_void_p, CFUNCTYPE, POINTER, cast
-from .base import _LIB, check_call, string_types
-from .base import mx_uint, NDArrayHandle, c_array, MXCallbackList, SymbolHandle
+from .base import _LIB, check_call, string_types, mx_uint
+from .base import NDArrayHandle, c_array, c_handle_array, c_array_buf, MXCallbackList, SymbolHandle
 from .ndarray import NDArray, _ndarray_cls
 from .ndarray import _GRAD_REQ_MAP
 from .symbol import Symbol
@@ -207,21 +208,16 @@ def mark_variables(variables, gradients, grad_reqs='write'):
         variables = [variables]
         gradients = [gradients]
 
-    variable_handles = []
-    gradient_handles = []
-    for var, gradvar in zip(variables, gradients):
-        variable_handles.append(var.handle)
-        gradient_handles.append(gradvar.handle)
     if isinstance(grad_reqs, string_types):
         grad_reqs = [_GRAD_REQ_MAP[grad_reqs]]*len(variables)
     else:
         grad_reqs = [_GRAD_REQ_MAP[i] for i in grad_reqs]
 
     check_call(_LIB.MXAutogradMarkVariables(
-        len(variable_handles),
-        c_array(NDArrayHandle, variable_handles),
-        c_array(mx_uint, grad_reqs),
-        c_array(NDArrayHandle, gradient_handles)))
+        len(variables),
+        c_handle_array(variables),
+        c_array_buf(mx_uint, array('I', grad_reqs)),
+        c_handle_array(gradients)))
 
 
 def _parse_head(heads, head_grads):
@@ -231,7 +227,7 @@ def _parse_head(heads, head_grads):
     if isinstance(head_grads, NDArray):
         head_grads = [head_grads]
 
-    head_handles = c_array(NDArrayHandle, [i.handle for i in heads])
+    head_handles = c_handle_array(heads)
 
     if head_grads is None:
         hgrad_handles = ctypes.c_void_p(0)
@@ -318,11 +314,10 @@ def grad(heads, variables, head_grads=None, retain_graph=None, create_graph=Fals
     head_handles, hgrad_handles = _parse_head(heads, head_grads)
 
     if isinstance(variables, NDArray):
-        var_handles = [variables.handle]
+        variables = [variables]
     else:
         assert len(variables), "variables cannot be an empty list."
-        var_handles = [i.handle for i in variables]
-    var_handles = c_array(NDArrayHandle, var_handles)
+    var_handles = c_handle_array(variables)
 
     retain_graph = retain_graph if retain_graph is not None else create_graph
     grad_vars = ctypes.POINTER(NDArrayHandle)()
@@ -474,8 +469,6 @@ def delete_entry(_):
                 return False
             return True
 
-        input_handles = [x.handle for x in inputs]
-        output_handles = [x.handle for x in outputs]
         callbacks = [Function._bwd_functype(backward_entry),
                      Function._del_functype(delete_entry)]
         callbacks = [cast(i, CFUNCTYPE(c_int)) for i in callbacks]
@@ -486,9 +479,9 @@ def delete_entry(_):
                                       POINTER(c_void_p)))
         check_call(_LIB.MXCustomFunctionRecord(
             c_int(len(inputs)),
-            c_array(NDArrayHandle, input_handles),
+            c_handle_array(inputs),
             c_int(len(outputs)),
-            c_array(NDArrayHandle, output_handles),
+            c_handle_array(outputs),
             ctypes.byref(context)))
 
         Function._registry.ref_holder[key] = context
diff --git a/python/mxnet/base.py b/python/mxnet/base.py
index 5882a50921..80fc9011db 100644
--- a/python/mxnet/base.py
+++ b/python/mxnet/base.py
@@ -145,6 +145,7 @@ def check_call(ret):
     if ret != 0:
         raise MXNetError(py_str(_LIB.MXGetLastError()))
 
+
 if sys.version_info[0] < 3:
     def c_str(string):
         """Create ctypes char * from a Python string.
@@ -166,6 +167,24 @@ def c_str(string):
         Hello, World
         """
         return ctypes.c_char_p(string)
+
+    def c_str_array(strings):
+        """Create ctypes const char ** from a list of Python strings.
+
+        Parameters
+        ----------
+        strings : list of string
+            Python strings.
+
+        Returns
+        -------
+        (ctypes.c_char_p * len(strings))
+            A const char ** pointer that can be passed to C API.
+        """
+        arr = (ctypes.c_char_p * len(strings))()
+        arr[:] = strings
+        return arr
+
 else:
     def c_str(string):
         """Create ctypes char * from a Python string.
@@ -188,6 +207,22 @@ def c_str(string):
         """
         return ctypes.c_char_p(string.encode('utf-8'))
 
+    def c_str_array(strings):
+        """Create ctypes const char ** from a list of Python strings.
+
+        Parameters
+        ----------
+        strings : list of string
+            Python strings.
+
+        Returns
+        -------
+        (ctypes.c_char_p * len(strings))
+            A const char ** pointer that can be passed to C API.
+        """
+        arr = (ctypes.c_char_p * len(strings))()
+        arr[:] = [s.encode('utf-8') for s in strings]
+        return arr
 
 def c_array(ctype, values):
     """Create ctypes array from a Python array.
@@ -213,7 +248,55 @@ def c_array(ctype, values):
     >>> x[1]
     2.0
     """
-    return (ctype * len(values))(*values)
+    out = (ctype * len(values))()
+    out[:] = values
+    return out
+
+
+def c_array_buf(ctype, buf):
+    """Create ctypes array from a Python buffer.
+    For primitive types, using the buffer created with array.array is faster
+    than a c_array call.
+
+    Parameters
+    ----------
+    ctype : ctypes data type
+        Data type of the array we want to convert to, such as mx_float.
+
+    buf : buffer type
+        Data content.
+
+    Returns
+    -------
+    out : ctypes array
+        Created ctypes array.
+
+    Examples
+    --------
+    >>> x = mx.base.c_array_buf(mx.base.mx_float, array.array('i', [1, 2, 3]))
+    >>> print len(x)
+    3
+    >>> x[1]
+    2.0
+    """
+    return (ctype * len(buf)).from_buffer(buf)
+
+def c_handle_array(objs):
+    """Create ctypes const void ** from a list of MXNet objects with handles.
+
+    Parameters
+    ----------
+    objs : list of NDArray/Symbol.
+        MXNet objects.
+
+    Returns
+    -------
+    (ctypes.c_void_p * len(objs))
+        A void ** pointer that can be passed to C API.
+    """
+    arr = (ctypes.c_void_p * len(objs))()
+    arr[:] = [o.handle for o in objs]
+    return arr
 
 def ctypes2buffer(cptr, length):
     """Convert ctypes pointer to buffer type.
diff --git a/python/mxnet/contrib/autograd.py b/python/mxnet/contrib/autograd.py
index 68ce31bb05..c5c9c027ee 100644
--- a/python/mxnet/contrib/autograd.py
+++ b/python/mxnet/contrib/autograd.py
@@ -20,10 +20,11 @@
 from __future__ import absolute_import
 from __future__ import division
 
+from array import array
 import ctypes
 import functools
 from ..base import _LIB, check_call, string_types
-from ..base import mx_uint, NDArrayHandle, c_array
+from ..base import mx_uint, NDArrayHandle, c_array, c_array_buf, c_handle_array
 # pylint: disable= unused-import
 from ..ndarray import NDArray, zeros_like, _GRAD_REQ_MAP
 
@@ -107,21 +108,16 @@ def mark_variables(variables, gradients, grad_reqs='write'):
     gradients: list of NDArray
     grad_reqs: list of string
     """
-    variable_handles = []
-    gradient_handles = []
-    for var, gradvar in zip(variables, gradients):
-        variable_handles.append(var.handle)
-        gradient_handles.append(gradvar.handle)
     if isinstance(grad_reqs, string_types):
         grad_reqs = [_GRAD_REQ_MAP[grad_reqs]]*len(variables)
     else:
         grad_reqs = [_GRAD_REQ_MAP[i] for i in grad_reqs]
 
     check_call(_LIB.MXAutogradMarkVariables(
-        len(variable_handles),
-        c_array(NDArrayHandle, variable_handles),
-        c_array(mx_uint, grad_reqs),
-        c_array(NDArrayHandle, gradient_handles)))
+        len(variables),
+        c_handle_array(variables),
+        c_array_buf(mx_uint, array('I', grad_reqs)),
+        c_handle_array(gradients)))
 
 
 def backward(outputs, out_grads=None, retain_graph=False):
@@ -134,14 +130,11 @@ def backward(outputs, out_grads=None, retain_graph=False):
     """
     assert isinstance(outputs, (list, tuple)), \
         "outputs must be a list or tuple of NDArrays"
-    output_handles = []
-    for arr in outputs:
-        output_handles.append(arr.handle)
 
     if out_grads is None:
         check_call(_LIB.MXAutogradBackward(
-            len(output_handles),
-            c_array(NDArrayHandle, output_handles),
+            len(outputs),
+            c_handle_array(outputs),
             ctypes.c_void_p(0),
             ctypes.c_int(retain_graph)))
         return
@@ -152,12 +145,12 @@ def backward(outputs, out_grads=None, retain_graph=False):
             ograd_handles.append(arr.handle)
         else:
             ograd_handles.append(NDArrayHandle(0))
-    assert len(ograd_handles) == len(output_handles), \
+    assert len(ograd_handles) == len(outputs), \
         "outputs and out_grads must have the same length"
 
     check_call(_LIB.MXAutogradBackward(
-        len(output_handles),
-        c_array(NDArrayHandle, output_handles),
+        len(outputs),
+        c_handle_array(outputs),
         c_array(NDArrayHandle, ograd_handles),
         ctypes.c_int(retain_graph)))
 
diff --git a/python/mxnet/executor.py b/python/mxnet/executor.py
index 5cc94a5e80..579e6d3e35 100644
--- a/python/mxnet/executor.py
+++ b/python/mxnet/executor.py
@@ -25,7 +25,7 @@
 import numpy as np
 from .base import _LIB
 from .base import mx_uint, NDArrayHandle, ExecutorHandle
-from .base import check_call, c_array, py_str
+from .base import check_call, c_handle_array, py_str
 from .ndarray import NDArray
 from .ndarray import _ndarray_cls
 from . import ndarray as nd
@@ -226,7 +226,7 @@ def backward(self, out_grads=None, is_train=True):
         for obj in out_grads:
             if not isinstance(obj, NDArray):
                 raise TypeError("inputs must be NDArray")
-        ndarray = c_array(NDArrayHandle, [item.handle for item in out_grads])
+        ndarray = c_handle_array(out_grads)
         check_call(_LIB.MXExecutorBackwardEx(
             self.handle,
             mx_uint(len(out_grads)),
diff --git a/python/mxnet/io.py b/python/mxnet/io.py
index ef2f647eec..25a95be787 100644
--- a/python/mxnet/io.py
+++ b/python/mxnet/io.py
@@ -29,7 +29,7 @@
     h5py = None
 import numpy as np
 from .base import _LIB
-from .base import c_array, c_str, mx_uint, py_str
+from .base import c_str_array, mx_uint, py_str
 from .base import DataIterHandle, NDArrayHandle
 from .base import mx_real_t
 from .base import check_call, build_param_doc as _build_param_doc
@@ -919,11 +919,11 @@ def creator(*args, **kwargs):
         param_vals = []
 
         for k, val in kwargs.items():
-            param_keys.append(c_str(k))
-            param_vals.append(c_str(str(val)))
+            param_keys.append(k)
+            param_vals.append(str(val))
         # create atomic symbol
-        param_keys = c_array(ctypes.c_char_p, param_keys)
-        param_vals = c_array(ctypes.c_char_p, param_vals)
+        param_keys = c_str_array(param_keys)
+        param_vals = c_str_array(param_vals)
         iter_handle = DataIterHandle()
         check_call(_LIB.MXDataIterCreateIter(
             handle,
diff --git a/python/mxnet/kvstore.py b/python/mxnet/kvstore.py
index adfef9a949..8625303ee4 100644
--- a/python/mxnet/kvstore.py
+++ b/python/mxnet/kvstore.py
@@ -19,12 +19,13 @@
 """ Key value store interface of MXNet for parameter synchronization."""
 from __future__ import absolute_import
 
+from array import array
 import ctypes
 import pickle
 from .ndarray import NDArray
 from .ndarray import _ndarray_cls
-from .base import _LIB
-from .base import check_call, c_array, c_str, string_types, mx_uint, py_str
+from .base import _LIB, c_str_array, c_handle_array, c_array, c_array_buf, c_str
+from .base import check_call, string_types, mx_uint, py_str
 from .base import NDArrayHandle, KVStoreHandle
 from . import optimizer as opt
 
@@ -46,22 +47,22 @@ def _ctype_key_value(keys, vals):
             assert(use_str_keys == str_keys_i), "inconsistent types of keys detected."
         c_keys_arr = c_array(ctypes.c_char_p, c_keys) if use_str_keys \
                      else c_array(ctypes.c_int, c_keys)
-        c_vals_arr = c_array(NDArrayHandle, c_vals)
+        c_vals_arr = c_array(ctypes.c_void_p, c_vals)
         return (c_keys_arr, c_vals_arr, use_str_keys)
 
     assert(isinstance(keys, (int,) + string_types)), \
            "unexpected type for keys: " + str(type(keys))
     use_str_keys = isinstance(keys, string_types)
     if isinstance(vals, NDArray):
-        c_keys = c_array(ctypes.c_char_p, [c_str(keys)]) if use_str_keys \
-                 else c_array(ctypes.c_int, [keys])
-        return (c_keys, c_array(NDArrayHandle, [vals.handle]), use_str_keys)
+        c_keys = c_str_array([keys]) if use_str_keys \
+                 else c_array_buf(ctypes.c_int, array('i', [keys]))
+        return (c_keys, c_handle_array([vals]), use_str_keys)
     else:
         for value in vals:
             assert(isinstance(value, NDArray))
-        c_keys = c_array(ctypes.c_char_p, [c_str(keys)] * len(vals)) if use_str_keys \
-                 else c_array(ctypes.c_int, [keys] * len(vals))
-        return (c_keys, c_array(NDArrayHandle, [value.handle for value in vals]), use_str_keys)
+        c_keys = c_str_array([keys] * len(vals)) if use_str_keys \
+                 else c_array_buf(ctypes.c_int, array('i', [keys] * len(vals)))
+        return (c_keys, c_handle_array(vals), use_str_keys)
 
 def _updater_wrapper(updater):
     """A wrapper for the user-defined handle."""
diff --git a/python/mxnet/ndarray/ndarray.py b/python/mxnet/ndarray/ndarray.py
index ad1acaf9b5..13407b3750 100644
--- a/python/mxnet/ndarray/ndarray.py
+++ b/python/mxnet/ndarray/ndarray.py
@@ -27,13 +27,14 @@
 except ImportError:
     from builtins import slice as py_slice
 
+from array import array as native_array
 import ctypes
 import warnings
 import operator
 from functools import reduce # pylint: disable=redefined-builtin
 import numpy as np
 from ..base import _LIB, numeric_types, integer_types
-from ..base import c_array, mx_real_t
+from ..base import c_array, c_array_buf, c_handle_array, mx_real_t
 from ..base import mx_uint, NDArrayHandle, check_call
 from ..base import ctypes2buffer
 from ..context import Context
@@ -124,7 +125,7 @@ def _new_alloc_handle(shape, ctx, delay_alloc, dtype=mx_real_t):
     """
     hdl = NDArrayHandle()
     check_call(_LIB.MXNDArrayCreateEx(
-        c_array(mx_uint, shape),
+        c_array_buf(mx_uint, native_array('I', shape)),
         mx_uint(len(shape)),
         ctypes.c_int(ctx.device_typeid),
         ctypes.c_int(ctx.device_id),
@@ -736,7 +737,7 @@ def reshape(self, shape):
         # Actual reshape
         check_call(_LIB.MXNDArrayReshape(self.handle,
                                          len(shape),
-                                         c_array(ctypes.c_int, shape),
+                                         c_array_buf(ctypes.c_int, native_array('i', shape)),
                                          ctypes.byref(handle)))
         return NDArray(handle=handle, writable=self.writable)
 
@@ -1754,7 +1755,7 @@ def backward(self, out_grad=None, retain_graph=False, train_mode=True):
             ograd_handles = [out_grad.handle]
 
         check_call(_LIB.MXAutogradBackwardEx(
-            1, c_array(NDArrayHandle, [self.handle]),
+            1, c_handle_array([self]),
             c_array(NDArrayHandle, ograd_handles),
             0,
             ctypes.c_void_p(0),
diff --git a/python/mxnet/ndarray/sparse.py b/python/mxnet/ndarray/sparse.py
index 45a269a10d..ed644fddbb 100644
--- a/python/mxnet/ndarray/sparse.py
+++ b/python/mxnet/ndarray/sparse.py
@@ -30,6 +30,7 @@
 
 import ctypes
 import warnings
+from array import array as native_array
 
 __all__ = ["_ndarray_cls", "csr_matrix", "row_sparse_array",
            "BaseSparseNDArray", "CSRNDArray", "RowSparseNDArray"]
@@ -37,7 +38,7 @@
 import numpy as np
 from ..base import NotSupportedForSparseNDArray
 from ..base import _LIB, numeric_types
-from ..base import c_array, mx_real_t, integer_types
+from ..base import c_array_buf, mx_real_t, integer_types
 from ..base import mx_uint, NDArrayHandle, check_call
 from ..context import Context
 from . import _internal
@@ -86,16 +87,16 @@ def _new_alloc_handle(stype, shape, ctx, delay_alloc, dtype, aux_types, aux_shap
     num_aux = mx_uint(len(aux_types))
     check_call(_LIB.MXNDArrayCreateSparseEx(
         ctypes.c_int(int(_STORAGE_TYPE_STR_TO_ID[stype])),
-        c_array(mx_uint, shape),
+        c_array_buf(mx_uint, native_array('I', shape)),
         mx_uint(len(shape)),
         ctypes.c_int(ctx.device_typeid),
         ctypes.c_int(ctx.device_id),
         ctypes.c_int(int(delay_alloc)),
         ctypes.c_int(int(_DTYPE_NP_TO_MX[np.dtype(dtype).type])),
         num_aux,
-        c_array(ctypes.c_int, aux_type_ids),
-        c_array(mx_uint, aux_shape_lens),
-        c_array(mx_uint, aux_shapes),
+        c_array_buf(ctypes.c_int, native_array('i', aux_type_ids)),
+        c_array_buf(mx_uint, native_array('I', aux_shape_lens)),
+        c_array_buf(mx_uint, native_array('I', aux_shapes)),
         ctypes.byref(hdl)))
     return hdl
 
diff --git a/python/mxnet/ndarray/utils.py b/python/mxnet/ndarray/utils.py
index 6f3b0ff9c5..4f597c749f 100644
--- a/python/mxnet/ndarray/utils.py
+++ b/python/mxnet/ndarray/utils.py
@@ -19,7 +19,8 @@
 """Utility functions for NDArray and BaseSparseNDArray."""
 import ctypes
 
-from ..base import _LIB, check_call, py_str, c_str, string_types, mx_uint, NDArrayHandle, c_array
+from ..base import _LIB, check_call, py_str, c_str, string_types, mx_uint, NDArrayHandle
+from ..base import c_array, c_handle_array, c_str_array
 from .ndarray import NDArray
 from .ndarray import array as _array
 from .ndarray import empty as _empty_ndarray
@@ -212,27 +213,24 @@ def save(fname, data):
     """
     if isinstance(data, NDArray):
         data = [data]
-    handles = []
+        handles = c_array(NDArrayHandle, [])
     if isinstance(data, dict):
-        keys = []
-        for key, val in data.items():
-            if not isinstance(key, string_types):
-                raise TypeError('save only accept dict str->NDArray or list of NDArray')
-            if not isinstance(val, NDArray):
-                raise TypeError('save only accept dict str->NDArray or list of NDArray')
-            keys.append(c_str(key))
-            handles.append(val.handle)
-        keys = c_array(ctypes.c_char_p, keys)
+        str_keys = data.keys()
+        nd_vals = data.values()
+        if any(not isinstance(k, string_types) for k in str_keys) or \
+           any(not isinstance(v, NDArray) for v in nd_vals):
+            raise TypeError('save only accept dict str->NDArray or list of NDArray')
+        keys = c_str_array(str_keys)
+        handles = c_handle_array(nd_vals)
     elif isinstance(data, list):
-        for val in data:
-            if not isinstance(val, NDArray):
-                raise TypeError('save only accept dict str->NDArray or list of NDArray')
-            handles.append(val.handle)
+        if any(not isinstance(v, NDArray) for v in data):
+            raise TypeError('save only accept dict str->NDArray or list of NDArray')
         keys = None
+        handles = c_handle_array(data)
     else:
         raise ValueError("data needs to either be a NDArray, dict of str, NDArray pairs "
                          "or a list of NDarrays.")
     check_call(_LIB.MXNDArraySave(c_str(fname),
                                   mx_uint(len(handles)),
-                                  c_array(NDArrayHandle, handles),
+                                  handles,
                                   keys))
diff --git a/python/mxnet/operator.py b/python/mxnet/operator.py
index 1337bbccc3..141a33806a 100644
--- a/python/mxnet/operator.py
+++ b/python/mxnet/operator.py
@@ -22,12 +22,13 @@
 
 import traceback
 
+from array import array
 from threading import Lock
 from ctypes import CFUNCTYPE, POINTER, Structure, pointer
 from ctypes import c_void_p, c_int, c_char, c_char_p, cast, c_bool
 
-from .base import _LIB, check_call, MXCallbackList
-from .base import c_array, c_str, mx_uint, mx_float, ctypes2numpy_shared, NDArrayHandle, py_str
+from .base import _LIB, check_call, MXCallbackList, c_array, c_array_buf
+from .base import c_str, mx_uint, mx_float, ctypes2numpy_shared, NDArrayHandle, py_str
 from . import symbol, context
 from .ndarray import NDArray, _DTYPE_NP_TO_MX, _DTYPE_MX_TO_NP
 
@@ -206,7 +207,9 @@ def infer_shape_entry(num_tensor, tensor_dims,
             assert len(ishape) == n_in
             rshape = list(ishape) + list(oshape)
             for i in range(n_in+n_out):
-                tensor_shapes[i] = cast(c_array(mx_uint, rshape[i]), POINTER(mx_uint))
+                tensor_shapes[i] = cast(c_array_buf(mx_uint,
+                                                    array('I', rshape[i])),
+                                        POINTER(mx_uint))
                 tensor_dims[i] = len(rshape[i])
 
         def list_outputs_entry(out, _):
@@ -324,7 +327,9 @@ def infer_shape_entry(num_tensor, tensor_dims,
                 assert len(ishape) == n_in
                 rshape = list(ishape) + list(oshape)
                 for i in range(n_in+n_out):
-                    tensor_shapes[i] = cast(c_array(mx_uint, rshape[i]), POINTER(mx_uint))
+                    tensor_shapes[i] = cast(c_array_buf(mx_uint,
+                                                        array('I', rshape[i])),
+                                            POINTER(mx_uint))
                     tensor_dims[i] = len(rshape[i])
             except Exception:
                 print('Error in NDArrayOp.infer_shape: %s' % traceback.format_exc())
@@ -363,7 +368,7 @@ def declare_backward_dependency(out_grad, in_data, out_data, num_dep, deps, _):
                 out_data = [out_data[i] for i in range(len(self.list_outputs()))]
                 rdeps = self.declare_backward_dependency(out_grad, in_data, out_data)
                 num_dep[0] = len(rdeps)
-                rdeps = cast(c_array(c_int, rdeps), c_int_p)
+                rdeps = cast(c_array_buf(c_int, array('i', rdeps)), c_int_p)
                 deps[0] = rdeps
             except Exception:
                 print('Error in NDArrayOp.declare_backward_dependency: %s' % traceback.format_exc())
@@ -645,7 +650,9 @@ def infer_shape_entry(num_tensor, tensor_dims,
                         "shapes, got %d."%(n_aux, len(ashape))
                     rshape = list(ishape) + list(oshape) + list(ashape)
                     for i in range(n_in+n_out+n_aux):
-                        tensor_shapes[i] = cast(c_array(mx_uint, rshape[i]), POINTER(mx_uint))
+                        tensor_shapes[i] = cast(c_array_buf(mx_uint,
+                                                            array('I', rshape[i])),
+                                                POINTER(mx_uint))
                         tensor_dims[i] = len(rshape[i])
 
                     infer_shape_entry._ref_holder = [tensor_shapes]
@@ -741,7 +748,7 @@ def declare_backward_dependency_entry(out_grad, in_data, out_data, num_dep, deps
                     out_data = [out_data[i] for i in range(len(op_prop.list_outputs()))]
                     rdeps = op_prop.declare_backward_dependency(out_grad, in_data, out_data)
                     num_dep[0] = len(rdeps)
-                    rdeps = cast(c_array(c_int, rdeps), c_int_p)
+                    rdeps = cast(c_array_buf(c_int, array('i', rdeps)), c_int_p)
                     deps[0] = rdeps
 
                     declare_backward_dependency_entry._ref_holder = [deps]
diff --git a/python/mxnet/rtc.py b/python/mxnet/rtc.py
index aff4588be2..4dea0e656b 100644
--- a/python/mxnet/rtc.py
+++ b/python/mxnet/rtc.py
@@ -18,11 +18,12 @@
 """Interface to runtime cuda kernel compile module."""
 from __future__ import absolute_import
 
+from array import array
 import re
 import ctypes
 import numpy as np
 
-from .base import _LIB, mx_uint, c_array, check_call
+from .base import _LIB, mx_uint, c_array, c_array_buf, c_str_array, check_call
 from .base import c_str, CudaModuleHandle, CudaKernelHandle, numeric_types, string_types
 from .ndarray import _DTYPE_NP_TO_MX, _DTYPE_MX_TO_NP, NDArray
 
@@ -100,9 +101,9 @@ def __init__(self, source, options=(), exports=()):
         check_call(_LIB.MXRtcCudaModuleCreate(
             c_str(source),
             len(options),
-            c_array(ctypes.c_char_p, [c_str(opt) for opt in options]),
+            c_str_array(options),
             len(exports),
-            c_array(ctypes.c_char_p, [c_str(name) for name in exports]),
+            c_str_array(exports),
             ctypes.byref(self.handle)))
 
     def __del__(self):
@@ -162,9 +163,9 @@ def get_kernel(self, name, signature):
             self.handle,
             c_str(name),
             len(dtypes),
-            c_array(ctypes.c_int, [ctypes.c_int(i) for i in is_ndarray]),
-            c_array(ctypes.c_int, [ctypes.c_int(i) for i in is_const]),
-            c_array(ctypes.c_int, [ctypes.c_int(i) for i in dtypes]),
+            c_array_buf(ctypes.c_int, array('i', is_ndarray)),
+            c_array_buf(ctypes.c_int, array('i', is_const)),
+            c_array_buf(ctypes.c_int, array('i', dtypes)),
             ctypes.byref(hdl)))
 
         return CudaKernel(hdl, name, is_ndarray, dtypes)
diff --git a/python/mxnet/symbol/symbol.py b/python/mxnet/symbol/symbol.py
index 4713c1ee1c..e2cf0ecb68 100644
--- a/python/mxnet/symbol/symbol.py
+++ b/python/mxnet/symbol/symbol.py
@@ -25,6 +25,7 @@
 except ImportError:
     from builtins import slice as py_slice
 
+from array import array
 import ctypes
 import warnings
 from numbers import Number
@@ -32,8 +33,8 @@
 import numpy as _numpy
 
 from ..attribute import AttrScope
-from ..base import _LIB, numeric_types
-from ..base import c_array, c_str, mx_uint, py_str, string_types
+from ..base import _LIB, numeric_types, c_array, c_array_buf, c_str, c_str_array, c_handle_array
+from ..base import mx_uint, py_str, string_types
 from ..base import NDArrayHandle, ExecutorHandle, SymbolHandle
 from ..base import check_call, MXNetError, NotImplementedForSymbol
 from ..context import Context
@@ -463,11 +464,11 @@ def _compose(self, *args, **kwargs):
 
         num_args = len(args) + len(kwargs)
         if len(kwargs) != 0:
-            keys = c_array(ctypes.c_char_p, [c_str(key) for key in kwargs])
-            args = c_array(SymbolHandle, [s.handle for s in kwargs.values()])
+            keys = c_str_array(kwargs.keys())
+            args = c_handle_array(kwargs.values())
         else:
             keys = None
-            args = c_array(SymbolHandle, [s.handle for s in args])
+            args = c_handle_array(args)
         check_call(_LIB.MXSymbolCompose(
             self.handle, name, num_args, keys, args))
 
@@ -856,7 +857,7 @@ def infer_type(self, *args, **kwargs):
                     types either by positional or kwargs way.')
         sdata = []
         if len(args) != 0:
-            keys = None
+            keys = c_array(ctypes.c_char_p, [])
             for s in args:
                 if s is not None:
                     s = _numpy.dtype(s).type
@@ -866,12 +867,13 @@ def infer_type(self, *args, **kwargs):
                 else:
                     sdata.append(-1)
         else:
-            keys = []
+            str_keys = []
             for k, v in kwargs.items():
                 v = _numpy.dtype(v).type
                 if v in _DTYPE_NP_TO_MX:
-                    keys.append(c_str(k))
+                    str_keys.append(k)
                     sdata.append(_DTYPE_NP_TO_MX[v])
+            keys = c_str_array(str_keys)
         arg_type_size = mx_uint()
         arg_type_data = ctypes.POINTER(ctypes.c_int)()
         out_type_size = mx_uint()
@@ -882,8 +884,8 @@ def infer_type(self, *args, **kwargs):
         check_call(_LIB.MXSymbolInferType(
             self.handle,
             mx_uint(len(sdata)),
-            c_array(ctypes.c_char_p, keys),
-            c_array(ctypes.c_int, sdata),
+            keys,
+            c_array_buf(ctypes.c_int, array('i', sdata)),
             ctypes.byref(arg_type_size),
             ctypes.byref(arg_type_data),
             ctypes.byref(out_type_size),
@@ -1043,7 +1045,7 @@ def _infer_shape_impl(self, partial, *args, **kwargs):
         sdata = []
         indptr = [0]
         if len(args) != 0:
-            keys = None
+            keys = c_array(ctypes.c_char_p, [])
             for i, s in enumerate(args):
                 if s is not None:
                     if not isinstance(s, tuple):
@@ -1052,14 +1054,15 @@ def _infer_shape_impl(self, partial, *args, **kwargs):
                     sdata.extend(s)
                 indptr.append(len(sdata))
         else:
-            keys = []
+            str_keys = []
             for k, v in kwargs.items():
                 if not isinstance(v, tuple):
                     raise TypeError("Arguments need to be shapes (tuple), "
                                     "but '%s' is %s." % (k, type(v)))
-                keys.append(c_str(k))
+                str_keys.append(k)
                 sdata.extend(v)
                 indptr.append(len(sdata))
+            keys = c_str_array(str_keys)
         arg_shape_size = mx_uint()
         arg_shape_ndim = ctypes.POINTER(mx_uint)()
         arg_shape_data = ctypes.POINTER(ctypes.POINTER(mx_uint))()
@@ -1077,9 +1080,9 @@ def _infer_shape_impl(self, partial, *args, **kwargs):
         check_call(infer_func(
             self.handle,
             mx_uint(len(indptr) - 1),
-            c_array(ctypes.c_char_p, keys),
-            c_array(mx_uint, indptr),
-            c_array(mx_uint, sdata),
+            keys,
+            c_array_buf(mx_uint, array('I', indptr)),
+            c_array_buf(mx_uint, array('I', sdata)),
             ctypes.byref(arg_shape_size),
             ctypes.byref(arg_shape_ndim),
             ctypes.byref(arg_shape_data),
@@ -1330,11 +1333,11 @@ def simple_bind(self, ctx, grad_req='write', type_dict=None, stype_dict=None,
             for k, v in type_dict.items():
                 v = _numpy.dtype(v).type
                 if v in _DTYPE_NP_TO_MX:
-                    provided_arg_type_names.append(c_str(k))
-                    provided_arg_type_data.append(ctypes.c_int(_DTYPE_NP_TO_MX[v]))
+                    provided_arg_type_names.append(k)
+                    provided_arg_type_data.append(_DTYPE_NP_TO_MX[v])
             num_provided_arg_types = mx_uint(len(provided_arg_type_names))
-            provided_arg_type_names = c_array(ctypes.c_char_p, provided_arg_type_names)
-            provided_arg_type_data = c_array(ctypes.c_int, provided_arg_type_data)
+            provided_arg_type_names = c_str_array(provided_arg_type_names)
+            provided_arg_type_data = c_array_buf(ctypes.c_int, array('i', provided_arg_type_data))
 
         # storage types
         num_provided_arg_stypes = 0
@@ -1346,11 +1349,11 @@ def simple_bind(self, ctx, grad_req='write', type_dict=None, stype_dict=None,
             provided_arg_stype_data = []
             for k, v in stype_dict.items():
                 if v in _STORAGE_TYPE_STR_TO_ID:
-                    provided_arg_stype_names.append(c_str(k))
-                    provided_arg_stype_data.append(ctypes.c_int(_STORAGE_TYPE_STR_TO_ID[v]))
+                    provided_arg_stype_names.append(k)
+                    provided_arg_stype_data.append(_STORAGE_TYPE_STR_TO_ID[v])
             num_provided_arg_stypes = mx_uint(len(provided_arg_stype_names))
-            provided_arg_stype_names = c_array(ctypes.c_char_p, provided_arg_stype_names)
-            provided_arg_stype_data = c_array(ctypes.c_int, provided_arg_stype_data)
+            provided_arg_stype_names = c_str_array(provided_arg_stype_names)
+            provided_arg_stype_data = c_array_buf(ctypes.c_int, array('i', provided_arg_stype_data))
 
         provided_arg_shape_data = []  # shape data
         # argument shape index in sdata,
@@ -1361,7 +1364,7 @@ def simple_bind(self, ctx, grad_req='write', type_dict=None, stype_dict=None,
             # if k not in listed_arguments and k not in listed_aux_states:
             #   raise ValueError('arg name %s is not valid', k)
             if isinstance(v, tuple):
-                provided_arg_shape_names.append(c_str(k))
+                provided_arg_shape_names.append(k)
                 provided_arg_shape_data.extend(v)
                 provided_arg_shape_idx.append(len(provided_arg_shape_data))
 
@@ -1372,11 +1375,11 @@ def simple_bind(self, ctx, grad_req='write', type_dict=None, stype_dict=None,
             if isinstance(grad_req, string_types):
                 # use provided_req_type_list_len = 0 to indicate this situation
                 provided_req_type_list_len = 0
-                provided_grad_req_types = [c_str(grad_req)]
+                provided_grad_req_types = [grad_req]
             elif isinstance(grad_req, list):
                 if len(grad_req) == 0:
                     raise RuntimeError('grad_req in simple_bind cannot be an empty list')
-                provided_grad_req_types = [c_str(item) for item in grad_req]
+                provided_grad_req_types = grad_req
                 provided_req_type_list_len = len(provided_grad_req_types)
             elif isinstance(grad_req, dict):
                 if len(grad_req) == 0:
@@ -1384,11 +1387,11 @@ def simple_bind(self, ctx, grad_req='write', type_dict=None, stype_dict=None,
                 provided_grad_req_names = []
                 provided_grad_req_types = []
                 for k, v in grad_req.items():
-                    provided_grad_req_names.append(c_str(k))
-                    provided_grad_req_types.append(c_str(v))
-                provided_grad_req_names = c_array(ctypes.c_char_p, provided_grad_req_names)
+                    provided_grad_req_names.append(k)
+                    provided_grad_req_types.append(v)
+                provided_grad_req_names = c_str_array(provided_grad_req_names)
                 provided_req_type_list_len = len(provided_grad_req_types)
-            provided_grad_req_types = c_array(ctypes.c_char_p, provided_grad_req_types)
+            provided_grad_req_types = c_str_array(provided_grad_req_types)
 
         num_ctx_map_keys = mx_uint(0)
         ctx_map_keys = ctypes.POINTER(ctypes.c_char_p)()
@@ -1399,20 +1402,20 @@ def simple_bind(self, ctx, grad_req='write', type_dict=None, stype_dict=None,
             ctx_map_dev_types = []
             ctx_map_dev_ids = []
             for key, val in group2ctx.items():
-                ctx_map_keys.append(c_str(key))
-                ctx_map_dev_types.append(ctypes.c_int(val.device_typeid))
-                ctx_map_dev_ids.append(ctypes.c_int(val.device_id))
+                ctx_map_keys.append(key)
+                ctx_map_dev_types.append(val.device_typeid)
+                ctx_map_dev_ids.append(val.device_id)
             num_ctx_map_keys = mx_uint(len(ctx_map_keys))
-            ctx_map_keys = c_array(ctypes.c_char_p, ctx_map_keys)
-            ctx_map_dev_types = c_array(ctypes.c_int, ctx_map_dev_types)
-            ctx_map_dev_ids = c_array(ctypes.c_int, ctx_map_dev_ids)
+            ctx_map_keys = c_str_array(ctx_map_keys)
+            ctx_map_dev_types = c_array(ctypes.c_int, array('i', ctx_map_dev_types))
+            ctx_map_dev_ids = c_array(ctypes.c_int, array('i', ctx_map_dev_ids))
 
         # prepare param names
         shared_arg_name_list = []
         if shared_arg_names is not None:
             if not isinstance(shared_arg_names, list):
                 raise ValueError('shared_arg_names in simple_bind must be a list or None')
-            shared_arg_name_list = [c_str(name) for name in shared_arg_names]
+            shared_arg_name_list = shared_arg_names
 
         # prepare shared_buffer
         if shared_buffer is None:
@@ -1422,16 +1425,14 @@ def simple_bind(self, ctx, grad_req='write', type_dict=None, stype_dict=None,
         else:
             if not isinstance(shared_buffer, dict):
                 raise ValueError('shared_buffer in simple_bind must be dict or None')
-            shared_buffer_names = []
-            shared_buffer_handles = []
-            for k, v in shared_buffer.items():
+            buffer_names = shared_buffer.keys()
+            buffer_arrays = shared_buffer.values()
+            for v in buffer_arrays:
                 assert(v.stype == 'default'), \
                     "shared_buffer is expected to only contain NDArrays with default storage"
-                shared_buffer_names.append(c_str(k))
-                shared_buffer_handles.append(v.handle)
-            shared_buffer_names = c_array(ctypes.c_char_p, shared_buffer_names)
-            shared_buffer_len = ctypes.c_int(len(shared_buffer_handles))
-            shared_buffer_handles = c_array(NDArrayHandle, shared_buffer_handles)
+            shared_buffer_names = c_str_array(buffer_names)
+            shared_buffer_len = ctypes.c_int(len(buffer_arrays))
+            shared_buffer_handles = c_handle_array(buffer_arrays)
         updated_shared_buffer_names = ctypes.POINTER(ctypes.c_char_p)()
         updated_shared_buffer_handles = ctypes.POINTER(NDArrayHandle)()
 
@@ -1460,9 +1461,11 @@ def simple_bind(self, ctx, grad_req='write', type_dict=None, stype_dict=None,
                                                  provided_grad_req_names,
                                                  provided_grad_req_types,
                                                  mx_uint(len(provided_arg_shape_names)),
-                                                 c_array(ctypes.c_char_p, provided_arg_shape_names),
-                                                 c_array(mx_uint, provided_arg_shape_data),
-                                                 c_array(mx_uint, provided_arg_shape_idx),
+                                                 c_str_array(provided_arg_shape_names),
+                                                 c_array_buf(mx_uint,
+                                                             array('I', provided_arg_shape_data)),
+                                                 c_array_buf(mx_uint,
+                                                             array('I', provided_arg_shape_idx)),
                                                  num_provided_arg_types,
                                                  provided_arg_type_names,
                                                  provided_arg_type_data,
@@ -1470,7 +1473,7 @@ def simple_bind(self, ctx, grad_req='write', type_dict=None, stype_dict=None,
                                                  provided_arg_stype_names,
                                                  provided_arg_stype_data,
                                                  mx_uint(len(shared_arg_name_list)),
-                                                 c_array(ctypes.c_char_p, shared_arg_name_list),
+                                                 c_str_array(shared_arg_name_list),
                                                  ctypes.byref(shared_buffer_len),
                                                  shared_buffer_names,
                                                  shared_buffer_handles,
@@ -1623,19 +1626,19 @@ def bind(self, ctx, args, args_grad=None, grad_req='write',
         if isinstance(grad_req, string_types):
             if grad_req not in _GRAD_REQ_MAP:
                 raise ValueError('grad_req must be in %s' % str(_GRAD_REQ_MAP))
-            reqs_array = c_array(
-                mx_uint,
-                [mx_uint(_GRAD_REQ_MAP[grad_req])] * len(listed_arguments))
+            reqs_array = c_array_buf(mx_uint,
+                                     array('I', [_GRAD_REQ_MAP[grad_req]] * len(listed_arguments)))
         elif isinstance(grad_req, list):
-            reqs_array = c_array(mx_uint, [mx_uint(_GRAD_REQ_MAP[item]) for item in grad_req])
+            reqs_array = c_array_buf(mx_uint,
+                                     array('I', [_GRAD_REQ_MAP[item] for item in grad_req]))
         elif isinstance(grad_req, dict):
             req_array = []
             for name in listed_arguments:
                 if name in grad_req:
-                    req_array.append(mx_uint(_GRAD_REQ_MAP[grad_req[name]]))
+                    req_array.append(_GRAD_REQ_MAP[grad_req[name]])
                 else:
-                    req_array.append(mx_uint(0))
-            reqs_array = c_array(mx_uint, req_array)
+                    req_array.append(0)
+            reqs_array = c_array_buf(mx_uint, array('I', req_array))
 
         ctx_map_keys = []
         ctx_map_dev_types = []
@@ -1643,9 +1646,9 @@ def bind(self, ctx, args, args_grad=None, grad_req='write',
 
         if group2ctx:
             for key, val in group2ctx.items():
-                ctx_map_keys.append(c_str(key))
-                ctx_map_dev_types.append(ctypes.c_int(val.device_typeid))
-                ctx_map_dev_ids.append(ctypes.c_int(val.device_id))
+                ctx_map_keys.append(key)
+                ctx_map_dev_types.append(val.device_typeid)
+                ctx_map_dev_ids.append(val.device_id)
 
         handle = ExecutorHandle()
         shared_handle = shared_exec.handle if shared_exec is not None else ExecutorHandle()
@@ -1653,9 +1656,9 @@ def bind(self, ctx, args, args_grad=None, grad_req='write',
                                          ctypes.c_int(ctx.device_typeid),
                                          ctypes.c_int(ctx.device_id),
                                          mx_uint(len(ctx_map_keys)),
-                                         c_array(ctypes.c_char_p, ctx_map_keys),
-                                         c_array(ctypes.c_int, ctx_map_dev_types),
-                                         c_array(ctypes.c_int, ctx_map_dev_ids),
+                                         c_str_array(ctx_map_keys),
+                                         c_array_buf(ctypes.c_int, array('i', ctx_map_dev_types)),
+                                         c_array_buf(ctypes.c_int, array('i', ctx_map_dev_ids)),
                                          mx_uint(len(args)),
                                          args_handle,
                                          args_grad_handle,
@@ -1688,7 +1691,7 @@ def gradient(self, wrt):
             A gradient Symbol with returns to be the corresponding gradients.
         """
         handle = SymbolHandle()
-        c_wrt = c_array(ctypes.c_char_p, [c_str(key) for key in wrt])
+        c_wrt = c_str_array(wrt)
         check_call(_LIB.MXSymbolGrad(self.handle,
                                      mx_uint(len(wrt)),
                                      c_wrt,
@@ -2450,15 +2453,12 @@ def Group(symbols):
     sym : Symbol
         A group symbol.
      """
-    ihandles = []
-    for sym in symbols:
-        if not isinstance(sym, Symbol):
-            raise TypeError('Expected a list of symbols as input')
-        ihandles.append(sym.handle)
+    if any(not isinstance(sym, Symbol) for sym in symbols):
+        raise TypeError('Expected a list of symbols as input')
     handle = SymbolHandle()
     check_call(_LIB.MXSymbolCreateGroup(
-        mx_uint(len(ihandles)),
-        c_array(SymbolHandle, ihandles), ctypes.byref(handle)))
+        mx_uint(len(symbols)),
+        c_handle_array(symbols), ctypes.byref(handle)))
     return Symbol(handle)
 
 
diff --git a/python/mxnet/torch.py b/python/mxnet/torch.py
index b7fce6d5c8..fc815b1469 100644
--- a/python/mxnet/torch.py
+++ b/python/mxnet/torch.py
@@ -23,8 +23,8 @@
 import ctypes
 import sys
 from .base import _LIB
-from .base import c_array, py_str, build_param_doc as _build_param_doc
-from .base import mx_uint, mx_float, NDArrayHandle, FunctionHandle
+from .base import c_array, c_str_array, c_handle_array, py_str, build_param_doc as _build_param_doc
+from .base import mx_uint, mx_float, FunctionHandle
 from .base import check_call
 from .ndarray import NDArray, _new_empty_handle
 
@@ -144,12 +144,12 @@ def generic_torch_function(*args, **kwargs):
 
         check_call(_LIB.MXFuncInvokeEx( \
                    handle, \
-                   c_array(NDArrayHandle, [x.handle for x in ndargs[n_mutate_vars:]]), \
+                   c_handle_array(ndargs[n_mutate_vars:]), \
                    c_array(mx_float, []), \
-                   c_array(NDArrayHandle, [x.handle for x in ndargs[:n_mutate_vars]]),
+                   c_handle_array(ndargs[:n_mutate_vars]),
                    ctypes.c_int(len(kwargs)),
-                   c_array(ctypes.c_char_p, kwargs.keys()),
-                   c_array(ctypes.c_char_p, kwargs.values()),))
+                   c_str_array(kwargs.keys()),
+                   c_str_array(kwargs.values())))
         if n_mutate_vars == 1:
             return ndargs[0]
         else:


 

----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
users@infra.apache.org


With regards,
Apache Git Services