Posted to commits@arrow.apache.org by uw...@apache.org on 2019/02/20 14:20:43 UTC

[arrow] branch master updated: ARROW-4629: [Python] Pandas arrow conversion slowed down by imports

This is an automated email from the ASF dual-hosted git repository.

uwe pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/arrow.git


The following commit(s) were added to refs/heads/master by this push:
     new 957fe15  ARROW-4629: [Python] Pandas arrow conversion slowed down by imports
957fe15 is described below

commit 957fe15bb3dafadda4d0a6f9359e4f40107440bb
Author: fjetter <fj...@users.noreply.github.com>
AuthorDate: Wed Feb 20 15:20:35 2019 +0100

    ARROW-4629: [Python] Pandas arrow conversion slowed down by imports
    
    The local imports slow down the conversion from pandas to Arrow significantly (see [here](https://issues.apache.org/jira/browse/ARROW-4629)); a timing sketch illustrating the per-call overhead follows the diffstat below.
    
    Author: fjetter <fj...@users.noreply.github.com>
    Author: Uwe L. Korn <xh...@users.noreply.github.com>
    
    Closes #3706 from fjetter/local_imports and squashes the following commits:
    
    eb5c8bad <Uwe L. Korn> Apply suggestions from code review
    b4604bec <fjetter> Only import pandas_compat if pandas is available
    f1c8b401 <fjetter> Don't use local imports
---
 python/pyarrow/array.pxi        | 11 ++++++++---
 python/pyarrow/pandas_compat.py |  6 +-----
 2 files changed, 9 insertions(+), 8 deletions(-)
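
The commit message above points at the cost of function-local imports on a hot path: even when the module is already loaded, every call pays for a sys.modules lookup and a fresh name binding. The following is a minimal, hypothetical timing sketch, not part of this commit, in which json merely stands in for pyarrow.pandas_compat:

    # Illustrative micro-benchmark only: compares a function-local import,
    # re-executed on every call, with a name bound once at module level.
    import timeit

    import json  # stand-in module, imported once at module level


    def with_local_import(value):
        import json  # looked up in sys.modules and re-bound on each call
        return json.dumps(value)


    def with_module_level_import(value):
        return json.dumps(value)  # uses the module-level binding


    if __name__ == "__main__":
        n = 100_000
        print("local import:       ", timeit.timeit(lambda: with_local_import(1), number=n))
        print("module-level import:", timeit.timeit(lambda: with_module_level_import(1), number=n))

On CPython the local-import variant is expected to show measurable extra per-call cost, which is the overhead the linked JIRA issue attributes to the conversion path.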

diff --git a/python/pyarrow/array.pxi b/python/pyarrow/array.pxi
index cc77798..324c140 100644
--- a/python/pyarrow/array.pxi
+++ b/python/pyarrow/array.pxi
@@ -15,6 +15,11 @@
 # specific language governing permissions and limitations
 # under the License.
 
+from pyarrow.compat import HAVE_PANDAS
+
+if HAVE_PANDAS:
+    import pyarrow.pandas_compat as pdcompat
+
 
 cdef _sequence_to_array(object sequence, object mask, object size,
                         DataType type, CMemoryPool* pool, c_bool from_pandas):
@@ -165,9 +170,9 @@ def array(object obj, type=None, mask=None, size=None, bint from_pandas=False,
                 from_pandas=True, safe=safe,
                 memory_pool=memory_pool)
         else:
-            import pyarrow.pandas_compat as pdcompat
-            values, type = pdcompat.get_datetimetz_type(values, obj.dtype,
-                                                        type)
+            if HAVE_PANDAS:
+                values, type = pdcompat.get_datetimetz_type(
+                    values, obj.dtype, type)
             return _ndarray_to_array(values, mask, type, from_pandas, safe,
                                      pool)
     else:
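
The array.pxi hunk above hoists the pandas_compat import to module scope and guards both the import and the call behind HAVE_PANDAS, so pyarrow still imports cleanly when pandas is absent. A rough pure-Python rendering of the pattern follows; the real code is Cython, and the try/except derivation of HAVE_PANDAS shown here is an assumption for illustration, not taken from this diff:

    # Sketch of the guarded module-level import pattern; illustration only.
    try:
        import pandas  # noqa: F401
        HAVE_PANDAS = True
    except ImportError:
        HAVE_PANDAS = False

    if HAVE_PANDAS:
        # Paid once at module import time instead of on every conversion call.
        import pyarrow.pandas_compat as pdcompat


    def _maybe_resolve_datetimetz(values, dtype, type_):
        # Hypothetical helper mirroring the branch added in array.pxi above:
        # defer to pandas_compat only when pandas is actually available,
        # otherwise pass the values and type through unchanged.
        if HAVE_PANDAS:
            values, type_ = pdcompat.get_datetimetz_type(values, dtype, type_)
        return values, type_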
diff --git a/python/pyarrow/pandas_compat.py b/python/pyarrow/pandas_compat.py
index 403f15d..86e826f 100644
--- a/python/pyarrow/pandas_compat.py
+++ b/python/pyarrow/pandas_compat.py
@@ -28,7 +28,7 @@ import pandas as pd
 import six
 
 import pyarrow as pa
-from pyarrow.compat import builtin_pickle, PY2, zip_longest  # noqa
+from pyarrow.compat import builtin_pickle, DatetimeTZDtype, PY2, zip_longest  # noqa
 
 
 def infer_dtype(column):
@@ -447,8 +447,6 @@ def dataframe_to_arrays(df, schema, preserve_index, nthreads=1, columns=None,
 
 
 def get_datetimetz_type(values, dtype, type_):
-    from pyarrow.compat import DatetimeTZDtype
-
     if values.dtype.type != np.datetime64:
         return values, type_
 
@@ -543,7 +541,6 @@ def _reconstruct_block(item):
 
 
 def _make_datetimetz(tz):
-    from pyarrow.compat import DatetimeTZDtype
     tz = pa.lib.string_to_tzinfo(tz)
     return DatetimeTZDtype('ns', tz=tz)
 
@@ -554,7 +551,6 @@ def _make_datetimetz(tz):
 
 def table_to_blockmanager(options, table, categories=None,
                           ignore_metadata=False):
-    from pyarrow.compat import DatetimeTZDtype
 
     index_columns = []
     columns = []
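
Since pandas_compat.py already imports pandas unconditionally at the top of the module, moving DatetimeTZDtype onto the module-level import line is safe, and the hunks above drop the remaining per-call imports in get_datetimetz_type, _make_datetimetz, and table_to_blockmanager. A hedged way to eyeball the end-to-end effect is sketched below; the array size and repeat count are arbitrary and absolute timings will vary by machine:

    # Illustrative timing sketch (not from the commit): repeatedly convert a
    # small datetime64 Series, the path on which the removed local imports ran.
    import timeit

    import numpy as np
    import pandas as pd
    import pyarrow as pa

    series = pd.Series(np.arange(1000).astype("datetime64[ns]"))

    elapsed = timeit.timeit(lambda: pa.array(series), number=10_000)
    print("10,000 pa.array(series) calls: %.3fs" % elapsed)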