Posted to issues@arrow.apache.org by "Wes McKinney (JIRA)" <ji...@apache.org> on 2019/07/22 17:44:00 UTC

[jira] [Commented] (ARROW-6001) Add from_pydict(), from_pylist() and to_pylist() to pyarrow.Table + improve pandas.to_dict()

    [ https://issues.apache.org/jira/browse/ARROW-6001?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=16890340#comment-16890340 ] 

Wes McKinney commented on ARROW-6001:
-------------------------------------

Hm, so we have {{Table.from_pydict}} 

https://github.com/apache/arrow/blob/master/python/pyarrow/table.pxi#L1021

I think {{Table.from_arrays}} could be improved to accept other Python sequences. Can we split this ticket into the different improvements you're proposing (and clarify how your proposal differs from the existing {{Table.from_pydict}} function)?
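For context, a minimal sketch of what the existing {{Table.from_pydict}} already covers (the column names and values below are made up for illustration):

{code:python}
import pyarrow as pa

# Build a Table directly from a mapping of column name -> Python sequence.
table = pa.Table.from_pydict({
    "a": [1, 2, 3],
    "b": ["x", "y", None],
})

print(table.num_rows)      # 3
print(table.schema.names)  # ['a', 'b']
{code}

The open question above is whether {{Table.from_arrays}} should accept plain Python sequences in the same way.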

> Add from_pydict(), from_pylist() and to_pylist() to pyarrow.Table + improve pandas.to_dict()
> --------------------------------------------------------------------------------------------
>
>                 Key: ARROW-6001
>                 URL: https://issues.apache.org/jira/browse/ARROW-6001
>             Project: Apache Arrow
>          Issue Type: Improvement
>            Reporter: David Lee
>            Priority: Minor
>
> I noticed that pyarrow.Table.to_pydict() exists, but there is no pyarrow.Table.from_pydict(). There is a proposed ticket to create one, but it doesn't take into account potential mismatches between column order and number of columns.
> I've attached some code I've written which I've been using to convert between Arrow tables and ordered dictionaries, and between Arrow tables and lists of dictionaries. I've also included an example where this can be used to speed up pandas.to_dict() by a factor of 20x.
>  
> {code:python}
> import pyarrow as pa
>
>
> def from_pylist(pylist, names=None, schema=None, safe=True):
>     """
>     Converts a python list of dictionaries to a pyarrow table
>     :param pylist: pylist list of dictionaries
>     :param names: list of column names
>     :param schema: pyarrow schema
>     :param safe: True or False
>     :return: arrow table
>     """
>     arrow_columns = list()
>     if schema:
>         for column in schema.names:
>             values = [v[column] if column in v else None for v in pylist]
>             column_type = schema.types[schema.get_field_index(column)]
>             arrow_columns.append(pa.array(values, safe=safe, type=column_type))
>         arrow_table = pa.Table.from_arrays(arrow_columns, schema.names)
>     else:
>         for column in names:
>             values = [v[column] if column in v else None for v in pylist]
>             arrow_columns.append(pa.array(values, safe=safe))
>         arrow_table = pa.Table.from_arrays(arrow_columns, names)
>     return arrow_table
> def to_pylist(arrow_table, index_columns=None):
>     """
>     Converts a pyarrow table to a python list of dictionaries
>     :param arrow_table: arrow table
>     :param index_columns: columns to index
>     :return: python list of dictionaries
>     """
>     pydict = arrow_table.to_pydict()
>     if index_columns:
>         columns = arrow_table.schema.names
>         columns.append("_index")
>         pylist = [
>             {column: tuple(pydict[index_column][row] for index_column in index_columns)
>              if column == '_index' else pydict[column][row]
>              for column in columns}
>             for row in range(arrow_table.num_rows)
>         ]
>     else:
>         pylist = [
>             {column: pydict[column][row] for column in arrow_table.schema.names}
>             for row in range(arrow_table.num_rows)
>         ]
>     return pylist
> def from_pydict(pydict, names=None, schema=None, safe=True):
>     """
>     Converts a python ordered dictionary to a pyarrow table
>     :param pydict: ordered dictionary
>     :param names: list of column names
>     :param schema: pyarrow schema
>     :param safe: True or False
>     :return: arrow table
>     """
>     arrow_columns = list()
>     dict_columns = list(pydict.keys())
>     if schema:
>         for column in schema.names:
>             column_type = schema.types[schema.get_field_index(column)]
>             if column in pydict:
>                 arrow_columns.append(pa.array(pydict[column], safe=safe, type=column_type))
>             else:
>                 missing = [None] * len(pydict[dict_columns[0]])
>                 arrow_columns.append(pa.array(missing, safe=safe, type=column_type))
>         arrow_table = pa.Table.from_arrays(arrow_columns, schema.names)
>     else:
>         if not names:
>             names = dict_columns
>         for column in names:
>             if column in dict_columns:
>                 arrow_columns.append(pa.array(pydict[column], safe=safe))
>             else:
>                 arrow_columns.append(pa.array([None] * len(pydict[dict_columns[0]]), safe=safe))
>         arrow_table = pa.Table.from_arrays(arrow_columns, names)
>     return arrow_table
> def get_indexed_values(arrow_table, index_columns):
>     """
>     returns back a set of unique values for a list of columns.
>     :param arrow_table: arrow_table
>     :param index_columns: list of column names
>     :return: set of tuples
>     """
>     pydict = arrow_table.to_pydict()
>     index_set = {
>         tuple(pydict[index_column][row] for index_column in index_columns)
>         for row in range(arrow_table.num_rows)
>     }
>     return index_set
> {code}
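> As a usage sketch (the column names and rows below are made up for illustration), the helpers round-trip a list of dictionaries through an Arrow table, filling in missing keys with None:
> {code:python}
> rows = [
>     {"name": "a", "city": "nyc"},
>     {"name": "b"},
> ]
> table = from_pylist(rows, names=["name", "city"])
> print(table.num_rows)    # 2
> print(to_pylist(table))  # [{'name': 'a', 'city': 'nyc'}, {'name': 'b', 'city': None}]
> {code}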
> Here are my benchmarks of pandas to arrow to python conversion vs. pandas.to_dict():
>  
> {code:python}
> import time
>
> import pyarrow as pa
>
> # panda_df1 and panda_df4 are pandas DataFrames with 1 million and 4 million rows.
> # Benchmark pandas conversion to python objects.
> start_time = time.time()
> python_df1 = panda_df1.to_dict(orient='records')
> total_time = time.time() - start_time
> print("pandas to python - 1 million rows - " + str(total_time))
> start_time = time.time()
> python_df4 = panda_df4.to_dict(orient='records')
> total_time = time.time() - start_time
> print("pandas to python - 4 million rows - " + str(total_time))
> start_time = time.time()
> arrow_df1 = pa.Table.from_pandas(panda_df1)
> pydict = arrow_df1.to_pydict()
> python_df1 = [{column: pydict[column][row] for column in arrow_df1.schema.names} for row in range(arrow_df1.num_rows)]
> total_time = time.time() - start_time
> print("pandas to arrow to python - 1 million rows - " + str(total_time))
> start_time = time.time()
> arrow_df4 = pa.Table.from_pandas(panda_df4)
> pydict = arrow_df4.to_pydict()
> python_df4 = [{column: pydict[column][row] for column in arrow_df4.schema.names} for row in range(arrow_df4.num_rows)]
> total_time = time.time() - start_time
> print("pandas to arrow to python - 4 million rows - " + str(total_time))
> {code}
>  
>  
> {code}
> pandas to python - 1 million rows - 13.03793740272522
> pandas to python - 4 million rows - 51.73868107795715
> pandas to arrow to python - 1 million rows - 2.2239842414855957
> pandas to arrow to python - 4 million rows - 2.6550424098968506
> {code}
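> For reference, the arrow-based path above can be wrapped as a small helper (a sketch; the name to_records_via_arrow is just illustrative) that acts as a faster stand-in for df.to_dict(orient='records'):
> {code:python}
> def to_records_via_arrow(panda_df):
>     """Convert a pandas DataFrame to a list of row dictionaries via Arrow."""
>     return to_pylist(pa.Table.from_pandas(panda_df))
> {code}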



--
This message was sent by Atlassian JIRA
(v7.6.14#76016)