Posted to issues@arrow.apache.org by "Licht Takeuchi (JIRA)" <ji...@apache.org> on 2017/11/26 12:39:00 UTC

[jira] [Comment Edited] (ARROW-1436) PyArrow Timestamps written to Parquet as INT96 appear in Spark as 'bigint'

    [ https://issues.apache.org/jira/browse/ARROW-1436?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=16265989#comment-16265989 ] 

Licht Takeuchi edited comment on ARROW-1436 at 11/26/17 12:38 PM:
------------------------------------------------------------------

[~wesmckinn] This seems to have been fixed already.

* Python code:
{code:java}
import pandas as pd
import pyarrow as pa
from pyarrow import parquet as pq
import numpy as np

t = pa.timestamp('ns')
start = pd.Timestamp('2001-01-01').value
data = np.array([start, start + 1000, start + 2000], dtype='int64')
a = pa.array(data, type=t)

table = pa.Table.from_arrays([a], ['ts'])

pq.write_table(table, 'test-1.parquet', use_deprecated_int96_timestamps=True)
pq.write_table(table, 'test-2.parquet', use_deprecated_int96_timestamps=False)
{code}
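
As a side check (not part of the original report; a minimal sketch assuming the two files above were just written), the Parquet physical types can also be verified from Python alone by printing the file metadata, which is expected to show an int96 'ts' column for test-1 and an int64 'ts' column for test-2, matching the Spark footers below:
{code:java}
# Sketch only: inspect the Parquet schemas written above, without Spark.
from pyarrow import parquet as pq

print(pq.read_metadata('test-1.parquet').schema)  # expected: optional int96 ts
print(pq.read_metadata('test-2.parquet').schema)  # expected: an int64 ts column (TIMESTAMP_MICROS)
{code}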

* Spark code:
{code:java}
import org.apache.parquet.hadoop.ParquetFileReader
import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.hadoop.conf.Configuration
val sqlContext = new org.apache.spark.sql.SQLContext(sc)
import sqlContext.implicits._
val conf = sc.hadoopConfiguration
val fs = FileSystem.get(conf)


// int96 timestamp case
ParquetFileReader.readAllFootersInParallel(conf, fs.getFileStatus(new Path("test-1.parquet")))
var df = sqlContext.read.parquet("../../../arrow/python/test-1.parquet")
df.take(3)


// int64 timestamp case
ParquetFileReader.readAllFootersInParallel(conf, fs.getFileStatus(new Path("test-2.parquet")))
var df = sqlContext.read.parquet("../../../arrow/python/test-2.parquet")
df.take(3)
{code}
* Spark output
{code:java}
scala> // int96 timestamp case

scala> ParquetFileReader.readAllFootersInParallel(conf, fs.getFileStatus(new Path("test-1.parquet")))
res12: java.util.List[org.apache.parquet.hadoop.Footer] =
[Footer{file:/Users/rito/GitHub/arrow/python/test-1.parquet, ParquetMetaData{FileMetaData{schema: message schema {
  optional int96 ts;
}
, metadata: {}}, blocks: [BlockMetaData{3, 104 [ColumnMetaData{SNAPPY [ts] INT96  [PLAIN_DICTIONARY, RLE, PLAIN], 47}]}]}}]

scala> var df = sqlContext.read.parquet("test-1.parquet")
df: org.apache.spark.sql.DataFrame = [ts: timestamp]

scala> df.take(3)
res13: Array[org.apache.spark.sql.Row] = Array([2001-01-01 09:00:00.0], [2001-01-01 09:00:00.000001], [2001-01-01 09:00:00.000002])

scala>

scala>

scala> // int64 timestamp case

scala> ParquetFileReader.readAllFootersInParallel(conf, fs.getFileStatus(new Path("test-2.parquet")))
res14: java.util.List[org.apache.parquet.hadoop.Footer] =
[Footer{file:/Users/rito/GitHub/arrow/python/test-2.parquet, ParquetMetaData{FileMetaData{schema: message schema {
  optional int64 ts (TIMESTAMP_MICROS);
}
, metadata: {}}, blocks: [BlockMetaData{3, 93 [ColumnMetaData{SNAPPY [ts] INT64  [PLAIN_DICTIONARY, RLE, PLAIN], 44}]}]}}]

scala> var df = sqlContext.read.parquet("test-2.parquet")
df: org.apache.spark.sql.DataFrame = [ts: timestamp]

scala> df.take(3)
res15: Array[org.apache.spark.sql.Row] = Array([2001-01-01 09:00:00.0], [2001-01-01 09:00:00.000001], [2001-01-01 09:00:00.000002])
{code}
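
In both cases Spark infers a timestamp column and returns the three rows written above. For completeness, a minimal round-trip check on the PyArrow side (a sketch added for illustration, assuming the files produced by the Python snippet above) would look like this; reading the files back should likewise yield a timestamp-typed 'ts' column rather than a plain integer:
{code:java}
# Sketch only: confirm PyArrow itself reads both files back as timestamps.
import pyarrow.parquet as pq

for path in ('test-1.parquet', 'test-2.parquet'):
    table = pq.read_table(path)
    print(path)
    print(table.schema)       # 'ts' is expected to be a timestamp type
    print(table.to_pandas())  # three rows around 2001-01-01
{code}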






> PyArrow Timestamps written to Parquet as INT96 appear in Spark as 'bigint'
> --------------------------------------------------------------------------
>
>                 Key: ARROW-1436
>                 URL: https://issues.apache.org/jira/browse/ARROW-1436
>             Project: Apache Arrow
>          Issue Type: Bug
>          Components: Format, Python
>    Affects Versions: 0.6.0
>            Reporter: Lucas Pickup
>            Assignee: Licht Takeuchi
>             Fix For: 0.8.0
>
>
> When using the 'use_deprecated_int96_timestamps' option to write Parquet files compatible with Spark < 2.2.0 (which doesn't support INT64-backed timestamps), Spark identifies the timestamp columns as BigInts. Some metadata may be missing.



--
This message was sent by Atlassian JIRA
(v6.4.14#64029)