Posted to commits@spark.apache.org by da...@apache.org on 2016/09/06 17:48:57 UTC

spark git commit: [SPARK-16334] [BACKPORT] Reusing same dictionary column for decoding consecutive row groups shouldn't throw an error

Repository: spark
Updated Branches:
  refs/heads/branch-2.0 95e44dca1 -> 534380484


[SPARK-16334] [BACKPORT] Reusing same dictionary column for decoding consecutive row groups shouldn't throw an error

## What changes were proposed in this pull request?

Backports https://github.com/apache/spark/pull/14941 to branch-2.0.

This patch fixes a bug in the vectorized Parquet reader that is caused by reusing the same dictionary column vector while reading consecutive row groups. Specifically, the issue manifests for certain distributions of dictionary- and plain-encoded data when we read/populate the underlying bit-packed dictionary data into a column-vector-based data structure.
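
For intuition, here is a minimal, self-contained Java sketch of the guard this patch adds. All names below are hypothetical stand-ins for Spark's ColumnVector/Dictionary machinery, not actual Spark APIs; the point is only that null slots may hold stale dictionary ids left over from a previous row group, so the decoding loop must skip them.

```java
import java.util.Arrays;

public class DictionaryDecodeSketch {

  // Mirrors the patched loops: decode only slots that are not null, since
  // null slots may hold stale dictionary ids from a previous row group
  // that reused the same ids buffer.
  static void decodeDictionaryIds(int[] dict, int[] ids, boolean[] isNull,
                                  int[] out, int rowId, int num) {
    for (int i = rowId; i < rowId + num; ++i) {
      if (!isNull[i]) { // the guard this patch adds
        out[i] = dict[ids[i]];
      }
    }
  }

  public static void main(String[] args) {
    int num = 4;
    int[] ids = new int[num];            // reused across "row groups"
    boolean[] isNull = new boolean[num];
    int[] out = new int[num];

    // Row group 1: three-entry dictionary, every slot populated.
    int[] dict1 = {100, 200, 300};
    ids[0] = 2; ids[1] = 1; ids[2] = 0; ids[3] = 2;
    decodeDictionaryIds(dict1, ids, isNull, out, 0, num);
    System.out.println(Arrays.toString(out)); // [300, 200, 100, 300]

    // Row group 2: one-entry dictionary, and slot 3 is null. The reused ids
    // buffer still holds the stale id 2 at slot 3, which is out of range for
    // dict2. Without the isNull guard, decoding slot 3 would throw an
    // ArrayIndexOutOfBoundsException; with it, the slot is simply skipped.
    int[] dict2 = {999};
    isNull[3] = true;
    ids[0] = 0; ids[1] = 0; ids[2] = 0;
    decodeDictionaryIds(dict2, ids, isNull, out, 0, num);
    System.out.println(Arrays.toString(out)); // [999, 999, 999, 300]
  }
}
```

The actual fix applies the same `!column.isNullAt(i)` guard to every dictionary-decoding loop in VectorizedColumnReader, as the diff below shows.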

Manually tested on datasets provided by the community. Thanks to Chris Perluss and Keith Kraus for their invaluable help in tracking down this issue!

Author: Sameer Agarwal <sameerag@cs.berkeley.edu>

Closes #14941 from sameeragarwal/parquet-exception-2.

Author: Sameer Agarwal <sa...@cs.berkeley.edu>

Closes #14944 from sameeragarwal/branch-2.0.


Project: http://git-wip-us.apache.org/repos/asf/spark/repo
Commit: http://git-wip-us.apache.org/repos/asf/spark/commit/53438048
Tree: http://git-wip-us.apache.org/repos/asf/spark/tree/53438048
Diff: http://git-wip-us.apache.org/repos/asf/spark/diff/53438048

Branch: refs/heads/branch-2.0
Commit: 534380484ac5f56bd3e14a8917a24ca6cccf198f
Parents: 95e44dc
Author: Sameer Agarwal <sa...@cs.berkeley.edu>
Authored: Tue Sep 6 10:48:53 2016 -0700
Committer: Davies Liu <da...@gmail.com>
Committed: Tue Sep 6 10:48:53 2016 -0700

----------------------------------------------------------------------
 .../parquet/VectorizedColumnReader.java         | 54 ++++++++++++++------
 1 file changed, 38 insertions(+), 16 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/spark/blob/53438048/sql/core/src/main/java/org/apache/spark/sql/execution/datasources/parquet/VectorizedColumnReader.java
----------------------------------------------------------------------
diff --git a/sql/core/src/main/java/org/apache/spark/sql/execution/datasources/parquet/VectorizedColumnReader.java b/sql/core/src/main/java/org/apache/spark/sql/execution/datasources/parquet/VectorizedColumnReader.java
index 6c47dc0..3141edd 100644
--- a/sql/core/src/main/java/org/apache/spark/sql/execution/datasources/parquet/VectorizedColumnReader.java
+++ b/sql/core/src/main/java/org/apache/spark/sql/execution/datasources/parquet/VectorizedColumnReader.java
@@ -221,15 +221,21 @@ public class VectorizedColumnReader {
         if (column.dataType() == DataTypes.IntegerType ||
             DecimalType.is32BitDecimalType(column.dataType())) {
           for (int i = rowId; i < rowId + num; ++i) {
-            column.putInt(i, dictionary.decodeToInt(dictionaryIds.getInt(i)));
+            if (!column.isNullAt(i)) {
+              column.putInt(i, dictionary.decodeToInt(dictionaryIds.getInt(i)));
+            }
           }
         } else if (column.dataType() == DataTypes.ByteType) {
           for (int i = rowId; i < rowId + num; ++i) {
-            column.putByte(i, (byte) dictionary.decodeToInt(dictionaryIds.getInt(i)));
+            if (!column.isNullAt(i)) {
+              column.putByte(i, (byte) dictionary.decodeToInt(dictionaryIds.getInt(i)));
+            }
           }
         } else if (column.dataType() == DataTypes.ShortType) {
           for (int i = rowId; i < rowId + num; ++i) {
-            column.putShort(i, (short) dictionary.decodeToInt(dictionaryIds.getInt(i)));
+            if (!column.isNullAt(i)) {
+              column.putShort(i, (short) dictionary.decodeToInt(dictionaryIds.getInt(i)));
+            }
           }
         } else {
           throw new UnsupportedOperationException("Unimplemented type: " + column.dataType());
@@ -240,7 +246,9 @@ public class VectorizedColumnReader {
         if (column.dataType() == DataTypes.LongType ||
             DecimalType.is64BitDecimalType(column.dataType())) {
           for (int i = rowId; i < rowId + num; ++i) {
-            column.putLong(i, dictionary.decodeToLong(dictionaryIds.getInt(i)));
+            if (!column.isNullAt(i)) {
+              column.putLong(i, dictionary.decodeToLong(dictionaryIds.getInt(i)));
+            }
           }
         } else {
           throw new UnsupportedOperationException("Unimplemented type: " + column.dataType());
@@ -249,21 +257,27 @@ public class VectorizedColumnReader {
 
       case FLOAT:
         for (int i = rowId; i < rowId + num; ++i) {
-          column.putFloat(i, dictionary.decodeToFloat(dictionaryIds.getInt(i)));
+          if (!column.isNullAt(i)) {
+            column.putFloat(i, dictionary.decodeToFloat(dictionaryIds.getInt(i)));
+          }
         }
         break;
 
       case DOUBLE:
         for (int i = rowId; i < rowId + num; ++i) {
-          column.putDouble(i, dictionary.decodeToDouble(dictionaryIds.getInt(i)));
+          if (!column.isNullAt(i)) {
+            column.putDouble(i, dictionary.decodeToDouble(dictionaryIds.getInt(i)));
+          }
         }
         break;
       case INT96:
         if (column.dataType() == DataTypes.TimestampType) {
           for (int i = rowId; i < rowId + num; ++i) {
             // TODO: Convert dictionary of Binaries to dictionary of Longs
-            Binary v = dictionary.decodeToBinary(dictionaryIds.getInt(i));
-            column.putLong(i, ParquetRowConverter.binaryToSQLTimestamp(v));
+            if (!column.isNullAt(i)) {
+              Binary v = dictionary.decodeToBinary(dictionaryIds.getInt(i));
+              column.putLong(i, ParquetRowConverter.binaryToSQLTimestamp(v));
+            }
           }
         } else {
           throw new UnsupportedOperationException();
@@ -275,26 +289,34 @@ public class VectorizedColumnReader {
         // and reuse it across batches. This should mean adding a ByteArray would just update
         // the length and offset.
         for (int i = rowId; i < rowId + num; ++i) {
-          Binary v = dictionary.decodeToBinary(dictionaryIds.getInt(i));
-          column.putByteArray(i, v.getBytes());
+          if (!column.isNullAt(i)) {
+            Binary v = dictionary.decodeToBinary(dictionaryIds.getInt(i));
+            column.putByteArray(i, v.getBytes());
+          }
         }
         break;
       case FIXED_LEN_BYTE_ARRAY:
         // DecimalType written in the legacy mode
         if (DecimalType.is32BitDecimalType(column.dataType())) {
           for (int i = rowId; i < rowId + num; ++i) {
-            Binary v = dictionary.decodeToBinary(dictionaryIds.getInt(i));
-            column.putInt(i, (int) ParquetRowConverter.binaryToUnscaledLong(v));
+            if (!column.isNullAt(i)) {
+              Binary v = dictionary.decodeToBinary(dictionaryIds.getInt(i));
+              column.putInt(i, (int) ParquetRowConverter.binaryToUnscaledLong(v));
+            }
           }
         } else if (DecimalType.is64BitDecimalType(column.dataType())) {
           for (int i = rowId; i < rowId + num; ++i) {
-            Binary v = dictionary.decodeToBinary(dictionaryIds.getInt(i));
-            column.putLong(i, ParquetRowConverter.binaryToUnscaledLong(v));
+            if (!column.isNullAt(i)) {
+              Binary v = dictionary.decodeToBinary(dictionaryIds.getInt(i));
+              column.putLong(i, ParquetRowConverter.binaryToUnscaledLong(v));
+            }
           }
         } else if (DecimalType.isByteArrayDecimalType(column.dataType())) {
           for (int i = rowId; i < rowId + num; ++i) {
-            Binary v = dictionary.decodeToBinary(dictionaryIds.getInt(i));
-            column.putByteArray(i, v.getBytes());
+            if (!column.isNullAt(i)) {
+              Binary v = dictionary.decodeToBinary(dictionaryIds.getInt(i));
+              column.putByteArray(i, v.getBytes());
+            }
           }
         } else {
           throw new UnsupportedOperationException();

