Posted to commits@parquet.apache.org by an...@apache.org on 2020/06/01 17:25:44 UTC

[parquet-mr] branch master updated: PARQUET-1850: Fix dictionaryPageOffset flag setting in toParquetMetadata method

This is an automated email from the ASF dual-hosted git repository.

aniket486 pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/parquet-mr.git


The following commit(s) were added to refs/heads/master by this push:
     new 0acade0  PARQUET-1850: Fix dictionaryPageOffset flag setting in toParquetMetadata method
0acade0 is described below

commit 0acade062893cc0ac6e8cb7ed01470c38ea4bdb5
Author: srinivasst <sr...@gmail.com>
AuthorDate: Mon Jun 1 10:25:28 2020 -0700

    PARQUET-1850: Fix dictionaryPageOffset flag setting in toParquetMetadata method
    
    ### Issue
    
    The toParquetMetadata method converts org.apache.parquet.hadoop.metadata.ParquetMetadata to org.apache.parquet.format.FileMetaData, but it does not set the dictionary page offset bit in the resulting FileMetaData. The offset is an optional field in the Thrift definition, so it is only serialized when that bit is set.
    
    When the FileMetaData object is serialized while writing the footer and later deserialized, the dictionary page offset is lost because the bit was never set.
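    
    A minimal sketch of the failure mode, using the Thrift-generated
    accessors on org.apache.parquet.format.ColumnMetaData that this
    change touches (illustrative only, not verbatim library code):
    
        ColumnMetaData md = columnChunk.meta_data;
        // Direct field assignment (the old code path) never flips the
        // Thrift isSet bit, so the optional field is skipped on write:
        md.dictionary_page_offset = 30L;
        md.isSetDictionary_page_offset();   // false -> lost on write
        // The generated setter flips the bit as a side effect:
        md.setDictionary_page_offset(30L);
        md.isSetDictionary_page_offset();   // true -> survives round trip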
    
    ### Fix
    
    The offset is now assigned through the Thrift setter, which marks the dictionary page offset flag as set, and only when the column's encoding stats indicate that a dictionary page was actually used for encoding (see the ParquetMetadataConverter hunk in the diff below).
    
    ### Tests
    
    A ParquetMetadata object is created with PLAIN_DICTIONARY encoding, and its dictionaryPageOffset is set to a non-zero value.
    
    The ParquetMetadata object is converted to FileMetaData using the toParquetMetadata method.
    The FileMetaData object is then serialized, deserialized back into a FileMetaData, and converted back to ParquetMetadata using the fromParquetMetadata method.
    
    The new ParquetMetadata should have the same dictionaryPageOffset as the original ParquetMetadata object.
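    
    Condensed, the round trip looks like this (a sketch of the new test;
    see the full diff below for the exact code):
    
        FileMetaData fmd = converter.toParquetMetadata(1, parquetMetaData);
        ByteArrayOutputStream out = new ByteArrayOutputStream();
        Util.writeFileMetaData(fmd, out);
        FileMetaData roundTripped =
            Util.readFileMetaData(new ByteArrayInputStream(out.toByteArray()));
        ParquetMetadata converted = converter.fromParquetMetadata(roundTripped);
        // Before the fix this read back 0 (the Java default for the unset
        // optional field); with the fix it matches the original offset.
        assertEquals(
            parquetMetaData.getBlocks().get(0).getColumns().get(0)
                .getDictionaryPageOffset(),
            converted.getBlocks().get(0).getColumns().get(0)
                .getDictionaryPageOffset());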
    
    Author: srinivasst <sr...@gmail.com>
    
    Closes #789 from srinivasst/ParquetConverterFix and squashes the following commits:
    
    e3d867e6 [srinivasst] Set dictionary page offset flag while setting the page offset
    79471854 [srinivasst] Fix dictionary flag setting in toParquetMetadata method in ParquetMetadataConverter class
---
 .../format/converter/ParquetMetadataConverter.java |  4 +-
 .../converter/TestParquetMetadataConverter.java    | 90 ++++++++++++++++++++++
 2 files changed, 93 insertions(+), 1 deletion(-)

diff --git a/parquet-hadoop/src/main/java/org/apache/parquet/format/converter/ParquetMetadataConverter.java b/parquet-hadoop/src/main/java/org/apache/parquet/format/converter/ParquetMetadataConverter.java
index 3908463..0c6c2df 100644
--- a/parquet-hadoop/src/main/java/org/apache/parquet/format/converter/ParquetMetadataConverter.java
+++ b/parquet-hadoop/src/main/java/org/apache/parquet/format/converter/ParquetMetadataConverter.java
@@ -479,7 +479,9 @@ public class ParquetMetadataConverter {
           columnMetaData.getTotalUncompressedSize(),
           columnMetaData.getTotalSize(),
           columnMetaData.getFirstDataPageOffset());
-      columnChunk.meta_data.dictionary_page_offset = columnMetaData.getDictionaryPageOffset();
+      if (columnMetaData.getEncodingStats() != null && columnMetaData.getEncodingStats().hasDictionaryPages()) {
+        columnChunk.meta_data.setDictionary_page_offset(columnMetaData.getDictionaryPageOffset());
+      }
       columnChunk.meta_data.setBloom_filter_offset(columnMetaData.getBloomFilterOffset());
       if (!columnMetaData.getStatistics().isEmpty()) {
         columnChunk.meta_data.setStatistics(toParquetStatistics(columnMetaData.getStatistics(), this.statisticsTruncateLength));
diff --git a/parquet-hadoop/src/test/java/org/apache/parquet/format/converter/TestParquetMetadataConverter.java b/parquet-hadoop/src/test/java/org/apache/parquet/format/converter/TestParquetMetadataConverter.java
index 256ea36..e9051ea 100644
--- a/parquet-hadoop/src/test/java/org/apache/parquet/format/converter/TestParquetMetadataConverter.java
+++ b/parquet-hadoop/src/test/java/org/apache/parquet/format/converter/TestParquetMetadataConverter.java
@@ -72,6 +72,8 @@ import org.apache.parquet.FixedBinaryTestUtils;
 import org.apache.parquet.Version;
 import org.apache.parquet.bytes.BytesUtils;
 import org.apache.parquet.column.ColumnDescriptor;
+import org.apache.parquet.column.Encoding;
+import org.apache.parquet.column.EncodingStats;
 import org.apache.parquet.column.ParquetProperties;
 import org.apache.parquet.column.statistics.BinaryStatistics;
 import org.apache.parquet.column.statistics.BooleanStatistics;
@@ -82,6 +84,7 @@ import org.apache.parquet.column.statistics.LongStatistics;
 import org.apache.parquet.column.statistics.Statistics;
 import org.apache.parquet.format.DecimalType;
 import org.apache.parquet.format.LogicalType;
+import org.apache.parquet.format.Util;
 import org.apache.parquet.hadoop.metadata.BlockMetaData;
 import org.apache.parquet.hadoop.metadata.ColumnChunkMetaData;
 import org.apache.parquet.hadoop.metadata.ColumnPath;
@@ -177,6 +180,65 @@ public class TestParquetMetadataConverter {
   }
 
   @Test
+  public void testParquetMetadataConverterWithDictionary()
+    throws IOException {
+    ParquetMetadata parquetMetaData =
+      createParquetMetaData(Encoding.PLAIN_DICTIONARY, Encoding.PLAIN);
+
+    ParquetMetadataConverter converter = new ParquetMetadataConverter();
+    FileMetaData fmd1 = converter.toParquetMetadata(1, parquetMetaData);
+
+    // Flag should be true
+    fmd1.row_groups.forEach(rowGroup -> rowGroup.columns.forEach(column -> {
+      assertTrue(column.meta_data.isSetDictionary_page_offset());
+    }));
+
+    ByteArrayOutputStream metaDataOutputStream = new ByteArrayOutputStream();
+    Util.writeFileMetaData(fmd1, metaDataOutputStream);
+    ByteArrayInputStream metaDataInputStream =
+      new ByteArrayInputStream(metaDataOutputStream.toByteArray());
+    FileMetaData fmd2 = Util.readFileMetaData(metaDataInputStream);
+    ParquetMetadata parquetMetaDataConverted =
+      converter.fromParquetMetadata(fmd2);
+
+    long dicOffsetOriginal =
+      parquetMetaData.getBlocks().get(0).getColumns().get(0)
+        .getDictionaryPageOffset();
+    long dicOffsetConverted =
+      parquetMetaDataConverted.getBlocks().get(0).getColumns().get(0)
+        .getDictionaryPageOffset();
+
+    Assert.assertEquals(dicOffsetOriginal, dicOffsetConverted);
+  }
+
+  @Test
+  public void testParquetMetadataConverterWithoutDictionary()
+    throws IOException {
+    ParquetMetadata parquetMetaData =
+      createParquetMetaData(null, Encoding.PLAIN);
+
+    ParquetMetadataConverter converter = new ParquetMetadataConverter();
+    FileMetaData fmd1 = converter.toParquetMetadata(1, parquetMetaData);
+
+    // Flag should be false
+    fmd1.row_groups.forEach(rowGroup -> rowGroup.columns.forEach(column -> {
+      assertFalse(column.meta_data.isSetDictionary_page_offset());
+    }));
+
+    ByteArrayOutputStream metaDataOutputStream = new ByteArrayOutputStream();
+    Util.writeFileMetaData(fmd1, metaDataOutputStream);
+    ByteArrayInputStream metaDataInputStream =
+      new ByteArrayInputStream(metaDataOutputStream.toByteArray());
+    FileMetaData fmd2 = Util.readFileMetaData(metaDataInputStream);
+    ParquetMetadata pmd2 = converter.fromParquetMetadata(fmd2);
+
+    long dicOffsetConverted =
+      pmd2.getBlocks().get(0).getColumns().get(0).getDictionaryPageOffset();
+
+    Assert.assertEquals(0, dicOffsetConverted);
+  }
+
+  @Test
   public void testLogicalTypesBackwardCompatibleWithConvertedTypes() {
     ParquetMetadataConverter parquetMetadataConverter = new ParquetMetadataConverter();
     MessageType expected = Types.buildMessage()
@@ -1053,6 +1115,34 @@ public class TestParquetMetadataConverter {
     return stats;
   }
 
+  private static ParquetMetadata createParquetMetaData(Encoding dicEncoding,
+    Encoding dataEncoding) {
+    MessageType schema =
+      parseMessageType("message schema { optional int32 col (INT_32); }");
+    org.apache.parquet.hadoop.metadata.FileMetaData fileMetaData =
+      new org.apache.parquet.hadoop.metadata.FileMetaData(schema,
+        new HashMap<String, String>(), null);
+    List<BlockMetaData> blockMetaDataList = new ArrayList<BlockMetaData>();
+    BlockMetaData blockMetaData = new BlockMetaData();
+    EncodingStats.Builder builder = new EncodingStats.Builder();
+    if (dicEncoding != null) {
+      builder.addDictEncoding(dicEncoding);
+    }
+    builder.addDataEncoding(dataEncoding);
+    EncodingStats es = builder.build();
+    Set<org.apache.parquet.column.Encoding> e =
+      new HashSet<org.apache.parquet.column.Encoding>();
+    PrimitiveTypeName t = PrimitiveTypeName.INT32;
+    ColumnPath p = ColumnPath.get("col");
+    CompressionCodecName c = CompressionCodecName.UNCOMPRESSED;
+    BinaryStatistics s = new BinaryStatistics();
+    ColumnChunkMetaData md =
+      ColumnChunkMetaData.get(p, t, c, es, e, s, 20, 30, 0, 0, 0);
+    blockMetaData.addColumn(md);
+    blockMetaDataList.add(blockMetaData);
+    return new ParquetMetadata(fileMetaData, blockMetaDataList);
+  }
+
   private enum StatsHelper {
     // Only min and max are filled (min_value and max_value are not)
     V1() {