Posted to github@beam.apache.org by GitBox <gi...@apache.org> on 2020/07/17 20:16:46 UTC

[GitHub] [beam] boyuanzz commented on a change in pull request #12223: [Beam-4379] Make ParquetIO read splittable

boyuanzz commented on a change in pull request #12223:
URL: https://github.com/apache/beam/pull/12223#discussion_r456632511



##########
File path: sdks/java/io/parquet/src/main/java/org/apache/beam/sdk/io/parquet/ParquetIO.java
##########
@@ -190,12 +227,19 @@ public Read withAvroDataModel(GenericData model) {
     @Override
     public PCollection<GenericRecord> expand(PBegin input) {
       checkNotNull(getFilepattern(), "Filepattern cannot be null.");
-
-      return input
-          .apply("Create filepattern", Create.ofProvider(getFilepattern(), StringUtf8Coder.of()))
-          .apply(FileIO.matchAll())
-          .apply(FileIO.readMatches())
-          .apply(readFiles(getSchema()).withAvroDataModel(getAvroDataModel()));
+      if (getSplit() != null) {

Review comment:
       `getSplit()` can also be `false`, in which case this null check would still take the split path. You can mark `getSplit()` as non-null with `false` as the default value and branch on the value instead.
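
       A minimal sketch of what that could look like (the generated `AutoValue_ParquetIO_ReadFiles` builder name and the `setSplit(false)` default in the factory are assumptions; everything else comes from the diff):

```java
// In ReadFiles (and similarly in Read): a primitive boolean can never be null,
// so the branch reads on the value itself.
abstract boolean getSplit();

// In the factory that first builds the transform (e.g. readFiles(schema)), set the
// default explicitly, e.g. `.setSplit(false)` on the generated AutoValue builder.

@Override
public PCollection<GenericRecord> expand(PCollection<FileIO.ReadableFile> input) {
  checkNotNull(getSchema(), "Schema can not be null");
  if (getSplit()) {
    return input
        .apply(ParDo.of(new SplitReadFn(getAvroDataModel())))
        .setCoder(AvroCoder.of(getSchema()));
  }
  return input
      .apply(ParDo.of(new ReadFn(getAvroDataModel())))
      .setCoder(AvroCoder.of(getSchema()));
}
```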

##########
File path: sdks/java/io/parquet/src/main/java/org/apache/beam/sdk/io/parquet/ParquetIO.java
##########
@@ -235,12 +284,151 @@ public ReadFiles withAvroDataModel(GenericData model) {
       return toBuilder().setAvroDataModel(model).build();
     }
 
+    public ReadFiles withSplit() {
+      return toBuilder().setSplit(true).build();
+    }
+
     @Override
     public PCollection<GenericRecord> expand(PCollection<FileIO.ReadableFile> input) {
       checkNotNull(getSchema(), "Schema can not be null");
-      return input
-          .apply(ParDo.of(new ReadFn(getAvroDataModel())))
-          .setCoder(AvroCoder.of(getSchema()));
+      if (getSplit() != null) {

Review comment:
       Same as above.

##########
File path: sdks/java/io/parquet/src/main/java/org/apache/beam/sdk/io/parquet/ParquetIO.java
##########
@@ -235,12 +284,151 @@ public ReadFiles withAvroDataModel(GenericData model) {
       return toBuilder().setAvroDataModel(model).build();
     }
 
+    public ReadFiles withSplit() {
+      return toBuilder().setSplit(true).build();
+    }
+
     @Override
     public PCollection<GenericRecord> expand(PCollection<FileIO.ReadableFile> input) {
       checkNotNull(getSchema(), "Schema can not be null");
-      return input
-          .apply(ParDo.of(new ReadFn(getAvroDataModel())))
-          .setCoder(AvroCoder.of(getSchema()));
+      if (getSplit() != null) {
+        return input
+            .apply(ParDo.of(new SplitReadFn(getAvroDataModel())))
+            .setCoder(AvroCoder.of(getSchema()));
+      } else {
+        return input
+            .apply(ParDo.of(new ReadFn(getAvroDataModel())))
+            .setCoder(AvroCoder.of(getSchema()));
+      }
+    }
+
+    @DoFn.BoundedPerElement
+    static class SplitReadFn extends DoFn<FileIO.ReadableFile, GenericRecord> {
+      private Class<? extends GenericData> modelClass;
+      private static final Logger LOG = LoggerFactory.getLogger(SplitReadFn.class);
+      ReadSupport<GenericRecord> readSupport;
+
+      SplitReadFn(GenericData model) {
+        this.modelClass = model != null ? model.getClass() : null;
+      }
+
+      private static <K, V> Map<K, Set<V>> toSetMultiMap(Map<K, V> map) {
+        Map<K, Set<V>> setMultiMap = new HashMap<K, Set<V>>();
+        for (Map.Entry<K, V> entry : map.entrySet()) {
+          Set<V> set = new HashSet<V>();
+          set.add(entry.getValue());
+          setMultiMap.put(entry.getKey(), Collections.unmodifiableSet(set));
+        }
+        return Collections.unmodifiableMap(setMultiMap);
+      }
+
+      @ProcessElement
+      public void processElement(
+          @Element FileIO.ReadableFile file,
+          RestrictionTracker<OffsetRange, Long> tracker,
+          OutputReceiver<GenericRecord> outputReceiver)
+          throws Exception {
+        if (!file.getMetadata().isReadSeekEfficient()) {
+          ResourceId filename = file.getMetadata().resourceId();
+          throw new RuntimeException(String.format("File has to be seekable: %s", filename));
+        }
+
+        SeekableByteChannel seekableByteChannel = file.openSeekable();
+        ReadSupport<GenericRecord> readSupport;
+        InputFile inputFile = new BeamParquetInputFile(seekableByteChannel);
+        Configuration conf = setConf();
+        GenericData model = null;
+        if (modelClass != null) {
+          model = (GenericData) modelClass.getMethod("get").invoke(null);
+        }
+        readSupport = new AvroReadSupport<GenericRecord>(model);
+        ParquetReadOptions options = HadoopReadOptions.builder(conf).build();
+        ParquetFileReader reader = ParquetFileReader.open(inputFile, options);
+        Filter filter = checkNotNull(options.getRecordFilter(), "filter");
+        conf = ((HadoopReadOptions) options).getConf();
+        for (String property : options.getPropertyNames()) {
+          conf.set(property, options.getProperty(property));
+        }
+        FileMetaData parquetFileMetadata = reader.getFooter().getFileMetaData();
+        MessageType fileSchema = parquetFileMetadata.getSchema();
+        Map<String, String> fileMetadata = parquetFileMetadata.getKeyValueMetaData();
+
+        ReadSupport.ReadContext readContext =
+            readSupport.init(new InitContext(conf, toSetMultiMap(fileMetadata), fileSchema));
+        ColumnIOFactory columnIOFactory = new ColumnIOFactory(parquetFileMetadata.getCreatedBy());
+        MessageType requestedSchema = readContext.getRequestedSchema();
+        RecordMaterializer<GenericRecord> recordConverter =
+            readSupport.prepareForRead(conf, fileMetadata, fileSchema, readContext);
+        boolean strictTypeChecking = options.isEnabled(STRICT_TYPE_CHECKING, true);
+        boolean filterRecords = options.useRecordFilter();
+        reader.setRequestedSchema(requestedSchema);
+        MessageColumnIO columnIO =
+            columnIOFactory.getColumnIO(requestedSchema, fileSchema, strictTypeChecking);
+        for (int i = 0; i < reader.getRowGroups().size(); i++) {

Review comment:
       I would get the start position from `tracker.currentRestriction().getFrom()` and add a util function that moves the reader's cursor to that index. Then you can do a while loop like `while (tracker.tryClaim(i))`.
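
       A rough sketch of that structure (`advanceToRowGroup` is a hypothetical helper; the rest is taken from the code above):

```java
long currentRowGroup = tracker.currentRestriction().getFrom();
advanceToRowGroup(reader, currentRowGroup);
while (tracker.tryClaim(currentRowGroup)) {
  PageReadStore pages = reader.readNextRowGroup();
  RecordReader<GenericRecord> recordReader =
      columnIO.getRecordReader(
          pages, recordConverter, filterRecords ? filter : FilterCompat.NOOP);
  for (long row = 0, totalRows = pages.getRowCount(); row < totalRows; row++) {
    outputReceiver.output(recordReader.read());
  }
  currentRowGroup++;
}

// Hypothetical util elsewhere in the class: move the reader's cursor to the
// first row group of the claimed restriction.
private static void advanceToRowGroup(ParquetFileReader reader, long index) throws IOException {
  for (long i = 0; i < index; i++) {
    reader.skipNextRowGroup();
  }
}
```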

##########
File path: sdks/java/io/parquet/src/main/java/org/apache/beam/sdk/io/parquet/ParquetIO.java
##########
@@ -190,12 +227,19 @@ public Read withAvroDataModel(GenericData model) {
     @Override
     public PCollection<GenericRecord> expand(PBegin input) {
       checkNotNull(getFilepattern(), "Filepattern cannot be null.");
-
-      return input
-          .apply("Create filepattern", Create.ofProvider(getFilepattern(), StringUtf8Coder.of()))
-          .apply(FileIO.matchAll())
-          .apply(FileIO.readMatches())
-          .apply(readFiles(getSchema()).withAvroDataModel(getAvroDataModel()));
+      if (getSplit() != null) {
+        return input

Review comment:
       L231-L234 and L237-L240 are the same. We can factor them out into a common part and only configure `readFiles` differently.
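
       For example (sketch only; this assumes `getSplit()` has been turned into a non-null `boolean` as suggested above):

```java
ReadFiles readFiles = readFiles(getSchema()).withAvroDataModel(getAvroDataModel());
if (getSplit()) {
  readFiles = readFiles.withSplit();
}
return input
    .apply("Create filepattern", Create.ofProvider(getFilepattern(), StringUtf8Coder.of()))
    .apply(FileIO.matchAll())
    .apply(FileIO.readMatches())
    .apply(readFiles);
```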

##########
File path: sdks/java/io/parquet/src/main/java/org/apache/beam/sdk/io/parquet/ParquetIO.java
##########
@@ -235,12 +284,151 @@ public ReadFiles withAvroDataModel(GenericData model) {
       return toBuilder().setAvroDataModel(model).build();
     }
 
+    public ReadFiles withSplit() {
+      return toBuilder().setSplit(true).build();
+    }
+
     @Override
     public PCollection<GenericRecord> expand(PCollection<FileIO.ReadableFile> input) {
       checkNotNull(getSchema(), "Schema can not be null");
-      return input
-          .apply(ParDo.of(new ReadFn(getAvroDataModel())))
-          .setCoder(AvroCoder.of(getSchema()));
+      if (getSplit() != null) {
+        return input
+            .apply(ParDo.of(new SplitReadFn(getAvroDataModel())))
+            .setCoder(AvroCoder.of(getSchema()));
+      } else {
+        return input
+            .apply(ParDo.of(new ReadFn(getAvroDataModel())))
+            .setCoder(AvroCoder.of(getSchema()));
+      }
+    }
+
+    @DoFn.BoundedPerElement
+    static class SplitReadFn extends DoFn<FileIO.ReadableFile, GenericRecord> {
+      private Class<? extends GenericData> modelClass;
+      private static final Logger LOG = LoggerFactory.getLogger(SplitReadFn.class);
+      ReadSupport<GenericRecord> readSupport;
+
+      SplitReadFn(GenericData model) {
+        this.modelClass = model != null ? model.getClass() : null;
+      }
+
+      private static <K, V> Map<K, Set<V>> toSetMultiMap(Map<K, V> map) {
+        Map<K, Set<V>> setMultiMap = new HashMap<K, Set<V>>();
+        for (Map.Entry<K, V> entry : map.entrySet()) {
+          Set<V> set = new HashSet<V>();
+          set.add(entry.getValue());
+          setMultiMap.put(entry.getKey(), Collections.unmodifiableSet(set));
+        }
+        return Collections.unmodifiableMap(setMultiMap);
+      }
+
+      @ProcessElement
+      public void processElement(
+          @Element FileIO.ReadableFile file,
+          RestrictionTracker<OffsetRange, Long> tracker,
+          OutputReceiver<GenericRecord> outputReceiver)
+          throws Exception {
+        if (!file.getMetadata().isReadSeekEfficient()) {
+          ResourceId filename = file.getMetadata().resourceId();
+          throw new RuntimeException(String.format("File has to be seekable: %s", filename));
+        }
+
+        SeekableByteChannel seekableByteChannel = file.openSeekable();
+        ReadSupport<GenericRecord> readSupport;
+        InputFile inputFile = new BeamParquetInputFile(seekableByteChannel);
+        Configuration conf = setConf();
+        GenericData model = null;
+        if (modelClass != null) {
+          model = (GenericData) modelClass.getMethod("get").invoke(null);
+        }
+        readSupport = new AvroReadSupport<GenericRecord>(model);
+        ParquetReadOptions options = HadoopReadOptions.builder(conf).build();
+        ParquetFileReader reader = ParquetFileReader.open(inputFile, options);
+        Filter filter = checkNotNull(options.getRecordFilter(), "filter");
+        conf = ((HadoopReadOptions) options).getConf();
+        for (String property : options.getPropertyNames()) {
+          conf.set(property, options.getProperty(property));
+        }
+        FileMetaData parquetFileMetadata = reader.getFooter().getFileMetaData();
+        MessageType fileSchema = parquetFileMetadata.getSchema();
+        Map<String, String> fileMetadata = parquetFileMetadata.getKeyValueMetaData();
+
+        ReadSupport.ReadContext readContext =
+            readSupport.init(new InitContext(conf, toSetMultiMap(fileMetadata), fileSchema));
+        ColumnIOFactory columnIOFactory = new ColumnIOFactory(parquetFileMetadata.getCreatedBy());
+        MessageType requestedSchema = readContext.getRequestedSchema();
+        RecordMaterializer<GenericRecord> recordConverter =
+            readSupport.prepareForRead(conf, fileMetadata, fileSchema, readContext);
+        boolean strictTypeChecking = options.isEnabled(STRICT_TYPE_CHECKING, true);
+        boolean filterRecords = options.useRecordFilter();
+        reader.setRequestedSchema(requestedSchema);
+        MessageColumnIO columnIO =
+            columnIOFactory.getColumnIO(requestedSchema, fileSchema, strictTypeChecking);
+        for (int i = 0; i < reader.getRowGroups().size(); i++) {
+          if (i < tracker.currentRestriction().getFrom()) {
+            reader.skipNextRowGroup();
+            continue;
+          }
+          if (tracker.tryClaim((long) i)) {
+            PageReadStore pages = reader.readNextRowGroup();
+            i += 1;
+            RecordReader<GenericRecord> recordReader =
+                columnIO.getRecordReader(
+                    pages, recordConverter, filterRecords ? filter : FilterCompat.NOOP);
+            GenericRecord read;
+            long current = 0;
+            long totalRows = pages.getRowCount();
+            while (current < totalRows) {
+              read = recordReader.read();
+              outputReceiver.output(read);
+              current += 1;
+            }
+          } else {
+            break;
+          }
+        }
+      }
+
+      private Configuration setConf() throws Exception {
+        Configuration conf = new Configuration();
+        GenericData model = null;
+        if (modelClass != null) {
+          model = (GenericData) modelClass.getMethod("get").invoke(null);
+        }
+        if (model != null
+            && (model.getClass() == GenericData.class || model.getClass() == SpecificData.class)) {
+          conf.setBoolean(AvroReadSupport.AVRO_COMPATIBILITY, true);
+        } else {
+          conf.setBoolean(AvroReadSupport.AVRO_COMPATIBILITY, false);
+        }
+        return conf;
+      }
+
+      @GetInitialRestriction
+      public OffsetRange getInitialRestriction(@Element FileIO.ReadableFile file) throws Exception {
+        if (!file.getMetadata().isReadSeekEfficient()) {
+          ResourceId filename = file.getMetadata().resourceId();
+          throw new RuntimeException(String.format("File has to be seekable: %s", filename));
+        }
+        SeekableByteChannel seekableByteChannel = file.openSeekable();
+        InputFile inputFile = new BeamParquetInputFile(seekableByteChannel);
+        Configuration conf = setConf();
+        ParquetReadOptions options = HadoopReadOptions.builder(conf).build();
+        ParquetFileReader reader = ParquetFileReader.open(inputFile, options);
+        return new OffsetRange(0, reader.getRowGroups().size());
+      }
+
+      @SplitRestriction
+      public void split(@Restriction OffsetRange restriction, OutputReceiver<OffsetRange> out) {

Review comment:
       Are you sure you want to initially split the `OffsetRange` into one offset per range? That seems too fine-grained to me. Also, one offset per range means no further splitting can happen.
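
       For instance, the initial split could emit coarser sub-ranges and leave the rest to dynamic splitting (sketch; the choice of 8 row groups per sub-range is arbitrary):

```java
@SplitRestriction
public void split(@Restriction OffsetRange restriction, OutputReceiver<OffsetRange> out) {
  // Chop the range into sub-ranges of roughly 8 row groups each instead of one per row group.
  for (OffsetRange subRange : restriction.split(8, 1)) {
    out.output(subRange);
  }
}
```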

##########
File path: sdks/java/io/parquet/src/main/java/org/apache/beam/sdk/io/parquet/ParquetIO.java
##########
@@ -235,12 +284,151 @@ public ReadFiles withAvroDataModel(GenericData model) {
       return toBuilder().setAvroDataModel(model).build();
     }
 
+    public ReadFiles withSplit() {
+      return toBuilder().setSplit(true).build();
+    }
+
     @Override
     public PCollection<GenericRecord> expand(PCollection<FileIO.ReadableFile> input) {
       checkNotNull(getSchema(), "Schema can not be null");
-      return input
-          .apply(ParDo.of(new ReadFn(getAvroDataModel())))
-          .setCoder(AvroCoder.of(getSchema()));
+      if (getSplit() != null) {
+        return input
+            .apply(ParDo.of(new SplitReadFn(getAvroDataModel())))
+            .setCoder(AvroCoder.of(getSchema()));
+      } else {
+        return input
+            .apply(ParDo.of(new ReadFn(getAvroDataModel())))
+            .setCoder(AvroCoder.of(getSchema()));
+      }
+    }
+
+    @DoFn.BoundedPerElement
+    static class SplitReadFn extends DoFn<FileIO.ReadableFile, GenericRecord> {
+      private Class<? extends GenericData> modelClass;
+      private static final Logger LOG = LoggerFactory.getLogger(SplitReadFn.class);
+      ReadSupport<GenericRecord> readSupport;
+
+      SplitReadFn(GenericData model) {
+        this.modelClass = model != null ? model.getClass() : null;
+      }
+
+      private static <K, V> Map<K, Set<V>> toSetMultiMap(Map<K, V> map) {
+        Map<K, Set<V>> setMultiMap = new HashMap<K, Set<V>>();
+        for (Map.Entry<K, V> entry : map.entrySet()) {
+          Set<V> set = new HashSet<V>();
+          set.add(entry.getValue());
+          setMultiMap.put(entry.getKey(), Collections.unmodifiableSet(set));
+        }
+        return Collections.unmodifiableMap(setMultiMap);
+      }
+
+      @ProcessElement
+      public void processElement(
+          @Element FileIO.ReadableFile file,
+          RestrictionTracker<OffsetRange, Long> tracker,
+          OutputReceiver<GenericRecord> outputReceiver)
+          throws Exception {
+        if (!file.getMetadata().isReadSeekEfficient()) {
+          ResourceId filename = file.getMetadata().resourceId();
+          throw new RuntimeException(String.format("File has to be seekable: %s", filename));
+        }
+
+        SeekableByteChannel seekableByteChannel = file.openSeekable();
+        ReadSupport<GenericRecord> readSupport;
+        InputFile inputFile = new BeamParquetInputFile(seekableByteChannel);
+        Configuration conf = setConf();
+        GenericData model = null;
+        if (modelClass != null) {
+          model = (GenericData) modelClass.getMethod("get").invoke(null);
+        }
+        readSupport = new AvroReadSupport<GenericRecord>(model);
+        ParquetReadOptions options = HadoopReadOptions.builder(conf).build();
+        ParquetFileReader reader = ParquetFileReader.open(inputFile, options);
+        Filter filter = checkNotNull(options.getRecordFilter(), "filter");
+        conf = ((HadoopReadOptions) options).getConf();
+        for (String property : options.getPropertyNames()) {
+          conf.set(property, options.getProperty(property));
+        }
+        FileMetaData parquetFileMetadata = reader.getFooter().getFileMetaData();
+        MessageType fileSchema = parquetFileMetadata.getSchema();
+        Map<String, String> fileMetadata = parquetFileMetadata.getKeyValueMetaData();
+
+        ReadSupport.ReadContext readContext =
+            readSupport.init(new InitContext(conf, toSetMultiMap(fileMetadata), fileSchema));
+        ColumnIOFactory columnIOFactory = new ColumnIOFactory(parquetFileMetadata.getCreatedBy());
+        MessageType requestedSchema = readContext.getRequestedSchema();
+        RecordMaterializer<GenericRecord> recordConverter =
+            readSupport.prepareForRead(conf, fileMetadata, fileSchema, readContext);
+        boolean strictTypeChecking = options.isEnabled(STRICT_TYPE_CHECKING, true);
+        boolean filterRecords = options.useRecordFilter();
+        reader.setRequestedSchema(requestedSchema);
+        MessageColumnIO columnIO =
+            columnIOFactory.getColumnIO(requestedSchema, fileSchema, strictTypeChecking);
+        for (int i = 0; i < reader.getRowGroups().size(); i++) {
+          if (i < tracker.currentRestriction().getFrom()) {
+            reader.skipNextRowGroup();
+            continue;
+          }
+          if (tracker.tryClaim((long) i)) {
+            PageReadStore pages = reader.readNextRowGroup();
+            i += 1;
+            RecordReader<GenericRecord> recordReader =
+                columnIO.getRecordReader(
+                    pages, recordConverter, filterRecords ? filter : FilterCompat.NOOP);
+            GenericRecord read;
+            long current = 0;
+            long totalRows = pages.getRowCount();
+            while (current < totalRows) {
+              read = recordReader.read();
+              outputReceiver.output(read);

Review comment:
       You could do `outputReceiver.output(recordReader.read())` to get rid of the local var.
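
       i.e. something like:

```java
while (current < totalRows) {
  outputReceiver.output(recordReader.read());
  current += 1;
}
```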

##########
File path: sdks/java/io/parquet/src/main/java/org/apache/beam/sdk/io/parquet/ParquetIO.java
##########
@@ -235,12 +284,151 @@ public ReadFiles withAvroDataModel(GenericData model) {
       return toBuilder().setAvroDataModel(model).build();
     }
 
+    public ReadFiles withSplit() {
+      return toBuilder().setSplit(true).build();
+    }
+
     @Override
     public PCollection<GenericRecord> expand(PCollection<FileIO.ReadableFile> input) {
       checkNotNull(getSchema(), "Schema can not be null");
-      return input
-          .apply(ParDo.of(new ReadFn(getAvroDataModel())))
-          .setCoder(AvroCoder.of(getSchema()));
+      if (getSplit() != null) {
+        return input
+            .apply(ParDo.of(new SplitReadFn(getAvroDataModel())))
+            .setCoder(AvroCoder.of(getSchema()));
+      } else {
+        return input
+            .apply(ParDo.of(new ReadFn(getAvroDataModel())))
+            .setCoder(AvroCoder.of(getSchema()));
+      }
+    }
+
+    @DoFn.BoundedPerElement
+    static class SplitReadFn extends DoFn<FileIO.ReadableFile, GenericRecord> {
+      private Class<? extends GenericData> modelClass;
+      private static final Logger LOG = LoggerFactory.getLogger(SplitReadFn.class);
+      ReadSupport<GenericRecord> readSupport;
+
+      SplitReadFn(GenericData model) {
+        this.modelClass = model != null ? model.getClass() : null;
+      }
+
+      private static <K, V> Map<K, Set<V>> toSetMultiMap(Map<K, V> map) {
+        Map<K, Set<V>> setMultiMap = new HashMap<K, Set<V>>();
+        for (Map.Entry<K, V> entry : map.entrySet()) {
+          Set<V> set = new HashSet<V>();
+          set.add(entry.getValue());
+          setMultiMap.put(entry.getKey(), Collections.unmodifiableSet(set));
+        }
+        return Collections.unmodifiableMap(setMultiMap);
+      }
+
+      @ProcessElement
+      public void processElement(
+          @Element FileIO.ReadableFile file,
+          RestrictionTracker<OffsetRange, Long> tracker,
+          OutputReceiver<GenericRecord> outputReceiver)
+          throws Exception {
+        if (!file.getMetadata().isReadSeekEfficient()) {
+          ResourceId filename = file.getMetadata().resourceId();
+          throw new RuntimeException(String.format("File has to be seekable: %s", filename));

Review comment:
       You can use `file.getMetadata().resourceId()` directly instead of creating the local var `filename`.
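
       i.e.:

```java
if (!file.getMetadata().isReadSeekEfficient()) {
  throw new RuntimeException(
      String.format("File has to be seekable: %s", file.getMetadata().resourceId()));
}
```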




----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
users@infra.apache.org