Posted to common-issues@hadoop.apache.org by GitBox <gi...@apache.org> on 2021/08/02 20:58:27 UTC

[GitHub] [hadoop] sunchao commented on a change in pull request #3250: HADOOP-17825. Add BuiltInGzipCompressor

sunchao commented on a change in pull request #3250:
URL: https://github.com/apache/hadoop/pull/3250#discussion_r681254845



##########
File path: hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/GzipCodec.java
##########
@@ -154,7 +155,7 @@ public CompressionOutputStream createOutputStream(OutputStream out,
   public Compressor createCompressor() {
     return (ZlibFactory.isNativeZlibLoaded(conf))
       ? new GzipZlibCompressor(conf)
-      : null;
+      : new BuiltInGzipCompressor(conf);

Review comment:
       we also need to update `getCompressorType`.
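       For reference, the symmetric change would presumably be something like the following sketch (mirroring the `createCompressor` branch above):

           @Override
           public Class<? extends Compressor> getCompressorType() {
             return ZlibFactory.isNativeZlibLoaded(conf)
                 ? GzipZlibCompressor.class
                 : BuiltInGzipCompressor.class;
           }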

##########
File path: hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/BuiltInGzipCompressor.java
##########
@@ -0,0 +1,256 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.io.compress.zlib;
+
+import java.io.IOException;
+import java.util.zip.Checksum;
+import java.util.zip.Deflater;
+import java.util.zip.GZIPOutputStream;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.io.compress.Compressor;
+import org.apache.hadoop.io.compress.DoNotPool;
+import org.apache.hadoop.util.DataChecksum;
+
+/**
+ * A {@link Compressor} based on the popular gzip compressed file format.
+ * http://www.gzip.org/
+ *

Review comment:
       nit: redundant line.

##########
File path: hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/BuiltInGzipCompressor.java
##########
@@ -0,0 +1,256 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.io.compress.zlib;
+
+import java.io.IOException;
+import java.util.zip.Checksum;
+import java.util.zip.Deflater;
+import java.util.zip.GZIPOutputStream;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.io.compress.Compressor;
+import org.apache.hadoop.io.compress.DoNotPool;
+import org.apache.hadoop.util.DataChecksum;
+
+/**
+ * A {@link Compressor} based on the popular gzip compressed file format.
+ * http://www.gzip.org/
+ *
+ */
+@DoNotPool
+public class BuiltInGzipCompressor implements Compressor {
+
+    /**
+     * Fixed ten-byte gzip header. See {@link GZIPOutputStream}'s source for
+     * details.
+     */
+    private static final byte[] GZIP_HEADER = new byte[] {
+            0x1f, (byte) 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
+
+    // 'true' (nowrap) => Deflater will handle raw deflate stream only
+    private Deflater deflater = new Deflater(Deflater.DEFAULT_COMPRESSION, true);
+
+    private int headerOff = 0;
+
+    private byte[] userBuf = null;
+    private int userBufOff = 0;
+    private int userBufLen = 0;
+
+    private int headerBytesWritten = 0;
+    private int trailerBytesWritten = 0;
+
+    private int currentInputLen = 0;
+
+    private Checksum crc = DataChecksum.newCrc32();
+
+    private BuiltInGzipDecompressor.GzipStateLabel state;
+
+    public BuiltInGzipCompressor(Configuration conf) {
+        ZlibCompressor.CompressionLevel level = ZlibFactory.getCompressionLevel(conf);
+        ZlibCompressor.CompressionStrategy strategy = ZlibFactory.getCompressionStrategy(conf);
+
+        deflater = new Deflater(level.compressionLevel(), true);
+        deflater.setStrategy(strategy.compressionStrategy());
+
+        state = BuiltInGzipDecompressor.GzipStateLabel.HEADER_BASIC;
+        crc.reset();
+    }
+
+    @Override
+    public boolean finished() {
+        return deflater.finished();
+    }
+
+    @Override
+    public boolean needsInput() {
+        if (state == BuiltInGzipDecompressor.GzipStateLabel.INFLATE_STREAM) {
+            return deflater.needsInput();
+        }
+
+        return (state != BuiltInGzipDecompressor.GzipStateLabel.FINISHED);
+    }
+
+    @Override
+    public int compress(byte[] b, int off, int len) throws IOException {
+        int numAvailBytes = 0;
+
+        // If we have not started the deflate stream yet, output the gzip header first.
+        if (state != BuiltInGzipDecompressor.GzipStateLabel.INFLATE_STREAM) {
+            if (userBufLen <= 0) {
+                return numAvailBytes;
+            }
+
+            int outputHeaderSize = writeHeader(b, off, len);
+
+            // If the full ten-byte header has been written, move on to the deflate stream.
+            if (headerOff == 10) {
+                state = BuiltInGzipDecompressor.GzipStateLabel.INFLATE_STREAM;
+            }
+
+            numAvailBytes += outputHeaderSize;
+
+            if (outputHeaderSize == len) {
+                return numAvailBytes;
+            }
+
+            off += outputHeaderSize;
+            len -= outputHeaderSize;
+        }
+
+        if (state == BuiltInGzipDecompressor.GzipStateLabel.INFLATE_STREAM) {
+            // Hand off the user data (or what's left of it) to the Deflater. Note that
+            // the Deflater may not have consumed all of the previous bufferload, in which
+            // case userBufLen will be zero.
+            if (userBufLen > 0) {
+                deflater.setInput(userBuf, userBufOff, userBufLen);
+
+                crc.update(userBuf, userBufOff, userBufLen);  // CRC-32 is on uncompressed data
+
+                currentInputLen = userBufLen;
+                userBufOff += userBufLen;
+                userBufLen = 0;
+            }
+
+            // now compress it into b[]
+            int deflated = deflater.deflate(b, off, len - 8, Deflater.FULL_FLUSH);
+
+            numAvailBytes += deflated;
+            off += deflated;
+            len -= deflated;
+
+            // All current input has been processed; move on to the trailer.
+            if (deflater.finished()) {
+                state = BuiltInGzipDecompressor.GzipStateLabel.TRAILER_CRC;
+            } else {
+                return numAvailBytes;
+            }
+        }
+
+        numAvailBytes += writeTrailer(b, off, len);
+
+        return numAvailBytes;
+    }
+
+    @Override
+    public long getBytesRead() {
+        return deflater.getTotalIn();
+    }
+
+    @Override
+    public long getBytesWritten() {
+        return headerBytesWritten + deflater.getTotalOut() + trailerBytesWritten;
+    }
+
+    @Override
+    public void end() { deflater.end(); }
+
+    @Override
+    public void finish() { deflater.finish(); }
+
+    @Override
+    public void reinit(Configuration conf) {

Review comment:
       I think we'll need to reset the compression level, strategy, etc. here too?
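       A sketch of what that could look like, re-deriving the level and strategy from the new conf the same way the constructor above does (field names taken from the hunk above; illustrative only):

           @Override
           public void reinit(Configuration conf) {
             deflater.reset();
             if (conf != null) {
               // Pick up a possibly different level/strategy from the new conf,
               // mirroring the constructor.
               deflater.setLevel(ZlibFactory.getCompressionLevel(conf).compressionLevel());
               deflater.setStrategy(ZlibFactory.getCompressionStrategy(conf).compressionStrategy());
             }
             // Rewind the gzip framing state machine as well.
             state = BuiltInGzipDecompressor.GzipStateLabel.HEADER_BASIC;
             crc.reset();
             headerOff = 0;
             userBuf = null;
             userBufOff = 0;
             userBufLen = 0;
             headerBytesWritten = 0;
             trailerBytesWritten = 0;
             currentInputLen = 0;
           }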

##########
File path: hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/BuiltInGzipCompressor.java
##########
@@ -0,0 +1,256 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.io.compress.zlib;
+
+import java.io.IOException;
+import java.util.zip.Checksum;
+import java.util.zip.Deflater;
+import java.util.zip.GZIPOutputStream;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.io.compress.Compressor;
+import org.apache.hadoop.io.compress.DoNotPool;
+import org.apache.hadoop.util.DataChecksum;
+
+/**
+ * A {@link Compressor} based on the popular gzip compressed file format.
+ * http://www.gzip.org/
+ *
+ */
+@DoNotPool
+public class BuiltInGzipCompressor implements Compressor {
+
+    /**
+     * Fixed ten-byte gzip header. See {@link GZIPOutputStream}'s source for
+     * details.
+     */
+    private static final byte[] GZIP_HEADER = new byte[] {
+            0x1f, (byte) 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
+
+    // 'true' (nowrap) => Deflater will handle raw deflate stream only
+    private Deflater deflater = new Deflater(Deflater.DEFAULT_COMPRESSION, true);

Review comment:
       this initialization is redundant.
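       In other words, the field can simply be left unassigned at its declaration, since the constructor always creates the Deflater from the configured level:

           private Deflater deflater;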

##########
File path: hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/BuiltInGzipCompressor.java
##########
@@ -0,0 +1,256 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.io.compress.zlib;
+
+import java.io.IOException;
+import java.util.zip.Checksum;
+import java.util.zip.Deflater;
+import java.util.zip.GZIPOutputStream;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.io.compress.Compressor;
+import org.apache.hadoop.io.compress.DoNotPool;
+import org.apache.hadoop.util.DataChecksum;
+
+/**
+ * A {@link Compressor} based on the popular gzip compressed file format.
+ * http://www.gzip.org/
+ *
+ */
+@DoNotPool
+public class BuiltInGzipCompressor implements Compressor {

Review comment:
       will it be easier if we extend `Deflater` here instead?

##########
File path: hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/BuiltInGzipCompressor.java
##########
@@ -0,0 +1,256 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.io.compress.zlib;
+
+import java.io.IOException;
+import java.util.zip.Checksum;
+import java.util.zip.Deflater;
+import java.util.zip.GZIPOutputStream;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.io.compress.Compressor;
+import org.apache.hadoop.io.compress.DoNotPool;
+import org.apache.hadoop.util.DataChecksum;
+
+/**
+ * A {@link Compressor} based on the popular gzip compressed file format.
+ * http://www.gzip.org/
+ *
+ */
+@DoNotPool
+public class BuiltInGzipCompressor implements Compressor {
+
+    /**
+     * Fixed ten-byte gzip header. See {@link GZIPOutputStream}'s source for
+     * details.
+     */
+    private static final byte[] GZIP_HEADER = new byte[] {

Review comment:
       can we share the header parsing logic between the compressor and decompressor, e.g., `processBasicHeader`?
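       One possible shape for that (the shared constant below is only illustrative, not existing code): keep the fixed header bytes in a single place and let both sides refer to them, e.g.

           // Hypothetical shared, package-private constant in the zlib package.
           static final byte[] GZIP_BASIC_HEADER = new byte[] {
               0x1f, (byte) 0x8b, Deflater.DEFLATED, 0, 0, 0, 0, 0, 0, 0 };

       The compressor would write these bytes verbatim, while the decompressor's basic-header handling could validate against the same array.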




-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: common-issues-unsubscribe@hadoop.apache.org

For queries about this service, please contact Infrastructure at:
users@infra.apache.org


