You are viewing a plain text version of this content. The canonical link for it is here.
Posted to issues@ozone.apache.org by GitBox <gi...@apache.org> on 2019/11/05 22:06:13 UTC

[GitHub] [hadoop-ozone] szetszwo commented on a change in pull request #97: HDDS-2375. Refactor BlockOutputStream to allow flexible buffering.

szetszwo commented on a change in pull request #97: HDDS-2375. Refactor BlockOutputStream to allow flexible buffering.
URL: https://github.com/apache/hadoop-ozone/pull/97#discussion_r342822304
 
 

 ##########
 File path: hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChunkBuffer.java
 ##########
 @@ -0,0 +1,167 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+package org.apache.hadoop.ozone.common;
+
+import org.apache.ratis.thirdparty.com.google.protobuf.ByteString;
+
+import java.nio.ByteBuffer;
+import java.util.Iterator;
+import java.util.NoSuchElementException;
+import java.util.function.Function;
+
+/** Buffer for a block chunk. */
+public interface ChunkBuffer {
+  /** {@link ChunkBuffer} implementation using a single {@link ByteBuffer}. */
+  class ImplWithByteBuffer implements ChunkBuffer {
+    private final ByteBuffer buffer;
+
+    private ImplWithByteBuffer(ByteBuffer buffer) {
+      this.buffer = buffer;
+    }
+
+    @Override
+    public int position() {
+      return buffer.position();
+    }
+
+    @Override
+    public int remaining() {
+      return buffer.remaining();
+    }
+
+    @Override
+    public Iterable<ByteBuffer> iterate(int bytesPerChecksum) {
+      return () -> new Iterator<ByteBuffer>() {
+        @Override
+        public boolean hasNext() {
+          return buffer.hasRemaining();
+        }
+
+        @Override
+        public ByteBuffer next() {
+          final ByteBuffer duplicated = buffer.duplicate();
+          final int min = Math.min(
+              buffer.position() + bytesPerChecksum, buffer.limit());
+          duplicated.limit(min);
+          buffer.position(min);
+          return duplicated;
+        }
+      };
+    }
+
+    @Override
+    public ChunkBuffer duplicate(int newPosition, int newLimit) {
+      final ByteBuffer duplicated = buffer.duplicate();
+      duplicated.position(newPosition).limit(newLimit);
+      return new ImplWithByteBuffer(duplicated);
+    }
+
+    @Override
+    public void put(ByteBuffer b) {
+      buffer.put(b);
+    }
+
+    @Override
+    public void clear() {
+      buffer.clear();
+    }
+
+    @Override
+    public ByteString toByteString(Function<ByteBuffer, ByteString> f) {
+      return f.apply(buffer);
+    }
+
+    @Override
+    public boolean equals(Object obj) {
+      if (this == obj) {
+        return true;
+      } else if (!(obj instanceof ImplWithByteBuffer)) {
+        return false;
+      }
+      final ImplWithByteBuffer that = (ImplWithByteBuffer)obj;
+      return this.buffer.equals(that.buffer);
+    }
+
+    @Override
+    public int hashCode() {
+      return buffer.hashCode();
+    }
+
+    @Override
+    public String toString() {
+      return getClass().getSimpleName() + ":limit=" + buffer.limit();
+    }
+  }
+
+  /** Similar to {@link ByteBuffer#allocate(int)}. */
+  static ChunkBuffer allocate(int capacity) {
+    return new ImplWithByteBuffer(ByteBuffer.allocate(capacity));
+  }
+
+  /** Warp the given {@link ByteBuffer} as a {@link ChunkBuffer}. */
+  static ChunkBuffer warp(ByteBuffer buffer) {
+    return new ImplWithByteBuffer(buffer);
+  }
+
+  /** Similar to {@link ByteBuffer#position()}. */
+  int position();
+
+  /** Similar to {@link ByteBuffer#remaining()}. */
+  int remaining();
+
+  /** Similar to {@link ByteBuffer#hasRemaining()}: true iff remaining() > 0. */
+  default boolean hasRemaining() {
+    return remaining() > 0;
+  }
+
+  /** Similar to {@link ByteBuffer#clear()}. */
+  void clear();
+
+  /** Similar to {@link ByteBuffer#put(ByteBuffer)}. */
+  void put(ByteBuffer b);
+
+  /**
+   * Similar to {@link ByteBuffer#put(byte[], int, int)}.
+   * Default implementation wraps the array range and delegates to
+   * {@link #put(ByteBuffer)}.
+   */
+  default void put(byte[] b, int offset, int length) {
+    put(ByteBuffer.wrap(b, offset, length));
+  }
+
+  /** The same as {@code put(b.asReadOnlyByteBuffer())}. */
+  default void put(ByteString b) {
+    put(b.asReadOnlyByteBuffer());
+  }
+
+  /**
+   * Duplicate and then set the position and limit on the duplicated buffer.
+   * This buffer itself is unchanged.
+   *
+   * @param newPosition the position to set on the duplicated buffer
+   * @param newLimit the limit to set on the duplicated buffer
+   * @see ByteBuffer#duplicate()
+   */
+  ChunkBuffer duplicate(int newPosition, int newLimit);
+
+  /**
+   * Iterate the buffer from the current position to the current limit,
+   * yielding slices of at most bytesPerChecksum bytes each.
+   *
+   * When the iteration completes,
+   * the buffer's position will be equal to its limit.
+   *
+   * @param bytesPerChecksum the maximum size, in bytes, of each slice
+   */
+  Iterable<ByteBuffer> iterate(int bytesPerChecksum);
+
+  /**
+   * Convert this buffer to a {@link ByteString}.
+   * The position of this buffer remains unchanged.
 
 Review comment:
   It is a bug.  The position should be preserved so that the buffer can be read again for checksum computation.  Thanks.

----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
users@infra.apache.org


With regards,
Apache Git Services

---------------------------------------------------------------------
To unsubscribe, e-mail: ozone-issues-unsubscribe@hadoop.apache.org
For additional commands, e-mail: ozone-issues-help@hadoop.apache.org