Posted to common-commits@hadoop.apache.org by sh...@apache.org on 2019/06/06 13:23:44 UTC

[hadoop] branch trunk updated: HDDS-1621. writeData in ChunkUtils should not use AsynchronousFileChannel. Contributed by Supratim Deka (#917)

This is an automated email from the ASF dual-hosted git repository.

shashikant pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
     new 9fded67  HDDS-1621. writeData in ChunkUtils should not use AsynchronousFileChannel. Contributed by Supratim Deka (#917)
9fded67 is described below

commit 9fded678ffcda12b980ab8f01914168de38a8c73
Author: supratimdeka <46...@users.noreply.github.com>
AuthorDate: Thu Jun 6 18:53:37 2019 +0530

    HDDS-1621. writeData in ChunkUtils should not use AsynchronousFileChannel. Contributed by Supratim Deka (#917)
---
 .../container/keyvalue/helpers/ChunkUtils.java     | 22 ++++++++++++----------
 1 file changed, 12 insertions(+), 10 deletions(-)

diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/ChunkUtils.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/ChunkUtils.java
index 2781bfa..2993bbb 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/ChunkUtils.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/ChunkUtils.java
@@ -43,6 +43,7 @@ import java.io.File;
 import java.io.IOException;
 import java.nio.ByteBuffer;
 import java.nio.channels.AsynchronousFileChannel;
+import java.nio.channels.FileChannel;
 import java.nio.channels.FileLock;
 import java.nio.file.StandardOpenOption;
 import java.security.NoSuchAlgorithmException;
@@ -84,23 +85,20 @@ public final class ChunkUtils {
       throw new StorageContainerException(err, INVALID_WRITE_SIZE);
     }
 
-    AsynchronousFileChannel file = null;
+    FileChannel file = null;
     FileLock lock = null;
 
     try {
       long writeTimeStart = Time.monotonicNow();
-      file = sync ?
-          AsynchronousFileChannel.open(chunkFile.toPath(),
-              StandardOpenOption.CREATE,
-              StandardOpenOption.WRITE,
-              StandardOpenOption.SPARSE,
-              StandardOpenOption.SYNC) :
-          AsynchronousFileChannel.open(chunkFile.toPath(),
+
+      // skip SYNC and DSYNC to reduce contention on file.lock
+      file = FileChannel.open(chunkFile.toPath(),
               StandardOpenOption.CREATE,
               StandardOpenOption.WRITE,
               StandardOpenOption.SPARSE);
-      lock = file.lock().get();
-      int size = file.write(data, chunkInfo.getOffset()).get();
+
+      lock = file.lock();
+      int size = file.write(data, chunkInfo.getOffset());
       // Increment volumeIO stats here.
       volumeIOStats.incWriteTime(Time.monotonicNow() - writeTimeStart);
       volumeIOStats.incWriteOpCount();
@@ -128,6 +126,10 @@ public final class ChunkUtils {
       }
       if (file != null) {
         try {
+          if (sync) {
+            // ensure data and metadata is persisted. Outside the lock
+            file.force(true);
+          }
           file.close();
         } catch (IOException e) {
           throw new StorageContainerException("Error closing chunk file",


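For context only, and not part of the commit itself: below is a minimal standalone sketch (hypothetical class and method names) of the synchronous write pattern the patch switches to — a FileChannel opened without SYNC/DSYNC, an exclusive file lock held only around the positional write, and an explicit force(true) afterwards when durability is requested.

// Illustrative sketch, not the actual ChunkUtils code: synchronous chunk write
// with FileChannel, mirroring the approach adopted by this commit.
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.channels.FileLock;
import java.nio.charset.StandardCharsets;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.nio.file.StandardOpenOption;

public final class SyncChunkWriteSketch {

  static int writeChunk(Path chunkFile, ByteBuffer data, long offset, boolean sync)
      throws IOException {
    // Open without SYNC/DSYNC; durability is handled explicitly via force() below.
    try (FileChannel channel = FileChannel.open(chunkFile,
        StandardOpenOption.CREATE,
        StandardOpenOption.WRITE,
        StandardOpenOption.SPARSE)) {
      int bytesWritten;
      // Exclusive lock on the whole file, released before force().
      try (FileLock lock = channel.lock()) {
        // Positional write; does not move the channel's current position.
        bytesWritten = channel.write(data, offset);
      }
      if (sync) {
        // Flush data and metadata to disk, outside the file lock.
        channel.force(true);
      }
      return bytesWritten;
    }
  }

  public static void main(String[] args) throws IOException {
    ByteBuffer buf = ByteBuffer.wrap("hello chunk".getBytes(StandardCharsets.UTF_8));
    int written = writeChunk(Paths.get("chunk.tmp"), buf, 0L, true);
    System.out.println("wrote " + written + " bytes");
  }
}

The design point, as stated in the diff's own comment, is to skip SYNC/DSYNC on open so the file lock is held only for the write itself, and to pay the durability cost once via force(true) before close when sync is requested.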