Posted to common-commits@hadoop.apache.org by vv...@apache.org on 2015/09/07 17:46:03 UTC

[01/50] [abbrv] hadoop git commit: HADOOP-12346. Increase some default timeouts / retries for S3a connector. (Sean Mackrory via Lei (Eddy) Xu)

Repository: hadoop
Updated Branches:
  refs/heads/YARN-3926 c95993cba -> 1dbd8e34a


HADOOP-12346. Increase some default timeouts / retries for S3a connector. (Sean Mackrory via Lei (Eddy) Xu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6ab2d19f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6ab2d19f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6ab2d19f

Branch: refs/heads/YARN-3926
Commit: 6ab2d19f5c010ab1d318214916ba95daa91a4dbf
Parents: bdbe53c
Author: Lei Xu <le...@cloudera.com>
Authored: Sat Aug 29 09:57:27 2015 -0700
Committer: Lei Xu <le...@cloudera.com>
Committed: Sat Aug 29 09:59:30 2015 -0700

----------------------------------------------------------------------
 .../hadoop-common/src/main/resources/core-default.xml            | 4 ++--
 .../src/main/java/org/apache/hadoop/fs/s3a/Constants.java        | 4 ++--
 .../hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md       | 4 ++--
 3 files changed, 6 insertions(+), 6 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ab2d19f/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
index cef32d3..b813aa9 100644
--- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
+++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
@@ -793,7 +793,7 @@ for ldap providers in the same way as above does.
 
 <property>
   <name>fs.s3a.attempts.maximum</name>
-  <value>10</value>
+  <value>20</value>
   <description>How many times we should retry commands on transient errors.</description>
 </property>
 
@@ -805,7 +805,7 @@ for ldap providers in the same way as above does.
 
 <property>
   <name>fs.s3a.connection.timeout</name>
-  <value>50000</value>
+  <value>200000</value>
   <description>Socket connection timeout in milliseconds.</description>
 </property>
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ab2d19f/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java
index fe8dd77..fa81d93 100644
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java
@@ -45,7 +45,7 @@ public class Constants {
 
   // number of times we should retry errors
   public static final String MAX_ERROR_RETRIES = "fs.s3a.attempts.maximum";
-  public static final int DEFAULT_MAX_ERROR_RETRIES = 10;
+  public static final int DEFAULT_MAX_ERROR_RETRIES = 20;
 
   // seconds until we give up trying to establish a connection to s3
   public static final String ESTABLISH_TIMEOUT = "fs.s3a.connection.establish.timeout";
@@ -53,7 +53,7 @@ public class Constants {
   
   // seconds until we give up on a connection to s3
   public static final String SOCKET_TIMEOUT = "fs.s3a.connection.timeout";
-  public static final int DEFAULT_SOCKET_TIMEOUT = 50000;
+  public static final int DEFAULT_SOCKET_TIMEOUT = 200000;
 
   // number of records to get while paging through a directory listing
   public static final String MAX_PAGING_KEYS = "fs.s3a.paging.maximum";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ab2d19f/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
index 5d45e0a..6df15e6 100644
--- a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
+++ b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
@@ -206,7 +206,7 @@ If you do any of these: change your credentials immediately!
 
     <property>
       <name>fs.s3a.attempts.maximum</name>
-      <value>10</value>
+      <value>20</value>
       <description>How many times we should retry commands on transient errors.</description>
     </property>
 
@@ -218,7 +218,7 @@ If you do any of these: change your credentials immediately!
 
     <property>
       <name>fs.s3a.connection.timeout</name>
-      <value>50000</value>
+      <value>200000</value>
       <description>Socket connection timeout in milliseconds.</description>
     </property>
 
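A minimal sketch of picking up the new defaults per client, assuming hadoop-aws is on the classpath; the bucket name is a placeholder. Setting the same keys on a Configuration overrides both core-default.xml and the compiled-in constants:

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;

    public class S3aRetryTimeoutOverride {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Same keys this commit retunes; values here are illustrative only.
        conf.setInt("fs.s3a.attempts.maximum", 20);       // retries on transient errors
        conf.setInt("fs.s3a.connection.timeout", 200000); // socket timeout, milliseconds
        FileSystem fs = FileSystem.get(URI.create("s3a://example-bucket/"), conf);
        System.out.println("attempts = "
            + fs.getConf().getInt("fs.s3a.attempts.maximum", -1));
      }
    }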


[29/50] [abbrv] hadoop git commit: HDFS-8885. ByteRangeInputStream used in webhdfs does not override available(). Contributed by Shradha Revankar.

Posted by vv...@apache.org.
HDFS-8885. ByteRangeInputStream used in webhdfs does not override available(). Contributed by Shradha Revankar.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c92e31bd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c92e31bd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c92e31bd

Branch: refs/heads/YARN-3926
Commit: c92e31bd659e95c8baa0f3b2bf0cd7f6f72278e6
Parents: 09c64ba
Author: Akira Ajisaka <aa...@apache.org>
Authored: Thu Sep 3 19:40:34 2015 +0900
Committer: Akira Ajisaka <aa...@apache.org>
Committed: Thu Sep 3 19:40:34 2015 +0900

----------------------------------------------------------------------
 .../hadoop/hdfs/web/ByteRangeInputStream.java   | 11 +++
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |  3 +
 .../hdfs/web/TestByteRangeInputStream.java      | 79 ++++++++++++++++++++
 3 files changed, 93 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c92e31bd/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/ByteRangeInputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/ByteRangeInputStream.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/ByteRangeInputStream.java
index bb581db..911d6a4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/ByteRangeInputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/ByteRangeInputStream.java
@@ -274,4 +274,15 @@ public abstract class ByteRangeInputStream extends FSInputStream {
     }
     status = StreamStatus.CLOSED;
   }
+
+  @Override
+  public synchronized int available() throws IOException{
+    getInputStream();
+    if(fileLength != null){
+      long remaining = fileLength - currentPos;
+      return remaining <= Integer.MAX_VALUE ? (int) remaining : Integer.MAX_VALUE;
+    }else {
+      return Integer.MAX_VALUE;
+    }
+  }
 }
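
The override above follows the InputStream.available() contract with two details worth noting: the remaining byte count is clamped to Integer.MAX_VALUE, and a null file length (as with chunked transfer-encoding) degrades to Integer.MAX_VALUE rather than failing. A minimal standalone sketch of that clamping rule, not webhdfs code:

    // Illustrative only: a Long fileLength of null means "length unknown".
    static int availableFor(Long fileLength, long currentPos) {
      if (fileLength == null) {
        return Integer.MAX_VALUE;              // length unknown, report the maximum
      }
      long remaining = fileLength - currentPos;
      return remaining <= Integer.MAX_VALUE ? (int) remaining : Integer.MAX_VALUE;
    }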

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c92e31bd/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index e68c011..22b80a6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1284,6 +1284,9 @@ Release 2.8.0 - UNRELEASED
 
     HDFS-9003. ForkJoin thread pool leaks. (Kihwal Lee via jing9)
 
+    HDFS-8885. ByteRangeInputStream used in webhdfs does not override
+    available(). (Shradha Revankar via aajisaka)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c92e31bd/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestByteRangeInputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestByteRangeInputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestByteRangeInputStream.java
index 40f2b9c..7f1f00f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestByteRangeInputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestByteRangeInputStream.java
@@ -210,4 +210,83 @@ public class TestByteRangeInputStream {
 
     verify(mockStream.in, times(isCloses)).close();
   }
+
+
+  @Test
+  public void testAvailable() throws IOException {
+    ByteRangeInputStream bris =
+            mock(ByteRangeInputStream.class, CALLS_REAL_METHODS);
+    InputStreamAndFileLength mockStream = new InputStreamAndFileLength(65535L,
+            mock(InputStream.class));
+    doReturn(mockStream).when(bris).openInputStream(Mockito.anyLong());
+    Whitebox.setInternalState(bris, "status",
+            ByteRangeInputStream.StreamStatus.SEEK);
+
+
+    assertEquals("Before read or seek, available should be same as filelength",
+            65535, bris.available());
+    verify(bris, times(1)).openInputStream(Mockito.anyLong());
+
+    bris.seek(10);
+    assertEquals("Seek 10 bytes, available should return filelength - 10"
+            , 65525,
+            bris.available());
+
+    //no more bytes available
+    bris.seek(65535);
+    assertEquals("Seek till end of file, available should return 0 bytes", 0,
+            bris.available());
+
+    //test reads, seek back to 0 and start reading
+    bris.seek(0);
+    bris.read();
+    assertEquals("Read 1 byte, available must return  filelength - 1",
+            65534, bris.available());
+
+    bris.read();
+    assertEquals("Read another 1 byte, available must return  filelength - 2",
+            65533, bris.available());
+
+    //seek and read
+    bris.seek(100);
+    bris.read();
+    assertEquals("Seek to offset 100 and read 1 byte, available should return filelength - 101",
+            65434, bris.available());
+    bris.close();
+  }
+
+  @Test
+  public void testAvailableLengthNotKnown() throws IOException {
+    ByteRangeInputStream bris =
+            mock(ByteRangeInputStream.class, CALLS_REAL_METHODS);
+    //Length is null for chunked transfer-encoding
+    InputStreamAndFileLength mockStream = new InputStreamAndFileLength(null,
+            mock(InputStream.class));
+    doReturn(mockStream).when(bris).openInputStream(Mockito.anyLong());
+    Whitebox.setInternalState(bris, "status",
+            ByteRangeInputStream.StreamStatus.SEEK);
+
+    assertEquals(Integer.MAX_VALUE, bris.available());
+  }
+
+  @Test
+  public void testAvailableStreamClosed() throws IOException {
+    ByteRangeInputStream bris =
+            mock(ByteRangeInputStream.class, CALLS_REAL_METHODS);
+    InputStreamAndFileLength mockStream = new InputStreamAndFileLength(null,
+            mock(InputStream.class));
+    doReturn(mockStream).when(bris).openInputStream(Mockito.anyLong());
+    Whitebox.setInternalState(bris, "status",
+            ByteRangeInputStream.StreamStatus.SEEK);
+
+    bris.close();
+    try{
+      bris.available();
+      fail("Exception should be thrown when stream is closed");
+    }catch(IOException e){
+      assertTrue("Exception when stream is closed",
+              e.getMessage().equals("Stream closed"));
+    }
+  }
+
 }


[12/50] [abbrv] hadoop git commit: HADOOP-12367. Move TestFileUtil's test resources to resources folder. (wang via yliu)

Posted by vv...@apache.org.
HADOOP-12367. Move TestFileUtil's test resources to resources folder. (wang via yliu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f4d96be6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f4d96be6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f4d96be6

Branch: refs/heads/YARN-3926
Commit: f4d96be6c637ff54903615cff04b365e25bb3229
Parents: 7ad3556
Author: yliu <yl...@apache.org>
Authored: Tue Sep 1 16:20:56 2015 +0800
Committer: yliu <yl...@apache.org>
Committed: Tue Sep 1 16:20:56 2015 +0800

----------------------------------------------------------------------
 hadoop-common-project/hadoop-common/CHANGES.txt |   3 +++
 hadoop-common-project/hadoop-common/pom.xml     |  19 +------------------
 .../java/org/apache/hadoop/fs/test-untar.tar    | Bin 20480 -> 0 bytes
 .../java/org/apache/hadoop/fs/test-untar.tgz    | Bin 2024 -> 0 bytes
 .../src/test/resources/test-untar.tar           | Bin 0 -> 20480 bytes
 .../src/test/resources/test-untar.tgz           | Bin 0 -> 2024 bytes
 6 files changed, 4 insertions(+), 18 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f4d96be6/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 0f52d22..14e6fda 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -759,6 +759,9 @@ Release 2.8.0 - UNRELEASED
     HADOOP-12368. Mark ViewFileSystemBaseTest and ViewFsBaseTest as abstract.
     (wang)
 
+    HADOOP-12367. Move TestFileUtil's test resources to resources folder.
+    (wang via yliu)
+
   OPTIMIZATIONS
 
     HADOOP-11785. Reduce the number of listStatus operation in distcp

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f4d96be6/hadoop-common-project/hadoop-common/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/pom.xml b/hadoop-common-project/hadoop-common/pom.xml
index 282735d..3ae09a0 100644
--- a/hadoop-common-project/hadoop-common/pom.xml
+++ b/hadoop-common-project/hadoop-common/pom.xml
@@ -454,23 +454,6 @@
             </configuration>
           </execution>
           <execution>
-            <id>copy-test-tarballs</id>
-            <phase>process-test-resources</phase>
-            <goals>
-              <goal>run</goal>
-            </goals>
-            <configuration>
-              <target>
-                <copy toDir="${test.cache.data}">
-                  <fileset dir="${basedir}/src/test/java/org/apache/hadoop/fs">
-                    <include name="test-untar.tar"/>
-                    <include name="test-untar.tgz"/>
-                  </fileset>
-                </copy>
-              </target>
-            </configuration>
-          </execution>
-          <execution>
             <phase>pre-site</phase>
             <goals>
               <goal>run</goal>
@@ -505,7 +488,7 @@
             <exclude>src/main/native/src/org/apache/hadoop/io/compress/lz4/lz4hc.h</exclude>
             <exclude>src/main/native/src/org/apache/hadoop/io/compress/lz4/lz4hc.c</exclude>
             <exclude>src/main/native/src/org/apache/hadoop/io/compress/lz4/lz4hc_encoder.h</exclude>
-            <exclude>src/test/java/org/apache/hadoop/fs/test-untar.tgz</exclude>
+            <exclude>src/test/resources/test-untar.tgz</exclude>
             <exclude>src/test/resources/test.har/_SUCCESS</exclude>
             <exclude>src/test/resources/test.har/_index</exclude>
             <exclude>src/test/resources/test.har/_masterindex</exclude>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f4d96be6/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/test-untar.tar
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/test-untar.tar b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/test-untar.tar
deleted file mode 100644
index 949e985..0000000
Binary files a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/test-untar.tar and /dev/null differ

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f4d96be6/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/test-untar.tgz
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/test-untar.tgz b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/test-untar.tgz
deleted file mode 100644
index 9e9ef40..0000000
Binary files a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/test-untar.tgz and /dev/null differ

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f4d96be6/hadoop-common-project/hadoop-common/src/test/resources/test-untar.tar
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/resources/test-untar.tar b/hadoop-common-project/hadoop-common/src/test/resources/test-untar.tar
new file mode 100644
index 0000000..949e985
Binary files /dev/null and b/hadoop-common-project/hadoop-common/src/test/resources/test-untar.tar differ

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f4d96be6/hadoop-common-project/hadoop-common/src/test/resources/test-untar.tgz
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/resources/test-untar.tgz b/hadoop-common-project/hadoop-common/src/test/resources/test-untar.tgz
new file mode 100644
index 0000000..9e9ef40
Binary files /dev/null and b/hadoop-common-project/hadoop-common/src/test/resources/test-untar.tgz differ
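
With the tarballs under src/test/resources, the standard Maven test-resources phase copies them to target/test-classes, which is why the hand-rolled copy-test-tarballs antrun execution above could be deleted. A minimal sketch of resolving them from the test classpath (hypothetical helper, not TestFileUtil itself):

    import java.io.InputStream;

    public class TestTarballLookup {
      public static void main(String[] args) throws Exception {
        try (InputStream in = TestTarballLookup.class.getClassLoader()
            .getResourceAsStream("test-untar.tar")) {
          // Non-null once Maven has copied src/test/resources to target/test-classes.
          System.out.println(in != null ? "found test-untar.tar" : "not on classpath");
        }
      }
    }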


[50/50] [abbrv] hadoop git commit: YARN-3591. Resource localization on a bad disk causes subsequent containers failure. Contributed by Lavkesh Lahngir.

Posted by vv...@apache.org.
YARN-3591. Resource localization on a bad disk causes subsequent containers failure. Contributed by Lavkesh Lahngir.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1dbd8e34
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1dbd8e34
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1dbd8e34

Branch: refs/heads/YARN-3926
Commit: 1dbd8e34a7d97c4d8586da79c980d8f2e0aad61d
Parents: 9b68577
Author: Varun Vasudev <vv...@apache.org>
Authored: Mon Sep 7 11:32:12 2015 +0530
Committer: Varun Vasudev <vv...@apache.org>
Committed: Mon Sep 7 11:32:12 2015 +0530

----------------------------------------------------------------------
 hadoop-yarn-project/CHANGES.txt                 |  3 +
 .../localizer/LocalResourcesTrackerImpl.java    | 64 +++++++++++++--
 .../localizer/ResourceLocalizationService.java  |  2 +-
 .../TestLocalResourcesTrackerImpl.java          | 86 ++++++++++++++++++--
 .../localizer/TestResourceRetention.java        |  2 +-
 5 files changed, 138 insertions(+), 19 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1dbd8e34/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 98cc98f..fede891 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -816,6 +816,9 @@ Release 2.8.0 - UNRELEASED
     YARN-4024. YARN RM should avoid unnecessary resolving IP when NMs doing heartbeat.
     (Hong Zhiguo via wangda)
 
+    YARN-3591. Resource localization on a bad disk causes subsequent containers failure.
+    (Lavkesh Lahngir via vvasudev)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1dbd8e34/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/LocalResourcesTrackerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/LocalResourcesTrackerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/LocalResourcesTrackerImpl.java
index 7cf6b15..a1e6817 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/LocalResourcesTrackerImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/LocalResourcesTrackerImpl.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer;
 import java.io.File;
 import java.io.IOException;
 import java.util.Iterator;
+import java.util.List;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ConcurrentMap;
 import java.util.concurrent.atomic.AtomicLong;
@@ -38,6 +39,7 @@ import org.apache.hadoop.yarn.event.Dispatcher;
 import org.apache.hadoop.yarn.proto.YarnProtos.LocalResourceProto;
 import org.apache.hadoop.yarn.proto.YarnServerNodemanagerRecoveryProtos.LocalizedResourceProto;
 import org.apache.hadoop.yarn.server.nodemanager.DeletionService;
+import org.apache.hadoop.yarn.server.nodemanager.LocalDirsHandlerService;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event.ResourceEvent;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event.ResourceEventType;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event.ResourceRecoveredEvent;
@@ -65,6 +67,7 @@ class LocalResourcesTrackerImpl implements LocalResourcesTracker {
   private final Dispatcher dispatcher;
   private final ConcurrentMap<LocalResourceRequest,LocalizedResource> localrsrc;
   private Configuration conf;
+  private LocalDirsHandlerService dirsHandler;
   /*
    * This flag controls whether this resource tracker uses hierarchical
    * directories or not. For PRIVATE and PUBLIC resource trackers it
@@ -92,27 +95,38 @@ class LocalResourcesTrackerImpl implements LocalResourcesTracker {
       Dispatcher dispatcher, boolean useLocalCacheDirectoryManager,
       Configuration conf, NMStateStoreService stateStore) {
     this(user, appId, dispatcher,
-      new ConcurrentHashMap<LocalResourceRequest, LocalizedResource>(),
-      useLocalCacheDirectoryManager, conf, stateStore);
+        new ConcurrentHashMap<LocalResourceRequest, LocalizedResource>(),
+        useLocalCacheDirectoryManager, conf, stateStore, null);
+  }
+
+  public LocalResourcesTrackerImpl(String user, ApplicationId appId,
+      Dispatcher dispatcher, boolean useLocalCacheDirectoryManager,
+      Configuration conf, NMStateStoreService stateStore,
+      LocalDirsHandlerService dirHandler) {
+    this(user, appId, dispatcher,
+        new ConcurrentHashMap<LocalResourceRequest, LocalizedResource>(),
+        useLocalCacheDirectoryManager, conf, stateStore, dirHandler);
   }
 
   LocalResourcesTrackerImpl(String user, ApplicationId appId,
       Dispatcher dispatcher,
-      ConcurrentMap<LocalResourceRequest,LocalizedResource> localrsrc,
+      ConcurrentMap<LocalResourceRequest, LocalizedResource> localrsrc,
       boolean useLocalCacheDirectoryManager, Configuration conf,
-      NMStateStoreService stateStore) {
+      NMStateStoreService stateStore, LocalDirsHandlerService dirHandler) {
     this.appId = appId;
     this.user = user;
     this.dispatcher = dispatcher;
     this.localrsrc = localrsrc;
     this.useLocalCacheDirectoryManager = useLocalCacheDirectoryManager;
-    if ( this.useLocalCacheDirectoryManager) {
-      directoryManagers = new ConcurrentHashMap<Path, LocalCacheDirectoryManager>();
+    if (this.useLocalCacheDirectoryManager) {
+      directoryManagers =
+          new ConcurrentHashMap<Path, LocalCacheDirectoryManager>();
       inProgressLocalResourcesMap =
-        new ConcurrentHashMap<LocalResourceRequest, Path>();
+          new ConcurrentHashMap<LocalResourceRequest, Path>();
     }
     this.conf = conf;
     this.stateStore = stateStore;
+    this.dirsHandler = dirHandler;
   }
 
   /*
@@ -312,11 +326,45 @@ class LocalResourcesTrackerImpl implements LocalResourcesTracker {
         toString());
       if (!file.exists()) {
         ret = false;
+      } else if (dirsHandler != null) {
+        ret = checkLocalResource(rsrc);
       }
     }
     return ret;
   }
-  
+
+  /**
+   * Check if the rsrc is Localized on a good dir.
+   *
+   * @param rsrc
+   * @return
+   */
+  @VisibleForTesting
+  boolean checkLocalResource(LocalizedResource rsrc) {
+    List<String> localDirs = dirsHandler.getLocalDirsForRead();
+    for (String dir : localDirs) {
+      if (isParent(rsrc.getLocalPath().toUri().getPath(), dir)) {
+        return true;
+      } else {
+        continue;
+      }
+    }
+    return false;
+  }
+
+  /**
+   * @param path
+   * @param parentdir
+   * @return true if parentdir is parent of path else false.
+   */
+  private boolean isParent(String path, String parentdir) {
+    // Add separator if not present.
+    if (path.charAt(path.length() - 1) != File.separatorChar) {
+      path += File.separator;
+    }
+    return path.startsWith(parentdir);
+  }
+
   @Override
   public boolean remove(LocalizedResource rem, DeletionService delService) {
  // current synchronization guaranteed by crude RLS event for cleanup
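
checkLocalResource() above keeps a resource only if its localized path sits under a directory the LocalDirsHandlerService still reports as good; isParent() is a plain prefix test after normalizing a trailing separator. A minimal standalone sketch of that check, with illustrative directory names mirroring the test below, where /tmp/somedir2 goes bad:

    import java.io.File;
    import java.util.Arrays;
    import java.util.List;

    public class GoodDirCheck {
      // Same prefix test as the committed isParent().
      static boolean isParent(String path, String parentdir) {
        if (path.charAt(path.length() - 1) != File.separatorChar) {
          path += File.separator;
        }
        return path.startsWith(parentdir);
      }

      public static void main(String[] args) {
        List<String> goodDirs = Arrays.asList("/tmp/somedir1/");
        String localized = "/tmp/somedir2/filecache/10/resource.jar";
        // false: the resource was localized on a dir no longer in the good list.
        System.out.println(goodDirs.stream().anyMatch(d -> isParent(localized, d)));
      }
    }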

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1dbd8e34/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java
index b417c5e..e239e34 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java
@@ -229,7 +229,7 @@ public class ResourceLocalizationService extends CompositeService
   public void serviceInit(Configuration conf) throws Exception {
     this.validateConf(conf);
     this.publicRsrc = new LocalResourcesTrackerImpl(null, null, dispatcher,
-        true, conf, stateStore);
+        true, conf, stateStore, dirsHandler);
     this.recordFactory = RecordFactoryProvider.getRecordFactory(conf);
 
     try {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1dbd8e34/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestLocalResourcesTrackerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestLocalResourcesTrackerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestLocalResourcesTrackerImpl.java
index 5695254..350cecb 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestLocalResourcesTrackerImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestLocalResourcesTrackerImpl.java
@@ -18,22 +18,22 @@
 
 package org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer;
 
-import static org.mockito.Mockito.any;
-import static org.mockito.Matchers.isA;
+import static org.mockito.Matchers.any;
 import static org.mockito.Matchers.eq;
+import static org.mockito.Matchers.isA;
 import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.timeout;
 import static org.mockito.Mockito.times;
 import static org.mockito.Mockito.verify;
-import static org.mockito.Mockito.timeout;
 
 import java.io.File;
 import java.io.IOException;
+import java.util.ArrayList;
 import java.util.Iterator;
+import java.util.List;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ConcurrentMap;
 
-import org.junit.Assert;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
@@ -48,6 +48,7 @@ import org.apache.hadoop.yarn.event.EventHandler;
 import org.apache.hadoop.yarn.proto.YarnProtos.LocalResourceProto;
 import org.apache.hadoop.yarn.proto.YarnServerNodemanagerRecoveryProtos.LocalizedResourceProto;
 import org.apache.hadoop.yarn.server.nodemanager.DeletionService;
+import org.apache.hadoop.yarn.server.nodemanager.LocalDirsHandlerService;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerEvent;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerEventType;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerResourceFailedEvent;
@@ -64,8 +65,10 @@ import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.even
 import org.apache.hadoop.yarn.server.nodemanager.recovery.NMNullStateStoreService;
 import org.apache.hadoop.yarn.server.nodemanager.recovery.NMStateStoreService;
 import org.apache.hadoop.yarn.server.utils.BuilderUtils;
+import org.junit.Assert;
 import org.junit.Test;
 import org.mockito.ArgumentCaptor;
+import org.mockito.Mockito;
 
 public class TestLocalResourcesTrackerImpl {
 
@@ -103,7 +106,7 @@ public class TestLocalResourcesTrackerImpl {
       localrsrc.put(req2, lr2);
       LocalResourcesTracker tracker =
           new LocalResourcesTrackerImpl(user, null, dispatcher, localrsrc,
-              false, conf, new NMNullStateStoreService());
+              false, conf, new NMNullStateStoreService(),null);
 
       ResourceEvent req11Event =
           new ResourceRequestEvent(req1, LocalResourceVisibility.PUBLIC, lc1);
@@ -187,7 +190,7 @@ public class TestLocalResourcesTrackerImpl {
       localrsrc.put(req1, lr1);
       LocalResourcesTracker tracker = new LocalResourcesTrackerImpl(user,
           null, dispatcher, localrsrc, false, conf,
-          new NMNullStateStoreService());
+          new NMNullStateStoreService(), null);
 
       ResourceEvent req11Event = new ResourceRequestEvent(req1,
           LocalResourceVisibility.PUBLIC, lc1);
@@ -258,7 +261,7 @@ public class TestLocalResourcesTrackerImpl {
           new ConcurrentHashMap<LocalResourceRequest, LocalizedResource>();
       LocalResourcesTracker tracker =
           new LocalResourcesTrackerImpl(user, null, dispatcher, localrsrc,
-              true, conf, new NMNullStateStoreService());
+              true, conf, new NMNullStateStoreService(), null);
 
       LocalResourceRequest lr =
           createLocalResourceRequest(user, 1, 1, LocalResourceVisibility.PUBLIC);
@@ -405,7 +408,7 @@ public class TestLocalResourcesTrackerImpl {
           new ConcurrentHashMap<LocalResourceRequest, LocalizedResource>();
       LocalResourcesTracker tracker = new LocalResourcesTrackerImpl(user,
           null, dispatcher, localrsrc, true, conf,
-          new NMNullStateStoreService());
+          new NMNullStateStoreService(), null);
 
       // This is a random path. NO File creation will take place at this place.
       Path localDir = new Path("/tmp");
@@ -782,6 +785,71 @@ public class TestLocalResourcesTrackerImpl {
     }
   }
 
+  @SuppressWarnings("unchecked")
+  @Test
+  public void testResourcePresentInGoodDir() throws IOException {
+    String user = "testuser";
+    DrainDispatcher dispatcher = null;
+    try {
+      Configuration conf = new Configuration();
+      dispatcher = createDispatcher(conf);
+      EventHandler<LocalizerEvent> localizerEventHandler =
+          mock(EventHandler.class);
+      EventHandler<LocalizerEvent> containerEventHandler =
+          mock(EventHandler.class);
+      dispatcher.register(LocalizerEventType.class, localizerEventHandler);
+      dispatcher.register(ContainerEventType.class, containerEventHandler);
+
+      ContainerId cId1 = BuilderUtils.newContainerId(1, 1, 1, 1);
+      LocalizerContext lc1 = new LocalizerContext(user, cId1, null);
+      LocalResourceRequest req1 =
+          createLocalResourceRequest(user, 1, 1, LocalResourceVisibility.PUBLIC);
+      LocalResourceRequest req2 =
+          createLocalResourceRequest(user, 2, 1, LocalResourceVisibility.PUBLIC);
+      LocalizedResource lr1 = createLocalizedResource(req1, dispatcher);
+      LocalizedResource lr2 = createLocalizedResource(req2, dispatcher);
+      ConcurrentMap<LocalResourceRequest, LocalizedResource> localrsrc =
+          new ConcurrentHashMap<LocalResourceRequest, LocalizedResource>();
+      localrsrc.put(req1, lr1);
+      localrsrc.put(req2, lr2);
+      LocalDirsHandlerService dirsHandler = mock(LocalDirsHandlerService.class);
+      List<String> goodDirs = new ArrayList<String>();
+      // /tmp/somedir2 is bad
+      goodDirs.add("/tmp/somedir1/");
+      goodDirs.add("/tmp/somedir2");
+      Mockito.when(dirsHandler.getLocalDirs()).thenReturn(goodDirs);
+      Mockito.when(dirsHandler.getLocalDirsForRead()).thenReturn(goodDirs);
+      LocalResourcesTrackerImpl tracker =
+          new LocalResourcesTrackerImpl(user, null, dispatcher, localrsrc,
+              true , conf, new NMNullStateStoreService(), dirsHandler);
+      ResourceEvent req11Event =
+          new ResourceRequestEvent(req1, LocalResourceVisibility.PUBLIC, lc1);
+      ResourceEvent req21Event =
+          new ResourceRequestEvent(req2, LocalResourceVisibility.PUBLIC, lc1);
+      // Localize R1 for C1
+      tracker.handle(req11Event);
+      // Localize R2 for C1
+      tracker.handle(req21Event);
+      dispatcher.await();
+      // Localize resource1
+      Path p1 = tracker.getPathForLocalization(req1, new Path("/tmp/somedir1"));
+      Path p2 = tracker.getPathForLocalization(req2, new Path("/tmp/somedir2"));
+      ResourceLocalizedEvent rle1 = new ResourceLocalizedEvent(req1, p1, 1);
+      tracker.handle(rle1);
+      ResourceLocalizedEvent rle2 = new ResourceLocalizedEvent(req2, p2, 1);
+      tracker.handle(rle2);
+      dispatcher.await();
+      // Remove somedir2 from gooddirs
+      Assert.assertTrue(tracker.checkLocalResource(lr2));
+      goodDirs.remove(1);
+      Assert.assertFalse(tracker.checkLocalResource(lr2));
+    } finally {
+      if (dispatcher != null) {
+        dispatcher.stop();
+      }
+    }
+  }
+
   private boolean createdummylocalizefile(Path path) {
     boolean ret = false;
     File file = new File(path.toUri().getRawPath().toString());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1dbd8e34/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestResourceRetention.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestResourceRetention.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestResourceRetention.java
index 0e3bf86..81e69e2 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestResourceRetention.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestResourceRetention.java
@@ -83,7 +83,7 @@ public class TestResourceRetention {
     ConcurrentMap<LocalResourceRequest,LocalizedResource> trackerResources =
       new ConcurrentHashMap<LocalResourceRequest,LocalizedResource>();
     LocalResourcesTracker ret = spy(new LocalResourcesTrackerImpl(user, null,
-      null, trackerResources, false, conf, new NMNullStateStoreService()));
+      null, trackerResources, false, conf, new NMNullStateStoreService(),null));
     for (int i = 0; i < nRsrcs; ++i) {
       final LocalResourceRequest req = new LocalResourceRequest(
           new Path("file:///" + user + "/rsrc" + i), timestamp + i * tsstep,


[17/50] [abbrv] hadoop git commit: HADOOP-12369. Point hadoop-project/pom.xml java.security.krb5.conf within target folder.

Posted by vv...@apache.org.
HADOOP-12369. Point hadoop-project/pom.xml java.security.krb5.conf within target folder.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0eb9b193
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0eb9b193
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0eb9b193

Branch: refs/heads/YARN-3926
Commit: 0eb9b1932590ba6c60ac996d03269dffb4e095cc
Parents: dd149ad
Author: Andrew Wang <wa...@apache.org>
Authored: Tue Sep 1 10:57:32 2015 -0700
Committer: Andrew Wang <wa...@apache.org>
Committed: Tue Sep 1 10:57:32 2015 -0700

----------------------------------------------------------------------
 hadoop-common-project/hadoop-common/CHANGES.txt              | 3 +++
 hadoop-project/pom.xml                                       | 2 +-
 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/pom.xml | 2 +-
 3 files changed, 5 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0eb9b193/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 70252d6..e915290 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -762,6 +762,9 @@ Release 2.8.0 - UNRELEASED
     HADOOP-12367. Move TestFileUtil's test resources to resources folder.
     (wang via yliu)
 
+    HADOOP-12369. Point hadoop-project/pom.xml java.security.krb5.conf
+    within target folder. (wang)
+
   OPTIMIZATIONS
 
     HADOOP-11785. Reduce the number of listStatus operation in distcp

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0eb9b193/hadoop-project/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index 86102c6..9863475 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -1132,7 +1132,7 @@
             <test.build.classes>${test.build.classes}</test.build.classes>
 
             <java.net.preferIPv4Stack>true</java.net.preferIPv4Stack>
-            <java.security.krb5.conf>${basedir}/src/test/resources/krb5.conf</java.security.krb5.conf>
+            <java.security.krb5.conf>${project.build.directory}/test-classes/krb5.conf</java.security.krb5.conf>
             <java.security.egd>${java.security.egd}</java.security.egd>
             <require.test.libhadoop>${require.test.libhadoop}</require.test.libhadoop>
           </systemPropertyVariables>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0eb9b193/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/pom.xml
index 52994e7..635f693 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/pom.xml
@@ -198,7 +198,7 @@
           <test.build.classes>${test.build.classes}</test.build.classes>
 
           <java.net.preferIPv4Stack>true</java.net.preferIPv4Stack>
-          <java.security.krb5.conf>${basedir}/src/test/resources/krb5.conf</java.security.krb5.conf>
+          <java.security.krb5.conf>${project.build.directory}/test-classes/krb5.conf</java.security.krb5.conf>
           <java.security.egd>${java.security.egd}</java.security.egd>
           <require.test.libhadoop>${require.test.libhadoop}</require.test.libhadoop>
         </systemPropertyVariables>
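
Both pom changes point java.security.krb5.conf at target/test-classes, where the standard Maven process-test-resources phase copies src/test/resources, so Kerberos tests no longer read from (or risk writing under) the source tree. A minimal sketch of a test-side check, assuming the surefire systemPropertyVariables above are in effect:

    import java.io.File;

    public class Krb5ConfLocation {
      public static void main(String[] args) {
        String krb5 = System.getProperty("java.security.krb5.conf");
        // Expected to resolve under the build tree, e.g. .../target/test-classes/krb5.conf
        System.out.println(krb5 + " exists: "
            + (krb5 != null && new File(krb5).exists()));
      }
    }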


[42/50] [abbrv] hadoop git commit: YARN-4105. Capacity Scheduler headroom for DRF is wrong. Contributed by Chang Li

Posted by vv...@apache.org.
YARN-4105. Capacity Scheduler headroom for DRF is wrong. Contributed by Chang Li


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6eaca2e3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6eaca2e3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6eaca2e3

Branch: refs/heads/YARN-3926
Commit: 6eaca2e3634a88dc55689e8960352d6248c424d9
Parents: 40d222e
Author: Jason Lowe <jl...@apache.org>
Authored: Fri Sep 4 15:30:53 2015 +0000
Committer: Jason Lowe <jl...@apache.org>
Committed: Fri Sep 4 15:30:53 2015 +0000

----------------------------------------------------------------------
 hadoop-yarn-project/CHANGES.txt                 |   3 +
 .../scheduler/capacity/LeafQueue.java           |   2 +-
 .../capacity/TestCapacityScheduler.java         | 112 ++++++++++++++++++-
 3 files changed, 115 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6eaca2e3/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 29eabb5..662106b 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -880,6 +880,9 @@ Release 2.7.2 - UNRELEASED
     YARN-4103. RM WebServices missing scheme for appattempts logLinks.
     (Jonathan Eagles via vvasudeb)
 
+    YARN-4105. Capacity Scheduler headroom for DRF is wrong (Chang Li via
+    jlowe)
+
 Release 2.7.1 - 2015-07-06
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6eaca2e3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
index 658eae1..b43f658 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
@@ -878,7 +878,7 @@ public class LeafQueue extends AbstractCSQueue {
      *
      */
     Resource headroom = 
-      Resources.min(resourceCalculator, clusterResource,
+      Resources.componentwiseMin(
         Resources.subtract(userLimit, user.getUsed()),
         Resources.subtract(currentResourceLimit, queueUsage.getUsed())
         );
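
The one-line fix matters under the DominantResourceCalculator: Resources.min() compares the two candidate vectors by the calculator's ordering and returns one of them whole, so the headroom could report more of some resource than either limit actually allows. Resources.componentwiseMin() takes the minimum per dimension. A minimal standalone sketch with plain ints standing in for Resource objects (numbers are illustrative):

    public class ComponentwiseMinDemo {
      public static void main(String[] args) {
        // Candidate headrooms as (memoryMB, vcores) pairs.
        int[] userLimitHeadroom  = {6 * 1024, 19};
        int[] queueLimitHeadroom = {9 * 1024, 15};

        // Returning either vector whole overstates one dimension:
        // the first claims 19 vcores, the second claims 9 GB.

        // Component-wise min is safe in every dimension: (6 GB, 15 vcores).
        int mem    = Math.min(userLimitHeadroom[0], queueLimitHeadroom[0]);
        int vcores = Math.min(userLimitHeadroom[1], queueLimitHeadroom[1]);
        System.out.println("headroom = (" + mem + " MB, " + vcores + " vcores)");
      }
    }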

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6eaca2e3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
index 279299e..44773be 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
@@ -2916,7 +2916,117 @@ public class TestCapacityScheduler {
 
     rm.stop();
   }
-  
+
+  @Test
+  public void testHeadRoomCalculationWithDRC() throws Exception {
+    // test with total cluster resource of 20GB memory and 20 vcores.
+    // the queue where two apps running has user limit 0.8
+    // allocate 10GB memory and 1 vcore to app 1.
+    // app 1 should have headroom
+    // 20GB*0.8 - 10GB = 6GB memory available and 15 vcores.
+    // allocate 1GB memory and 1 vcore to app2.
+    // app 2 should have headroom 20GB - 10 - 1 = 1GB memory,
+    // and 20*0.8 - 1 = 15 vcores.
+
+    CapacitySchedulerConfiguration csconf =
+        new CapacitySchedulerConfiguration();
+    csconf.setResourceComparator(DominantResourceCalculator.class);
+
+    YarnConfiguration conf = new YarnConfiguration(csconf);
+        conf.setClass(YarnConfiguration.RM_SCHEDULER, CapacityScheduler.class,
+        ResourceScheduler.class);
+
+    MockRM rm = new MockRM(conf);
+    rm.start();
+
+    CapacityScheduler cs = (CapacityScheduler) rm.getResourceScheduler();
+    LeafQueue qb = (LeafQueue)cs.getQueue("default");
+    qb.setUserLimitFactor((float)0.8);
+
+    // add app 1
+    ApplicationId appId = BuilderUtils.newApplicationId(100, 1);
+    ApplicationAttemptId appAttemptId =
+    BuilderUtils.newApplicationAttemptId(appId, 1);
+
+    RMAppAttemptMetrics attemptMetric =
+        new RMAppAttemptMetrics(appAttemptId, rm.getRMContext());
+    RMAppImpl app = mock(RMAppImpl.class);
+    when(app.getApplicationId()).thenReturn(appId);
+    RMAppAttemptImpl attempt = mock(RMAppAttemptImpl.class);
+    when(attempt.getAppAttemptId()).thenReturn(appAttemptId);
+    when(attempt.getRMAppAttemptMetrics()).thenReturn(attemptMetric);
+    when(app.getCurrentAppAttempt()).thenReturn(attempt);
+
+    rm.getRMContext().getRMApps().put(appId, app);
+
+    SchedulerEvent addAppEvent =
+        new AppAddedSchedulerEvent(appId, "default", "user1");
+    cs.handle(addAppEvent);
+    SchedulerEvent addAttemptEvent =
+        new AppAttemptAddedSchedulerEvent(appAttemptId, false);
+    cs.handle(addAttemptEvent);
+
+    // add app 2
+    ApplicationId appId2 = BuilderUtils.newApplicationId(100, 2);
+    ApplicationAttemptId appAttemptId2 =
+    BuilderUtils.newApplicationAttemptId(appId2, 1);
+
+    RMAppAttemptMetrics attemptMetric2 =
+        new RMAppAttemptMetrics(appAttemptId2, rm.getRMContext());
+    RMAppImpl app2 = mock(RMAppImpl.class);
+    when(app2.getApplicationId()).thenReturn(appId2);
+    RMAppAttemptImpl attempt2 = mock(RMAppAttemptImpl.class);
+    when(attempt2.getAppAttemptId()).thenReturn(appAttemptId2);
+    when(attempt2.getRMAppAttemptMetrics()).thenReturn(attemptMetric2);
+    when(app2.getCurrentAppAttempt()).thenReturn(attempt2);
+
+    rm.getRMContext().getRMApps().put(appId2, app2);
+    addAppEvent =
+        new AppAddedSchedulerEvent(appId2, "default", "user2");
+    cs.handle(addAppEvent);
+    addAttemptEvent =
+        new AppAttemptAddedSchedulerEvent(appAttemptId2, false);
+    cs.handle(addAttemptEvent);
+
+    // add nodes  to cluster, so cluster have 20GB and 20 vcores
+    Resource newResource = Resource.newInstance(10 * GB, 10);
+    RMNode node = MockNodes.newNodeInfo(0, newResource, 1, "127.0.0.1");
+    cs.handle(new NodeAddedSchedulerEvent(node));
+
+    Resource newResource2 = Resource.newInstance(10 * GB, 10);
+    RMNode node2 = MockNodes.newNodeInfo(0, newResource2, 1, "127.0.0.2");
+    cs.handle(new NodeAddedSchedulerEvent(node2));
+
+    FiCaSchedulerApp fiCaApp1 =
+            cs.getSchedulerApplications().get(app.getApplicationId())
+                .getCurrentAppAttempt();
+
+    FiCaSchedulerApp fiCaApp2 =
+            cs.getSchedulerApplications().get(app2.getApplicationId())
+                .getCurrentAppAttempt();
+    Priority u0Priority = TestUtils.createMockPriority(1);
+    RecordFactory recordFactory =
+    RecordFactoryProvider.getRecordFactory(null);
+
+    // allocate container for app1 with 10GB memory and 1 vcore
+    fiCaApp1.updateResourceRequests(Collections.singletonList(
+        TestUtils.createResourceRequest(ResourceRequest.ANY, 10*GB, 1, true,
+            u0Priority, recordFactory)));
+    cs.handle(new NodeUpdateSchedulerEvent(node));
+    cs.handle(new NodeUpdateSchedulerEvent(node2));
+    assertEquals(6*GB, fiCaApp1.getHeadroom().getMemory());
+    assertEquals(15, fiCaApp1.getHeadroom().getVirtualCores());
+
+    // allocate container for app2 with 1GB memory and 1 vcore
+    fiCaApp2.updateResourceRequests(Collections.singletonList(
+        TestUtils.createResourceRequest(ResourceRequest.ANY, 1*GB, 1, true,
+            u0Priority, recordFactory)));
+    cs.handle(new NodeUpdateSchedulerEvent(node));
+    cs.handle(new NodeUpdateSchedulerEvent(node2));
+    assertEquals(9*GB, fiCaApp2.getHeadroom().getMemory());
+    assertEquals(15, fiCaApp2.getHeadroom().getVirtualCores());
+  }
+
   @Test
   public void testDefaultNodeLabelExpressionQueueConfig() throws Exception {
     CapacityScheduler cs = new CapacityScheduler();


[26/50] [abbrv] hadoop git commit: HDFS-9003. ForkJoin thread pool leaks. Contributed by Kihwal Lee.

Posted by vv...@apache.org.
HDFS-9003. ForkJoin thread pool leaks. Contributed by Kihwal Lee.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/de928d56
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/de928d56
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/de928d56

Branch: refs/heads/YARN-3926
Commit: de928d566a119f0b7fa5f171719642cd86be0af7
Parents: afc88b3
Author: Jing Zhao <ji...@apache.org>
Authored: Wed Sep 2 15:19:04 2015 -0700
Committer: Jing Zhao <ji...@apache.org>
Committed: Wed Sep 2 15:19:04 2015 -0700

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt                        | 2 ++
 .../main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java  | 1 +
 2 files changed, 3 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/de928d56/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 78bbf26..7a685f5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1279,6 +1279,8 @@ Release 2.8.0 - UNRELEASED
     HDFS-8388. Time and Date format need to be in sync in NameNode UI page.
     (Surendra Singh Lilhore via aajisaka)
 
+    HDFS-9003. ForkJoin thread pool leaks. (Kihwal Lee via jing9)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/de928d56/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
index 853c30c..0dd007d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
@@ -880,6 +880,7 @@ public class FSImage implements Closeable {
         root, counts);
     p.execute(task);
     task.join();
+    p.shutdown();
     LOG.info("Quota initialization completed in " + (Time.now() - start) +
         " milliseconds\n" + counts);
   }
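
The fix is the single p.shutdown() line: FSImage builds a fresh ForkJoinPool per quota initialization, and join() alone does not release the pool's worker threads, so repeated runs in a long-lived NameNode leaked them. A minimal standalone sketch of the pattern (not FSImage code):

    import java.util.concurrent.ForkJoinPool;
    import java.util.concurrent.RecursiveAction;

    public class PoolShutdownDemo {
      public static void main(String[] args) {
        ForkJoinPool p = new ForkJoinPool(4);
        RecursiveAction task = new RecursiveAction() {
          @Override
          protected void compute() {
            // stand-in for the quota initialization work
          }
        };
        p.execute(task);
        task.join();
        p.shutdown(); // without this, the pool's threads outlive the call
      }
    }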


[39/50] [abbrv] hadoop git commit: HDFS-9002. Move o.a.h.hdfs.net/*Peer classes to hdfs-client. Contributed by Mingliang Liu.

Posted by vv...@apache.org.
HDFS-9002. Move o.a.h.hdfs.net/*Peer classes to hdfs-client. Contributed by Mingliang Liu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ed78b14e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ed78b14e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ed78b14e

Branch: refs/heads/YARN-3926
Commit: ed78b14ebc9a21bb57ccd088e8b49bfa457a396f
Parents: c2d2c18
Author: Haohui Mai <wh...@apache.org>
Authored: Thu Sep 3 15:32:53 2015 -0700
Committer: Haohui Mai <wh...@apache.org>
Committed: Thu Sep 3 15:32:53 2015 -0700

----------------------------------------------------------------------
 .../dev-support/findbugsExcludeFile.xml         |   1 +
 .../org/apache/hadoop/hdfs/DFSUtilClient.java   |  64 +++
 .../hdfs/client/HdfsClientConfigKeys.java       |  14 +
 .../apache/hadoop/hdfs/net/BasicInetPeer.java   | 133 +++++
 .../apache/hadoop/hdfs/net/EncryptedPeer.java   | 142 +++++
 .../org/apache/hadoop/hdfs/net/NioInetPeer.java | 136 +++++
 .../java/org/apache/hadoop/hdfs/net/Peer.java   |   8 +-
 .../protocol/datatransfer/IOStreamPair.java     |  37 ++
 .../datatransfer/TrustedChannelResolver.java    |  81 +++
 .../sasl/DataEncryptionKeyFactory.java          |  38 ++
 .../datatransfer/sasl/DataTransferSaslUtil.java | 519 +++++++++++++++++++
 .../sasl/SaslDataTransferClient.java            | 498 ++++++++++++++++++
 .../datatransfer/sasl/SaslParticipant.java      | 210 ++++++++
 .../SaslResponseWithNegotiatedCipherOption.java |  33 ++
 .../hadoop/hdfs/protocolPB/PBHelperClient.java  | 102 ++++
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |   3 +
 .../java/org/apache/hadoop/hdfs/DFSClient.java  |   3 +-
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |  28 +-
 .../apache/hadoop/hdfs/net/BasicInetPeer.java   | 133 -----
 .../apache/hadoop/hdfs/net/EncryptedPeer.java   | 142 -----
 .../org/apache/hadoop/hdfs/net/NioInetPeer.java | 136 -----
 .../apache/hadoop/hdfs/net/TcpPeerServer.java   |  65 +--
 .../protocol/datatransfer/IOStreamPair.java     |  37 --
 .../datatransfer/TrustedChannelResolver.java    |  81 ---
 .../sasl/DataEncryptionKeyFactory.java          |  38 --
 .../datatransfer/sasl/DataTransferSaslUtil.java | 519 -------------------
 .../sasl/SaslDataTransferClient.java            | 498 ------------------
 .../sasl/SaslDataTransferServer.java            |   2 +-
 .../datatransfer/sasl/SaslParticipant.java      | 210 --------
 .../SaslResponseWithNegotiatedCipherOption.java |  33 --
 .../apache/hadoop/hdfs/protocolPB/PBHelper.java | 111 +---
 .../hdfs/server/namenode/FSDirXAttrOp.java      |   8 +-
 .../hdfs/server/namenode/FSDirectory.java       |   3 +-
 .../hdfs/server/namenode/NamenodeFsck.java      |   4 +-
 .../apache/hadoop/hdfs/BlockReaderTestUtil.java |   3 +-
 .../org/apache/hadoop/hdfs/MiniDFSCluster.java  |   2 +-
 .../hadoop/hdfs/TestEncryptedTransfer.java      |   4 +-
 .../sasl/SaslDataTransferTestCase.java          |   2 +-
 .../datatransfer/sasl/TestSaslDataTransfer.java |   2 +-
 .../hdfs/qjournal/TestSecureNNWithQJM.java      |   2 +-
 .../blockmanagement/TestBlockTokenWithDFS.java  |   4 +-
 .../datanode/TestDataNodeVolumeFailure.java     |   4 +-
 42 files changed, 2064 insertions(+), 2029 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ed78b14e/hadoop-hdfs-project/hadoop-hdfs-client/dev-support/findbugsExcludeFile.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/dev-support/findbugsExcludeFile.xml b/hadoop-hdfs-project/hadoop-hdfs-client/dev-support/findbugsExcludeFile.xml
index 036ac09..515da24 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/dev-support/findbugsExcludeFile.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/dev-support/findbugsExcludeFile.xml
@@ -14,6 +14,7 @@
       <Class name="org.apache.hadoop.hdfs.protocol.SnapshotDiffReport$DiffReportEntry"/>
       <Class name="org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus"/>
       <Class name="org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport"/>
+      <Class name="org.apache.hadoop.hdfs.protocol.datatransfer.sasl.SaslResponseWithNegotiatedCipherOption"/>
     </Or>
     <Bug pattern="EI_EXPOSE_REP,EI_EXPOSE_REP2" />
   </Match>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ed78b14e/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java
index a89f556..b032250 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java
@@ -27,16 +27,24 @@ import org.apache.hadoop.crypto.key.KeyProviderFactory;
 import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
+import org.apache.hadoop.hdfs.net.BasicInetPeer;
+import org.apache.hadoop.hdfs.net.NioInetPeer;
+import org.apache.hadoop.hdfs.net.Peer;
 import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
+import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.DataEncryptionKeyFactory;
+import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.SaslDataTransferClient;
 import org.apache.hadoop.hdfs.protocolPB.ClientDatanodeProtocolTranslatorPB;
+import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
+import org.apache.hadoop.hdfs.util.IOUtilsClient;
 import org.apache.hadoop.hdfs.web.WebHdfsConstants;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.net.NodeBase;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.util.StringUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -46,8 +54,10 @@ import java.io.IOException;
 import java.io.UnsupportedEncodingException;
 import java.net.InetAddress;
 import java.net.InetSocketAddress;
+import java.net.Socket;
 import java.net.URI;
 import java.net.URISyntaxException;
+import java.nio.channels.SocketChannel;
 import java.text.SimpleDateFormat;
 import java.util.Collection;
 import java.util.Collections;
@@ -523,4 +533,58 @@ public class DFSUtilClient {
     }
     return keyProvider;
   }
+
+  public static Peer peerFromSocket(Socket socket)
+      throws IOException {
+    Peer peer = null;
+    boolean success = false;
+    try {
+      // TCP_NODELAY is crucial here because of bad interactions between
+      // Nagle's Algorithm and Delayed ACKs. With connection keepalive
+      // between the client and DN, the conversation looks like:
+      //   1. Client -> DN: Read block X
+      //   2. DN -> Client: data for block X
+      //   3. Client -> DN: Status OK (successful read)
+      //   4. Client -> DN: Read block Y
+      // The fact that steps #3 and #4 are both in the client->DN direction
+      // triggers Nagling. If the DN is using delayed ACKs, this results
+      // in a delay of 40ms or more.
+      //
+      // TCP_NODELAY disables nagling and thus avoids this performance
+      // disaster.
+      socket.setTcpNoDelay(true);
+      SocketChannel channel = socket.getChannel();
+      if (channel == null) {
+        peer = new BasicInetPeer(socket);
+      } else {
+        peer = new NioInetPeer(socket);
+      }
+      success = true;
+      return peer;
+    } finally {
+      if (!success) {
+        if (peer != null) peer.close();
+        socket.close();
+      }
+    }
+  }
+
+  public static Peer peerFromSocketAndKey(
+        SaslDataTransferClient saslClient, Socket s,
+        DataEncryptionKeyFactory keyFactory,
+        Token<BlockTokenIdentifier> blockToken, DatanodeID datanodeId)
+        throws IOException {
+    Peer peer = null;
+    boolean success = false;
+    try {
+      peer = peerFromSocket(s);
+      peer = saslClient.peerSend(peer, keyFactory, blockToken, datanodeId);
+      success = true;
+      return peer;
+    } finally {
+      if (!success) {
+        IOUtilsClient.cleanup(null, peer);
+      }
+    }
+  }
 }

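A hedged usage sketch for the peerFromSocket() helper above: a channel-less
Socket is wrapped as a BasicInetPeer, while a channel-backed Socket becomes a
NioInetPeer, which can honor both read and write timeouts. The host, port, and
timeouts are illustrative:

    import java.net.InetSocketAddress;
    import java.net.Socket;
    import java.nio.channels.SocketChannel;
    import org.apache.hadoop.hdfs.DFSUtilClient;
    import org.apache.hadoop.hdfs.net.Peer;

    public class PeerFromSocketSketch {
      public static void main(String[] args) throws Exception {
        InetSocketAddress addr = new InetSocketAddress("dn.example.com", 50010);

        // No SocketChannel attached, so this becomes a BasicInetPeer.
        Socket plain = new Socket();
        plain.connect(addr, 60000);
        Peer basic = DFSUtilClient.peerFromSocket(plain); // TCP_NODELAY is set

        // Channel-backed socket, so this becomes a NioInetPeer.
        SocketChannel ch = SocketChannel.open(addr);
        Peer nio = DFSUtilClient.peerFromSocket(ch.socket());
        nio.setReadTimeout(60000);
        nio.setWriteTimeout(60000); // honored only by channel-backed peers
      }
    }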
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ed78b14e/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
index 96bc8d3..e417fbe 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
@@ -126,6 +126,20 @@ public interface HdfsClientConfigKeys {
   long    DFS_DATANODE_READAHEAD_BYTES_DEFAULT = 4 * 1024 * 1024; // 4MB
   String  DFS_ENCRYPTION_KEY_PROVIDER_URI = "dfs.encryption.key.provider.uri";
 
+  String DFS_ENCRYPT_DATA_TRANSFER_CIPHER_SUITES_KEY =
+      "dfs.encrypt.data.transfer.cipher.suites";
+
+  String DFS_DATA_TRANSFER_PROTECTION_KEY = "dfs.data.transfer.protection";
+  String DFS_DATA_TRANSFER_PROTECTION_DEFAULT = "";
+  String DFS_DATA_TRANSFER_SASL_PROPS_RESOLVER_CLASS_KEY =
+      "dfs.data.transfer.saslproperties.resolver.class";
+
+  String DFS_ENCRYPT_DATA_TRANSFER_CIPHER_KEY_BITLENGTH_KEY =
+      "dfs.encrypt.data.transfer.cipher.key.bitlength";
+  int    DFS_ENCRYPT_DATA_TRANSFER_CIPHER_KEY_BITLENGTH_DEFAULT = 128;
+
+  String DFS_TRUSTEDCHANNEL_RESOLVER_CLASS = "dfs.trustedchannel.resolver.class";
+
   String REPLICA_ACCESSOR_BUILDER_CLASSES_KEY =
       PREFIX + "replica.accessor.builder.classes";
 

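A hedged sketch of wiring the keys added above through a client Configuration;
the values ("privacy", the AES suite name, 256 bits) are illustrative choices,
not defaults from the patch:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.HdfsConfiguration;
    import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;

    public class DataTransferConfSketch {
      public static Configuration secureDataTransferConf() {
        Configuration conf = new HdfsConfiguration();
        // Require SASL with privacy (QOP "auth-conf") on DataTransferProtocol.
        conf.set(HdfsClientConfigKeys.DFS_DATA_TRANSFER_PROTECTION_KEY,
            "privacy");
        // Negotiate AES/CTR/NoPadding with a 256-bit key for the data stream.
        conf.set(HdfsClientConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_CIPHER_SUITES_KEY,
            "AES/CTR/NoPadding");
        conf.setInt(
            HdfsClientConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_CIPHER_KEY_BITLENGTH_KEY,
            256);
        return conf;
      }
    }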
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ed78b14e/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/net/BasicInetPeer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/net/BasicInetPeer.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/net/BasicInetPeer.java
new file mode 100644
index 0000000..212dbef
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/net/BasicInetPeer.java
@@ -0,0 +1,133 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.net;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.net.Socket;
+import java.nio.channels.ReadableByteChannel;
+
+import org.apache.hadoop.net.unix.DomainSocket;
+
+/**
+ * Represents a peer that we communicate with by using a basic Socket
+ * that has no associated Channel.
+ *
+ */
+public class BasicInetPeer implements Peer {
+  private final Socket socket;
+  private final OutputStream out;
+  private final InputStream in;
+  private final boolean isLocal;
+
+  public BasicInetPeer(Socket socket) throws IOException {
+    this.socket = socket;
+    this.out = socket.getOutputStream();
+    this.in = socket.getInputStream();
+    this.isLocal = socket.getInetAddress().equals(socket.getLocalAddress());
+  }
+
+  @Override
+  public ReadableByteChannel getInputStreamChannel() {
+    /*
+     * This Socket has no channel, so there's nothing to return here.
+     */
+    return null;
+  }
+
+  @Override
+  public void setReadTimeout(int timeoutMs) throws IOException {
+    socket.setSoTimeout(timeoutMs);
+  }
+
+  @Override
+  public int getReceiveBufferSize() throws IOException {
+    return socket.getReceiveBufferSize();
+  }
+
+  @Override
+  public boolean getTcpNoDelay() throws IOException {
+    return socket.getTcpNoDelay();
+  }
+
+  @Override
+  public void setWriteTimeout(int timeoutMs) {
+   /* 
+    * We can't implement write timeouts. :(
+    * 
+    * Java provides no facility to set a blocking write timeout on a Socket.
+    * You can simulate a blocking write with a timeout by using
+    * non-blocking I/O.  However, we can't use nio here, because this Socket
+    * doesn't have an associated Channel.
+    * 
+    * See http://bugs.sun.com/bugdatabase/view_bug.do?bug_id=4031100 for
+    * more details.
+    */
+  }
+
+  @Override
+  public boolean isClosed() {
+    return socket.isClosed();
+  }
+
+  @Override
+  public void close() throws IOException {
+    socket.close();
+  }
+
+  @Override
+  public String getRemoteAddressString() {
+    return socket.getRemoteSocketAddress().toString();
+  }
+
+  @Override
+  public String getLocalAddressString() {
+    return socket.getLocalSocketAddress().toString();
+  }
+  
+  @Override
+  public InputStream getInputStream() throws IOException {
+    return in;
+  }
+
+  @Override
+  public OutputStream getOutputStream() throws IOException {
+    return out;
+  }
+
+  @Override
+  public boolean isLocal() {
+    return isLocal;
+  }
+
+  @Override
+  public String toString() {
+    return "BasicInetPeer(" + socket.toString() + ")";
+  }
+
+  @Override
+  public DomainSocket getDomainSocket() {
+    return null;
+  }
+
+  @Override
+  public boolean hasSecureChannel() {
+    return false;
+  }
+}

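A hedged illustration of the setWriteTimeout() caveat documented above: on a
channel-less socket the call is silently ignored, so a write to a stalled
remote reader can block indefinitely. Host, port, and payload are illustrative:

    import java.net.Socket;
    import org.apache.hadoop.hdfs.net.BasicInetPeer;
    import org.apache.hadoop.hdfs.net.Peer;

    public class WriteTimeoutCaveatSketch {
      public static void main(String[] args) throws Exception {
        Socket s = new Socket("dn.example.com", 50010); // no SocketChannel
        Peer peer = new BasicInetPeer(s);
        peer.setWriteTimeout(30000);    // accepted, but a no-op here
        byte[] payload = new byte[64 * 1024];
        peer.getOutputStream().write(payload); // may block without a timeout
        peer.close();
      }
    }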
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ed78b14e/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/net/EncryptedPeer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/net/EncryptedPeer.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/net/EncryptedPeer.java
new file mode 100644
index 0000000..da660c7
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/net/EncryptedPeer.java
@@ -0,0 +1,142 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.net;
+
+import java.io.IOException;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.hdfs.protocol.datatransfer.IOStreamPair;
+import org.apache.hadoop.net.unix.DomainSocket;
+
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.nio.channels.ReadableByteChannel;
+
+/**
+ * Represents a peer that we communicate with by using an encrypted
+ * communications medium.
+ */
+@InterfaceAudience.Private
+public class EncryptedPeer implements Peer {
+  private final Peer enclosedPeer;
+
+  /**
+   * An encrypted InputStream.
+   */
+  private final InputStream in;
+  
+  /**
+   * An encrypted OutputStream.
+   */
+  private final OutputStream out;
+  
+  /**
+   * An encrypted ReadableByteChannel.
+   */
+  private final ReadableByteChannel channel;
+
+  public EncryptedPeer(Peer enclosedPeer, IOStreamPair ios) {
+    this.enclosedPeer = enclosedPeer;
+    this.in = ios.in;
+    this.out = ios.out;
+    this.channel = ios.in instanceof ReadableByteChannel ? 
+        (ReadableByteChannel)ios.in : null;
+  }
+
+  @Override
+  public ReadableByteChannel getInputStreamChannel() {
+    return channel;
+  }
+
+  @Override
+  public void setReadTimeout(int timeoutMs) throws IOException {
+    enclosedPeer.setReadTimeout(timeoutMs);
+  }
+
+  @Override
+  public int getReceiveBufferSize() throws IOException {
+    return enclosedPeer.getReceiveBufferSize();
+  }
+
+  @Override
+  public boolean getTcpNoDelay() throws IOException {
+    return enclosedPeer.getTcpNoDelay();
+  }
+
+  @Override
+  public void setWriteTimeout(int timeoutMs) throws IOException {
+    enclosedPeer.setWriteTimeout(timeoutMs);
+  }
+
+  @Override
+  public boolean isClosed() {
+    return enclosedPeer.isClosed();
+  }
+
+  @Override
+  public void close() throws IOException {
+    try {
+      in.close();
+    } finally {
+      try {
+        out.close();
+      } finally {
+        enclosedPeer.close();
+      }
+    }
+  }
+
+  @Override
+  public String getRemoteAddressString() {
+    return enclosedPeer.getRemoteAddressString();
+  }
+
+  @Override
+  public String getLocalAddressString() {
+    return enclosedPeer.getLocalAddressString();
+  }
+
+  @Override
+  public InputStream getInputStream() throws IOException {
+    return in;
+  }
+
+  @Override
+  public OutputStream getOutputStream() throws IOException {
+    return out;
+  }
+
+  @Override
+  public boolean isLocal() {
+    return enclosedPeer.isLocal();
+  }
+
+  @Override
+  public String toString() {
+    return "EncryptedPeer(" + enclosedPeer + ")";
+  }
+
+  @Override
+  public DomainSocket getDomainSocket() {
+    return enclosedPeer.getDomainSocket();
+  }
+
+  @Override
+  public boolean hasSecureChannel() {
+    return true;
+  }
+}

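A hedged sketch of how EncryptedPeer layers a negotiated stream pair over an
existing peer; in the patch this is done by SaslDataTransferClient, and the
streams below stand in for the CryptoInputStream/CryptoOutputStream produced
by negotiation:

    import java.io.InputStream;
    import java.io.OutputStream;
    import org.apache.hadoop.hdfs.net.EncryptedPeer;
    import org.apache.hadoop.hdfs.net.Peer;
    import org.apache.hadoop.hdfs.protocol.datatransfer.IOStreamPair;

    public class EncryptedPeerSketch {
      public static Peer secure(Peer rawPeer, InputStream cryptoIn,
          OutputStream cryptoOut) {
        // Subsequent reads and writes on the returned peer go through the
        // wrapped streams, and hasSecureChannel() reports true.
        IOStreamPair ios = new IOStreamPair(cryptoIn, cryptoOut);
        return new EncryptedPeer(rawPeer, ios);
      }
    }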
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ed78b14e/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/net/NioInetPeer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/net/NioInetPeer.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/net/NioInetPeer.java
new file mode 100644
index 0000000..a12a69b
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/net/NioInetPeer.java
@@ -0,0 +1,136 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.net;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.net.Socket;
+import java.nio.channels.ReadableByteChannel;
+
+import org.apache.hadoop.net.SocketInputStream;
+import org.apache.hadoop.net.SocketOutputStream;
+import org.apache.hadoop.net.unix.DomainSocket;
+
+/**
+ * Represents a peer that we communicate with by using non-blocking I/O 
+ * on a Socket.
+ */
+public class NioInetPeer implements Peer {
+  private final Socket socket;
+
+  /**
+   * An InputStream which simulates blocking I/O with timeouts using NIO.
+   */
+  private final SocketInputStream in;
+  
+  /**
+   * An OutputStream which simulates blocking I/O with timeouts using NIO.
+   */
+  private final SocketOutputStream out;
+
+  private final boolean isLocal;
+
+  public NioInetPeer(Socket socket) throws IOException {
+    this.socket = socket;
+    this.in = new SocketInputStream(socket.getChannel(), 0);
+    this.out = new SocketOutputStream(socket.getChannel(), 0);
+    this.isLocal = socket.getInetAddress().equals(socket.getLocalAddress());
+  }
+
+  @Override
+  public ReadableByteChannel getInputStreamChannel() {
+    return in;
+  }
+
+  @Override
+  public void setReadTimeout(int timeoutMs) throws IOException {
+    in.setTimeout(timeoutMs);
+  }
+
+  @Override
+  public int getReceiveBufferSize() throws IOException {
+    return socket.getReceiveBufferSize();
+  }
+
+  @Override
+  public boolean getTcpNoDelay() throws IOException {
+    return socket.getTcpNoDelay();
+  }
+
+  @Override
+  public void setWriteTimeout(int timeoutMs) throws IOException {
+    out.setTimeout(timeoutMs);
+  }
+
+  @Override
+  public boolean isClosed() {
+    return socket.isClosed();
+  }
+
+  @Override
+  public void close() throws IOException {
+    // We always close the outermost streams-- in this case, 'in' and 'out'
+    // Closing either one of these will also close the Socket.
+    try {
+      in.close();
+    } finally {
+      out.close();
+    }
+  }
+
+  @Override
+  public String getRemoteAddressString() {
+    return socket.getRemoteSocketAddress().toString();
+  }
+
+  @Override
+  public String getLocalAddressString() {
+    return socket.getLocalSocketAddress().toString();
+  }
+
+  @Override
+  public InputStream getInputStream() throws IOException {
+    return in;
+  }
+
+  @Override
+  public OutputStream getOutputStream() throws IOException {
+    return out;
+  }
+
+  @Override
+  public boolean isLocal() {
+    return isLocal;
+  }
+
+  @Override
+  public String toString() {
+    return "NioInetPeer(" + socket.toString() + ")";
+  }
+
+  @Override
+  public DomainSocket getDomainSocket() {
+    return null;
+  }
+
+  @Override
+  public boolean hasSecureChannel() {
+    return false;
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ed78b14e/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/net/Peer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/net/Peer.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/net/Peer.java
index 42cf287..3c38d5f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/net/Peer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/net/Peer.java
@@ -57,8 +57,8 @@ public interface Peer extends Closeable {
    * Set the write timeout on this peer.
    *
    * Note: this is not honored for BasicInetPeer.
-   * See {@link BasicSocketPeer#setWriteTimeout} for details.
-   * 
+   * See {@link BasicInetPeer#setWriteTimeout} for details.
+   *
    * @param timeoutMs       The timeout in milliseconds.
    */
   public void setWriteTimeout(int timeoutMs) throws IOException;
@@ -76,13 +76,13 @@ public interface Peer extends Closeable {
   public void close() throws IOException;
 
   /**
-   * @return               A string representing the remote end of our 
+   * @return               A string representing the remote end of our
    *                       connection to the peer.
    */
   public String getRemoteAddressString();
 
   /**
-   * @return               A string representing the local end of our 
+   * @return               A string representing the local end of our
    *                       connection to the peer.
    */
   public String getLocalAddressString();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ed78b14e/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/IOStreamPair.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/IOStreamPair.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/IOStreamPair.java
new file mode 100644
index 0000000..23407f8
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/IOStreamPair.java
@@ -0,0 +1,37 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.protocol.datatransfer;
+
+import java.io.InputStream;
+import java.io.OutputStream;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+
+/**
+ * A little struct class to wrap an InputStream and an OutputStream.
+ */
+@InterfaceAudience.Private
+public class IOStreamPair {
+  public final InputStream in;
+  public final OutputStream out;
+  
+  public IOStreamPair(InputStream in, OutputStream out) {
+    this.in = in;
+    this.out = out;
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ed78b14e/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/TrustedChannelResolver.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/TrustedChannelResolver.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/TrustedChannelResolver.java
new file mode 100644
index 0000000..3846f4a
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/TrustedChannelResolver.java
@@ -0,0 +1,81 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.protocol.datatransfer;
+
+import java.net.InetAddress;
+
+import org.apache.hadoop.conf.Configurable;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
+import org.apache.hadoop.util.ReflectionUtils;
+
+/**
+ * Class used to indicate whether a channel is trusted or not.
+ * The default implementation returns false, indicating that the
+ * channel is not trusted. This class can be overridden to provide
+ * custom logic for determining whether a channel is trusted, and
+ * the custom class can be specified via configuration.
+ *
+ */
+public class TrustedChannelResolver implements Configurable {
+  Configuration conf;
+
+  /**
+   * Returns an instance of TrustedChannelResolver.
+   * Looks up the configuration to see if there is a custom class specified.
+   * @param conf configuration to check for a custom resolver class
+   * @return a TrustedChannelResolver instance
+   */
+  public static TrustedChannelResolver getInstance(Configuration conf) {
+    Class<? extends TrustedChannelResolver> clazz =
+      conf.getClass(
+          HdfsClientConfigKeys.DFS_TRUSTEDCHANNEL_RESOLVER_CLASS,
+          TrustedChannelResolver.class, TrustedChannelResolver.class);
+    return ReflectionUtils.newInstance(clazz, conf);
+  }
+
+  @Override
+  public void setConf(Configuration conf) {
+    this.conf = conf;
+  }
+
+  @Override
+  public Configuration getConf() {
+    return conf;
+  }
+
+  /**
+   * Return boolean value indicating whether a channel is trusted or not
+   * from a client's perspective.
+   * @return true if the channel is trusted and false otherwise.
+   */
+  public boolean isTrusted() {
+    return false;
+  }
+
+
+  /**
+   * Return whether the channel to the given peer address is trusted.
+   * @param peerAddress address of the peer
+   * @return true if the channel is trusted and false otherwise.
+   */
+  public boolean isTrusted(InetAddress peerAddress) {
+    return false;
+  }
+}

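A hedged sketch of the extension point described above: a custom resolver (the
class below is illustrative) that trusts only loopback peers, wired in through
the new DFS_TRUSTEDCHANNEL_RESOLVER_CLASS key:

    import java.net.InetAddress;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
    import org.apache.hadoop.hdfs.protocol.datatransfer.TrustedChannelResolver;

    public class LoopbackTrustedChannelResolver extends TrustedChannelResolver {
      @Override
      public boolean isTrusted() {
        // Client side: the destination is unknown here, so stay untrusted.
        return false;
      }

      @Override
      public boolean isTrusted(InetAddress peerAddress) {
        return peerAddress.isLoopbackAddress();
      }

      public static void wire(Configuration conf) {
        conf.setClass(HdfsClientConfigKeys.DFS_TRUSTEDCHANNEL_RESOLVER_CLASS,
            LoopbackTrustedChannelResolver.class, TrustedChannelResolver.class);
      }
    }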
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ed78b14e/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/DataEncryptionKeyFactory.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/DataEncryptionKeyFactory.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/DataEncryptionKeyFactory.java
new file mode 100644
index 0000000..959cba0
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/DataEncryptionKeyFactory.java
@@ -0,0 +1,38 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.protocol.datatransfer.sasl;
+
+import java.io.IOException;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
+
+/**
+ * Creates a new {@link DataEncryptionKey} on demand.
+ */
+@InterfaceAudience.Private
+public interface DataEncryptionKeyFactory {
+
+  /**
+   * Creates a new DataEncryptionKey.
+   *
+   * @return DataEncryptionKey newly created
+   * @throws IOException for any error
+   */
+  DataEncryptionKey newDataEncryptionKey() throws IOException;
+}

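Since DataEncryptionKeyFactory has a single method, a caller can supply an
implementation inline. A hedged sketch; the failing placeholder stands in for
however a real client obtains its key (e.g. an RPC to the NameNode), which
should also honor the key's expiry:

    import java.io.IOException;
    import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.DataEncryptionKeyFactory;
    import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;

    public class KeyFactorySketch {
      public static DataEncryptionKeyFactory factory() {
        return new DataEncryptionKeyFactory() {
          @Override
          public DataEncryptionKey newDataEncryptionKey() throws IOException {
            // Illustrative placeholder only; a real implementation would
            // fetch (and likely cache) a key here.
            throw new IOException("no key source in this sketch");
          }
        };
      }
    }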
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ed78b14e/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/DataTransferSaslUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/DataTransferSaslUtil.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/DataTransferSaslUtil.java
new file mode 100644
index 0000000..256caff
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/DataTransferSaslUtil.java
@@ -0,0 +1,519 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.protocol.datatransfer.sasl;
+
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_RPC_PROTECTION;
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_SASL_PROPS_RESOLVER_CLASS;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_DATA_TRANSFER_PROTECTION_KEY;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_DATA_TRANSFER_SASL_PROPS_RESOLVER_CLASS_KEY;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_CIPHER_KEY_BITLENGTH_KEY;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_CIPHER_KEY_BITLENGTH_DEFAULT;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_CIPHER_SUITES_KEY;
+import static org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.net.InetAddress;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import javax.security.sasl.Sasl;
+
+import org.apache.commons.codec.binary.Base64;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.crypto.CipherOption;
+import org.apache.hadoop.crypto.CipherSuite;
+import org.apache.hadoop.crypto.CryptoCodec;
+import org.apache.hadoop.crypto.CryptoInputStream;
+import org.apache.hadoop.crypto.CryptoOutputStream;
+import org.apache.hadoop.hdfs.net.Peer;
+import org.apache.hadoop.hdfs.protocol.datatransfer.IOStreamPair;
+import org.apache.hadoop.hdfs.protocol.datatransfer.InvalidEncryptionKeyException;
+import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferEncryptorMessageProto;
+import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferEncryptorMessageProto.DataTransferEncryptorStatus;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto;
+import org.apache.hadoop.hdfs.protocolPB.PBHelperClient;
+import org.apache.hadoop.security.SaslPropertiesResolver;
+import org.apache.hadoop.security.SaslRpcServer.QualityOfProtection;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.base.Charsets;
+import com.google.common.collect.ImmutableSet;
+import com.google.common.collect.Maps;
+import com.google.common.net.InetAddresses;
+import com.google.protobuf.ByteString;
+
+/**
+ * Utility methods implementing SASL negotiation for DataTransferProtocol.
+ */
+@InterfaceAudience.Private
+public final class DataTransferSaslUtil {
+
+  private static final Logger LOG = LoggerFactory.getLogger(
+    DataTransferSaslUtil.class);
+
+  /**
+   * Delimiter for the three-part SASL username string.
+   */
+  public static final String NAME_DELIMITER = " ";
+
+  /**
+   * Sent by clients and validated by servers. We use a number that's unlikely
+   * to ever be sent as the value of the DATA_TRANSFER_VERSION.
+   */
+  public static final int SASL_TRANSFER_MAGIC_NUMBER = 0xDEADBEEF;
+
+  /**
+   * Checks that SASL negotiation has completed for the given participant, and
+   * the negotiated quality of protection is included in the given SASL
+   * properties and therefore acceptable.
+   *
+   * @param sasl participant to check
+   * @param saslProps properties of SASL negotiation
+   * @throws IOException for any error
+   */
+  public static void checkSaslComplete(SaslParticipant sasl,
+      Map<String, String> saslProps) throws IOException {
+    if (!sasl.isComplete()) {
+      throw new IOException("Failed to complete SASL handshake");
+    }
+    Set<String> requestedQop = ImmutableSet.copyOf(Arrays.asList(
+      saslProps.get(Sasl.QOP).split(",")));
+    String negotiatedQop = sasl.getNegotiatedQop();
+    LOG.debug("Verifying QOP, requested QOP = {}, negotiated QOP = {}",
+      requestedQop, negotiatedQop);
+    if (!requestedQop.contains(negotiatedQop)) {
+      throw new IOException(String.format("SASL handshake completed, but " +
+        "channel does not have acceptable quality of protection, " +
+        "requested = %s, negotiated = %s", requestedQop, negotiatedQop));
+    }
+  }
+  
+  /**
+   * Check whether the requested SASL QOP contains privacy.
+   * 
+   * @param saslProps properties of SASL negotiation
+   * @return boolean true if privacy exists
+   */
+  public static boolean requestedQopContainsPrivacy(
+      Map<String, String> saslProps) {
+    Set<String> requestedQop = ImmutableSet.copyOf(Arrays.asList(
+        saslProps.get(Sasl.QOP).split(",")));
+    return requestedQop.contains("auth-conf");
+  }
+
+  /**
+   * Creates SASL properties required for an encrypted SASL negotiation.
+   *
+   * @param encryptionAlgorithm to use for SASL negotiation
+   * @return properties of encrypted SASL negotiation
+   */
+  public static Map<String, String> createSaslPropertiesForEncryption(
+      String encryptionAlgorithm) {
+    Map<String, String> saslProps = Maps.newHashMapWithExpectedSize(3);
+    saslProps.put(Sasl.QOP, QualityOfProtection.PRIVACY.getSaslQop());
+    saslProps.put(Sasl.SERVER_AUTH, "true");
+    saslProps.put("com.sun.security.sasl.digest.cipher", encryptionAlgorithm);
+    return saslProps;
+  }
+
+  /**
+   * For an encrypted SASL negotiation, encodes an encryption key to a SASL
+   * password.
+   *
+   * @param encryptionKey to encode
+   * @return key encoded as SASL password
+   */
+  public static char[] encryptionKeyToPassword(byte[] encryptionKey) {
+    return new String(Base64.encodeBase64(encryptionKey, false), Charsets.UTF_8)
+      .toCharArray();
+  }
+
+  /**
+   * Returns the InetAddress of a peer.  The getRemoteAddressString() value
+   * has the form [host][/ip-address]:port.  The host may be missing.  The IP
+   * address (and preceding '/') may be missing.  The port, preceded by ':',
+   * is always present.
+   * @param peer the peer whose remote address is resolved
+   * @return InetAddress of the peer
+   */
+  public static InetAddress getPeerAddress(Peer peer) {
+    String remoteAddr = peer.getRemoteAddressString().split(":")[0];
+    int slashIdx = remoteAddr.indexOf('/');
+    return InetAddresses.forString(slashIdx != -1 ?
+        remoteAddr.substring(slashIdx + 1, remoteAddr.length()) :
+        remoteAddr);
+  }
+
+  /**
+   * Creates a SaslPropertiesResolver from the given configuration.  This method
+   * works by cloning the configuration, translating configuration properties
+   * specific to DataTransferProtocol to what SaslPropertiesResolver expects,
+   * and then delegating to SaslPropertiesResolver for initialization.  This
+   * method returns null if SASL protection has not been configured for
+   * DataTransferProtocol.
+   *
+   * @param conf configuration to read
+   * @return SaslPropertiesResolver for DataTransferProtocol, or null if not
+   *   configured
+   */
+  public static SaslPropertiesResolver getSaslPropertiesResolver(
+      Configuration conf) {
+    String qops = conf.get(DFS_DATA_TRANSFER_PROTECTION_KEY);
+    if (qops == null || qops.isEmpty()) {
+      LOG.debug("DataTransferProtocol not using SaslPropertiesResolver, no " +
+        "QOP found in configuration for {}", DFS_DATA_TRANSFER_PROTECTION_KEY);
+      return null;
+    }
+    Configuration saslPropsResolverConf = new Configuration(conf);
+    saslPropsResolverConf.set(HADOOP_RPC_PROTECTION, qops);
+    Class<? extends SaslPropertiesResolver> resolverClass = conf.getClass(
+      HADOOP_SECURITY_SASL_PROPS_RESOLVER_CLASS,
+      SaslPropertiesResolver.class, SaslPropertiesResolver.class);
+    resolverClass = conf.getClass(DFS_DATA_TRANSFER_SASL_PROPS_RESOLVER_CLASS_KEY,
+      resolverClass, SaslPropertiesResolver.class);
+    saslPropsResolverConf.setClass(HADOOP_SECURITY_SASL_PROPS_RESOLVER_CLASS,
+      resolverClass, SaslPropertiesResolver.class);
+    SaslPropertiesResolver resolver = SaslPropertiesResolver.getInstance(
+      saslPropsResolverConf);
+    LOG.debug("DataTransferProtocol using SaslPropertiesResolver, configured " +
+      "QOP {} = {}, configured class {} = {}", DFS_DATA_TRANSFER_PROTECTION_KEY, qops, 
+      DFS_DATA_TRANSFER_SASL_PROPS_RESOLVER_CLASS_KEY, resolverClass);
+    return resolver;
+  }
+
+  /**
+   * Reads a SASL negotiation message.
+   *
+   * @param in stream to read
+   * @return bytes of SASL negotiation message
+   * @throws IOException for any error
+   */
+  public static byte[] readSaslMessage(InputStream in) throws IOException {
+    DataTransferEncryptorMessageProto proto =
+        DataTransferEncryptorMessageProto.parseFrom(vintPrefixed(in));
+    if (proto.getStatus() == DataTransferEncryptorStatus.ERROR_UNKNOWN_KEY) {
+      throw new InvalidEncryptionKeyException(proto.getMessage());
+    } else if (proto.getStatus() == DataTransferEncryptorStatus.ERROR) {
+      throw new IOException(proto.getMessage());
+    } else {
+      return proto.getPayload().toByteArray();
+    }
+  }
+  
+  /**
+   * Reads a SASL negotiation message and negotiation cipher options. 
+   * 
+   * @param in stream to read
+   * @param cipherOptions list to store negotiation cipher options
+   * @return byte[] SASL negotiation message
+   * @throws IOException for any error
+   */
+  public static byte[] readSaslMessageAndNegotiationCipherOptions(
+      InputStream in, List<CipherOption> cipherOptions) throws IOException {
+    DataTransferEncryptorMessageProto proto =
+        DataTransferEncryptorMessageProto.parseFrom(vintPrefixed(in));
+    if (proto.getStatus() == DataTransferEncryptorStatus.ERROR_UNKNOWN_KEY) {
+      throw new InvalidEncryptionKeyException(proto.getMessage());
+    } else if (proto.getStatus() == DataTransferEncryptorStatus.ERROR) {
+      throw new IOException(proto.getMessage());
+    } else {
+      List<CipherOptionProto> optionProtos = proto.getCipherOptionList();
+      if (optionProtos != null) {
+        for (CipherOptionProto optionProto : optionProtos) {
+          cipherOptions.add(PBHelperClient.convert(optionProto));
+        }
+      }
+      return proto.getPayload().toByteArray();
+    }
+  }
+  
+  /**
+   * Negotiate a cipher option which server supports.
+   * 
+   * @param conf the configuration
+   * @param options the cipher options which client supports
+   * @return CipherOption negotiated cipher option
+   */
+  public static CipherOption negotiateCipherOption(Configuration conf,
+      List<CipherOption> options) throws IOException {
+    // Negotiate cipher suites if configured.  Currently, the only supported
+    // cipher suite is AES/CTR/NoPadding, but the protocol allows multiple
+    // values for future expansion.
+    String cipherSuites = conf.get(DFS_ENCRYPT_DATA_TRANSFER_CIPHER_SUITES_KEY);
+    if (cipherSuites == null || cipherSuites.isEmpty()) {
+      return null;
+    }
+    if (!cipherSuites.equals(CipherSuite.AES_CTR_NOPADDING.getName())) {
+      throw new IOException(String.format("Invalid cipher suite, %s=%s",
+          DFS_ENCRYPT_DATA_TRANSFER_CIPHER_SUITES_KEY, cipherSuites));
+    }
+    if (options != null) {
+      for (CipherOption option : options) {
+        CipherSuite suite = option.getCipherSuite();
+        if (suite == CipherSuite.AES_CTR_NOPADDING) {
+          int keyLen = conf.getInt(
+              DFS_ENCRYPT_DATA_TRANSFER_CIPHER_KEY_BITLENGTH_KEY,
+              DFS_ENCRYPT_DATA_TRANSFER_CIPHER_KEY_BITLENGTH_DEFAULT) / 8;
+          CryptoCodec codec = CryptoCodec.getInstance(conf, suite);
+          byte[] inKey = new byte[keyLen];
+          byte[] inIv = new byte[suite.getAlgorithmBlockSize()];
+          byte[] outKey = new byte[keyLen];
+          byte[] outIv = new byte[suite.getAlgorithmBlockSize()];
+          codec.generateSecureRandom(inKey);
+          codec.generateSecureRandom(inIv);
+          codec.generateSecureRandom(outKey);
+          codec.generateSecureRandom(outIv);
+          return new CipherOption(suite, inKey, inIv, outKey, outIv);
+        }
+      }
+    }
+    return null;
+  }
+  
+  /**
+   * Send SASL message and negotiated cipher option to client.
+   * 
+   * @param out stream to receive message
+   * @param payload to send
+   * @param option negotiated cipher option
+   * @throws IOException for any error
+   */
+  public static void sendSaslMessageAndNegotiatedCipherOption(
+      OutputStream out, byte[] payload, CipherOption option) 
+          throws IOException {
+    DataTransferEncryptorMessageProto.Builder builder =
+        DataTransferEncryptorMessageProto.newBuilder();
+    
+    builder.setStatus(DataTransferEncryptorStatus.SUCCESS);
+    if (payload != null) {
+      builder.setPayload(ByteString.copyFrom(payload));
+    }
+    if (option != null) {
+      builder.addCipherOption(PBHelperClient.convert(option));
+    }
+    
+    DataTransferEncryptorMessageProto proto = builder.build();
+    proto.writeDelimitedTo(out);
+    out.flush();
+  }
+  
+  /**
+   * Create IOStreamPair of {@link org.apache.hadoop.crypto.CryptoInputStream}
+   * and {@link org.apache.hadoop.crypto.CryptoOutputStream}
+   * 
+   * @param conf the configuration
+   * @param cipherOption negotiated cipher option
+   * @param out underlying output stream
+   * @param in underlying input stream
+   * @param isServer is server side
+   * @return IOStreamPair the stream pair
+   * @throws IOException for any error
+   */
+  public static IOStreamPair createStreamPair(Configuration conf,
+      CipherOption cipherOption, OutputStream out, InputStream in, 
+      boolean isServer) throws IOException {
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Creating IOStreamPair of CryptoInputStream and " +
+          "CryptoOutputStream.");
+    }
+    CryptoCodec codec = CryptoCodec.getInstance(conf, 
+        cipherOption.getCipherSuite());
+    byte[] inKey = cipherOption.getInKey();
+    byte[] inIv = cipherOption.getInIv();
+    byte[] outKey = cipherOption.getOutKey();
+    byte[] outIv = cipherOption.getOutIv();
+    InputStream cIn = new CryptoInputStream(in, codec, 
+        isServer ? inKey : outKey, isServer ? inIv : outIv);
+    OutputStream cOut = new CryptoOutputStream(out, codec, 
+        isServer ? outKey : inKey, isServer ? outIv : inIv);
+    return new IOStreamPair(cIn, cOut);
+  }
+
+  /**
+   * Sends a SASL negotiation message indicating an error.
+   *
+   * @param out stream to receive message
+   * @param message to send
+   * @throws IOException for any error
+   */
+  public static void sendGenericSaslErrorMessage(OutputStream out,
+      String message) throws IOException {
+    sendSaslMessage(out, DataTransferEncryptorStatus.ERROR, null, message);
+  }
+
+  /**
+   * Sends a SASL negotiation message.
+   *
+   * @param out stream to receive message
+   * @param payload to send
+   * @throws IOException for any error
+   */
+  public static void sendSaslMessage(OutputStream out, byte[] payload)
+      throws IOException {
+    sendSaslMessage(out, DataTransferEncryptorStatus.SUCCESS, payload, null);
+  }
+  
+  /**
+   * Send a SASL negotiation message and negotiation cipher options to server.
+   * 
+   * @param out stream to receive message
+   * @param payload to send
+   * @param options cipher options to negotiate
+   * @throws IOException for any error
+   */
+  public static void sendSaslMessageAndNegotiationCipherOptions(
+      OutputStream out, byte[] payload, List<CipherOption> options)
+          throws IOException {
+    DataTransferEncryptorMessageProto.Builder builder =
+        DataTransferEncryptorMessageProto.newBuilder();
+    
+    builder.setStatus(DataTransferEncryptorStatus.SUCCESS);
+    if (payload != null) {
+      builder.setPayload(ByteString.copyFrom(payload));
+    }
+    if (options != null) {
+      builder.addAllCipherOption(PBHelperClient.convertCipherOptions(options));
+    }
+    
+    DataTransferEncryptorMessageProto proto = builder.build();
+    proto.writeDelimitedTo(out);
+    out.flush();
+  }
+  
+  /**
+   * Read SASL message and negotiated cipher option from server.
+   * 
+   * @param in stream to read
+   * @return SaslResponseWithNegotiatedCipherOption SASL message and 
+   * negotiated cipher option
+   * @throws IOException for any error
+   */
+  public static SaslResponseWithNegotiatedCipherOption
+      readSaslMessageAndNegotiatedCipherOption(InputStream in)
+          throws IOException {
+    DataTransferEncryptorMessageProto proto =
+        DataTransferEncryptorMessageProto.parseFrom(vintPrefixed(in));
+    if (proto.getStatus() == DataTransferEncryptorStatus.ERROR_UNKNOWN_KEY) {
+      throw new InvalidEncryptionKeyException(proto.getMessage());
+    } else if (proto.getStatus() == DataTransferEncryptorStatus.ERROR) {
+      throw new IOException(proto.getMessage());
+    } else {
+      byte[] response = proto.getPayload().toByteArray();
+      List<CipherOption> options = PBHelperClient.convertCipherOptionProtos(
+          proto.getCipherOptionList());
+      CipherOption option = null;
+      if (options != null && !options.isEmpty()) {
+        option = options.get(0);
+      }
+      return new SaslResponseWithNegotiatedCipherOption(response, option);
+    }
+  }
+  
+  /**
+   * Encrypt the key and iv of the negotiated cipher option.
+   * 
+   * @param option negotiated cipher option
+   * @param sasl SASL participant representing server
+   * @return CipherOption negotiated cipher option which contains the 
+   * encrypted key and iv
+   * @throws IOException for any error
+   */
+  public static CipherOption wrap(CipherOption option, SaslParticipant sasl) 
+      throws IOException {
+    if (option != null) {
+      byte[] inKey = option.getInKey();
+      if (inKey != null) {
+        inKey = sasl.wrap(inKey, 0, inKey.length);
+      }
+      byte[] outKey = option.getOutKey();
+      if (outKey != null) {
+        outKey = sasl.wrap(outKey, 0, outKey.length);
+      }
+      return new CipherOption(option.getCipherSuite(), inKey, option.getInIv(),
+          outKey, option.getOutIv());
+    }
+    
+    return null;
+  }
+  
+  /**
+   * Decrypt the key and iv of the negotiated cipher option.
+   * 
+   * @param option negotiated cipher option
+   * @param sasl SASL participant representing client
+   * @return CipherOption negotiated cipher option which contains the 
+   * decrypted key and iv
+   * @throws IOException for any error
+   */
+  public static CipherOption unwrap(CipherOption option, SaslParticipant sasl)
+      throws IOException {
+    if (option != null) {
+      byte[] inKey = option.getInKey();
+      if (inKey != null) {
+        inKey = sasl.unwrap(inKey, 0, inKey.length);
+      }
+      byte[] outKey = option.getOutKey();
+      if (outKey != null) {
+        outKey = sasl.unwrap(outKey, 0, outKey.length);
+      }
+      return new CipherOption(option.getCipherSuite(), inKey, option.getInIv(),
+          outKey, option.getOutIv());
+    }
+    
+    return null;
+  }
+
+  /**
+   * Sends a SASL negotiation message.
+   *
+   * @param out stream to receive message
+   * @param status negotiation status
+   * @param payload to send
+   * @param message to send
+   * @throws IOException for any error
+   */
+  public static void sendSaslMessage(OutputStream out,
+      DataTransferEncryptorStatus status, byte[] payload, String message)
+          throws IOException {
+    DataTransferEncryptorMessageProto.Builder builder =
+        DataTransferEncryptorMessageProto.newBuilder();
+    
+    builder.setStatus(status);
+    if (payload != null) {
+      builder.setPayload(ByteString.copyFrom(payload));
+    }
+    if (message != null) {
+      builder.setMessage(message);
+    }
+    
+    DataTransferEncryptorMessageProto proto = builder.build();
+    proto.writeDelimitedTo(out);
+    out.flush();
+  }
+
+  /**
+   * There is no reason to instantiate this class.
+   */
+  private DataTransferSaslUtil() {
+  }
+}

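A hedged sketch of the SASL property map that the helpers above inspect. The
dfs.data.transfer.protection values map to SASL QOP tokens (authentication ->
"auth", integrity -> "auth-int", privacy -> "auth-conf"), and
requestedQopContainsPrivacy() keys off "auth-conf":

    import java.util.HashMap;
    import java.util.Map;
    import javax.security.sasl.Sasl;
    import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.DataTransferSaslUtil;

    public class QopSketch {
      public static void main(String[] args) {
        Map<String, String> saslProps = new HashMap<String, String>();
        // Ordered preference list, strongest first; "auth-conf" means privacy.
        saslProps.put(Sasl.QOP, "auth-conf,auth-int,auth");
        saslProps.put(Sasl.SERVER_AUTH, "true");
        boolean privacy =
            DataTransferSaslUtil.requestedQopContainsPrivacy(saslProps); // true
        System.out.println("privacy requested: " + privacy);
      }
    }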
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ed78b14e/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslDataTransferClient.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslDataTransferClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslDataTransferClient.java
new file mode 100644
index 0000000..913203c
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslDataTransferClient.java
@@ -0,0 +1,498 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.protocol.datatransfer.sasl;
+
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_CIPHER_SUITES_KEY;
+import static org.apache.hadoop.hdfs.protocol.datatransfer.sasl.DataTransferSaslUtil.*;
+
+import java.io.DataInputStream;
+import java.io.DataOutputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.net.InetAddress;
+import java.net.Socket;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import javax.security.auth.callback.Callback;
+import javax.security.auth.callback.CallbackHandler;
+import javax.security.auth.callback.NameCallback;
+import javax.security.auth.callback.PasswordCallback;
+import javax.security.auth.callback.UnsupportedCallbackException;
+import javax.security.sasl.RealmCallback;
+import javax.security.sasl.RealmChoiceCallback;
+
+import org.apache.commons.codec.binary.Base64;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.crypto.CipherOption;
+import org.apache.hadoop.crypto.CipherSuite;
+import org.apache.hadoop.hdfs.net.EncryptedPeer;
+import org.apache.hadoop.hdfs.net.Peer;
+import org.apache.hadoop.hdfs.protocol.DatanodeID;
+import org.apache.hadoop.hdfs.protocol.datatransfer.IOStreamPair;
+import org.apache.hadoop.hdfs.protocol.datatransfer.TrustedChannelResolver;
+import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
+import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
+import org.apache.hadoop.security.SaslPropertiesResolver;
+import org.apache.hadoop.security.SecurityUtil;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.token.Token;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.base.Charsets;
+import com.google.common.collect.Lists;
+
+/**
+ * Negotiates SASL for DataTransferProtocol on behalf of a client.  There are
+ * two supported variants of SASL negotiation: either a general-purpose
+ * negotiation supporting any quality of protection, or a specialized
+ * negotiation that enforces privacy as the quality of protection using a
+ * cryptographically strong encryption key.
+ *
+ * This class is used in both the HDFS client and the DataNode.  The DataNode
+ * needs it, because it acts as a client to other DataNodes during write
+ * pipelines and block transfers.
+ */
+@InterfaceAudience.Private
+public class SaslDataTransferClient {
+
+  private static final Logger LOG = LoggerFactory.getLogger(
+    SaslDataTransferClient.class);
+
+  private final Configuration conf;
+  private final AtomicBoolean fallbackToSimpleAuth;
+  private final SaslPropertiesResolver saslPropsResolver;
+  private final TrustedChannelResolver trustedChannelResolver;
+
+  /**
+   * Creates a new SaslDataTransferClient.  This constructor is used in cases
+   * where it is not relevant to track if a secure client did a fallback to
+   * simple auth.  For intra-cluster connections between data nodes in the same
+   * cluster, we can assume that all run under the same security configuration.
+   *
+   * @param conf the configuration
+   * @param saslPropsResolver for determining properties of SASL negotiation
+   * @param trustedChannelResolver for identifying trusted connections that do
+   *   not require SASL negotiation
+   */
+  public SaslDataTransferClient(Configuration conf, 
+      SaslPropertiesResolver saslPropsResolver,
+      TrustedChannelResolver trustedChannelResolver) {
+    this(conf, saslPropsResolver, trustedChannelResolver, null);
+  }
+
+  /**
+   * Creates a new SaslDataTransferClient.
+   *
+   * @param conf the configuration
+   * @param saslPropsResolver for determining properties of SASL negotiation
+   * @param trustedChannelResolver for identifying trusted connections that do
+   *   not require SASL negotiation
+   * @param fallbackToSimpleAuth checked on each attempt at general SASL
+   *   handshake, if true forces use of simple auth
+   */
+  public SaslDataTransferClient(Configuration conf, 
+      SaslPropertiesResolver saslPropsResolver,
+      TrustedChannelResolver trustedChannelResolver,
+      AtomicBoolean fallbackToSimpleAuth) {
+    this.conf = conf;
+    this.fallbackToSimpleAuth = fallbackToSimpleAuth;
+    this.saslPropsResolver = saslPropsResolver;
+    this.trustedChannelResolver = trustedChannelResolver;
+  }
+
+  /**
+   * Sends client SASL negotiation for a newly allocated socket if required.
+   *
+   * @param socket connection socket
+   * @param underlyingOut connection output stream
+   * @param underlyingIn connection input stream
+   * @param encryptionKeyFactory for creation of an encryption key
+   * @param accessToken connection block access token
+   * @param datanodeId ID of destination DataNode
+   * @return new pair of streams, wrapped after SASL negotiation
+   * @throws IOException for any error
+   */
+  public IOStreamPair newSocketSend(Socket socket, OutputStream underlyingOut,
+      InputStream underlyingIn, DataEncryptionKeyFactory encryptionKeyFactory,
+      Token<BlockTokenIdentifier> accessToken, DatanodeID datanodeId)
+      throws IOException {
+    // The encryption key factory only returns a key if encryption is enabled.
+    DataEncryptionKey encryptionKey = !trustedChannelResolver.isTrusted() ?
+      encryptionKeyFactory.newDataEncryptionKey() : null;
+    IOStreamPair ios = send(socket.getInetAddress(), underlyingOut,
+      underlyingIn, encryptionKey, accessToken, datanodeId);
+    return ios != null ? ios : new IOStreamPair(underlyingIn, underlyingOut);
+  }
+
+  /**
+   * Sends client SASL negotiation for a peer if required.
+   *
+   * @param peer connection peer
+   * @param encryptionKeyFactory for creation of an encryption key
+   * @param accessToken connection block access token
+   * @param datanodeId ID of destination DataNode
+   * @return new pair of streams, wrapped after SASL negotiation
+   * @throws IOException for any error
+   */
+  public Peer peerSend(Peer peer, DataEncryptionKeyFactory encryptionKeyFactory,
+      Token<BlockTokenIdentifier> accessToken, DatanodeID datanodeId)
+      throws IOException {
+    IOStreamPair ios = checkTrustAndSend(getPeerAddress(peer),
+      peer.getOutputStream(), peer.getInputStream(), encryptionKeyFactory,
+      accessToken, datanodeId);
+    // TODO: Consider renaming EncryptedPeer to SaslPeer.
+    return ios != null ? new EncryptedPeer(peer, ios) : peer;
+  }
+
+  /**
+   * Sends client SASL negotiation for a socket if required.
+   *
+   * @param socket connection socket
+   * @param underlyingOut connection output stream
+   * @param underlyingIn connection input stream
+   * @param encryptionKeyFactory for creation of an encryption key
+   * @param accessToken connection block access token
+   * @param datanodeId ID of destination DataNode
+   * @return new pair of streams, wrapped after SASL negotiation
+   * @throws IOException for any error
+   */
+  public IOStreamPair socketSend(Socket socket, OutputStream underlyingOut,
+      InputStream underlyingIn, DataEncryptionKeyFactory encryptionKeyFactory,
+      Token<BlockTokenIdentifier> accessToken, DatanodeID datanodeId)
+      throws IOException {
+    IOStreamPair ios = checkTrustAndSend(socket.getInetAddress(), underlyingOut,
+      underlyingIn, encryptionKeyFactory, accessToken, datanodeId);
+    return ios != null ? ios : new IOStreamPair(underlyingIn, underlyingOut);
+  }
+
+  /**
+   * Checks if an address is already trusted and then sends client SASL
+   * negotiation if required.
+   *
+   * @param addr connection address
+   * @param underlyingOut connection output stream
+   * @param underlyingIn connection input stream
+   * @param encryptionKeyFactory for creation of an encryption key
+   * @param accessToken connection block access token
+   * @param datanodeId ID of destination DataNode
+   * @return new pair of streams, wrapped after SASL negotiation
+   * @throws IOException for any error
+   */
+  private IOStreamPair checkTrustAndSend(InetAddress addr,
+      OutputStream underlyingOut, InputStream underlyingIn,
+      DataEncryptionKeyFactory encryptionKeyFactory,
+      Token<BlockTokenIdentifier> accessToken, DatanodeID datanodeId)
+      throws IOException {
+    if (!trustedChannelResolver.isTrusted() &&
+        !trustedChannelResolver.isTrusted(addr)) {
+      // The encryption key factory only returns a key if encryption is enabled.
+      DataEncryptionKey encryptionKey =
+        encryptionKeyFactory.newDataEncryptionKey();
+      return send(addr, underlyingOut, underlyingIn, encryptionKey, accessToken,
+        datanodeId);
+    } else {
+      LOG.debug(
+        "SASL client skipping handshake on trusted connection for addr = {}, "
+        + "datanodeId = {}", addr, datanodeId);
+      return null;
+    }
+  }
+
+  /**
+   * Sends client SASL negotiation if required.  Determines the correct type of
+   * SASL handshake based on configuration.
+   *
+   * @param addr connection address
+   * @param underlyingOut connection output stream
+   * @param underlyingIn connection input stream
+   * @param encryptionKey for an encrypted SASL handshake
+   * @param accessToken connection block access token
+   * @param datanodeId ID of destination DataNode
+   * @return new pair of streams, wrapped after SASL negotiation
+   * @throws IOException for any error
+   */
+  private IOStreamPair send(InetAddress addr, OutputStream underlyingOut,
+      InputStream underlyingIn, DataEncryptionKey encryptionKey,
+      Token<BlockTokenIdentifier> accessToken, DatanodeID datanodeId)
+      throws IOException {
+    if (encryptionKey != null) {
+      LOG.debug(
+        "SASL client doing encrypted handshake for addr = {}, datanodeId = {}",
+        addr, datanodeId);
+      return getEncryptedStreams(underlyingOut, underlyingIn,
+        encryptionKey);
+    } else if (!UserGroupInformation.isSecurityEnabled()) {
+      LOG.debug(
+        "SASL client skipping handshake in unsecured configuration for "
+        + "addr = {}, datanodeId = {}", addr, datanodeId);
+      return null;
+    } else if (SecurityUtil.isPrivilegedPort(datanodeId.getXferPort())) {
+      LOG.debug(
+        "SASL client skipping handshake in secured configuration with "
+        + "privileged port for addr = {}, datanodeId = {}", addr, datanodeId);
+      return null;
+    } else if (fallbackToSimpleAuth != null && fallbackToSimpleAuth.get()) {
+      LOG.debug(
+        "SASL client skipping handshake in secured configuration with "
+        + "unsecured cluster for addr = {}, datanodeId = {}", addr, datanodeId);
+      return null;
+    } else if (saslPropsResolver != null) {
+      LOG.debug(
+        "SASL client doing general handshake for addr = {}, datanodeId = {}",
+        addr, datanodeId);
+      return getSaslStreams(addr, underlyingOut, underlyingIn, accessToken,
+        datanodeId);
+    } else {
+      // It's a secured cluster using non-privileged ports, but no SASL.  The
+      // only way this can happen is if the DataNode has
+      // ignore.secure.ports.for.testing configured, so this is a rare edge case.
+      LOG.debug(
+        "SASL client skipping handshake in secured configuration with no SASL "
+        + "protection configured for addr = {}, datanodeId = {}",
+        addr, datanodeId);
+      return null;
+    }
+  }
+
+  /**
+   * Sends client SASL negotiation for specialized encrypted handshake.
+   *
+   * @param underlyingOut connection output stream
+   * @param underlyingIn connection input stream
+   * @param encryptionKey for an encrypted SASL handshake
+   * @return new pair of streams, wrapped after SASL negotiation
+   * @throws IOException for any error
+   */
+  private IOStreamPair getEncryptedStreams(OutputStream underlyingOut,
+      InputStream underlyingIn, DataEncryptionKey encryptionKey)
+      throws IOException {
+    Map<String, String> saslProps = createSaslPropertiesForEncryption(
+      encryptionKey.encryptionAlgorithm);
+
+    LOG.debug("Client using encryption algorithm {}",
+      encryptionKey.encryptionAlgorithm);
+
+    String userName = getUserNameFromEncryptionKey(encryptionKey);
+    char[] password = encryptionKeyToPassword(encryptionKey.encryptionKey);
+    CallbackHandler callbackHandler = new SaslClientCallbackHandler(userName,
+      password);
+    return doSaslHandshake(underlyingOut, underlyingIn, userName, saslProps,
+      callbackHandler);
+  }
+
+  /**
+   * The SASL username for an encrypted handshake consists of the keyId,
+   * blockPoolId, and nonce with the first two encoded as Strings, and the third
+   * encoded using Base64. The fields are each separated by a single space.
+   * 
+   * @param encryptionKey the encryption key to encode as a SASL username.
+   * @return encoded username containing keyId, blockPoolId, and nonce
+   */
+  private static String getUserNameFromEncryptionKey(
+      DataEncryptionKey encryptionKey) {
+    return encryptionKey.keyId + NAME_DELIMITER +
+        encryptionKey.blockPoolId + NAME_DELIMITER +
+        new String(Base64.encodeBase64(encryptionKey.nonce, false), Charsets.UTF_8);
+  }
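
As a concrete, made-up example of this encoding: keyId 42 in block pool
"BP-1391277117-10.0.0.1-1410136302349" with nonce bytes {0x01, 0x02} yields
the SASL user name

    42 BP-1391277117-10.0.0.1-1410136302349 AQI=

since Base64 of {0x01, 0x02} is "AQI=" and the fields are delimited by a
single space.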
+
+  /**
+   * Sets user name and password when asked by the client-side SASL object.
+   */
+  private static final class SaslClientCallbackHandler
+      implements CallbackHandler {
+
+    private final char[] password;
+    private final String userName;
+
+    /**
+     * Creates a new SaslClientCallbackHandler.
+     *
+     * @param userName SASL user name
+     * @param password SASL password
+     */
+    public SaslClientCallbackHandler(String userName, char[] password) {
+      this.password = password;
+      this.userName = userName;
+    }
+
+    @Override
+    public void handle(Callback[] callbacks) throws IOException,
+        UnsupportedCallbackException {
+      NameCallback nc = null;
+      PasswordCallback pc = null;
+      RealmCallback rc = null;
+      for (Callback callback : callbacks) {
+        if (callback instanceof RealmChoiceCallback) {
+          continue;
+        } else if (callback instanceof NameCallback) {
+          nc = (NameCallback) callback;
+        } else if (callback instanceof PasswordCallback) {
+          pc = (PasswordCallback) callback;
+        } else if (callback instanceof RealmCallback) {
+          rc = (RealmCallback) callback;
+        } else {
+          throw new UnsupportedCallbackException(callback,
+              "Unrecognized SASL client callback");
+        }
+      }
+      if (nc != null) {
+        nc.setName(userName);
+      }
+      if (pc != null) {
+        pc.setPassword(password);
+      }
+      if (rc != null) {
+        rc.setText(rc.getDefaultText());
+      }
+    }
+  }
+
+  /**
+   * Sends client SASL negotiation for general-purpose handshake.
+   *
+   * @param addr connection address
+   * @param underlyingOut connection output stream
+   * @param underlyingIn connection input stream
+   * @param accessToken connection block access token
+   * @param datanodeId ID of destination DataNode
+   * @return new pair of streams, wrapped after SASL negotiation
+   * @throws IOException for any error
+   */
+  private IOStreamPair getSaslStreams(InetAddress addr,
+      OutputStream underlyingOut, InputStream underlyingIn,
+      Token<BlockTokenIdentifier> accessToken, DatanodeID datanodeId)
+      throws IOException {
+    Map<String, String> saslProps = saslPropsResolver.getClientProperties(addr);
+
+    String userName = buildUserName(accessToken);
+    char[] password = buildClientPassword(accessToken);
+    CallbackHandler callbackHandler = new SaslClientCallbackHandler(userName,
+      password);
+    return doSaslHandshake(underlyingOut, underlyingIn, userName, saslProps,
+      callbackHandler);
+  }
+
+  /**
+   * Builds the client's user name for the general-purpose handshake, consisting
+   * of the base64-encoded serialized block access token identifier.  Note that
+   * this includes only the token identifier, not the token itself, which would
+   * include the password.  The password is a shared secret, and we must not
+   * write it on the network during the SASL authentication exchange.
+   *
+   * @param blockToken for block access
+   * @return SASL user name
+   */
+  private static String buildUserName(Token<BlockTokenIdentifier> blockToken) {
+    return new String(Base64.encodeBase64(blockToken.getIdentifier(), false),
+      Charsets.UTF_8);
+  }
+
+  /**
+   * Calculates the password on the client side for the general-purpose
+   * handshake.  The password is the base64-encoded form of the block access
+   * token's password.
+   *
+   * @param blockToken for block access
+   * @return SASL password
+   */    
+  private char[] buildClientPassword(Token<BlockTokenIdentifier> blockToken) {
+    return new String(Base64.encodeBase64(blockToken.getPassword(), false),
+      Charsets.UTF_8).toCharArray();
+  }
+
+  /**
+   * This method actually executes the client-side SASL handshake.
+   *
+   * @param underlyingOut connection output stream
+   * @param underlyingIn connection input stream
+   * @param userName SASL user name
+   * @param saslProps properties of SASL negotiation
+   * @param callbackHandler for responding to SASL callbacks
+   * @return new pair of streams, wrapped after SASL negotiation
+   * @throws IOException for any error
+   */
+  private IOStreamPair doSaslHandshake(OutputStream underlyingOut,
+      InputStream underlyingIn, String userName, Map<String, String> saslProps,
+      CallbackHandler callbackHandler) throws IOException {
+
+    DataOutputStream out = new DataOutputStream(underlyingOut);
+    DataInputStream in = new DataInputStream(underlyingIn);
+
+    SaslParticipant sasl = SaslParticipant.createClientSaslParticipant(userName,
+      saslProps, callbackHandler);
+
+    out.writeInt(SASL_TRANSFER_MAGIC_NUMBER);
+    out.flush();
+
+    try {
+      // Start of handshake - "initial response" in SASL terminology.
+      sendSaslMessage(out, new byte[0]);
+
+      // step 1
+      byte[] remoteResponse = readSaslMessage(in);
+      byte[] localResponse = sasl.evaluateChallengeOrResponse(remoteResponse);
+      List<CipherOption> cipherOptions = null;
+      if (requestedQopContainsPrivacy(saslProps)) {
+        // Negotiate cipher suites if configured.  Currently, the only supported
+        // cipher suite is AES/CTR/NoPadding, but the protocol allows multiple
+        // values for future expansion.
+        String cipherSuites = conf.get(
+            DFS_ENCRYPT_DATA_TRANSFER_CIPHER_SUITES_KEY);
+        if (cipherSuites != null && !cipherSuites.isEmpty()) {
+          if (!cipherSuites.equals(CipherSuite.AES_CTR_NOPADDING.getName())) {
+            throw new IOException(String.format("Invalid cipher suite, %s=%s",
+                DFS_ENCRYPT_DATA_TRANSFER_CIPHER_SUITES_KEY, cipherSuites));
+          }
+          CipherOption option = new CipherOption(CipherSuite.AES_CTR_NOPADDING);
+          cipherOptions = Lists.newArrayListWithCapacity(1);
+          cipherOptions.add(option);
+        }
+      }
+      sendSaslMessageAndNegotiationCipherOptions(out, localResponse, 
+          cipherOptions);
+
+      // step 2 (client-side only)
+      SaslResponseWithNegotiatedCipherOption response = 
+          readSaslMessageAndNegotiatedCipherOption(in);
+      localResponse = sasl.evaluateChallengeOrResponse(response.payload);
+      assert localResponse == null;
+
+      // SASL handshake is complete
+      checkSaslComplete(sasl, saslProps);
+
+      CipherOption cipherOption = null;
+      if (sasl.isNegotiatedQopPrivacy()) {
+        // Unwrap the negotiated cipher option
+        cipherOption = unwrap(response.cipherOption, sasl);
+      }
+
+      // If negotiated cipher option is not null, we will use it to create 
+      // stream pair.
+      return cipherOption != null ? createStreamPair(
+          conf, cipherOption, underlyingOut, underlyingIn, false) : 
+            sasl.createStreamPair(out, in);
+    } catch (IOException ioe) {
+      sendGenericSaslErrorMessage(out, ioe.getMessage());
+      throw ioe;
+    }
+  }
+}
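
To make the client API above concrete, a minimal usage sketch, assuming the
socket, block access token, key factory, and DataNode ID are supplied by the
surrounding HDFS code:

    Configuration conf = new Configuration();
    SaslDataTransferClient saslClient = new SaslDataTransferClient(
        conf,
        SaslPropertiesResolver.getInstance(conf),
        TrustedChannelResolver.getInstance(conf),
        new AtomicBoolean(false));

    // Wraps the raw socket streams when SASL is required; otherwise the
    // original streams come back unchanged inside the IOStreamPair.
    IOStreamPair streams = saslClient.newSocketSend(
        socket, socket.getOutputStream(), socket.getInputStream(),
        encryptionKeyFactory, accessToken, datanodeId);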

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ed78b14e/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslParticipant.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslParticipant.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslParticipant.java
new file mode 100644
index 0000000..f14a075
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslParticipant.java
@@ -0,0 +1,210 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.protocol.datatransfer.sasl;
+
+import java.io.DataInputStream;
+import java.io.DataOutputStream;
+import java.util.Map;
+import javax.security.auth.callback.CallbackHandler;
+import javax.security.sasl.Sasl;
+import javax.security.sasl.SaslClient;
+import javax.security.sasl.SaslException;
+import javax.security.sasl.SaslServer;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.hdfs.protocol.datatransfer.IOStreamPair;
+import org.apache.hadoop.security.SaslInputStream;
+import org.apache.hadoop.security.SaslOutputStream;
+
+/**
+ * Strongly inspired by Thrift's TSaslTransport class.
+ *
+ * Used to abstract over the <code>SaslServer</code> and
+ * <code>SaslClient</code> classes, which share a lot of their interface, but
+ * unfortunately don't share a common superclass.
+ */
+@InterfaceAudience.Private
+class SaslParticipant {
+
+  // This has to be set as part of the SASL spec, but it doesn't matter for
+  // our purposes; it may not be empty, though. It's sent over the wire, so
+  // use a short string.
+  private static final String SERVER_NAME = "0";
+  private static final String PROTOCOL = "hdfs";
+  private static final String MECHANISM = "DIGEST-MD5";
+
+  // One of these will always be null.
+  private final SaslServer saslServer;
+  private final SaslClient saslClient;
+
+  /**
+   * Creates a SaslParticipant wrapping a SaslServer.
+   *
+   * @param saslProps properties of SASL negotiation
+   * @param callbackHandler for handling all SASL callbacks
+   * @return SaslParticipant wrapping SaslServer
+   * @throws SaslException for any error
+   */
+  public static SaslParticipant createServerSaslParticipant(
+      Map<String, String> saslProps, CallbackHandler callbackHandler)
+      throws SaslException {
+    return new SaslParticipant(Sasl.createSaslServer(MECHANISM,
+      PROTOCOL, SERVER_NAME, saslProps, callbackHandler));
+  }
+
+  /**
+   * Creates a SaslParticipant wrapping a SaslClient.
+   *
+   * @param userName SASL user name
+   * @param saslProps properties of SASL negotiation
+   * @param callbackHandler for handling all SASL callbacks
+   * @return SaslParticipant wrapping SaslClient
+   * @throws SaslException for any error
+   */
+  public static SaslParticipant createClientSaslParticipant(String userName,
+      Map<String, String> saslProps, CallbackHandler callbackHandler)
+      throws SaslException {
+    return new SaslParticipant(Sasl.createSaslClient(new String[] { MECHANISM },
+      userName, PROTOCOL, SERVER_NAME, saslProps, callbackHandler));
+  }
+
+  /**
+   * Private constructor wrapping a SaslServer.
+   *
+   * @param saslServer to wrap
+   */
+  private SaslParticipant(SaslServer saslServer) {
+    this.saslServer = saslServer;
+    this.saslClient = null;
+  }
+
+  /**
+   * Private constructor wrapping a SaslClient.
+   *
+   * @param saslClient to wrap
+   */
+  private SaslParticipant(SaslClient saslClient) {
+    this.saslServer = null;
+    this.saslClient = saslClient;
+  }
+
+  /**
+   * @see SaslServer#evaluateResponse
+   * @see SaslClient#evaluateChallenge
+   */
+  public byte[] evaluateChallengeOrResponse(byte[] challengeOrResponse)
+      throws SaslException {
+    if (saslClient != null) {
+      return saslClient.evaluateChallenge(challengeOrResponse);
+    } else {
+      return saslServer.evaluateResponse(challengeOrResponse);
+    }
+  }
+
+  /**
+   * After successful SASL negotiation, returns the negotiated quality of
+   * protection.
+   *
+   * @return negotiated quality of protection
+   */
+  public String getNegotiatedQop() {
+    if (saslClient != null) {
+      return (String) saslClient.getNegotiatedProperty(Sasl.QOP);
+    } else {
+      return (String) saslServer.getNegotiatedProperty(Sasl.QOP);
+    }
+  }
+  
+  /**
+   * After successful SASL negotiation, returns whether the negotiated
+   * quality of protection includes privacy ("auth-conf").
+   *
+   * @return true if the negotiated QOP is privacy
+   */
+  public boolean isNegotiatedQopPrivacy() {
+    String qop = getNegotiatedQop();
+    return qop != null && "auth-conf".equalsIgnoreCase(qop);
+  }
+  
+  /**
+   * Wraps a byte array.
+   * 
+   * @param bytes The array containing the bytes to wrap.
+   * @param off The starting position in the array
+   * @param len The number of bytes to wrap
+   * @return byte[] wrapped bytes
+   * @throws SaslException if the bytes cannot be successfully wrapped
+   */
+  public byte[] wrap(byte[] bytes, int off, int len) throws SaslException {
+    if (saslClient != null) {
+      return saslClient.wrap(bytes, off, len);
+    } else {
+      return saslServer.wrap(bytes, off, len);
+    }
+  }
+  
+  /**
+   * Unwraps a byte array.
+   * 
+   * @param bytes The array containing the bytes to unwrap.
+   * @param off The starting position in the array
+   * @param len The number of bytes to unwrap
+   * @return byte[] unwrapped bytes
+   * @throws SaslException if the bytes cannot be successfully unwrapped
+   */
+  public byte[] unwrap(byte[] bytes, int off, int len) throws SaslException {
+    if (saslClient != null) {
+      return saslClient.unwrap(bytes, off, len);
+    } else {
+      return saslServer.unwrap(bytes, off, len);
+    }
+  }
+
+  /**
+   * Returns true if SASL negotiation is complete.
+   *
+   * @return true if SASL negotiation is complete
+   */
+  public boolean isComplete() {
+    if (saslClient != null) {
+      return saslClient.isComplete();
+    } else {
+      return saslServer.isComplete();
+    }
+  }
+
+  /**
+   * Return some input/output streams that may henceforth have their
+   * communication encrypted, depending on the negotiated quality of protection.
+   *
+   * @param out output stream to wrap
+   * @param in input stream to wrap
+   * @return IOStreamPair wrapping the streams
+   */
+  public IOStreamPair createStreamPair(DataOutputStream out,
+      DataInputStream in) {
+    if (saslClient != null) {
+      return new IOStreamPair(
+          new SaslInputStream(in, saslClient),
+          new SaslOutputStream(out, saslClient));
+    } else {
+      return new IOStreamPair(
+          new SaslInputStream(in, saslServer),
+          new SaslOutputStream(out, saslServer));
+    }
+  }
+}
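
A rough sketch of the client-side exchange this class drives, mirroring
doSaslHandshake in SaslDataTransferClient above (sendSaslMessage and
readSaslMessage stand for the statically imported DataTransferSaslUtil
helpers; framing and error handling omitted):

    SaslParticipant sasl = SaslParticipant.createClientSaslParticipant(
        userName, saslProps, callbackHandler);

    // DIGEST-MD5 has no initial response, so the client opens with an
    // empty message and then answers the server's first challenge.
    sendSaslMessage(out, new byte[0]);
    byte[] response = sasl.evaluateChallengeOrResponse(readSaslMessage(in));
    sendSaslMessage(out, response);

    // The server's final message completes the negotiation.
    sasl.evaluateChallengeOrResponse(readSaslMessage(in));
    if (sasl.isComplete() && sasl.isNegotiatedQopPrivacy()) {
      IOStreamPair wrapped = sasl.createStreamPair(out, in);
    }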

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ed78b14e/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslResponseWithNegotiatedCipherOption.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslResponseWithNegotiatedCipherOption.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslResponseWithNegotiatedCipherOption.java
new file mode 100644
index 0000000..f69441b
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslResponseWithNegotiatedCipherOption.java
@@ -0,0 +1,33 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.protocol.datatransfer.sasl;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.crypto.CipherOption;
+
+@InterfaceAudience.Private
+public class SaslResponseWithNegotiatedCipherOption {
+  final byte[] payload;
+  final CipherOption cipherOption;
+  
+  public SaslResponseWithNegotiatedCipherOption(byte[] payload, 
+      CipherOption cipherOption) {
+    this.payload = payload;
+    this.cipherOption = cipherOption;
+  }
+}
\ No newline at end of file


[45/50] [abbrv] hadoop git commit: HDFS-8984. Move replication queues related methods in FSNamesystem to BlockManager. Contributed by Haohui Mai.

Posted by vv...@apache.org.
HDFS-8984. Move replication queues related methods in FSNamesystem to BlockManager. Contributed by Haohui Mai.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/715b9c64
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/715b9c64
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/715b9c64

Branch: refs/heads/YARN-3926
Commit: 715b9c649982bff91d1f9eae656ba3b82178e1a3
Parents: 8928729
Author: Haohui Mai <wh...@apache.org>
Authored: Fri Sep 4 11:39:58 2015 -0700
Committer: Haohui Mai <wh...@apache.org>
Committed: Fri Sep 4 11:45:31 2015 -0700

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |  3 ++
 .../server/blockmanagement/BlockManager.java    | 54 +++++++++++++++----
 .../server/blockmanagement/DatanodeManager.java |  4 +-
 .../blockmanagement/DecommissionManager.java    |  2 +-
 .../hdfs/server/namenode/FSNamesystem.java      | 57 ++++++--------------
 .../hadoop/hdfs/server/namenode/NameNode.java   |  1 +
 .../hadoop/hdfs/server/namenode/Namesystem.java |  2 +
 .../hadoop/hdfs/server/namenode/SafeMode.java   |  3 --
 .../blockmanagement/TestReplicationPolicy.java  |  3 --
 .../hdfs/server/namenode/NameNodeAdapter.java   |  2 +-
 .../hdfs/server/namenode/TestFSNamesystem.java  | 11 ++--
 .../namenode/TestListCorruptFileBlocks.java     |  3 +-
 12 files changed, 80 insertions(+), 65 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/715b9c64/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index b1e53da..5226d33 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -894,6 +894,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-8384. Allow NN to startup if there are files having a lease but are not
     under construction. (jing9)
 
+    HDFS-8984. Move replication queues related methods in FSNamesystem to
+    BlockManager. (wheat9)
+
   OPTIMIZATIONS
 
     HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/715b9c64/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 08fbd4f..b0a11fe 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -72,6 +72,7 @@ import org.apache.hadoop.hdfs.server.namenode.CachedBlock;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.NameNode.OperationCategory;
 import org.apache.hadoop.hdfs.server.namenode.Namesystem;
+import org.apache.hadoop.hdfs.server.namenode.ha.HAContext;
 import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics;
 import org.apache.hadoop.hdfs.server.protocol.BlockCommand;
 import org.apache.hadoop.hdfs.server.protocol.BlockReportContext;
@@ -127,6 +128,10 @@ public class BlockManager implements BlockStatsMXBean {
   private volatile long corruptReplicaBlocksCount = 0L;
   private volatile long underReplicatedBlocksCount = 0L;
   private volatile long scheduledReplicationBlocksCount = 0L;
+
+  /** flag indicating whether replication queues have been initialized */
+  private boolean initializedReplQueues;
+
   private final AtomicLong excessBlocksCount = new AtomicLong(0L);
   private final AtomicLong postponedMisreplicatedBlocksCount = new AtomicLong(0L);
   private final long startupDelayBlockDeletionInMs;
@@ -1092,7 +1097,7 @@ public class BlockManager implements BlockStatsMXBean {
    * datanode and log the operation
    */
   void addToInvalidates(final Block block, final DatanodeInfo datanode) {
-    if (!namesystem.isPopulatingReplQueues()) {
+    if (!isPopulatingReplQueues()) {
       return;
     }
     invalidateBlocks.add(block, datanode, true);
@@ -1103,7 +1108,7 @@ public class BlockManager implements BlockStatsMXBean {
    * datanodes.
    */
   private void addToInvalidates(Block b) {
-    if (!namesystem.isPopulatingReplQueues()) {
+    if (!isPopulatingReplQueues()) {
       return;
     }
     StringBuilder datanodes = new StringBuilder();
@@ -1124,7 +1129,7 @@ public class BlockManager implements BlockStatsMXBean {
    * is wiped.
    */
   void removeFromInvalidates(final DatanodeInfo datanode) {
-    if (!namesystem.isPopulatingReplQueues()) {
+    if (!isPopulatingReplQueues()) {
       return;
     }
     invalidateBlocks.remove(datanode);
@@ -1211,7 +1216,7 @@ public class BlockManager implements BlockStatsMXBean {
         || corruptedDuringWrite) {
       // the block is over-replicated so invalidate the replicas immediately
       invalidateBlock(b, node);
-    } else if (namesystem.isPopulatingReplQueues()) {
+    } else if (isPopulatingReplQueues()) {
       // add the block to neededReplication
       updateNeededReplications(b.getStored(), -1, 0);
     }
@@ -2484,7 +2489,7 @@ public class BlockManager implements BlockStatsMXBean {
   throws IOException {
     assert (storedBlock != null && namesystem.hasWriteLock());
     if (!namesystem.isInStartupSafeMode() 
-        || namesystem.isPopulatingReplQueues()) {
+        || isPopulatingReplQueues()) {
       addStoredBlock(storedBlock, storageInfo, null, false);
       return;
     }
@@ -2586,7 +2591,7 @@ public class BlockManager implements BlockStatsMXBean {
     }
 
     // do not try to handle over/under-replicated blocks during first safe mode
-    if (!namesystem.isPopulatingReplQueues()) {
+    if (!isPopulatingReplQueues()) {
       return storedBlock;
     }
 
@@ -3323,7 +3328,7 @@ public class BlockManager implements BlockStatsMXBean {
    */
   void processOverReplicatedBlocksOnReCommission(
       final DatanodeDescriptor srcNode) {
-    if (!namesystem.isPopulatingReplQueues()) {
+    if (!isPopulatingReplQueues()) {
       return;
     }
     final Iterator<BlockInfo> it = srcNode.getBlockIterator();
@@ -3417,7 +3422,7 @@ public class BlockManager implements BlockStatsMXBean {
       final int curReplicasDelta, int expectedReplicasDelta) {
     namesystem.writeLock();
     try {
-      if (!namesystem.isPopulatingReplQueues()) {
+      if (!isPopulatingReplQueues()) {
         return;
       }
       NumberReplicas repl = countNodes(block);
@@ -3662,7 +3667,7 @@ public class BlockManager implements BlockStatsMXBean {
       while (namesystem.isRunning()) {
         try {
           // Process replication work only when active NN is out of safe mode.
-          if (namesystem.isPopulatingReplQueues()) {
+          if (isPopulatingReplQueues()) {
             computeDatanodeWork();
             processPendingReplications();
             rescanPostponedMisreplicatedBlocks();
@@ -3790,4 +3795,35 @@ public class BlockManager implements BlockStatsMXBean {
   public Map<StorageType, StorageTypeStats> getStorageTypeStats() {
     return  datanodeManager.getDatanodeStatistics().getStorageTypeStats();
   }
+
+  /**
+   * Initialize replication queues.
+   */
+  public void initializeReplQueues() {
+    LOG.info("initializing replication queues");
+    processMisReplicatedBlocks();
+    initializedReplQueues = true;
+  }
+
+  /**
+   * Check if replication queues are to be populated
+   * @return true when node is HAState.Active and not in the very first safemode
+   */
+  public boolean isPopulatingReplQueues() {
+    if (!shouldPopulateReplQueues()) {
+      return false;
+    }
+    return initializedReplQueues;
+  }
+
+  public void setInitializedReplQueues(boolean v) {
+    this.initializedReplQueues = v;
+  }
+
+  public boolean shouldPopulateReplQueues() {
+    HAContext haContext = namesystem.getHAContext();
+    if (haContext == null || haContext.getState() == null)
+      return false;
+    return haContext.getState().shouldPopulateReplQueues();
+  }
 }
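
With these methods now on BlockManager, call sites query the block manager
directly rather than FSNamesystem; a hypothetical caller looks like:

    BlockManager bm = namesystem.getBlockManager();
    if (bm.isPopulatingReplQueues()) {
      // Replication queues are live, so misreplicated blocks can be
      // re-examined immediately.
      bm.processMisReplicatedBlocks();
    }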

http://git-wip-us.apache.org/repos/asf/hadoop/blob/715b9c64/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
index 95ec648..3114937 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
@@ -1200,7 +1200,7 @@ public class DatanodeManager {
     if (!hasClusterEverBeenMultiRack && networktopology.getNumOfRacks() > 1) {
       String message = "DN " + node + " joining cluster has expanded a formerly " +
           "single-rack cluster to be multi-rack. ";
-      if (namesystem.isPopulatingReplQueues()) {
+      if (blockManager.isPopulatingReplQueues()) {
         message += "Re-checking all blocks for replication, since they should " +
             "now be replicated cross-rack";
         LOG.info(message);
@@ -1210,7 +1210,7 @@ public class DatanodeManager {
         LOG.debug(message);
       }
       hasClusterEverBeenMultiRack = true;
-      if (namesystem.isPopulatingReplQueues()) {
+      if (blockManager.isPopulatingReplQueues()) {
         blockManager.processMisReplicatedBlocks();
       }
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/715b9c64/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java
index 1a20ab0..6d199d7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java
@@ -546,7 +546,7 @@ public class DecommissionManager {
         if (blockManager.isNeededReplication(block, liveReplicas)) {
           if (!blockManager.neededReplications.contains(block) &&
               blockManager.pendingReplications.getNumReplicas(block) == 0 &&
-              namesystem.isPopulatingReplQueues()) {
+              blockManager.isPopulatingReplQueues()) {
             // Process these blocks only when active NN is out of safe mode.
             blockManager.neededReplications.add(block,
                 curReplicas,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/715b9c64/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 25b6928..1b770b0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -506,9 +506,6 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
 
   private final boolean haEnabled;
 
-  /** flag indicating whether replication queues have been initialized */
-  boolean initializedReplQueues = false;
-
   /**
    * Whether the namenode is in the middle of starting the active service
    */
@@ -1038,7 +1035,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
     try {
       nnResourceChecker = new NameNodeResourceChecker(conf);
       checkAvailableResources();
-      assert safeMode != null && !isPopulatingReplQueues();
+      assert safeMode != null && !blockManager.isPopulatingReplQueues();
       StartupProgress prog = NameNode.getStartupProgress();
       prog.beginPhase(Phase.SAFEMODE);
       long completeBlocksTotal = getCompleteBlocksTotal();
@@ -1105,7 +1102,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
         // Only need to re-process the queue, If not in SafeMode.
         if (!isInSafeMode()) {
           LOG.info("Reprocessing replication and invalidation queues");
-          initializeReplQueues();
+          blockManager.initializeReplQueues();
         }
 
         if (LOG.isDebugEnabled()) {
@@ -1164,15 +1161,6 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
     return haContext != null &&
         haContext.getState().getServiceState() == HAServiceState.ACTIVE;
   }
-  
-  /**
-   * Initialize replication queues.
-   */
-  private void initializeReplQueues() {
-    LOG.info("initializing replication queues");
-    blockManager.processMisReplicatedBlocks();
-    initializedReplQueues = true;
-  }
 
   /**
    * @return Whether the namenode is transitioning to active state and is in the
@@ -1225,8 +1213,8 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
         blockManager.getDatanodeManager().setShouldSendCachingCommands(false);
         // Don't want to keep replication queues when not in Active.
         blockManager.clearQueues();
+        blockManager.setInitializedReplQueues(false);
       }
-      initializedReplQueues = false;
     } finally {
       writeUnlock();
     }
@@ -4237,8 +4225,8 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
     private synchronized void leave() {
       // if not done yet, initialize replication queues.
       // In the standby, do not populate repl queues
-      if (!isPopulatingReplQueues() && shouldPopulateReplQueues()) {
-        initializeReplQueues();
+      if (!blockManager.isPopulatingReplQueues() && blockManager.shouldPopulateReplQueues()) {
+        blockManager.initializeReplQueues();
       }
       long timeInSafemode = now() - startTime;
       NameNode.stateChangeLog.info("STATE* Leaving safe mode after " 
@@ -4274,7 +4262,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
      * initializing replication queues.
      */
     private synchronized boolean canInitializeReplQueues() {
-      return shouldPopulateReplQueues()
+      return blockManager.shouldPopulateReplQueues()
           && blockSafe >= blockReplQueueThreshold;
     }
       
@@ -4327,9 +4315,9 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
       if (smmthread == null && needEnter()) {
         enter();
         // check if we are ready to initialize replication queues
-        if (canInitializeReplQueues() && !isPopulatingReplQueues()
+        if (canInitializeReplQueues() && !blockManager.isPopulatingReplQueues()
             && !haEnabled) {
-          initializeReplQueues();
+          blockManager.initializeReplQueues();
         }
         reportStatus("STATE* Safe mode ON.", false);
         return;
@@ -4354,8 +4342,8 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
       }
 
       // check if we are ready to initialize replication queues
-      if (canInitializeReplQueues() && !isPopulatingReplQueues() && !haEnabled) {
-        initializeReplQueues();
+      if (canInitializeReplQueues() && !blockManager.isPopulatingReplQueues() && !haEnabled) {
+        blockManager.initializeReplQueues();
       }
     }
       
@@ -4658,24 +4646,6 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
       && safeMode.isOn();
   }
 
-  /**
-   * Check if replication queues are to be populated
-   * @return true when node is HAState.Active and not in the very first safemode
-   */
-  @Override
-  public boolean isPopulatingReplQueues() {
-    if (!shouldPopulateReplQueues()) {
-      return false;
-    }
-    return initializedReplQueues;
-  }
-
-  private boolean shouldPopulateReplQueues() {
-    if(haContext == null || haContext.getState() == null)
-      return false;
-    return haContext.getState().shouldPopulateReplQueues();
-  }
-
   @Override
   public void incrementSafeBlockCount(int replication) {
     // safeMode is volatile, and may be set to null at any time
@@ -5493,7 +5463,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
     readLock();
     try {
       checkOperation(OperationCategory.READ);
-      if (!isPopulatingReplQueues()) {
+      if (!blockManager.isPopulatingReplQueues()) {
         throw new IOException("Cannot run listCorruptFileBlocks because " +
                               "replication queues have not been initialized.");
       }
@@ -6169,6 +6139,11 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
     return cacheManager;
   }
 
+  @Override
+  public HAContext getHAContext() {
+    return haContext;
+  }
+
   @Override  // NameNodeMXBean
   public String getCorruptFiles() {
     List<String> list = new ArrayList<String>();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/715b9c64/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
index fac2d37..6e32066 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
@@ -41,6 +41,7 @@ import org.apache.hadoop.hdfs.HAUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.RollingUpgradeStartupOption;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/715b9c64/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Namesystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Namesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Namesystem.java
index 4a208d8..5bc4033 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Namesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Namesystem.java
@@ -22,6 +22,7 @@ import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockCollection;
 import org.apache.hadoop.hdfs.server.namenode.NameNode.OperationCategory;
+import org.apache.hadoop.hdfs.server.namenode.ha.HAContext;
 import org.apache.hadoop.hdfs.util.RwLock;
 import org.apache.hadoop.ipc.StandbyException;
 import org.apache.hadoop.security.AccessControlException;
@@ -51,4 +52,5 @@ public interface Namesystem extends RwLock, SafeMode {
   boolean isInSnapshot(BlockInfo blockUC);
 
   CacheManager getCacheManager();
+  HAContext getHAContext();
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/715b9c64/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SafeMode.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SafeMode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SafeMode.java
index 1428482..06a8219 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SafeMode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SafeMode.java
@@ -39,9 +39,6 @@ public interface SafeMode {
    */
   public boolean isInStartupSafeMode();
 
-  /** Check whether replication queues are being populated. */
-  public boolean isPopulatingReplQueues();
-    
   /**
    * Increment number of blocks that reached minimal replication.
    * @param replication current replication 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/715b9c64/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
index 27d647c..278b105 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
@@ -1221,7 +1221,6 @@ public class TestReplicationPolicy {
   public void testAddStoredBlockDoesNotCauseSkippedReplication()
       throws IOException {
     Namesystem mockNS = mock(Namesystem.class);
-    when(mockNS.isPopulatingReplQueues()).thenReturn(true);
     when(mockNS.hasWriteLock()).thenReturn(true);
     when(mockNS.hasReadLock()).thenReturn(true);
     BlockManager bm = new BlockManager(mockNS, new HdfsConfiguration());
@@ -1271,7 +1270,6 @@ public class TestReplicationPolicy {
       testConvertLastBlockToUnderConstructionDoesNotCauseSkippedReplication()
           throws IOException {
     Namesystem mockNS = mock(Namesystem.class);
-    when(mockNS.isPopulatingReplQueues()).thenReturn(true);
     when(mockNS.hasReadLock()).thenReturn(true);
 
     BlockManager bm = new BlockManager(mockNS, new HdfsConfiguration());
@@ -1334,7 +1332,6 @@ public class TestReplicationPolicy {
   public void testupdateNeededReplicationsDoesNotCauseSkippedReplication()
       throws IOException {
     Namesystem mockNS = mock(Namesystem.class);
-    when(mockNS.isPopulatingReplQueues()).thenReturn(true);
     when(mockNS.hasReadLock()).thenReturn(true);
 
     BlockManager bm = new BlockManager(mockNS, new HdfsConfiguration());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/715b9c64/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java
index b314584..64f614d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java
@@ -243,7 +243,7 @@ public class NameNodeAdapter {
    * @return Replication queue initialization status
    */
   public static boolean safeModeInitializedReplQueues(NameNode nn) {
-    return nn.getNamesystem().isPopulatingReplQueues();
+    return nn.getNamesystem().getBlockManager().isPopulatingReplQueues();
   }
   
   public static File getInProgressEditsFile(StorageDirectory sd, long startTxId) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/715b9c64/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystem.java
index 7f72797..c2d8c57 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystem.java
@@ -32,6 +32,7 @@ import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
 import org.apache.hadoop.hdfs.server.namenode.ha.HAContext;
 import org.apache.hadoop.hdfs.server.namenode.ha.HAState;
@@ -123,13 +124,15 @@ public class TestFSNamesystem {
 
     FSNamesystem fsNamesystem = new FSNamesystem(conf, fsImage);
     FSNamesystem fsn = Mockito.spy(fsNamesystem);
+    BlockManager bm = fsn.getBlockManager();
+    Whitebox.setInternalState(bm, "namesystem", fsn);
 
     // Make shouldPopulateReplQueues return true
     HAContext haContext = Mockito.mock(HAContext.class);
     HAState haState = Mockito.mock(HAState.class);
     Mockito.when(haContext.getState()).thenReturn(haState);
     Mockito.when(haState.shouldPopulateReplQueues()).thenReturn(true);
-    Whitebox.setInternalState(fsn, "haContext", haContext);
+    Mockito.when(fsn.getHAContext()).thenReturn(haContext);
 
     //Make NameNode.getNameNodeMetrics() not return null
     NameNode.initMetrics(conf, NamenodeRole.NAMENODE);
@@ -137,15 +140,15 @@ public class TestFSNamesystem {
     fsn.enterSafeMode(false);
     assertTrue("FSNamesystem didn't enter safemode", fsn.isInSafeMode());
     assertTrue("Replication queues were being populated during very first "
-        + "safemode", !fsn.isPopulatingReplQueues());
+        + "safemode", !bm.isPopulatingReplQueues());
     fsn.leaveSafeMode();
     assertTrue("FSNamesystem didn't leave safemode", !fsn.isInSafeMode());
     assertTrue("Replication queues weren't being populated even after leaving "
-      + "safemode", fsn.isPopulatingReplQueues());
+      + "safemode", bm.isPopulatingReplQueues());
     fsn.enterSafeMode(false);
     assertTrue("FSNamesystem didn't enter safemode", fsn.isInSafeMode());
     assertTrue("Replication queues weren't being populated after entering "
-      + "safemode 2nd time", fsn.isPopulatingReplQueues());
+      + "safemode 2nd time", bm.isPopulatingReplQueues());
   }
   
   @Test

http://git-wip-us.apache.org/repos/asf/hadoop/blob/715b9c64/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListCorruptFileBlocks.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListCorruptFileBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListCorruptFileBlocks.java
index 3afdd0e..99dce1d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListCorruptFileBlocks.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListCorruptFileBlocks.java
@@ -210,7 +210,8 @@ public class TestListCorruptFileBlocks {
       fs = cluster.getFileSystem();
 
       // wait until replication queues have been initialized
-      while (!cluster.getNameNode().namesystem.isPopulatingReplQueues()) {
+      while (!cluster.getNameNode().namesystem.getBlockManager()
+          .isPopulatingReplQueues()) {
         try {
           LOG.info("waiting for replication queues");
           Thread.sleep(1000);


[22/50] [abbrv] hadoop git commit: YARN-4073. Removed unused ApplicationACLsManager in ContainerManagerImpl constructor. (Naganarasimha G R via rohithsharmaks)

Posted by vv...@apache.org.
YARN-4073. Removed unused ApplicationACLsManager in ContainerManagerImpl constructor. (Naganarasimha G R via rohithsharmaks)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/095ab9ab
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/095ab9ab
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/095ab9ab

Branch: refs/heads/YARN-3926
Commit: 095ab9ab5fa5f415fba41fee81b6008cf0455af8
Parents: 65ccf2b
Author: Rohith Sharma K S <ro...@apache.org>
Authored: Wed Sep 2 14:13:33 2015 +0530
Committer: Rohith Sharma K S <ro...@apache.org>
Committed: Wed Sep 2 14:13:33 2015 +0530

----------------------------------------------------------------------
 hadoop-yarn-project/CHANGES.txt                               | 3 +++
 .../apache/hadoop/yarn/server/nodemanager/NodeManager.java    | 2 +-
 .../nodemanager/containermanager/ContainerManagerImpl.java    | 6 +-----
 .../hadoop/yarn/server/nodemanager/DummyContainerManager.java | 7 ++-----
 .../apache/hadoop/yarn/server/nodemanager/TestEventFlow.java  | 3 +--
 .../hadoop/yarn/server/nodemanager/TestNodeManagerResync.java | 2 +-
 .../hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java | 6 +++---
 .../containermanager/BaseContainerManagerTest.java            | 3 +--
 .../nodemanager/containermanager/TestContainerManager.java    | 5 ++---
 .../containermanager/TestContainerManagerRecovery.java        | 3 +--
 .../yarn/server/nodemanager/containermanager/TestNMProxy.java | 3 +--
 11 files changed, 17 insertions(+), 26 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/095ab9ab/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index b33111c..aade2d7 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -807,6 +807,9 @@ Release 2.8.0 - UNRELEASED
     YARN-4082. Container shouldn't be killed when node's label updated.
     (Wangda Tan via vvasudev)
 
+    YARN-4073. Removed unused ApplicationACLsManager in ContainerManagerImpl constructor.
+    (Naganarasimha G R via rohithsharmaks)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/095ab9ab/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java
index 68820a7..3cf9f1a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java
@@ -166,7 +166,7 @@ public class NodeManager extends CompositeService
       NodeStatusUpdater nodeStatusUpdater, ApplicationACLsManager aclsManager,
       LocalDirsHandlerService dirsHandler) {
     return new ContainerManagerImpl(context, exec, del, nodeStatusUpdater,
-      metrics, aclsManager, dirsHandler);
+      metrics, dirsHandler);
   }
 
   protected WebServer createWebServer(Context nmContext,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/095ab9ab/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
index 494fa8f..68c7f2c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
@@ -134,7 +134,6 @@ import org.apache.hadoop.yarn.server.nodemanager.recovery.NMStateStoreService.Re
 import org.apache.hadoop.yarn.server.nodemanager.recovery.NMStateStoreService.RecoveredContainerState;
 import org.apache.hadoop.yarn.server.nodemanager.recovery.NMStateStoreService.RecoveredContainerStatus;
 import org.apache.hadoop.yarn.server.nodemanager.security.authorize.NMPolicyProvider;
-import org.apache.hadoop.yarn.server.security.ApplicationACLsManager;
 import org.apache.hadoop.yarn.server.utils.BuilderUtils;
 
 import com.google.common.annotations.VisibleForTesting;
@@ -167,7 +166,6 @@ public class ContainerManagerImpl extends CompositeService implements
 
   protected LocalDirsHandlerService dirsHandler;
   protected final AsyncDispatcher dispatcher;
-  private final ApplicationACLsManager aclsManager;
 
   private final DeletionService deletionService;
   private AtomicBoolean blockNewContainerRequests = new AtomicBoolean(false);
@@ -179,8 +177,7 @@ public class ContainerManagerImpl extends CompositeService implements
 
   public ContainerManagerImpl(Context context, ContainerExecutor exec,
       DeletionService deletionContext, NodeStatusUpdater nodeStatusUpdater,
-      NodeManagerMetrics metrics, ApplicationACLsManager aclsManager,
-      LocalDirsHandlerService dirsHandler) {
+      NodeManagerMetrics metrics, LocalDirsHandlerService dirsHandler) {
     super(ContainerManagerImpl.class.getName());
     this.context = context;
     this.dirsHandler = dirsHandler;
@@ -198,7 +195,6 @@ public class ContainerManagerImpl extends CompositeService implements
     addService(containersLauncher);
 
     this.nodeStatusUpdater = nodeStatusUpdater;
-    this.aclsManager = aclsManager;
 
     // Start configurable services
     auxiliaryServices = new AuxServices();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/095ab9ab/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/DummyContainerManager.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/DummyContainerManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/DummyContainerManager.java
index f872a55..349340b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/DummyContainerManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/DummyContainerManager.java
@@ -54,7 +54,6 @@ import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.even
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.loghandler.LogHandler;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.loghandler.event.LogHandlerEvent;
 import org.apache.hadoop.yarn.server.nodemanager.metrics.NodeManagerMetrics;
-import org.apache.hadoop.yarn.server.security.ApplicationACLsManager;
 
 public class DummyContainerManager extends ContainerManagerImpl {
 
@@ -63,11 +62,9 @@ public class DummyContainerManager extends ContainerManagerImpl {
   
   public DummyContainerManager(Context context, ContainerExecutor exec,
       DeletionService deletionContext, NodeStatusUpdater nodeStatusUpdater,
-      NodeManagerMetrics metrics,
-      ApplicationACLsManager applicationACLsManager,
-      LocalDirsHandlerService dirsHandler) {
+      NodeManagerMetrics metrics, LocalDirsHandlerService dirsHandler) {
     super(context, exec, deletionContext, nodeStatusUpdater, metrics,
-      applicationACLsManager, dirsHandler);
+        dirsHandler);
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hadoop/blob/095ab9ab/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestEventFlow.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestEventFlow.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestEventFlow.java
index c878e5c..3dc62bc 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestEventFlow.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestEventFlow.java
@@ -47,7 +47,6 @@ import org.apache.hadoop.yarn.server.nodemanager.metrics.NodeManagerMetrics;
 import org.apache.hadoop.yarn.server.nodemanager.recovery.NMNullStateStoreService;
 import org.apache.hadoop.yarn.server.nodemanager.security.NMContainerTokenSecretManager;
 import org.apache.hadoop.yarn.server.nodemanager.security.NMTokenSecretManagerInNM;
-import org.apache.hadoop.yarn.server.security.ApplicationACLsManager;
 import org.junit.Test;
 
 
@@ -128,7 +127,7 @@ public class TestEventFlow {
 
     DummyContainerManager containerManager =
         new DummyContainerManager(context, exec, del, nodeStatusUpdater,
-          metrics, new ApplicationACLsManager(conf), dirsHandler);
+          metrics, dirsHandler);
     nodeStatusUpdater.init(conf);
     ((NMContext)context).setContainerManager(containerManager);
     nodeStatusUpdater.start();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/095ab9ab/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerResync.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerResync.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerResync.java
index 611e671..c22d475 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerResync.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerResync.java
@@ -435,7 +435,7 @@ public class TestNodeManagerResync {
         NodeStatusUpdater nodeStatusUpdater, ApplicationACLsManager aclsManager,
         LocalDirsHandlerService dirsHandler) {
       return new ContainerManagerImpl(context, exec, del, nodeStatusUpdater,
-        metrics, aclsManager, dirsHandler){
+        metrics, dirsHandler){
         @Override
         public void setBlockNewContainerRequests(
             boolean blockNewContainerRequests) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/095ab9ab/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java
index a9ef72f..3c0368b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java
@@ -1160,7 +1160,7 @@ public class TestNodeStatusUpdater {
           ApplicationACLsManager aclsManager,
           LocalDirsHandlerService dirsHandler) {
         return new ContainerManagerImpl(context, exec, del, nodeStatusUpdater,
-            metrics, aclsManager, dirsHandler) {
+            metrics, dirsHandler) {
 
           @Override
           public void cleanUpApplicationsOnNMShutDown() {
@@ -1373,7 +1373,7 @@ public class TestNodeStatusUpdater {
           ApplicationACLsManager aclsManager,
           LocalDirsHandlerService diskhandler) {
         return new ContainerManagerImpl(context, exec, del, nodeStatusUpdater,
-          metrics, aclsManager, diskhandler) {
+          metrics, diskhandler) {
           @Override
           protected void serviceStart() {
             // Simulating failure of starting RPC server
@@ -1523,7 +1523,7 @@ public class TestNodeStatusUpdater {
           ApplicationACLsManager aclsManager,
           LocalDirsHandlerService dirsHandler) {
         return new ContainerManagerImpl(context, exec, del, nodeStatusUpdater,
-            metrics, aclsManager, dirsHandler) {
+            metrics, dirsHandler) {
 
           @Override
           public void cleanUpApplicationsOnNMShutDown() {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/095ab9ab/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/BaseContainerManagerTest.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/BaseContainerManagerTest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/BaseContainerManagerTest.java
index a8e723d..2810662 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/BaseContainerManagerTest.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/BaseContainerManagerTest.java
@@ -37,7 +37,6 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.UnsupportedFileSystemException;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.SecretManager.InvalidToken;
-import org.apache.hadoop.util.NodeHealthScriptRunner;
 import org.apache.hadoop.yarn.api.ContainerManagementProtocol;
 import org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusesRequest;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
@@ -192,7 +191,7 @@ public abstract class BaseContainerManagerTest {
       createContainerManager(DeletionService delSrvc) {
     
     return new ContainerManagerImpl(context, exec, delSrvc, nodeStatusUpdater,
-      metrics, new ApplicationACLsManager(conf), dirsHandler) {
+      metrics, dirsHandler) {
       @Override
       public void
           setBlockNewContainerRequests(boolean blockNewContainerRequests) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/095ab9ab/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManager.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManager.java
index 7bdfdfb..e508424 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManager.java
@@ -80,7 +80,6 @@ import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Cont
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.ContainerLocalizer;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.ResourceLocalizationService;
 import org.apache.hadoop.yarn.server.nodemanager.security.NMContainerTokenSecretManager;
-import org.apache.hadoop.yarn.server.security.ApplicationACLsManager;
 import org.apache.hadoop.yarn.server.utils.BuilderUtils;
 import org.apache.hadoop.yarn.util.ConverterUtils;
 import org.junit.Assert;
@@ -116,7 +115,7 @@ public class TestContainerManager extends BaseContainerManagerTest {
   protected ContainerManagerImpl
       createContainerManager(DeletionService delSrvc) {
     return new ContainerManagerImpl(context, exec, delSrvc, nodeStatusUpdater,
-      metrics, new ApplicationACLsManager(conf), dirsHandler) {
+      metrics, dirsHandler) {
       @Override
       public void
           setBlockNewContainerRequests(boolean blockNewContainerRequests) {
@@ -801,7 +800,7 @@ public class TestContainerManager extends BaseContainerManagerTest {
   public void testNullTokens() throws Exception {
     ContainerManagerImpl cMgrImpl =
         new ContainerManagerImpl(context, exec, delSrvc, nodeStatusUpdater,
-        metrics, new ApplicationACLsManager(conf), dirsHandler);
+        metrics, dirsHandler);
     String strExceptionMsg = "";
     try {
       cMgrImpl.authorizeStartRequest(null, new ContainerTokenIdentifier());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/095ab9ab/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManagerRecovery.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManagerRecovery.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManagerRecovery.java
index 781950e..4d0aacd 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManagerRecovery.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManagerRecovery.java
@@ -394,8 +394,7 @@ public class TestContainerManagerRecovery {
 
     return new ContainerManagerImpl(context,
         mock(ContainerExecutor.class), mock(DeletionService.class),
-        mock(NodeStatusUpdater.class), metrics,
-        context.getApplicationACLsManager(), null) {
+        mock(NodeStatusUpdater.class), metrics, null) {
           @Override
           protected LogHandler createLogHandler(Configuration conf,
               Context context, DeletionService deletionService) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/095ab9ab/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestNMProxy.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestNMProxy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestNMProxy.java
index 102c9c6..152b92c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestNMProxy.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestNMProxy.java
@@ -41,7 +41,6 @@ import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.ipc.YarnRPC;
 import org.apache.hadoop.yarn.security.NMTokenIdentifier;
 import org.apache.hadoop.yarn.server.nodemanager.DeletionService;
-import org.apache.hadoop.yarn.server.security.ApplicationACLsManager;
 import org.apache.hadoop.yarn.util.ConverterUtils;
 import org.apache.hadoop.yarn.util.Records;
 import org.junit.Assert;
@@ -67,7 +66,7 @@ public class TestNMProxy extends BaseContainerManagerTest {
   protected ContainerManagerImpl
       createContainerManager(DeletionService delSrvc) {
     return new ContainerManagerImpl(context, exec, delSrvc, nodeStatusUpdater,
-      metrics, new ApplicationACLsManager(conf), dirsHandler) {
+      metrics, dirsHandler) {
 
       @Override
       public StartContainersResponse startContainers(


[31/50] [abbrv] hadoop git commit: HADOOP-5323. Trash documentation should describe its directory structure and configurations. Contributed by Weiwei Yang.

Posted by vv...@apache.org.
HADOOP-5323. Trash documentation should describe its directory structure and configurations. Contributed by Weiwei Yang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9a87f81c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9a87f81c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9a87f81c

Branch: refs/heads/YARN-3926
Commit: 9a87f81ce6839d1b5ab3d663c1e82c7e572bdeb8
Parents: b469ac5
Author: Tsuyoshi Ozawa <oz...@apache.org>
Authored: Thu Sep 3 22:34:25 2015 +0900
Committer: Tsuyoshi Ozawa <oz...@apache.org>
Committed: Thu Sep 3 22:34:25 2015 +0900

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt                    | 3 +++
 .../hadoop-hdfs/src/site/markdown/HdfsDesign.md                | 6 ++++--
 2 files changed, 7 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9a87f81c/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 22b80a6..275beb2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1297,6 +1297,9 @@ Release 2.7.2 - UNRELEASED
 
     HDFS-8659. Block scanner INFO message is spamming logs. (Yongjun Zhang)
 
+    HADOOP-5323. Trash documentation should describe its directory structure and
+    configurations. (Weiwei Yang via ozawa)
+
   OPTIMIZATIONS
 
     HDFS-8722. Optimize datanode writes for small writes and flushes (kihwal)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9a87f81c/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsDesign.md
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsDesign.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsDesign.md
index c441ae8..5b1f66e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsDesign.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsDesign.md
@@ -226,9 +226,11 @@ Space Reclamation
 
 ### File Deletes and Undeletes
 
-When a file is deleted by a user or an application, it is not immediately removed from HDFS. Instead, HDFS first renames it to a file in the trash directory(`/user/<username>/.Trash`). The file can be restored quickly as long as it remains in trash. A file remains in trash for a configurable amount of time. After the expiry of its life in trash, the NameNode deletes the file from the HDFS namespace. The deletion of a file causes the blocks associated with the file to be freed. Note that there could be an appreciable time delay between the time a file is deleted by a user and the time of the corresponding increase in free space in HDFS.
+When a file is deleted by a user or an application, it is not immediately removed from HDFS. Instead, HDFS moves it to a trash directory (each user has their own trash directory under `/user/<username>/.Trash`).
+The file can be restored quickly as long as it remains in trash. The most recently deleted files are moved to the current trash directory (`/user/<username>/.Trash/Current`), and at a configurable interval, HDFS creates checkpoints (under `/user/<username>/.Trash/<date>`) of the files in the current trash directory and deletes old checkpoints once they expire.
+After the expiry of its life in trash, the NameNode deletes the file from the HDFS namespace. The deletion of a file causes the blocks associated with the file to be freed. Note that there could be an appreciable time delay between the time a file is deleted by a user and the time of the corresponding increase in free space in HDFS.
 
-A user can Undelete a file after deleting it as long as it remains in the trash directory. If a user wants to undelete a file that he/she has deleted, he/she can navigate the trash directory and retrieve the file. The trash directory contains only the latest copy of the file that was deleted. The trash directory is just like any other directory with one special feature: HDFS applies specified policies to automatically delete files from this directory. Current default trash interval is set to 0 (Deletes file without storing in trash). This value is configurable parameter stored as `fs.trash.interval` stored in core-site.xml.
+Currently, the trash feature is disabled by default (files are deleted without being stored in trash). Users can enable it by setting a value greater than zero for the parameter `fs.trash.interval` (in core-site.xml). This value tells the NameNode how long to keep a checkpoint before it expires and is removed from HDFS. In addition, users can tell the NameNode how often to create checkpoints in trash via the parameter `fs.trash.checkpoint.interval` (also in core-site.xml); this value should be smaller than or equal to `fs.trash.interval`.
 
 ### Decrease Replication Factor
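For reference, the two knobs described in the documentation change above live in core-site.xml. A minimal sketch enabling trash with one-day retention and hourly checkpoints might look like the following (values are illustrative; both intervals are expressed in minutes):

<property>
  <name>fs.trash.interval</name>
  <value>1440</value>
  <description>Minutes after which a trash checkpoint expires and is
  deleted by the NameNode. A value of 0 disables the trash feature.</description>
</property>

<property>
  <name>fs.trash.checkpoint.interval</name>
  <value>60</value>
  <description>Minutes between trash checkpoint creations; should not
  exceed fs.trash.interval.</description>
</property>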
 


[13/50] [abbrv] hadoop git commit: YARN-4082. Container shouldn't be killed when node's label updated. Contributed by Wangda Tan.

Posted by vv...@apache.org.
YARN-4082. Container shouldn't be killed when node's label updated. Contributed by Wangda Tan.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bf669b6d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bf669b6d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bf669b6d

Branch: refs/heads/YARN-3926
Commit: bf669b6d9f8ba165e30b8823218d625a49958925
Parents: f4d96be
Author: Varun Vasudev <vv...@apache.org>
Authored: Tue Sep 1 14:19:11 2015 +0530
Committer: Varun Vasudev <vv...@apache.org>
Committed: Tue Sep 1 14:19:11 2015 +0530

----------------------------------------------------------------------
 hadoop-yarn-project/CHANGES.txt                 |   3 +
 .../scheduler/capacity/AbstractCSQueue.java     |  27 ++
 .../scheduler/capacity/CSQueue.java             |  26 ++
 .../scheduler/capacity/CapacityScheduler.java   |  40 +--
 .../scheduler/capacity/LeafQueue.java           |  16 ++
 .../scheduler/common/fica/FiCaSchedulerApp.java |   9 +
 .../TestCapacitySchedulerNodeLabelUpdate.java   | 249 ++++++++++++++++---
 7 files changed, 314 insertions(+), 56 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bf669b6d/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 80cf793..999654d 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -804,6 +804,9 @@ Release 2.8.0 - UNRELEASED
     YARN-3896. RMNode transitioned from RUNNING to REBOOTED because its response id 
     has not been reset synchronously. (Jun Gong via rohithsharmaks)
 
+    YARN-4082. Container shouldn't be killed when node's label updated.
+    (Wangda Tan via vvasudev)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bf669b6d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java
index 792c25c..0ae4d1a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java
@@ -46,6 +46,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsMana
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.NodeType;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceLimits;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceUsage;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApplicationAttempt;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerUtils;
 import org.apache.hadoop.yarn.util.resource.ResourceCalculator;
 import org.apache.hadoop.yarn.util.resource.Resources;
@@ -543,6 +544,32 @@ public abstract class AbstractCSQueue implements CSQueue {
     }
   }
   
+  @Override
+  public void incUsedResource(String nodeLabel, Resource resourceToInc,
+      SchedulerApplicationAttempt application) {
+    if (nodeLabel == null) {
+      nodeLabel = RMNodeLabelsManager.NO_LABEL;
+    }
+    // ResourceUsage has its own lock, so no additional lock is needed here.
+    queueUsage.incUsed(nodeLabel, resourceToInc);
+    if (null != parent) {
+      parent.incUsedResource(nodeLabel, resourceToInc, null);
+    }
+  }
+
+  @Override
+  public void decUsedResource(String nodeLabel, Resource resourceToDec,
+      SchedulerApplicationAttempt application) {
+    if (nodeLabel == null) {
+      nodeLabel = RMNodeLabelsManager.NO_LABEL;
+    }
+    // ResourceUsage has its own lock, so no additional lock is needed here.
+    queueUsage.decUsed(nodeLabel, resourceToDec);
+    if (null != parent) {
+      parent.decUsedResource(nodeLabel, resourceToDec, null);
+    }
+  }
+
   /**
    * Return if the queue has pending resource on given nodePartition and
    * schedulingMode. 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bf669b6d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CSQueue.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CSQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CSQueue.java
index b06a646..9855dd4 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CSQueue.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CSQueue.java
@@ -37,6 +37,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerEven
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ActiveUsersManager;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceLimits;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceUsage;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApplicationAttempt;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerNode;
 
@@ -287,4 +288,29 @@ extends org.apache.hadoop.yarn.server.resourcemanager.scheduler.Queue {
    * @return resourceUsage
    */
   public ResourceUsage getQueueResourceUsage();
+
+  /**
+   * When the partition of a node is updated, update the queue's resource
+   * usage if it has container(s) running on that node.
+   */
+  public void incUsedResource(String nodePartition, Resource resourceToInc,
+      SchedulerApplicationAttempt application);
+
+  /**
+   * When the partition of a node is updated, update the queue's resource
+   * usage if it has container(s) running on that node.
+   */
+  public void decUsedResource(String nodePartition, Resource resourceToDec,
+      SchedulerApplicationAttempt application);
+
+  /**
+   * When an outstanding resource request is fulfilled or cancelled, calling
+   * this will decrease the pending resource in a queue.
+   *
+   * @param nodeLabel
+   *          the node label asked for by the application
+   * @param resourceToDec
+   *          the resource amount by which to decrease the pending total
+   */
+  public void decPendingResource(String nodeLabel, Resource resourceToDec);
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bf669b6d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
index cff1fe5..b5ccbd9 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
@@ -1040,12 +1040,6 @@ public class CapacityScheduler extends
   
   /**
    * Process node labels update on a node.
-   * 
-   * TODO: Currently capacity scheduler will kill containers on a node when
-   * labels on the node changed. It is a simply solution to ensure guaranteed
-   * capacity on labels of queues. When YARN-2498 completed, we can let
-   * preemption policy to decide if such containers need to be killed or just
-   * keep them running.
    */
   private synchronized void updateLabelsOnNode(NodeId nodeId,
       Set<String> newLabels) {
@@ -1060,17 +1054,31 @@ public class CapacityScheduler extends
       return;
     }
     
-    // Kill running containers since label is changed
+    // Get new partition, we have only one partition per node
+    String newPartition;
+    if (newLabels.isEmpty()) {
+      newPartition = RMNodeLabelsManager.NO_LABEL;
+    } else {
+      newPartition = newLabels.iterator().next();
+    }
+
+    // old partition as well
+    String oldPartition = node.getPartition();
+
+    // Update resources of these containers
     for (RMContainer rmContainer : node.getRunningContainers()) {
-      ContainerId containerId = rmContainer.getContainerId();
-      completedContainer(rmContainer, 
-          ContainerStatus.newInstance(containerId,
-              ContainerState.COMPLETE, 
-              String.format(
-                  "Container=%s killed since labels on the node=%s changed",
-                  containerId.toString(), nodeId.toString()),
-              ContainerExitStatus.KILLED_BY_RESOURCEMANAGER),
-          RMContainerEventType.KILL);
+      FiCaSchedulerApp application =
+          getApplicationAttempt(rmContainer.getApplicationAttemptId());
+      if (null != application) {
+        application.nodePartitionUpdated(rmContainer, oldPartition,
+            newPartition);
+      } else {
+        LOG.warn("Found RMContainers running on a node, but cannot find"
+            + " the SchedulerApplicationAttempt for them. Node="
+            + node.getNodeID() + " applicationAttemptId="
+            + rmContainer.getApplicationAttemptId());
+        continue;
+      }
     }
     
     // Unreserve container on this node

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bf669b6d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
index ff1baff..658eae1 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
@@ -1262,6 +1262,22 @@ public class LeafQueue extends AbstractCSQueue {
     }
   }
 
+  @Override
+  public void incUsedResource(String nodeLabel, Resource resourceToInc,
+      SchedulerApplicationAttempt application) {
+    getUser(application.getUser()).getResourceUsage().incUsed(nodeLabel,
+        resourceToInc);
+    super.incUsedResource(nodeLabel, resourceToInc, application);
+  }
+
+  @Override
+  public void decUsedResource(String nodeLabel, Resource resourceToDec,
+      SchedulerApplicationAttempt application) {
+    getUser(application.getUser()).getResourceUsage().decUsed(nodeLabel,
+        resourceToDec);
+    super.decUsedResource(nodeLabel, resourceToDec, application);
+  }
+
   @VisibleForTesting
   public static class User {
     ResourceUsage userResourceUsage = new ResourceUsage();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bf669b6d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java
index 74d77f5..300cba9 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java
@@ -443,4 +443,13 @@ public class FiCaSchedulerApp extends SchedulerApplicationAttempt {
           schedulingMode, currentResourceLimits, reservedContainer);
     }
   }
+
+  public void nodePartitionUpdated(RMContainer rmContainer, String oldPartition,
+      String newPartition) {
+    Resource containerResource = rmContainer.getAllocatedResource();
+    this.attemptResourceUsage.decUsed(oldPartition, containerResource);
+    this.attemptResourceUsage.incUsed(newPartition, containerResource);
+    getCSLeafQueue().decUsedResource(oldPartition, containerResource, this);
+    getCSLeafQueue().incUsedResource(newPartition, containerResource, this);
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bf669b6d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerNodeLabelUpdate.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerNodeLabelUpdate.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerNodeLabelUpdate.java
index 0a701d8..94af4e0 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerNodeLabelUpdate.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerNodeLabelUpdate.java
@@ -19,22 +19,29 @@
 package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity;
 
 import java.util.ArrayList;
+import java.util.HashSet;
+import java.util.Map;
 import java.util.Set;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.yarn.api.records.ApplicationResourceUsageReport;
+import org.apache.hadoop.yarn.api.records.ContainerExitStatus;
 import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.ContainerState;
+import org.apache.hadoop.yarn.api.records.ContainerStatus;
 import org.apache.hadoop.yarn.api.records.NodeId;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
-import org.apache.hadoop.yarn.nodelabels.CommonNodeLabelsManager;
 import org.apache.hadoop.yarn.server.resourcemanager.MockAM;
 import org.apache.hadoop.yarn.server.resourcemanager.MockNM;
 import org.apache.hadoop.yarn.server.resourcemanager.MockRM;
 import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.NullRMNodeLabelsManager;
 import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
+import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerEventType;
 import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerState;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeLabelsUpdateSchedulerEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ResourceInfo;
 import org.junit.Assert;
 import org.junit.Before;
@@ -97,8 +104,18 @@ public class TestCapacitySchedulerNodeLabelUpdate {
         .getMemory());
   }
 
+  private void checkUserUsedResource(MockRM rm, String queueName,
+      String userName, String partition, int memory) {
+    CapacityScheduler scheduler = (CapacityScheduler) rm.getResourceScheduler();
+    LeafQueue queue = (LeafQueue) scheduler.getQueue(queueName);
+    LeafQueue.User user = queue.getUser(userName);
+    Assert.assertEquals(memory,
+        user.getResourceUsage().getUsed(partition).getMemory());
+  }
+
   @Test(timeout = 60000)
-  public void testResourceUsage() throws Exception {
+  public void testRequestContainerAfterNodePartitionUpdated()
+      throws Exception {
     // set node -> label
     mgr.addToCluserNodeLabelsWithDefaultExclusivity(ImmutableSet.of("x", "y",
         "z"));
@@ -160,7 +177,8 @@ public class TestCapacitySchedulerNodeLabelUpdate {
   }
 
   @Test (timeout = 60000)
-  public void testNodeUpdate() throws Exception {
+  public void testResourceUsageWhenNodeUpdatesPartition()
+      throws Exception {
     // set node -> label
     mgr.addToCluserNodeLabelsWithDefaultExclusivity(ImmutableSet.of("x", "y", "z"));
     
@@ -183,8 +201,9 @@ public class TestCapacitySchedulerNodeLabelUpdate {
     MockNM nm1 = rm.registerNode("h1:1234", 8000);
     MockNM nm2 = rm.registerNode("h2:1234", 8000);
     MockNM nm3 = rm.registerNode("h3:1234", 8000);
-    
-    ContainerId containerId;
+
+    ContainerId containerId1;
+    ContainerId containerId2;
 
     // launch an app to queue a1 (label = x), and check all container will
     // be allocated in h1
@@ -193,9 +212,9 @@ public class TestCapacitySchedulerNodeLabelUpdate {
 
     // request a container.
     am1.allocate("*", GB, 1, new ArrayList<ContainerId>(), "x");
-    containerId =
-        ContainerId.newContainerId(am1.getApplicationAttemptId(), 2);
-    Assert.assertTrue(rm.waitForState(nm1, containerId,
+    containerId1 = ContainerId.newContainerId(am1.getApplicationAttemptId(), 1);
+    containerId2 = ContainerId.newContainerId(am1.getApplicationAttemptId(), 2);
+    Assert.assertTrue(rm.waitForState(nm1, containerId2,
         RMContainerState.ALLOCATED, 10 * 1000));
     
     // check used resource:
@@ -203,55 +222,205 @@ public class TestCapacitySchedulerNodeLabelUpdate {
     checkUsedResource(rm, "a", 1024, "x");
     checkUsedResource(rm, "a", 1024);
     
-    // change h1's label to z, container should be killed
-    mgr.replaceLabelsOnNode(ImmutableMap.of(NodeId.newInstance("h1", 0),
-        toSet("z")));
-    Assert.assertTrue(rm.waitForState(nm1, containerId,
-        RMContainerState.KILLED, 10 * 1000));
+    CapacityScheduler cs = (CapacityScheduler) rm.getResourceScheduler();
+    FiCaSchedulerApp app = cs.getApplicationAttempt(am1.getApplicationAttemptId());
     
-    // check used resource:
-    // queue-a used x=0G, ""=1G ("" not changed)
+    // change h1's label to z
+    cs.handle(new NodeLabelsUpdateSchedulerEvent(ImmutableMap.of(nm1.getNodeId(),
+        toSet("z"))));
     checkUsedResource(rm, "a", 0, "x");
+    checkUsedResource(rm, "a", 1024, "z");
     checkUsedResource(rm, "a", 1024);
+    checkUsedResource(rm, "root", 0, "x");
+    checkUsedResource(rm, "root", 1024, "z");
+    checkUsedResource(rm, "root", 1024);
+    checkUserUsedResource(rm, "a", "user", "x", 0);
+    checkUserUsedResource(rm, "a", "user", "z", 1024);
+    Assert.assertEquals(0,
+        app.getAppAttemptResourceUsage().getUsed("x").getMemory());
+    Assert.assertEquals(1024,
+        app.getAppAttemptResourceUsage().getUsed("z").getMemory());
     
-    // request a container with label = y
-    am1.allocate("*", GB, 1, new ArrayList<ContainerId>(), "y");
-    containerId =
-        ContainerId.newContainerId(am1.getApplicationAttemptId(), 3);
-    Assert.assertTrue(rm.waitForState(nm2, containerId,
-        RMContainerState.ALLOCATED, 10 * 1000));
-    
-    // check used resource:
-    // queue-a used y=1G, ""=1G
+    // change h1's label to y
+    cs.handle(new NodeLabelsUpdateSchedulerEvent(ImmutableMap.of(nm1.getNodeId(),
+        toSet("y"))));
+    checkUsedResource(rm, "a", 0, "x");
     checkUsedResource(rm, "a", 1024, "y");
+    checkUsedResource(rm, "a", 0, "z");
     checkUsedResource(rm, "a", 1024);
+    checkUsedResource(rm, "root", 0, "x");
+    checkUsedResource(rm, "root", 1024, "y");
+    checkUsedResource(rm, "root", 0, "z");
+    checkUsedResource(rm, "root", 1024);
+    checkUserUsedResource(rm, "a", "user", "x", 0);
+    checkUserUsedResource(rm, "a", "user", "y", 1024);
+    checkUserUsedResource(rm, "a", "user", "z", 0);
+    Assert.assertEquals(0,
+        app.getAppAttemptResourceUsage().getUsed("x").getMemory());
+    Assert.assertEquals(1024,
+        app.getAppAttemptResourceUsage().getUsed("y").getMemory());
+    Assert.assertEquals(0,
+        app.getAppAttemptResourceUsage().getUsed("z").getMemory());
     
-    // change h2's label to no label, container should be killed
-    mgr.replaceLabelsOnNode(ImmutableMap.of(NodeId.newInstance("h2", 0),
-        CommonNodeLabelsManager.EMPTY_STRING_SET));
-    Assert.assertTrue(rm.waitForState(nm1, containerId,
-        RMContainerState.KILLED, 10 * 1000));
+    // change h1's label to no label
+    Set<String> emptyLabels = new HashSet<>();
+    Map<NodeId,Set<String>> map = ImmutableMap.of(nm1.getNodeId(),
+        emptyLabels);
+    cs.handle(new NodeLabelsUpdateSchedulerEvent(map));
+    checkUsedResource(rm, "a", 0, "x");
+    checkUsedResource(rm, "a", 0, "y");
+    checkUsedResource(rm, "a", 0, "z");
+    checkUsedResource(rm, "a", 2048);
+    checkUsedResource(rm, "root", 0, "x");
+    checkUsedResource(rm, "root", 0, "y");
+    checkUsedResource(rm, "root", 0, "z");
+    checkUsedResource(rm, "root", 2048);
+    checkUserUsedResource(rm, "a", "user", "x", 0);
+    checkUserUsedResource(rm, "a", "user", "y", 0);
+    checkUserUsedResource(rm, "a", "user", "z", 0);
+    checkUserUsedResource(rm, "a", "user", "", 2048);
+    Assert.assertEquals(0,
+        app.getAppAttemptResourceUsage().getUsed("x").getMemory());
+    Assert.assertEquals(0,
+        app.getAppAttemptResourceUsage().getUsed("y").getMemory());
+    Assert.assertEquals(0,
+        app.getAppAttemptResourceUsage().getUsed("z").getMemory());
+    Assert.assertEquals(2048,
+        app.getAppAttemptResourceUsage().getUsed("").getMemory());
+
+    // Finish the two containers, we should see used resource becomes 0
+    cs.completedContainer(cs.getRMContainer(containerId2),
+        ContainerStatus.newInstance(containerId2, ContainerState.COMPLETE, "",
+            ContainerExitStatus.KILLED_BY_RESOURCEMANAGER),
+        RMContainerEventType.KILL);
+    cs.completedContainer(cs.getRMContainer(containerId1),
+        ContainerStatus.newInstance(containerId1, ContainerState.COMPLETE, "",
+            ContainerExitStatus.KILLED_BY_RESOURCEMANAGER),
+        RMContainerEventType.KILL);
     
-    // check used resource:
-    // queue-a used x=0G, y=0G, ""=1G ("" not changed)
     checkUsedResource(rm, "a", 0, "x");
     checkUsedResource(rm, "a", 0, "y");
-    checkUsedResource(rm, "a", 1024);
+    checkUsedResource(rm, "a", 0, "z");
+    checkUsedResource(rm, "a", 0);
+    checkUsedResource(rm, "root", 0, "x");
+    checkUsedResource(rm, "root", 0, "y");
+    checkUsedResource(rm, "root", 0, "z");
+    checkUsedResource(rm, "root", 0);
+    checkUserUsedResource(rm, "a", "user", "x", 0);
+    checkUserUsedResource(rm, "a", "user", "y", 0);
+    checkUserUsedResource(rm, "a", "user", "z", 0);
+    checkUserUsedResource(rm, "a", "user", "", 0);
+
+    rm.close();
+  }
+
+
+  @Test (timeout = 60000)
+  public void testComplexResourceUsageWhenNodeUpdatesPartition()
+      throws Exception {
+    /*
+     * This test is similar to testResourceUsageWhenNodeUpdatesPartition, but it
+     * includes multiple applications, multiple users and multiple containers
+     * running on a single node; the size of each container is 1G.
+     *
+     * Node 1
+     * ------
+     * App1-container2
+     * App2-container2
+     * App2-container3
+     *
+     * Node 2
+     * ------
+     * App2-container1
+     * App1-container1
+     * App1-container3
+     */
+    // set node -> label
+    mgr.addToCluserNodeLabelsWithDefaultExclusivity(ImmutableSet.of("x", "y", "z"));
     
+    // set mapping:
+    // h1 -> x (h2 is left on the default partition)
+    mgr.addLabelsToNode(ImmutableMap.of(NodeId.newInstance("h1", 0), toSet("x")));
+
+    // inject node label manager
+    MockRM rm = new MockRM(getConfigurationWithQueueLabels(conf)) {
+      @Override
+      public RMNodeLabelsManager createNodeLabelManager() {
+        return mgr;
+      }
+    };
+
+    rm.getRMContext().setNodeLabelManager(mgr);
+    rm.start();
+    MockNM nm1 = rm.registerNode("h1:1234", 80000);
+    MockNM nm2 = rm.registerNode("h2:1234", 80000);
+
+    // app1
+    RMApp app1 = rm.submitApp(GB, "app", "u1", null, "a");
+    MockAM am1 = MockRM.launchAndRegisterAM(app1, rm, nm2);
+
+    // c2 on n1, c3 on n2
+    am1.allocate("*", GB, 1, new ArrayList<ContainerId>(), "x");
+    ContainerId containerId =
+        ContainerId.newContainerId(am1.getApplicationAttemptId(), 2);
+    Assert.assertTrue(rm.waitForState(nm1, containerId,
+        RMContainerState.ALLOCATED, 10 * 1000));
+    am1.allocate("*", GB, 1, new ArrayList<ContainerId>());
     containerId =
-        ContainerId.newContainerId(am1.getApplicationAttemptId(), 1);
+        ContainerId.newContainerId(am1.getApplicationAttemptId(), 3);
+    Assert.assertTrue(rm.waitForState(nm2, containerId,
+        RMContainerState.ALLOCATED, 10 * 1000));
     
-    // change h3's label to z, AM container should be killed
-    mgr.replaceLabelsOnNode(ImmutableMap.of(NodeId.newInstance("h3", 0),
-        toSet("z")));
+    // app2
+    RMApp app2 = rm.submitApp(GB, "app", "u2", null, "a");
+    MockAM am2 = MockRM.launchAndRegisterAM(app2, rm, nm2);
+
+    // c2/c3 on n1
+    am2.allocate("*", GB, 2, new ArrayList<ContainerId>(), "x");
+    containerId =
+        ContainerId.newContainerId(am2.getApplicationAttemptId(), 3);
     Assert.assertTrue(rm.waitForState(nm1, containerId,
-        RMContainerState.KILLED, 10 * 1000));
+        RMContainerState.ALLOCATED, 10 * 1000));
     
     // check used resource:
-    // queue-a used x=0G, y=0G, ""=1G ("" not changed)
+    // queue-a used x=3G, ""=3G
+    checkUsedResource(rm, "a", 3 * GB, "x");
+    checkUsedResource(rm, "a", 3 * GB);
+
+    CapacityScheduler cs = (CapacityScheduler) rm.getResourceScheduler();
+    FiCaSchedulerApp application1 =
+        cs.getApplicationAttempt(am1.getApplicationAttemptId());
+    FiCaSchedulerApp application2 =
+        cs.getApplicationAttempt(am2.getApplicationAttemptId());
+
+    // change h1's label to z
+    cs.handle(new NodeLabelsUpdateSchedulerEvent(ImmutableMap.of(nm1.getNodeId(),
+        toSet("z"))));
     checkUsedResource(rm, "a", 0, "x");
-    checkUsedResource(rm, "a", 0, "y");
-    checkUsedResource(rm, "a", 0);
+    checkUsedResource(rm, "a", 3 * GB, "z");
+    checkUsedResource(rm, "a", 3 * GB);
+    checkUsedResource(rm, "root", 0, "x");
+    checkUsedResource(rm, "root", 3 * GB, "z");
+    checkUsedResource(rm, "root", 3 * GB);
+    checkUserUsedResource(rm, "a", "u1", "x", 0 * GB);
+    checkUserUsedResource(rm, "a", "u1", "z", 1 * GB);
+    checkUserUsedResource(rm, "a", "u1", "", 2 * GB);
+    checkUserUsedResource(rm, "a", "u2", "x", 0 * GB);
+    checkUserUsedResource(rm, "a", "u2", "z", 2 * GB);
+    checkUserUsedResource(rm, "a", "u2", "", 1 * GB);
+    Assert.assertEquals(0,
+        application1.getAppAttemptResourceUsage().getUsed("x").getMemory());
+    Assert.assertEquals(1 * GB,
+        application1.getAppAttemptResourceUsage().getUsed("z").getMemory());
+    Assert.assertEquals(2 * GB,
+        application1.getAppAttemptResourceUsage().getUsed("").getMemory());
+    Assert.assertEquals(0,
+        application2.getAppAttemptResourceUsage().getUsed("x").getMemory());
+    Assert.assertEquals(2 * GB,
+        application2.getAppAttemptResourceUsage().getUsed("z").getMemory());
+    Assert.assertEquals(1 * GB,
+        application2.getAppAttemptResourceUsage().getUsed("").getMemory());
 
     rm.close();
   }
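The heart of this change is bookkeeping rather than scheduling: when a node's single partition flips, each running container's allocation is subtracted from the old partition's usage and added to the new one, at the application-attempt, per-user, leaf-queue and parent-queue levels, instead of the container being killed. Below is a self-contained toy sketch of that pattern; PartitionUsage is a hypothetical stand-in with plain long memory values, not the actual YARN ResourceUsage API.

import java.util.HashMap;
import java.util.Map;

// Hypothetical stand-in for a per-partition usage tracker.
public class PartitionUsage {
  private final Map<String, Long> usedByPartition = new HashMap<>();

  void incUsed(String partition, long mem) {
    usedByPartition.merge(partition, mem, Long::sum);
  }

  void decUsed(String partition, long mem) {
    usedByPartition.merge(partition, -mem, Long::sum);
  }

  long getUsed(String partition) {
    return usedByPartition.getOrDefault(partition, 0L);
  }

  // Mirrors the shape of FiCaSchedulerApp#nodePartitionUpdated above:
  // move the container's allocation instead of killing the container.
  void nodePartitionUpdated(long containerMem, String oldPartition,
      String newPartition) {
    decUsed(oldPartition, containerMem);
    incUsed(newPartition, containerMem);
  }

  public static void main(String[] args) {
    PartitionUsage queue = new PartitionUsage();
    queue.incUsed("x", 1024);                   // container allocated on an "x" node
    queue.nodePartitionUpdated(1024, "x", "z"); // node relabelled from x to z
    System.out.println(queue.getUsed("x"));     // 0
    System.out.println(queue.getUsed("z"));     // 1024
  }
}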


[32/50] [abbrv] hadoop git commit: HADOOP-10318. Incorrect reference to nodeFile in RumenToSLSConverter error message. Contributed by Wei Yan.

Posted by vv...@apache.org.
HADOOP-10318. Incorrect reference to nodeFile in RumenToSLSConverter error message. Contributed by Wei Yan.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/355eaaa3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/355eaaa3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/355eaaa3

Branch: refs/heads/YARN-3926
Commit: 355eaaa33d01f06e9efe960b8888fb925e03ffb9
Parents: 9a87f81
Author: Tsuyoshi Ozawa <oz...@apache.org>
Authored: Thu Sep 3 22:48:53 2015 +0900
Committer: Tsuyoshi Ozawa <oz...@apache.org>
Committed: Thu Sep 3 22:48:53 2015 +0900

----------------------------------------------------------------------
 hadoop-common-project/hadoop-common/CHANGES.txt                   | 3 +++
 .../main/java/org/apache/hadoop/yarn/sls/RumenToSLSConverter.java | 2 +-
 2 files changed, 4 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/355eaaa3/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index e915290..20474be 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -1091,6 +1091,9 @@ Release 2.8.0 - UNRELEASED
     HADOOP-12362. Set hadoop.tmp.dir and hadoop.log.dir in pom.
     (Charlie Helin via wang)
 
+    HADOOP-10318. Incorrect reference to nodeFile in RumenToSLSConverter
+    error message. (Wei Yan via ozawa)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/355eaaa3/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/RumenToSLSConverter.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/RumenToSLSConverter.java b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/RumenToSLSConverter.java
index 63d022b..0d0745c 100644
--- a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/RumenToSLSConverter.java
+++ b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/RumenToSLSConverter.java
@@ -110,7 +110,7 @@ public class RumenToSLSConverter {
     if (! nodeFile.getParentFile().exists()
             && ! nodeFile.getParentFile().mkdirs()) {
       System.err.println("ERROR: Cannot create output directory in path: "
-              + jsonFile.getParentFile().getAbsoluteFile());
+              + nodeFile.getParentFile().getAbsoluteFile());
       System.exit(1);
     }
 

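The corrected guard, generalized as a minimal sketch (outFile and outputPath are hypothetical stand-ins; in RumenToSLSConverter the same check runs for both jsonFile and nodeFile):

    // Report the path that actually failed; the bug was printing jsonFile's
    // parent while checking nodeFile's parent.
    File outFile = new File(outputPath);  // hypothetical stand-in
    if (!outFile.getParentFile().exists()
        && !outFile.getParentFile().mkdirs()) {
      System.err.println("ERROR: Cannot create output directory in path: "
          + outFile.getParentFile().getAbsoluteFile());
      System.exit(1);
    }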

[46/50] [abbrv] hadoop git commit: HDFS-8981. Adding revision to data node jmx getVersion() method. (Siqi Li via mingma)

Posted by vv...@apache.org.
HDFS-8981. Adding revision to data node jmx getVersion() method. (Siqi Li via mingma)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/30db1ada
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/30db1ada
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/30db1ada

Branch: refs/heads/YARN-3926
Commit: 30db1adac31b07b34ce8e8d426cc139fb8cfad02
Parents: 715b9c6
Author: Ming Ma <mi...@apache.org>
Authored: Fri Sep 4 11:55:58 2015 -0700
Committer: Ming Ma <mi...@apache.org>
Committed: Fri Sep 4 11:55:58 2015 -0700

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt          |  3 +++
 .../apache/hadoop/hdfs/server/datanode/DataNode.java | 15 ++++++++++++---
 .../hadoop/hdfs/server/datanode/DataNodeMXBean.java  |  9 ++++++++-
 .../hdfs/server/datanode/TestDataNodeMXBean.java     |  6 ++++++
 4 files changed, 29 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/30db1ada/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 5226d33..de44324 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -36,6 +36,9 @@ Trunk (Unreleased)
 
     HDFS-8895. Remove deprecated BlockStorageLocation APIs. (wang)
 
+    HDFS-8981. Adding revision to data node jmx getVersion() method. (Siqi Li
+    via mingma)
+
   NEW FEATURES
 
     HDFS-3125. Add JournalService to enable Journal Daemon. (suresh)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/30db1ada/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index e0adc6d..f72455d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -2900,11 +2900,16 @@ public class DataNode extends ReconfigurableBase
     return NetUtils.createSocketAddr(
         conf.getTrimmed(DFS_DATANODE_ADDRESS_KEY, DFS_DATANODE_ADDRESS_DEFAULT));
   }
-  
+
   @Override // DataNodeMXBean
-  public String getVersion() {
+  public String getSoftwareVersion() {
     return VersionInfo.getVersion();
   }
+
+  @Override // DataNodeMXBean
+  public String getVersion() {
+    return VersionInfo.getVersion() + ", r" + VersionInfo.getRevision();
+  }
   
   @Override // DataNodeMXBean
   public String getRpcPort(){
@@ -2917,7 +2922,11 @@ public class DataNode extends ReconfigurableBase
   public String getHttpPort(){
     return this.getConf().get("dfs.datanode.info.port");
   }
-  
+
+  public String getRevision() {
+    return VersionInfo.getRevision();
+  }
+
   /**
    * @return the datanode's http port
    */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/30db1ada/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeMXBean.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeMXBean.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeMXBean.java
index 92abd88..0119dc8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeMXBean.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeMXBean.java
@@ -36,7 +36,14 @@ public interface DataNodeMXBean {
    * @return the version of Hadoop
    */
   public String getVersion();
-  
+
+  /**
+   * Gets the version of software running on the DataNode.
+   *
+   * @return a string representing the version
+   */
+  public String getSoftwareVersion();
+
   /**
    * Gets the rpc port.
    * 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/30db1ada/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMXBean.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMXBean.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMXBean.java
index b461e3a..a7d0a5e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMXBean.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMXBean.java
@@ -51,6 +51,12 @@ public class TestDataNodeMXBean {
       // get attribute "Version"
       String version = (String)mbs.getAttribute(mxbeanName, "Version");
       Assert.assertEquals(datanode.getVersion(),version);
+      // get attribute "SotfwareVersion"
+      String softwareVersion =
+          (String)mbs.getAttribute(mxbeanName, "SoftwareVersion");
+      Assert.assertEquals(datanode.getSoftwareVersion(),softwareVersion);
+      Assert.assertEquals(version, softwareVersion
+          + ", r" + datanode.getRevision());
       // get attribute "RpcPort"
       String rpcPort = (String)mbs.getAttribute(mxbeanName, "RpcPort");
       Assert.assertEquals(datanode.getRpcPort(),rpcPort);

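To see the new attributes outside the unit test, a minimal in-process JMX probe looks like the sketch below (the object name matches the one the test uses; treat the class as an illustration, not code from this commit):

    import java.lang.management.ManagementFactory;
    import javax.management.MBeanServer;
    import javax.management.ObjectName;

    public class DataNodeVersionProbe {
      public static void main(String[] args) throws Exception {
        // In-process probe (e.g. alongside a MiniDFSCluster); a remote probe
        // would connect through JMXConnectorFactory instead.
        MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
        ObjectName name =
            new ObjectName("Hadoop:service=DataNode,name=DataNodeInfo");
        // After this change, "Version" reports "<version>, r<revision>" and
        // the new "SoftwareVersion" reports the bare version string.
        System.out.println(mbs.getAttribute(name, "Version"));
        System.out.println(mbs.getAttribute(name, "SoftwareVersion"));
      }
    }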

[47/50] [abbrv] hadoop git commit: HADOOP-12358. Add -safely flag to rm to prompt when deleting many files. Contributed by Xiaoyu Yao.

Posted by vv...@apache.org.
HADOOP-12358. Add -safely flag to rm to prompt when deleting many files. Contributed by Xiaoyu Yao.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e1feaf6d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e1feaf6d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e1feaf6d

Branch: refs/heads/YARN-3926
Commit: e1feaf6db03451068c660a863926032b35a569f8
Parents: 30db1ad
Author: Andrew Wang <wa...@apache.org>
Authored: Fri Sep 4 13:42:55 2015 -0700
Committer: Andrew Wang <wa...@apache.org>
Committed: Fri Sep 4 13:42:55 2015 -0700

----------------------------------------------------------------------
 hadoop-common-project/hadoop-common/CHANGES.txt |  3 +
 .../fs/CommonConfigurationKeysPublic.java       |  6 ++
 .../java/org/apache/hadoop/fs/shell/Delete.java | 61 ++++++++++---
 .../src/main/resources/core-default.xml         | 11 +++
 .../java/org/apache/hadoop/cli/TestCLI.java     |  2 +-
 .../org/apache/hadoop/cli/util/CLICommand.java  |  5 +-
 .../org/apache/hadoop/cli/util/CLITestCmd.java  |  6 +-
 .../src/test/resources/testConf.xml             |  6 +-
 .../org/apache/hadoop/cli/CLITestCmdDFS.java    |  8 +-
 .../java/org/apache/hadoop/cli/TestAclCLI.java  |  2 +-
 .../apache/hadoop/cli/TestCacheAdminCLI.java    |  7 +-
 .../apache/hadoop/cli/TestCryptoAdminCLI.java   |  6 +-
 .../org/apache/hadoop/cli/TestDeleteCLI.java    | 92 ++++++++++++++++++++
 .../java/org/apache/hadoop/cli/TestHDFSCLI.java |  4 +-
 .../org/apache/hadoop/cli/TestXAttrCLI.java     |  2 +-
 .../server/namenode/TestStorageRestore.java     |  3 +-
 .../src/test/resources/testDeleteConf.xml       | 83 ++++++++++++++++++
 .../org/apache/hadoop/cli/CLITestCmdMR.java     |  3 +-
 18 files changed, 275 insertions(+), 35 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e1feaf6d/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 512ca1b..5118747 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -765,6 +765,9 @@ Release 2.8.0 - UNRELEASED
     HADOOP-12369. Point hadoop-project/pom.xml java.security.krb5.conf
     within target folder. (wang)
 
+    HADOOP-12358. Add -safely flag to rm to prompt when deleting many files.
+    (xyao via wang)
+
   OPTIMIZATIONS
 
     HADOOP-11785. Reduce the number of listStatus operation in distcp

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e1feaf6d/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
index f3bc2e1..9f053b8 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
@@ -381,5 +381,11 @@ public class CommonConfigurationKeysPublic {
       "hadoop.shell.missing.defaultFs.warning";
   public static final boolean HADOOP_SHELL_MISSING_DEFAULT_FS_WARNING_DEFAULT =
       false;
+
+  /** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
+  public static final String HADOOP_SHELL_SAFELY_DELETE_LIMIT_NUM_FILES =
+      "hadoop.shell.safely.delete.limit.num.files";
+  public static final long HADOOP_SHELL_SAFELY_DELETE_LIMIT_NUM_FILES_DEFAULT =
+      100;
 }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e1feaf6d/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Delete.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Delete.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Delete.java
index 40d9478..ec45d17 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Delete.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Delete.java
@@ -25,6 +25,7 @@ import java.util.List;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.PathIOException;
 import org.apache.hadoop.fs.PathIsDirectoryException;
@@ -32,9 +33,13 @@ import org.apache.hadoop.fs.PathIsNotDirectoryException;
 import org.apache.hadoop.fs.PathIsNotEmptyDirectoryException;
 import org.apache.hadoop.fs.PathNotFoundException;
 import org.apache.hadoop.fs.Trash;
+import org.apache.hadoop.util.ToolRunner;
+
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SHELL_SAFELY_DELETE_LIMIT_NUM_FILES;
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SHELL_SAFELY_DELETE_LIMIT_NUM_FILES_DEFAULT;
 
 /**
- * Classes that delete paths
+ * Classes that delete paths.
  */
 @InterfaceAudience.Private
 @InterfaceStability.Evolving
@@ -50,28 +55,36 @@ class Delete {
   /** remove non-directory paths */
   public static class Rm extends FsCommand {
     public static final String NAME = "rm";
-    public static final String USAGE = "[-f] [-r|-R] [-skipTrash] <src> ...";
+    public static final String USAGE = "[-f] [-r|-R] [-skipTrash] " +
+        "[-safely] <src> ...";
     public static final String DESCRIPTION =
-      "Delete all files that match the specified file pattern. " +
-      "Equivalent to the Unix command \"rm <src>\"\n" +
-      "-skipTrash: option bypasses trash, if enabled, and immediately " +
-      "deletes <src>\n" +
-      "-f: If the file does not exist, do not display a diagnostic " +
-      "message or modify the exit status to reflect an error.\n" +
-      "-[rR]:  Recursively deletes directories";
+        "Delete all files that match the specified file pattern. " +
+            "Equivalent to the Unix command \"rm <src>\"\n" +
+            "-f: If the file does not exist, do not display a diagnostic " +
+            "message or modify the exit status to reflect an error.\n" +
+            "-[rR]:  Recursively deletes directories.\n" +
+            "-skipTrash: option bypasses trash, if enabled, and immediately " +
+            "deletes <src>.\n" +
+            "-safely: option requires safety confirmation,if enabled, " +
+            "requires confirmation before deleting large directory with more " +
+            "than <hadoop.shell.delete.limit.num.files> files. Delay is " +
+            "expected when walking over large directory recursively to count " +
+            "the number of files to be deleted before the confirmation.\n";
 
     private boolean skipTrash = false;
     private boolean deleteDirs = false;
     private boolean ignoreFNF = false;
-    
+    private boolean safeDelete = false;
+
     @Override
     protected void processOptions(LinkedList<String> args) throws IOException {
       CommandFormat cf = new CommandFormat(
-          1, Integer.MAX_VALUE, "f", "r", "R", "skipTrash");
+          1, Integer.MAX_VALUE, "f", "r", "R", "skipTrash", "safely");
       cf.parse(args);
       ignoreFNF = cf.getOpt("f");
       deleteDirs = cf.getOpt("r") || cf.getOpt("R");
       skipTrash = cf.getOpt("skipTrash");
+      safeDelete = cf.getOpt("safely");
     }
 
     @Override
@@ -102,7 +115,7 @@ class Delete {
       // problem (ie. creating the trash dir, moving the item to be deleted,
       // etc), then the path will just be deleted because moveToTrash returns
       // false and it falls thru to fs.delete.  this doesn't seem right
-      if (moveToTrash(item)) {
+      if (moveToTrash(item) || !canBeSafelyDeleted(item)) {
         return;
       }
       if (!item.fs.delete(item.path, deleteDirs)) {
@@ -111,6 +124,28 @@ class Delete {
       out.println("Deleted " + item);
     }
 
+    private boolean canBeSafelyDeleted(PathData item)
+        throws IOException {
+      boolean shouldDelete = true;
+      if (safeDelete) {
+        final long deleteLimit = getConf().getLong(
+            HADOOP_SHELL_SAFELY_DELETE_LIMIT_NUM_FILES,
+            HADOOP_SHELL_SAFELY_DELETE_LIMIT_NUM_FILES_DEFAULT);
+        if (deleteLimit > 0) {
+          ContentSummary cs = item.fs.getContentSummary(item.path);
+          final long numFiles = cs.getFileCount();
+          if (numFiles > deleteLimit) {
+            if (!ToolRunner.confirmPrompt("Proceed deleting " + numFiles +
+                " files?")) {
+              System.err.println("Delete aborted at user request.\n");
+              shouldDelete = false;
+            }
+          }
+        }
+      }
+      return shouldDelete;
+    }
+
     private boolean moveToTrash(PathData item) throws IOException {
       boolean success = false;
       if (!skipTrash) {
@@ -122,7 +157,7 @@ class Delete {
           String msg = ioe.getMessage();
           if (ioe.getCause() != null) {
             msg += ": " + ioe.getCause().getMessage();
-	  }
+          }
           throw new IOException(msg + ". Consider using -skipTrash option", ioe);
         }
       }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e1feaf6d/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
index b813aa9..410d966 100644
--- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
+++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
@@ -1962,4 +1962,15 @@ for ldap providers in the same way as above does.
     <name>hadoop.shell.missing.defaultFs.warning</name>
     <value>false</value>
   </property>
+
+  <property>
+    <name>hadoop.shell.safely.delete.limit.num.files</name>
+    <value>100</value>
+    <description>Used by the -safely option of the hadoop fs shell -rm command
+      to avoid accidental deletion of large directories. When enabled, the -rm
+      command requires confirmation if the number of files to be deleted is
+      greater than this limit. The default limit is 100 files. The warning is
+      disabled if the limit is 0 or the -safely option is not specified in the
+      -rm command.
+    </description>
+  </property>
 </configuration>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e1feaf6d/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/cli/TestCLI.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/cli/TestCLI.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/cli/TestCLI.java
index c350388..e1514ff 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/cli/TestCLI.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/cli/TestCLI.java
@@ -42,7 +42,7 @@ public class TestCLI extends CLITestHelper {
 
   @Override
   protected CommandExecutor.Result execute(CLICommand cmd) throws Exception {
-    return cmd.getExecutor("").executeCommand(cmd.getCmd());
+    return cmd.getExecutor("", conf).executeCommand(cmd.getCmd());
 
   }
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e1feaf6d/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/cli/util/CLICommand.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/cli/util/CLICommand.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/cli/util/CLICommand.java
index 50cb3a5..8823f5c 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/cli/util/CLICommand.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/cli/util/CLICommand.java
@@ -17,11 +17,14 @@
  */
 package org.apache.hadoop.cli.util;
 
+import org.apache.hadoop.conf.Configuration;
+
 /**
 * This interface generalizes the types of test commands used by upstream projects
  */
 public interface CLICommand {
-  public CommandExecutor getExecutor(String tag) throws IllegalArgumentException;
+  public CommandExecutor getExecutor(String tag, Configuration conf)
+      throws IllegalArgumentException;
   public CLICommandTypes getType();
   public String getCmd();
   @Override

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e1feaf6d/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/cli/util/CLITestCmd.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/cli/util/CLITestCmd.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/cli/util/CLITestCmd.java
index 602a07f..d912fad 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/cli/util/CLITestCmd.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/cli/util/CLITestCmd.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.cli.util;
 
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FsShell;
 
 /**
@@ -32,9 +33,10 @@ public class CLITestCmd implements CLICommand {
   }
 
   @Override
-  public CommandExecutor getExecutor(String tag) throws IllegalArgumentException {
+  public CommandExecutor getExecutor(String tag, Configuration conf)
+      throws IllegalArgumentException {
     if (getType() instanceof CLICommandFS)
-      return new FSCmdExecutor(tag, new FsShell());
+      return new FSCmdExecutor(tag, new FsShell(conf));
     throw new
         IllegalArgumentException("Unknown type of test command: " + getType());
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e1feaf6d/hadoop-common-project/hadoop-common/src/test/resources/testConf.xml
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/resources/testConf.xml b/hadoop-common-project/hadoop-common/src/test/resources/testConf.xml
index 62afff2..aa9ee76 100644
--- a/hadoop-common-project/hadoop-common/src/test/resources/testConf.xml
+++ b/hadoop-common-project/hadoop-common/src/test/resources/testConf.xml
@@ -391,7 +391,7 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rm \[-f\] \[-r\|-R\] \[-skipTrash\] &lt;src&gt; \.\.\. :\s*</expected-output>
+          <expected-output>^-rm \[-f\] \[-r\|-R\] \[-skipTrash\] \[-safely\] &lt;src&gt; \.\.\. :\s*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
@@ -403,7 +403,7 @@
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^\s*-skipTrash\s+option bypasses trash, if enabled, and immediately deletes &lt;src&gt;( )*</expected-output>
+          <expected-output>^\s*-skipTrash\s+option bypasses trash, if enabled, and immediately deletes &lt;src&gt;\.( )*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
@@ -415,7 +415,7 @@
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^\s+-\[rR\]\s+Recursively deletes directories\s*</expected-output>
+          <expected-output>^\s+-\[rR\]\s+Recursively deletes directories\.\s*</expected-output>
         </comparator>
       </comparators>
     </test>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e1feaf6d/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/CLITestCmdDFS.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/CLITestCmdDFS.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/CLITestCmdDFS.java
index 89932cc..992e8fe 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/CLITestCmdDFS.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/CLITestCmdDFS.java
@@ -22,6 +22,7 @@ import org.apache.hadoop.cli.util.CLICommandTypes;
 import org.apache.hadoop.cli.util.CLITestCmd;
 import org.apache.hadoop.cli.util.CommandExecutor;
 import org.apache.hadoop.cli.util.FSCmdExecutor;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.tools.DFSAdmin;
 
 public class CLITestCmdDFS extends CLITestCmd {
@@ -30,9 +31,10 @@ public class CLITestCmdDFS extends CLITestCmd {
   }
 
   @Override
-  public CommandExecutor getExecutor(String tag) throws IllegalArgumentException {
+  public CommandExecutor getExecutor(String tag, Configuration conf)
+      throws IllegalArgumentException {
     if (getType() instanceof CLICommandDFSAdmin)
-      return new FSCmdExecutor(tag, new DFSAdmin());
-    return super.getExecutor(tag);
+      return new FSCmdExecutor(tag, new DFSAdmin(conf));
+    return super.getExecutor(tag, conf);
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e1feaf6d/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestAclCLI.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestAclCLI.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestAclCLI.java
index 02207e6..a6f8651 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestAclCLI.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestAclCLI.java
@@ -73,7 +73,7 @@ public class TestAclCLI extends CLITestHelperDFS {
 
   @Override
   protected Result execute(CLICommand cmd) throws Exception {
-    return cmd.getExecutor(namenode).executeCommand(cmd.getCmd());
+    return cmd.getExecutor(namenode, conf).executeCommand(cmd.getCmd());
   }
 
   @Test

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e1feaf6d/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestCacheAdminCLI.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestCacheAdminCLI.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestCacheAdminCLI.java
index f25c4fe..ddb11b6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestCacheAdminCLI.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestCacheAdminCLI.java
@@ -29,6 +29,7 @@ import org.apache.hadoop.cli.util.CLITestCmd;
 import org.apache.hadoop.cli.util.CacheAdminCmdExecutor;
 import org.apache.hadoop.cli.util.CommandExecutor;
 import org.apache.hadoop.cli.util.CommandExecutor.Result;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
@@ -119,18 +120,18 @@ public class TestCacheAdminCLI extends CLITestHelper {
     }
 
     @Override
-    public CommandExecutor getExecutor(String tag)
+    public CommandExecutor getExecutor(String tag, Configuration conf)
         throws IllegalArgumentException {
       if (getType() instanceof CLICommandCacheAdmin) {
         return new CacheAdminCmdExecutor(tag, new CacheAdmin(conf));
       }
-      return super.getExecutor(tag);
+      return super.getExecutor(tag, conf);
     }
   }
 
   @Override
   protected Result execute(CLICommand cmd) throws Exception {
-    return cmd.getExecutor("").executeCommand(cmd.getCmd());
+    return cmd.getExecutor("", conf).executeCommand(cmd.getCmd());
   }
 
   @Test

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e1feaf6d/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestCryptoAdminCLI.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestCryptoAdminCLI.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestCryptoAdminCLI.java
index 1c870a2..44e662f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestCryptoAdminCLI.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestCryptoAdminCLI.java
@@ -149,18 +149,18 @@ public class TestCryptoAdminCLI extends CLITestHelperDFS {
     }
 
     @Override
-    public CommandExecutor getExecutor(String tag)
+    public CommandExecutor getExecutor(String tag, Configuration conf)
         throws IllegalArgumentException {
       if (getType() instanceof CLICommandCryptoAdmin) {
         return new CryptoAdminCmdExecutor(tag, new CryptoAdmin(conf));
       }
-      return super.getExecutor(tag);
+      return super.getExecutor(tag, conf);
     }
   }
 
   @Override
   protected Result execute(CLICommand cmd) throws Exception {
-    return cmd.getExecutor(namenode).executeCommand(cmd.getCmd());
+    return cmd.getExecutor(namenode, conf).executeCommand(cmd.getCmd());
   }
 
   @Test

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e1feaf6d/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestDeleteCLI.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestDeleteCLI.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestDeleteCLI.java
new file mode 100644
index 0000000..04328f5
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestDeleteCLI.java
@@ -0,0 +1,92 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.cli;
+
+import static org.junit.Assert.assertTrue;
+
+import org.apache.hadoop.cli.util.CLICommand;
+import org.apache.hadoop.cli.util.CommandExecutor.Result;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+public class TestDeleteCLI extends CLITestHelperDFS {
+  protected MiniDFSCluster dfsCluster = null;
+  protected FileSystem fs = null;
+  protected String namenode = null;
+
+  @Before
+  @Override
+  public void setUp() throws Exception {
+    super.setUp();
+    conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);
+    conf.setLong(CommonConfigurationKeysPublic.
+        HADOOP_SHELL_SAFELY_DELETE_LIMIT_NUM_FILES, 5);
+
+    dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
+    dfsCluster.waitClusterUp();
+    namenode = conf.get(DFSConfigKeys.FS_DEFAULT_NAME_KEY, "file:///");
+
+    fs = dfsCluster.getFileSystem();
+    assertTrue("Not an HDFS: " + fs.getUri(),
+        fs instanceof DistributedFileSystem);
+  }
+
+  @After
+  @Override
+  public void tearDown() throws Exception {
+    if (fs != null) {
+      fs.close();
+    }
+    if (dfsCluster != null) {
+      dfsCluster.shutdown();
+    }
+    Thread.sleep(2000);
+    super.tearDown();
+  }
+
+  @Override
+  protected String getTestFile() {
+    return "testDeleteConf.xml";
+  }
+
+  @Override
+  protected String expandCommand(final String cmd) {
+    String expCmd = cmd;
+    expCmd = expCmd.replaceAll("NAMENODE", namenode);
+    expCmd = super.expandCommand(expCmd);
+    return expCmd;
+  }
+
+  @Override
+  protected Result execute(CLICommand cmd) throws Exception {
+    return cmd.getExecutor(namenode, conf).executeCommand(cmd.getCmd());
+  }
+
+  @Test
+  @Override
+  public void testAll () {
+    super.testAll();
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e1feaf6d/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestHDFSCLI.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestHDFSCLI.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestHDFSCLI.java
index c348348..3630726 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestHDFSCLI.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestHDFSCLI.java
@@ -47,7 +47,7 @@ public class TestHDFSCLI extends CLITestHelperDFS {
     
     // Many of the tests expect a replication value of 1 in the output
     conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);
-    
+
     // Build racks and hosts configuration to test dfsAdmin -printTopology
     String [] racks =  {"/rack1", "/rack1", "/rack2", "/rack2",
                         "/rack2", "/rack3", "/rack4", "/rack4" };
@@ -95,7 +95,7 @@ public class TestHDFSCLI extends CLITestHelperDFS {
   
   @Override
   protected Result execute(CLICommand cmd) throws Exception {
-    return cmd.getExecutor(namenode).executeCommand(cmd.getCmd());
+    return cmd.getExecutor(namenode, conf).executeCommand(cmd.getCmd());
   }
   
   @Test

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e1feaf6d/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestXAttrCLI.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestXAttrCLI.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestXAttrCLI.java
index ce107ef..45c7909 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestXAttrCLI.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestXAttrCLI.java
@@ -87,7 +87,7 @@ public class TestXAttrCLI  extends CLITestHelperDFS {
   
   @Override
   protected Result execute(CLICommand cmd) throws Exception {
-    return cmd.getExecutor(namenode).executeCommand(cmd.getCmd());
+    return cmd.getExecutor(namenode, conf).executeCommand(cmd.getCmd());
   }
   
   @Test

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e1feaf6d/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStorageRestore.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStorageRestore.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStorageRestore.java
index 1a612e8..6f4546d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStorageRestore.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStorageRestore.java
@@ -275,7 +275,8 @@ public class TestStorageRestore {
       String cmd = "-fs NAMENODE -restoreFailedStorage false";
       String namenode = config.get(DFSConfigKeys.FS_DEFAULT_NAME_KEY, "file:///");
       CommandExecutor executor =
-          new CLITestCmdDFS(cmd, new CLICommandDFSAdmin()).getExecutor(namenode);
+          new CLITestCmdDFS(cmd,
+              new CLICommandDFSAdmin()).getExecutor(namenode, config);
 
       executor.executeCommand(cmd);
       restore = fsi.getStorage().getRestoreFailedStorage();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e1feaf6d/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testDeleteConf.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testDeleteConf.xml b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testDeleteConf.xml
new file mode 100644
index 0000000..8701983
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testDeleteConf.xml
@@ -0,0 +1,83 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<?xml-stylesheet type="text/xsl" href="testConf.xsl"?>
+
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+<configuration>
+    <!-- Normal mode is test. To run just the commands and dump the output
+         to the log, set it to nocompare -->
+    <mode>test</mode>
+
+    <!--  Comparator types:
+             ExactComparator
+             SubstringComparator
+             RegexpComparator
+             TokenComparator
+             -->
+    <tests>
+        <test>  <!-- TESTED -->
+            <description>rm -r directory that meets warning criteria when -safely is not used</description>
+            <test-commands>
+                <command>-fs NAMENODE -mkdir /dir0</command>
+                <command>-fs NAMENODE -copyFromLocal CLITEST_DATA/data15bytes /dir0/data15bytes</command>
+                <command>-fs NAMENODE -copyFromLocal CLITEST_DATA/data30bytes /dir0/data30bytes</command>
+                <command>-fs NAMENODE -copyFromLocal CLITEST_DATA/data60bytes /dir0/data60bytes</command>
+                <command>-fs NAMENODE -copyFromLocal CLITEST_DATA/data120bytes /dir0/data120bytes</command>
+                <command>-fs NAMENODE -mkdir /dir0/dir00</command>
+                <command>-fs NAMENODE -copyFromLocal CLITEST_DATA/data15bytes /dir0/dir00/data15bytes</command>
+                <command>-fs NAMENODE -mkdir /dir0/dir01</command>
+                <command>-fs NAMENODE -copyFromLocal CLITEST_DATA/data30bytes /dir0/dir01/data30bytes</command>
+                <command>-fs NAMENODE -ls /dir0</command>
+                <command>-fs NAMENODE -rm -r /dir0</command>
+            </test-commands>
+            <cleanup-commands>
+                <command>-fs NAMENODE -rm -r /dir0</command>
+            </cleanup-commands>
+            <comparators>
+                <comparator>
+                    <type>RegexpComparator</type>
+                    <expected-output>Deleted /dir0</expected-output>
+                </comparator>
+            </comparators>
+        </test>
+        <test>  <!-- TESTED -->
+            <description>rm -r directory that does not meet warning criteria when -safely is used</description>
+            <test-commands>
+                <command>-fs NAMENODE -mkdir /dir0</command>
+                <command>-fs NAMENODE -copyFromLocal CLITEST_DATA/data15bytes /dir0/data15bytes</command>
+                <command>-fs NAMENODE -copyFromLocal CLITEST_DATA/data30bytes /dir0/data30bytes</command>
+                <command>-fs NAMENODE -copyFromLocal CLITEST_DATA/data60bytes /dir0/data60bytes</command>
+                <command>-fs NAMENODE -mkdir /dir0/dir00</command>
+                <command>-fs NAMENODE -copyFromLocal CLITEST_DATA/data15bytes /dir0/dir00/data15bytes</command>
+                <command>-fs NAMENODE -mkdir /dir0/dir01</command>
+                <command>-fs NAMENODE -copyFromLocal CLITEST_DATA/data30bytes /dir0/dir01/data30bytes</command>
+                <command>-fs NAMENODE -ls /dir0</command>
+                <command>-fs NAMENODE -rm -r -safely /dir0</command>
+            </test-commands>
+            <cleanup-commands>
+                <command>-fs NAMENODE -rm -r /dir0</command>
+            </cleanup-commands>
+            <comparators>
+                <comparator>
+                    <type>RegexpComparator</type>
+                    <expected-output>Deleted /dir0</expected-output>
+                </comparator>
+            </comparators>
+        </test>
+    </tests>
+</configuration>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e1feaf6d/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/cli/CLITestCmdMR.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/cli/CLITestCmdMR.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/cli/CLITestCmdMR.java
index f9bc943..f4ab3dc 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/cli/CLITestCmdMR.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/cli/CLITestCmdMR.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.cli;
 import org.apache.hadoop.cli.util.CLICommandTypes;
 import org.apache.hadoop.cli.util.CLITestCmd;
 import org.apache.hadoop.cli.util.CommandExecutor;
+import org.apache.hadoop.conf.Configuration;
 
 public class CLITestCmdMR extends CLITestCmd {
   public CLITestCmdMR(String str, CLICommandTypes type) {
@@ -34,7 +35,7 @@ public class CLITestCmdMR extends CLITestCmd {
    * of the test method.
    */
   @Override
-  public CommandExecutor getExecutor(String tag)
+  public CommandExecutor getExecutor(String tag, Configuration conf)
       throws IllegalArgumentException {
     throw new IllegalArgumentException("Method isn't supported");
   }

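A minimal usage sketch of the new flag from Java (the limit value and path are illustrative; the command-line equivalent is "hadoop fs -rm -r -safely /tmp/big-dir"):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FsShell;
    import org.apache.hadoop.util.ToolRunner;

    public class SafeRmExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Prompt whenever a -safely delete would remove more than 500 files
        // (the default limit is 100; setting 0 disables the check).
        conf.setLong("hadoop.shell.safely.delete.limit.num.files", 500);
        // Equivalent to: hadoop fs -rm -r -safely /tmp/big-dir
        int rc = ToolRunner.run(new FsShell(conf),
            new String[] {"-rm", "-r", "-safely", "/tmp/big-dir"});
        System.exit(rc);
      }
    }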

[24/50] [abbrv] hadoop git commit: HDFS-2070. Add more unit tests for FsShell getmerge (Daniel Templeton via Colin P. McCabe)

Posted by vv...@apache.org.
HDFS-2070. Add more unit tests for FsShell getmerge (Daniel Templeton via Colin P. McCabe)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c006c3a1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c006c3a1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c006c3a1

Branch: refs/heads/YARN-3926
Commit: c006c3a1e6d3631c5724e1eb0a0b620d9f7c8dcc
Parents: 7d6687f
Author: Colin Patrick Mccabe <cm...@cloudera.com>
Authored: Wed Sep 2 12:36:14 2015 -0700
Committer: Colin Patrick Mccabe <cm...@cloudera.com>
Committed: Wed Sep 2 12:36:14 2015 -0700

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |   3 +
 .../src/test/resources/testHDFSConf.xml         | 142 ++++++++++++++++---
 2 files changed, 123 insertions(+), 22 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c006c3a1/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 14a9248..0f2d713 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -874,6 +874,9 @@ Release 2.8.0 - UNRELEASED
 
     HDFS-8965. Harden edit log reading code against out of memory errors (cmccabe)
 
+    HDFS-2070. Add more unit tests for FsShell getmerge (Daniel Templeton via
+    Colin P. McCabe)
+
   OPTIMIZATIONS
 
     HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c006c3a1/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml
index 4e25d09..18c68ca 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml
@@ -5268,7 +5268,126 @@
     </test>
     
     <!-- Tests for getmerge -->
-    <!-- Manual Testing -->
+    <test> <!-- TESTED -->
+      <description>getmerge: basic</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir -p /user/USERNAME/dir1</command>
+        <command>-fs NAMENODE -copyFromLocal CLITEST_DATA/data15bytes /user/USERNAME/dir1</command>
+        <command>-fs NAMENODE -copyFromLocal CLITEST_DATA/data30bytes /user/USERNAME/dir1</command>
+        <command>-fs NAMENODE -getmerge /user/USERNAME/dir1 CLITEST_DATA/file</command>
+        <command>-cat CLITEST_DATA/file</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm -r /user/USERNAME CLITEST_DATA/file</command>
+        <command>rm data</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>ExactComparator</type>
+          <expected-output>12345678901234
+12345678901234
+12345678901234
+</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>getmerge: with file</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir -p /user/USERNAME/dir1</command>
+        <command>-fs NAMENODE -copyFromLocal CLITEST_DATA/data15bytes /user/USERNAME/dir1</command>
+        <command>-fs NAMENODE -getmerge /user/USERNAME/dir1/data15bytes CLITEST_DATA/file</command>
+        <command>-cat CLITEST_DATA/file</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm -r /user/USERNAME CLITEST_DATA/file</command>
+        <command>rm data</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>ExactComparator</type>
+          <expected-output>12345678901234
+</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>getmerge: with multiple files</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir -p /user/USERNAME/dir1</command>
+        <command>-fs NAMENODE -copyFromLocal CLITEST_DATA/data15bytes /user/USERNAME/dir1</command>
+        <command>-fs NAMENODE -copyFromLocal CLITEST_DATA/data30bytes /user/USERNAME/dir1</command>
+        <command>-fs NAMENODE -getmerge /user/USERNAME/dir1/data15bytes /user/USERNAME/dir1/data30bytes CLITEST_DATA/file</command>
+        <command>-cat CLITEST_DATA/file</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm -r /user/USERNAME CLITEST_DATA/file</command>
+        <command>rm data</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>ExactComparator</type>
+          <expected-output>12345678901234
+12345678901234
+12345678901234
+</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>getmerge: with newlines</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir -p /user/USERNAME/dir1</command>
+        <command>-fs NAMENODE -copyFromLocal CLITEST_DATA/data15bytes /user/USERNAME/dir1</command>
+        <command>-fs NAMENODE -copyFromLocal CLITEST_DATA/data30bytes /user/USERNAME/dir1</command>
+        <command>-fs NAMENODE -getmerge -nl /user/USERNAME/dir1 CLITEST_DATA/file</command>
+        <command>-cat CLITEST_DATA/file</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm -r /user/USERNAME CLITEST_DATA/file</command>
+        <command>rm data</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>ExactComparator</type>
+          <expected-output>12345678901234
+
+12345678901234
+12345678901234
+
+</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>getmerge: with multiple files and newlines</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir -p /user/USERNAME/dir1</command>
+        <command>-fs NAMENODE -copyFromLocal CLITEST_DATA/data15bytes /user/USERNAME/dir1</command>
+        <command>-fs NAMENODE -copyFromLocal CLITEST_DATA/data30bytes /user/USERNAME/dir1</command>
+        <command>-fs NAMENODE -getmerge -nl /user/USERNAME/dir1/data15bytes /user/USERNAME/dir1/data30bytes CLITEST_DATA/file</command>
+        <command>-cat CLITEST_DATA/file</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm -r /user/USERNAME CLITEST_DATA/file</command>
+        <command>rm data</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>ExactComparator</type>
+          <expected-output>12345678901234
+
+12345678901234
+12345678901234
+
+</expected-output>
+        </comparator>
+      </comparators>
+    </test>
 
     <!-- Tests for cat -->
     <test> <!-- TESTED -->
@@ -16515,27 +16634,6 @@
       </comparators>
     </test>
 
-    <test>
-      <description>getmerge</description>
-      <test-commands>
-        <command>-fs NAMENODE -mkdir -p /user/USERNAME/dir1</command>
-        <command>-fs NAMENODE -copyFromLocal CLITEST_DATA/data15bytes /user/USERNAME/dir1</command>
-        <command>-fs NAMENODE -copyFromLocal CLITEST_DATA/data30bytes /user/USERNAME/dir1</command>
-        <command>-fs NAMENODE -getmerge /user/USERNAME/dir1 CLITEST_DATA/file</command>
-        <command>-cat CLITEST_DATA/file</command>
-      </test-commands>
-      <cleanup-commands>
-        <command>-fs NAMENODE -rm -r /user/USERNAME CLITEST_DATA/file</command>
-        <command>rm data</command>
-      </cleanup-commands>
-      <comparators>
-        <comparator>
-          <type>RegexpComparator</type>
-          <expected-output>12345678901234.*</expected-output>
-        </comparator>
-      </comparators>
-    </test>
-
     <!-- Tests for snapshots -->
     <test>
       <description>allowSnapshot</description>

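For completeness, the merge these tests exercise can also be done programmatically; a minimal sketch using FileUtil.copyMerge (paths are illustrative, and the trailing "\n" mirrors the -nl option):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.FileUtil;
    import org.apache.hadoop.fs.Path;

    public class GetMergeExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem srcFs = FileSystem.get(conf);
        FileSystem dstFs = FileSystem.getLocal(conf);
        // Concatenate every file under dir1 into one local file, appending a
        // newline after each part (pass null for plain concatenation).
        FileUtil.copyMerge(srcFs, new Path("/user/alice/dir1"),
            dstFs, new Path("/tmp/merged"),
            false /* keep sources */, conf, "\n");
      }
    }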

[41/50] [abbrv] hadoop git commit: YARN-4103. RM WebServices missing scheme for appattempts logLinks. Contributed by Jonathan Eagles.

Posted by vv...@apache.org.
YARN-4103. RM WebServices missing scheme for appattempts logLinks. Contributed by Jonathan Eagles.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/40d222e8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/40d222e8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/40d222e8

Branch: refs/heads/YARN-3926
Commit: 40d222e862063dc6c474cc6e8de0dce6c4395012
Parents: c83d13c
Author: Varun Vasudev <vv...@apache.org>
Authored: Thu Sep 3 23:58:26 2015 +0530
Committer: Varun Vasudev <vv...@apache.org>
Committed: Fri Sep 4 14:31:51 2015 +0530

----------------------------------------------------------------------
 hadoop-yarn-project/CHANGES.txt                           |  3 +++
 .../yarn/server/resourcemanager/webapp/RMAppBlock.java    |  3 ++-
 .../yarn/server/resourcemanager/webapp/RMWebServices.java |  7 ++++---
 .../server/resourcemanager/webapp/dao/AppAttemptInfo.java | 10 +++++-----
 .../resourcemanager/webapp/TestRMWebServicesApps.java     |  2 +-
 5 files changed, 15 insertions(+), 10 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/40d222e8/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 4e9b411..29eabb5 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -877,6 +877,9 @@ Release 2.7.2 - UNRELEASED
     YARN-4101. RM should print alert messages if Zookeeper and Resourcemanager
     gets connection issue. (Xuan Gong via jianhe)
 
+    YARN-4103. RM WebServices missing scheme for appattempts logLinks.
+    (Jonathan Eagles via vvasudev)
+
 Release 2.7.1 - 2015-07-06
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/40d222e8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMAppBlock.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMAppBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMAppBlock.java
index 38e0e3b..eec1fad 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMAppBlock.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMAppBlock.java
@@ -130,7 +130,8 @@ public class RMAppBlock extends AppBlock{
         continue;
       }
       AppAttemptInfo attemptInfo =
-          new AppAttemptInfo(this.rm, rmAppAttempt, rmApp.getUser());
+          new AppAttemptInfo(this.rm, rmAppAttempt, rmApp.getUser(),
+              WebAppUtils.getHttpSchemePrefix(conf));
       String blacklistedNodesCount = "N/A";
       Set<String> nodes =
           RMAppAttemptBlock.getBlacklistedNodes(rm,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/40d222e8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java
index 8e11e2a..2410053 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java
@@ -696,7 +696,8 @@ public class RMWebServices {
   @GET
   @Path("/apps/{appid}/appattempts")
   @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
-  public AppAttemptsInfo getAppAttempts(@PathParam("appid") String appId) {
+  public AppAttemptsInfo getAppAttempts(@Context HttpServletRequest hsr,
+      @PathParam("appid") String appId) {
 
     init();
     if (appId == null || appId.isEmpty()) {
@@ -714,8 +715,8 @@ public class RMWebServices {
 
     AppAttemptsInfo appAttemptsInfo = new AppAttemptsInfo();
     for (RMAppAttempt attempt : app.getAppAttempts().values()) {
-      AppAttemptInfo attemptInfo =
-          new AppAttemptInfo(rm, attempt, app.getUser());
+      AppAttemptInfo attemptInfo = new AppAttemptInfo(rm, attempt,
+          app.getUser(), hsr.getScheme() + "://");
       appAttemptsInfo.add(attemptInfo);
     }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/40d222e8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppAttemptInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppAttemptInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppAttemptInfo.java
index 2c7bbd0..b6e95a6 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppAttemptInfo.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppAttemptInfo.java
@@ -27,7 +27,6 @@ import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.AbstractYarnScheduler;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApplicationAttempt;
-import org.apache.hadoop.yarn.server.resourcemanager.webapp.RMAppAttemptBlock;
 import org.apache.hadoop.yarn.util.ConverterUtils;
 import org.apache.hadoop.yarn.webapp.util.WebAppUtils;
 
@@ -46,7 +45,8 @@ public class AppAttemptInfo {
   public AppAttemptInfo() {
   }
 
-  public AppAttemptInfo(ResourceManager rm, RMAppAttempt attempt, String user) {
+  public AppAttemptInfo(ResourceManager rm, RMAppAttempt attempt, String user,
+      String schemePrefix) {
     this.startTime = 0;
     this.containerId = "";
     this.nodeHttpAddress = "";
@@ -61,9 +61,9 @@ public class AppAttemptInfo {
         this.containerId = masterContainer.getId().toString();
         this.nodeHttpAddress = masterContainer.getNodeHttpAddress();
         this.nodeId = masterContainer.getNodeId().toString();
-        this.logsLink =
-            WebAppUtils.getRunningLogURL("//" + masterContainer.getNodeHttpAddress(),
-                ConverterUtils.toString(masterContainer.getId()), user);
+        this.logsLink = WebAppUtils.getRunningLogURL(schemePrefix
+            + masterContainer.getNodeHttpAddress(),
+            ConverterUtils.toString(masterContainer.getId()), user);
         if (rm.getResourceScheduler() instanceof AbstractYarnScheduler) {
           AbstractYarnScheduler ayScheduler =
               (AbstractYarnScheduler) rm.getResourceScheduler();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/40d222e8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesApps.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesApps.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesApps.java
index 919bb1a..a784295 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesApps.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesApps.java
@@ -1635,7 +1635,7 @@ public class TestRMWebServicesApps extends JerseyTestBase {
         .getMasterContainer().getNodeHttpAddress(), nodeHttpAddress);
     WebServicesTestUtils.checkStringMatch("nodeId", appAttempt
         .getMasterContainer().getNodeId().toString(), nodeId);
-    assertTrue("logsLink doesn't match", logsLink.startsWith("//"));
+    assertTrue("logsLink doesn't match ", logsLink.startsWith("http://"));
     assertTrue(
         "logsLink doesn't contain user info", logsLink.endsWith("/"
         + user));
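
Taken together, the changes above make the log link scheme-aware end to end. A hedged, self-contained sketch of the endpoint-side pattern (only the YARN utility call is taken from the commit; the surrounding names are illustrative):

    import javax.servlet.http.HttpServletRequest;
    import org.apache.hadoop.yarn.webapp.util.WebAppUtils;

    class LogLinks {
      /** Build an absolute log URL using the scheme of the incoming request. */
      static String runningLogsLink(HttpServletRequest hsr, String nodeHttpAddress,
          String containerId, String user) {
        // hsr.getScheme() is "http" or "https", so HTTPS deployments get
        // https:// links instead of scheme-relative "//host:port" ones.
        return WebAppUtils.getRunningLogURL(
            hsr.getScheme() + "://" + nodeHttpAddress, containerId, user);
      }
    }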


[33/50] [abbrv] hadoop git commit: HADOOP-12213. Interrupted exception can occur when Client#stop is called. Contributed by Kuhu Shukla.

Posted by vv...@apache.org.
HADOOP-12213. Interrupted exception can occur when Client#stop is called. Contributed by Kuhu Shukla.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0ebc6581
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0ebc6581
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0ebc6581

Branch: refs/heads/YARN-3926
Commit: 0ebc658105336cfe3e1a248b411de60f1d380928
Parents: 355eaaa
Author: Tsuyoshi Ozawa <oz...@apache.org>
Authored: Thu Sep 3 23:32:42 2015 +0900
Committer: Tsuyoshi Ozawa <oz...@apache.org>
Committed: Thu Sep 3 23:32:42 2015 +0900

----------------------------------------------------------------------
 hadoop-common-project/hadoop-common/CHANGES.txt |  3 ++
 .../main/java/org/apache/hadoop/ipc/Client.java |  9 +++--
 .../java/org/apache/hadoop/ipc/TestIPC.java     | 38 +++++++++++++++++++-
 3 files changed, 47 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0ebc6581/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 20474be..512ca1b 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -1132,6 +1132,9 @@ Release 2.7.2 - UNRELEASED
     HADOOP-10365. BufferedOutputStream in FileUtil#unpackEntries() should be
     closed in finally block. (Kiran Kumar M R and Sanghyun Yun via ozawa)
 
+    HADOOP-12213. Interrupted exception can occur when Client#stop is called.
+    (Kuhu Shukla via ozawa)
+
 Release 2.7.1 - 2015-07-06 
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0ebc6581/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
index 6996a51..9087e5c 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
@@ -197,9 +197,10 @@ public class Client {
             clientExecutor.shutdownNow();
           }
         } catch (InterruptedException e) {
-          LOG.error("Interrupted while waiting for clientExecutor" +
-              "to stop", e);
+          LOG.warn("Interrupted while waiting for clientExecutor" +
+              " to stop");
           clientExecutor.shutdownNow();
+          Thread.currentThread().interrupt();
         }
         clientExecutor = null;
       }
@@ -256,6 +257,10 @@ public class Client {
     conf.setInt(CommonConfigurationKeys.IPC_CLIENT_CONNECT_TIMEOUT_KEY, timeout);
   }
 
+  @VisibleForTesting
+  public static final ExecutorService getClientExecutor() {
+    return Client.clientExcecutorFactory.clientExecutor;
+  }
   /**
    * Increment this client's reference count
    *
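
The replacement block is the standard two-phase executor teardown that preserves the caller's interrupt status. A self-contained sketch of the idiom, independent of the Hadoop classes:

    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.TimeUnit;

    class Shutdown {
      static void shutdownQuietly(ExecutorService pool) {
        pool.shutdown();
        try {
          if (!pool.awaitTermination(1, TimeUnit.SECONDS)) {
            pool.shutdownNow();                  // cancel tasks that ignored shutdown()
          }
        } catch (InterruptedException e) {
          pool.shutdownNow();                    // still release the executor's threads
          Thread.currentThread().interrupt();    // restore interrupt status for callers
        }
      }
    }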

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0ebc6581/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java
index 08508ae..4e2e2f1 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java
@@ -304,6 +304,8 @@ public class TestIPC {
       String causeText=cause.getMessage();
       assertTrue("Did not find " + causeText + " in " + message,
               message.contains(causeText));
+    } finally {
+      client.stop();
     }
   }
   
@@ -416,6 +418,7 @@ public class TestIPC {
       client.call(param, addr, null, null, 0, conf);
       
     } finally {
+      client.stop();
       server.stop();
     }
   }
@@ -531,6 +534,8 @@ public class TestIPC {
       fail("Expected an exception to have been thrown");
     } catch (IOException e) {
       assertTrue(e.getMessage().contains("Injected fault"));
+    } finally {
+      client.stop();
     }
   }
 
@@ -556,11 +561,11 @@ public class TestIPC {
     }).when(spyFactory).createSocket();
       
     Server server = new TestServer(1, true);
+    Client client = new Client(LongWritable.class, conf, spyFactory);
     server.start();
     try {
       // Call should fail due to injected exception.
       InetSocketAddress address = NetUtils.getConnectAddress(server);
-      Client client = new Client(LongWritable.class, conf, spyFactory);
       try {
         client.call(new LongWritable(RANDOM.nextLong()),
                 address, null, null, 0, conf);
@@ -577,6 +582,7 @@ public class TestIPC {
       client.call(new LongWritable(RANDOM.nextLong()),
           address, null, null, 0, conf);
     } finally {
+      client.stop();
       server.stop();
     }
   }
@@ -601,6 +607,7 @@ public class TestIPC {
     // set timeout to be bigger than 3*ping interval
     client.call(new LongWritable(RANDOM.nextLong()),
         addr, null, null, 3*PING_INTERVAL+MIN_SLEEP_TIME, conf);
+    client.stop();
   }
 
   @Test(timeout=60000)
@@ -621,6 +628,7 @@ public class TestIPC {
     } catch (SocketTimeoutException e) {
       LOG.info("Get a SocketTimeoutException ", e);
     }
+    client.stop();
   }
   
   /**
@@ -851,6 +859,8 @@ public class TestIPC {
             } catch (IOException e) {
               LOG.error(e);
             } catch (InterruptedException e) {
+            } finally {
+              client.stop();
             }
           }
         });
@@ -952,6 +962,31 @@ public class TestIPC {
         endFds - startFds < 20);
   }
   
+  /**
+   * Check if Client is interrupted after handling
+   * InterruptedException during cleanup
+   */
+  @Test(timeout=30000)
+  public void testInterrupted() {
+    Client client = new Client(LongWritable.class, conf);
+    client.getClientExecutor().submit(new Runnable() {
+      public void run() {
+        while(true);
+      }
+    });
+    Thread.currentThread().interrupt();
+    client.stop();
+    try {
+      assertTrue(Thread.currentThread().isInterrupted());
+      LOG.info("Expected thread interrupt during client cleanup");
+    } catch (AssertionError e) {
+      LOG.error("The Client did not interrupt after handling an Interrupted Exception");
+      Assert.fail("The Client did not interrupt after handling an Interrupted Exception");
+    }
+    // Clear Thread interrupt
+    Thread.currentThread().interrupted();
+  }
+
   private long countOpenFileDescriptors() {
     return FD_DIR.list().length;
   }
@@ -1315,6 +1350,7 @@ public class TestIPC {
       Mockito.verify(mockFactory, Mockito.times(maxTimeoutRetries))
           .createSocket();
     }
+    client.stop();
   }
   
   private void doIpcVersionTest(


[44/50] [abbrv] hadoop git commit: HDFS-8384. Allow NN to start up if there are files that have a lease but are not under construction. Contributed by Jing Zhao.

Posted by vv...@apache.org.
HDFS-8384. Allow NN to start up if there are files that have a lease but are not under construction. Contributed by Jing Zhao.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8928729c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8928729c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8928729c

Branch: refs/heads/YARN-3926
Commit: 8928729c80af0a154524e06fb13ed9b191986a78
Parents: d16c4ee
Author: Jing Zhao <ji...@apache.org>
Authored: Fri Sep 4 11:42:22 2015 -0700
Committer: Jing Zhao <ji...@apache.org>
Committed: Fri Sep 4 11:42:22 2015 -0700

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt                    | 3 +++
 .../org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java   | 5 ++++-
 .../org/apache/hadoop/hdfs/server/namenode/LeaseManager.java   | 6 +++++-
 3 files changed, 12 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8928729c/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index e67c9d5..b1e53da 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -891,6 +891,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-9012. Move o.a.h.hdfs.protocol.datatransfer.PipelineAck class to
     hadoop-hdfs-client module. (Mingliang Liu via wheat9)
 
+    HDFS-8384. Allow NN to start up if there are files that have a lease but
+    are not under construction. (jing9)
+
   OPTIMIZATIONS
 
     HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8928729c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index adcb1d6..25b6928 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -3184,7 +3184,10 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
     assert hasWriteLock();
 
     FileUnderConstructionFeature uc = pendingFile.getFileUnderConstructionFeature();
-    Preconditions.checkArgument(uc != null);
+    if (uc == null) {
+      throw new IOException("Cannot finalize file " + src
+          + " because it is not under construction");
+    }
     leaseManager.removeLease(uc.getClientName(), pendingFile);
 
     pendingFile.recordModification(latestSnapshot);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8928729c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java
index 1a1edaf..7cd6f3d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java
@@ -108,7 +108,11 @@ public class LeaseManager {
     long numUCBlocks = 0;
     for (Long id : getINodeIdWithLeases()) {
       final INodeFile cons = fsnamesystem.getFSDirectory().getInode(id).asFile();
-      Preconditions.checkState(cons.isUnderConstruction());
+      if (!cons.isUnderConstruction()) {
+        LOG.warn("The file " + cons.getFullPathName()
+            + " is not under construction but has lease.");
+        continue;
+      }
       BlockInfo[] blocks = cons.getBlocks();
       if(blocks == null) {
         continue;


[23/50] [abbrv] hadoop git commit: YARN-3893. Both RMs in active state when Admin#transitionToActive fails from refreshAll() (Bibin A Chundatt via rohithsharmaks)

Posted by vv...@apache.org.
YARN-3893. Both RMs in active state when Admin#transitionToActive fails from refreshAll() (Bibin A Chundatt via rohithsharmaks)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7d6687fe
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7d6687fe
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7d6687fe

Branch: refs/heads/YARN-3926
Commit: 7d6687fe76f6152a577ff2298c358dd30fce41fb
Parents: 095ab9a
Author: Rohith Sharma K S <ro...@apache.org>
Authored: Wed Sep 2 15:22:48 2015 +0530
Committer: Rohith Sharma K S <ro...@apache.org>
Committed: Wed Sep 2 15:22:48 2015 +0530

----------------------------------------------------------------------
 hadoop-yarn-project/CHANGES.txt                 |  3 +
 .../server/resourcemanager/AdminService.java    | 20 ++++--
 .../resourcemanager/RMFatalEventType.java       |  5 +-
 .../yarn/server/resourcemanager/TestRMHA.java   | 71 ++++++++++++++++++++
 4 files changed, 94 insertions(+), 5 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7d6687fe/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index aade2d7..13fe9b0 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -868,6 +868,9 @@ Release 2.7.2 - UNRELEASED
     YARN-3857: Memory leak in ResourceManager with SIMPLE mode.
     (mujunchao via zxu)
 
+    YARN-3893. Both RMs in active state when Admin#transitionToActive fails
+    from refreshAll() (Bibin A Chundatt via rohithsharmaks)
+
 Release 2.7.1 - 2015-07-06
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7d6687fe/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/AdminService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/AdminService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/AdminService.java
index 20c9800..d96ed8c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/AdminService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/AdminService.java
@@ -297,6 +297,7 @@ public class AdminService extends CompositeService implements
     }
   }
 
+  @SuppressWarnings("unchecked")
   @Override
   public synchronized void transitionToActive(
       HAServiceProtocol.StateChangeRequestInfo reqInfo) throws IOException {
@@ -312,10 +313,6 @@ public class AdminService extends CompositeService implements
     checkHaStateChange(reqInfo);
     try {
       rm.transitionToActive();
-      // call all refresh*s for active RM to get the updated configurations.
-      refreshAll();
-      RMAuditLogger.logSuccess(user.getShortUserName(),
-          "transitionToActive", "RMHAProtocolService");
     } catch (Exception e) {
       RMAuditLogger.logFailure(user.getShortUserName(), "transitionToActive",
           "", "RMHAProtocolService",
@@ -323,6 +320,21 @@ public class AdminService extends CompositeService implements
       throw new ServiceFailedException(
           "Error when transitioning to Active mode", e);
     }
+    try {
+      // call all refresh*s for active RM to get the updated configurations.
+      refreshAll();
+    } catch (Exception e) {
+      LOG.error("RefreshAll failed so firing fatal event", e);
+      rmContext
+          .getDispatcher()
+          .getEventHandler()
+          .handle(
+          new RMFatalEvent(RMFatalEventType.TRANSITION_TO_ACTIVE_FAILED, e));
+      throw new ServiceFailedException(
+          "Error on refreshAll during transistion to Active", e);
+    }
+    RMAuditLogger.logSuccess(user.getShortUserName(), "transitionToActive",
+        "RMHAProtocolService");
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7d6687fe/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMFatalEventType.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMFatalEventType.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMFatalEventType.java
index 789c018..87cc496 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMFatalEventType.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMFatalEventType.java
@@ -26,5 +26,8 @@ public enum RMFatalEventType {
   STATE_STORE_OP_FAILED,
 
   // Source <- Embedded Elector
-  EMBEDDED_ELECTOR_FAILED
+  EMBEDDED_ELECTOR_FAILED,
+
+  // Source <- Admin Service
+  TRANSITION_TO_ACTIVE_FAILED
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7d6687fe/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMHA.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMHA.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMHA.java
index 0200e85..62cfe84 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMHA.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMHA.java
@@ -43,6 +43,8 @@ import org.apache.hadoop.service.AbstractService;
 import org.apache.hadoop.yarn.conf.HAUtil;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.event.Dispatcher;
+import org.apache.hadoop.yarn.event.DrainDispatcher;
+import org.apache.hadoop.yarn.event.Event;
 import org.apache.hadoop.yarn.event.EventHandler;
 import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
 import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.ApplicationStateData;
@@ -52,6 +54,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptState;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueMetrics;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration;
 import org.codehaus.jettison.json.JSONException;
 import org.codehaus.jettison.json.JSONObject;
 import org.junit.Assert;
@@ -577,6 +580,56 @@ public class TestRMHA {
     assertEquals(0, rm.getRMContext().getRMApps().size());
   }
 
+  @Test(timeout = 90000)
+  public void testTransitionedToActiveRefreshFail() throws Exception {
+    configuration.setBoolean(YarnConfiguration.AUTO_FAILOVER_ENABLED, false);
+    YarnConfiguration conf = new YarnConfiguration(configuration);
+    configuration = new CapacitySchedulerConfiguration(conf);
+    rm = new MockRM(configuration) {
+      @Override
+      protected AdminService createAdminService() {
+        return new AdminService(this, getRMContext()) {
+          @Override
+          protected void setConfig(Configuration conf) {
+            super.setConfig(configuration);
+          }
+        };
+      }
+
+      @Override
+      protected Dispatcher createDispatcher() {
+        return new FailFastDispatcher();
+      }
+    };
+
+    rm.init(configuration);
+    rm.start();
+    final StateChangeRequestInfo requestInfo =
+        new StateChangeRequestInfo(
+            HAServiceProtocol.RequestSource.REQUEST_BY_USER);
+
+    configuration.set("yarn.scheduler.capacity.root.default.capacity", "100");
+    rm.adminService.transitionToStandby(requestInfo);
+    assertEquals(HAServiceState.STANDBY, rm.getRMContext().getHAServiceState());
+    configuration.set("yarn.scheduler.capacity.root.default.capacity", "200");
+    try {
+      rm.adminService.transitionToActive(requestInfo);
+    } catch (Exception e) {
+      assertTrue("Error on refreshAll during transistion to Active".contains(e
+          .getMessage()));
+    }
+    FailFastDispatcher dispatcher =
+        ((FailFastDispatcher) rm.rmContext.getDispatcher());
+    dispatcher.await();
+    assertEquals(1, dispatcher.getEventCount());
+    // Making correct conf and check the state
+    configuration.set("yarn.scheduler.capacity.root.default.capacity", "100");
+    rm.adminService.transitionToActive(requestInfo);
+    assertEquals(HAServiceState.ACTIVE, rm.getRMContext().getHAServiceState());
+    rm.adminService.transitionToStandby(requestInfo);
+    assertEquals(HAServiceState.STANDBY, rm.getRMContext().getHAServiceState());
+  }
+
   public void innerTestHAWithRMHostName(boolean includeBindHost) {
     //this is run two times, with and without a bind host configured
     if (includeBindHost) {
@@ -713,4 +766,22 @@ public class TestRMHA {
       return this.stopped;
     }
   }
+
+  class FailFastDispatcher extends DrainDispatcher {
+    int eventreceived = 0;
+
+    @SuppressWarnings("rawtypes")
+    @Override
+    protected void dispatch(Event event) {
+      if (event.getType() == RMFatalEventType.TRANSITION_TO_ACTIVE_FAILED) {
+        eventreceived++;
+      } else {
+        super.dispatch(event);
+      }
+    }
+
+    public int getEventCount() {
+      return eventreceived;
+    }
+  }
 }


[19/50] [abbrv] hadoop git commit: HDFS-8995. Flaw in registration bookkeeping can make DN die on reconnect. (Kihwal Lee via yliu)

Posted by vv...@apache.org.
HDFS-8995. Flaw in registration bookkeeping can make DN die on reconnect. (Kihwal Lee via yliu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5652131d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5652131d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5652131d

Branch: refs/heads/YARN-3926
Commit: 5652131d2ea68c408dd3cd8bee31723642a8cdde
Parents: 4620767
Author: yliu <yl...@apache.org>
Authored: Wed Sep 2 08:58:51 2015 +0800
Committer: yliu <yl...@apache.org>
Committed: Wed Sep 2 08:58:51 2015 +0800

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt                   | 3 +++
 .../apache/hadoop/hdfs/server/datanode/BPOfferService.java    | 3 +--
 .../apache/hadoop/hdfs/server/datanode/BPServiceActor.java    | 7 ++++---
 .../java/org/apache/hadoop/hdfs/server/datanode/DataNode.java | 5 +----
 4 files changed, 9 insertions(+), 9 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5652131d/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 57ddcb2..ea398f2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1302,6 +1302,9 @@ Release 2.7.2 - UNRELEASED
 
     HDFS-8891. HDFS concat should keep srcs order. (Yong Zhang via jing9)
 
+    HDFS-8995. Flaw in registration bookkeeping can make DN die on reconnect.
+    (Kihwal Lee via yliu)
+
 Release 2.7.1 - 2015-07-06
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5652131d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
index 92323f1..7aab4f7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
@@ -351,9 +351,8 @@ class BPOfferService {
             reg.getStorageInfo().getNamespaceID(), "namespace ID");
         checkNSEquality(bpRegistration.getStorageInfo().getClusterID(),
             reg.getStorageInfo().getClusterID(), "cluster ID");
-      } else {
-        bpRegistration = reg;
       }
+      bpRegistration = reg;
 
       dn.bpRegistrationSucceeded(bpRegistration, getBlockPoolId());
       // Add the initial block token secret keys to the DN's secret manager.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5652131d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
index 1817427..85ea6ae 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
@@ -767,15 +767,16 @@ class BPServiceActor implements Runnable {
   void register(NamespaceInfo nsInfo) throws IOException {
     // The handshake() phase loaded the block pool storage
     // off disk - so update the bpRegistration object from that info
-    bpRegistration = bpos.createRegistration();
+    DatanodeRegistration newBpRegistration = bpos.createRegistration();
 
     LOG.info(this + " beginning handshake with NN");
 
     while (shouldRun()) {
       try {
         // Use returned registration from namenode with updated fields
-        bpRegistration = bpNamenode.registerDatanode(bpRegistration);
-        bpRegistration.setNamespaceInfo(nsInfo);
+        newBpRegistration = bpNamenode.registerDatanode(newBpRegistration);
+        newBpRegistration.setNamespaceInfo(nsInfo);
+        bpRegistration = newBpRegistration;
         break;
       } catch(EOFException e) {  // namenode might have just restarted
         LOG.info("Problem connecting to server: " + nnAddr + " :"

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5652131d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index 5bc50b0..42cbd96 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -1261,10 +1261,7 @@ public class DataNode extends ReconfigurableBase
    */
   synchronized void bpRegistrationSucceeded(DatanodeRegistration bpRegistration,
       String blockPoolId) throws IOException {
-    // Set the ID if we haven't already
-    if (null == id) {
-      id = bpRegistration;
-    }
+    id = bpRegistration;
 
     if(!storage.getDatanodeUuid().equals(bpRegistration.getDatanodeUuid())) {
       throw new IOException("Inconsistent Datanode IDs. Name-node returned "


[14/50] [abbrv] hadoop git commit: YARN-2801. Add documentation for node labels feature. Contributed by Wangda Tan and Naganarasimha G R.

Posted by vv...@apache.org.
YARN-2801. Add documentation for node labels feature. Contributed by Wangda Tan and Naganarasimha G R.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/faa38e1a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/faa38e1a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/faa38e1a

Branch: refs/heads/YARN-3926
Commit: faa38e1aa49907254bf981662a8aeb5dc52e75e3
Parents: bf669b6
Author: Tsuyoshi Ozawa <oz...@apache.org>
Authored: Tue Sep 1 17:54:14 2015 +0900
Committer: Tsuyoshi Ozawa <oz...@apache.org>
Committed: Tue Sep 1 17:54:49 2015 +0900

----------------------------------------------------------------------
 hadoop-project/src/site/site.xml                |   1 +
 hadoop-yarn-project/CHANGES.txt                 |   3 +
 .../src/site/markdown/NodeLabel.md              | 140 +++++++++++++++++++
 3 files changed, 144 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/faa38e1a/hadoop-project/src/site/site.xml
----------------------------------------------------------------------
diff --git a/hadoop-project/src/site/site.xml b/hadoop-project/src/site/site.xml
index ee0dfcd..272ea7b 100644
--- a/hadoop-project/src/site/site.xml
+++ b/hadoop-project/src/site/site.xml
@@ -121,6 +121,7 @@
       <item name="Fair Scheduler" href="hadoop-yarn/hadoop-yarn-site/FairScheduler.html"/>
       <item name="ResourceManager Restart" href="hadoop-yarn/hadoop-yarn-site/ResourceManagerRestart.html"/>
       <item name="ResourceManager HA" href="hadoop-yarn/hadoop-yarn-site/ResourceManagerHA.html"/>
+      <item name="Node Labels" href="hadoop-yarn/hadoop-yarn-site/NodeLabel.html"/>
       <item name="Web Application Proxy" href="hadoop-yarn/hadoop-yarn-site/WebApplicationProxy.html"/>
       <item name="Timeline Server" href="hadoop-yarn/hadoop-yarn-site/TimelineServer.html"/>
       <item name="Writing YARN Applications" href="hadoop-yarn/hadoop-yarn-site/WritingYarnApplications.html"/>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/faa38e1a/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 999654d..e2b1307 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -827,6 +827,9 @@ Release 2.7.2 - UNRELEASED
     YARN-4092. Fixed UI redirection to print useful messages when both RMs are
     in standby mode. (Xuan Gong via jianhe)
 
+    YARN-2801. Add documentation for node labels feature. (Wangda Tan and Naganarasimha 
+    G R  via ozawa)
+
   OPTIMIZATIONS
 
   BUG FIXES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/faa38e1a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/NodeLabel.md
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/NodeLabel.md b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/NodeLabel.md
new file mode 100644
index 0000000..87019cd
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/NodeLabel.md
@@ -0,0 +1,140 @@
+<!---
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License. See accompanying LICENSE file.
+-->
+
+YARN Node Labels
+===============
+
+# Overview
+Node labels are a way to group nodes with similar characteristics, and applications can specify where to run.
+
+Currently only node partitions are supported, meaning:
+
+* One node can have at most one node partition, so a cluster is divided into several disjoint sub-clusters by node partitions. By default, nodes belong to the DEFAULT partition (partition="").
+* Users need to configure how much of each partition's resources can be used by different queues. For more detail, please refer to the next section.
+* There are two kinds of node partitions:
+    * Exclusive: containers are allocated only to nodes whose partition exactly matches the request (e.g. a request for partition=“x” is allocated to a node with partition=“x”; a request for the DEFAULT partition is allocated to DEFAULT partition nodes).
+    * Non-exclusive: a non-exclusive partition shares its idle resources with containers requesting the DEFAULT partition.
+
+Users can specify the set of node labels each queue can access; an application can only use a subset of the node labels accessible to the queue it was submitted to.
+
+# Features
+```Node Labels``` currently supports the following features:
+
+* Partition cluster - each node can be assigned one label, so the cluster is divided into several smaller disjoint partitions.
+* ACLs of node labels on queues - users can set the accessible node labels on each queue, so that some nodes can only be accessed by specific queues.
+* Specify the percentage of a partition's resources a queue can access - users can set percentages such as: queue A can access 30% of the resources on nodes with label=hbase. Such percentage settings are consistent with the existing resource manager behavior.
+* Specify a required node label in a resource request; it is only allocated when a node has the same label. If no node label requirement is specified, such a resource request is only allocated on nodes belonging to the DEFAULT partition.
+* Operability
+    * Node labels and node-to-labels mappings can be recovered across RM restarts
+    * Update node labels - admins can update labels on nodes and labels on queues
+      while the RM is running
+
+# Configuration
+## Setting up ```ResourceManager``` to enable ```Node Labels```:
+
+Set up the following properties in ```yarn-site.xml```:
+
+Property  | Value
+--- | ----
+yarn.node-labels.fs-store.root-dir  | hdfs://namenode:port/path/to/store/node-labels/
+yarn.node-labels.enabled | true
+
+Notes:
+
+* Make sure ```yarn.node-labels.fs-store.root-dir``` is created and the ```ResourceManager``` has permission to access it (typically as the “yarn” user).
+* To store node labels on the local file system of the RM (instead of HDFS), paths like `file:///home/yarn/node-label` can be used.
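+
+A minimal sketch of the corresponding ```yarn-site.xml``` entries, using the store path from the table above:
+
+```
+<property>
+  <name>yarn.node-labels.enabled</name>
+  <value>true</value>
+</property>
+<property>
+  <name>yarn.node-labels.fs-store.root-dir</name>
+  <value>hdfs://namenode:port/path/to/store/node-labels/</value>
+</property>
+```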
+
+### Add/modify node labels list and node-to-labels mapping to YARN
+* Add cluster node labels list:
+    * Execute ```yarn rmadmin -addToClusterNodeLabels "label_1(exclusive=true/false),label_2(exclusive=true/false)"``` to add node labels.
+    * If “(exclusive=…)” is not specified, exclusive defaults to ```true```.
+    * Run ```yarn cluster --list-node-labels``` to check that the added node labels are visible in the cluster.
+
+* Add labels to nodes
+    * Execute ```yarn rmadmin -replaceLabelsOnNode “node1[:port]=label1 node2=label2”``` to add label1 to node1 and label2 to node2. If no port is specified, the label is added to all ```NodeManagers``` running on the node.
+
+## Configuration of Schedulers for node labels
+### Capacity Scheduler Configuration
+Property  | Value
+----- | ------
+yarn.scheduler.capacity.`<queue-path>`.capacity | Set the percentage of the DEFAULT partition's resources the queue can access. The sum of DEFAULT capacities for direct children under each parent must equal 100.
+yarn.scheduler.capacity.`<queue-path>`.accessible-node-labels | Admins need to specify the labels each queue can access, comma-separated; e.g. “hbase,storm” means the queue can access the labels hbase and storm. All queues can access nodes without a label, so this does not have to be specified. If this field is not specified, it is inherited from the parent queue. To explicitly specify that a queue can only access nodes without labels, put a single space as the value.
+yarn.scheduler.capacity.`<queue-path>`.accessible-node-labels.`<label>`.capacity | Set the percentage of the `<label>` partition's resources the queue can access. The sum of `<label>` capacities for direct children under each parent must equal 100. By default, it's 0.
+yarn.scheduler.capacity.`<queue-path>`.accessible-node-labels.`<label>`.maximum-capacity | Similar to yarn.scheduler.capacity.`<queue-path>`.maximum-capacity, but for the maximum capacity of each queue's labels. By default, it's 100.
+yarn.scheduler.capacity.`<queue-path>`.default-node-label-expression | A value like “hbase” means: if applications are submitted to the queue without specifying a node label in their resource requests, “hbase” is used as the default node label expression. By default, this is empty, so applications get containers from nodes without a label.
+
+**An example of node label configuration**:
+
+Assume we have a queue structure
+
+```
+                root
+            /     |    \
+    engineering  sales  marketing
+```
+
+We have 5 nodes (hostname=h1..h5) in the cluster, each with 24G memory and 24 vcores. One of the 5 nodes has a GPU (assume it's h5), so the admin added the GPU label to h5.
+
+Assume the user has a Capacity Scheduler configuration like the following (key=value is used here for readability):
+
+```
+yarn.scheduler.capacity.root.queues=engineering,marketing,sales
+yarn.scheduler.capacity.root.engineering.capacity=33
+yarn.scheduler.capacity.root.marketing.capacity=34
+yarn.scheduler.capacity.root.sales.capacity=33
+
+yarn.scheduler.capacity.root.engineering.accessible-node-labels=GPU
+yarn.scheduler.capacity.root.marketing.accessible-node-labels=GPU
+
+yarn.scheduler.capacity.root.engineering.accessible-node-labels.GPU.capacity=50
+yarn.scheduler.capacity.root.marketing.accessible-node-labels.GPU.capacity=50
+
+yarn.scheduler.capacity.root.engineering.default-node-label-expression=GPU
+```
+
+Since root.engineering/marketing/sales.capacity≈33, each of them has a guaranteed resource equal to roughly 1/3 of the resources **without partition**. So each of them can use roughly 1/3 of the resources of h1..h4, which is 24 * 4 * (1/3) = (32G mem, 32 v-cores).
+
+Only the engineering and marketing queues have permission to access the GPU partition (see root.`<queue-name>`.accessible-node-labels).
+
+Each of the engineering and marketing queues has a guaranteed resource equal to 1/2 of the resources **with partition=GPU**. So each of them can use 1/2 of the resources of h5, which is 24 * 0.5 = (12G mem, 12 v-cores).
+
+Notes:
+
+* After finishing the CapacityScheduler configuration, execute ```yarn rmadmin -refreshQueues``` to apply the changes
+* Go to the scheduler page of the RM web UI to check whether the configuration was applied successfully.
+
+# Specifying node label for application
+Applications can use the following Java APIs to specify the node label to request (see the sketch after this list):
+
+* `ApplicationSubmissionContext.setNodeLabelExpression(..)` sets the node label expression for all containers of the application.
+* `ResourceRequest.setNodeLabelExpression(..)` sets the node label expression for individual resource requests. This overrides the node label expression set in ApplicationSubmissionContext.
+* Set the node label expression on the AM container resource request (`setAMContainerResourceRequest(..)`) in `ApplicationSubmissionContext` to indicate the expected node label for the application master container.
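+
+A hedged sketch of these APIs, assuming an already-built `ApplicationSubmissionContext appContext` and the `org.apache.hadoop.yarn.api.records` classes (the request values are illustrative):
+
+```
+// Run every container of this application on GPU-labeled nodes by default.
+appContext.setNodeLabelExpression("GPU");
+
+// Override the label for one particular resource request:
+// 4 containers of 2048 MB / 2 vcores, anywhere in the GPU partition.
+ResourceRequest req = ResourceRequest.newInstance(
+    Priority.newInstance(1), ResourceRequest.ANY,
+    Resource.newInstance(2048, 2), 4);
+req.setNodeLabelExpression("GPU");
+```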
+
+# Monitoring
+
+## Monitoring through web UI
+The following label-related fields can be seen on the web UI:
+
+* Nodes page: http://RM-Address:port/cluster/nodes, shows the labels on each node
+* Node labels page: http://RM-Address:port/cluster/nodelabels, shows the type (exclusive/non-exclusive), the number of active node managers, and the total resources of each partition
+* Scheduler page: http://RM-Address:port/cluster/scheduler, shows the label-related settings of each queue and the resource usage of queue partitions.
+
+## Monitoring through commandline
+
+* Use `yarn cluster --list-node-labels` to list the labels in the cluster
+* Use `yarn node -status <NodeId>` to get the status of a given node, including its labels
+
+# Useful links
+* [YARN Capacity Scheduler](http://hadoop.apache.org/docs/current/hadoop-yarn/hadoop-yarn-site/CapacityScheduler.html), for more detail on how to configure the Capacity Scheduler
+* To write YARN applications using node labels, see the following two links as examples: [YARN distributed shell](https://issues.apache.org/jira/browse/YARN-2502), [Hadoop MapReduce](https://issues.apache.org/jira/browse/MAPREDUCE-6304)


[43/50] [abbrv] hadoop git commit: HDFS-9012. Move o.a.h.hdfs.protocol.datatransfer.PipelineAck class to hadoop-hdfs-client module. Contributed by Mingliang Liu.

Posted by vv...@apache.org.
HDFS-9012. Move o.a.h.hdfs.protocol.datatransfer.PipelineAck class to hadoop-hdfs-client module. Contributed by Mingliang Liu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d16c4eee
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d16c4eee
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d16c4eee

Branch: refs/heads/YARN-3926
Commit: d16c4eee186492608ffeb1c2e83f437000cc64f6
Parents: 6eaca2e
Author: Haohui Mai <wh...@apache.org>
Authored: Fri Sep 4 10:41:09 2015 -0700
Committer: Haohui Mai <wh...@apache.org>
Committed: Fri Sep 4 10:41:09 2015 -0700

----------------------------------------------------------------------
 .../hdfs/protocol/datatransfer/PipelineAck.java | 243 ++++++++++++++++
 .../apache/hadoop/hdfs/util/LongBitFormat.java  |  71 +++++
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |   3 +
 .../hdfs/protocol/datatransfer/PipelineAck.java | 274 -------------------
 .../hdfs/server/datanode/BlockReceiver.java     |   2 +-
 .../hadoop/hdfs/server/datanode/DataNode.java   |  38 +++
 .../apache/hadoop/hdfs/util/LongBitFormat.java  |  71 -----
 7 files changed, 356 insertions(+), 346 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d16c4eee/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/PipelineAck.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/PipelineAck.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/PipelineAck.java
new file mode 100644
index 0000000..3836606
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/PipelineAck.java
@@ -0,0 +1,243 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.protocol.datatransfer;
+
+import static org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.util.ArrayList;
+
+import com.google.common.collect.Lists;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto;
+import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
+import com.google.protobuf.TextFormat;
+import org.apache.hadoop.hdfs.util.LongBitFormat;
+
+/** Pipeline Acknowledgment **/
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public class PipelineAck {
+  PipelineAckProto proto;
+  public final static long UNKOWN_SEQNO = -2;
+  final static int OOB_START = Status.OOB_RESTART_VALUE; // the first OOB type
+  final static int OOB_END = Status.OOB_RESERVED3_VALUE; // the last OOB type
+
+  public enum ECN {
+    DISABLED(0),
+    SUPPORTED(1),
+    SUPPORTED2(2),
+    CONGESTED(3);
+
+    private final int value;
+    private static final ECN[] VALUES = values();
+    static ECN valueOf(int value) {
+      return VALUES[value];
+    }
+
+    ECN(int value) {
+      this.value = value;
+    }
+
+    public int getValue() {
+      return value;
+    }
+  }
+
+  private enum StatusFormat {
+    STATUS(null, 4),
+    RESERVED(STATUS.BITS, 1),
+    ECN_BITS(RESERVED.BITS, 2);
+
+    private final LongBitFormat BITS;
+
+    StatusFormat(LongBitFormat prev, int bits) {
+      BITS = new LongBitFormat(name(), prev, bits, 0);
+    }
+
+    static Status getStatus(int header) {
+      return Status.valueOf((int) STATUS.BITS.retrieve(header));
+    }
+
+    static ECN getECN(int header) {
+      return ECN.valueOf((int) ECN_BITS.BITS.retrieve(header));
+    }
+
+    public static int setStatus(int old, Status status) {
+      return (int) STATUS.BITS.combine(status.getNumber(), old);
+    }
+
+    public static int setECN(int old, ECN ecn) {
+      return (int) ECN_BITS.BITS.combine(ecn.getValue(), old);
+    }
+  }
+
+  /** default constructor **/
+  public PipelineAck() {
+  }
+  
+  /**
+   * Constructor assuming no next DN in pipeline
+   * @param seqno sequence number
+   * @param replies an array of replies
+   */
+  public PipelineAck(long seqno, int[] replies) {
+    this(seqno, replies, 0L);
+  }
+
+  /**
+   * Constructor
+   * @param seqno sequence number
+   * @param replies an array of replies
+   * @param downstreamAckTimeNanos ack RTT in nanoseconds, 0 if no next DN in pipeline
+   */
+  public PipelineAck(long seqno, int[] replies,
+                     long downstreamAckTimeNanos) {
+    ArrayList<Status> statusList = Lists.newArrayList();
+    ArrayList<Integer> flagList = Lists.newArrayList();
+    for (int r : replies) {
+      statusList.add(StatusFormat.getStatus(r));
+      flagList.add(r);
+    }
+    proto = PipelineAckProto.newBuilder()
+      .setSeqno(seqno)
+      .addAllReply(statusList)
+      .addAllFlag(flagList)
+      .setDownstreamAckTimeNanos(downstreamAckTimeNanos)
+      .build();
+  }
+  
+  /**
+   * Get the sequence number
+   * @return the sequence number
+   */
+  public long getSeqno() {
+    return proto.getSeqno();
+  }
+  
+  /**
+   * Get the number of replies
+   * @return the number of replies
+   */
+  public short getNumOfReplies() {
+    return (short)proto.getReplyCount();
+  }
+  
+  /**
+   * get the header flag of ith reply
+   */
+  public int getHeaderFlag(int i) {
+    if (proto.getFlagCount() > 0) {
+      return proto.getFlag(i);
+    } else {
+      return combineHeader(ECN.DISABLED, proto.getReply(i));
+    }
+  }
+
+  public int getFlag(int i) {
+    return proto.getFlag(i);
+  }
+
+  /**
+   * Get the time elapsed for downstream ack RTT in nanoseconds
+   * @return time elapsed for downstream ack in nanoseconds, 0 if no next DN in pipeline
+   */
+  public long getDownstreamAckTimeNanos() {
+    return proto.getDownstreamAckTimeNanos();
+  }
+
+  /**
+   * Check if this ack contains error status
+   * @return true if all statuses are SUCCESS
+   */
+  public boolean isSuccess() {
+    for (Status s : proto.getReplyList()) {
+      if (s != Status.SUCCESS) {
+        return false;
+      }
+    }
+    return true;
+  }
+
+  /**
+   * Returns the OOB status if this ack contains one. 
+   * @return null if it is not an OOB ack.
+   */
+  public Status getOOBStatus() {
+    // Normal data transfer acks will have a valid sequence number, so
+    // this will return right away in most cases.
+    if (getSeqno() != UNKOWN_SEQNO) {
+      return null;
+    }
+    for (Status s : proto.getReplyList()) {
+      // The following check is valid because protobuf guarantees to
+      // preserve the ordering of enum elements.
+      if (s.getNumber() >= OOB_START && s.getNumber() <= OOB_END) {
+        return s;
+      }
+    }
+    return null;
+  }
+
+  /** Get the Restart OOB ack status */
+  public static Status getRestartOOBStatus() {
+    return Status.OOB_RESTART;
+  }
+
+  /** return true if it is the restart OOB status code  */
+  public static boolean isRestartOOBStatus(Status st) {
+    return st.equals(Status.OOB_RESTART);
+  }
+
+  /**** Writable interface ****/
+  public void readFields(InputStream in) throws IOException {
+    proto = PipelineAckProto.parseFrom(vintPrefixed(in));
+  }
+
+  public void write(OutputStream out) throws IOException {
+    proto.writeDelimitedTo(out);
+  }
+  
+  @Override //Object
+  public String toString() {
+    return TextFormat.shortDebugString(proto);
+  }
+
+  public static Status getStatusFromHeader(int header) {
+    return StatusFormat.getStatus(header);
+  }
+
+  public static ECN getECNFromHeader(int header) {
+    return StatusFormat.getECN(header);
+  }
+
+  public static int setStatusForHeader(int old, Status status) {
+    return StatusFormat.setStatus(old, status);
+  }
+
+  public static int combineHeader(ECN ecn, Status status) {
+    int header = 0;
+    header = StatusFormat.setStatus(header, status);
+    header = StatusFormat.setECN(header, ecn);
+    return header;
+  }
+}
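
As a quick orientation to the header format above — STATUS in the low 4 bits, one reserved bit, then 2 ECN bits — a hedged usage sketch (assumes the protobuf-generated Status enum from DataTransferProtos is on the classpath):

    import org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck;
    import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;

    // Pack an ECN marking and a reply status into one header int, then unpack.
    int header = PipelineAck.combineHeader(PipelineAck.ECN.SUPPORTED, Status.SUCCESS);
    Status s = PipelineAck.getStatusFromHeader(header);        // Status.SUCCESS
    PipelineAck.ECN e = PipelineAck.getECNFromHeader(header);  // ECN.SUPPORTED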

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d16c4eee/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/LongBitFormat.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/LongBitFormat.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/LongBitFormat.java
new file mode 100644
index 0000000..9399d84
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/LongBitFormat.java
@@ -0,0 +1,71 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.util;
+
+import java.io.Serializable;
+
+
+/**
+ * Bit format in a long.
+ */
+public class LongBitFormat implements Serializable {
+  private static final long serialVersionUID = 1L;
+
+  private final String NAME;
+  /** Bit offset */
+  private final int OFFSET;
+  /** Bit length */
+  private final int LENGTH;
+  /** Minimum value */
+  private final long MIN;
+  /** Maximum value */
+  private final long MAX;
+  /** Bit mask */
+  private final long MASK;
+
+  public LongBitFormat(String name, LongBitFormat previous, int length, long min) {
+    NAME = name;
+    OFFSET = previous == null? 0: previous.OFFSET + previous.LENGTH;
+    LENGTH = length;
+    MIN = min;
+    MAX = ((-1L) >>> (64 - LENGTH));
+    MASK = MAX << OFFSET;
+  }
+
+  /** Retrieve the value from the record. */
+  public long retrieve(long record) {
+    return (record & MASK) >>> OFFSET;
+  }
+
+  /** Combine the value to the record. */
+  public long combine(long value, long record) {
+    if (value < MIN) {
+      throw new IllegalArgumentException(
+          "Illagal value: " + NAME + " = " + value + " < MIN = " + MIN);
+    }
+    if (value > MAX) {
+      throw new IllegalArgumentException(
+          "Illagal value: " + NAME + " = " + value + " > MAX = " + MAX);
+    }
+    return (record & ~MASK) | (value << OFFSET);
+  }
+  
+  public long getMin() {
+    return MIN;
+  }
+}
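
LongBitFormat chains formats so each field's offset starts where the previous one ends; the PipelineAck source below uses exactly this to lay out a 4-bit STATUS, a reserved bit, and 2 ECN bits inside one header. A small usage sketch, assuming this class is on the classpath; the packed values are arbitrary:

    import org.apache.hadoop.hdfs.util.LongBitFormat;

    public class LongBitFormatDemo {
      public static void main(String[] args) {
        LongBitFormat status = new LongBitFormat("STATUS", null, 4, 0);       // bits 0-3
        LongBitFormat reserved = new LongBitFormat("RESERVED", status, 1, 0); // bit 4
        LongBitFormat ecn = new LongBitFormat("ECN", reserved, 2, 0);         // bits 5-6

        long header = 0;
        header = status.combine(6, header);  // 6 is an illustrative status code
        header = ecn.combine(3, header);     // 3 matches ECN.CONGESTED's value

        System.out.println(status.retrieve(header)); // 6
        System.out.println(ecn.retrieve(header));    // 3
      }
    }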

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d16c4eee/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index b9b89aa..e67c9d5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -888,6 +888,9 @@ Release 2.8.0 - UNRELEASED
 
     HDFS-9021. Use a yellow elephant rather than a blue one in diagram. (wang)
 
+    HDFS-9012. Move o.a.h.hdfs.protocol.datatransfer.PipelineAck class to
+    hadoop-hdfs-client module. (Mingliang Liu via wheat9)
+
   OPTIMIZATIONS
 
     HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d16c4eee/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/PipelineAck.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/PipelineAck.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/PipelineAck.java
deleted file mode 100644
index 44f38c6..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/PipelineAck.java
+++ /dev/null
@@ -1,274 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.protocol.datatransfer;
-
-import static org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed;
-
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.OutputStream;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collections;
-
-import com.google.common.collect.Lists;
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.hdfs.HdfsConfiguration;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_OOB_TIMEOUT_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_OOB_TIMEOUT_DEFAULT;
-
-import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto;
-import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
-import com.google.protobuf.TextFormat;
-import org.apache.hadoop.hdfs.util.LongBitFormat;
-
-/** Pipeline Acknowledgment **/
-@InterfaceAudience.Private
-@InterfaceStability.Evolving
-public class PipelineAck {
-  PipelineAckProto proto;
-  public final static long UNKOWN_SEQNO = -2;
-  final static int OOB_START = Status.OOB_RESTART_VALUE; // the first OOB type
-  final static int OOB_END = Status.OOB_RESERVED3_VALUE; // the last OOB type
-  final static int NUM_OOB_TYPES = OOB_END - OOB_START + 1;
-  // place holder for timeout value of each OOB type
-  final static long[] OOB_TIMEOUT;
-
-  public enum ECN {
-    DISABLED(0),
-    SUPPORTED(1),
-    SUPPORTED2(2),
-    CONGESTED(3);
-
-    private final int value;
-    private static final ECN[] VALUES = values();
-    static ECN valueOf(int value) {
-      return VALUES[value];
-    }
-
-    ECN(int value) {
-      this.value = value;
-    }
-
-    public int getValue() {
-      return value;
-    }
-  }
-
-  private enum StatusFormat {
-    STATUS(null, 4),
-    RESERVED(STATUS.BITS, 1),
-    ECN_BITS(RESERVED.BITS, 2);
-
-    private final LongBitFormat BITS;
-
-    StatusFormat(LongBitFormat prev, int bits) {
-      BITS = new LongBitFormat(name(), prev, bits, 0);
-    }
-
-    static Status getStatus(int header) {
-      return Status.valueOf((int) STATUS.BITS.retrieve(header));
-    }
-
-    static ECN getECN(int header) {
-      return ECN.valueOf((int) ECN_BITS.BITS.retrieve(header));
-    }
-
-    public static int setStatus(int old, Status status) {
-      return (int) STATUS.BITS.combine(status.getNumber(), old);
-    }
-
-    public static int setECN(int old, ECN ecn) {
-      return (int) ECN_BITS.BITS.combine(ecn.getValue(), old);
-    }
-  }
-
-  static {
-    OOB_TIMEOUT = new long[NUM_OOB_TYPES];
-    HdfsConfiguration conf = new HdfsConfiguration();
-    String[] ele = conf.get(DFS_DATANODE_OOB_TIMEOUT_KEY,
-        DFS_DATANODE_OOB_TIMEOUT_DEFAULT).split(",");
-    for (int i = 0; i < NUM_OOB_TYPES; i++) {
-      OOB_TIMEOUT[i] = (i < ele.length) ? Long.parseLong(ele[i]) : 0;
-    }
-  }
-
-  /** default constructor **/
-  public PipelineAck() {
-  }
-  
-  /**
-   * Constructor assuming no next DN in pipeline
-   * @param seqno sequence number
-   * @param replies an array of replies
-   */
-  public PipelineAck(long seqno, int[] replies) {
-    this(seqno, replies, 0L);
-  }
-
-  /**
-   * Constructor
-   * @param seqno sequence number
-   * @param replies an array of replies
-   * @param downstreamAckTimeNanos ack RTT in nanoseconds, 0 if no next DN in pipeline
-   */
-  public PipelineAck(long seqno, int[] replies,
-                     long downstreamAckTimeNanos) {
-    ArrayList<Status> statusList = Lists.newArrayList();
-    ArrayList<Integer> flagList = Lists.newArrayList();
-    for (int r : replies) {
-      statusList.add(StatusFormat.getStatus(r));
-      flagList.add(r);
-    }
-    proto = PipelineAckProto.newBuilder()
-      .setSeqno(seqno)
-      .addAllReply(statusList)
-      .addAllFlag(flagList)
-      .setDownstreamAckTimeNanos(downstreamAckTimeNanos)
-      .build();
-  }
-  
-  /**
-   * Get the sequence number
-   * @return the sequence number
-   */
-  public long getSeqno() {
-    return proto.getSeqno();
-  }
-  
-  /**
-   * Get the number of replies
-   * @return the number of replies
-   */
-  public short getNumOfReplies() {
-    return (short)proto.getReplyCount();
-  }
-  
-  /**
-   * get the header flag of ith reply
-   */
-  public int getHeaderFlag(int i) {
-    if (proto.getFlagCount() > 0) {
-      return proto.getFlag(i);
-    } else {
-      return combineHeader(ECN.DISABLED, proto.getReply(i));
-    }
-  }
-
-  public int getFlag(int i) {
-    return proto.getFlag(i);
-  }
-
-  /**
-   * Get the time elapsed for downstream ack RTT in nanoseconds
-   * @return time elapsed for downstream ack in nanoseconds, 0 if no next DN in pipeline
-   */
-  public long getDownstreamAckTimeNanos() {
-    return proto.getDownstreamAckTimeNanos();
-  }
-
-  /**
-   * Check if this ack contains error status
-   * @return true if all statuses are SUCCESS
-   */
-  public boolean isSuccess() {
-    for (Status s : proto.getReplyList()) {
-      if (s != Status.SUCCESS) {
-        return false;
-      }
-    }
-    return true;
-  }
-
-  /**
-   * Returns the OOB status if this ack contains one. 
-   * @return null if it is not an OOB ack.
-   */
-  public Status getOOBStatus() {
-    // Normal data transfer acks will have a valid sequence number, so
-    // this will return right away in most cases.
-    if (getSeqno() != UNKOWN_SEQNO) {
-      return null;
-    }
-    for (Status s : proto.getReplyList()) {
-      // The following check is valid because protobuf guarantees to
-      // preserve the ordering of enum elements.
-      if (s.getNumber() >= OOB_START && s.getNumber() <= OOB_END) {
-        return s;
-      }
-    }
-    return null;
-  }
-
-  /**
-   * Get the timeout to be used for transmitting the OOB type
-   * @return the timeout in milliseconds
-   */
-  public static long getOOBTimeout(Status status) throws IOException {
-    int index = status.getNumber() - OOB_START;
-    if (index >= 0 && index < NUM_OOB_TYPES) {
-      return OOB_TIMEOUT[index];
-    } 
-    // Not an OOB.
-    throw new IOException("Not an OOB status: " + status);
-  }
-
-  /** Get the Restart OOB ack status */
-  public static Status getRestartOOBStatus() {
-    return Status.OOB_RESTART;
-  }
-
-  /** return true if it is the restart OOB status code  */
-  public static boolean isRestartOOBStatus(Status st) {
-    return st.equals(Status.OOB_RESTART);
-  }
-
-  /**** Writable interface ****/
-  public void readFields(InputStream in) throws IOException {
-    proto = PipelineAckProto.parseFrom(vintPrefixed(in));
-  }
-
-  public void write(OutputStream out) throws IOException {
-    proto.writeDelimitedTo(out);
-  }
-  
-  @Override //Object
-  public String toString() {
-    return TextFormat.shortDebugString(proto);
-  }
-
-  public static Status getStatusFromHeader(int header) {
-    return StatusFormat.getStatus(header);
-  }
-
-  public static ECN getECNFromHeader(int header) {
-    return StatusFormat.getECN(header);
-  }
-
-  public static int setStatusForHeader(int old, Status status) {
-    return StatusFormat.setStatus(old, status);
-  }
-
-  public static int combineHeader(ECN ecn, Status status) {
-    int header = 0;
-    header = StatusFormat.setStatus(header, status);
-    header = StatusFormat.setECN(header, ecn);
-    return header;
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d16c4eee/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
index 1cb308f..bc5396f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
@@ -1153,7 +1153,7 @@ class BlockReceiver implements Closeable {
 
       synchronized(this) {
         if (sending) {
-          wait(PipelineAck.getOOBTimeout(ackStatus));
+          wait(datanode.getOOBTimeout(ackStatus));
           // Didn't get my turn in time. Give up.
           if (sending) {
             throw new IOException("Could not send OOB reponse in time: "

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d16c4eee/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index 42cbd96..e0adc6d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -40,6 +40,8 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_KEYTAB_FILE_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_NETWORK_COUNTS_CACHE_MAX_SIZE_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_NETWORK_COUNTS_CACHE_MAX_SIZE_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_OOB_TIMEOUT_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_OOB_TIMEOUT_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_PLUGINS_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_STARTUP_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_MAX_NUM_BLOCKS_TO_LOG_DEFAULT;
@@ -359,6 +361,8 @@ public class DataNode extends ReconfigurableBase
       .availableProcessors();
   private static final double CONGESTION_RATIO = 1.5;
 
+  private long[] oobTimeouts; // timeout value of each OOB type
+
   /**
    * Creates a dummy DataNode for testing purpose.
    */
@@ -373,6 +377,7 @@ public class DataNode extends ReconfigurableBase
     this.connectToDnViaHostname = false;
     this.blockScanner = new BlockScanner(this, conf);
     this.pipelineSupportECN = false;
+    initOOBTimeout();
   }
 
   /**
@@ -446,6 +451,8 @@ public class DataNode extends ReconfigurableBase
                 return ret;
               }
             });
+
+    initOOBTimeout();
   }
 
   @Override  // ReconfigurableBase
@@ -3226,4 +3233,35 @@ public class DataNode extends ReconfigurableBase
     checkSuperuserPrivilege();
     spanReceiverHost.removeSpanReceiver(id);
   }
+
+  /**
+   * Get timeout value of each OOB type from configuration
+   */
+  private void initOOBTimeout() {
+    final int oobStart = Status.OOB_RESTART_VALUE; // the first OOB type
+    final int oobEnd = Status.OOB_RESERVED3_VALUE; // the last OOB type
+    final int numOobTypes = oobEnd - oobStart + 1;
+    oobTimeouts = new long[numOobTypes];
+
+    final String[] ele = conf.get(DFS_DATANODE_OOB_TIMEOUT_KEY,
+        DFS_DATANODE_OOB_TIMEOUT_DEFAULT).split(",");
+    for (int i = 0; i < numOobTypes; i++) {
+      oobTimeouts[i] = (i < ele.length) ? Long.parseLong(ele[i]) : 0;
+    }
+  }
+
+  /**
+   * Get the timeout to be used for transmitting the OOB type
+   * @return the timeout in milliseconds
+   */
+  public long getOOBTimeout(Status status)
+      throws IOException {
+    if (status.getNumber() < Status.OOB_RESTART_VALUE ||
+        status.getNumber() > Status.OOB_RESERVED3_VALUE) {
+      // Not an OOB.
+      throw new IOException("Not an OOB status: " + status);
+    }
+
+    return oobTimeouts[status.getNumber() - Status.OOB_RESTART_VALUE];
+  }
 }
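
initOOBTimeout() splits the DFS_DATANODE_OOB_TIMEOUT_KEY value on commas and pads missing entries with 0, so each of the four OOB types gets its own wait in milliseconds. A worked sketch of that mapping, with a hypothetical configured value:

    import java.util.Arrays;

    public class OobTimeoutParseSketch {
      public static void main(String[] args) {
        final int numOobTypes = 4;        // OOB_RESTART .. OOB_RESERVED3
        String configured = "1500,0,0";   // hypothetical value of the timeout key
        String[] ele = configured.split(",");
        long[] oobTimeouts = new long[numOobTypes];
        for (int i = 0; i < numOobTypes; i++) {
          oobTimeouts[i] = (i < ele.length) ? Long.parseLong(ele[i]) : 0; // pad with 0
        }
        // Prints [1500, 0, 0, 0]: only OOB_RESTART waits (1.5 s) for its turn.
        System.out.println(Arrays.toString(oobTimeouts));
      }
    }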

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d16c4eee/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/LongBitFormat.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/LongBitFormat.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/LongBitFormat.java
deleted file mode 100644
index 9399d84..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/LongBitFormat.java
+++ /dev/null
@@ -1,71 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.util;
-
-import java.io.Serializable;
-
-
-/**
- * Bit format in a long.
- */
-public class LongBitFormat implements Serializable {
-  private static final long serialVersionUID = 1L;
-
-  private final String NAME;
-  /** Bit offset */
-  private final int OFFSET;
-  /** Bit length */
-  private final int LENGTH;
-  /** Minimum value */
-  private final long MIN;
-  /** Maximum value */
-  private final long MAX;
-  /** Bit mask */
-  private final long MASK;
-
-  public LongBitFormat(String name, LongBitFormat previous, int length, long min) {
-    NAME = name;
-    OFFSET = previous == null? 0: previous.OFFSET + previous.LENGTH;
-    LENGTH = length;
-    MIN = min;
-    MAX = ((-1L) >>> (64 - LENGTH));
-    MASK = MAX << OFFSET;
-  }
-
-  /** Retrieve the value from the record. */
-  public long retrieve(long record) {
-    return (record & MASK) >>> OFFSET;
-  }
-
-  /** Combine the value to the record. */
-  public long combine(long value, long record) {
-    if (value < MIN) {
-      throw new IllegalArgumentException(
-          "Illagal value: " + NAME + " = " + value + " < MIN = " + MIN);
-    }
-    if (value > MAX) {
-      throw new IllegalArgumentException(
-          "Illagal value: " + NAME + " = " + value + " > MAX = " + MAX);
-    }
-    return (record & ~MASK) | (value << OFFSET);
-  }
-  
-  public long getMin() {
-    return MIN;
-  }
-}


[15/50] [abbrv] hadoop git commit: HADOOP-12359. hadoop fs -getmerge doc is wrong. Contributed by Jagadesh Kiran N.

Posted by vv...@apache.org.
HADOOP-12359. hadoop fs -getmerge doc is wrong. Contributed by Jagadesh Kiran N.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2e251a76
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2e251a76
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2e251a76

Branch: refs/heads/YARN-3926
Commit: 2e251a767427a38ecb6c309ad979feecb29a09f4
Parents: faa38e1
Author: Akira Ajisaka <aa...@apache.org>
Authored: Tue Sep 1 20:55:33 2015 +0900
Committer: Akira Ajisaka <aa...@apache.org>
Committed: Tue Sep 1 20:55:33 2015 +0900

----------------------------------------------------------------------
 hadoop-common-project/hadoop-common/CHANGES.txt        |  3 +++
 .../hadoop-common/src/site/markdown/FileSystemShell.md | 13 +++++++++++--
 2 files changed, 14 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2e251a76/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 14e6fda..4eef964 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -1120,6 +1120,9 @@ Release 2.7.2 - UNRELEASED
     HADOOP-12061. Incorrect command in single cluster setup document.
     (Kengo Seki via aajisaka)
 
+    HADOOP-12359. hadoop fs -getmerge doc is wrong.
+    (Jagadesh Kiran N via aajisaka)
+
 Release 2.7.1 - 2015-07-06 
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2e251a76/hadoop-common-project/hadoop-common/src/site/markdown/FileSystemShell.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/FileSystemShell.md b/hadoop-common-project/hadoop-common/src/site/markdown/FileSystemShell.md
index fb89ca1..d6d00e4 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/FileSystemShell.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/FileSystemShell.md
@@ -368,9 +368,18 @@ Returns 0 on success and non-zero on error.
 getmerge
 --------
 
-Usage: `hadoop fs -getmerge <src> <localdst> [addnl]`
+Usage: `hadoop fs -getmerge [-nl] <src> <localdst>`
 
-Takes a source directory and a destination file as input and concatenates files in src into the destination local file. Optionally addnl can be set to enable adding a newline character at the end of each file.
+Takes a source directory and a destination file as input and concatenates files in src into the destination local file. Optionally -nl can be set to enable adding a newline character (LF) at the end of each file.
+
+Examples:
+
+* `hadoop fs -getmerge -nl  /src  /opt/output.txt`
+* `hadoop fs -getmerge -nl  /src/file1.txt /src/file2.txt  /output.txt`
+
+Exit Code:
+
+Returns 0 on success and non-zero on error.
 
 help
 ----
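
As a programmatic counterpart to the shell usage documented above, branch-2 also ships FileUtil.copyMerge, which performs the same concatenation. A minimal sketch, assuming a reachable HDFS at the default FileSystem URI; the paths are illustrative and the trailing "\n" mirrors the shell -nl option:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.FileUtil;
    import org.apache.hadoop.fs.Path;

    public class CopyMergeSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem src = FileSystem.get(conf);        // HDFS source
        FileSystem dst = FileSystem.getLocal(conf);   // local destination
        // deleteSource=false keeps /src; "\n" appends a newline per file.
        FileUtil.copyMerge(src, new Path("/src"), dst,
            new Path("/opt/output.txt"), false, conf, "\n");
      }
    }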


[48/50] [abbrv] hadoop git commit: YARN-4024. YARN RM should avoid unnecessary resolving IP when NMs doing heartbeat. (Hong Zhiguo via wangda)

Posted by vv...@apache.org.
YARN-4024. YARN RM should avoid unnecessary resolving IP when NMs doing heartbeat. (Hong Zhiguo via wangda)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bcc85e3b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bcc85e3b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bcc85e3b

Branch: refs/heads/YARN-3926
Commit: bcc85e3bab78bcacd430eac23141774465b96ef9
Parents: e1feaf6
Author: Wangda Tan <wa...@apache.org>
Authored: Fri Sep 4 15:13:53 2015 -0700
Committer: Wangda Tan <wa...@apache.org>
Committed: Fri Sep 4 15:13:53 2015 -0700

----------------------------------------------------------------------
 hadoop-yarn-project/CHANGES.txt                 |   3 +
 .../hadoop/yarn/conf/YarnConfiguration.java     |   5 +
 .../src/main/resources/yarn-default.xml         |   6 +
 .../resourcemanager/NodesListManager.java       | 142 ++++++++++++++++++-
 .../rmapp/TestNodesListManager.java             | 102 +++++++++++++
 5 files changed, 255 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bcc85e3b/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 662106b..98cc98f 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -813,6 +813,9 @@ Release 2.8.0 - UNRELEASED
     YARN-4073. Removed unused ApplicationACLsManager in ContainerManagerImpl constructor.
     (Naganarasimha G R via rohithsharmaks)
 
+    YARN-4024. YARN RM should avoid unnecessary resolving IP when NMs doing heartbeat.
+    (Hong Zhiguo via wangda)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bcc85e3b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index a18ef7c..5e1bab2 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -746,6 +746,11 @@ public class YarnConfiguration extends Configuration {
       + "proxy-user-privileges.enabled";
   public static final boolean DEFAULT_RM_PROXY_USER_PRIVILEGES_ENABLED = false;
 
+  /** The expiry interval for node IP caching. -1 disables the caching */
+  public static final String RM_NODE_IP_CACHE_EXPIRY_INTERVAL_SECS = RM_PREFIX
+      + "node-ip-cache.expiry-interval-secs";
+  public static final int DEFAULT_RM_NODE_IP_CACHE_EXPIRY_INTERVAL_SECS = -1;
+
   /**
    * How many diagnostics/failure messages can be saved in RM for
    * log aggregation. It also defines the number of diagnostics/failure

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bcc85e3b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index 62ba599..436bfb0 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -273,6 +273,12 @@
   </property>
 
   <property>
+    <description>The expiry interval for node IP caching. -1 disables the caching</description>
+    <name>yarn.resourcemanager.node-ip-cache.expiry-interval-secs</name>
+    <value>-1</value>
+  </property>
+
+  <property>
     <description>Number of threads to handle resource tracker calls.</description>
     <name>yarn.resourcemanager.resource-tracker.client.thread-count</name>
     <value>50</value>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bcc85e3b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/NodesListManager.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/NodesListManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/NodesListManager.java
index b9c76fb..abea85e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/NodesListManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/NodesListManager.java
@@ -24,13 +24,18 @@ import java.util.Collections;
 import java.util.HashSet;
 import java.util.Map.Entry;
 import java.util.Set;
+import java.util.Timer;
+import java.util.TimerTask;
 import java.util.concurrent.ConcurrentHashMap;
+import java.util.Map;
+import java.util.Iterator;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.service.AbstractService;
+import org.apache.hadoop.service.CompositeService;
 import org.apache.hadoop.util.HostsFileReader;
 import org.apache.hadoop.yarn.api.records.NodeId;
 import org.apache.hadoop.yarn.api.records.NodeState;
@@ -46,9 +51,11 @@ import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeEventType;
 
 import com.google.common.annotations.VisibleForTesting;
+import org.apache.hadoop.yarn.util.Clock;
+import org.apache.hadoop.yarn.util.SystemClock;
 
 @SuppressWarnings("unchecked")
-public class NodesListManager extends AbstractService implements
+public class NodesListManager extends CompositeService implements
     EventHandler<NodesListManagerEvent> {
 
   private static final Log LOG = LogFactory.getLog(NodesListManager.class);
@@ -63,6 +70,8 @@ public class NodesListManager extends AbstractService implements
   private String includesFile;
   private String excludesFile;
 
+  private Resolver resolver;
+
   public NodesListManager(RMContext rmContext) {
     super(NodesListManager.class.getName());
     this.rmContext = rmContext;
@@ -73,6 +82,16 @@ public class NodesListManager extends AbstractService implements
 
     this.conf = conf;
 
+    int nodeIpCacheTimeout = conf.getInt(
+        YarnConfiguration.RM_NODE_IP_CACHE_EXPIRY_INTERVAL_SECS,
+        YarnConfiguration.DEFAULT_RM_NODE_IP_CACHE_EXPIRY_INTERVAL_SECS);
+    if (nodeIpCacheTimeout <= 0) {
+      resolver = new DirectResolver();
+    } else {
+      resolver = new CachedResolver(new SystemClock(), nodeIpCacheTimeout);
+      addIfService(resolver);
+    }
+
     // Read the hosts/exclude files to restrict access to the RM
     try {
       this.includesFile = conf.get(YarnConfiguration.RM_NODES_INCLUDE_FILE_PATH,
@@ -148,17 +167,129 @@ public class NodesListManager extends AbstractService implements
     ClusterMetrics.getMetrics().setDecommisionedNMs(excludeList.size());
   }
 
+  @VisibleForTesting
+  public Resolver getResolver() {
+    return resolver;
+  }
+
+  @VisibleForTesting
+  public interface Resolver {
+    // Try to resolve hostName to an IP address, falling back to hostName on failure.
+    String resolve(String hostName);
+  }
+
+  @VisibleForTesting
+  public static class DirectResolver implements Resolver {
+    @Override
+    public String resolve(String hostName) {
+      return NetUtils.normalizeHostName(hostName);
+    }
+  }
+
+  @VisibleForTesting
+  public static class CachedResolver extends AbstractService
+      implements Resolver {
+    private static class CacheEntry {
+      public String ip;
+      public long resolveTime;
+      public CacheEntry(String ip, long resolveTime) {
+        this.ip = ip;
+        this.resolveTime = resolveTime;
+      }
+    }
+    private Map<String, CacheEntry> cache =
+        new ConcurrentHashMap<String, CacheEntry>();
+    private int expiryIntervalMs;
+    private int checkIntervalMs;
+    private final Clock clock;
+    private Timer checkingTimer;
+    private TimerTask expireChecker = new ExpireChecker();
+
+    public CachedResolver(Clock clock, int expiryIntervalSecs) {
+      super("NodesListManager.CachedResolver");
+      this.clock = clock;
+      this.expiryIntervalMs = expiryIntervalSecs * 1000;
+      checkIntervalMs = expiryIntervalMs/3;
+      checkingTimer = new Timer(
+          "Timer-NodesListManager.CachedResolver.ExpireChecker", true);
+    }
+
+    @Override
+    protected void serviceStart() throws Exception {
+      checkingTimer.scheduleAtFixedRate(
+          expireChecker, checkIntervalMs, checkIntervalMs);
+      super.serviceStart();
+    }
+
+    @Override
+    protected void serviceStop() throws Exception {
+      checkingTimer.cancel();
+      super.serviceStop();
+    }
+
+    @VisibleForTesting
+    public void addToCache(String hostName, String ip) {
+      cache.put(hostName, new CacheEntry(ip, clock.getTime()));
+    }
+
+    public void removeFromCache(String hostName) {
+      cache.remove(hostName);
+    }
+
+    private String reload(String hostName) {
+      String ip = NetUtils.normalizeHostName(hostName);
+      addToCache(hostName, ip);
+      return ip;
+    }
+
+    @Override
+    public String resolve(String hostName) {
+      CacheEntry e = cache.get(hostName);
+      if (e != null) {
+        return e.ip;
+      }
+      return reload(hostName);
+    }
+
+    @VisibleForTesting
+    public TimerTask getExpireChecker() {
+      return expireChecker;
+    }
+
+    private class ExpireChecker extends TimerTask {
+      @Override
+      public void run() {
+        long currentTime = clock.getTime();
+        Iterator<Map.Entry<String, CacheEntry>> iterator =
+            cache.entrySet().iterator();
+        while (iterator.hasNext()) {
+          Map.Entry<String, CacheEntry> entry = iterator.next();
+          if (currentTime >
+              entry.getValue().resolveTime +
+                  CachedResolver.this.expiryIntervalMs) {
+            iterator.remove();
+            if (LOG.isDebugEnabled()) {
+              LOG.debug("[" + entry.getKey() + ":" + entry.getValue().ip +
+                  "] Expired after " +
+                  CachedResolver.this.expiryIntervalMs / 1000 + " secs");
+            }
+          }
+        }
+      }
+    }
+  }
+
   public boolean isValidNode(String hostName) {
+    String ip = resolver.resolve(hostName);
     synchronized (hostsReader) {
       Set<String> hostsList = hostsReader.getHosts();
       Set<String> excludeList = hostsReader.getExcludedHosts();
-      String ip = NetUtils.normalizeHostName(hostName);
       return (hostsList.isEmpty() || hostsList.contains(hostName) || hostsList
           .contains(ip))
           && !(excludeList.contains(hostName) || excludeList.contains(ip));
     }
   }
-  
+
   /**
    * Provides the currently unusable nodes. Copies it into provided collection.
    * @param unUsableNodes
@@ -207,6 +338,11 @@ public class NodesListManager extends AbstractService implements
     default:
       LOG.error("Ignoring invalid eventtype " + event.getType());
     }
+    // remove the cache of normalized hostname if enabled
+    if (resolver instanceof CachedResolver) {
+      ((CachedResolver)resolver).removeFromCache(
+          eventNode.getNodeID().getHost());
+    }
   }
 
   private void disableHostsFileReader(Exception ex) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bcc85e3b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/TestNodesListManager.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/TestNodesListManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/TestNodesListManager.java
index 5330976..2f57dbf 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/TestNodesListManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/TestNodesListManager.java
@@ -42,6 +42,8 @@ import org.apache.hadoop.yarn.server.resourcemanager.NodesListManagerEventType;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptState;
 import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
+import org.apache.hadoop.yarn.util.ControlledClock;
+import org.apache.hadoop.yarn.util.SystemClock;
 import org.apache.log4j.Level;
 import org.apache.log4j.LogManager;
 import org.apache.log4j.Logger;
@@ -130,6 +132,106 @@ public class TestNodesListManager {
 
   }
 
+  @Test
+  public void testCachedResolver() throws Exception {
+    Logger rootLogger = LogManager.getRootLogger();
+    rootLogger.setLevel(Level.DEBUG);
+    ControlledClock clock = new ControlledClock(new SystemClock());
+    clock.setTime(0);
+    final int CACHE_EXPIRY_INTERVAL_SECS = 30;
+    NodesListManager.CachedResolver resolver =
+        new NodesListManager.CachedResolver(clock, CACHE_EXPIRY_INTERVAL_SECS);
+    resolver.init(new YarnConfiguration());
+    resolver.start();
+    resolver.addToCache("testCachedResolverHost1", "1.1.1.1");
+    Assert.assertEquals("1.1.1.1",
+        resolver.resolve("testCachedResolverHost1"));
+
+    resolver.addToCache("testCachedResolverHost2", "1.1.1.2");
+    Assert.assertEquals("1.1.1.1",
+        resolver.resolve("testCachedResolverHost1"));
+    Assert.assertEquals("1.1.1.2",
+        resolver.resolve("testCachedResolverHost2"));
+
+    // test removeFromCache
+    resolver.removeFromCache("testCachedResolverHost1");
+    Assert.assertNotEquals("1.1.1.1",
+        resolver.resolve("testCachedResolverHost1"));
+    Assert.assertEquals("1.1.1.2",
+        resolver.resolve("testCachedResolverHost2"));
+
+    // test expiry
+    clock.tickMsec(CACHE_EXPIRY_INTERVAL_SECS * 1000 + 1);
+    resolver.getExpireChecker().run();
+    Assert.assertNotEquals("1.1.1.1",
+        resolver.resolve("testCachedResolverHost1"));
+    Assert.assertNotEquals("1.1.1.2",
+        resolver.resolve("testCachedResolverHost2"));
+  }
+
+  @Test
+  public void testDefaultResolver() throws Exception {
+    Logger rootLogger = LogManager.getRootLogger();
+    rootLogger.setLevel(Level.DEBUG);
+
+    YarnConfiguration conf = new YarnConfiguration();
+
+    MockRM rm = new MockRM(conf);
+    rm.init(conf);
+    NodesListManager nodesListManager = rm.getNodesListManager();
+
+    NodesListManager.Resolver resolver = nodesListManager.getResolver();
+    Assert.assertTrue("default resolver should be DirectResolver",
+        resolver instanceof NodesListManager.DirectResolver);
+  }
+
+  @Test
+  public void testCachedResolverWithEvent() throws Exception {
+    Logger rootLogger = LogManager.getRootLogger();
+    rootLogger.setLevel(Level.DEBUG);
+
+    YarnConfiguration conf = new YarnConfiguration();
+    conf.setInt(YarnConfiguration.RM_NODE_IP_CACHE_EXPIRY_INTERVAL_SECS, 30);
+
+    MockRM rm = new MockRM(conf);
+    rm.init(conf);
+    NodesListManager nodesListManager = rm.getNodesListManager();
+    nodesListManager.init(conf);
+    nodesListManager.start();
+
+    NodesListManager.CachedResolver resolver =
+        (NodesListManager.CachedResolver)nodesListManager.getResolver();
+
+    resolver.addToCache("testCachedResolverHost1", "1.1.1.1");
+    resolver.addToCache("testCachedResolverHost2", "1.1.1.2");
+    Assert.assertEquals("1.1.1.1",
+        resolver.resolve("testCachedResolverHost1"));
+    Assert.assertEquals("1.1.1.2",
+        resolver.resolve("testCachedResolverHost2"));
+
+    RMNode rmnode1 = MockNodes.newNodeInfo(1, Resource.newInstance(28000, 8),
+        1, "testCachedResolverHost1", 1234);
+    RMNode rmnode2 = MockNodes.newNodeInfo(1, Resource.newInstance(28000, 8),
+        1, "testCachedResolverHost2", 1234);
+
+    nodesListManager.handle(
+        new NodesListManagerEvent(NodesListManagerEventType.NODE_USABLE,
+            rmnode1));
+    Assert.assertNotEquals("1.1.1.1",
+        resolver.resolve("testCachedResolverHost1"));
+    Assert.assertEquals("1.1.1.2",
+        resolver.resolve("testCachedResolverHost2"));
+
+    nodesListManager.handle(
+        new NodesListManagerEvent(NodesListManagerEventType.NODE_USABLE,
+            rmnode2));
+    Assert.assertNotEquals("1.1.1.1",
+        resolver.resolve("testCachedResolverHost1"));
+    Assert.assertNotEquals("1.1.1.2",
+        resolver.resolve("testCachedResolverHost2"));
+
+  }
+
   /*
    * Create dispatcher object
    */


[30/50] [abbrv] hadoop git commit: YARN-3970. Add REST api support for Application Priority. Contributed by Naganarasimha G R.

Posted by vv...@apache.org.
YARN-3970. Add REST api support for Application Priority. Contributed by Naganarasimha G R.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b469ac53
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b469ac53
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b469ac53

Branch: refs/heads/YARN-3926
Commit: b469ac531af1bdda01a04ae0b8d39218ca292163
Parents: c92e31b
Author: Varun Vasudev <vv...@apache.org>
Authored: Thu Sep 3 16:35:10 2015 +0530
Committer: Varun Vasudev <vv...@apache.org>
Committed: Thu Sep 3 16:40:10 2015 +0530

----------------------------------------------------------------------
 hadoop-yarn-project/CHANGES.txt                 |   3 +
 .../hadoop/yarn/client/cli/ApplicationCLI.java  |   4 +-
 .../scheduler/capacity/CapacityScheduler.java   |  18 +--
 .../webapp/JAXBContextResolver.java             |   2 +-
 .../resourcemanager/webapp/RMWebServices.java   | 116 ++++++++++++++++++
 .../resourcemanager/webapp/dao/AppPriority.java |  44 +++++++
 .../TestRMWebServicesAppsModification.java      | 120 +++++++++++++++++++
 .../src/site/markdown/ResourceManagerRest.md    | 120 +++++++++++++++++++
 8 files changed, 415 insertions(+), 12 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b469ac53/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 5e0c43d..4e9b411 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -187,6 +187,9 @@ Release 2.8.0 - UNRELEASED
     YARN-3250. Support admin cli interface in for Application Priority.
     (Rohith Sharma K S via jianhe)
 
+    YARN-3970. Add REST api support for Application Priority.
+    (Naganarasimha G R via vvasudev)
+
   IMPROVEMENTS
 
     YARN-644. Basic null check is not performed on passed in arguments before

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b469ac53/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/ApplicationCLI.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/ApplicationCLI.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/ApplicationCLI.java
index ab29366..9c87eae 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/ApplicationCLI.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/ApplicationCLI.java
@@ -645,7 +645,7 @@ public class ApplicationCLI extends YarnCLI {
     Priority newAppPriority = Priority.newInstance(Integer.parseInt(priority));
     sysout.println("Updating priority of an aplication " + applicationId);
     client.updateApplicationPriority(appId, newAppPriority);
-    sysout.println("Successfully updated the priority of any application "
-        + applicationId);
+    sysout.println("Successfully updated the application with id "
+        + applicationId + " with priority '" + priority + "'");
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b469ac53/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
index b5ccbd9..a7e9d8c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
@@ -1832,7 +1832,7 @@ public class CapacityScheduler extends
 
       LOG.info("Application '" + applicationId
           + "' is submitted without priority "
-          + "hence considering default queue/cluster priority:"
+          + "hence considering default queue/cluster priority: "
           + priorityFromContext.getPriority());
     }
 
@@ -1846,8 +1846,8 @@ public class CapacityScheduler extends
     appPriority = priorityFromContext;
 
     LOG.info("Priority '" + appPriority.getPriority()
-        + "' is acceptable in queue :" + queueName + "for application:"
-        + applicationId + "for the user: " + user);
+        + "' is acceptable in queue : " + queueName + " for application: "
+        + applicationId + " for the user: " + user);
 
     return appPriority;
   }
@@ -1876,14 +1876,14 @@ public class CapacityScheduler extends
           + "' is not present, hence could not change priority.");
     }
 
-    if (application.getPriority().equals(newPriority)) {
-      return;
-    }
-
     RMApp rmApp = rmContext.getRMApps().get(applicationId);
     appPriority = checkAndGetApplicationPriority(newPriority, rmApp.getUser(),
         rmApp.getQueue(), applicationId);
 
+    if (application.getPriority().equals(appPriority)) {
+      return;
+    }
+
     // Update new priority in Submission Context to keep track in HA
     rmApp.getApplicationSubmissionContext().setPriority(appPriority);
 
@@ -1909,7 +1909,7 @@ public class CapacityScheduler extends
     }
 
     LOG.info("Priority '" + appPriority + "' is updated in queue :"
-        + rmApp.getQueue() + "for application:" + applicationId
-        + "for the user: " + rmApp.getUser());
+        + rmApp.getQueue() + " for application: " + applicationId
+        + " for the user: " + rmApp.getUser());
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b469ac53/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/JAXBContextResolver.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/JAXBContextResolver.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/JAXBContextResolver.java
index 7d7a347..b61072d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/JAXBContextResolver.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/JAXBContextResolver.java
@@ -59,7 +59,7 @@ public class JAXBContextResolver implements ContextResolver<JAXBContext> {
     final Class[] rootUnwrappedTypes =
         { NewApplication.class, ApplicationSubmissionContextInfo.class,
             ContainerLaunchContextInfo.class, LocalResourceInfo.class,
-            DelegationToken.class, AppQueue.class };
+            DelegationToken.class, AppQueue.class, AppPriority.class };
 
     this.typesContextMap = new HashMap<Class, JAXBContext>();
     context =

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b469ac53/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java
index b1fa80a..8e11e2a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java
@@ -85,6 +85,7 @@ import org.apache.hadoop.yarn.api.protocolrecords.RenewDelegationTokenRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.RenewDelegationTokenResponse;
 import org.apache.hadoop.yarn.api.protocolrecords.SubmitApplicationRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.SubmitApplicationResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.UpdateApplicationPriorityRequest;
 import org.apache.hadoop.yarn.api.records.ApplicationAccessType;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.ApplicationReport;
@@ -122,6 +123,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoSchedule
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppAttemptInfo;
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppAttemptsInfo;
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppInfo;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppPriority;
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppQueue;
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppState;
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ApplicationStatisticsInfo;
@@ -1048,6 +1050,120 @@ public class RMWebServices {
   }
 
   @GET
+  @Path("/apps/{appid}/priority")
+  @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
+  public AppPriority getAppPriority(@Context HttpServletRequest hsr,
+      @PathParam("appid") String appId) throws AuthorizationException {
+    init();
+    UserGroupInformation callerUGI = getCallerUserGroupInformation(hsr, true);
+    String userName = "UNKNOWN-USER";
+    if (callerUGI != null) {
+      userName = callerUGI.getUserName();
+    }
+    RMApp app = null;
+    try {
+      app = getRMAppForAppId(appId);
+    } catch (NotFoundException e) {
+      RMAuditLogger.logFailure(userName, AuditConstants.KILL_APP_REQUEST,
+          "UNKNOWN", "RMWebService",
+          "Trying to get state of an absent application " + appId);
+      throw e;
+    }
+
+    AppPriority ret = new AppPriority();
+    ret.setPriority(
+        app.getApplicationSubmissionContext().getPriority().getPriority());
+
+    return ret;
+  }
+
+  @PUT
+  @Path("/apps/{appid}/priority")
+  @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
+  @Consumes({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
+  public Response updateApplicationPriority(AppPriority targetPriority,
+      @Context HttpServletRequest hsr, @PathParam("appid") String appId)
+      throws AuthorizationException, YarnException, InterruptedException,
+          IOException {
+    init();
+    if (targetPriority == null) {
+      throw new YarnException("Target Priority cannot be null");
+    }
+
+    UserGroupInformation callerUGI = getCallerUserGroupInformation(hsr, true);
+    if (callerUGI == null) {
+      throw new AuthorizationException(
+          "Unable to obtain user name, user not authenticated");
+    }
+
+    if (UserGroupInformation.isSecurityEnabled() && isStaticUser(callerUGI)) {
+      return Response.status(Status.FORBIDDEN)
+          .entity("The default static user cannot carry out this operation.")
+          .build();
+    }
+
+    String userName = callerUGI.getUserName();
+    RMApp app = null;
+    try {
+      app = getRMAppForAppId(appId);
+    } catch (NotFoundException e) {
+      RMAuditLogger.logFailure(userName, AuditConstants.KILL_APP_REQUEST,
+          "UNKNOWN", "RMWebService",
+          "Trying to move an absent application " + appId);
+      throw e;
+    }
+    Priority priority = app.getApplicationSubmissionContext().getPriority();
+    if (priority == null
+        || priority.getPriority() != targetPriority.getPriority()) {
+      return modifyApplicationPriority(app, callerUGI,
+          targetPriority.getPriority());
+    }
+    return Response.status(Status.OK).entity(targetPriority).build();
+  }
+
+  private Response modifyApplicationPriority(final RMApp app,
+      UserGroupInformation callerUGI, final int appPriority)
+          throws IOException, InterruptedException {
+    String userName = callerUGI.getUserName();
+    try {
+      callerUGI.doAs(new PrivilegedExceptionAction<Void>() {
+        @Override
+        public Void run() throws IOException, YarnException {
+          Priority priority = Priority.newInstance(appPriority);
+          UpdateApplicationPriorityRequest request =
+              UpdateApplicationPriorityRequest
+                  .newInstance(app.getApplicationId(), priority);
+          rm.getClientRMService().updateApplicationPriority(request);
+          return null;
+        }
+      });
+    } catch (UndeclaredThrowableException ue) {
+      // if the root cause is a permissions issue
+      // bubble that up to the user
+      if (ue.getCause() instanceof YarnException) {
+        YarnException ye = (YarnException) ue.getCause();
+        if (ye.getCause() instanceof AccessControlException) {
+          String appId = app.getApplicationId().toString();
+          String msg = "Unauthorized attempt to change priority of appid "
+              + appId + " by remote user " + userName;
+          return Response.status(Status.FORBIDDEN).entity(msg).build();
+        } else if (ye.getMessage().startsWith("Application in")
+            && ye.getMessage().endsWith("state cannot be update priority.")) {
+          return Response.status(Status.BAD_REQUEST).entity(ye.getMessage())
+              .build();
+        } else {
+          throw ue;
+        }
+      } else {
+        throw ue;
+      }
+    }
+    AppPriority ret = new AppPriority(
+        app.getApplicationSubmissionContext().getPriority().getPriority());
+    return Response.status(Status.OK).entity(ret).build();
+  }
+
+  @GET
   @Path("/apps/{appid}/queue")
   @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
   public AppQueue getAppQueue(@Context HttpServletRequest hsr,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b469ac53/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppPriority.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppPriority.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppPriority.java
new file mode 100644
index 0000000..d905635
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppPriority.java
@@ -0,0 +1,44 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.resourcemanager.webapp.dao;
+
+import javax.xml.bind.annotation.XmlAccessType;
+import javax.xml.bind.annotation.XmlAccessorType;
+import javax.xml.bind.annotation.XmlRootElement;
+
+@XmlRootElement(name = "applicationpriority")
+@XmlAccessorType(XmlAccessType.FIELD)
+public class AppPriority {
+
+  private int priority;
+
+  public AppPriority() {
+  }
+
+  public AppPriority(int priority) {
+    this.priority = priority;
+  }
+
+  public void setPriority(int priority) {
+    this.priority = priority;
+  }
+
+  public int getPriority() {
+    return this.priority;
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b469ac53/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesAppsModification.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesAppsModification.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesAppsModification.java
index de4d116..df3c2bc 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesAppsModification.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesAppsModification.java
@@ -970,6 +970,92 @@ public class TestRMWebServicesAppsModification extends JerseyTestBase {
   }
 
   @Test(timeout = 90000)
+  public void testUpdateAppPriority() throws Exception {
+    client().addFilter(new LoggingFilter(System.out));
+
+    if (!(rm.getResourceScheduler() instanceof CapacityScheduler)) {
+      // skip until the fair scheduler modifications for priority are completed
+      return;
+    }
+
+    CapacityScheduler cs = (CapacityScheduler) rm.getResourceScheduler();
+    Configuration conf = new Configuration();
+    conf.setInt(YarnConfiguration.MAX_CLUSTER_LEVEL_APPLICATION_PRIORITY, 10);
+    cs.setClusterMaxPriority(conf);
+
+    // default root queue allows anyone to have admin acl
+    CapacitySchedulerConfiguration csconf =
+        new CapacitySchedulerConfiguration();
+    String[] queues = { "default", "test" };
+    csconf.setQueues("root", queues);
+    csconf.setCapacity("root.default", 50.0f);
+    csconf.setCapacity("root.test", 50.0f);
+    csconf.setAcl("root", QueueACL.ADMINISTER_QUEUE, "someuser");
+    csconf.setAcl("root.default", QueueACL.ADMINISTER_QUEUE, "someuser");
+    csconf.setAcl("root.test", QueueACL.ADMINISTER_QUEUE, "someuser");
+    rm.getResourceScheduler().reinitialize(csconf, rm.getRMContext());
+
+    rm.start();
+    MockNM amNodeManager = rm.registerNode("127.0.0.1:1234", 2048);
+    String[] mediaTypes =
+        { MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML };
+    MediaType[] contentTypes =
+        { MediaType.APPLICATION_JSON_TYPE, MediaType.APPLICATION_XML_TYPE };
+    for (String mediaType : mediaTypes) {
+      for (MediaType contentType : contentTypes) {
+        RMApp app = rm.submitApp(CONTAINER_MB, "", webserviceUserName);
+        amNodeManager.nodeHeartbeat(true);
+        int modifiedPriority = 8;
+        AppPriority priority = new AppPriority(modifiedPriority);
+        Object entity;
+        if (contentType.equals(MediaType.APPLICATION_JSON_TYPE)) {
+          entity = appPriorityToJSON(priority);
+        } else {
+          entity = priority;
+        }
+        ClientResponse response = this
+            .constructWebResource("apps", app.getApplicationId().toString(),
+                "priority")
+            .entity(entity, contentType).accept(mediaType)
+            .put(ClientResponse.class);
+
+        if (!isAuthenticationEnabled()) {
+          assertEquals(Status.UNAUTHORIZED, response.getClientResponseStatus());
+          continue;
+        }
+        assertEquals(Status.OK, response.getClientResponseStatus());
+        if (mediaType.equals(MediaType.APPLICATION_JSON)) {
+          verifyAppPriorityJson(response, modifiedPriority);
+        } else {
+          verifyAppPriorityXML(response, modifiedPriority);
+        }
+
+        response = this
+            .constructWebResource("apps", app.getApplicationId().toString(),
+                "priority")
+            .accept(mediaType).get(ClientResponse.class);
+        assertEquals(Status.OK, response.getClientResponseStatus());
+        if (mediaType.equals(MediaType.APPLICATION_JSON)) {
+          verifyAppPriorityJson(response, modifiedPriority);
+        } else {
+          verifyAppPriorityXML(response, modifiedPriority);
+        }
+
+        // check unauthorized
+        app = rm.submitApp(CONTAINER_MB, "", "someuser");
+        amNodeManager.nodeHeartbeat(true);
+        response = this
+            .constructWebResource("apps", app.getApplicationId().toString(),
+                "priority")
+            .entity(entity, contentType).accept(mediaType)
+            .put(ClientResponse.class);
+        assertEquals(Status.FORBIDDEN, response.getClientResponseStatus());
+      }
+    }
+    rm.stop();
+  }
+
+  @Test(timeout = 90000)
   public void testAppMove() throws Exception {
 
     client().addFilter(new LoggingFilter(System.out));
@@ -1049,6 +1135,15 @@ public class TestRMWebServicesAppsModification extends JerseyTestBase {
     rm.stop();
   }
 
+  protected static String appPriorityToJSON(AppPriority targetPriority)
+      throws Exception {
+    StringWriter sw = new StringWriter();
+    JSONJAXBContext ctx = new JSONJAXBContext(AppPriority.class);
+    JSONMarshaller jm = ctx.createJSONMarshaller();
+    jm.marshallToJSON(targetPriority, sw);
+    return sw.toString();
+  }
+
   protected static String appQueueToJSON(AppQueue targetQueue) throws Exception {
     StringWriter sw = new StringWriter();
     JSONJAXBContext ctx = new JSONJAXBContext(AppQueue.class);
@@ -1056,6 +1151,31 @@ public class TestRMWebServicesAppsModification extends JerseyTestBase {
     jm.marshallToJSON(targetQueue, sw);
     return sw.toString();
   }
+  protected static void verifyAppPriorityJson(ClientResponse response,
+      int expectedPriority) throws JSONException {
+    assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
+    JSONObject json = response.getEntity(JSONObject.class);
+    assertEquals("incorrect number of elements", 1, json.length());
+    int responsePriority = json.getInt("priority");
+    assertEquals(expectedPriority, responsePriority);
+  }
+
+  protected static void verifyAppPriorityXML(ClientResponse response,
+      int expectedPriority)
+          throws ParserConfigurationException, IOException, SAXException {
+    assertEquals(MediaType.APPLICATION_XML_TYPE, response.getType());
+    String xml = response.getEntity(String.class);
+    DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();
+    DocumentBuilder db = dbf.newDocumentBuilder();
+    InputSource is = new InputSource();
+    is.setCharacterStream(new StringReader(xml));
+    Document dom = db.parse(is);
+    NodeList nodes = dom.getElementsByTagName("applicationpriority");
+    assertEquals("incorrect number of elements", 1, nodes.getLength());
+    Element element = (Element) nodes.item(0);
+    int responsePriority = WebServicesTestUtils.getXmlInt(element, "priority");
+    assertEquals(expectedPriority, responsePriority);
+  }
 
   protected static void
       verifyAppQueueJson(ClientResponse response, String queue)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b469ac53/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceManagerRest.md
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceManagerRest.md b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceManagerRest.md
index e16f8c1..f8b8061 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceManagerRest.md
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceManagerRest.md
@@ -30,6 +30,7 @@ ResourceManager REST API's.
 * [Cluster Applications API(Submit Application)](#Cluster_Applications_APISubmit_Application)
 * [Cluster Application State API](#Cluster_Application_State_API)
 * [Cluster Application Queue API](#Cluster_Application_Queue_API)
+* [Cluster Application Priority API](#Cluster_Application_Priority_API)
 * [Cluster Delegation Tokens API](#Cluster_Delegation_Tokens_API)
 
 Overview
@@ -2770,6 +2771,125 @@ Response Body:
       <queue>test</queue>
     </appqueue>
 
+Cluster Application Priority API
+--------------------------------
+
+With the application priority API, you can query the priority of a submitted app, as well as update the priority of a running or accepted app, using a PUT request that specifies the target priority. To perform the PUT operation, authentication has to be set up for the RM web services. In addition, you must be authorized to update the app priority. Currently the app priority can only be updated when the Capacity Scheduler is in use.
+
+Please note that in order to update the priority of an app, you must have an authentication filter set up for the HTTP interface. The functionality requires that a username is set in the HttpServletRequest. If no filter is set up, the response will be an "UNAUTHORIZED" response.
+
+This feature is currently in the alpha stage and may change in the future.
+
+### URI
+
+      * http://<rm http address:port>/ws/v1/cluster/apps/{appid}/priority
+
+### HTTP Operations Supported
+
+      * GET
+      * PUT
+
+### Query Parameters Supported
+
+      None
+
+### Elements of *apppriority* object
+
+When you make a request for the priority of an app, the information returned has the following fields
+
+| Item | Data Type | Description |
+|:---- |:---- |:---- |
+| priority | int | The application priority |
+
+### Response Examples
+
+**JSON responses**
+
+HTTP Request
+
+      GET http://<rm http address:port>/ws/v1/cluster/apps/application_1399397633663_0003/priority
+
+Response Header:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+    Transfer-Encoding: chunked
+    Server: Jetty(6.1.26)
+
+Response Body:
+
+    {
+      "priority":0
+    }
+
+HTTP Request
+
+      PUT http://<rm http address:port>/ws/v1/cluster/apps/application_1399397633663_0003/priority
+
+Request Body:
+
+    {
+      "priority":8
+    }
+
+Response Header:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+    Transfer-Encoding: chunked
+    Server: Jetty(6.1.26)
+
+Response Body:
+
+    {
+      "priority":8
+    }
+
+**XML responses**
+
+HTTP Request
+
+      GET http://<rm http address:port>/ws/v1/cluster/apps/application_1399397633663_0003/priority
+
+Response Header:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/xml
+    Content-Length: 98
+    Server: Jetty(6.1.26)
+
+Response Body:
+
+    <?xml version="1.0" encoding="UTF-8" standalone="yes"?>
+    <applicationpriority>
+      <priority>0</priority>
+    </applicationpriority>
+
+HTTP Request
+
+      PUT http://<rm http address:port>/ws/v1/cluster/apps/application_1399397633663_0003/priority
+
+Request Body:
+
+    <?xml version="1.0" encoding="UTF-8" standalone="yes"?>
+    <applicationpriority>
+      <priority>8</priority>
+    </applicationpriority>
+
+Response Header:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/xml
+    Content-Length: 95
+    Server: Jetty(6.1.26)
+
+Response Body:
+
+    <?xml version="1.0" encoding="UTF-8" standalone="yes"?>
+    <applicationpriority>
+      <priority>8</priority>
+    </applicationpriority>
+
 Cluster Delegation Tokens API
 -----------------------------
 

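For illustration, a minimal Java sketch of a client driving the priority endpoint documented above. The RM address (http://localhost:8088) and the application id are placeholders, and any configured authentication filter is assumed to be satisfied separately:

    import java.io.IOException;
    import java.io.OutputStream;
    import java.net.HttpURLConnection;
    import java.net.URL;
    import java.nio.charset.StandardCharsets;

    public class AppPriorityClient {
      public static void main(String[] args) throws IOException {
        // Placeholder RM address and application id; substitute real values.
        URL url = new URL("http://localhost:8088/ws/v1/cluster/apps/"
            + "application_1399397633663_0003/priority");

        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        conn.setRequestMethod("PUT");
        conn.setRequestProperty("Content-Type", "application/json");
        conn.setRequestProperty("Accept", "application/json");
        conn.setDoOutput(true);
        OutputStream out = conn.getOutputStream();
        try {
          out.write("{\"priority\":8}".getBytes(StandardCharsets.UTF_8));
        } finally {
          out.close();
        }

        // 200 echoes the new priority; 401 without an auth filter,
        // 403 for an unauthorized user, 400 if the app state forbids the change.
        System.out.println("HTTP " + conn.getResponseCode());
      }
    }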

[07/50] [abbrv] hadoop git commit: YARN-4092. Fixed UI redirection to print useful messages when both RMs are in standby mode. Contributed by Xuan Gong

Posted by vv...@apache.org.
YARN-4092. Fixed UI redirection to print useful messages when both RMs are in standby mode. Contributed by Xuan Gong


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a3fd2ccc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a3fd2ccc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a3fd2ccc

Branch: refs/heads/YARN-3926
Commit: a3fd2ccc869dfc1f04d1cf0a8678d4d90a43a80f
Parents: 826ae1c
Author: Jian He <ji...@apache.org>
Authored: Mon Aug 31 17:33:24 2015 -0700
Committer: Jian He <ji...@apache.org>
Committed: Mon Aug 31 17:33:24 2015 -0700

----------------------------------------------------------------------
 hadoop-yarn-project/CHANGES.txt                 |  3 +
 .../hadoop/yarn/client/TestRMFailover.java      | 27 ++++++
 .../hadoop/yarn/webapp/YarnWebParams.java       |  1 +
 .../resourcemanager/webapp/RMWebAppFilter.java  | 90 +++++++++++++++++++-
 4 files changed, 117 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a3fd2ccc/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 4201b4f..19c1082 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -422,6 +422,9 @@ Release 2.8.0 - UNRELEASED
     YARN-1556. NPE getting application report with a null appId. (Weiwei Yang via 
     junping_du)
 
+    YARN-4092. Fixed UI redirection to print useful messages when both RMs are
+    in standby mode. (Xuan Gong via jianhe)
+
   OPTIMIZATIONS
 
     YARN-3339. TestDockerContainerExecutor should pull a single image and not

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a3fd2ccc/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestRMFailover.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestRMFailover.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestRMFailover.java
index 0d03fd4..cbc220a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestRMFailover.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestRMFailover.java
@@ -27,6 +27,7 @@ import static org.junit.Assert.fail;
 import java.io.IOException;
 import java.net.HttpURLConnection;
 import java.net.URL;
+
 import javax.servlet.http.HttpServletResponse;
 
 import org.apache.commons.logging.Log;
@@ -45,6 +46,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.AdminService;
 import org.apache.hadoop.yarn.server.resourcemanager.HATestUtil;
 import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
 import org.apache.hadoop.yarn.server.webproxy.WebAppProxyServer;
+import org.apache.hadoop.yarn.webapp.YarnWebParams;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
@@ -265,6 +267,7 @@ public class TestRMFailover extends ClientBaseWithFixes {
     getAdminService(0).transitionToActive(req);
     String rm1Url = "http://0.0.0.0:18088";
     String rm2Url = "http://0.0.0.0:28088";
+
     String redirectURL = getRedirectURL(rm2Url);
     // if uri is null, RMWebAppFilter will append a slash at the trail of the redirection url
     assertEquals(redirectURL,rm1Url+"/");
@@ -304,6 +307,17 @@ public class TestRMFailover extends ClientBaseWithFixes {
 
     redirectURL = getRedirectURL(rm2Url + "/proxy/" + fakeAppId);
     assertNull(redirectURL);
+
+    // transition the active RM to standby
+    // both RMs are now in standby mode
+    getAdminService(0).transitionToStandby(req);
+    // RM2 is expected to send the httpRequest to itself.
+    // The Refresh header field is expected to be set.
+    redirectURL = getRefreshURL(rm2Url);
+    assertTrue(redirectURL != null
+        && redirectURL.contains(YarnWebParams.NEXT_REFRESH_INTERVAL)
+        && redirectURL.contains(rm2Url));
+
   }
 
   // set up http connection with the given url and get the redirection url from the response
@@ -323,4 +337,17 @@ public class TestRMFailover extends ClientBaseWithFixes {
     return redirectUrl;
   }
 
+  static String getRefreshURL(String url) {
+    String redirectUrl = null;
+    try {
+      HttpURLConnection conn = (HttpURLConnection) new URL(url).openConnection();
+      // do not automatically follow the redirection
+      // otherwise we would get a "too many redirections" exception
+      conn.setInstanceFollowRedirects(false);
+      redirectUrl = conn.getHeaderField("Refresh");
+    } catch (Exception e) {
+      // throw new RuntimeException(e);
+    }
+    return redirectUrl;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a3fd2ccc/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/YarnWebParams.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/YarnWebParams.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/YarnWebParams.java
index 679e1d6..3792649 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/YarnWebParams.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/YarnWebParams.java
@@ -40,4 +40,5 @@ public interface YarnWebParams {
   String NODE_STATE = "node.state";
   String NODE_LABEL = "node.label";
   String WEB_UI_TYPE = "web.ui.type";
+  String NEXT_REFRESH_INTERVAL = "next.fresh.interval";
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a3fd2ccc/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebAppFilter.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebAppFilter.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebAppFilter.java
index 500f17a..a8f793a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebAppFilter.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebAppFilter.java
@@ -20,6 +20,10 @@ package org.apache.hadoop.yarn.server.resourcemanager.webapp;
 
 import java.io.IOException;
 import java.io.PrintWriter;
+import java.net.InetSocketAddress;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.util.Random;
 import java.util.Set;
 
 import javax.inject.Inject;
@@ -29,8 +33,11 @@ import javax.servlet.ServletException;
 import javax.servlet.http.HttpServletRequest;
 import javax.servlet.http.HttpServletResponse;
 
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.http.HtmlQuoting;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.server.webproxy.ProxyUriUtils;
+import org.apache.hadoop.yarn.webapp.YarnWebParams;
 
 import com.google.common.collect.Sets;
 import com.google.inject.Injector;
@@ -48,11 +55,26 @@ public class RMWebAppFilter extends GuiceContainer {
   // define a set of URIs which do not need to do redirection
   private static final Set<String> NON_REDIRECTED_URIS = Sets.newHashSet(
       "/conf", "/stacks", "/logLevel", "/logs");
+  private String path;
+  private static final int BASIC_SLEEP_TIME = 5;
+  private static final int MAX_SLEEP_TIME = 5 * 60;
 
   @Inject
-  public RMWebAppFilter(Injector injector) {
+  public RMWebAppFilter(Injector injector, Configuration conf) {
     super(injector);
     this.injector=injector;
+    InetSocketAddress sock = YarnConfiguration.useHttps(conf)
+        ? conf.getSocketAddr(YarnConfiguration.RM_WEBAPP_HTTPS_ADDRESS,
+            YarnConfiguration.DEFAULT_RM_WEBAPP_HTTPS_ADDRESS,
+            YarnConfiguration.DEFAULT_RM_WEBAPP_HTTPS_PORT)
+        : conf.getSocketAddr(YarnConfiguration.RM_WEBAPP_ADDRESS,
+            YarnConfiguration.DEFAULT_RM_WEBAPP_ADDRESS,
+            YarnConfiguration.DEFAULT_RM_WEBAPP_PORT);
+
+    path = sock.getHostName() + ":" + Integer.toString(sock.getPort());
+    path = YarnConfiguration.useHttps(conf)
+        ? "https://" + path
+        : "http://" + path;
   }
 
   @Override
@@ -69,9 +91,11 @@ public class RMWebAppFilter extends GuiceContainer {
     rmWebApp.checkIfStandbyRM();
     if (rmWebApp.isStandby()
         && shouldRedirect(rmWebApp, uri)) {
-      String redirectPath = rmWebApp.getRedirectPath() + uri;
+
+      String redirectPath = rmWebApp.getRedirectPath();
 
       if (redirectPath != null && !redirectPath.isEmpty()) {
+        redirectPath += uri;
         String redirectMsg =
             "This is standby RM. The redirect url is: " + redirectPath;
         PrintWriter out = response.getWriter();
@@ -79,11 +103,40 @@ public class RMWebAppFilter extends GuiceContainer {
         response.setHeader("Location", redirectPath);
         response.setStatus(HttpServletResponse.SC_TEMPORARY_REDIRECT);
         return;
+      } else {
+        boolean doRetry = true;
+        String retryIntervalStr =
+            request.getParameter(YarnWebParams.NEXT_REFRESH_INTERVAL);
+        int retryInterval = 0;
+        if (retryIntervalStr != null) {
+          try {
+            retryInterval = Integer.parseInt(retryIntervalStr.trim());
+          } catch (NumberFormatException ex) {
+            doRetry = false;
+          }
+        }
+        int next = calculateExponentialTime(retryInterval);
+
+        String redirectUrl =
+            appendOrReplaceParamter(path + uri,
+              YarnWebParams.NEXT_REFRESH_INTERVAL + "=" + (retryInterval + 1));
+        if (redirectUrl == null || next > MAX_SLEEP_TIME) {
+          doRetry = false;
+        }
+        String redirectMsg =
+            doRetry ? "Can not find any active RM. Will retry in next " + next
+                + " seconds." : "There is no active RM right now.";
+        PrintWriter out = response.getWriter();
+        out.println(redirectMsg);
+        if (doRetry) {
+          response.setHeader("Refresh", next + ";url=" + redirectUrl);
+          response.setStatus(HttpServletResponse.SC_TEMPORARY_REDIRECT);
+        }
       }
+      return;
     }
 
     super.doFilter(request, response, chain);
-
   }
 
   private boolean shouldRedirect(RMWebApp rmWebApp, String uri) {
@@ -92,4 +145,33 @@ public class RMWebAppFilter extends GuiceContainer {
         && !uri.startsWith(ProxyUriUtils.PROXY_BASE)
         && !NON_REDIRECTED_URIS.contains(uri);
   }
-}
+
+  private String appendOrReplaceParamter(String uri, String newQuery) {
+    if (uri.contains(YarnWebParams.NEXT_REFRESH_INTERVAL + "=")) {
+      return uri.replaceAll(YarnWebParams.NEXT_REFRESH_INTERVAL + "=[^&]+",
+        newQuery);
+    }
+    try {
+      URI oldUri = new URI(uri);
+      String appendQuery = oldUri.getQuery();
+      if (appendQuery == null) {
+        appendQuery = newQuery;
+      } else {
+        appendQuery += "&" + newQuery;
+      }
+
+      URI newUri =
+          new URI(oldUri.getScheme(), oldUri.getAuthority(), oldUri.getPath(),
+            appendQuery, oldUri.getFragment());
+
+      return newUri.toString();
+    } catch (URISyntaxException e) {
+      return null;
+    }
+  }
+
+  private static int calculateExponentialTime(int retries) {
+    long baseTime = BASIC_SLEEP_TIME * (1L << retries);
+    return (int) (baseTime * ((new Random()).nextDouble() + 0.5));
+  }
+}
\ No newline at end of file

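The filter above spaces out client refreshes with exponential backoff plus random jitter. A standalone sketch of that calculation, with the constants copied from RMWebAppFilter and an illustrative loop showing when the filter would stop retrying:

    import java.util.Random;

    public class RefreshBackoff {
      private static final int BASIC_SLEEP_TIME = 5;     // seconds, as in the filter
      private static final int MAX_SLEEP_TIME = 5 * 60;  // five-minute cap

      // Mirrors RMWebAppFilter#calculateExponentialTime: 5 * 2^retries seconds,
      // scaled by a random factor in [0.5, 1.5) so clients do not retry in lockstep.
      static int calculateExponentialTime(int retries) {
        long baseTime = BASIC_SLEEP_TIME * (1L << retries);
        return (int) (baseTime * (new Random().nextDouble() + 0.5));
      }

      public static void main(String[] args) {
        for (int retry = 0; retry <= 7; retry++) {
          int next = calculateExponentialTime(retry);
          // The filter stops setting the Refresh header once the interval
          // would exceed MAX_SLEEP_TIME.
          System.out.println("retry " + retry + " -> " + next + "s"
              + (next > MAX_SLEEP_TIME ? " (filter would give up)" : ""));
        }
      }
    }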

[09/50] [abbrv] hadoop git commit: HDFS-8946. Improve choosing datanode storage for block placement. (yliu)

Posted by vv...@apache.org.
HDFS-8946. Improve choosing datanode storage for block placement. (yliu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8fa41d9d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8fa41d9d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8fa41d9d

Branch: refs/heads/YARN-3926
Commit: 8fa41d9dd4b923bf4141f019414a1a8b079124c6
Parents: 4eaa7fd
Author: yliu <yl...@apache.org>
Authored: Tue Sep 1 08:52:50 2015 +0800
Committer: yliu <yl...@apache.org>
Committed: Tue Sep 1 08:52:50 2015 +0800

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |   2 +
 .../BlockPlacementPolicyDefault.java            | 147 ++++++-------------
 .../blockmanagement/DatanodeDescriptor.java     |  36 +++--
 .../blockmanagement/TestReplicationPolicy.java  |  26 +++-
 4 files changed, 93 insertions(+), 118 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8fa41d9d/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index ef8fac5..6584c84 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -870,6 +870,8 @@ Release 2.8.0 - UNRELEASED
     HDFS-8990. Move RemoteBlockReader to hdfs-client module.
     (Mingliang via wheat9)
 
+    HDFS-8946. Improve choosing datanode storage for block placement. (yliu)
+
   OPTIMIZATIONS
 
     HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8fa41d9d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
index 6d7a765..f761150 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
@@ -26,12 +26,9 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
-import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
-import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage.State;
 import org.apache.hadoop.net.NetworkTopology;
 import org.apache.hadoop.net.Node;
 import org.apache.hadoop.net.NodeBase;
@@ -458,19 +455,18 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
         for (Iterator<Map.Entry<StorageType, Integer>> iter = storageTypes
             .entrySet().iterator(); iter.hasNext(); ) {
           Map.Entry<StorageType, Integer> entry = iter.next();
-          for (DatanodeStorageInfo localStorage : DFSUtil.shuffle(
-              localDatanode.getStorageInfos())) {
-            StorageType type = entry.getKey();
-            if (addIfIsGoodTarget(localStorage, excludedNodes, blocksize,
-                results, type) >= 0) {
-              int num = entry.getValue();
-              if (num == 1) {
-                iter.remove();
-              } else {
-                entry.setValue(num - 1);
-              }
-              return localStorage;
+          DatanodeStorageInfo localStorage = chooseStorage4Block(
+              localDatanode, blocksize, results, entry.getKey());
+          if (localStorage != null) {
+            // add node and related nodes to excludedNode
+            addToExcludedNodes(localDatanode, excludedNodes);
+            int num = entry.getValue();
+            if (num == 1) {
+              iter.remove();
+            } else {
+              entry.setValue(num - 1);
             }
+            return localStorage;
           }
         }
       } 
@@ -651,7 +647,7 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
                             boolean avoidStaleNodes,
                             EnumMap<StorageType, Integer> storageTypes)
                             throws NotEnoughReplicasException {
-      
+
     int numOfAvailableNodes = clusterMap.countNumOfAvailableNodes(
         scope, excludedNodes);
     StringBuilder builder = null;
@@ -669,49 +665,39 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
           builder.append("\nNode ").append(NodeBase.getPath(chosenNode)).append(" [");
         }
         numOfAvailableNodes--;
-        if (!isGoodDatanode(chosenNode, maxNodesPerRack, considerLoad,
+        DatanodeStorageInfo storage = null;
+        if (isGoodDatanode(chosenNode, maxNodesPerRack, considerLoad,
             results, avoidStaleNodes)) {
-          if (LOG.isDebugEnabled()) {
-            builder.append("\n]");
-          }
-          badTarget = true;
-          continue;
-        }
-
-        final DatanodeStorageInfo[] storages = DFSUtil.shuffle(
-            chosenNode.getStorageInfos());
-        int i = 0;
-        boolean search = true;
-        for (Iterator<Map.Entry<StorageType, Integer>> iter = storageTypes
-            .entrySet().iterator(); search && iter.hasNext(); ) {
-          Map.Entry<StorageType, Integer> entry = iter.next();
-          for (i = 0; i < storages.length; i++) {
-            StorageType type = entry.getKey();
-            final int newExcludedNodes = addIfIsGoodTarget(storages[i],
-                excludedNodes, blocksize, results, type);
-            if (newExcludedNodes >= 0) {
+          for (Iterator<Map.Entry<StorageType, Integer>> iter = storageTypes
+              .entrySet().iterator(); iter.hasNext(); ) {
+            Map.Entry<StorageType, Integer> entry = iter.next();
+            storage = chooseStorage4Block(
+                chosenNode, blocksize, results, entry.getKey());
+            if (storage != null) {
               numOfReplicas--;
               if (firstChosen == null) {
-                firstChosen = storages[i];
+                firstChosen = storage;
               }
-              numOfAvailableNodes -= newExcludedNodes;
+              // add node and related nodes to excludedNode
+              numOfAvailableNodes -=
+                  addToExcludedNodes(chosenNode, excludedNodes);
               int num = entry.getValue();
               if (num == 1) {
                 iter.remove();
               } else {
                 entry.setValue(num - 1);
               }
-              search = false;
               break;
             }
           }
         }
+
         if (LOG.isDebugEnabled()) {
           builder.append("\n]");
         }
 
         // If no candidate storage was found on this DN then set badTarget.
-        badTarget = (i == storages.length);
+        badTarget = (storage == null);
       }
     }
       
@@ -740,32 +726,27 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
   }
 
   /**
-   * If the given storage is a good target, add it to the result list and
-   * update the set of excluded nodes.
-   * @return -1 if the given is not a good target;
-   *         otherwise, return the number of nodes added to excludedNodes set.
+   * Choose a good storage of the given storage type from the datanode, and
+   * add it to the result list.
+   *
+   * @param dnd datanode descriptor
+   * @param blockSize requested block size
+   * @param results the result storages
+   * @param storageType requested storage type
+   * @return the chosen datanode storage
    */
-  int addIfIsGoodTarget(DatanodeStorageInfo storage,
-      Set<Node> excludedNodes,
+  DatanodeStorageInfo chooseStorage4Block(DatanodeDescriptor dnd,
       long blockSize,
       List<DatanodeStorageInfo> results,
       StorageType storageType) {
-    if (isGoodTarget(storage, blockSize, results, storageType)) {
+    DatanodeStorageInfo storage =
+        dnd.chooseStorage4Block(storageType, blockSize);
+    if (storage != null) {
       results.add(storage);
-      // add node and related nodes to excludedNode
-      return addToExcludedNodes(storage.getDatanodeDescriptor(), excludedNodes);
-    } else { 
-      return -1;
-    }
-  }
-
-  private static void logNodeIsNotChosen(DatanodeStorageInfo storage, String reason) {
-    if (LOG.isDebugEnabled()) {
-      // build the error message for later use.
-      debugLoggingBuilder.get()
-          .append("\n  Storage ").append(storage)
-          .append(" is not chosen since ").append(reason).append(".");
+    } else {
+      logNodeIsNotChosen(dnd, "no good storage to place the block ");
     }
+    return storage;
   }
 
   private static void logNodeIsNotChosen(DatanodeDescriptor node,
@@ -837,52 +818,6 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
   }
 
   /**
-   * Determine if a storage is a good target.
-   *
-   * @param storage The target storage
-   * @param blockSize Size of block
-   * @param results A list containing currently chosen nodes. Used to check if
-   *                too many nodes has been chosen in the target rack.
-   * @return Return true if <i>node</i> has enough space.
-   */
-  private boolean isGoodTarget(DatanodeStorageInfo storage,
-                               long blockSize,
-                               List<DatanodeStorageInfo> results,
-                               StorageType requiredStorageType) {
-    if (storage.getStorageType() != requiredStorageType) {
-      logNodeIsNotChosen(storage, "storage types do not match,"
-          + " where the required storage type is " + requiredStorageType);
-      return false;
-    }
-    if (storage.getState() == State.READ_ONLY_SHARED) {
-      logNodeIsNotChosen(storage, "storage is read-only");
-      return false;
-    }
-
-    if (storage.getState() == State.FAILED) {
-      logNodeIsNotChosen(storage, "storage has failed");
-      return false;
-    }
-
-    DatanodeDescriptor node = storage.getDatanodeDescriptor();
-
-    final long requiredSize = blockSize * HdfsServerConstants.MIN_BLOCKS_FOR_WRITE;
-    final long scheduledSize = blockSize * node.getBlocksScheduled(storage.getStorageType());
-    final long remaining = node.getRemaining(storage.getStorageType(),
-        requiredSize);
-    if (requiredSize > remaining - scheduledSize) {
-      logNodeIsNotChosen(storage, "the node does not have enough "
-          + storage.getStorageType() + " space"
-          + " (required=" + requiredSize
-          + ", scheduled=" + scheduledSize
-          + ", remaining=" + remaining + ")");
-      return false;
-    }
-
-    return true;
-  }
-
-  /**
    * Return a pipeline of nodes.
    * The pipeline is formed finding a shortest path that 
    * starts from the writer and traverses all <i>nodes</i>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8fa41d9d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
index 7e3c59b..0b398c5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
@@ -31,14 +31,15 @@ import java.util.Queue;
 import java.util.Set;
 
 import com.google.common.annotations.VisibleForTesting;
-
 import com.google.common.collect.ImmutableList;
+
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.namenode.CachedBlock;
 import org.apache.hadoop.hdfs.server.protocol.BlockReportContext;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
@@ -663,26 +664,39 @@ public class DatanodeDescriptor extends DatanodeInfo {
   }
 
   /**
-   * Return the sum of remaining spaces of the specified type. If the remaining
-   * space of a storage is less than minSize, it won't be counted toward the
-   * sum.
+   * Find whether the datanode contains a good storage of the given type to
+   * place a block of size <code>blockSize</code>.
    *
-   * @param t The storage type. If null, the type is ignored.
-   * @param minSize The minimum free space required.
-   * @return the sum of remaining spaces that are bigger than minSize.
+   * <p>Currently the datanode only cares about the storage type; in this
+   * method, the first storage of the given type that is found is returned.
+   *
+   * @param t requested storage type
+   * @param blockSize requested block size
+   * @return the chosen storage, or null if the datanode has no good storage
    */
-  public long getRemaining(StorageType t, long minSize) {
+  public DatanodeStorageInfo chooseStorage4Block(StorageType t,
+      long blockSize) {
+    final long requiredSize =
+        blockSize * HdfsServerConstants.MIN_BLOCKS_FOR_WRITE;
+    final long scheduledSize = blockSize * getBlocksScheduled(t);
     long remaining = 0;
+    DatanodeStorageInfo storage = null;
     for (DatanodeStorageInfo s : getStorageInfos()) {
       if (s.getState() == State.NORMAL &&
-          (t == null || s.getStorageType() == t)) {
+          s.getStorageType() == t) {
+        if (storage == null) {
+          storage = s;
+        }
         long r = s.getRemaining();
-        if (r >= minSize) {
+        if (r >= requiredSize) {
           remaining += r;
         }
       }
     }
-    return remaining;
+    if (requiredSize > remaining - scheduledSize) {
+      return null;
+    }
+    return storage;
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8fa41d9d/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
index cec33fe..27d647c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
@@ -181,7 +181,7 @@ public class TestReplicationPolicy {
    * considered.
    */
   @Test
-  public void testChooseNodeWithMultipleStorages() throws Exception {
+  public void testChooseNodeWithMultipleStorages1() throws Exception {
     updateHeartbeatWithUsage(dataNodes[5],
         2* HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
         (2*HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE)/3, 0L,
@@ -201,6 +201,30 @@ public class TestReplicationPolicy {
   }
 
   /**
+   * Test whether all storages on the datanode are considered while
+   * choosing a target to place a block.
+   */
+  @Test
+  public void testChooseNodeWithMultipleStorages2() throws Exception {
+    updateHeartbeatWithUsage(dataNodes[5],
+        2* HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
+        (2*HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE)/3, 0L,
+        0L, 0L, 0, 0);
+
+    updateHeartbeatForExtraStorage(
+        2* HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
+        HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L);
+
+    DatanodeStorageInfo[] targets;
+    targets = chooseTarget (1, dataNodes[5],
+        new ArrayList<DatanodeStorageInfo>(), null);
+    assertEquals(1, targets.length);
+    assertEquals(dataNodes[5], targets[0].getDatanodeDescriptor());
+
+    resetHeartbeatForStorages();
+  }
+
+  /**
    * In this testcase, client is dataNodes[0]. So the 1st replica should be
    * placed on dataNodes[0], the 2nd replica should be placed on 
    * different rack and third should be placed on different node

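The refactoring above moves the free-space check from BlockPlacementPolicyDefault#isGoodTarget into DatanodeDescriptor#chooseStorage4Block. A simplified standalone model of the new check, using a hypothetical Storage class in place of DatanodeStorageInfo (storage state is ignored here):

    import java.util.ArrayList;
    import java.util.List;

    public class ChooseStorageSketch {
      // Hypothetical stand-in for DatanodeStorageInfo: a type tag plus free bytes.
      static class Storage {
        final String type;       // e.g. "DISK" or "SSD"
        final long remaining;    // free bytes reported by this storage
        Storage(String type, long remaining) {
          this.type = type;
          this.remaining = remaining;
        }
      }

      // Mirrors the shape of DatanodeDescriptor#chooseStorage4Block: return the
      // first storage of the requested type, but only if the node as a whole has
      // enough room once already-scheduled blocks are subtracted.
      static Storage chooseStorage4Block(List<Storage> storages, String type,
          long blockSize, int blocksScheduled, int minBlocksForWrite) {
        long requiredSize = blockSize * minBlocksForWrite;
        long scheduledSize = blockSize * blocksScheduled;
        long remaining = 0;
        Storage chosen = null;
        for (Storage s : storages) {
          if (s.type.equals(type)) {
            if (chosen == null) {
              chosen = s;   // first storage of the type, regardless of its size
            }
            if (s.remaining >= requiredSize) {
              remaining += s.remaining;   // count only storages with enough room
            }
          }
        }
        return (requiredSize > remaining - scheduledSize) ? null : chosen;
      }

      public static void main(String[] args) {
        List<Storage> disks = new ArrayList<Storage>();
        disks.add(new Storage("DISK", 64L << 20));    // 64 MB, too small alone
        disks.add(new Storage("DISK", 512L << 20));   // 512 MB
        // The node-wide check passes thanks to the second disk, yet the first
        // DISK storage is what gets returned, as the new javadoc notes.
        Storage s = chooseStorage4Block(disks, "DISK", 128L << 20, 1, 1);
        System.out.println(s == null ? "no good storage" : "chose " + s.remaining);
      }
    }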

[38/50] [abbrv] hadoop git commit: HDFS-9002. Move o.a.h.hdfs.net/*Peer classes to hdfs-client. Contributed by Mingliang Liu.

Posted by vv...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ed78b14e/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
index d921507..1e561cc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
@@ -20,6 +20,8 @@ package org.apache.hadoop.hdfs.protocolPB;
 import com.google.common.collect.Lists;
 import com.google.protobuf.ByteString;
 import com.google.protobuf.CodedInputStream;
+import org.apache.hadoop.crypto.CipherOption;
+import org.apache.hadoop.crypto.CipherSuite;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
@@ -264,4 +266,104 @@ public class PBHelperClient {
     assert size >= 0;
     return new ExactSizeInputStream(input, size);
   }
+
+  public static CipherOption convert(HdfsProtos.CipherOptionProto proto) {
+    if (proto != null) {
+      CipherSuite suite = null;
+      if (proto.getSuite() != null) {
+        suite = convert(proto.getSuite());
+      }
+      byte[] inKey = null;
+      if (proto.getInKey() != null) {
+        inKey = proto.getInKey().toByteArray();
+      }
+      byte[] inIv = null;
+      if (proto.getInIv() != null) {
+        inIv = proto.getInIv().toByteArray();
+      }
+      byte[] outKey = null;
+      if (proto.getOutKey() != null) {
+        outKey = proto.getOutKey().toByteArray();
+      }
+      byte[] outIv = null;
+      if (proto.getOutIv() != null) {
+        outIv = proto.getOutIv().toByteArray();
+      }
+      return new CipherOption(suite, inKey, inIv, outKey, outIv);
+    }
+    return null;
+  }
+
+  public static CipherSuite convert(HdfsProtos.CipherSuiteProto proto) {
+    switch (proto) {
+    case AES_CTR_NOPADDING:
+      return CipherSuite.AES_CTR_NOPADDING;
+    default:
+      // Set to UNKNOWN and stash the unknown enum value
+      CipherSuite suite = CipherSuite.UNKNOWN;
+      suite.setUnknownValue(proto.getNumber());
+      return suite;
+    }
+  }
+
+  public static HdfsProtos.CipherOptionProto convert(CipherOption option) {
+    if (option != null) {
+      HdfsProtos.CipherOptionProto.Builder builder = HdfsProtos.CipherOptionProto.
+          newBuilder();
+      if (option.getCipherSuite() != null) {
+        builder.setSuite(convert(option.getCipherSuite()));
+      }
+      if (option.getInKey() != null) {
+        builder.setInKey(ByteString.copyFrom(option.getInKey()));
+      }
+      if (option.getInIv() != null) {
+        builder.setInIv(ByteString.copyFrom(option.getInIv()));
+      }
+      if (option.getOutKey() != null) {
+        builder.setOutKey(ByteString.copyFrom(option.getOutKey()));
+      }
+      if (option.getOutIv() != null) {
+        builder.setOutIv(ByteString.copyFrom(option.getOutIv()));
+      }
+      return builder.build();
+    }
+    return null;
+  }
+
+  public static HdfsProtos.CipherSuiteProto convert(CipherSuite suite) {
+    switch (suite) {
+    case UNKNOWN:
+      return HdfsProtos.CipherSuiteProto.UNKNOWN;
+    case AES_CTR_NOPADDING:
+      return HdfsProtos.CipherSuiteProto.AES_CTR_NOPADDING;
+    default:
+      return null;
+    }
+  }
+
+  public static List<HdfsProtos.CipherOptionProto> convertCipherOptions(
+      List<CipherOption> options) {
+    if (options != null) {
+      List<HdfsProtos.CipherOptionProto> protos =
+          Lists.newArrayListWithCapacity(options.size());
+      for (CipherOption option : options) {
+        protos.add(convert(option));
+      }
+      return protos;
+    }
+    return null;
+  }
+
+  public static List<CipherOption> convertCipherOptionProtos(
+      List<HdfsProtos.CipherOptionProto> protos) {
+    if (protos != null) {
+      List<CipherOption> options =
+          Lists.newArrayListWithCapacity(protos.size());
+      for (HdfsProtos.CipherOptionProto proto : protos) {
+        options.add(convert(proto));
+      }
+      return options;
+    }
+    return null;
+  }
 }

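A usage sketch for the converters added above, round-tripping a CipherOption through its protobuf form. The CipherOption constructor, its getters, CipherSuite.AES_CTR_NOPADDING, and the convert overloads all appear in this diff; the import location of the generated HdfsProtos class is assumed to be the usual generated-proto package:

    import java.util.Arrays;

    import org.apache.hadoop.crypto.CipherOption;
    import org.apache.hadoop.crypto.CipherSuite;
    import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos;
    import org.apache.hadoop.hdfs.protocolPB.PBHelperClient;

    public class CipherOptionRoundTrip {
      public static void main(String[] args) {
        byte[] inKey = new byte[16], inIv = new byte[16];
        byte[] outKey = new byte[16], outIv = new byte[16];
        CipherOption option = new CipherOption(
            CipherSuite.AES_CTR_NOPADDING, inKey, inIv, outKey, outIv);

        // Java object -> protobuf message -> Java object.
        HdfsProtos.CipherOptionProto proto = PBHelperClient.convert(option);
        CipherOption back = PBHelperClient.convert(proto);

        // The suite and key material survive the round trip.
        System.out.println(back.getCipherSuite() == CipherSuite.AES_CTR_NOPADDING);
        System.out.println(Arrays.equals(inKey, back.getInKey()));
      }
    }
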
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ed78b14e/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 42460ed..ce3fbb4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -883,6 +883,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-8890. Allow admin to specify which blockpools the balancer should run
     on. (Chris Trezzo via mingma)
 
+    HDFS-9002. Move o.a.h.hdfs.net/*Peer classes to hdfs-client.
+    (Mingliang Liu via wheat9)
+
   OPTIMIZATIONS
 
     HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ed78b14e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index 268a5b9..95e9ad4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -99,7 +99,6 @@ import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
 import org.apache.hadoop.hdfs.client.impl.DfsClientConf;
 import org.apache.hadoop.hdfs.client.impl.LeaseRenewer;
 import org.apache.hadoop.hdfs.net.Peer;
-import org.apache.hadoop.hdfs.net.TcpPeerServer;
 import org.apache.hadoop.hdfs.protocol.AclException;
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
 import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
@@ -3018,7 +3017,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
     try {
       sock = socketFactory.createSocket();
       NetUtils.connect(sock, addr, getRandomLocalInterfaceAddr(), socketTimeout);
-      peer = TcpPeerServer.peerFromSocketAndKey(saslClient, sock, this,
+      peer = DFSUtilClient.peerFromSocketAndKey(saslClient, sock, this,
           blockToken, datanodeId);
       peer.setReadTimeout(socketTimeout);
       peer.setWriteTimeout(socketTimeout);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ed78b14e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 6420b55..84858f6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -599,14 +599,28 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   // Security-related configs
   public static final String DFS_ENCRYPT_DATA_TRANSFER_KEY = "dfs.encrypt.data.transfer";
   public static final boolean DFS_ENCRYPT_DATA_TRANSFER_DEFAULT = false;
-  public static final String DFS_ENCRYPT_DATA_TRANSFER_CIPHER_KEY_BITLENGTH_KEY = "dfs.encrypt.data.transfer.cipher.key.bitlength";
-  public static final int    DFS_ENCRYPT_DATA_TRANSFER_CIPHER_KEY_BITLENGTH_DEFAULT = 128;
-  public static final String DFS_ENCRYPT_DATA_TRANSFER_CIPHER_SUITES_KEY = "dfs.encrypt.data.transfer.cipher.suites";
+  @Deprecated
+  public static final String DFS_ENCRYPT_DATA_TRANSFER_CIPHER_KEY_BITLENGTH_KEY =
+      HdfsClientConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_CIPHER_KEY_BITLENGTH_KEY;
+  @Deprecated
+  public static final int    DFS_ENCRYPT_DATA_TRANSFER_CIPHER_KEY_BITLENGTH_DEFAULT =
+      HdfsClientConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_CIPHER_KEY_BITLENGTH_DEFAULT;
+  @Deprecated
+  public static final String DFS_ENCRYPT_DATA_TRANSFER_CIPHER_SUITES_KEY =
+      HdfsClientConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_CIPHER_SUITES_KEY;
   public static final String DFS_DATA_ENCRYPTION_ALGORITHM_KEY = "dfs.encrypt.data.transfer.algorithm";
-  public static final String DFS_TRUSTEDCHANNEL_RESOLVER_CLASS = "dfs.trustedchannel.resolver.class";
-  public static final String DFS_DATA_TRANSFER_PROTECTION_KEY = "dfs.data.transfer.protection";
-  public static final String DFS_DATA_TRANSFER_PROTECTION_DEFAULT = "";
-  public static final String DFS_DATA_TRANSFER_SASL_PROPS_RESOLVER_CLASS_KEY = "dfs.data.transfer.saslproperties.resolver.class";
+  @Deprecated
+  public static final String DFS_TRUSTEDCHANNEL_RESOLVER_CLASS =
+      HdfsClientConfigKeys.DFS_TRUSTEDCHANNEL_RESOLVER_CLASS;
+  @Deprecated
+  public static final String DFS_DATA_TRANSFER_PROTECTION_KEY =
+      HdfsClientConfigKeys.DFS_DATA_TRANSFER_PROTECTION_KEY;
+  @Deprecated
+  public static final String DFS_DATA_TRANSFER_PROTECTION_DEFAULT =
+      HdfsClientConfigKeys.DFS_DATA_TRANSFER_PROTECTION_DEFAULT;
+  @Deprecated
+  public static final String DFS_DATA_TRANSFER_SASL_PROPS_RESOLVER_CLASS_KEY =
+      HdfsClientConfigKeys.DFS_DATA_TRANSFER_SASL_PROPS_RESOLVER_CLASS_KEY;
   public static final int    DFS_NAMENODE_LIST_ENCRYPTION_ZONES_NUM_RESPONSES_DEFAULT = 100;
   public static final String DFS_NAMENODE_LIST_ENCRYPTION_ZONES_NUM_RESPONSES = "dfs.namenode.list.encryption.zones.num.responses";
   public static final String DFS_ENCRYPTION_KEY_PROVIDER_URI =

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ed78b14e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/net/BasicInetPeer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/net/BasicInetPeer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/net/BasicInetPeer.java
deleted file mode 100644
index a9f33e7..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/net/BasicInetPeer.java
+++ /dev/null
@@ -1,133 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.net;
-
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.OutputStream;
-import java.net.Socket;
-import java.nio.channels.ReadableByteChannel;
-
-import org.apache.hadoop.net.unix.DomainSocket;
-
-/**
- * Represents a peer that we communicate with by using a basic Socket
- * that has no associated Channel.
- *
- */
-class BasicInetPeer implements Peer {
-  private final Socket socket;
-  private final OutputStream out;
-  private final InputStream in;
-  private final boolean isLocal;
-
-  public BasicInetPeer(Socket socket) throws IOException {
-    this.socket = socket;
-    this.out = socket.getOutputStream();
-    this.in = socket.getInputStream();
-    this.isLocal = socket.getInetAddress().equals(socket.getLocalAddress());
-  }
-
-  @Override
-  public ReadableByteChannel getInputStreamChannel() {
-    /*
-     * This Socket has no channel, so there's nothing to return here.
-     */
-    return null;
-  }
-
-  @Override
-  public void setReadTimeout(int timeoutMs) throws IOException {
-    socket.setSoTimeout(timeoutMs);
-  }
-
-  @Override
-  public int getReceiveBufferSize() throws IOException {
-    return socket.getReceiveBufferSize();
-  }
-
-  @Override
-  public boolean getTcpNoDelay() throws IOException {
-    return socket.getTcpNoDelay();
-  }
-
-  @Override
-  public void setWriteTimeout(int timeoutMs) {
-   /* 
-    * We can't implement write timeouts. :(
-    * 
-    * Java provides no facility to set a blocking write timeout on a Socket.
-    * You can simulate a blocking write with a timeout by using
-    * non-blocking I/O.  However, we can't use nio here, because this Socket
-    * doesn't have an associated Channel.
-    * 
-    * See http://bugs.sun.com/bugdatabase/view_bug.do?bug_id=4031100 for
-    * more details.
-    */
-  }
-
-  @Override
-  public boolean isClosed() {
-    return socket.isClosed();
-  }
-
-  @Override
-  public void close() throws IOException {
-    socket.close();
-  }
-
-  @Override
-  public String getRemoteAddressString() {
-    return socket.getRemoteSocketAddress().toString();
-  }
-
-  @Override
-  public String getLocalAddressString() {
-    return socket.getLocalSocketAddress().toString();
-  }
-  
-  @Override
-  public InputStream getInputStream() throws IOException {
-    return in;
-  }
-
-  @Override
-  public OutputStream getOutputStream() throws IOException {
-    return out;
-  }
-
-  @Override
-  public boolean isLocal() {
-    return isLocal;
-  }
-
-  @Override
-  public String toString() {
-    return "BasicInetPeer(" + socket.toString() + ")";
-  }
-
-  @Override
-  public DomainSocket getDomainSocket() {
-    return null;
-  }
-
-  @Override
-  public boolean hasSecureChannel() {
-    return false;
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ed78b14e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/net/EncryptedPeer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/net/EncryptedPeer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/net/EncryptedPeer.java
deleted file mode 100644
index da660c7..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/net/EncryptedPeer.java
+++ /dev/null
@@ -1,142 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.net;
-
-import java.io.IOException;
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hdfs.protocol.datatransfer.IOStreamPair;
-import org.apache.hadoop.net.unix.DomainSocket;
-
-import java.io.InputStream;
-import java.io.OutputStream;
-import java.nio.channels.ReadableByteChannel;
-
-/**
- * Represents a peer that we communicate with by using an encrypted
- * communications medium.
- */
-@InterfaceAudience.Private
-public class EncryptedPeer implements Peer {
-  private final Peer enclosedPeer;
-
-  /**
-   * An encrypted InputStream.
-   */
-  private final InputStream in;
-  
-  /**
-   * An encrypted OutputStream.
-   */
-  private final OutputStream out;
-  
-  /**
-   * An encrypted ReadableByteChannel.
-   */
-  private final ReadableByteChannel channel;
-
-  public EncryptedPeer(Peer enclosedPeer, IOStreamPair ios) {
-    this.enclosedPeer = enclosedPeer;
-    this.in = ios.in;
-    this.out = ios.out;
-    this.channel = ios.in instanceof ReadableByteChannel ? 
-        (ReadableByteChannel)ios.in : null;
-  }
-
-  @Override
-  public ReadableByteChannel getInputStreamChannel() {
-    return channel;
-  }
-
-  @Override
-  public void setReadTimeout(int timeoutMs) throws IOException {
-    enclosedPeer.setReadTimeout(timeoutMs);
-  }
-
-  @Override
-  public int getReceiveBufferSize() throws IOException {
-    return enclosedPeer.getReceiveBufferSize();
-  }
-
-  @Override
-  public boolean getTcpNoDelay() throws IOException {
-    return enclosedPeer.getTcpNoDelay();
-  }
-
-  @Override
-  public void setWriteTimeout(int timeoutMs) throws IOException {
-    enclosedPeer.setWriteTimeout(timeoutMs);
-  }
-
-  @Override
-  public boolean isClosed() {
-    return enclosedPeer.isClosed();
-  }
-
-  @Override
-  public void close() throws IOException {
-    try {
-      in.close();
-    } finally {
-      try {
-        out.close();
-      } finally {
-        enclosedPeer.close();
-      }
-    }
-  }
-
-  @Override
-  public String getRemoteAddressString() {
-    return enclosedPeer.getRemoteAddressString();
-  }
-
-  @Override
-  public String getLocalAddressString() {
-    return enclosedPeer.getLocalAddressString();
-  }
-
-  @Override
-  public InputStream getInputStream() throws IOException {
-    return in;
-  }
-
-  @Override
-  public OutputStream getOutputStream() throws IOException {
-    return out;
-  }
-
-  @Override
-  public boolean isLocal() {
-    return enclosedPeer.isLocal();
-  }
-
-  @Override
-  public String toString() {
-    return "EncryptedPeer(" + enclosedPeer + ")";
-  }
-
-  @Override
-  public DomainSocket getDomainSocket() {
-    return enclosedPeer.getDomainSocket();
-  }
-
-  @Override
-  public boolean hasSecureChannel() {
-    return true;
-  }
-}

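Note on the removed EncryptedPeer: its close() chains nested try/finally blocks so that a failure closing the encrypted input stream still closes the output stream and the enclosed peer. On Java 7+ the same guarantee falls out of try-with-resources, which closes in reverse declaration order and records later failures as suppressed exceptions instead of masking the first; a sketch (CloseChain is illustrative, not part of the patch):

    import java.io.Closeable;
    import java.io.IOException;

    final class CloseChain {
      // Closes in, then out, then peer; a failure on one does not stop
      // the others, and later exceptions are attached as suppressed.
      static void closeAll(Closeable in, Closeable out, Closeable peer)
          throws IOException {
        try (Closeable p = peer; Closeable o = out; Closeable i = in) {
          // intentionally empty: exiting the block closes i, o, p in order
        }
      }
    }
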
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ed78b14e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/net/NioInetPeer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/net/NioInetPeer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/net/NioInetPeer.java
deleted file mode 100644
index 5bb4f56..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/net/NioInetPeer.java
+++ /dev/null
@@ -1,136 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.net;
-
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.OutputStream;
-import java.net.Socket;
-import java.nio.channels.ReadableByteChannel;
-
-import org.apache.hadoop.net.SocketInputStream;
-import org.apache.hadoop.net.SocketOutputStream;
-import org.apache.hadoop.net.unix.DomainSocket;
-
-/**
- * Represents a peer that we communicate with by using non-blocking I/O 
- * on a Socket.
- */
-class NioInetPeer implements Peer {
-  private final Socket socket;
-
-  /**
-   * An InputStream which simulates blocking I/O with timeouts using NIO.
-   */
-  private final SocketInputStream in;
-  
-  /**
-   * An OutputStream which simulates blocking I/O with timeouts using NIO.
-   */
-  private final SocketOutputStream out;
-
-  private final boolean isLocal;
-
-  NioInetPeer(Socket socket) throws IOException {
-    this.socket = socket;
-    this.in = new SocketInputStream(socket.getChannel(), 0);
-    this.out = new SocketOutputStream(socket.getChannel(), 0);
-    this.isLocal = socket.getInetAddress().equals(socket.getLocalAddress());
-  }
-
-  @Override
-  public ReadableByteChannel getInputStreamChannel() {
-    return in;
-  }
-
-  @Override
-  public void setReadTimeout(int timeoutMs) throws IOException {
-    in.setTimeout(timeoutMs);
-  }
-
-  @Override
-  public int getReceiveBufferSize() throws IOException {
-    return socket.getReceiveBufferSize();
-  }
-
-  @Override
-  public boolean getTcpNoDelay() throws IOException {
-    return socket.getTcpNoDelay();
-  }
-
-  @Override
-  public void setWriteTimeout(int timeoutMs) throws IOException {
-    out.setTimeout(timeoutMs);
-  }
-
-  @Override
-  public boolean isClosed() {
-    return socket.isClosed();
-  }
-
-  @Override
-  public void close() throws IOException {
-    // We always close the outermost streams-- in this case, 'in' and 'out'
-    // Closing either one of these will also close the Socket.
-    try {
-      in.close();
-    } finally {
-      out.close();
-    }
-  }
-
-  @Override
-  public String getRemoteAddressString() {
-    return socket.getRemoteSocketAddress().toString();
-  }
-
-  @Override
-  public String getLocalAddressString() {
-    return socket.getLocalSocketAddress().toString();
-  }
-
-  @Override
-  public InputStream getInputStream() throws IOException {
-    return in;
-  }
-
-  @Override
-  public OutputStream getOutputStream() throws IOException {
-    return out;
-  }
-
-  @Override
-  public boolean isLocal() {
-    return isLocal;
-  }
-
-  @Override
-  public String toString() {
-    return "NioInetPeer(" + socket.toString() + ")";
-  }
-
-  @Override
-  public DomainSocket getDomainSocket() {
-    return null;
-  }
-
-  @Override
-  public boolean hasSecureChannel() {
-    return false;
-  }
-}

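Note on the removed NioInetPeer: it wraps socket.getChannel(), so it only applies to sockets opened through NIO; a socket from plain new Socket(...) has no channel and gets a BasicInetPeer instead (see peerFromSocket in the TcpPeerServer diff below). A sketch of the distinction, assuming a listener on 127.0.0.1:8020 (address, port, and class name are illustrative):

    import java.io.IOException;
    import java.net.InetSocketAddress;
    import java.net.Socket;
    import java.nio.channels.SocketChannel;

    public class ChannelBackedSockets {
      public static void main(String[] args) throws IOException {
        // Opened via NIO: carries a channel, so peerFromSocket picks NioInetPeer.
        Socket nio = SocketChannel.open(new InetSocketAddress("127.0.0.1", 8020)).socket();
        System.out.println(nio.getChannel() != null);   // true

        // Classic constructor: no channel, so it falls back to BasicInetPeer.
        Socket oio = new Socket("127.0.0.1", 8020);
        System.out.println(oio.getChannel() != null);   // false
      }
    }
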
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ed78b14e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/net/TcpPeerServer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/net/TcpPeerServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/net/TcpPeerServer.java
index 2a547e0..e31e46a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/net/TcpPeerServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/net/TcpPeerServer.java
@@ -20,22 +20,15 @@ package org.apache.hadoop.hdfs.net;
 import java.io.IOException;
 import java.net.InetSocketAddress;
 import java.net.ServerSocket;
-import java.net.Socket;
 import java.net.SocketTimeoutException;
 import java.nio.channels.ServerSocketChannel;
-import java.nio.channels.SocketChannel;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hdfs.protocol.DatanodeID;
-import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.DataEncryptionKeyFactory;
-import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.SaslDataTransferClient;
-import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
+import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdfs.server.datanode.SecureDataNodeStarter.SecureResources;
-import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.ipc.Server;
-import org.apache.hadoop.security.token.Token;
 
 @InterfaceAudience.Private
 public class TcpPeerServer implements PeerServer {
@@ -43,60 +36,6 @@ public class TcpPeerServer implements PeerServer {
 
   private final ServerSocket serverSocket;
 
-  public static Peer peerFromSocket(Socket socket)
-      throws IOException {
-    Peer peer = null;
-    boolean success = false;
-    try {
-      // TCP_NODELAY is crucial here because of bad interactions between
-      // Nagle's Algorithm and Delayed ACKs. With connection keepalive
-      // between the client and DN, the conversation looks like:
-      //   1. Client -> DN: Read block X
-      //   2. DN -> Client: data for block X
-      //   3. Client -> DN: Status OK (successful read)
-      //   4. Client -> DN: Read block Y
-      // The fact that step #3 and #4 are both in the client->DN direction
-      // triggers Nagling. If the DN is using delayed ACKs, this results
-      // in a delay of 40ms or more.
-      //
-      // TCP_NODELAY disables Nagle's algorithm and thus avoids this
-      // performance disaster.
-      socket.setTcpNoDelay(true);
-      SocketChannel channel = socket.getChannel();
-      if (channel == null) {
-        peer = new BasicInetPeer(socket);
-      } else {
-        peer = new NioInetPeer(socket);
-      }
-      success = true;
-      return peer;
-    } finally {
-      if (!success) {
-        if (peer != null) peer.close();
-        socket.close();
-      }
-    }
-  }
-
-  public static Peer peerFromSocketAndKey(
-        SaslDataTransferClient saslClient, Socket s,
-        DataEncryptionKeyFactory keyFactory,
-        Token<BlockTokenIdentifier> blockToken, DatanodeID datanodeId)
-        throws IOException {
-    Peer peer = null;
-    boolean success = false;
-    try {
-      peer = peerFromSocket(s);
-      peer = saslClient.peerSend(peer, keyFactory, blockToken, datanodeId);
-      success = true;
-      return peer;
-    } finally {
-      if (!success) {
-        IOUtils.cleanup(null, peer);
-      }
-    }
-  }
-
   /**
    * Create a non-secure TcpPeerServer.
    *
@@ -136,7 +75,7 @@ public class TcpPeerServer implements PeerServer {
 
   @Override
   public Peer accept() throws IOException, SocketTimeoutException {
-    Peer peer = peerFromSocket(serverSocket.accept());
+    Peer peer = DFSUtilClient.peerFromSocket(serverSocket.accept());
     return peer;
   }
 

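Note on the peerFromSocket() comment moved out above: it carries the key performance reasoning. With Nagle's algorithm on, the client's read-status ack followed immediately by the next block request gets buffered until the DataNode's delayed ACK arrives, costing 40ms or more per read. A sketch of the same fix on any hand-rolled client socket (connectNoDelay is an illustrative helper):

    import java.io.IOException;
    import java.net.Socket;

    public final class NoDelaySockets {
      static Socket connectNoDelay(String host, int port) throws IOException {
        Socket s = new Socket(host, port);
        // Disable Nagle's algorithm so small back-to-back writes (status ack,
        // then the next request) go out immediately instead of waiting on the
        // peer's delayed ACK.
        s.setTcpNoDelay(true);
        return s;
      }
    }
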
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ed78b14e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/IOStreamPair.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/IOStreamPair.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/IOStreamPair.java
deleted file mode 100644
index 23407f8..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/IOStreamPair.java
+++ /dev/null
@@ -1,37 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.protocol.datatransfer;
-
-import java.io.InputStream;
-import java.io.OutputStream;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-
-/**
- * A little struct class to wrap an InputStream and an OutputStream.
- */
-@InterfaceAudience.Private
-public class IOStreamPair {
-  public final InputStream in;
-  public final OutputStream out;
-  
-  public IOStreamPair(InputStream in, OutputStream out) {
-    this.in = in;
-    this.out = out;
-  }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ed78b14e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/TrustedChannelResolver.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/TrustedChannelResolver.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/TrustedChannelResolver.java
deleted file mode 100644
index 9e6a43d..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/TrustedChannelResolver.java
+++ /dev/null
@@ -1,81 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.protocol.datatransfer;
-
-import java.net.InetAddress;
-
-import org.apache.hadoop.conf.Configurable;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.util.ReflectionUtils;
-
-/**
- * Class used to indicate whether a channel is trusted or not.
- * The default implementation is to return false indicating that
- * the channel is not trusted.
- * This class can be overridden to provide custom logic to determine
- * whether a channel is trusted or not. 
- * The custom class can be specified via configuration.
- *
- */
-public class TrustedChannelResolver implements Configurable {
-  Configuration conf;
-
-  /**
-   * Returns an instance of TrustedChannelResolver.
-   * Looks up the configuration to see if there is custom class specified.
-   * @param conf
-   * @return TrustedChannelResolver
-   */
-  public static TrustedChannelResolver getInstance(Configuration conf) {
-    Class<? extends TrustedChannelResolver> clazz =
-      conf.getClass(
-          DFSConfigKeys.DFS_TRUSTEDCHANNEL_RESOLVER_CLASS,
-          TrustedChannelResolver.class, TrustedChannelResolver.class);
-    return ReflectionUtils.newInstance(clazz, conf);
-  }
-
-  @Override
-  public void setConf(Configuration conf) {
-    this.conf = conf;
-  }
-
-  @Override
-  public Configuration getConf() {
-    return conf;
-  }
-
-  /**
-   * Return boolean value indicating whether a channel is trusted or not
-   * from a client's perspective.
-   * @return true if the channel is trusted and false otherwise.
-   */
-  public boolean isTrusted() {
-    return false;
-  }
-
-
-  /**
-   * Return boolean value indicating whether a channel is trusted or not, based on the peer address.
-   * @param peerAddress address of the peer
-   * @return true if the channel is trusted and false otherwise.
-   */
-  public boolean isTrusted(InetAddress peerAddress) {
-    return false;
-  }
-}

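Note on the removed TrustedChannelResolver: getInstance() reads DFSConfigKeys.DFS_TRUSTEDCHANNEL_RESOLVER_CLASS, so deployments can plug in their own trust logic to skip SASL/encryption on channels they consider safe. A hedged sketch of a custom resolver that trusts only loopback peers (LoopbackTrustedResolver is illustrative, not part of the patch):

    import java.net.InetAddress;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSConfigKeys;
    import org.apache.hadoop.hdfs.protocol.datatransfer.TrustedChannelResolver;

    public class LoopbackTrustedResolver extends TrustedChannelResolver {
      @Override
      public boolean isTrusted() {
        return false;   // no peer address available here, so stay untrusting
      }

      @Override
      public boolean isTrusted(InetAddress peerAddress) {
        return peerAddress.isLoopbackAddress();   // skip SASL only on loopback
      }

      // Wiring: point the resolver key at this class, then resolve it.
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        conf.setClass(DFSConfigKeys.DFS_TRUSTEDCHANNEL_RESOLVER_CLASS,
            LoopbackTrustedResolver.class, TrustedChannelResolver.class);
        System.out.println(TrustedChannelResolver.getInstance(conf).getClass());
      }
    }
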
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ed78b14e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/DataEncryptionKeyFactory.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/DataEncryptionKeyFactory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/DataEncryptionKeyFactory.java
deleted file mode 100644
index 959cba0..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/DataEncryptionKeyFactory.java
+++ /dev/null
@@ -1,38 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.protocol.datatransfer.sasl;
-
-import java.io.IOException;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
-
-/**
- * Creates a new {@link DataEncryptionKey} on demand.
- */
-@InterfaceAudience.Private
-public interface DataEncryptionKeyFactory {
-
-  /**
-   * Creates a new DataEncryptionKey.
-   *
-   * @return DataEncryptionKey newly created
-   * @throws IOException for any error
-   */
-  DataEncryptionKey newDataEncryptionKey() throws IOException;
-}

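Note on the removed DataEncryptionKeyFactory: it declares a single abstract method, so on Java 8+ callers can satisfy it with a lambda or method reference, which is handy for tests. A sketch under that assumption (KeyMinter and fromMinter are illustrative; in HDFS the block token secret manager plays the key-minting role):

    import java.io.IOException;
    import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.DataEncryptionKeyFactory;
    import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;

    public final class KeyFactories {
      /** Any component that can mint keys on demand. */
      interface KeyMinter {
        DataEncryptionKey newKey() throws IOException;
      }

      // The descriptors match, so the single-method interface accepts a method ref.
      static DataEncryptionKeyFactory fromMinter(KeyMinter minter) {
        return minter::newKey;
      }
    }
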
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ed78b14e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/DataTransferSaslUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/DataTransferSaslUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/DataTransferSaslUtil.java
deleted file mode 100644
index 852819f..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/DataTransferSaslUtil.java
+++ /dev/null
@@ -1,519 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.protocol.datatransfer.sasl;
-
-import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_RPC_PROTECTION;
-import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_SASL_PROPS_RESOLVER_CLASS;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATA_TRANSFER_PROTECTION_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATA_TRANSFER_SASL_PROPS_RESOLVER_CLASS_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_CIPHER_KEY_BITLENGTH_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_CIPHER_KEY_BITLENGTH_DEFAULT;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_CIPHER_SUITES_KEY;
-import static org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed;
-
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.OutputStream;
-import java.net.InetAddress;
-import java.util.Arrays;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import javax.security.sasl.Sasl;
-
-import org.apache.commons.codec.binary.Base64;
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.crypto.CipherOption;
-import org.apache.hadoop.crypto.CipherSuite;
-import org.apache.hadoop.crypto.CryptoCodec;
-import org.apache.hadoop.crypto.CryptoInputStream;
-import org.apache.hadoop.crypto.CryptoOutputStream;
-import org.apache.hadoop.hdfs.net.Peer;
-import org.apache.hadoop.hdfs.protocol.datatransfer.IOStreamPair;
-import org.apache.hadoop.hdfs.protocol.datatransfer.InvalidEncryptionKeyException;
-import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferEncryptorMessageProto;
-import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferEncryptorMessageProto.DataTransferEncryptorStatus;
-import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto;
-import org.apache.hadoop.hdfs.protocolPB.PBHelper;
-import org.apache.hadoop.security.SaslPropertiesResolver;
-import org.apache.hadoop.security.SaslRpcServer.QualityOfProtection;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.google.common.base.Charsets;
-import com.google.common.collect.ImmutableSet;
-import com.google.common.collect.Maps;
-import com.google.common.net.InetAddresses;
-import com.google.protobuf.ByteString;
-
-/**
- * Utility methods implementing SASL negotiation for DataTransferProtocol.
- */
-@InterfaceAudience.Private
-public final class DataTransferSaslUtil {
-
-  private static final Logger LOG = LoggerFactory.getLogger(
-    DataTransferSaslUtil.class);
-
-  /**
-   * Delimiter for the three-part SASL username string.
-   */
-  public static final String NAME_DELIMITER = " ";
-
-  /**
-   * Sent by clients and validated by servers. We use a number that's unlikely
-   * to ever be sent as the value of the DATA_TRANSFER_VERSION.
-   */
-  public static final int SASL_TRANSFER_MAGIC_NUMBER = 0xDEADBEEF;
-
-  /**
-   * Checks that SASL negotiation has completed for the given participant, and
-   * the negotiated quality of protection is included in the given SASL
-   * properties and therefore acceptable.
-   *
-   * @param sasl participant to check
-   * @param saslProps properties of SASL negotiation
-   * @throws IOException for any error
-   */
-  public static void checkSaslComplete(SaslParticipant sasl,
-      Map<String, String> saslProps) throws IOException {
-    if (!sasl.isComplete()) {
-      throw new IOException("Failed to complete SASL handshake");
-    }
-    Set<String> requestedQop = ImmutableSet.copyOf(Arrays.asList(
-      saslProps.get(Sasl.QOP).split(",")));
-    String negotiatedQop = sasl.getNegotiatedQop();
-    LOG.debug("Verifying QOP, requested QOP = {}, negotiated QOP = {}",
-      requestedQop, negotiatedQop);
-    if (!requestedQop.contains(negotiatedQop)) {
-      throw new IOException(String.format("SASL handshake completed, but " +
-        "channel does not have acceptable quality of protection, " +
-        "requested = %s, negotiated = %s", requestedQop, negotiatedQop));
-    }
-  }
-  
-  /**
-   * Check whether requested SASL Qop contains privacy.
-   * 
-   * @param saslProps properties of SASL negotiation
-   * @return boolean true if privacy exists
-   */
-  public static boolean requestedQopContainsPrivacy(
-      Map<String, String> saslProps) {
-    Set<String> requestedQop = ImmutableSet.copyOf(Arrays.asList(
-        saslProps.get(Sasl.QOP).split(",")));
-    return requestedQop.contains("auth-conf");
-  }
-
-  /**
-   * Creates SASL properties required for an encrypted SASL negotiation.
-   *
-   * @param encryptionAlgorithm to use for SASL negotiation
-   * @return properties of encrypted SASL negotiation
-   */
-  public static Map<String, String> createSaslPropertiesForEncryption(
-      String encryptionAlgorithm) {
-    Map<String, String> saslProps = Maps.newHashMapWithExpectedSize(3);
-    saslProps.put(Sasl.QOP, QualityOfProtection.PRIVACY.getSaslQop());
-    saslProps.put(Sasl.SERVER_AUTH, "true");
-    saslProps.put("com.sun.security.sasl.digest.cipher", encryptionAlgorithm);
-    return saslProps;
-  }
-
-  /**
-   * For an encrypted SASL negotiation, encodes an encryption key to a SASL
-   * password.
-   *
-   * @param encryptionKey to encode
-   * @return key encoded as SASL password
-   */
-  public static char[] encryptionKeyToPassword(byte[] encryptionKey) {
-    return new String(Base64.encodeBase64(encryptionKey, false), Charsets.UTF_8)
-      .toCharArray();
-  }
-
-  /**
-   * Returns InetAddress from peer.  The getRemoteAddressString has the form
-   * [host][/ip-address]:port.  The host may be missing.  The IP address (and
-   * preceding '/') may be missing.  The port preceded by ':' is always present.
-   *
-   * @param peer
-   * @return InetAddress from peer
-   */
-  public static InetAddress getPeerAddress(Peer peer) {
-    String remoteAddr = peer.getRemoteAddressString().split(":")[0];
-    int slashIdx = remoteAddr.indexOf('/');
-    return InetAddresses.forString(slashIdx != -1 ?
-        remoteAddr.substring(slashIdx + 1, remoteAddr.length()) :
-        remoteAddr);
-  }
-
-  /**
-   * Creates a SaslPropertiesResolver from the given configuration.  This method
-   * works by cloning the configuration, translating configuration properties
-   * specific to DataTransferProtocol to what SaslPropertiesResolver expects,
-   * and then delegating to SaslPropertiesResolver for initialization.  This
-   * method returns null if SASL protection has not been configured for
-   * DataTransferProtocol.
-   *
-   * @param conf configuration to read
-   * @return SaslPropertiesResolver for DataTransferProtocol, or null if not
-   *   configured
-   */
-  public static SaslPropertiesResolver getSaslPropertiesResolver(
-      Configuration conf) {
-    String qops = conf.get(DFS_DATA_TRANSFER_PROTECTION_KEY);
-    if (qops == null || qops.isEmpty()) {
-      LOG.debug("DataTransferProtocol not using SaslPropertiesResolver, no " +
-        "QOP found in configuration for {}", DFS_DATA_TRANSFER_PROTECTION_KEY);
-      return null;
-    }
-    Configuration saslPropsResolverConf = new Configuration(conf);
-    saslPropsResolverConf.set(HADOOP_RPC_PROTECTION, qops);
-    Class<? extends SaslPropertiesResolver> resolverClass = conf.getClass(
-      HADOOP_SECURITY_SASL_PROPS_RESOLVER_CLASS,
-      SaslPropertiesResolver.class, SaslPropertiesResolver.class);
-    resolverClass = conf.getClass(DFS_DATA_TRANSFER_SASL_PROPS_RESOLVER_CLASS_KEY,
-      resolverClass, SaslPropertiesResolver.class);
-    saslPropsResolverConf.setClass(HADOOP_SECURITY_SASL_PROPS_RESOLVER_CLASS,
-      resolverClass, SaslPropertiesResolver.class);
-    SaslPropertiesResolver resolver = SaslPropertiesResolver.getInstance(
-      saslPropsResolverConf);
-    LOG.debug("DataTransferProtocol using SaslPropertiesResolver, configured " +
-      "QOP {} = {}, configured class {} = {}", DFS_DATA_TRANSFER_PROTECTION_KEY, qops, 
-      DFS_DATA_TRANSFER_SASL_PROPS_RESOLVER_CLASS_KEY, resolverClass);
-    return resolver;
-  }
-
-  /**
-   * Reads a SASL negotiation message.
-   *
-   * @param in stream to read
-   * @return bytes of SASL negotiation message
-   * @throws IOException for any error
-   */
-  public static byte[] readSaslMessage(InputStream in) throws IOException {
-    DataTransferEncryptorMessageProto proto =
-        DataTransferEncryptorMessageProto.parseFrom(vintPrefixed(in));
-    if (proto.getStatus() == DataTransferEncryptorStatus.ERROR_UNKNOWN_KEY) {
-      throw new InvalidEncryptionKeyException(proto.getMessage());
-    } else if (proto.getStatus() == DataTransferEncryptorStatus.ERROR) {
-      throw new IOException(proto.getMessage());
-    } else {
-      return proto.getPayload().toByteArray();
-    }
-  }
-  
-  /**
-   * Reads a SASL negotiation message and negotiation cipher options. 
-   * 
-   * @param in stream to read
-   * @param cipherOptions list to store negotiation cipher options
-   * @return byte[] SASL negotiation message
-   * @throws IOException for any error
-   */
-  public static byte[] readSaslMessageAndNegotiationCipherOptions(
-      InputStream in, List<CipherOption> cipherOptions) throws IOException {
-    DataTransferEncryptorMessageProto proto =
-        DataTransferEncryptorMessageProto.parseFrom(vintPrefixed(in));
-    if (proto.getStatus() == DataTransferEncryptorStatus.ERROR_UNKNOWN_KEY) {
-      throw new InvalidEncryptionKeyException(proto.getMessage());
-    } else if (proto.getStatus() == DataTransferEncryptorStatus.ERROR) {
-      throw new IOException(proto.getMessage());
-    } else {
-      List<CipherOptionProto> optionProtos = proto.getCipherOptionList();
-      if (optionProtos != null) {
-        for (CipherOptionProto optionProto : optionProtos) {
-          cipherOptions.add(PBHelper.convert(optionProto));
-        }
-      }
-      return proto.getPayload().toByteArray();
-    }
-  }
-  
-  /**
-   * Negotiate a cipher option which the server supports.
-   * 
-   * @param conf the configuration
-   * @param options the cipher options which client supports
-   * @return CipherOption negotiated cipher option
-   */
-  public static CipherOption negotiateCipherOption(Configuration conf,
-      List<CipherOption> options) throws IOException {
-    // Negotiate cipher suites if configured.  Currently, the only supported
-    // cipher suite is AES/CTR/NoPadding, but the protocol allows multiple
-    // values for future expansion.
-    String cipherSuites = conf.get(DFS_ENCRYPT_DATA_TRANSFER_CIPHER_SUITES_KEY);
-    if (cipherSuites == null || cipherSuites.isEmpty()) {
-      return null;
-    }
-    if (!cipherSuites.equals(CipherSuite.AES_CTR_NOPADDING.getName())) {
-      throw new IOException(String.format("Invalid cipher suite, %s=%s",
-          DFS_ENCRYPT_DATA_TRANSFER_CIPHER_SUITES_KEY, cipherSuites));
-    }
-    if (options != null) {
-      for (CipherOption option : options) {
-        CipherSuite suite = option.getCipherSuite();
-        if (suite == CipherSuite.AES_CTR_NOPADDING) {
-          int keyLen = conf.getInt(
-              DFS_ENCRYPT_DATA_TRANSFER_CIPHER_KEY_BITLENGTH_KEY,
-              DFS_ENCRYPT_DATA_TRANSFER_CIPHER_KEY_BITLENGTH_DEFAULT) / 8;
-          CryptoCodec codec = CryptoCodec.getInstance(conf, suite);
-          byte[] inKey = new byte[keyLen];
-          byte[] inIv = new byte[suite.getAlgorithmBlockSize()];
-          byte[] outKey = new byte[keyLen];
-          byte[] outIv = new byte[suite.getAlgorithmBlockSize()];
-          codec.generateSecureRandom(inKey);
-          codec.generateSecureRandom(inIv);
-          codec.generateSecureRandom(outKey);
-          codec.generateSecureRandom(outIv);
-          return new CipherOption(suite, inKey, inIv, outKey, outIv);
-        }
-      }
-    }
-    return null;
-  }
-  
-  /**
-   * Send SASL message and negotiated cipher option to client.
-   * 
-   * @param out stream to receive message
-   * @param payload to send
-   * @param option negotiated cipher option
-   * @throws IOException for any error
-   */
-  public static void sendSaslMessageAndNegotiatedCipherOption(
-      OutputStream out, byte[] payload, CipherOption option) 
-          throws IOException {
-    DataTransferEncryptorMessageProto.Builder builder =
-        DataTransferEncryptorMessageProto.newBuilder();
-    
-    builder.setStatus(DataTransferEncryptorStatus.SUCCESS);
-    if (payload != null) {
-      builder.setPayload(ByteString.copyFrom(payload));
-    }
-    if (option != null) {
-      builder.addCipherOption(PBHelper.convert(option));
-    }
-    
-    DataTransferEncryptorMessageProto proto = builder.build();
-    proto.writeDelimitedTo(out);
-    out.flush();
-  }
-  
-  /**
-   * Create IOStreamPair of {@link org.apache.hadoop.crypto.CryptoInputStream}
-   * and {@link org.apache.hadoop.crypto.CryptoOutputStream}
-   * 
-   * @param conf the configuration
-   * @param cipherOption negotiated cipher option
-   * @param out underlying output stream
-   * @param in underlying input stream
-   * @param isServer is server side
-   * @return IOStreamPair the stream pair
-   * @throws IOException for any error
-   */
-  public static IOStreamPair createStreamPair(Configuration conf,
-      CipherOption cipherOption, OutputStream out, InputStream in, 
-      boolean isServer) throws IOException {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Creating IOStreamPair of CryptoInputStream and " +
-          "CryptoOutputStream.");
-    }
-    CryptoCodec codec = CryptoCodec.getInstance(conf, 
-        cipherOption.getCipherSuite());
-    byte[] inKey = cipherOption.getInKey();
-    byte[] inIv = cipherOption.getInIv();
-    byte[] outKey = cipherOption.getOutKey();
-    byte[] outIv = cipherOption.getOutIv();
-    InputStream cIn = new CryptoInputStream(in, codec, 
-        isServer ? inKey : outKey, isServer ? inIv : outIv);
-    OutputStream cOut = new CryptoOutputStream(out, codec, 
-        isServer ? outKey : inKey, isServer ? outIv : inIv);
-    return new IOStreamPair(cIn, cOut);
-  }
-
-  /**
-   * Sends a SASL negotiation message indicating an error.
-   *
-   * @param out stream to receive message
-   * @param message to send
-   * @throws IOException for any error
-   */
-  public static void sendGenericSaslErrorMessage(OutputStream out,
-      String message) throws IOException {
-    sendSaslMessage(out, DataTransferEncryptorStatus.ERROR, null, message);
-  }
-
-  /**
-   * Sends a SASL negotiation message.
-   *
-   * @param out stream to receive message
-   * @param payload to send
-   * @throws IOException for any error
-   */
-  public static void sendSaslMessage(OutputStream out, byte[] payload)
-      throws IOException {
-    sendSaslMessage(out, DataTransferEncryptorStatus.SUCCESS, payload, null);
-  }
-  
-  /**
-   * Send a SASL negotiation message and negotiation cipher options to server.
-   * 
-   * @param out stream to receive message
-   * @param payload to send
-   * @param options cipher options to negotiate
-   * @throws IOException for any error
-   */
-  public static void sendSaslMessageAndNegotiationCipherOptions(
-      OutputStream out, byte[] payload, List<CipherOption> options)
-          throws IOException {
-    DataTransferEncryptorMessageProto.Builder builder =
-        DataTransferEncryptorMessageProto.newBuilder();
-    
-    builder.setStatus(DataTransferEncryptorStatus.SUCCESS);
-    if (payload != null) {
-      builder.setPayload(ByteString.copyFrom(payload));
-    }
-    if (options != null) {
-      builder.addAllCipherOption(PBHelper.convertCipherOptions(options));
-    }
-    
-    DataTransferEncryptorMessageProto proto = builder.build();
-    proto.writeDelimitedTo(out);
-    out.flush();
-  }
-  
-  /**
-   * Read SASL message and negotiated cipher option from server.
-   * 
-   * @param in stream to read
-   * @return SaslResponseWithNegotiatedCipherOption SASL message and 
-   * negotiated cipher option
-   * @throws IOException for any error
-   */
-  public static SaslResponseWithNegotiatedCipherOption
-      readSaslMessageAndNegotiatedCipherOption(InputStream in)
-          throws IOException {
-    DataTransferEncryptorMessageProto proto =
-        DataTransferEncryptorMessageProto.parseFrom(vintPrefixed(in));
-    if (proto.getStatus() == DataTransferEncryptorStatus.ERROR_UNKNOWN_KEY) {
-      throw new InvalidEncryptionKeyException(proto.getMessage());
-    } else if (proto.getStatus() == DataTransferEncryptorStatus.ERROR) {
-      throw new IOException(proto.getMessage());
-    } else {
-      byte[] response = proto.getPayload().toByteArray();
-      List<CipherOption> options = PBHelper.convertCipherOptionProtos(
-          proto.getCipherOptionList());
-      CipherOption option = null;
-      if (options != null && !options.isEmpty()) {
-        option = options.get(0);
-      }
-      return new SaslResponseWithNegotiatedCipherOption(response, option);
-    }
-  }
-  
-  /**
-   * Encrypt the key and iv of the negotiated cipher option.
-   * 
-   * @param option negotiated cipher option
-   * @param sasl SASL participant representing server
-   * @return CipherOption negotiated cipher option which contains the 
-   * encrypted key and iv
-   * @throws IOException for any error
-   */
-  public static CipherOption wrap(CipherOption option, SaslParticipant sasl) 
-      throws IOException {
-    if (option != null) {
-      byte[] inKey = option.getInKey();
-      if (inKey != null) {
-        inKey = sasl.wrap(inKey, 0, inKey.length);
-      }
-      byte[] outKey = option.getOutKey();
-      if (outKey != null) {
-        outKey = sasl.wrap(outKey, 0, outKey.length);
-      }
-      return new CipherOption(option.getCipherSuite(), inKey, option.getInIv(),
-          outKey, option.getOutIv());
-    }
-    
-    return null;
-  }
-  
-  /**
-   * Decrypt the key and iv of the negotiated cipher option.
-   * 
-   * @param option negotiated cipher option
-   * @param sasl SASL participant representing client
-   * @return CipherOption negotiated cipher option which contains the 
-   * decrypted key and iv
-   * @throws IOException for any error
-   */
-  public static CipherOption unwrap(CipherOption option, SaslParticipant sasl)
-      throws IOException {
-    if (option != null) {
-      byte[] inKey = option.getInKey();
-      if (inKey != null) {
-        inKey = sasl.unwrap(inKey, 0, inKey.length);
-      }
-      byte[] outKey = option.getOutKey();
-      if (outKey != null) {
-        outKey = sasl.unwrap(outKey, 0, outKey.length);
-      }
-      return new CipherOption(option.getCipherSuite(), inKey, option.getInIv(),
-          outKey, option.getOutIv());
-    }
-    
-    return null;
-  }
-
-  /**
-   * Sends a SASL negotiation message.
-   *
-   * @param out stream to receive message
-   * @param status negotiation status
-   * @param payload to send
-   * @param message to send
-   * @throws IOException for any error
-   */
-  public static void sendSaslMessage(OutputStream out,
-      DataTransferEncryptorStatus status, byte[] payload, String message)
-          throws IOException {
-    DataTransferEncryptorMessageProto.Builder builder =
-        DataTransferEncryptorMessageProto.newBuilder();
-    
-    builder.setStatus(status);
-    if (payload != null) {
-      builder.setPayload(ByteString.copyFrom(payload));
-    }
-    if (message != null) {
-      builder.setMessage(message);
-    }
-    
-    DataTransferEncryptorMessageProto proto = builder.build();
-    proto.writeDelimitedTo(out);
-    out.flush();
-  }
-
-  /**
-   * There is no reason to instantiate this class.
-   */
-  private DataTransferSaslUtil() {
-  }
-}

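Note on getPeerAddress() in the removed DataTransferSaslUtil: it must tolerate three remote-address forms, since the host and the leading '/' may each be absent while the ':port' is always present. A standalone restatement of the parsing rule (PeerAddressForms/hostPart are illustrative):

    public class PeerAddressForms {
      static String hostPart(String remoteAddr) {
        String noPort = remoteAddr.split(":")[0];   // drop ":port"
        int slashIdx = noPort.indexOf('/');
        return slashIdx != -1 ? noPort.substring(slashIdx + 1) : noPort;
      }

      public static void main(String[] args) {
        System.out.println(hostPart("dn1.example.com/10.0.0.1:50010"));   // 10.0.0.1
        System.out.println(hostPart("/10.0.0.1:50010"));                  // 10.0.0.1
        System.out.println(hostPart("10.0.0.1:50010"));                   // 10.0.0.1
      }
    }
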
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ed78b14e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslDataTransferClient.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslDataTransferClient.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslDataTransferClient.java
deleted file mode 100644
index 00b131f..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslDataTransferClient.java
+++ /dev/null
@@ -1,498 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.protocol.datatransfer.sasl;
-
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_CIPHER_SUITES_KEY;
-import static org.apache.hadoop.hdfs.protocol.datatransfer.sasl.DataTransferSaslUtil.*;
-
-import java.io.DataInputStream;
-import java.io.DataOutputStream;
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.OutputStream;
-import java.net.InetAddress;
-import java.net.Socket;
-import java.util.List;
-import java.util.Map;
-import java.util.concurrent.atomic.AtomicBoolean;
-
-import javax.security.auth.callback.Callback;
-import javax.security.auth.callback.CallbackHandler;
-import javax.security.auth.callback.NameCallback;
-import javax.security.auth.callback.PasswordCallback;
-import javax.security.auth.callback.UnsupportedCallbackException;
-import javax.security.sasl.RealmCallback;
-import javax.security.sasl.RealmChoiceCallback;
-
-import org.apache.commons.codec.binary.Base64;
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.crypto.CipherOption;
-import org.apache.hadoop.crypto.CipherSuite;
-import org.apache.hadoop.hdfs.net.EncryptedPeer;
-import org.apache.hadoop.hdfs.net.Peer;
-import org.apache.hadoop.hdfs.protocol.DatanodeID;
-import org.apache.hadoop.hdfs.protocol.datatransfer.IOStreamPair;
-import org.apache.hadoop.hdfs.protocol.datatransfer.TrustedChannelResolver;
-import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
-import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
-import org.apache.hadoop.security.SaslPropertiesResolver;
-import org.apache.hadoop.security.SecurityUtil;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.security.token.Token;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.google.common.base.Charsets;
-import com.google.common.collect.Lists;
-
-/**
- * Negotiates SASL for DataTransferProtocol on behalf of a client.  There are
- * two possible supported variants of SASL negotiation: either a general-purpose
- * negotiation supporting any quality of protection, or a specialized
- * negotiation that enforces privacy as the quality of protection using a
- * cryptographically strong encryption key.
- *
- * This class is used in both the HDFS client and the DataNode.  The DataNode
- * needs it, because it acts as a client to other DataNodes during write
- * pipelines and block transfers.
- */
-@InterfaceAudience.Private
-public class SaslDataTransferClient {
-
-  private static final Logger LOG = LoggerFactory.getLogger(
-    SaslDataTransferClient.class);
-
-  private final Configuration conf;
-  private final AtomicBoolean fallbackToSimpleAuth;
-  private final SaslPropertiesResolver saslPropsResolver;
-  private final TrustedChannelResolver trustedChannelResolver;
-
-  /**
-   * Creates a new SaslDataTransferClient.  This constructor is used in cases
-   * where it is not relevant to track whether a secure client fell back to
-   * simple auth.  For intra-cluster connections between data nodes in the same
-   * cluster, we can assume that all run under the same security configuration.
-   *
-   * @param conf the configuration
-   * @param saslPropsResolver for determining properties of SASL negotiation
-   * @param trustedChannelResolver for identifying trusted connections that do
-   *   not require SASL negotiation
-   */
-  public SaslDataTransferClient(Configuration conf, 
-      SaslPropertiesResolver saslPropsResolver,
-      TrustedChannelResolver trustedChannelResolver) {
-    this(conf, saslPropsResolver, trustedChannelResolver, null);
-  }
-
-  /**
-   * Creates a new SaslDataTransferClient.
-   *
-   * @param conf the configuration
-   * @param saslPropsResolver for determining properties of SASL negotiation
-   * @param trustedChannelResolver for identifying trusted connections that do
-   *   not require SASL negotiation
-   * @param fallbackToSimpleAuth checked on each attempt at general SASL
-   *   handshake, if true forces use of simple auth
-   */
-  public SaslDataTransferClient(Configuration conf, 
-      SaslPropertiesResolver saslPropsResolver,
-      TrustedChannelResolver trustedChannelResolver,
-      AtomicBoolean fallbackToSimpleAuth) {
-    this.conf = conf;
-    this.fallbackToSimpleAuth = fallbackToSimpleAuth;
-    this.saslPropsResolver = saslPropsResolver;
-    this.trustedChannelResolver = trustedChannelResolver;
-  }
-
-  /**
-   * Sends client SASL negotiation for a newly allocated socket if required.
-   *
-   * @param socket connection socket
-   * @param underlyingOut connection output stream
-   * @param underlyingIn connection input stream
-   * @param encryptionKeyFactory for creation of an encryption key
-   * @param accessToken connection block access token
-   * @param datanodeId ID of destination DataNode
-   * @return new pair of streams, wrapped after SASL negotiation
-   * @throws IOException for any error
-   */
-  public IOStreamPair newSocketSend(Socket socket, OutputStream underlyingOut,
-      InputStream underlyingIn, DataEncryptionKeyFactory encryptionKeyFactory,
-      Token<BlockTokenIdentifier> accessToken, DatanodeID datanodeId)
-      throws IOException {
-    // The encryption key factory only returns a key if encryption is enabled.
-    DataEncryptionKey encryptionKey = !trustedChannelResolver.isTrusted() ?
-      encryptionKeyFactory.newDataEncryptionKey() : null;
-    IOStreamPair ios = send(socket.getInetAddress(), underlyingOut,
-      underlyingIn, encryptionKey, accessToken, datanodeId);
-    return ios != null ? ios : new IOStreamPair(underlyingIn, underlyingOut);
-  }
-
-  /**
-   * Sends client SASL negotiation for a peer if required.
-   *
-   * @param peer connection peer
-   * @param encryptionKeyFactory for creation of an encryption key
-   * @param accessToken connection block access token
-   * @param datanodeId ID of destination DataNode
-   * @return new pair of streams, wrapped after SASL negotiation
-   * @throws IOException for any error
-   */
-  public Peer peerSend(Peer peer, DataEncryptionKeyFactory encryptionKeyFactory,
-      Token<BlockTokenIdentifier> accessToken, DatanodeID datanodeId)
-      throws IOException {
-    IOStreamPair ios = checkTrustAndSend(getPeerAddress(peer),
-      peer.getOutputStream(), peer.getInputStream(), encryptionKeyFactory,
-      accessToken, datanodeId);
-    // TODO: Consider renaming EncryptedPeer to SaslPeer.
-    return ios != null ? new EncryptedPeer(peer, ios) : peer;
-  }
-
-  /**
-   * Sends client SASL negotiation for a socket if required.
-   *
-   * @param socket connection socket
-   * @param underlyingOut connection output stream
-   * @param underlyingIn connection input stream
-   * @param encryptionKeyFactory for creation of an encryption key
-   * @param accessToken connection block access token
-   * @param datanodeId ID of destination DataNode
-   * @return new pair of streams, wrapped after SASL negotiation
-   * @throws IOException for any error
-   */
-  public IOStreamPair socketSend(Socket socket, OutputStream underlyingOut,
-      InputStream underlyingIn, DataEncryptionKeyFactory encryptionKeyFactory,
-      Token<BlockTokenIdentifier> accessToken, DatanodeID datanodeId)
-      throws IOException {
-    IOStreamPair ios = checkTrustAndSend(socket.getInetAddress(), underlyingOut,
-      underlyingIn, encryptionKeyFactory, accessToken, datanodeId);
-    return ios != null ? ios : new IOStreamPair(underlyingIn, underlyingOut);
-  }
-
-  /**
-   * Checks if an address is already trusted and then sends client SASL
-   * negotiation if required.
-   *
-   * @param addr connection address
-   * @param underlyingOut connection output stream
-   * @param underlyingIn connection input stream
-   * @param encryptionKeyFactory for creation of an encryption key
-   * @param accessToken connection block access token
-   * @param datanodeId ID of destination DataNode
-   * @return new pair of streams, wrapped after SASL negotiation
-   * @throws IOException for any error
-   */
-  private IOStreamPair checkTrustAndSend(InetAddress addr,
-      OutputStream underlyingOut, InputStream underlyingIn,
-      DataEncryptionKeyFactory encryptionKeyFactory,
-      Token<BlockTokenIdentifier> accessToken, DatanodeID datanodeId)
-      throws IOException {
-    if (!trustedChannelResolver.isTrusted() &&
-        !trustedChannelResolver.isTrusted(addr)) {
-      // The encryption key factory only returns a key if encryption is enabled.
-      DataEncryptionKey encryptionKey =
-        encryptionKeyFactory.newDataEncryptionKey();
-      return send(addr, underlyingOut, underlyingIn, encryptionKey, accessToken,
-        datanodeId);
-    } else {
-      LOG.debug(
-        "SASL client skipping handshake on trusted connection for addr = {}, "
-        + "datanodeId = {}", addr, datanodeId);
-      return null;
-    }
-  }
-
-  /**
-   * Sends client SASL negotiation if required.  Determines the correct type of
-   * SASL handshake based on configuration.
-   *
-   * @param addr connection address
-   * @param underlyingOut connection output stream
-   * @param underlyingIn connection input stream
-   * @param encryptionKey for an encrypted SASL handshake
-   * @param accessToken connection block access token
-   * @param datanodeId ID of destination DataNode
-   * @return new pair of streams, wrapped after SASL negotiation
-   * @throws IOException for any error
-   */
-  private IOStreamPair send(InetAddress addr, OutputStream underlyingOut,
-      InputStream underlyingIn, DataEncryptionKey encryptionKey,
-      Token<BlockTokenIdentifier> accessToken, DatanodeID datanodeId)
-      throws IOException {
-    if (encryptionKey != null) {
-      LOG.debug(
-        "SASL client doing encrypted handshake for addr = {}, datanodeId = {}",
-        addr, datanodeId);
-      return getEncryptedStreams(underlyingOut, underlyingIn,
-        encryptionKey);
-    } else if (!UserGroupInformation.isSecurityEnabled()) {
-      LOG.debug(
-        "SASL client skipping handshake in unsecured configuration for "
-        + "addr = {}, datanodeId = {}", addr, datanodeId);
-      return null;
-    } else if (SecurityUtil.isPrivilegedPort(datanodeId.getXferPort())) {
-      LOG.debug(
-        "SASL client skipping handshake in secured configuration with "
-        + "privileged port for addr = {}, datanodeId = {}", addr, datanodeId);
-      return null;
-    } else if (fallbackToSimpleAuth != null && fallbackToSimpleAuth.get()) {
-      LOG.debug(
-        "SASL client skipping handshake in secured configuration with "
-        + "unsecured cluster for addr = {}, datanodeId = {}", addr, datanodeId);
-      return null;
-    } else if (saslPropsResolver != null) {
-      LOG.debug(
-        "SASL client doing general handshake for addr = {}, datanodeId = {}",
-        addr, datanodeId);
-      return getSaslStreams(addr, underlyingOut, underlyingIn, accessToken,
-        datanodeId);
-    } else {
-      // It's a secured cluster using non-privileged ports, but no SASL.  The
-      // only way this can happen is if the DataNode has
-      // ignore.secure.ports.for.testing configured, so this is a rare edge case.
-      LOG.debug(
-        "SASL client skipping handshake in secured configuration with no SASL "
-        + "protection configured for addr = {}, datanodeId = {}",
-        addr, datanodeId);
-      return null;
-    }
-  }
-
-  /**
-   * Sends client SASL negotiation for specialized encrypted handshake.
-   *
-   * @param underlyingOut connection output stream
-   * @param underlyingIn connection input stream
-   * @param encryptionKey for an encrypted SASL handshake
-   * @return new pair of streams, wrapped after SASL negotiation
-   * @throws IOException for any error
-   */
-  private IOStreamPair getEncryptedStreams(OutputStream underlyingOut,
-      InputStream underlyingIn, DataEncryptionKey encryptionKey)
-      throws IOException {
-    Map<String, String> saslProps = createSaslPropertiesForEncryption(
-      encryptionKey.encryptionAlgorithm);
-
-    LOG.debug("Client using encryption algorithm {}",
-      encryptionKey.encryptionAlgorithm);
-
-    String userName = getUserNameFromEncryptionKey(encryptionKey);
-    char[] password = encryptionKeyToPassword(encryptionKey.encryptionKey);
-    CallbackHandler callbackHandler = new SaslClientCallbackHandler(userName,
-      password);
-    return doSaslHandshake(underlyingOut, underlyingIn, userName, saslProps,
-      callbackHandler);
-  }
-
-  /**
-   * The SASL username for an encrypted handshake consists of the keyId,
-   * blockPoolId, and nonce with the first two encoded as Strings, and the third
-   * encoded using Base64. The fields are each separated by a single space.
-   * 
-   * @param encryptionKey the encryption key to encode as a SASL username.
-   * @return encoded username containing keyId, blockPoolId, and nonce
-   */
-  private static String getUserNameFromEncryptionKey(
-      DataEncryptionKey encryptionKey) {
-    return encryptionKey.keyId + NAME_DELIMITER +
-        encryptionKey.blockPoolId + NAME_DELIMITER +
-        new String(Base64.encodeBase64(encryptionKey.nonce, false), Charsets.UTF_8);
-  }
-
-  /**
-   * Sets user name and password when asked by the client-side SASL object.
-   */
-  private static final class SaslClientCallbackHandler
-      implements CallbackHandler {
-
-    private final char[] password;
-    private final String userName;
-
-    /**
-     * Creates a new SaslClientCallbackHandler.
-     *
-     * @param userName SASL user name
-     * @param password SASL password
-     */
-    public SaslClientCallbackHandler(String userName, char[] password) {
-      this.password = password;
-      this.userName = userName;
-    }
-
-    @Override
-    public void handle(Callback[] callbacks) throws IOException,
-        UnsupportedCallbackException {
-      NameCallback nc = null;
-      PasswordCallback pc = null;
-      RealmCallback rc = null;
-      for (Callback callback : callbacks) {
-        if (callback instanceof RealmChoiceCallback) {
-          continue;
-        } else if (callback instanceof NameCallback) {
-          nc = (NameCallback) callback;
-        } else if (callback instanceof PasswordCallback) {
-          pc = (PasswordCallback) callback;
-        } else if (callback instanceof RealmCallback) {
-          rc = (RealmCallback) callback;
-        } else {
-          throw new UnsupportedCallbackException(callback,
-              "Unrecognized SASL client callback");
-        }
-      }
-      if (nc != null) {
-        nc.setName(userName);
-      }
-      if (pc != null) {
-        pc.setPassword(password);
-      }
-      if (rc != null) {
-        rc.setText(rc.getDefaultText());
-      }
-    }
-  }
-
-  /**
-   * Sends client SASL negotiation for general-purpose handshake.
-   *
-   * @param addr connection address
-   * @param underlyingOut connection output stream
-   * @param underlyingIn connection input stream
-   * @param accessToken connection block access token
-   * @param datanodeId ID of destination DataNode
-   * @return new pair of streams, wrapped after SASL negotiation
-   * @throws IOException for any error
-   */
-  private IOStreamPair getSaslStreams(InetAddress addr,
-      OutputStream underlyingOut, InputStream underlyingIn,
-      Token<BlockTokenIdentifier> accessToken, DatanodeID datanodeId)
-      throws IOException {
-    Map<String, String> saslProps = saslPropsResolver.getClientProperties(addr);
-
-    String userName = buildUserName(accessToken);
-    char[] password = buildClientPassword(accessToken);
-    CallbackHandler callbackHandler = new SaslClientCallbackHandler(userName,
-      password);
-    return doSaslHandshake(underlyingOut, underlyingIn, userName, saslProps,
-      callbackHandler);
-  }
-
-  /**
-   * Builds the client's user name for the general-purpose handshake, consisting
-   * of the base64-encoded serialized block access token identifier.  Note that
-   * this includes only the token identifier, not the token itself, which would
-   * include the password.  The password is a shared secret, and we must not
-   * write it on the network during the SASL authentication exchange.
-   *
-   * @param blockToken for block access
-   * @return SASL user name
-   */
-  private static String buildUserName(Token<BlockTokenIdentifier> blockToken) {
-    return new String(Base64.encodeBase64(blockToken.getIdentifier(), false),
-      Charsets.UTF_8);
-  }
-
-  /**
-   * Calculates the password on the client side for the general-purpose
-   * handshake.  The password consists of the block access token's password.
-   *
-   * @param blockToken for block access
-   * @return SASL password
-   */    
-  private char[] buildClientPassword(Token<BlockTokenIdentifier> blockToken) {
-    return new String(Base64.encodeBase64(blockToken.getPassword(), false),
-      Charsets.UTF_8).toCharArray();
-  }
-
-  /**
-   * This method actually executes the client-side SASL handshake.
-   *
-   * @param underlyingOut connection output stream
-   * @param underlyingIn connection input stream
-   * @param userName SASL user name
-   * @param saslProps properties of SASL negotiation
-   * @param callbackHandler for responding to SASL callbacks
-   * @return new pair of streams, wrapped after SASL negotiation
-   * @throws IOException for any error
-   */
-  private IOStreamPair doSaslHandshake(OutputStream underlyingOut,
-      InputStream underlyingIn, String userName, Map<String, String> saslProps,
-      CallbackHandler callbackHandler) throws IOException {
-
-    DataOutputStream out = new DataOutputStream(underlyingOut);
-    DataInputStream in = new DataInputStream(underlyingIn);
-
-    SaslParticipant sasl = SaslParticipant.createClientSaslParticipant(userName,
-      saslProps, callbackHandler);
-
-    out.writeInt(SASL_TRANSFER_MAGIC_NUMBER);
-    out.flush();
-
-    try {
-      // Start of handshake - "initial response" in SASL terminology.
-      sendSaslMessage(out, new byte[0]);
-
-      // step 1
-      byte[] remoteResponse = readSaslMessage(in);
-      byte[] localResponse = sasl.evaluateChallengeOrResponse(remoteResponse);
-      List<CipherOption> cipherOptions = null;
-      if (requestedQopContainsPrivacy(saslProps)) {
-        // Negotiate cipher suites if configured.  Currently, the only supported
-        // cipher suite is AES/CTR/NoPadding, but the protocol allows multiple
-        // values for future expansion.
-        String cipherSuites = conf.get(
-            DFS_ENCRYPT_DATA_TRANSFER_CIPHER_SUITES_KEY);
-        if (cipherSuites != null && !cipherSuites.isEmpty()) {
-          if (!cipherSuites.equals(CipherSuite.AES_CTR_NOPADDING.getName())) {
-            throw new IOException(String.format("Invalid cipher suite, %s=%s",
-                DFS_ENCRYPT_DATA_TRANSFER_CIPHER_SUITES_KEY, cipherSuites));
-          }
-          CipherOption option = new CipherOption(CipherSuite.AES_CTR_NOPADDING);
-          cipherOptions = Lists.newArrayListWithCapacity(1);
-          cipherOptions.add(option);
-        }
-      }
-      sendSaslMessageAndNegotiationCipherOptions(out, localResponse, 
-          cipherOptions);
-
-      // step 2 (client-side only)
-      SaslResponseWithNegotiatedCipherOption response = 
-          readSaslMessageAndNegotiatedCipherOption(in);
-      localResponse = sasl.evaluateChallengeOrResponse(response.payload);
-      assert localResponse == null;
-
-      // SASL handshake is complete
-      checkSaslComplete(sasl, saslProps);
-
-      CipherOption cipherOption = null;
-      if (sasl.isNegotiatedQopPrivacy()) {
-        // Unwrap the negotiated cipher option
-        cipherOption = unwrap(response.cipherOption, sasl);
-      }
-
-      // If negotiated cipher option is not null, we will use it to create 
-      // stream pair.
-      return cipherOption != null ? createStreamPair(
-          conf, cipherOption, underlyingOut, underlyingIn, false) : 
-            sasl.createStreamPair(out, in);
-    } catch (IOException ioe) {
-      sendGenericSaslErrorMessage(out, ioe.getMessage());
-      throw ioe;
-    }
-  }
-}
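
Taken together, the helpers above fully determine the client's SASL credentials for the encrypted handshake: the user name is "keyId blockPoolId base64(nonce)" and the password is the base64-encoded encryption key. A minimal standalone sketch of that encoding, assuming commons-codec Base64 and stand-in parameters for the DataEncryptionKey fields:

    import java.nio.charset.StandardCharsets;
    import org.apache.commons.codec.binary.Base64;

    // Hedged sketch of the credential encoding shown above.  keyId,
    // blockPoolId, nonce and encryptionKey stand in for the fields of
    // DataEncryptionKey; NAME_DELIMITER is the single space the code uses.
    public class SaslCredentialSketch {
      private static final String NAME_DELIMITER = " ";

      static String userName(int keyId, String blockPoolId, byte[] nonce) {
        // First two fields as plain strings, nonce base64-encoded.
        return keyId + NAME_DELIMITER + blockPoolId + NAME_DELIMITER +
            new String(Base64.encodeBase64(nonce, false),
                StandardCharsets.UTF_8);
      }

      static char[] password(byte[] encryptionKey) {
        // The shared secret, base64-encoded, as a char[] for the callback.
        return new String(Base64.encodeBase64(encryptionKey, false),
            StandardCharsets.UTF_8).toCharArray();
      }
    }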

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ed78b14e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslDataTransferServer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslDataTransferServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslDataTransferServer.java
index f060beb..95965b5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslDataTransferServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslDataTransferServer.java
@@ -17,7 +17,7 @@
  */
 package org.apache.hadoop.hdfs.protocol.datatransfer.sasl;
 
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATA_TRANSFER_PROTECTION_KEY;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_DATA_TRANSFER_PROTECTION_KEY;
 import static org.apache.hadoop.hdfs.protocol.datatransfer.sasl.DataTransferSaslUtil.*;
 
 import java.io.ByteArrayInputStream;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ed78b14e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslParticipant.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslParticipant.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslParticipant.java
deleted file mode 100644
index f14a075..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslParticipant.java
+++ /dev/null
@@ -1,210 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.protocol.datatransfer.sasl;
-
-import java.io.DataInputStream;
-import java.io.DataOutputStream;
-import java.util.Map;
-import javax.security.auth.callback.CallbackHandler;
-import javax.security.sasl.Sasl;
-import javax.security.sasl.SaslClient;
-import javax.security.sasl.SaslException;
-import javax.security.sasl.SaslServer;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hdfs.protocol.datatransfer.IOStreamPair;
-import org.apache.hadoop.security.SaslInputStream;
-import org.apache.hadoop.security.SaslOutputStream;
-
-/**
- * Strongly inspired by Thrift's TSaslTransport class.
- *
- * Used to abstract over the <code>SaslServer</code> and
- * <code>SaslClient</code> classes, which share a lot of their interface, but
- * unfortunately don't share a common superclass.
- */
-@InterfaceAudience.Private
-class SaslParticipant {
-
-  // This has to be set as part of the SASL spec, but its value doesn't
-  // matter for our purposes; it just may not be empty. It's sent over the
-  // wire, so use a short string.
-  private static final String SERVER_NAME = "0";
-  private static final String PROTOCOL = "hdfs";
-  private static final String MECHANISM = "DIGEST-MD5";
-
-  // One of these will always be null.
-  private final SaslServer saslServer;
-  private final SaslClient saslClient;
-
-  /**
-   * Creates a SaslParticipant wrapping a SaslServer.
-   *
-   * @param saslProps properties of SASL negotiation
-   * @param callbackHandler for handling all SASL callbacks
-   * @return SaslParticipant wrapping SaslServer
-   * @throws SaslException for any error
-   */
-  public static SaslParticipant createServerSaslParticipant(
-      Map<String, String> saslProps, CallbackHandler callbackHandler)
-      throws SaslException {
-    return new SaslParticipant(Sasl.createSaslServer(MECHANISM,
-      PROTOCOL, SERVER_NAME, saslProps, callbackHandler));
-  }
-
-  /**
-   * Creates a SaslParticipant wrapping a SaslClient.
-   *
-   * @param userName SASL user name
-   * @param saslProps properties of SASL negotiation
-   * @param callbackHandler for handling all SASL callbacks
-   * @return SaslParticipant wrapping SaslClient
-   * @throws SaslException for any error
-   */
-  public static SaslParticipant createClientSaslParticipant(String userName,
-      Map<String, String> saslProps, CallbackHandler callbackHandler)
-      throws SaslException {
-    return new SaslParticipant(Sasl.createSaslClient(new String[] { MECHANISM },
-      userName, PROTOCOL, SERVER_NAME, saslProps, callbackHandler));
-  }
-
-  /**
-   * Private constructor wrapping a SaslServer.
-   *
-   * @param saslServer to wrap
-   */
-  private SaslParticipant(SaslServer saslServer) {
-    this.saslServer = saslServer;
-    this.saslClient = null;
-  }
-
-  /**
-   * Private constructor wrapping a SaslClient.
-   *
-   * @param saslClient to wrap
-   */
-  private SaslParticipant(SaslClient saslClient) {
-    this.saslServer = null;
-    this.saslClient = saslClient;
-  }
-
-  /**
-   * @see {@link SaslServer#evaluateResponse}
-   * @see {@link SaslClient#evaluateChallenge}
-   */
-  public byte[] evaluateChallengeOrResponse(byte[] challengeOrResponse)
-      throws SaslException {
-    if (saslClient != null) {
-      return saslClient.evaluateChallenge(challengeOrResponse);
-    } else {
-      return saslServer.evaluateResponse(challengeOrResponse);
-    }
-  }
-
-  /**
-   * After successful SASL negotiation, returns the negotiated quality of
-   * protection.
-   *
-   * @return negotiated quality of protection
-   */
-  public String getNegotiatedQop() {
-    if (saslClient != null) {
-      return (String) saslClient.getNegotiatedProperty(Sasl.QOP);
-    } else {
-      return (String) saslServer.getNegotiatedProperty(Sasl.QOP);
-    }
-  }
-  
-  /**
-   * After successful SASL negotiation, returns whether the negotiated
-   * quality of protection is privacy (auth-conf).
-   *
-   * @return true if the negotiated QOP is privacy
-   */
-  public boolean isNegotiatedQopPrivacy() {
-    String qop = getNegotiatedQop();
-    return qop != null && "auth-conf".equalsIgnoreCase(qop);
-  }
-  
-  /**
-   * Wraps a byte array.
-   * 
-   * @param bytes The array containing the bytes to wrap.
-   * @param off The starting position in the array
-   * @param len The number of bytes to wrap
-   * @return byte[] wrapped bytes
-   * @throws SaslException if the bytes cannot be successfully wrapped
-   */
-  public byte[] wrap(byte[] bytes, int off, int len) throws SaslException {
-    if (saslClient != null) {
-      return saslClient.wrap(bytes, off, len);
-    } else {
-      return saslServer.wrap(bytes, off, len);
-    }
-  }
-  
-  /**
-   * Unwraps a byte array.
-   * 
-   * @param bytes The array containing the bytes to unwrap.
-   * @param off The starting position in the array
-   * @param len The number of bytes to unwrap
-   * @return byte[] unwrapped bytes
-   * @throws SaslException if the bytes cannot be successfully unwrapped
-   */
-  public byte[] unwrap(byte[] bytes, int off, int len) throws SaslException {
-    if (saslClient != null) {
-      return saslClient.unwrap(bytes, off, len);
-    } else {
-      return saslServer.unwrap(bytes, off, len);
-    }
-  }
-
-  /**
-   * Returns true if SASL negotiation is complete.
-   *
-   * @return true if SASL negotiation is complete
-   */
-  public boolean isComplete() {
-    if (saslClient != null) {
-      return saslClient.isComplete();
-    } else {
-      return saslServer.isComplete();
-    }
-  }
-
-  /**
-   * Return some input/output streams that may henceforth have their
-   * communication encrypted, depending on the negotiated quality of protection.
-   *
-   * @param out output stream to wrap
-   * @param in input stream to wrap
-   * @return IOStreamPair wrapping the streams
-   */
-  public IOStreamPair createStreamPair(DataOutputStream out,
-      DataInputStream in) {
-    if (saslClient != null) {
-      return new IOStreamPair(
-          new SaslInputStream(in, saslClient),
-          new SaslOutputStream(out, saslClient));
-    } else {
-      return new IOStreamPair(
-          new SaslInputStream(in, saslServer),
-          new SaslOutputStream(out, saslServer));
-    }
-  }
-}
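
For orientation, everything this class wraps reduces to two JDK factory calls. A minimal sketch of the client side, using the same MECHANISM, PROTOCOL and SERVER_NAME constants as above (the callback handler is assumed to be something like SaslClientCallbackHandler):

    import java.util.Map;
    import javax.security.auth.callback.CallbackHandler;
    import javax.security.sasl.Sasl;
    import javax.security.sasl.SaslClient;
    import javax.security.sasl.SaslException;

    // Hedged sketch: the JDK call underneath createClientSaslParticipant.
    public class SaslClientSketch {
      static SaslClient newDigestMd5Client(String userName,
          Map<String, String> saslProps, CallbackHandler handler)
          throws SaslException {
        // "DIGEST-MD5" / "hdfs" / "0" mirror MECHANISM, PROTOCOL, SERVER_NAME.
        // userName is passed as the authorization id, as the class above does.
        return Sasl.createSaslClient(new String[] { "DIGEST-MD5" },
            userName, "hdfs", "0", saslProps, handler);
      }
    }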

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ed78b14e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslResponseWithNegotiatedCipherOption.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslResponseWithNegotiatedCipherOption.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslResponseWithNegotiatedCipherOption.java
deleted file mode 100644
index f69441b..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslResponseWithNegotiatedCipherOption.java
+++ /dev/null
@@ -1,33 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.protocol.datatransfer.sasl;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.crypto.CipherOption;
-
-@InterfaceAudience.Private
-public class SaslResponseWithNegotiatedCipherOption {
-  final byte[] payload;
-  final CipherOption cipherOption;
-  
-  public SaslResponseWithNegotiatedCipherOption(byte[] payload, 
-      CipherOption cipherOption) {
-    this.payload = payload;
-    this.cipherOption = cipherOption;
-  }
-}
\ No newline at end of file


[10/50] [abbrv] hadoop git commit: HDFS-8965. Harden edit log reading code against out of memory errors (cmccabe)

Posted by vv...@apache.org.
HDFS-8965. Harden edit log reading code against out of memory errors (cmccabe)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/24f6a7c9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/24f6a7c9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/24f6a7c9

Branch: refs/heads/YARN-3926
Commit: 24f6a7c9563757234f53ca23e12f9c9208b53082
Parents: 8fa41d9
Author: Colin Patrick Mccabe <cm...@cloudera.com>
Authored: Mon Aug 31 17:31:29 2015 -0700
Committer: Colin Patrick Mccabe <cm...@cloudera.com>
Committed: Mon Aug 31 18:06:30 2015 -0700

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |   2 +
 .../bkjournal/BookKeeperEditLogInputStream.java |   2 +-
 .../hadoop/hdfs/protocol/LayoutVersion.java     |   2 +-
 .../namenode/EditLogBackupInputStream.java      |   2 +-
 .../server/namenode/EditLogFileInputStream.java |   2 +-
 .../hdfs/server/namenode/FSEditLogOp.java       | 354 +++++++++++++------
 .../hdfs/server/namenode/TestEditLog.java       |   2 +-
 .../namenode/TestEditLogFileInputStream.java    |  80 +++++
 8 files changed, 341 insertions(+), 105 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/24f6a7c9/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 6584c84..57ddcb2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -872,6 +872,8 @@ Release 2.8.0 - UNRELEASED
 
     HDFS-8946. Improve choosing datanode storage for block placement. (yliu)
 
+    HDFS-8965. Harden edit log reading code against out of memory errors (cmccabe)
+
   OPTIMIZATIONS
 
     HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/24f6a7c9/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/BookKeeperEditLogInputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/BookKeeperEditLogInputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/BookKeeperEditLogInputStream.java
index e2098dd..86da807 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/BookKeeperEditLogInputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/BookKeeperEditLogInputStream.java
@@ -83,7 +83,7 @@ class BookKeeperEditLogInputStream extends EditLogInputStream {
     tracker = new FSEditLogLoader.PositionTrackingInputStream(bin);
     DataInputStream in = new DataInputStream(tracker);
 
-    reader = new FSEditLogOp.Reader(in, tracker, logVersion);
+    reader = FSEditLogOp.Reader.create(in, tracker, logVersion);
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hadoop/blob/24f6a7c9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LayoutVersion.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LayoutVersion.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LayoutVersion.java
index c893744..1750790 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LayoutVersion.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LayoutVersion.java
@@ -87,7 +87,7 @@ public class LayoutVersion {
     FSIMAGE_COMPRESSION(-25, "Support for fsimage compression"),
     FSIMAGE_CHECKSUM(-26, "Support checksum for fsimage"),
     REMOVE_REL13_DISK_LAYOUT_SUPPORT(-27, "Remove support for 0.13 disk layout"),
-    EDITS_CHESKUM(-28, "Support checksum for editlog"),
+    EDITS_CHECKSUM(-28, "Support checksum for editlog"),
     UNUSED(-29, "Skipped version"),
     FSIMAGE_NAME_OPTIMIZATION(-30, "Store only last part of path in fsimage"),
     RESERVED_REL20_203(-31, -19, "Reserved for release 0.20.203", true,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/24f6a7c9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogBackupInputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogBackupInputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogBackupInputStream.java
index 689cacc..81d285a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogBackupInputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogBackupInputStream.java
@@ -119,7 +119,7 @@ class EditLogBackupInputStream extends EditLogInputStream {
 
     this.version = version;
 
-    reader = new FSEditLogOp.Reader(in, tracker, version);
+    reader = FSEditLogOp.Reader.create(in, tracker, version);
   }
 
   void clear() throws IOException {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/24f6a7c9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileInputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileInputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileInputStream.java
index 3e21c24..73a162e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileInputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileInputStream.java
@@ -157,7 +157,7 @@ public class EditLogFileInputStream extends EditLogInputStream {
               "flags from log");
         }
       }
-      reader = new FSEditLogOp.Reader(dataIn, tracker, logVersion);
+      reader = FSEditLogOp.Reader.create(dataIn, tracker, logVersion);
       reader.setMaxOpSize(maxOpSize);
       state = State.OPEN;
     } finally {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/24f6a7c9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
index ab36f17..125e1cf 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
@@ -4518,42 +4518,46 @@ public abstract class FSEditLogOp {
   /**
    * Class for reading editlog ops from a stream
    */
-  public static class Reader {
-    private final DataInputStream in;
-    private final StreamLimiter limiter;
-    private final int logVersion;
-    private final Checksum checksum;
-    private final OpInstanceCache cache;
-    private int maxOpSize;
-    private final boolean supportEditLogLength;
+  public abstract static class Reader {
+    final DataInputStream in;
+    final StreamLimiter limiter;
+    final OpInstanceCache cache;
+    final byte[] temp = new byte[4096];
+    final int logVersion;
+    int maxOpSize;
+
+    public static Reader create(DataInputStream in, StreamLimiter limiter,
+                                int logVersion) {
+      if (logVersion < NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION) {
+        // Use the LengthPrefixedReader on edit logs which are newer than what
+        // we can parse.  (Newer layout versions are represented by smaller
+        // negative integers, for historical reasons.) Even though we can't
+        // parse the Ops contained in them, we should still be able to call
+        // scanOp on them.  This is important for the JournalNode during rolling
+        // upgrade.
+        return new LengthPrefixedReader(in, limiter, logVersion);
+      } else if (NameNodeLayoutVersion.supports(
+              NameNodeLayoutVersion.Feature.EDITLOG_LENGTH, logVersion)) {
+        return new LengthPrefixedReader(in, limiter, logVersion);
+      } else if (NameNodeLayoutVersion.supports(
+          LayoutVersion.Feature.EDITS_CHECKSUM, logVersion)) {
+        Checksum checksum = DataChecksum.newCrc32();
+        return new ChecksummedReader(checksum, in, limiter, logVersion);
+      } else {
+        return new LegacyReader(in, limiter, logVersion);
+      }
+    }
 
     /**
      * Construct the reader
-     * @param in The stream to read from.
-     * @param logVersion The version of the data coming from the stream.
+     * @param in            The stream to read from.
+     * @param limiter       The limiter for this stream.
+     * @param logVersion    The version of the data coming from the stream.
      */
-    public Reader(DataInputStream in, StreamLimiter limiter, int logVersion) {
-      this.logVersion = logVersion;
-      if (NameNodeLayoutVersion.supports(
-          LayoutVersion.Feature.EDITS_CHESKUM, logVersion)) {
-        this.checksum = DataChecksum.newCrc32();
-      } else {
-        this.checksum = null;
-      }
-      // It is possible that the logVersion is actually a future layoutversion
-      // during the rolling upgrade (e.g., the NN gets upgraded first). We
-      // assume future layout will also support length of editlog op.
-      this.supportEditLogLength = NameNodeLayoutVersion.supports(
-          NameNodeLayoutVersion.Feature.EDITLOG_LENGTH, logVersion)
-          || logVersion < NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION;
-
-      if (this.checksum != null) {
-        this.in = new DataInputStream(
-            new CheckedInputStream(in, this.checksum));
-      } else {
-        this.in = in;
-      }
+    Reader(DataInputStream in, StreamLimiter limiter, int logVersion) {
+      this.in = in;
       this.limiter = limiter;
+      this.logVersion = logVersion;
       this.cache = new OpInstanceCache();
       this.maxOpSize = DFSConfigKeys.DFS_NAMENODE_MAX_OP_SIZE_DEFAULT;
     }
@@ -4606,26 +4610,25 @@ public abstract class FSEditLogOp {
       }
     }
 
-    private void verifyTerminator() throws IOException {
+    void verifyTerminator() throws IOException {
       /** The end of the edit log should contain only 0x00 or 0xff bytes.
        * If it contains other bytes, the log itself may be corrupt.
        * It is important to check this; if we don't, a stray OP_INVALID byte 
        * could make us stop reading the edit log halfway through, and we'd never
        * know that we had lost data.
        */
-      byte[] buf = new byte[4096];
       limiter.clearLimit();
       int numRead = -1, idx = 0;
       while (true) {
         try {
           numRead = -1;
           idx = 0;
-          numRead = in.read(buf);
+          numRead = in.read(temp);
           if (numRead == -1) {
             return;
           }
           while (idx < numRead) {
-            if ((buf[idx] != (byte)0) && (buf[idx] != (byte)-1)) {
+            if ((temp[idx] != (byte)0) && (temp[idx] != (byte)-1)) {
               throw new IOException("Read extra bytes after " +
                 "the terminator!");
             }
@@ -4638,7 +4641,7 @@ public abstract class FSEditLogOp {
           if (numRead != -1) { 
             in.reset();
             IOUtils.skipFully(in, idx);
-            in.mark(buf.length + 1);
+            in.mark(temp.length + 1);
             IOUtils.skipFully(in, 1);
           }
         }
@@ -4653,14 +4656,164 @@ public abstract class FSEditLogOp {
      * If an exception is thrown, the stream's mark will be set to the first
      * problematic byte.  This usually means the beginning of the opcode.
      */
-    private FSEditLogOp decodeOp() throws IOException {
-      limiter.setLimit(maxOpSize);
+    public abstract FSEditLogOp decodeOp() throws IOException;
+
+    /**
+     * Similar to decodeOp(), but only retrieves the transaction ID of the
+     * Op rather than decoding the entire Op.  If the edit log format
+     * supports length prefixing, this can be much faster than full decoding.
+     *
+     * @return the last txid of the segment, or INVALID_TXID on EOF.
+     */
+    public abstract long scanOp() throws IOException;
+  }
+
+  /**
+   * Reads edit logs which are prefixed with a length.  These edit logs also
+   * include a checksum and transaction ID.
+   */
+  private static class LengthPrefixedReader extends Reader {
+    /**
+     * The minimum length of a length-prefixed Op.
+     *
+     * The minimum Op has:
+     * 1-byte opcode
+     * 4-byte length
+     * 8-byte txid
+     * 0-byte body
+     * 4-byte checksum
+     */
+    private static final int MIN_OP_LENGTH = 17;
+
+    /**
+     * The op id length.
+     *
+     * Not included in the stored length.
+     */
+    private static final int OP_ID_LENGTH = 1;
+
+    /**
+     * The checksum length.
+     *
+     * Not included in the stored length.
+     */
+    private static final int CHECKSUM_LENGTH = 4;
+
+    private final Checksum checksum;
+
+    LengthPrefixedReader(DataInputStream in, StreamLimiter limiter,
+                         int logVersion) {
+      super(in, limiter, logVersion);
+      this.checksum = DataChecksum.newCrc32();
+    }
+
+    @Override
+    public FSEditLogOp decodeOp() throws IOException {
+      long txid = decodeOpFrame();
+      if (txid == HdfsServerConstants.INVALID_TXID) {
+        return null;
+      }
+      in.reset();
       in.mark(maxOpSize);
+      FSEditLogOpCodes opCode = FSEditLogOpCodes.fromByte(in.readByte());
+      FSEditLogOp op = cache.get(opCode);
+      if (op == null) {
+        throw new IOException("Read invalid opcode " + opCode);
+      }
+      op.setTransactionId(txid);
+      IOUtils.skipFully(in, 4 + 8); // skip length and txid
+      op.readFields(in, logVersion);
+      // skip over the checksum, which we validated above.
+      IOUtils.skipFully(in, CHECKSUM_LENGTH);
+      return op;
+    }
+
+    @Override
+    public long scanOp() throws IOException {
+      return decodeOpFrame();
+    }
 
-      if (checksum != null) {
-        checksum.reset();
+    /**
+     * Decode the opcode "frame".  This includes reading the opcode and
+     * transaction ID, and validating the checksum and length.  It does not
+     * include reading the opcode-specific fields.
+     * The input stream will be advanced to the end of the op at the end of this
+     * function.
+     *
+     * @return        The transaction ID of the op, or
+     *                  HdfsServerConstants.INVALID_TXID if we hit EOF.
+     */
+    private long decodeOpFrame() throws IOException {
+      limiter.setLimit(maxOpSize);
+      in.mark(maxOpSize);
+      byte opCodeByte;
+      try {
+        opCodeByte = in.readByte();
+      } catch (EOFException eof) {
+        // EOF at an opcode boundary is expected.
+        return HdfsServerConstants.INVALID_TXID;
       }
+      if (opCodeByte == FSEditLogOpCodes.OP_INVALID.getOpCode()) {
+        verifyTerminator();
+        return HdfsServerConstants.INVALID_TXID;
+      }
+      // Here, we verify that the Op size makes sense and that the
+      // data matches its checksum before attempting to construct an Op.
+      // This is important because otherwise we may encounter an
+      // OutOfMemoryError which could bring down the NameNode or
+      // JournalNode when reading garbage data.
+      int opLength = in.readInt() + OP_ID_LENGTH + CHECKSUM_LENGTH;
+      if (opLength > maxOpSize) {
+        throw new IOException("Op " + (int)opCodeByte + " has size " +
+            opLength + ", but maxOpSize = " + maxOpSize);
+      } else if (opLength < MIN_OP_LENGTH) {
+        throw new IOException("Op " + (int)opCodeByte + " has size " +
+            opLength + ", but the minimum op size is " + MIN_OP_LENGTH);
+      }
+      long txid = in.readLong();
+      // Verify checksum
+      in.reset();
+      in.mark(maxOpSize);
+      checksum.reset();
+      for (int rem = opLength - CHECKSUM_LENGTH; rem > 0;) {
+        int toRead = Math.min(temp.length, rem);
+        IOUtils.readFully(in, temp, 0, toRead);
+        checksum.update(temp, 0, toRead);
+        rem -= toRead;
+      }
+      int expectedChecksum = in.readInt();
+      int calculatedChecksum = (int)checksum.getValue();
+      if (expectedChecksum != calculatedChecksum) {
+        throw new ChecksumException(
+            "Transaction is corrupt. Calculated checksum is " +
+            calculatedChecksum + " but read checksum " +
+            expectedChecksum, txid);
+      }
+      return txid;
+    }
+  }
+
+  /**
+   * Read edit logs which have a checksum and a transaction ID, but not a
+   * length.
+   */
+  private static class ChecksummedReader extends Reader {
+    private final Checksum checksum;
 
+    ChecksummedReader(Checksum checksum, DataInputStream in,
+                      StreamLimiter limiter, int logVersion) {
+      super(new DataInputStream(
+          new CheckedInputStream(in, checksum)), limiter, logVersion);
+      this.checksum = checksum;
+    }
+
+    @Override
+    public FSEditLogOp decodeOp() throws IOException {
+      limiter.setLimit(maxOpSize);
+      in.mark(maxOpSize);
+      // Reset the checksum.  Since we are using a CheckedInputStream, each
+      // subsequent read from the stream will update the checksum.
+      checksum.reset();
       byte opCodeByte;
       try {
         opCodeByte = in.readByte();
@@ -4668,88 +4821,89 @@ public abstract class FSEditLogOp {
         // EOF at an opcode boundary is expected.
         return null;
       }
-
       FSEditLogOpCodes opCode = FSEditLogOpCodes.fromByte(opCodeByte);
       if (opCode == OP_INVALID) {
         verifyTerminator();
         return null;
       }
-
       FSEditLogOp op = cache.get(opCode);
       if (op == null) {
         throw new IOException("Read invalid opcode " + opCode);
       }
-
-      if (supportEditLogLength) {
-        in.readInt();
+      op.setTransactionId(in.readLong());
+      op.readFields(in, logVersion);
+      // Verify checksum
+      int calculatedChecksum = (int)checksum.getValue();
+      int expectedChecksum = in.readInt();
+      if (expectedChecksum != calculatedChecksum) {
+        throw new ChecksumException(
+            "Transaction is corrupt. Calculated checksum is " +
+                calculatedChecksum + " but read checksum " +
+                expectedChecksum, op.txid);
       }
+      return op;
+    }
 
+    @Override
+    public long scanOp() throws IOException {
+      // Edit logs of this age don't have any length prefix, so we just have
+      // to read the entire Op.
+      FSEditLogOp op = decodeOp();
+      return op == null ?
+          HdfsServerConstants.INVALID_TXID : op.getTransactionId();
+    }
+  }
+
+  /**
+   * Read older edit logs which may or may not have transaction IDs and other
+   * features.  This code is used during upgrades and to allow HDFS INotify to
+   * read older edit log files.
+   */
+  private static class LegacyReader extends Reader {
+    LegacyReader(DataInputStream in,
+                 StreamLimiter limiter, int logVersion) {
+      super(in, limiter, logVersion);
+    }
+
+    @Override
+    public FSEditLogOp decodeOp() throws IOException {
+      limiter.setLimit(maxOpSize);
+      in.mark(maxOpSize);
+      byte opCodeByte;
+      try {
+        opCodeByte = in.readByte();
+      } catch (EOFException eof) {
+        // EOF at an opcode boundary is expected.
+        return null;
+      }
+      FSEditLogOpCodes opCode = FSEditLogOpCodes.fromByte(opCodeByte);
+      if (opCode == OP_INVALID) {
+        verifyTerminator();
+        return null;
+      }
+      FSEditLogOp op = cache.get(opCode);
+      if (op == null) {
+        throw new IOException("Read invalid opcode " + opCode);
+      }
       if (NameNodeLayoutVersion.supports(
-          LayoutVersion.Feature.STORED_TXIDS, logVersion)) {
-        // Read the txid
+            LayoutVersion.Feature.STORED_TXIDS, logVersion)) {
         op.setTransactionId(in.readLong());
       } else {
         op.setTransactionId(HdfsServerConstants.INVALID_TXID);
       }
-
       op.readFields(in, logVersion);
-
-      validateChecksum(in, checksum, op.txid);
       return op;
     }
 
-    /**
-     * Similar with decodeOp(), but instead of doing the real decoding, we skip
-     * the content of the op if the length of the editlog is supported.
-     * @return the last txid of the segment, or INVALID_TXID on exception
-     */
+    @Override
     public long scanOp() throws IOException {
-      if (supportEditLogLength) {
-        limiter.setLimit(maxOpSize);
-        in.mark(maxOpSize);
-
-        final byte opCodeByte;
-        try {
-          opCodeByte = in.readByte(); // op code
-        } catch (EOFException e) {
-          return HdfsServerConstants.INVALID_TXID;
-        }
-
-        FSEditLogOpCodes opCode = FSEditLogOpCodes.fromByte(opCodeByte);
-        if (opCode == OP_INVALID) {
-          verifyTerminator();
-          return HdfsServerConstants.INVALID_TXID;
-        }
-
-        int length = in.readInt(); // read the length of the op
-        long txid = in.readLong(); // read the txid
-
-        // skip the remaining content
-        IOUtils.skipFully(in, length - 8); 
-        // TODO: do we want to verify checksum for JN? For now we don't.
-        return txid;
-      } else {
-        FSEditLogOp op = decodeOp();
-        return op == null ? HdfsServerConstants.INVALID_TXID : op.getTransactionId();
-      }
-    }
-
-    /**
-     * Validate a transaction's checksum
-     */
-    private void validateChecksum(DataInputStream in,
-                                  Checksum checksum,
-                                  long txid)
-        throws IOException {
-      if (checksum != null) {
-        int calculatedChecksum = (int)checksum.getValue();
-        int readChecksum = in.readInt(); // read in checksum
-        if (readChecksum != calculatedChecksum) {
-          throw new ChecksumException(
-              "Transaction is corrupt. Calculated checksum is " +
-              calculatedChecksum + " but read checksum " + readChecksum, txid);
-        }
+      if (!NameNodeLayoutVersion.supports(
+          LayoutVersion.Feature.STORED_TXIDS, logVersion)) {
+        throw new IOException("Can't scan a pre-transactional edit log.");
       }
+      FSEditLogOp op = decodeOp();
+      return op == null ?
+          HdfsServerConstants.INVALID_TXID : op.getTransactionId();
     }
   }
 

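The heart of the hardening is LengthPrefixedReader#decodeOpFrame above: the stored length is bounds-checked and the checksum verified before any Op is constructed, so garbage bytes can no longer provoke huge allocations. A standalone sketch of that ordering, assuming a mark-supporting stream and using java.util.zip.CRC32 in place of DataChecksum.newCrc32():

    import java.io.DataInputStream;
    import java.io.IOException;
    import java.util.zip.CRC32;

    // Hedged sketch of the frame check: opcode (1) + length (4) + txid (8)
    // + body + checksum (4), with MIN_OP_LENGTH = 17 as defined above.
    public class FrameCheckSketch {
      static long checkFrame(DataInputStream in, int maxOpSize)
          throws IOException {
        in.mark(maxOpSize);                      // stream must support mark()
        byte opCode = in.readByte();
        int opLength = in.readInt() + 1 + 4;     // add op id + checksum bytes
        if (opLength > maxOpSize || opLength < 17) {
          throw new IOException("Op " + opCode + " has bad size " + opLength);
        }
        long txid = in.readLong();
        in.reset();                              // rewind to the opcode
        CRC32 crc = new CRC32();
        byte[] temp = new byte[4096];
        for (int rem = opLength - 4; rem > 0; ) {  // all but the checksum
          int n = in.read(temp, 0, Math.min(temp.length, rem));
          if (n < 0) {
            throw new IOException("Unexpected EOF mid-frame");
          }
          crc.update(temp, 0, n);
          rem -= n;
        }
        if (in.readInt() != (int) crc.getValue()) {
          throw new IOException("Transaction " + txid + " is corrupt");
        }
        return txid;                             // stream now at end of frame
      }
    }
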
http://git-wip-us.apache.org/repos/asf/hadoop/blob/24f6a7c9/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java
index 68d008f..e59dec4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java
@@ -875,7 +875,7 @@ public class TestEditLog {
       tracker = new FSEditLogLoader.PositionTrackingInputStream(in);
       in = new DataInputStream(tracker);
             
-      reader = new FSEditLogOp.Reader(in, tracker, version);
+      reader = FSEditLogOp.Reader.create(in, tracker, version);
     }
   
     @Override

http://git-wip-us.apache.org/repos/asf/hadoop/blob/24f6a7c9/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogFileInputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogFileInputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogFileInputStream.java
index c0eb890..aecdc78 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogFileInputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogFileInputStream.java
@@ -25,19 +25,35 @@ import static org.mockito.Mockito.doReturn;
 import static org.mockito.Mockito.mock;
 
 import java.io.ByteArrayInputStream;
+import java.io.File;
+import java.io.IOException;
+import java.io.RandomAccessFile;
 import java.net.HttpURLConnection;
 import java.net.URL;
 import java.util.EnumMap;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.util.Holder;
 import org.apache.hadoop.hdfs.web.URLConnectionFactory;
+import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.test.PathUtils;
+import org.junit.Assert;
 import org.junit.Test;
 import org.mockito.Mockito;
 
 public class TestEditLogFileInputStream {
+  private static final Log LOG =
+      LogFactory.getLog(TestEditLogFileInputStream.class);
   private static final byte[] FAKE_LOG_DATA = TestEditLog.HADOOP20_SOME_EDITS;
 
+  private static final File TEST_DIR = PathUtils
+      .getTestDir(TestEditLogFileInputStream.class);
+
   @Test
   public void testReadURL() throws Exception {
     HttpURLConnection conn = mock(HttpURLConnection.class);
@@ -63,4 +79,68 @@ public class TestEditLogFileInputStream {
     assertEquals(FAKE_LOG_DATA.length, elis.length());
     elis.close();
   }
+
+  /**
+   * Regression test for HDFS-8965, which verifies that
+   * EditLogFileInputStream#scanNextOp verifies Op checksums.
+   */
+  @Test(timeout=60000)
+  public void testScanCorruptEditLog() throws Exception {
+    Configuration conf = new Configuration();
+    File editLog = new File(System.getProperty(
+        "test.build.data", "/tmp"), "testCorruptEditLog");
+
+    LOG.debug("Creating test edit log file: " + editLog);
+    EditLogFileOutputStream elos = new EditLogFileOutputStream(conf,
+        editLog.getAbsoluteFile(), 8192);
+    elos.create(NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
+    FSEditLogOp.OpInstanceCache cache = new FSEditLogOp.OpInstanceCache();
+    FSEditLogOp.MkdirOp mkdirOp = FSEditLogOp.MkdirOp.getInstance(cache);
+    mkdirOp.reset();
+    mkdirOp.setRpcCallId(123);
+    mkdirOp.setTransactionId(1);
+    mkdirOp.setInodeId(789L);
+    mkdirOp.setPath("/mydir");
+    PermissionStatus perms = PermissionStatus.createImmutable(
+        "myuser", "mygroup", FsPermission.createImmutable((short)0777));
+    mkdirOp.setPermissionStatus(perms);
+    elos.write(mkdirOp);
+    mkdirOp.reset();
+    mkdirOp.setRpcCallId(456);
+    mkdirOp.setTransactionId(2);
+    mkdirOp.setInodeId(123L);
+    mkdirOp.setPath("/mydir2");
+    perms = PermissionStatus.createImmutable(
+        "myuser", "mygroup", FsPermission.createImmutable((short)0666));
+    mkdirOp.setPermissionStatus(perms);
+    elos.write(mkdirOp);
+    elos.setReadyToFlush();
+    elos.flushAndSync(false);
+    elos.close();
+    long fileLen = editLog.length();
+
+    LOG.debug("Corrupting last 4 bytes of edit log file " + editLog +
+        ", whose length is " + fileLen);
+    RandomAccessFile rwf = new RandomAccessFile(editLog, "rw");
+    rwf.seek(fileLen - 4);
+    int b = rwf.readInt();
+    rwf.seek(fileLen - 4);
+    rwf.writeInt(b + 1);
+    rwf.close();
+
+    EditLogFileInputStream elis = new EditLogFileInputStream(editLog);
+    Assert.assertEquals(NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION,
+        elis.getVersion(true));
+    Assert.assertEquals(1, elis.scanNextOp());
+    LOG.debug("Read transaction 1 from " + editLog);
+    try {
+      elis.scanNextOp();
+      Assert.fail("Expected scanNextOp to fail when op checksum was corrupt.");
+    } catch (IOException e) {
+      LOG.debug("Caught expected checksum error when reading corrupt " +
+          "transaction 2", e);
+      GenericTestUtils.assertExceptionContains("Transaction is corrupt.", e);
+    }
+    elis.close();
+  }
 }


[35/50] [abbrv] hadoop git commit: HDFS-8964. When validating the edit log, do not read at or beyond the file offset that is being written (Zhe Zhang via Colin P. McCabe)

Posted by vv...@apache.org.
HDFS-8964. When validating the edit log, do not read at or beyond the file offset that is being written (Zhe Zhang via Colin P. McCabe)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/53c38cc8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/53c38cc8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/53c38cc8

Branch: refs/heads/YARN-3926
Commit: 53c38cc89ab979ec47557dcfa7affbad20578c0a
Parents: 524ba87
Author: Colin Patrick Mccabe <cm...@cloudera.com>
Authored: Thu Sep 3 11:22:47 2015 -0700
Committer: Colin Patrick Mccabe <cm...@cloudera.com>
Committed: Thu Sep 3 11:22:47 2015 -0700

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |   3 +
 .../hdfs/server/namenode/FSEditLogTestUtil.java |   3 +-
 .../hadoop/hdfs/qjournal/server/Journal.java    |  22 ++--
 .../server/namenode/EditLogFileInputStream.java |  15 ++-
 .../hadoop/hdfs/server/namenode/FSEditLog.java  |  10 ++
 .../hdfs/server/namenode/FSEditLogLoader.java   |  12 ++-
 .../server/namenode/FileJournalManager.java     |  39 +++++--
 .../hdfs/server/namenode/SecondaryNameNode.java |   2 +-
 .../TestCheckPointForSecurityTokens.java        |   4 +-
 .../hdfs/server/namenode/TestEditLog.java       | 103 ++++++++++++++++++-
 .../server/namenode/TestFSEditLogLoader.java    |  13 ++-
 11 files changed, 199 insertions(+), 27 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/53c38cc8/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 275dce2..afc6cf4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1289,6 +1289,9 @@ Release 2.8.0 - UNRELEASED
 
     HDFS-9009. Send metrics logs to NullAppender by default. (Arpit Agarwal)
 
+    HDFS-8964. When validating the edit log, do not read at or beyond the file
+    offset that is being written (Zhe Zhang via Colin P. McCabe)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/53c38cc8/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogTestUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogTestUtil.java
index a46f9cf..e5b9d01 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogTestUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogTestUtil.java
@@ -33,7 +33,8 @@ public class FSEditLogTestUtil {
 
   public static long countTransactionsInStream(EditLogInputStream in) 
       throws IOException {
-    FSEditLogLoader.EditLogValidation validation = FSEditLogLoader.validateEditLog(in);
+    FSEditLogLoader.EditLogValidation validation =
+        FSEditLogLoader.validateEditLog(in, Long.MAX_VALUE);
     return (validation.getEndTxId() - in.getFirstTxId()) + 1;
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/53c38cc8/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java
index 2953055..813f267 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java
@@ -151,7 +151,7 @@ public class Journal implements Closeable {
     
     EditLogFile latest = scanStorageForLatestEdits();
     if (latest != null) {
-      highestWrittenTxId = latest.getLastTxId();
+      updateHighestWrittenTxId(latest.getLastTxId());
     }
   }
 
@@ -266,7 +266,17 @@ public class Journal implements Closeable {
   synchronized long getHighestWrittenTxId() {
     return highestWrittenTxId;
   }
-  
+
+  /**
+   * Update the highest Tx ID that has been written to the journal. Also update
+   * the {@link FileJournalManager#lastReadableTxId} of the underlying fjm.
+   * @param val The new value
+   */
+  private void updateHighestWrittenTxId(long val) {
+    highestWrittenTxId = val;
+    fjm.setLastReadableTxId(val);
+  }
+
   @VisibleForTesting
   JournalMetrics getMetricsForTests() {
     return metrics;
@@ -399,7 +409,7 @@ public class Journal implements Closeable {
     metrics.bytesWritten.incr(records.length);
     metrics.txnsWritten.incr(numTxns);
     
-    highestWrittenTxId = lastTxnId;
+    updateHighestWrittenTxId(lastTxnId);
     nextTxId = lastTxnId + 1;
   }
 
@@ -782,8 +792,8 @@ public class Journal implements Closeable {
             ": no current segment in place");
         
         // Update the highest txid for lag metrics
-        highestWrittenTxId = Math.max(segment.getEndTxId(),
-            highestWrittenTxId);
+        updateHighestWrittenTxId(Math.max(segment.getEndTxId(),
+            highestWrittenTxId));
       } else {
         LOG.info("Synchronizing log " + TextFormat.shortDebugString(segment) +
             ": old segment " + TextFormat.shortDebugString(currentSegment) +
@@ -812,7 +822,7 @@ public class Journal implements Closeable {
         // If we're shortening the log, update our highest txid
         // used for lag metrics.
         if (txnRange(currentSegment).containsLong(highestWrittenTxId)) {
-          highestWrittenTxId = segment.getEndTxId();
+          updateHighestWrittenTxId(segment.getEndTxId());
         }
       }
       syncedFile = syncLog(reqInfo, segment, fromUrl);
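
All three call sites above now funnel through updateHighestWrittenTxId, so the journal's write frontier and the FileJournalManager's read ceiling can never drift apart. A self-contained sketch of that invariant; ReadCeiling stands in for the FileJournalManager#setLastReadableTxId dependency:

    // Hedged sketch of the pairing the change enforces: under the Journal
    // lock, advancing the write frontier also raises the read ceiling, so
    // validateLog() never scans bytes a writer may still be appending.
    public class JournalFrontierSketch {
      interface ReadCeiling { void setLastReadableTxId(long id); }

      private final ReadCeiling fjm;
      private long highestWrittenTxId;

      JournalFrontierSketch(ReadCeiling fjm) { this.fjm = fjm; }

      synchronized void updateHighestWrittenTxId(long val) {
        highestWrittenTxId = val;            // write frontier advances...
        fjm.setLastReadableTxId(val);        // ...and the read ceiling with it
      }
    }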

http://git-wip-us.apache.org/repos/asf/hadoop/blob/53c38cc8/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileInputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileInputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileInputStream.java
index 73a162e..3bf0ab4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileInputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileInputStream.java
@@ -300,8 +300,17 @@ public class EditLogFileInputStream extends EditLogInputStream {
     return getName();
   }
 
-  static FSEditLogLoader.EditLogValidation validateEditLog(File file)
-      throws IOException {
+  /**
+   * @param file File being validated.
+   * @param maxTxIdToValidate Maximum Tx ID to try to validate. Validation
+   *                          returns after reading this or a higher ID.
+   *                          The file portion beyond this ID is potentially
+   *                          being updated.
+   * @return Result of the validation
+   * @throws IOException
+   */
+  static FSEditLogLoader.EditLogValidation validateEditLog(File file,
+      long maxTxIdToValidate) throws IOException {
     EditLogFileInputStream in;
     try {
       in = new EditLogFileInputStream(file);
@@ -314,7 +323,7 @@ public class EditLogFileInputStream extends EditLogInputStream {
     }
     
     try {
-      return FSEditLogLoader.validateEditLog(in);
+      return FSEditLogLoader.validateEditLog(in, maxTxIdToValidate);
     } finally {
       IOUtils.closeStream(in);
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/53c38cc8/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
index edf88c9..e255cff 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
@@ -674,6 +674,16 @@ public class FSEditLog implements LogsPurgeable {
       synchronized (this) {
         if (sync) {
           synctxid = syncStart;
+          for (JournalManager jm : journalSet.getJournalManagers()) {
+            /**
+             * {@link FileJournalManager#lastReadableTxId} is only meaningful
+             * for file-based journals. Therefore the interface is not added to
+             * other types of {@link JournalManager}.
+             */
+            if (jm instanceof FileJournalManager) {
+              ((FileJournalManager)jm).setLastReadableTxId(syncStart);
+            }
+          }
           isSyncRunning = false;
         }
         this.notifyAll();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/53c38cc8/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
index fc0bb78..bb36ca2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
@@ -1112,8 +1112,14 @@ public class FSEditLogLoader {
    * If there are invalid or corrupt transactions in the middle of the stream,
    * validateEditLog will skip over them.
    * This reads through the stream but does not close it.
+   *
+   * @param maxTxIdToValidate Maximum Tx ID to try to validate. Validation
+   *                          returns after reading this or a higher ID.
+   *                          The file portion beyond this ID is potentially
+   *                          being updated.
    */
-  static EditLogValidation validateEditLog(EditLogInputStream in) {
+  static EditLogValidation validateEditLog(EditLogInputStream in,
+      long maxTxIdToValidate) {
     long lastPos = 0;
     long lastTxId = HdfsServerConstants.INVALID_TXID;
     long numValid = 0;
@@ -1136,6 +1142,10 @@ public class FSEditLogLoader {
           || op.getTransactionId() > lastTxId) {
         lastTxId = op.getTransactionId();
       }
+      if (lastTxId >= maxTxIdToValidate) {
+        break;
+      }
+
       numValid++;
     }
     return new EditLogValidation(lastPos, lastTxId, false);
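
Note the ordering in the loop body: the break fires before numValid is incremented, so the op that reaches the bound still updates lastTxId, but everything at or beyond maxTxIdToValidate is left uncounted. A standalone sketch of the bounded scan, with Op as an illustrative stand-in for FSEditLogOp:

    // Sketch: scan ops, stopping once the bound is reached, so the tail of
    // the file (which a writer may still be appending to) is never touched.
    final class Op {
      final long txId;
      Op(long txId) { this.txId = txId; }
    }

    static long countValidOps(java.util.Iterator<Op> ops, long maxTxIdToValidate) {
      long lastTxId = -12345;  // stand-in for HdfsServerConstants.INVALID_TXID
      long numValid = 0;
      while (ops.hasNext()) {
        Op op = ops.next();
        if (lastTxId == -12345 || op.txId > lastTxId) {
          lastTxId = op.txId;
        }
        if (lastTxId >= maxTxIdToValidate) {
          break;  // matches the patch: the bounding op itself is not counted
        }
        numValid++;
      }
      return numValid;
    }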

http://git-wip-us.apache.org/repos/asf/hadoop/blob/53c38cc8/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java
index ebd7475..a1488eb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java
@@ -76,6 +76,15 @@ public class FileJournalManager implements JournalManager {
 
   private File currentInProgress = null;
 
+  /**
+   * A FileJournalManager should maintain the largest Tx ID that has been
+   * safely written to its edit log files.
+   * It should prevent readers from reading beyond this ID, to avoid a
+   * potential race with ongoing writers.
+   * The initial value indicates that all transactions can be read.
+   */
+  private long lastReadableTxId = Long.MAX_VALUE;
+
   @VisibleForTesting
   StoragePurger purger
     = new NNStorageRetentionManager.DeletionStoragePurger();
@@ -159,6 +168,15 @@ public class FileJournalManager implements JournalManager {
     this.outputBufferCapacity = size;
   }
 
+
+  public long getLastReadableTxId() {
+    return lastReadableTxId;
+  }
+
+  public void setLastReadableTxId(long id) {
+    this.lastReadableTxId = id;
+  }
+
   @Override
   public void purgeLogsOlderThan(long minTxIdToKeep)
       throws IOException {
@@ -193,7 +211,7 @@ public class FileJournalManager implements JournalManager {
       }
       if (elf.isInProgress()) {
         try {
-          elf.validateLog();
+          elf.validateLog(getLastReadableTxId());
         } catch (IOException e) {
           LOG.error("got IOException while trying to validate header of " +
               elf + ".  Skipping.", e);
@@ -325,11 +343,13 @@ public class FileJournalManager implements JournalManager {
           (inProgressOk ? " (inProgress ok) " : " (excluding inProgress) ") +
           "from among " + elfs.size() + " candidate file(s)");
     }
-    addStreamsToCollectionFromFiles(elfs, streams, fromTxId, inProgressOk);
+    addStreamsToCollectionFromFiles(elfs, streams, fromTxId,
+        getLastReadableTxId(), inProgressOk);
   }
   
   static void addStreamsToCollectionFromFiles(Collection<EditLogFile> elfs,
-      Collection<EditLogInputStream> streams, long fromTxId, boolean inProgressOk) {
+      Collection<EditLogInputStream> streams, long fromTxId, long maxTxIdToValidate,
+      boolean inProgressOk) {
     for (EditLogFile elf : elfs) {
       if (elf.isInProgress()) {
         if (!inProgressOk) {
@@ -340,7 +360,7 @@ public class FileJournalManager implements JournalManager {
           continue;
         }
         try {
-          elf.validateLog();
+          elf.validateLog(maxTxIdToValidate);
         } catch (IOException e) {
           LOG.error("got IOException while trying to validate header of " +
               elf + ".  Skipping.", e);
@@ -384,7 +404,7 @@ public class FileJournalManager implements JournalManager {
           continue;
         }
 
-        elf.validateLog();
+        elf.validateLog(getLastReadableTxId());
 
         if (elf.hasCorruptHeader()) {
           elf.moveAsideCorruptFile();
@@ -516,9 +536,14 @@ public class FileJournalManager implements JournalManager {
      * Find out where the edit log ends.
      * This will update the lastTxId of the EditLogFile or
      * mark it as corrupt if it is.
+     * @param maxTxIdToValidate Maximum Tx ID to try to validate. Validation
+     *                          returns after reading this or a higher ID.
+     *                          The portion of the file beyond this ID is
+     *                          potentially still being written.
      */
-    public void validateLog() throws IOException {
-      EditLogValidation val = EditLogFileInputStream.validateEditLog(file);
+    public void validateLog(long maxTxIdToValidate) throws IOException {
+      EditLogValidation val = EditLogFileInputStream.validateEditLog(file,
+          maxTxIdToValidate);
       this.lastTxId = val.getEndTxId();
       this.hasCorruptHeader = val.hasCorruptHeader();
     }
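
With the new parameter, every call site states its bound explicitly. A short usage sketch drawn from the call sites in this patch (elf is an EditLogFile as above):

    // In-progress segment owned by a live writer: validate only up to the
    // last synced transaction, as recorded by the FileJournalManager.
    elf.validateLog(getLastReadableTxId());

    // Finalized segment, or offline consumers such as the SecondaryNameNode
    // path below: Long.MAX_VALUE means "no bound", which preserves the
    // pre-patch behavior.
    elf.validateLog(Long.MAX_VALUE);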

http://git-wip-us.apache.org/repos/asf/hadoop/blob/53c38cc8/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
index 2267853..e3e0a7d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
@@ -907,7 +907,7 @@ public class SecondaryNameNode implements Runnable,
             throw new RuntimeException(ioe);
           }
           FileJournalManager.addStreamsToCollectionFromFiles(editFiles, streams,
-              fromTxId, inProgressOk);
+              fromTxId, Long.MAX_VALUE, inProgressOk);
         }
       }
       

http://git-wip-us.apache.org/repos/asf/hadoop/blob/53c38cc8/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckPointForSecurityTokens.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckPointForSecurityTokens.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckPointForSecurityTokens.java
index 9401d07..d5e64ae 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckPointForSecurityTokens.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckPointForSecurityTokens.java
@@ -88,7 +88,7 @@ public class TestCheckPointForSecurityTokens {
       for (StorageDirectory sd : nn.getFSImage().getStorage().dirIterable(null)) {
         EditLogFile log = FSImageTestUtil.findLatestEditsLog(sd);
         assertTrue(log.isInProgress());
-        log.validateLog();
+        log.validateLog(Long.MAX_VALUE);
         long numTransactions = (log.getLastTxId() - log.getFirstTxId()) + 1;
         assertEquals("In-progress log " + log + " should have 5 transactions",
                     5, numTransactions);
@@ -105,7 +105,7 @@ public class TestCheckPointForSecurityTokens {
       for (StorageDirectory sd : nn.getFSImage().getStorage().dirIterable(null)) {
         EditLogFile log = FSImageTestUtil.findLatestEditsLog(sd);
         assertTrue(log.isInProgress());
-        log.validateLog();
+        log.validateLog(Long.MAX_VALUE);
         long numTransactions = (log.getLastTxId() - log.getFirstTxId()) + 1;
         assertEquals("In-progress log " + log + " should only have START txn",
             1, numTransactions);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/53c38cc8/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java
index e59dec4..0495860 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java
@@ -66,6 +66,8 @@ import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSInotifyEventInputStream;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
@@ -83,6 +85,9 @@ import org.apache.hadoop.test.PathUtils;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Time;
 import org.apache.log4j.Level;
+import org.apache.log4j.AppenderSkeleton;
+import org.apache.log4j.LogManager;
+import org.apache.log4j.spi.LoggingEvent;
 import org.junit.Test;
 import org.mockito.Mockito;
 import org.xml.sax.ContentHandler;
@@ -1223,7 +1228,8 @@ public class TestEditLog {
                                                                           TXNS_PER_ROLL*11);
 
     for (EditLogInputStream edits : editStreams) {
-      FSEditLogLoader.EditLogValidation val = FSEditLogLoader.validateEditLog(edits);
+      FSEditLogLoader.EditLogValidation val =
+          FSEditLogLoader.validateEditLog(edits, Long.MAX_VALUE);
       long read = (val.getEndTxId() - edits.getFirstTxId()) + 1;
       LOG.info("Loading edits " + edits + " read " + read);
       assertEquals(startTxId, edits.getFirstTxId());
@@ -1573,4 +1579,99 @@ public class TestEditLog {
       }
     }
   }
+
+  class TestAppender extends AppenderSkeleton {
+    private final List<LoggingEvent> log = new ArrayList<>();
+
+    @Override
+    public boolean requiresLayout() {
+      return false;
+    }
+
+    @Override
+    protected void append(final LoggingEvent loggingEvent) {
+      log.add(loggingEvent);
+    }
+
+    @Override
+    public void close() {
+    }
+
+    public List<LoggingEvent> getLog() {
+      return new ArrayList<>(log);
+    }
+  }
+
+  /**
+   * Regression test for reading an edit log that is actively being
+   * written to: the reader must not attempt to read past the last
+   * synced transaction.
+   */
+  @Test
+  public void testReadActivelyUpdatedLog() throws Exception {
+    final TestAppender appender = new TestAppender();
+    LogManager.getRootLogger().addAppender(appender);
+    Configuration conf = new HdfsConfiguration();
+    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
+    // Set single handler thread, so all transactions hit same thread-local ops.
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_HANDLER_COUNT_KEY, 1);
+    MiniDFSCluster cluster = null;
+    try {
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
+      cluster.waitActive();
+      FSImage fsimage = cluster.getNamesystem().getFSImage();
+      StorageDirectory sd = fsimage.getStorage().getStorageDir(0);
+
+      final DistributedFileSystem fileSys = cluster.getFileSystem();
+      DFSInotifyEventInputStream events = fileSys.getInotifyEventStream();
+      fileSys.mkdirs(new Path("/test"));
+      fileSys.mkdirs(new Path("/test/dir1"));
+      fileSys.delete(new Path("/test/dir1"), true);
+      fsimage.getEditLog().logSync();
+      fileSys.mkdirs(new Path("/test/dir2"));
+
+
+      final File inProgressEdit = NNStorage.getInProgressEditsFile(sd, 1);
+      assertTrue(inProgressEdit.exists());
+      EditLogFileInputStream elis = new EditLogFileInputStream(inProgressEdit);
+      FSEditLogOp op;
+      long pos = 0;
+
+      while (true) {
+        op = elis.readOp();
+        if (op != null && op.opCode != FSEditLogOpCodes.OP_INVALID) {
+          pos = elis.getPosition();
+        } else {
+          break;
+        }
+      }
+      elis.close();
+      assertTrue(pos > 0);
+
+      RandomAccessFile rwf = new RandomAccessFile(inProgressEdit, "rw");
+      rwf.seek(pos);
+      assertEquals((byte) -1, rwf.readByte());
+
+      rwf.seek(pos + 1);
+      rwf.writeByte(2);
+
+      rwf.close();
+
+      events.poll();
+      String pattern = "Caught exception after reading (.*) ops";
+      Pattern r = Pattern.compile(pattern);
+      final List<LoggingEvent> log = appender.getLog();
+      for (LoggingEvent event : log) {
+        Matcher m = r.matcher(event.getRenderedMessage());
+        if (m.find()) {
+          fail("Should not try to read past latest syned edit log op");
+        }
+      }
+
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+      LogManager.getRootLogger().removeAppender(appender);
+    }
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/53c38cc8/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java
index 55ba379..3c3423a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java
@@ -318,7 +318,8 @@ public class TestFSEditLogLoader {
     } finally {
       rwf.close();
     }
-    EditLogValidation validation = EditLogFileInputStream.validateEditLog(logFile);
+    EditLogValidation validation =
+        EditLogFileInputStream.validateEditLog(logFile, Long.MAX_VALUE);
     assertTrue(validation.hasCorruptHeader());
   }
 
@@ -333,7 +334,7 @@ public class TestFSEditLogLoader {
     File logFileBak = new File(testDir, logFile.getName() + ".bak");
     Files.copy(logFile, logFileBak);
     EditLogValidation validation =
-        EditLogFileInputStream.validateEditLog(logFile);
+        EditLogFileInputStream.validateEditLog(logFile, Long.MAX_VALUE);
     assertTrue(!validation.hasCorruptHeader());
     // We expect that there will be an OP_START_LOG_SEGMENT, followed by
     // NUM_TXNS opcodes, followed by an OP_END_LOG_SEGMENT.
@@ -346,7 +347,8 @@ public class TestFSEditLogLoader {
       // Restore backup, corrupt the txn opcode
       Files.copy(logFileBak, logFile);
       corruptByteInFile(logFile, txOffset);
-      validation = EditLogFileInputStream.validateEditLog(logFile);
+      validation = EditLogFileInputStream.validateEditLog(logFile,
+          Long.MAX_VALUE);
       long expectedEndTxId = (txId == (NUM_TXNS + 1)) ?
           NUM_TXNS : (NUM_TXNS + 1);
       assertEquals("Failed when corrupting txn opcode at " + txOffset,
@@ -363,7 +365,8 @@ public class TestFSEditLogLoader {
       // Restore backup, corrupt the txn opcode
       Files.copy(logFileBak, logFile);
       truncateFile(logFile, txOffset);
-      validation = EditLogFileInputStream.validateEditLog(logFile);
+      validation = EditLogFileInputStream.validateEditLog(logFile,
+          Long.MAX_VALUE);
       long expectedEndTxId = (txId == 0) ?
           HdfsServerConstants.INVALID_TXID : (txId - 1);
       assertEquals("Failed when corrupting txid " + txId + " txn opcode " +
@@ -381,7 +384,7 @@ public class TestFSEditLogLoader {
     // layout flags section.
     truncateFile(logFile, 8);
     EditLogValidation validation =
-        EditLogFileInputStream.validateEditLog(logFile);
+        EditLogFileInputStream.validateEditLog(logFile, Long.MAX_VALUE);
     assertTrue(!validation.hasCorruptHeader());
     assertEquals(HdfsServerConstants.INVALID_TXID, validation.getEndTxId());
   }


[06/50] [abbrv] hadoop git commit: HDFS-8990. Move RemoteBlockReader to hdfs-client module. Contributed by Mingliang Liu.

Posted by vv...@apache.org.
HDFS-8990. Move RemoteBlockReader to hdfs-client module. Contributed by Mingliang Liu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/826ae1c2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/826ae1c2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/826ae1c2

Branch: refs/heads/YARN-3926
Commit: 826ae1c26d31f87d88efc920b271bec7eec2e17a
Parents: caa04de
Author: Haohui Mai <wh...@apache.org>
Authored: Mon Aug 31 13:54:14 2015 -0700
Committer: Haohui Mai <wh...@apache.org>
Committed: Mon Aug 31 13:54:14 2015 -0700

----------------------------------------------------------------------
 .../apache/hadoop/hdfs/RemoteBlockReader.java   | 512 +++++++++++++++++++
 .../apache/hadoop/hdfs/RemoteBlockReader2.java  | 480 +++++++++++++++++
 .../protocol/datatransfer/PacketHeader.java     | 214 ++++++++
 .../protocol/datatransfer/PacketReceiver.java   | 310 +++++++++++
 .../hdfs/util/ByteBufferOutputStream.java       |  49 ++
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |   3 +
 .../java/org/apache/hadoop/hdfs/DFSClient.java  |   1 -
 .../apache/hadoop/hdfs/RemoteBlockReader.java   | 508 ------------------
 .../apache/hadoop/hdfs/RemoteBlockReader2.java  | 477 -----------------
 .../protocol/datatransfer/PacketHeader.java     | 214 --------
 .../protocol/datatransfer/PacketReceiver.java   | 310 -----------
 .../hdfs/util/ByteBufferOutputStream.java       |  49 --
 .../hdfs/TestClientBlockVerification.java       |   4 +-
 13 files changed, 1570 insertions(+), 1561 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/826ae1c2/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader.java
new file mode 100644
index 0000000..7509da5
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader.java
@@ -0,0 +1,512 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import java.io.BufferedInputStream;
+import java.io.BufferedOutputStream;
+import java.io.DataInputStream;
+import java.io.DataOutputStream;
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.util.EnumSet;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.fs.FSInputChecker;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.ReadOption;
+import org.apache.hadoop.hdfs.net.Peer;
+import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.DatanodeID;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil;
+import org.apache.hadoop.hdfs.protocol.datatransfer.PacketHeader;
+import org.apache.hadoop.hdfs.protocol.datatransfer.Sender;
+import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto;
+import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
+import org.apache.hadoop.hdfs.protocolPB.PBHelperClient;
+import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
+import org.apache.hadoop.hdfs.server.datanode.CachingStrategy;
+import org.apache.hadoop.hdfs.shortcircuit.ClientMmap;
+import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.util.DataChecksum;
+import org.apache.htrace.Sampler;
+import org.apache.htrace.Trace;
+import org.apache.htrace.TraceScope;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+
+/**
+ * @deprecated this is an old implementation that is being left around
+ * in case any issues spring up with the new {@link RemoteBlockReader2} implementation.
+ * It will be removed in the next release.
+ */
+@InterfaceAudience.Private
+@Deprecated
+public class RemoteBlockReader extends FSInputChecker implements BlockReader {
+  static final Logger LOG = LoggerFactory.getLogger(FSInputChecker.class);
+
+  private final Peer peer;
+  private final DatanodeID datanodeID;
+  private final DataInputStream in;
+  private DataChecksum checksum;
+
+  /** offset in block of the last chunk received */
+  private long lastChunkOffset = -1;
+  private long lastChunkLen = -1;
+  private long lastSeqNo = -1;
+
+  /** offset in block where reader wants to actually read */
+  private long startOffset;
+
+  private final long blockId;
+
+  /** offset in block of first chunk - may be less than startOffset
+      if startOffset is not chunk-aligned */
+  private final long firstChunkOffset;
+
+  private final int bytesPerChecksum;
+  private final int checksumSize;
+
+  /**
+   * The total number of bytes we need to transfer from the DN.
+   * This is the amount that the user has requested plus some padding
+   * at the beginning so that the read can begin on a chunk boundary.
+   */
+  private final long bytesNeededToFinish;
+  
+  /**
+   * True if we are reading from a local DataNode.
+   */
+  private final boolean isLocal;
+
+  private boolean eos = false;
+  private boolean sentStatusCode = false;
+  
+  ByteBuffer checksumBytes = null;
+  /** Amount of unread data in the current received packet */
+  int dataLeft = 0;
+  
+  private final PeerCache peerCache;
+  
+  /* FSInputChecker interface */
+  
+  /* same interface as inputStream java.io.InputStream#read()
+   * used by DFSInputStream#read()
+   * This violates one rule when there is a checksum error:
+   * "Read should not modify user buffer before successful read"
+   * because it first reads the data to user buffer and then checks
+   * the checksum.
+   */
+  @Override
+  public synchronized int read(byte[] buf, int off, int len) 
+                               throws IOException {
+    
+    // This has to be set here, *before* the skip, since we can
+    // hit EOS during the skip, in the case that our entire read
+    // is smaller than the checksum chunk.
+    boolean eosBefore = eos;
+
+    //for the first read, skip the extra bytes at the front.
+    if (lastChunkLen < 0 && startOffset > firstChunkOffset && len > 0) {
+      // Skip these bytes. But don't call this.skip()!
+      int toSkip = (int)(startOffset - firstChunkOffset);
+      if ( super.readAndDiscard(toSkip) != toSkip ) {
+        // should never happen
+        throw new IOException("Could not skip required number of bytes");
+      }
+    }
+    
+    int nRead = super.read(buf, off, len);
+
+    // if eos was set in the previous read, send a status code to the DN
+    if (eos && !eosBefore && nRead >= 0) {
+      if (needChecksum()) {
+        sendReadResult(peer, Status.CHECKSUM_OK);
+      } else {
+        sendReadResult(peer, Status.SUCCESS);
+      }
+    }
+    return nRead;
+  }
+
+  @Override
+  public synchronized long skip(long n) throws IOException {
+    /* How can we make sure we don't throw a ChecksumException, at least
+     * in the majority of cases? This implementation can throw one. */
+    long nSkipped = 0;
+    while (nSkipped < n) {
+      int toSkip = (int)Math.min(n-nSkipped, Integer.MAX_VALUE);
+      int ret = readAndDiscard(toSkip);
+      if (ret <= 0) {
+        return nSkipped;
+      }
+      nSkipped += ret;
+    }
+    return nSkipped;
+  }
+
+  @Override
+  public int read() throws IOException {
+    throw new IOException("read() is not expected to be invoked. " +
+                          "Use read(buf, off, len) instead.");
+  }
+  
+  @Override
+  public boolean seekToNewSource(long targetPos) throws IOException {
+    /* Checksum errors are handled outside the BlockReader. 
+     * DFSInputStream does not always call 'seekToNewSource'. In the 
+     * case of pread(), it just tries a different replica without seeking.
+     */ 
+    return false;
+  }
+  
+  @Override
+  public void seek(long pos) throws IOException {
+    throw new IOException("Seek() is not supported in BlockInputChecker");
+  }
+
+  @Override
+  protected long getChunkPosition(long pos) {
+    throw new RuntimeException("getChunkPosition() is not supported, " +
+                               "since seek is not required");
+  }
+  
+  /**
+   * Makes sure that checksumBytes has enough capacity 
+   * and limit is set to the number of checksum bytes needed 
+   * to be read.
+   */
+  private void adjustChecksumBytes(int dataLen) {
+    int requiredSize = 
+      ((dataLen + bytesPerChecksum - 1)/bytesPerChecksum)*checksumSize;
+    if (checksumBytes == null || requiredSize > checksumBytes.capacity()) {
+      checksumBytes =  ByteBuffer.wrap(new byte[requiredSize]);
+    } else {
+      checksumBytes.clear();
+    }
+    checksumBytes.limit(requiredSize);
+  }
+  
+  @Override
+  protected synchronized int readChunk(long pos, byte[] buf, int offset, 
+                                       int len, byte[] checksumBuf) 
+                                       throws IOException {
+    TraceScope scope =
+        Trace.startSpan("RemoteBlockReader#readChunk(" + blockId + ")",
+            Sampler.NEVER);
+    try {
+      return readChunkImpl(pos, buf, offset, len, checksumBuf);
+    } finally {
+      scope.close();
+    }
+  }
+
+  private synchronized int readChunkImpl(long pos, byte[] buf, int offset,
+                                     int len, byte[] checksumBuf)
+                                     throws IOException {
+    // Read one chunk.
+    if (eos) {
+      // Already hit EOF
+      return -1;
+    }
+    
+    // Read one DATA_CHUNK.
+    long chunkOffset = lastChunkOffset;
+    if ( lastChunkLen > 0 ) {
+      chunkOffset += lastChunkLen;
+    }
+    
+    // pos is relative to the start of the first chunk of the read.
+    // chunkOffset is relative to the start of the block.
+    // This makes sure that the read passed from FSInputChecker is the
+    // for the same chunk we expect to be reading from the DN.
+    if ( (pos + firstChunkOffset) != chunkOffset ) {
+      throw new IOException("Mismatch in pos : " + pos + " + " + 
+                            firstChunkOffset + " != " + chunkOffset);
+    }
+
+    // Read next packet if the previous packet has been read completely.
+    if (dataLeft <= 0) {
+      //Read packet headers.
+      PacketHeader header = new PacketHeader();
+      header.readFields(in);
+
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("DFSClient readChunk got header " + header);
+      }
+
+      // Sanity check the lengths
+      if (!header.sanityCheck(lastSeqNo)) {
+           throw new IOException("BlockReader: error in packet header " +
+                                 header);
+      }
+
+      lastSeqNo = header.getSeqno();
+      dataLeft = header.getDataLen();
+      adjustChecksumBytes(header.getDataLen());
+      if (header.getDataLen() > 0) {
+        IOUtils.readFully(in, checksumBytes.array(), 0,
+                          checksumBytes.limit());
+      }
+    }
+
+    // Sanity checks
+    assert len >= bytesPerChecksum;
+    assert checksum != null;
+    assert checksumSize == 0 || (checksumBuf.length % checksumSize == 0);
+
+
+    int checksumsToRead, bytesToRead;
+
+    if (checksumSize > 0) {
+
+      // How many chunks left in our packet - this is a ceiling
+      // since we may have a partial chunk at the end of the file
+      int chunksLeft = (dataLeft - 1) / bytesPerChecksum + 1;
+
+      // How many chunks we can fit in databuffer
+      //  - note this is a floor since we always read full chunks
+      int chunksCanFit = Math.min(len / bytesPerChecksum,
+                                  checksumBuf.length / checksumSize);
+
+      // How many chunks should we read
+      checksumsToRead = Math.min(chunksLeft, chunksCanFit);
+      // How many bytes should we actually read
+      bytesToRead = Math.min(
+        checksumsToRead * bytesPerChecksum, // full chunks
+        dataLeft); // in case we have a partial
+    } else {
+      // no checksum
+      bytesToRead = Math.min(dataLeft, len);
+      checksumsToRead = 0;
+    }
+
+    if ( bytesToRead > 0 ) {
+      // Assert we have enough space
+      assert bytesToRead <= len;
+      assert checksumBytes.remaining() >= checksumSize * checksumsToRead;
+      assert checksumBuf.length >= checksumSize * checksumsToRead;
+      IOUtils.readFully(in, buf, offset, bytesToRead);
+      checksumBytes.get(checksumBuf, 0, checksumSize * checksumsToRead);
+    }
+
+    dataLeft -= bytesToRead;
+    assert dataLeft >= 0;
+
+    lastChunkOffset = chunkOffset;
+    lastChunkLen = bytesToRead;
+
+    // If there's no data left in the current packet after satisfying
+    // this read, and we have satisfied the client read, we expect
+    // an empty packet header from the DN to signify this.
+    // Note that pos + bytesToRead may in fact be greater since the
+    // DN finishes off the entire last chunk.
+    if (dataLeft == 0 &&
+        pos + bytesToRead >= bytesNeededToFinish) {
+
+      // Read header
+      PacketHeader hdr = new PacketHeader();
+      hdr.readFields(in);
+
+      if (!hdr.isLastPacketInBlock() ||
+          hdr.getDataLen() != 0) {
+        throw new IOException("Expected empty end-of-read packet! Header: " +
+                              hdr);
+      }
+
+      eos = true;
+    }
+
+    if ( bytesToRead == 0 ) {
+      return -1;
+    }
+
+    return bytesToRead;
+  }
+  
+  private RemoteBlockReader(String file, String bpid, long blockId,
+      DataInputStream in, DataChecksum checksum, boolean verifyChecksum,
+      long startOffset, long firstChunkOffset, long bytesToRead, Peer peer,
+      DatanodeID datanodeID, PeerCache peerCache) {
+    // Path is used only for printing block and file information in debug
+    super(new Path("/" + Block.BLOCK_FILE_PREFIX + blockId +
+                    ":" + bpid + ":of:"+ file)/*too non path-like?*/,
+          1, verifyChecksum,
+          checksum.getChecksumSize() > 0? checksum : null, 
+          checksum.getBytesPerChecksum(),
+          checksum.getChecksumSize());
+
+    this.isLocal = DFSUtilClient.isLocalAddress(NetUtils.
+        createSocketAddr(datanodeID.getXferAddr()));
+    
+    this.peer = peer;
+    this.datanodeID = datanodeID;
+    this.in = in;
+    this.checksum = checksum;
+    this.startOffset = Math.max( startOffset, 0 );
+    this.blockId = blockId;
+
+    // The total number of bytes that we need to transfer from the DN is
+    // the amount that the user wants (bytesToRead), plus the padding at
+    // the beginning in order to chunk-align. Note that the DN may elect
+    // to send more than this amount if the read starts/ends mid-chunk.
+    this.bytesNeededToFinish = bytesToRead + (startOffset - firstChunkOffset);
+
+    this.firstChunkOffset = firstChunkOffset;
+    lastChunkOffset = firstChunkOffset;
+    lastChunkLen = -1;
+
+    bytesPerChecksum = this.checksum.getBytesPerChecksum();
+    checksumSize = this.checksum.getChecksumSize();
+    this.peerCache = peerCache;
+  }
+
+  /**
+   * Create a new BlockReader specifically to satisfy a read.
+   * This method also sends the OP_READ_BLOCK request.
+   *
+   * @param file  File location
+   * @param block  The block object
+   * @param blockToken  The block token for security
+   * @param startOffset  The read offset, relative to block head
+   * @param len  The number of bytes to read
+   * @param bufferSize  The IO buffer size (not the client buffer size)
+   * @param verifyChecksum  Whether to verify checksum
+   * @param clientName  Client name
+   * @return New BlockReader instance, or null on error.
+   */
+  public static RemoteBlockReader newBlockReader(String file,
+                                     ExtendedBlock block, 
+                                     Token<BlockTokenIdentifier> blockToken,
+                                     long startOffset, long len,
+                                     int bufferSize, boolean verifyChecksum,
+                                     String clientName, Peer peer,
+                                     DatanodeID datanodeID,
+                                     PeerCache peerCache,
+                                     CachingStrategy cachingStrategy)
+                                       throws IOException {
+    // in and out will be closed when sock is closed (by the caller)
+    final DataOutputStream out =
+        new DataOutputStream(new BufferedOutputStream(peer.getOutputStream()));
+    new Sender(out).readBlock(block, blockToken, clientName, startOffset, len,
+        verifyChecksum, cachingStrategy);
+    
+    //
+    // Get bytes in block, set streams
+    //
+
+    DataInputStream in = new DataInputStream(
+        new BufferedInputStream(peer.getInputStream(), bufferSize));
+    
+    BlockOpResponseProto status = BlockOpResponseProto.parseFrom(
+        PBHelperClient.vintPrefixed(in));
+    RemoteBlockReader2.checkSuccess(status, peer, block, file);
+    ReadOpChecksumInfoProto checksumInfo =
+      status.getReadOpChecksumInfo();
+    DataChecksum checksum = DataTransferProtoUtil.fromProto(
+        checksumInfo.getChecksum());
+    //Warning when we get CHECKSUM_NULL?
+    
+    // Read the first chunk offset.
+    long firstChunkOffset = checksumInfo.getChunkOffset();
+    
+    if ( firstChunkOffset < 0 || firstChunkOffset > startOffset ||
+        firstChunkOffset <= (startOffset - checksum.getBytesPerChecksum())) {
+      throw new IOException("BlockReader: error in first chunk offset (" +
+                            firstChunkOffset + ") startOffset is " + 
+                            startOffset + " for file " + file);
+    }
+
+    return new RemoteBlockReader(file, block.getBlockPoolId(), block.getBlockId(),
+        in, checksum, verifyChecksum, startOffset, firstChunkOffset, len,
+        peer, datanodeID, peerCache);
+  }
+
+  @Override
+  public synchronized void close() throws IOException {
+    startOffset = -1;
+    checksum = null;
+    if (peerCache != null & sentStatusCode) {
+      peerCache.put(datanodeID, peer);
+    } else {
+      peer.close();
+    }
+
+    // in will be closed when its Socket is closed.
+  }
+  
+  @Override
+  public void readFully(byte[] buf, int readOffset, int amtToRead)
+      throws IOException {
+    IOUtils.readFully(this, buf, readOffset, amtToRead);
+  }
+
+  @Override
+  public int readAll(byte[] buf, int offset, int len) throws IOException {
+    return readFully(this, buf, offset, len);
+  }
+
+  /**
+   * When the reader reaches end of the read, it sends a status response
+   * (e.g. CHECKSUM_OK) to the DN. Failure to do so could lead to the DN
+   * closing our connection (which we will re-open), but won't affect
+   * data correctness.
+   */
+  void sendReadResult(Peer peer, Status statusCode) {
+    assert !sentStatusCode : "already sent status code to " + peer;
+    try {
+      RemoteBlockReader2.writeReadResult(peer.getOutputStream(), statusCode);
+      sentStatusCode = true;
+    } catch (IOException e) {
+      // It's ok not to be able to send this. But something is probably wrong.
+      LOG.info("Could not send read status (" + statusCode + ") to datanode " +
+               peer.getRemoteAddressString() + ": " + e.getMessage());
+    }
+  }
+
+  @Override
+  public int read(ByteBuffer buf) throws IOException {
+    throw new UnsupportedOperationException("readDirect unsupported in RemoteBlockReader");
+  }
+  
+  @Override
+  public int available() throws IOException {
+    // An optimistic estimate of how much data is available
+    // to us without doing network I/O.
+    return RemoteBlockReader2.TCP_WINDOW_SIZE;
+  }
+
+  @Override
+  public boolean isLocal() {
+    return isLocal;
+  }
+  
+  @Override
+  public boolean isShortCircuit() {
+    return false;
+  }
+
+  @Override
+  public ClientMmap getClientMmap(EnumSet<ReadOption> opts) {
+    return null;
+  }
+}
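
One piece of arithmetic worth spelling out, since both readers rely on it: bytesNeededToFinish is the user's request plus the padding back to the previous chunk boundary, so checksums can be verified from a chunk-aligned start. A worked sketch with illustrative values:

    // Sketch: chunk-alignment padding as computed in the constructor above.
    // Values are illustrative; 512 is a common bytesPerChecksum setting.
    long startOffset = 1000;       // where the user wants to start reading
    long len = 100;                // how many bytes the user asked for
    long bytesPerChecksum = 512;

    // The DN must start at the chunk boundary at or below startOffset.
    long firstChunkOffset = (startOffset / bytesPerChecksum) * bytesPerChecksum; // 512

    // Total bytes the client must pull off the wire before it is done.
    long bytesNeededToFinish = len + (startOffset - firstChunkOffset); // 100 + 488 = 588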

http://git-wip-us.apache.org/repos/asf/hadoop/blob/826ae1c2/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader2.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader2.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader2.java
new file mode 100644
index 0000000..5541e6d
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader2.java
@@ -0,0 +1,480 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import java.io.BufferedOutputStream;
+import java.io.DataInputStream;
+import java.io.DataOutputStream;
+import java.io.IOException;
+import java.io.OutputStream;
+import java.net.InetSocketAddress;
+import java.nio.ByteBuffer;
+import java.nio.channels.ReadableByteChannel;
+import java.util.EnumSet;
+import java.util.UUID;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.fs.ReadOption;
+import org.apache.hadoop.hdfs.net.Peer;
+import org.apache.hadoop.hdfs.protocol.DatanodeID;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil;
+import org.apache.hadoop.hdfs.protocol.datatransfer.PacketHeader;
+import org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver;
+import org.apache.hadoop.hdfs.protocol.datatransfer.Sender;
+import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto;
+import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto;
+import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
+import org.apache.hadoop.hdfs.protocolPB.PBHelperClient;
+import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
+import org.apache.hadoop.hdfs.server.datanode.CachingStrategy;
+import org.apache.hadoop.hdfs.shortcircuit.ClientMmap;
+import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.util.DataChecksum;
+import org.apache.htrace.Sampler;
+import org.apache.htrace.Trace;
+import org.apache.htrace.TraceScope;
+
+import com.google.common.annotations.VisibleForTesting;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * This is a wrapper around connection to datanode
+ * and understands checksum, offset etc.
+ *
+ * Terminology:
+ * <dl>
+ * <dt>block</dt>
+ *   <dd>The hdfs block, typically large (~64MB).
+ *   </dd>
+ * <dt>chunk</dt>
+ *   <dd>A block is divided into chunks, each comes with a checksum.
+ *       We want transfers to be chunk-aligned, to be able to
+ *       verify checksums.
+ *   </dd>
+ * <dt>packet</dt>
+ *   <dd>A grouping of chunks used for transport. It contains a
+ *       header, followed by checksum data, followed by real data.
+ *   </dd>
+ * </dl>
+ * Please see DataNode for the RPC specification.
+ *
+ * This is a new implementation introduced in Hadoop 0.23 which
+ * is more efficient and simpler than the older BlockReader
+ * implementation. It should be renamed to RemoteBlockReader
+ * once we are confident in it.
+ */
+@InterfaceAudience.Private
+public class RemoteBlockReader2  implements BlockReader {
+
+  static final Logger LOG = LoggerFactory.getLogger(RemoteBlockReader2.class);
+  static final int TCP_WINDOW_SIZE = 128 * 1024; // 128 KB
+
+  final private Peer peer;
+  final private DatanodeID datanodeID;
+  final private PeerCache peerCache;
+  final private long blockId;
+  private final ReadableByteChannel in;
+
+  private DataChecksum checksum;
+  private final PacketReceiver packetReceiver = new PacketReceiver(true);
+
+  private ByteBuffer curDataSlice = null;
+
+  /** sequence number of the last packet received */
+  private long lastSeqNo = -1;
+
+  /** offset in block where reader wants to actually read */
+  private long startOffset;
+  private final String filename;
+
+  private final int bytesPerChecksum;
+  private final int checksumSize;
+
+  /**
+   * The total number of bytes we need to transfer from the DN.
+   * This is the amount that the user has requested plus some padding
+   * at the beginning so that the read can begin on a chunk boundary.
+   */
+  private long bytesNeededToFinish;
+
+  /**
+   * True if we are reading from a local DataNode.
+   */
+  private final boolean isLocal;
+
+  private final boolean verifyChecksum;
+
+  private boolean sentStatusCode = false;
+
+  @VisibleForTesting
+  public Peer getPeer() {
+    return peer;
+  }
+  
+  @Override
+  public synchronized int read(byte[] buf, int off, int len) 
+                               throws IOException {
+
+    UUID randomId = null;
+    if (LOG.isTraceEnabled()) {
+      randomId = UUID.randomUUID();
+      LOG.trace(String.format("Starting read #%s file %s from datanode %s",
+        randomId.toString(), this.filename,
+        this.datanodeID.getHostName()));
+    }
+
+    if (curDataSlice == null ||
+        (curDataSlice.remaining() == 0 && bytesNeededToFinish > 0)) {
+      TraceScope scope = Trace.startSpan(
+          "RemoteBlockReader2#readNextPacket(" + blockId + ")", Sampler.NEVER);
+      try {
+        readNextPacket();
+      } finally {
+        scope.close();
+      }
+    }
+
+    if (LOG.isTraceEnabled()) {
+      LOG.trace(String.format("Finishing read #" + randomId));
+    }
+
+    if (curDataSlice.remaining() == 0) {
+      // we're at EOF now
+      return -1;
+    }
+    
+    int nRead = Math.min(curDataSlice.remaining(), len);
+    curDataSlice.get(buf, off, nRead);
+    
+    return nRead;
+  }
+
+
+  @Override
+  public synchronized int read(ByteBuffer buf) throws IOException {
+    if (curDataSlice == null ||
+        (curDataSlice.remaining() == 0 && bytesNeededToFinish > 0)) {
+      TraceScope scope = Trace.startSpan(
+          "RemoteBlockReader2#readNextPacket(" + blockId + ")", Sampler.NEVER);
+      try {
+        readNextPacket();
+      } finally {
+        scope.close();
+      }
+    }
+    if (curDataSlice.remaining() == 0) {
+      // we're at EOF now
+      return -1;
+    }
+
+    int nRead = Math.min(curDataSlice.remaining(), buf.remaining());
+    ByteBuffer writeSlice = curDataSlice.duplicate();
+    writeSlice.limit(writeSlice.position() + nRead);
+    buf.put(writeSlice);
+    curDataSlice.position(writeSlice.position());
+
+    return nRead;
+  }
+
+  private void readNextPacket() throws IOException {
+    //Read packet headers.
+    packetReceiver.receiveNextPacket(in);
+
+    PacketHeader curHeader = packetReceiver.getHeader();
+    curDataSlice = packetReceiver.getDataSlice();
+    assert curDataSlice.capacity() == curHeader.getDataLen();
+    
+    if (LOG.isTraceEnabled()) {
+      LOG.trace("DFSClient readNextPacket got header " + curHeader);
+    }
+
+    // Sanity check the lengths
+    if (!curHeader.sanityCheck(lastSeqNo)) {
+         throw new IOException("BlockReader: error in packet header " +
+                               curHeader);
+    }
+    
+    if (curHeader.getDataLen() > 0) {
+      int chunks = 1 + (curHeader.getDataLen() - 1) / bytesPerChecksum;
+      int checksumsLen = chunks * checksumSize;
+
+      assert packetReceiver.getChecksumSlice().capacity() == checksumsLen :
+        "checksum slice capacity=" + packetReceiver.getChecksumSlice().capacity() + 
+          " checksumsLen=" + checksumsLen;
+      
+      lastSeqNo = curHeader.getSeqno();
+      if (verifyChecksum && curDataSlice.remaining() > 0) {
+        // N.B.: the checksum error offset reported here is actually
+        // relative to the start of the block, not the start of the file.
+        // This is slightly misleading, but preserves the behavior from
+        // the older BlockReader.
+        checksum.verifyChunkedSums(curDataSlice,
+            packetReceiver.getChecksumSlice(),
+            filename, curHeader.getOffsetInBlock());
+      }
+      bytesNeededToFinish -= curHeader.getDataLen();
+    }    
+    
+    // First packet will include some data prior to the first byte
+    // the user requested. Skip it.
+    if (curHeader.getOffsetInBlock() < startOffset) {
+      int newPos = (int) (startOffset - curHeader.getOffsetInBlock());
+      curDataSlice.position(newPos);
+    }
+
+    // If we've now satisfied the whole client read, read one last packet
+    // header, which should be empty
+    if (bytesNeededToFinish <= 0) {
+      readTrailingEmptyPacket();
+      if (verifyChecksum) {
+        sendReadResult(Status.CHECKSUM_OK);
+      } else {
+        sendReadResult(Status.SUCCESS);
+      }
+    }
+  }
+  
+  @Override
+  public synchronized long skip(long n) throws IOException {
+    /* How can we make sure we don't throw a ChecksumException, at least
+     * in the majority of cases? This implementation can throw one. */
+    long skipped = 0;
+    while (skipped < n) {
+      long needToSkip = n - skipped;
+      if (curDataSlice == null ||
+          (curDataSlice.remaining() == 0 && bytesNeededToFinish > 0)) {
+        readNextPacket();
+      }
+      if (curDataSlice.remaining() == 0) {
+        // we're at EOF now
+        break;
+      }
+
+      int skip = (int)Math.min(curDataSlice.remaining(), needToSkip);
+      curDataSlice.position(curDataSlice.position() + skip);
+      skipped += skip;
+    }
+    return skipped;
+  }
+
+  private void readTrailingEmptyPacket() throws IOException {
+    if (LOG.isTraceEnabled()) {
+      LOG.trace("Reading empty packet at end of read");
+    }
+    
+    packetReceiver.receiveNextPacket(in);
+
+    PacketHeader trailer = packetReceiver.getHeader();
+    if (!trailer.isLastPacketInBlock() ||
+       trailer.getDataLen() != 0) {
+      throw new IOException("Expected empty end-of-read packet! Header: " +
+                            trailer);
+    }
+  }
+
+  protected RemoteBlockReader2(String file, String bpid, long blockId,
+      DataChecksum checksum, boolean verifyChecksum,
+      long startOffset, long firstChunkOffset, long bytesToRead, Peer peer,
+      DatanodeID datanodeID, PeerCache peerCache) {
+    this.isLocal = DFSUtilClient.isLocalAddress(NetUtils.
+        createSocketAddr(datanodeID.getXferAddr()));
+    // Path is used only for printing block and file information in debug
+    this.peer = peer;
+    this.datanodeID = datanodeID;
+    this.in = peer.getInputStreamChannel();
+    this.checksum = checksum;
+    this.verifyChecksum = verifyChecksum;
+    this.startOffset = Math.max( startOffset, 0 );
+    this.filename = file;
+    this.peerCache = peerCache;
+    this.blockId = blockId;
+
+    // The total number of bytes that we need to transfer from the DN is
+    // the amount that the user wants (bytesToRead), plus the padding at
+    // the beginning in order to chunk-align. Note that the DN may elect
+    // to send more than this amount if the read starts/ends mid-chunk.
+    this.bytesNeededToFinish = bytesToRead + (startOffset - firstChunkOffset);
+    bytesPerChecksum = this.checksum.getBytesPerChecksum();
+    checksumSize = this.checksum.getChecksumSize();
+  }
+
+
+  @Override
+  public synchronized void close() throws IOException {
+    packetReceiver.close();
+    startOffset = -1;
+    checksum = null;
+    if (peerCache != null && sentStatusCode) {
+      peerCache.put(datanodeID, peer);
+    } else {
+      peer.close();
+    }
+
+    // in will be closed when its Socket is closed.
+  }
+  
+  /**
+   * When the reader reaches end of the read, it sends a status response
+   * (e.g. CHECKSUM_OK) to the DN. Failure to do so could lead to the DN
+   * closing our connection (which we will re-open), but won't affect
+   * data correctness.
+   */
+  void sendReadResult(Status statusCode) {
+    assert !sentStatusCode : "already sent status code to " + peer;
+    try {
+      writeReadResult(peer.getOutputStream(), statusCode);
+      sentStatusCode = true;
+    } catch (IOException e) {
+      // It's ok not to be able to send this. But something is probably wrong.
+      LOG.info("Could not send read status (" + statusCode + ") to datanode " +
+               peer.getRemoteAddressString() + ": " + e.getMessage());
+    }
+  }
+
+  /**
+   * Serialize the actual read result on the wire.
+   */
+  static void writeReadResult(OutputStream out, Status statusCode)
+      throws IOException {
+    
+    ClientReadStatusProto.newBuilder()
+      .setStatus(statusCode)
+      .build()
+      .writeDelimitedTo(out);
+
+    out.flush();
+  }
+  
+  /**
+   * File name to print when accessing a block directly (from servlets)
+   * @param s Address of the block location
+   * @param poolId Block pool ID of the block
+   * @param blockId Block ID of the block
+   * @return string that has a file name for debug purposes
+   */
+  public static String getFileName(final InetSocketAddress s,
+      final String poolId, final long blockId) {
+    return s.toString() + ":" + poolId + ":" + blockId;
+  }
+
+  @Override
+  public int readAll(byte[] buf, int offset, int len) throws IOException {
+    return BlockReaderUtil.readAll(this, buf, offset, len);
+  }
+
+  @Override
+  public void readFully(byte[] buf, int off, int len) throws IOException {
+    BlockReaderUtil.readFully(this, buf, off, len);
+  }
+  
+  /**
+   * Create a new BlockReader specifically to satisfy a read.
+   * This method also sends the OP_READ_BLOCK request.
+   *
+   * @param file  File location
+   * @param block  The block object
+   * @param blockToken  The block token for security
+   * @param startOffset  The read offset, relative to block head
+   * @param len  The number of bytes to read
+   * @param verifyChecksum  Whether to verify checksum
+   * @param clientName  Client name
+   * @param peer  The Peer to use
+   * @param datanodeID  The DatanodeID this peer is connected to
+   * @return New BlockReader instance, or null on error.
+   */
+  public static BlockReader newBlockReader(String file,
+                                     ExtendedBlock block,
+                                     Token<BlockTokenIdentifier> blockToken,
+                                     long startOffset, long len,
+                                     boolean verifyChecksum,
+                                     String clientName,
+                                     Peer peer, DatanodeID datanodeID,
+                                     PeerCache peerCache,
+                                     CachingStrategy cachingStrategy) throws IOException {
+    // in and out will be closed when sock is closed (by the caller)
+    final DataOutputStream out = new DataOutputStream(new BufferedOutputStream(
+          peer.getOutputStream()));
+    new Sender(out).readBlock(block, blockToken, clientName, startOffset, len,
+        verifyChecksum, cachingStrategy);
+
+    //
+    // Get bytes in block
+    //
+    DataInputStream in = new DataInputStream(peer.getInputStream());
+
+    BlockOpResponseProto status = BlockOpResponseProto.parseFrom(
+        PBHelperClient.vintPrefixed(in));
+    checkSuccess(status, peer, block, file);
+    ReadOpChecksumInfoProto checksumInfo =
+      status.getReadOpChecksumInfo();
+    DataChecksum checksum = DataTransferProtoUtil.fromProto(
+        checksumInfo.getChecksum());
+    //Warning when we get CHECKSUM_NULL?
+
+    // Read the first chunk offset.
+    long firstChunkOffset = checksumInfo.getChunkOffset();
+
+    if ( firstChunkOffset < 0 || firstChunkOffset > startOffset ||
+        firstChunkOffset <= (startOffset - checksum.getBytesPerChecksum())) {
+      throw new IOException("BlockReader: error in first chunk offset (" +
+                            firstChunkOffset + ") startOffset is " +
+                            startOffset + " for file " + file);
+    }
+
+    return new RemoteBlockReader2(file, block.getBlockPoolId(), block.getBlockId(),
+        checksum, verifyChecksum, startOffset, firstChunkOffset, len, peer,
+        datanodeID, peerCache);
+  }
+
+  static void checkSuccess(
+      BlockOpResponseProto status, Peer peer,
+      ExtendedBlock block, String file)
+      throws IOException {
+    String logInfo = "for OP_READ_BLOCK"
+      + ", self=" + peer.getLocalAddressString()
+      + ", remote=" + peer.getRemoteAddressString()
+      + ", for file " + file
+      + ", for pool " + block.getBlockPoolId()
+      + " block " + block.getBlockId() + "_" + block.getGenerationStamp();
+    DataTransferProtoUtil.checkBlockOpStatus(status, logInfo);
+  }
+  
+  @Override
+  public int available() throws IOException {
+    // An optimistic estimate of how much data is available
+    // to us without doing network I/O.
+    return TCP_WINDOW_SIZE;
+  }
+  
+  @Override
+  public boolean isLocal() {
+    return isLocal;
+  }
+  
+  @Override
+  public boolean isShortCircuit() {
+    return false;
+  }
+
+  @Override
+  public ClientMmap getClientMmap(EnumSet<ReadOption> opts) {
+    return null;
+  }
+}
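
The block/chunk/packet terminology in the class comment above cashes out in readNextPacket(): a packet carrying dataLen bytes spans ceil(dataLen / bytesPerChecksum) chunks, and each chunk contributes one checksum. A small sketch of that sizing, with illustrative numbers:

    // Sketch: per-packet checksum sizing, mirroring readNextPacket() above.
    int dataLen = 65000;          // illustrative packet payload size
    int bytesPerChecksum = 512;   // chunk size
    int checksumSize = 4;         // e.g. a CRC32 checksum is 4 bytes

    int chunks = 1 + (dataLen - 1) / bytesPerChecksum;  // ceiling division: 127
    int checksumsLen = chunks * checksumSize;           // 508 checksum bytes

    // The PacketReceiver's checksum slice must have exactly this capacity,
    // which is what the assert in readNextPacket() verifies.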

http://git-wip-us.apache.org/repos/asf/hadoop/blob/826ae1c2/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/PacketHeader.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/PacketHeader.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/PacketHeader.java
new file mode 100644
index 0000000..c9966a7
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/PacketHeader.java
@@ -0,0 +1,214 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.protocol.datatransfer;
+
+import java.io.DataInputStream;
+import java.io.DataOutputStream;
+import java.io.IOException;
+import java.nio.ByteBuffer;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto;
+import org.apache.hadoop.hdfs.util.ByteBufferOutputStream;
+
+import com.google.common.base.Preconditions;
+import com.google.common.primitives.Shorts;
+import com.google.common.primitives.Ints;
+import com.google.protobuf.InvalidProtocolBufferException;
+
+/**
+ * Header data for each packet that goes through the read/write pipelines.
+ * Includes all of the information about the packet, excluding checksums and
+ * actual data.
+ * 
+ * This data includes:
+ *  - the offset in bytes into the HDFS block of the data in this packet
+ *  - the sequence number of this packet in the pipeline
+ *  - whether or not this is the last packet in the pipeline
+ *  - the length of the data in this packet
+ *  - whether or not this packet should be synced by the DNs.
+ *  
+ * When serialized, this header is written out as a protocol buffer, preceded
+ * by a 4-byte integer holding the packet length (which counts itself, the
+ * checksums and the data, but not this header) and a 2-byte short holding
+ * the header length.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public class PacketHeader {
+  private static final int MAX_PROTO_SIZE = 
+    PacketHeaderProto.newBuilder()
+      .setOffsetInBlock(0)
+      .setSeqno(0)
+      .setLastPacketInBlock(false)
+      .setDataLen(0)
+      .setSyncBlock(false)
+      .build().getSerializedSize();
+  public static final int PKT_LENGTHS_LEN =
+      Ints.BYTES + Shorts.BYTES;
+  public static final int PKT_MAX_HEADER_LEN =
+      PKT_LENGTHS_LEN + MAX_PROTO_SIZE;
+
+  private int packetLen;
+  private PacketHeaderProto proto;
+
+  public PacketHeader() {
+  }
+
+  public PacketHeader(int packetLen, long offsetInBlock, long seqno,
+                      boolean lastPacketInBlock, int dataLen, boolean syncBlock) {
+    this.packetLen = packetLen;
+    Preconditions.checkArgument(packetLen >= Ints.BYTES,
+        "packet len %s should always be at least 4 bytes",
+        packetLen);
+    
+    PacketHeaderProto.Builder builder = PacketHeaderProto.newBuilder()
+      .setOffsetInBlock(offsetInBlock)
+      .setSeqno(seqno)
+      .setLastPacketInBlock(lastPacketInBlock)
+      .setDataLen(dataLen);
+      
+    if (syncBlock) {
+      // Only set syncBlock if it is specified.
+      // This is wire-incompatible with Hadoop 2.0.0-alpha due to HDFS-3721
+      // because it changes the length of the packet header, and BlockReceiver
+      // in that version did not support variable-length headers.
+      builder.setSyncBlock(syncBlock);
+    }
+      
+    proto = builder.build();
+  }
+
+  public int getDataLen() {
+    return proto.getDataLen();
+  }
+
+  public boolean isLastPacketInBlock() {
+    return proto.getLastPacketInBlock();
+  }
+
+  public long getSeqno() {
+    return proto.getSeqno();
+  }
+
+  public long getOffsetInBlock() {
+    return proto.getOffsetInBlock();
+  }
+
+  public int getPacketLen() {
+    return packetLen;
+  }
+
+  public boolean getSyncBlock() {
+    return proto.getSyncBlock();
+  }
+
+  @Override
+  public String toString() {
+    return "PacketHeader with packetLen=" + packetLen +
+      " header data: " + 
+      proto.toString();
+  }
+  
+  public void setFieldsFromData(
+      int packetLen, byte[] headerData) throws InvalidProtocolBufferException {
+    this.packetLen = packetLen;
+    proto = PacketHeaderProto.parseFrom(headerData);
+  }
+  
+  public void readFields(ByteBuffer buf) throws IOException {
+    packetLen = buf.getInt();
+    short protoLen = buf.getShort();
+    byte[] data = new byte[protoLen];
+    buf.get(data);
+    proto = PacketHeaderProto.parseFrom(data);
+  }
+  
+  public void readFields(DataInputStream in) throws IOException {
+    this.packetLen = in.readInt();
+    short protoLen = in.readShort();
+    byte[] data = new byte[protoLen];
+    in.readFully(data);
+    proto = PacketHeaderProto.parseFrom(data);
+  }
+
+  /**
+   * @return the number of bytes necessary to write out this header,
+   * including the length-prefixing of the payload and header
+   */
+  public int getSerializedSize() {
+    return PKT_LENGTHS_LEN + proto.getSerializedSize();
+  }
+
+  /**
+   * Write the header into the buffer.
+   * This requires that PKT_MAX_HEADER_LEN bytes are available.
+   */
+  public void putInBuffer(final ByteBuffer buf) {
+    assert proto.getSerializedSize() <= MAX_PROTO_SIZE
+      : "Expected " + (MAX_PROTO_SIZE) + " got: " + proto.getSerializedSize();
+    try {
+      buf.putInt(packetLen);
+      buf.putShort((short) proto.getSerializedSize());
+      proto.writeTo(new ByteBufferOutputStream(buf));
+    } catch (IOException e) {
+      throw new RuntimeException(e);
+    }
+  }
+  
+  public void write(DataOutputStream out) throws IOException {
+    assert proto.getSerializedSize() <= MAX_PROTO_SIZE
+    : "Expected " + (MAX_PROTO_SIZE) + " got: " + proto.getSerializedSize();
+    out.writeInt(packetLen);
+    out.writeShort(proto.getSerializedSize());
+    proto.writeTo(out);
+  }
+  
+  public byte[] getBytes() {
+    ByteBuffer buf = ByteBuffer.allocate(getSerializedSize());
+    putInBuffer(buf);
+    return buf.array();
+  }
+
+  /**
+   * Perform a sanity check on the packet, returning true if it is sane.
+   * @param lastSeqNo the previous sequence number received - we expect the current
+   * sequence number to be larger by 1.
+   */
+  public boolean sanityCheck(long lastSeqNo) {
+    // We should only have a non-positive data length for the last packet
+    if (proto.getDataLen() <= 0 && !proto.getLastPacketInBlock()) return false;
+    // The last packet should not contain data
+    if (proto.getLastPacketInBlock() && proto.getDataLen() != 0) return false;
+    // Seqnos should always increase by 1 with each packet received
+    if (proto.getSeqno() != lastSeqNo + 1) return false;
+    return true;
+  }
+
+  @Override
+  public boolean equals(Object o) {
+    if (!(o instanceof PacketHeader)) return false;
+    PacketHeader other = (PacketHeader)o;
+    return this.proto.equals(other.proto);
+  }
+
+  @Override
+  public int hashCode() {
+    return (int)proto.getSeqno();
+  }
+}
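
A hedged sketch of the two length prefixes the class javadoc describes: a
32-bit PLEN that counts itself plus the checksums plus the data (the protobuf
header is covered by the separate 16-bit HLEN). Only java.nio is used, and
the class and method names are illustrative:

    import java.nio.ByteBuffer;

    class PacketPrefixSketch {
      static ByteBuffer lengthPrefixes(int checksumsLen, int dataLen,
          short headerLen) {
        int plen = 4 + checksumsLen + dataLen; // PLEN includes its own 4 bytes
        ByteBuffer buf = ByteBuffer.allocate(6);
        buf.putInt(plen);                      // 32-bit packet length
        buf.putShort(headerLen);               // 16-bit header length
        buf.flip();                            // ready to be written out
        return buf;
      }
    }

PacketHeader.putInBuffer() produces the same six prefix bytes before letting
the protobuf message serialize itself into the remainder of the buffer.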

http://git-wip-us.apache.org/repos/asf/hadoop/blob/826ae1c2/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/PacketReceiver.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/PacketReceiver.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/PacketReceiver.java
new file mode 100644
index 0000000..c4093b1
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/PacketReceiver.java
@@ -0,0 +1,310 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.protocol.datatransfer;
+
+import java.io.Closeable;
+import java.io.DataOutputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.nio.ByteBuffer;
+import java.nio.channels.ReadableByteChannel;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.util.DirectBufferPool;
+import org.apache.hadoop.io.IOUtils;
+
+import com.google.common.base.Preconditions;
+import com.google.common.primitives.Ints;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Class to handle reading packets one-at-a-time from the wire.
+ * These packets are used both for reading and writing data to/from
+ * DataNodes.
+ */
+@InterfaceAudience.Private
+public class PacketReceiver implements Closeable {
+
+  /**
+   * The max size of any single packet. This prevents OOMEs when
+   * invalid data is sent.
+   */
+  private static final int MAX_PACKET_SIZE = 16 * 1024 * 1024;
+
+  static final Logger LOG = LoggerFactory.getLogger(PacketReceiver.class);
+  
+  private static final DirectBufferPool bufferPool = new DirectBufferPool();
+  private final boolean useDirectBuffers;
+
+  /**
+   * The entirety of the most recently read packet.
+   * The first PKT_LENGTHS_LEN bytes of this buffer are the
+   * length prefixes.
+   */
+  private ByteBuffer curPacketBuf = null;
+  
+  /**
+   * A slice of {@link #curPacketBuf} which contains just the checksums.
+   */
+  private ByteBuffer curChecksumSlice = null;
+  
+  /**
+   * A slice of {@link #curPacketBuf} which contains just the data.
+   */
+  private ByteBuffer curDataSlice = null;
+
+  /**
+   * The packet header of the most recently read packet.
+   */
+  private PacketHeader curHeader;
+  
+  public PacketReceiver(boolean useDirectBuffers) {
+    this.useDirectBuffers = useDirectBuffers;
+    reallocPacketBuf(PacketHeader.PKT_LENGTHS_LEN);
+  }
+
+  public PacketHeader getHeader() {
+    return curHeader;
+  }
+
+  public ByteBuffer getDataSlice() {
+    return curDataSlice;
+  }
+  
+  public ByteBuffer getChecksumSlice() {
+    return curChecksumSlice;
+  }
+
+  /**
+   * Reads all of the data for the next packet into the appropriate buffers.
+   * 
+   * The data slice and checksum slice members will be set to point to the
+   * user data and corresponding checksums. The header will be parsed and
+   * set.
+   */
+  public void receiveNextPacket(ReadableByteChannel in) throws IOException {
+    doRead(in, null);
+  }
+
+  /**
+   * @see #receiveNextPacket(ReadableByteChannel)
+   */
+  public void receiveNextPacket(InputStream in) throws IOException {
+    doRead(null, in);
+  }
+
+  private void doRead(ReadableByteChannel ch, InputStream in)
+      throws IOException {
+    // Each packet looks like:
+    //   PLEN    HLEN      HEADER     CHECKSUMS  DATA
+    //   32-bit  16-bit   <protobuf>  <variable length>
+    //
+    // PLEN:      Payload length
+    //            = length(PLEN) + length(CHECKSUMS) + length(DATA)
+    //            This length includes its own encoded length in
+    //            the sum for historical reasons.
+    //
+    // HLEN:      Header length
+    //            = length(HEADER)
+    //
+    // HEADER:    the actual packet header fields, encoded in protobuf
+    // CHECKSUMS: the crcs for the data chunk. May be missing if
+    //            checksums were not requested
+    // DATA       the actual block data
+    Preconditions.checkState(curHeader == null || !curHeader.isLastPacketInBlock());
+
+    curPacketBuf.clear();
+    curPacketBuf.limit(PacketHeader.PKT_LENGTHS_LEN);
+    doReadFully(ch, in, curPacketBuf);
+    curPacketBuf.flip();
+    int payloadLen = curPacketBuf.getInt();
+    
+    if (payloadLen < Ints.BYTES) {
+      // The "payload length" includes its own length. Therefore it
+      // should never be less than 4 bytes
+      throw new IOException("Invalid payload length " +
+          payloadLen);
+    }
+    int dataPlusChecksumLen = payloadLen - Ints.BYTES;
+    int headerLen = curPacketBuf.getShort();
+    if (headerLen < 0) {
+      throw new IOException("Invalid header length " + headerLen);
+    }
+    
+    if (LOG.isTraceEnabled()) {
+      LOG.trace("readNextPacket: dataPlusChecksumLen = " + dataPlusChecksumLen +
+          " headerLen = " + headerLen);
+    }
+    
+    // Sanity check the buffer size so we don't allocate too much memory
+    // and OOME.
+    int totalLen = payloadLen + headerLen;
+    if (totalLen < 0 || totalLen > MAX_PACKET_SIZE) {
+      throw new IOException("Incorrect value for packet payload size: " +
+                            payloadLen);
+    }
+
+    // Make sure we have space for the whole packet, and
+    // read it.
+    reallocPacketBuf(PacketHeader.PKT_LENGTHS_LEN +
+        dataPlusChecksumLen + headerLen);
+    curPacketBuf.clear();
+    curPacketBuf.position(PacketHeader.PKT_LENGTHS_LEN);
+    curPacketBuf.limit(PacketHeader.PKT_LENGTHS_LEN +
+        dataPlusChecksumLen + headerLen);
+    doReadFully(ch, in, curPacketBuf);
+    curPacketBuf.flip();
+    curPacketBuf.position(PacketHeader.PKT_LENGTHS_LEN);
+
+    // Extract the header from the front of the buffer (after the length prefixes)
+    byte[] headerBuf = new byte[headerLen];
+    curPacketBuf.get(headerBuf);
+    if (curHeader == null) {
+      curHeader = new PacketHeader();
+    }
+    curHeader.setFieldsFromData(payloadLen, headerBuf);
+    
+    // Compute the sub-slices of the packet
+    int checksumLen = dataPlusChecksumLen - curHeader.getDataLen();
+    if (checksumLen < 0) {
+      throw new IOException("Invalid packet: data length in packet header " + 
+          "exceeds data length received. dataPlusChecksumLen=" +
+          dataPlusChecksumLen + " header: " + curHeader); 
+    }
+    
+    reslicePacket(headerLen, checksumLen, curHeader.getDataLen());
+  }
+  
+  /**
+   * Write the most recently read packet, exactly as it appeared on the
+   * wire, to the given output stream.
+   */
+  public void mirrorPacketTo(DataOutputStream mirrorOut) throws IOException {
+    Preconditions.checkState(!useDirectBuffers,
+        "Currently only supported for non-direct buffers");
+    mirrorOut.write(curPacketBuf.array(),
+        curPacketBuf.arrayOffset(),
+        curPacketBuf.remaining());
+  }
+
+  
+  private static void doReadFully(ReadableByteChannel ch, InputStream in,
+      ByteBuffer buf) throws IOException {
+    if (ch != null) {
+      readChannelFully(ch, buf);
+    } else {
+      Preconditions.checkState(!buf.isDirect(),
+          "Must not use direct buffers with InputStream API");
+      IOUtils.readFully(in, buf.array(),
+          buf.arrayOffset() + buf.position(),
+          buf.remaining());
+      buf.position(buf.position() + buf.remaining());
+    }
+  }
+
+  private void reslicePacket(
+      int headerLen, int checksumsLen, int dataLen) {
+    // Packet structure (refer to doRead() for details):
+    //   PLEN    HLEN      HEADER     CHECKSUMS  DATA
+    //   32-bit  16-bit   <protobuf>  <variable length>
+    //   |--- lenThroughHeader ----|
+    //   |----------- lenThroughChecksums   ----|
+    //   |------------------- lenThroughData    ------| 
+    int lenThroughHeader = PacketHeader.PKT_LENGTHS_LEN + headerLen;
+    int lenThroughChecksums = lenThroughHeader + checksumsLen;
+    int lenThroughData = lenThroughChecksums + dataLen;
+
+    assert dataLen >= 0 : "invalid datalen: " + dataLen;
+    assert curPacketBuf.position() == lenThroughHeader;
+    assert curPacketBuf.limit() == lenThroughData :
+      "headerLen= " + headerLen + " clen=" + checksumsLen + " dlen=" + dataLen +
+      " rem=" + curPacketBuf.remaining();
+
+    // Slice the checksums.
+    curPacketBuf.position(lenThroughHeader);
+    curPacketBuf.limit(lenThroughChecksums);
+    curChecksumSlice = curPacketBuf.slice();
+
+    // Slice the data.
+    curPacketBuf.position(lenThroughChecksums);
+    curPacketBuf.limit(lenThroughData);
+    curDataSlice = curPacketBuf.slice();
+    
+    // Reset buffer to point to the entirety of the packet (including
+    // length prefixes)
+    curPacketBuf.position(0);
+    curPacketBuf.limit(lenThroughData);
+  }
+
+  
+  private static void readChannelFully(ReadableByteChannel ch, ByteBuffer buf)
+      throws IOException {
+    while (buf.remaining() > 0) {
+      int n = ch.read(buf);
+      if (n < 0) {
+        throw new IOException("Premature EOF reading from " + ch);
+      }
+    }
+  }
+  
+  private void reallocPacketBuf(int atLeastCapacity) {
+    // Realloc the buffer if this packet is longer than the previous
+    // one.
+    if (curPacketBuf == null ||
+        curPacketBuf.capacity() < atLeastCapacity) {
+      ByteBuffer newBuf;
+      if (useDirectBuffers) {
+        newBuf = bufferPool.getBuffer(atLeastCapacity);
+      } else {
+        newBuf = ByteBuffer.allocate(atLeastCapacity);
+      }
+      // If reallocing an existing buffer, copy the old packet length
+      // prefixes over
+      if (curPacketBuf != null) {
+        curPacketBuf.flip();
+        newBuf.put(curPacketBuf);
+      }
+      
+      returnPacketBufToPool();
+      curPacketBuf = newBuf;
+    }
+  }
+  
+  private void returnPacketBufToPool() {
+    if (curPacketBuf != null && curPacketBuf.isDirect()) {
+      bufferPool.returnBuffer(curPacketBuf);
+      curPacketBuf = null;
+    }
+  }
+
+  @Override // Closeable
+  public void close() {
+    returnPacketBufToPool();
+  }
+  
+  @Override
+  protected void finalize() throws Throwable {
+    try {
+      // just in case it didn't get closed, we
+      // may as well still try to return the buffer
+      returnPacketBufToPool();
+    } finally {
+      super.finalize();
+    }
+  }
+}
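
The slicing performed by reslicePacket() above is plain ByteBuffer
arithmetic. A minimal re-creation, under the assumption that the buffer has
capacity for the whole packet (names are illustrative):

    import java.nio.ByteBuffer;

    class PacketSliceSketch {
      // 6 = 4-byte PLEN + 2-byte HLEN, i.e. PacketHeader.PKT_LENGTHS_LEN.
      static ByteBuffer[] slices(ByteBuffer pkt, int headerLen,
          int checksumsLen, int dataLen) {
        int throughHeader = 6 + headerLen;
        int throughChecksums = throughHeader + checksumsLen;
        int throughData = throughChecksums + dataLen;

        pkt.clear();                        // position 0, limit = capacity
        pkt.position(throughHeader);
        pkt.limit(throughChecksums);
        ByteBuffer checksums = pkt.slice(); // zero-copy view of the CRCs

        pkt.position(throughChecksums);
        pkt.limit(throughData);
        ByteBuffer data = pkt.slice();      // zero-copy view of the payload

        pkt.position(0);                    // restore the full-packet view
        pkt.limit(throughData);
        return new ByteBuffer[] { checksums, data };
      }
    }

Because slice() shares the backing storage, the checksum and data views cost
no copying, which is why PacketReceiver can hand out getDataSlice() and
getChecksumSlice() cheaply after every packet.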

http://git-wip-us.apache.org/repos/asf/hadoop/blob/826ae1c2/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/ByteBufferOutputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/ByteBufferOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/ByteBufferOutputStream.java
new file mode 100644
index 0000000..31d4dcc
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/ByteBufferOutputStream.java
@@ -0,0 +1,49 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.util;
+
+import java.io.IOException;
+import java.io.OutputStream;
+import java.nio.ByteBuffer;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+/**
+ * OutputStream that writes into a {@link ByteBuffer}.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Stable
+public class ByteBufferOutputStream extends OutputStream {
+
+  private final ByteBuffer buf;
+
+  public ByteBufferOutputStream(ByteBuffer buf) {
+    this.buf = buf;
+  }
+
+  @Override
+  public void write(int b) throws IOException {
+    buf.put((byte)b);
+  }
+
+  @Override
+  public void write(byte[] b, int off, int len) throws IOException {
+    buf.put(b, off, len);
+  }
+}
\ No newline at end of file
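
A short usage sketch, assuming the class above is in scope; PacketHeader uses
it the same way to let a protobuf message serialize into a pre-sized buffer:

    import java.io.IOException;
    import java.nio.ByteBuffer;

    class ByteBufferOutputStreamDemo {
      static void demo() throws IOException {
        ByteBuffer buf = ByteBuffer.allocate(16);
        // Anything that writes to an OutputStream can now fill the buffer
        // directly, with no intermediate byte[] copy.
        new ByteBufferOutputStream(buf).write(new byte[] {1, 2, 3}, 0, 3);
        buf.flip(); // buf now exposes exactly the three written bytes
      }
    }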

http://git-wip-us.apache.org/repos/asf/hadoop/blob/826ae1c2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 7b5979e..ef8fac5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -867,6 +867,9 @@ Release 2.8.0 - UNRELEASED
 
     HDFS-8980. Remove unnecessary block replacement in INodeFile. (jing9)
 
+    HDFS-8990. Move RemoteBlockReader to hdfs-client module.
+    (Mingliang via wheat9)
+
   OPTIMIZATIONS
 
     HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/826ae1c2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index 3c49ef7..268a5b9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -203,7 +203,6 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
     DataEncryptionKeyFactory {
   public static final Log LOG = LogFactory.getLog(DFSClient.class);
   public static final long SERVER_DEFAULTS_VALIDITY_PERIOD = 60 * 60 * 1000L; // 1 hour
-  static final int TCP_WINDOW_SIZE = 128 * 1024; // 128 KB
 
   private final Configuration conf;
   private final DfsClientConf dfsClientConf;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/826ae1c2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader.java
deleted file mode 100644
index 015e154..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader.java
+++ /dev/null
@@ -1,508 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs;
-
-import java.io.BufferedInputStream;
-import java.io.BufferedOutputStream;
-import java.io.DataInputStream;
-import java.io.DataOutputStream;
-import java.io.IOException;
-import java.nio.ByteBuffer;
-import java.util.EnumSet;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.fs.FSInputChecker;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.ReadOption;
-import org.apache.hadoop.hdfs.net.Peer;
-import org.apache.hadoop.hdfs.protocol.Block;
-import org.apache.hadoop.hdfs.protocol.DatanodeID;
-import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
-import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil;
-import org.apache.hadoop.hdfs.protocol.datatransfer.PacketHeader;
-import org.apache.hadoop.hdfs.protocol.datatransfer.Sender;
-import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto;
-import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto;
-import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
-import org.apache.hadoop.hdfs.protocolPB.PBHelperClient;
-import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
-import org.apache.hadoop.hdfs.server.datanode.CachingStrategy;
-import org.apache.hadoop.hdfs.shortcircuit.ClientMmap;
-import org.apache.hadoop.io.IOUtils;
-import org.apache.hadoop.net.NetUtils;
-import org.apache.hadoop.security.token.Token;
-import org.apache.hadoop.util.DataChecksum;
-import org.apache.htrace.Sampler;
-import org.apache.htrace.Trace;
-import org.apache.htrace.TraceScope;
-
-
-/**
- * @deprecated this is an old implementation that is being left around
- * in case any issues spring up with the new {@link RemoteBlockReader2} implementation.
- * It will be removed in the next release.
- */
-@InterfaceAudience.Private
-@Deprecated
-public class RemoteBlockReader extends FSInputChecker implements BlockReader {
-  private final Peer peer;
-  private final DatanodeID datanodeID;
-  private final DataInputStream in;
-  private DataChecksum checksum;
-
-  /** offset in block of the last chunk received */
-  private long lastChunkOffset = -1;
-  private long lastChunkLen = -1;
-  private long lastSeqNo = -1;
-
-  /** offset in block where reader wants to actually read */
-  private long startOffset;
-
-  private final long blockId;
-
-  /** offset in block of first chunk - may be less than startOffset
-      if startOffset is not chunk-aligned */
-  private final long firstChunkOffset;
-
-  private final int bytesPerChecksum;
-  private final int checksumSize;
-
-  /**
-   * The total number of bytes we need to transfer from the DN.
-   * This is the amount that the user has requested plus some padding
-   * at the beginning so that the read can begin on a chunk boundary.
-   */
-  private final long bytesNeededToFinish;
-  
-  /**
-   * True if we are reading from a local DataNode.
-   */
-  private final boolean isLocal;
-
-  private boolean eos = false;
-  private boolean sentStatusCode = false;
-  
-  ByteBuffer checksumBytes = null;
-  /** Amount of unread data in the current received packet */
-  int dataLeft = 0;
-  
-  private final PeerCache peerCache;
-  
-  /* FSInputChecker interface */
-  
-  /* Same interface as java.io.InputStream#read(), used by
-   * DFSInputStream#read(). When there is a checksum error this violates
-   * the rule that a read should not modify the user buffer before a
-   * successful read, because data is copied into the user buffer before
-   * the checksum is verified.
-   */
-  @Override
-  public synchronized int read(byte[] buf, int off, int len) 
-                               throws IOException {
-    
-    // This has to be set here, *before* the skip, since we can
-    // hit EOS during the skip, in the case that our entire read
-    // is smaller than the checksum chunk.
-    boolean eosBefore = eos;
-
-    //for the first read, skip the extra bytes at the front.
-    if (lastChunkLen < 0 && startOffset > firstChunkOffset && len > 0) {
-      // Skip these bytes. But don't call this.skip()!
-      int toSkip = (int)(startOffset - firstChunkOffset);
-      if ( super.readAndDiscard(toSkip) != toSkip ) {
-        // should never happen
-        throw new IOException("Could not skip required number of bytes");
-      }
-    }
-    
-    int nRead = super.read(buf, off, len);
-
-    // if eos was set in the previous read, send a status code to the DN
-    if (eos && !eosBefore && nRead >= 0) {
-      if (needChecksum()) {
-        sendReadResult(peer, Status.CHECKSUM_OK);
-      } else {
-        sendReadResult(peer, Status.SUCCESS);
-      }
-    }
-    return nRead;
-  }
-
-  @Override
-  public synchronized long skip(long n) throws IOException {
-    /* How can we make sure we don't throw a ChecksumException, at least
-     * in the majority of cases? This implementation can throw one. */
-    long nSkipped = 0;
-    while (nSkipped < n) {
-      int toSkip = (int)Math.min(n-nSkipped, Integer.MAX_VALUE);
-      int ret = readAndDiscard(toSkip);
-      if (ret <= 0) {
-        return nSkipped;
-      }
-      nSkipped += ret;
-    }
-    return nSkipped;
-  }
-
-  @Override
-  public int read() throws IOException {
-    throw new IOException("read() is not expected to be invoked. " +
-                          "Use read(buf, off, len) instead.");
-  }
-  
-  @Override
-  public boolean seekToNewSource(long targetPos) throws IOException {
-    /* Checksum errors are handled outside the BlockReader. 
-     * DFSInputStream does not always call 'seekToNewSource'. In the 
-     * case of pread(), it just tries a different replica without seeking.
-     */ 
-    return false;
-  }
-  
-  @Override
-  public void seek(long pos) throws IOException {
-    throw new IOException("Seek() is not supported in RemoteBlockReader");
-  }
-
-  @Override
-  protected long getChunkPosition(long pos) {
-    throw new RuntimeException("getChunkPosition() is not supported, " +
-                               "since seek is not required");
-  }
-  
-  /**
-   * Makes sure that checksumBytes has enough capacity 
-   * and limit is set to the number of checksum bytes needed 
-   * to be read.
-   */
-  private void adjustChecksumBytes(int dataLen) {
-    int requiredSize = 
-      ((dataLen + bytesPerChecksum - 1)/bytesPerChecksum)*checksumSize;
-    if (checksumBytes == null || requiredSize > checksumBytes.capacity()) {
-      checksumBytes =  ByteBuffer.wrap(new byte[requiredSize]);
-    } else {
-      checksumBytes.clear();
-    }
-    checksumBytes.limit(requiredSize);
-  }
-  
-  @Override
-  protected synchronized int readChunk(long pos, byte[] buf, int offset, 
-                                       int len, byte[] checksumBuf) 
-                                       throws IOException {
-    TraceScope scope =
-        Trace.startSpan("RemoteBlockReader#readChunk(" + blockId + ")",
-            Sampler.NEVER);
-    try {
-      return readChunkImpl(pos, buf, offset, len, checksumBuf);
-    } finally {
-      scope.close();
-    }
-  }
-
-  private synchronized int readChunkImpl(long pos, byte[] buf, int offset,
-                                     int len, byte[] checksumBuf)
-                                     throws IOException {
-    // Read one chunk.
-    if (eos) {
-      // Already hit EOF
-      return -1;
-    }
-    
-    // Read one DATA_CHUNK.
-    long chunkOffset = lastChunkOffset;
-    if ( lastChunkLen > 0 ) {
-      chunkOffset += lastChunkLen;
-    }
-    
-    // pos is relative to the start of the first chunk of the read.
-    // chunkOffset is relative to the start of the block.
-    // This makes sure that the read passed from FSInputChecker is
-    // for the same chunk we expect to be reading from the DN.
-    if ( (pos + firstChunkOffset) != chunkOffset ) {
-      throw new IOException("Mismatch in pos : " + pos + " + " + 
-                            firstChunkOffset + " != " + chunkOffset);
-    }
-
-    // Read next packet if the previous packet has been read completely.
-    if (dataLeft <= 0) {
-      //Read packet headers.
-      PacketHeader header = new PacketHeader();
-      header.readFields(in);
-
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("DFSClient readChunk got header " + header);
-      }
-
-      // Sanity check the lengths
-      if (!header.sanityCheck(lastSeqNo)) {
-           throw new IOException("BlockReader: error in packet header " +
-                                 header);
-      }
-
-      lastSeqNo = header.getSeqno();
-      dataLeft = header.getDataLen();
-      adjustChecksumBytes(header.getDataLen());
-      if (header.getDataLen() > 0) {
-        IOUtils.readFully(in, checksumBytes.array(), 0,
-                          checksumBytes.limit());
-      }
-    }
-
-    // Sanity checks
-    assert len >= bytesPerChecksum;
-    assert checksum != null;
-    assert checksumSize == 0 || (checksumBuf.length % checksumSize == 0);
-
-
-    int checksumsToRead, bytesToRead;
-
-    if (checksumSize > 0) {
-
-      // How many chunks left in our packet - this is a ceiling
-      // since we may have a partial chunk at the end of the file
-      int chunksLeft = (dataLeft - 1) / bytesPerChecksum + 1;
-
-      // How many chunks we can fit in databuffer
-      //  - note this is a floor since we always read full chunks
-      int chunksCanFit = Math.min(len / bytesPerChecksum,
-                                  checksumBuf.length / checksumSize);
-
-      // How many chunks should we read
-      checksumsToRead = Math.min(chunksLeft, chunksCanFit);
-      // How many bytes should we actually read
-      bytesToRead = Math.min(
-        checksumsToRead * bytesPerChecksum, // full chunks
-        dataLeft); // in case we have a partial
-    } else {
-      // no checksum
-      bytesToRead = Math.min(dataLeft, len);
-      checksumsToRead = 0;
-    }
-
-    if ( bytesToRead > 0 ) {
-      // Assert we have enough space
-      assert bytesToRead <= len;
-      assert checksumBytes.remaining() >= checksumSize * checksumsToRead;
-      assert checksumBuf.length >= checksumSize * checksumsToRead;
-      IOUtils.readFully(in, buf, offset, bytesToRead);
-      checksumBytes.get(checksumBuf, 0, checksumSize * checksumsToRead);
-    }
-
-    dataLeft -= bytesToRead;
-    assert dataLeft >= 0;
-
-    lastChunkOffset = chunkOffset;
-    lastChunkLen = bytesToRead;
-
-    // If there's no data left in the current packet after satisfying
-    // this read, and we have satisfied the client read, we expect
-    // an empty packet header from the DN to signify this.
-    // Note that pos + bytesToRead may in fact be greater since the
-    // DN finishes off the entire last chunk.
-    if (dataLeft == 0 &&
-        pos + bytesToRead >= bytesNeededToFinish) {
-
-      // Read header
-      PacketHeader hdr = new PacketHeader();
-      hdr.readFields(in);
-
-      if (!hdr.isLastPacketInBlock() ||
-          hdr.getDataLen() != 0) {
-        throw new IOException("Expected empty end-of-read packet! Header: " +
-                              hdr);
-      }
-
-      eos = true;
-    }
-
-    if ( bytesToRead == 0 ) {
-      return -1;
-    }
-
-    return bytesToRead;
-  }
-  
-  private RemoteBlockReader(String file, String bpid, long blockId,
-      DataInputStream in, DataChecksum checksum, boolean verifyChecksum,
-      long startOffset, long firstChunkOffset, long bytesToRead, Peer peer,
-      DatanodeID datanodeID, PeerCache peerCache) {
-    // Path is used only for printing block and file information in debug
-    super(new Path("/" + Block.BLOCK_FILE_PREFIX + blockId +
-                    ":" + bpid + ":of:"+ file)/*too non path-like?*/,
-          1, verifyChecksum,
-          checksum.getChecksumSize() > 0? checksum : null, 
-          checksum.getBytesPerChecksum(),
-          checksum.getChecksumSize());
-
-    this.isLocal = DFSUtilClient.isLocalAddress(NetUtils.
-        createSocketAddr(datanodeID.getXferAddr()));
-    
-    this.peer = peer;
-    this.datanodeID = datanodeID;
-    this.in = in;
-    this.checksum = checksum;
-    this.startOffset = Math.max( startOffset, 0 );
-    this.blockId = blockId;
-
-    // The total number of bytes that we need to transfer from the DN is
-    // the amount that the user wants (bytesToRead), plus the padding at
-    // the beginning in order to chunk-align. Note that the DN may elect
-    // to send more than this amount if the read starts/ends mid-chunk.
-    this.bytesNeededToFinish = bytesToRead + (startOffset - firstChunkOffset);
-
-    this.firstChunkOffset = firstChunkOffset;
-    lastChunkOffset = firstChunkOffset;
-    lastChunkLen = -1;
-
-    bytesPerChecksum = this.checksum.getBytesPerChecksum();
-    checksumSize = this.checksum.getChecksumSize();
-    this.peerCache = peerCache;
-  }
-
-  /**
-   * Create a new BlockReader specifically to satisfy a read.
-   * This method also sends the OP_READ_BLOCK request.
-   *
-   * @param file  File location
-   * @param block  The block object
-   * @param blockToken  The block token for security
-   * @param startOffset  The read offset, relative to block head
-   * @param len  The number of bytes to read
-   * @param bufferSize  The IO buffer size (not the client buffer size)
-   * @param verifyChecksum  Whether to verify checksum
-   * @param clientName  Client name
-   * @return New BlockReader instance; on error an IOException is thrown.
-   */
-  public static RemoteBlockReader newBlockReader(String file,
-                                     ExtendedBlock block, 
-                                     Token<BlockTokenIdentifier> blockToken,
-                                     long startOffset, long len,
-                                     int bufferSize, boolean verifyChecksum,
-                                     String clientName, Peer peer,
-                                     DatanodeID datanodeID,
-                                     PeerCache peerCache,
-                                     CachingStrategy cachingStrategy)
-                                       throws IOException {
-    // in and out will be closed when sock is closed (by the caller)
-    final DataOutputStream out =
-        new DataOutputStream(new BufferedOutputStream(peer.getOutputStream()));
-    new Sender(out).readBlock(block, blockToken, clientName, startOffset, len,
-        verifyChecksum, cachingStrategy);
-    
-    //
-    // Get bytes in block, set streams
-    //
-
-    DataInputStream in = new DataInputStream(
-        new BufferedInputStream(peer.getInputStream(), bufferSize));
-    
-    BlockOpResponseProto status = BlockOpResponseProto.parseFrom(
-        PBHelperClient.vintPrefixed(in));
-    RemoteBlockReader2.checkSuccess(status, peer, block, file);
-    ReadOpChecksumInfoProto checksumInfo =
-      status.getReadOpChecksumInfo();
-    DataChecksum checksum = DataTransferProtoUtil.fromProto(
-        checksumInfo.getChecksum());
-    //Warning when we get CHECKSUM_NULL?
-    
-    // Read the first chunk offset.
-    long firstChunkOffset = checksumInfo.getChunkOffset();
-    
-    if ( firstChunkOffset < 0 || firstChunkOffset > startOffset ||
-        firstChunkOffset <= (startOffset - checksum.getBytesPerChecksum())) {
-      throw new IOException("BlockReader: error in first chunk offset (" +
-                            firstChunkOffset + ") startOffset is " + 
-                            startOffset + " for file " + file);
-    }
-
-    return new RemoteBlockReader(file, block.getBlockPoolId(), block.getBlockId(),
-        in, checksum, verifyChecksum, startOffset, firstChunkOffset, len,
-        peer, datanodeID, peerCache);
-  }
-
-  @Override
-  public synchronized void close() throws IOException {
-    startOffset = -1;
-    checksum = null;
-    if (peerCache != null && sentStatusCode) {
-      peerCache.put(datanodeID, peer);
-    } else {
-      peer.close();
-    }
-
-    // in will be closed when its Socket is closed.
-  }
-  
-  @Override
-  public void readFully(byte[] buf, int readOffset, int amtToRead)
-      throws IOException {
-    IOUtils.readFully(this, buf, readOffset, amtToRead);
-  }
-
-  @Override
-  public int readAll(byte[] buf, int offset, int len) throws IOException {
-    return readFully(this, buf, offset, len);
-  }
-
-  /**
-   * When the reader reaches end of the read, it sends a status response
-   * (e.g. CHECKSUM_OK) to the DN. Failure to do so could lead to the DN
-   * closing our connection (which we will re-open), but won't affect
-   * data correctness.
-   */
-  void sendReadResult(Peer peer, Status statusCode) {
-    assert !sentStatusCode : "already sent status code to " + peer;
-    try {
-      RemoteBlockReader2.writeReadResult(peer.getOutputStream(), statusCode);
-      sentStatusCode = true;
-    } catch (IOException e) {
-      // It's ok not to be able to send this. But something is probably wrong.
-      LOG.info("Could not send read status (" + statusCode + ") to datanode " +
-               peer.getRemoteAddressString() + ": " + e.getMessage());
-    }
-  }
-
-  @Override
-  public int read(ByteBuffer buf) throws IOException {
-    throw new UnsupportedOperationException("readDirect unsupported in RemoteBlockReader");
-  }
-  
-  @Override
-  public int available() throws IOException {
-    // An optimistic estimate of how much data is available
-    // to us without doing network I/O.
-    return DFSClient.TCP_WINDOW_SIZE;
-  }
-
-  @Override
-  public boolean isLocal() {
-    return isLocal;
-  }
-  
-  @Override
-  public boolean isShortCircuit() {
-    return false;
-  }
-
-  @Override
-  public ClientMmap getClientMmap(EnumSet<ReadOption> opts) {
-    return null;
-  }
-}
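
The chunk accounting in the deleted readChunkImpl() above is worth a worked
form: the number of chunks remaining in a packet is a ceiling (the final
chunk of a block may be partial), while the number that fits in the caller's
buffers is a floor. A hedged standalone sketch, names illustrative:

    class ChunkMathSketch {
      // Returns {checksumsToRead, bytesToRead} for one packet.
      static int[] chunksAndBytes(int dataLeft, int len,
          int bytesPerChecksum, int checksumSize, int checksumBufLen) {
        int chunksLeft = (dataLeft - 1) / bytesPerChecksum + 1;   // ceiling
        int chunksCanFit = Math.min(len / bytesPerChecksum,       // floor
            checksumBufLen / checksumSize);
        int checksumsToRead = Math.min(chunksLeft, chunksCanFit);
        int bytesToRead = Math.min(checksumsToRead * bytesPerChecksum,
            dataLeft);                  // allow for a partial final chunk
        return new int[] { checksumsToRead, bytesToRead };
      }
      // e.g. dataLeft = 1000, bytesPerChecksum = 512 gives chunksLeft = 2;
      // with room for both chunks, bytesToRead = min(1024, 1000) = 1000.
    }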


[20/50] [abbrv] hadoop git commit: Revert "MAPREDUCE-6363. [NNBench] Lease mismatch error when running with multiple mappers. Contributed by Brahma Reddy Battula."

Posted by vv...@apache.org.
Revert "MAPREDUCE-6363. [NNBench] Lease mismatch error when running with multiple mappers. Contributed by Brahma Reddy Battula."

This reverts commit 75a2560e51387ea31ef4609ef434475bbbc628f7.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/00804e24
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/00804e24
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/00804e24

Branch: refs/heads/YARN-3926
Commit: 00804e24579083d03a67b86c2c57e70b910dda8d
Parents: 5652131
Author: Akira Ajisaka <aa...@apache.org>
Authored: Wed Sep 2 12:22:16 2015 +0900
Committer: Akira Ajisaka <aa...@apache.org>
Committed: Wed Sep 2 12:22:16 2015 +0900

----------------------------------------------------------------------
 hadoop-mapreduce-project/CHANGES.txt                  |  3 ---
 .../src/test/java/org/apache/hadoop/hdfs/NNBench.java | 14 ++++----------
 2 files changed, 4 insertions(+), 13 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/00804e24/hadoop-mapreduce-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt
index 27af9f9..88cca41 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -509,9 +509,6 @@ Release 2.8.0 - UNRELEASED
     MAPREDUCE-6389. Fix BaileyBorweinPlouffe CLI usage message. (Brahma Reddy Battula
     via devaraj)
 
-    MAPREDUCE-6363. [NNBench] Lease mismatch error when running with multiple
-    mappers. (Brahma Reddy Battula via aajisaka)
-
     MAPREDUCE-6373. The logger reports total input paths but it is referring
     to input files. (Bibin A Chundatt via devaraj)
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/00804e24/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/hdfs/NNBench.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/hdfs/NNBench.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/hdfs/NNBench.java
index 8a1f1ba..b6c0104 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/hdfs/NNBench.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/hdfs/NNBench.java
@@ -57,8 +57,6 @@ import org.apache.hadoop.mapred.Reporter;
 import org.apache.hadoop.mapred.OutputCollector;
 import org.apache.hadoop.mapred.JobConf;
 import org.apache.hadoop.mapred.Reducer;
-import org.apache.hadoop.mapreduce.MRJobConfig;
-import org.apache.hadoop.mapreduce.TaskAttemptID;
 
 /**
  * This program executes a specified operation that applies load to 
@@ -689,9 +687,6 @@ public class NNBench {
       dataDirName = conf.get("test.nnbench.datadir.name");
       op = conf.get("test.nnbench.operation");
       readFile = conf.getBoolean("test.nnbench.readFileAfterOpen", false);
-      int taskId =
-          TaskAttemptID.forName(conf.get(MRJobConfig.TASK_ATTEMPT_ID))
-              .getTaskID().getId();
       
       long totalTimeTPmS = 0l;
       long startTimeTPmS = 0l;
@@ -704,19 +699,18 @@ public class NNBench {
       successfulFileOps = 0l;
       
       if (barrier()) {
-        String filePrefix = "file_" + taskId + "_";
         if (op.equals(OP_CREATE_WRITE)) {
           startTimeTPmS = System.currentTimeMillis();
-          doCreateWriteOp(filePrefix, reporter);
+          doCreateWriteOp("file_" + hostName + "_", reporter);
         } else if (op.equals(OP_OPEN_READ)) {
           startTimeTPmS = System.currentTimeMillis();
-          doOpenReadOp(filePrefix, reporter);
+          doOpenReadOp("file_" + hostName + "_", reporter);
         } else if (op.equals(OP_RENAME)) {
           startTimeTPmS = System.currentTimeMillis();
-          doRenameOp(filePrefix, reporter);
+          doRenameOp("file_" + hostName + "_", reporter);
         } else if (op.equals(OP_DELETE)) {
           startTimeTPmS = System.currentTimeMillis();
-          doDeleteOp(filePrefix, reporter);
+          doDeleteOp("file_" + hostName + "_", reporter);
         }
         
         endTimeTPms = System.currentTimeMillis();
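
For context on why this revert matters: the reverted change had keyed file
names off the map task id precisely because hostname-based names are shared
by every mapper scheduled on the same host, which is what surfaced as the
lease mismatch in MAPREDUCE-6363. A hedged illustration, values hypothetical:

    class NNBenchPrefixSketch {
      static void demo() {
        // Reverted scheme: identical for every mapper running on host-01,
        // so two tasks can operate on the same HDFS files.
        String byHost = "file_" + "host-01" + "_";
        // Scheme being reverted away: the task id is unique per map task.
        String byTask = "file_" + 3 + "_";
        System.out.println(byHost + " vs " + byTask);
      }
    }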


[18/50] [abbrv] hadoop git commit: YARN-3222. Added the missing CHANGES.txt entry.

Posted by vv...@apache.org.
YARN-3222. Added the missing CHANGES.txt entry.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/46207671
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/46207671
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/46207671

Branch: refs/heads/YARN-3926
Commit: 4620767156ecc43424bc6c7c4d50519e2563cc69
Parents: 0eb9b19
Author: Vinod Kumar Vavilapalli <vi...@apache.org>
Authored: Tue Sep 1 13:43:10 2015 -0700
Committer: Vinod Kumar Vavilapalli <vi...@apache.org>
Committed: Tue Sep 1 13:43:10 2015 -0700

----------------------------------------------------------------------
 hadoop-yarn-project/CHANGES.txt | 3 +++
 1 file changed, 3 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/46207671/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index e2b1307..b33111c 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -1701,6 +1701,9 @@ Release 2.7.0 - 2015-04-20
     YARN-3265. Fixed a deadlock in CapacityScheduler by always passing a queue's
     available resource-limit from the parent queue. (Wangda Tan via vinodkv)
 
+    YARN-3222. Fixed RMNode to send scheduler events in sequential order when a
+    node reconnects. (Rohith Sharma K S via jianhe)
+
     YARN-3131. YarnClientImpl should check FAILED and KILLED state in
     submitApplication (Chang Li via jlowe)
     


[36/50] [abbrv] hadoop git commit: HDFS-8939. Test(S)WebHdfsFileContextMainOperations failing on branch-2. Contributed by Chris Nauroth.

Posted by vv...@apache.org.
HDFS-8939. Test(S)WebHdfsFileContextMainOperations failing on branch-2. Contributed by Chris Nauroth.

(cherry picked from commit 9871f57d41990b2763d4c23d9040aeb685b4a80f)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c2d2c180
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c2d2c180
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c2d2c180

Branch: refs/heads/YARN-3926
Commit: c2d2c1802a11e3e11a953b23b0eccbf4d107de59
Parents: 53c38cc
Author: Jakob Homan <jg...@gmail.com>
Authored: Thu Sep 3 13:25:53 2015 -0700
Committer: Jakob Homan <jg...@gmail.com>
Committed: Thu Sep 3 13:26:54 2015 -0700

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt          |  3 +++
 .../src/main/java/org/apache/hadoop/fs/SWebHdfs.java | 15 ++++++++++++++-
 .../src/main/java/org/apache/hadoop/fs/WebHdfs.java  | 14 +++++++++++++-
 3 files changed, 30 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c2d2c180/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index afc6cf4..42460ed 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1292,6 +1292,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-8964. When validating the edit log, do not read at or beyond the file
     offset that is being written (Zhe Zhang via Colin P. McCabe)
 
+    HDFS-8939. Test(S)WebHdfsFileContextMainOperations failing on branch-2.
+    (Chris Nauroth via jghoman)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c2d2c180/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/SWebHdfs.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/SWebHdfs.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/SWebHdfs.java
index cd36393..04b5880 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/SWebHdfs.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/SWebHdfs.java
@@ -46,6 +46,19 @@ public class SWebHdfs extends DelegateToFileSystem {
    */
   SWebHdfs(URI theUri, Configuration conf)
       throws IOException, URISyntaxException {
-    super(theUri, new SWebHdfsFileSystem(), conf, SCHEME, false);
+    super(theUri, createSWebHdfsFileSystem(conf), conf, SCHEME, false);
+  }
+
+  /**
+   * Returns a new {@link SWebHdfsFileSystem}, with the given configuration.
+   *
+   * @param conf configuration
+   * @return new SWebHdfsFileSystem
+   */
+  private static SWebHdfsFileSystem createSWebHdfsFileSystem(
+      Configuration conf) {
+    SWebHdfsFileSystem fs = new SWebHdfsFileSystem();
+    fs.setConf(conf);
+    return fs;
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c2d2c180/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/WebHdfs.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/WebHdfs.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/WebHdfs.java
index dc4f6d5..d54a6b3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/WebHdfs.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/WebHdfs.java
@@ -46,6 +46,18 @@ public class WebHdfs extends DelegateToFileSystem {
    */
   WebHdfs(URI theUri, Configuration conf)
       throws IOException, URISyntaxException {
-    super(theUri, new WebHdfsFileSystem(), conf, SCHEME, false);
+    super(theUri, createWebHdfsFileSystem(conf), conf, SCHEME, false);
+  }
+
+  /**
+   * Returns a new {@link WebHdfsFileSystem}, with the given configuration.
+   *
+   * @param conf configuration
+   * @return new WebHdfsFileSystem
+   */
+  private static WebHdfsFileSystem createWebHdfsFileSystem(Configuration conf) {
+    WebHdfsFileSystem fs = new WebHdfsFileSystem();
+    fs.setConf(conf);
+    return fs;
   }
 }
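
Both fixes follow the same construct-then-configure pattern: Java does not
allow statements before super(...), so the setConf() call has to live in a
static factory whose result is passed to the superclass constructor. A
hypothetical minimal sketch of the pattern (none of these classes are Hadoop
APIs):

    // Premise of the sketch: the base class uses the wrapped object during
    // construction, so it must be fully configured before super(...) runs.
    class Wrapped {
      private String conf;
      void setConf(String conf) { this.conf = conf; }
      void init() { conf.length(); }  // NPE here if never configured
    }

    class Base {
      Base(Wrapped w) { w.init(); }   // touches the wrapped object right away
    }

    class Derived extends Base {
      Derived(String conf) { super(newConfigured(conf)); }

      private static Wrapped newConfigured(String conf) {
        Wrapped w = new Wrapped();
        w.setConf(conf);              // configure before handing to Base
        return w;
      }
    }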


[21/50] [abbrv] hadoop git commit: HDFS-8388. Time and Date format need to be in sync in NameNode UI page. Contributed by Surendra Singh Lilhore.

Posted by vv...@apache.org.
HDFS-8388. Time and Date format need to be in sync in NameNode UI page. Contributed by Surendra Singh Lilhore.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/65ccf2b1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/65ccf2b1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/65ccf2b1

Branch: refs/heads/YARN-3926
Commit: 65ccf2b1252a5c83755fa24a93cf1d30ee59b2c3
Parents: 00804e2
Author: Akira Ajisaka <aa...@apache.org>
Authored: Wed Sep 2 14:28:38 2015 +0900
Committer: Akira Ajisaka <aa...@apache.org>
Committed: Wed Sep 2 14:28:38 2015 +0900

----------------------------------------------------------------------
 .../hadoop-common/src/site/markdown/Metrics.md               | 2 ++
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt                  | 3 +++
 .../org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java | 5 +++++
 .../apache/hadoop/hdfs/server/namenode/NameNodeMXBean.java   | 6 ++++++
 .../hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html         | 5 ++---
 .../hadoop-hdfs/src/main/webapps/hdfs/dfshealth.js           | 6 +++---
 .../hadoop-hdfs/src/main/webapps/hdfs/explorer.html          | 1 +
 .../hadoop-hdfs/src/main/webapps/hdfs/explorer.js            | 2 +-
 .../hadoop-hdfs/src/main/webapps/static/dfs-dust.js          | 8 +++++++-
 9 files changed, 30 insertions(+), 8 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/65ccf2b1/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md b/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
index de706ad..8722968 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
@@ -192,6 +192,8 @@ Each metrics record contains tags such as ProcessName, SessionId, and Hostname a
 | `PutImageNumOps` | Total number of fsimage uploads to SecondaryNameNode |
 | `PutImageAvgTime` | Average fsimage upload time in milliseconds |
 | `TotalFileOps`| Total number of file operations performed |
+| `NNStarted`| NameNode start time |
+| `NNStartedTimeInMillis`| NameNode start time in milliseconds |
 
 FSNamesystem
 ------------

http://git-wip-us.apache.org/repos/asf/hadoop/blob/65ccf2b1/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index ea398f2..14a9248 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1270,6 +1270,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-8950. NameNode refresh doesn't remove DataNodes that are no longer in
     the allowed list (Daniel Templeton)
 
+    HDFS-8388. Time and Date format need to be in sync in NameNode UI page.
+    (Surendra Singh Lilhore via aajisaka)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/65ccf2b1/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index f4952f7..adcb1d6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -6131,6 +6131,11 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
     return getStartTime().toString();
   }
 
+  @Override // NameNodeMXBean
+  public long getNNStartedTimeInMillis() {
+    return startTime;
+  }
+
   @Override  // NameNodeMXBean
   public String getCompileInfo() {
     return VersionInfo.getDate() + " by " + VersionInfo.getUser() +

http://git-wip-us.apache.org/repos/asf/hadoop/blob/65ccf2b1/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeMXBean.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeMXBean.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeMXBean.java
index 0e4d445..00c1abe 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeMXBean.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeMXBean.java
@@ -239,6 +239,12 @@ public interface NameNodeMXBean {
   public String getNNStarted();
 
   /**
+   * Gets the NN start time in milliseconds.
+   * @return the NN start time in msec
+   */
+  long getNNStartedTimeInMillis();
+
+  /**
    * Get the compilation information which contains date, user and branch
    *
    * @return the compilation information, as a JSON string.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/65ccf2b1/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
index 38808ca..36f8bfe 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
@@ -130,9 +130,9 @@
   <tr><th>Namenode ID:</th><td>{NamenodeID}</td></tr>
 {/HAInfo}
 {#nn}
-  <tr><th>Started:</th><td>{NNStarted}</td></tr>
+  <tr><th>Started:</th><td>{NNStartedTimeInMillis|date_tostring}</td></tr>
   <tr><th>Version:</th><td>{Version}</td></tr>
-  <tr><th>Compiled:</th><td>{CompileInfo}</td></tr>
+  <tr><th>Compiled:</th><td>{CompileInfo|format_compile_info}</td></tr>
   <tr><th>Cluster ID:</th><td>{ClusterId}</td></tr>
   <tr><th>Block Pool ID:</th><td>{BlockPoolId}</td></tr>
 {/nn}
@@ -423,7 +423,6 @@ There are no reported volume failures.
 </script><script type="text/javascript" src="/static/bootstrap-3.0.2/js/bootstrap.min.js">
 </script><script type="text/javascript" src="/static/dataTables.bootstrap.js">
 </script><script type="text/javascript" src="/static/moment.min.js">
-</script><script type="text/javascript" src="/static/moment.min.js">
 </script><script type="text/javascript" src="/static/dust-full-2.0.0.min.js">
 </script><script type="text/javascript" src="/static/dust-helpers-1.1.1.min.js">
 </script><script type="text/javascript" src="/static/dfs-dust.js">

http://git-wip-us.apache.org/repos/asf/hadoop/blob/65ccf2b1/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.js
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.js b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.js
index 03b27fe..9bc1b5d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.js
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.js
@@ -57,7 +57,7 @@
 
       'helper_date_tostring' : function (chunk, ctx, bodies, params) {
         var value = dust.helpers.tap(params.value, chunk, ctx);
-        return chunk.write('' + new Date(Number(value)).toLocaleString());
+        return chunk.write('' + moment(Number(value)).format('ddd MMM DD HH:mm:ss ZZ YYYY'));
       }
     };
 
@@ -175,7 +175,7 @@
     var HELPERS = {
       'helper_relative_time' : function (chunk, ctx, bodies, params) {
         var value = dust.helpers.tap(params.value, chunk, ctx);
-        return chunk.write(moment().subtract(Number(value), 'seconds').format('YYYY-MM-DD HH:mm:ss'));
+        return chunk.write(moment().subtract(Number(value), 'seconds').format('ddd MMM DD HH:mm:ss ZZ YYYY'));
       },
       'helper_usage_bar' : function (chunk, ctx, bodies, params) {
         var value = dust.helpers.tap(params.value, chunk, ctx);
@@ -262,7 +262,7 @@
     var HELPERS = {
       'helper_date_tostring' : function (chunk, ctx, bodies, params) {
         var value = dust.helpers.tap(params.value, chunk, ctx);
-        return chunk.write('' + new Date(Number(value)).toLocaleString());
+        return chunk.write('' + moment(Number(value)).format('ddd MMM DD HH:mm:ss ZZ YYYY'));
       }
     };
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/65ccf2b1/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html
index fbea6ad..e805915 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html
@@ -199,6 +199,7 @@
     </script><script type="text/javascript" src="/static/dust-helpers-1.1.1.min.js">
     </script><script type="text/javascript" src="/static/dfs-dust.js">
     </script><script type="text/javascript" src="explorer.js">
+    </script><script type="text/javascript" src="/static/moment.min.js">
     </script>
   </body>
 </html>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/65ccf2b1/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.js
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.js b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.js
index 5572880..46f48b8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.js
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.js
@@ -146,7 +146,7 @@
     var HELPERS = {
       'helper_date_tostring' : function (chunk, ctx, bodies, params) {
         var value = dust.helpers.tap(params.value, chunk, ctx);
-        return chunk.write('' + new Date(Number(value)).toLocaleString());
+        return chunk.write('' + moment(Number(value)).format('ddd MMM DD HH:mm:ss ZZ YYYY'));
       }
     };
     var url = '/webhdfs/v1' + encode_path(dir) + '?op=LISTSTATUS';

http://git-wip-us.apache.org/repos/asf/hadoop/blob/65ccf2b1/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/dfs-dust.js
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/dfs-dust.js b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/dfs-dust.js
index 3c8efd9..466e058 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/dfs-dust.js
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/dfs-dust.js
@@ -58,9 +58,15 @@
     },
 
     'date_tostring' : function (v) {
-      return new Date(Number(v)).toLocaleString();
+      return moment(Number(v)).format('ddd MMM DD HH:mm:ss ZZ YYYY');
     },
 
+    'format_compile_info' : function (v) {
+      var info = v.split(" by ");
+      var date = moment(info[0]).format('ddd MMM DD HH:mm:ss ZZ YYYY');
+      return date.concat(" by ").concat(info[1]);
+    },
+
     'helper_to_permission': function (v) {
       var symbols = [ '---', '--x', '-w-', '-wx', 'r--', 'r-x', 'rw-', 'rwx' ];
       var vInt = parseInt(v, 8);
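
The moment.js pattern 'ddd MMM DD HH:mm:ss ZZ YYYY' used throughout this
patch mirrors the java.util.Date#toString() style string the NameNode emits,
which is what keeps the dates on the UI pages in sync. As a hedged Java
illustration of the same target format (SimpleDateFormat's 'Z' prints a
numeric offset, the closest analogue of moment's 'ZZ'; the timestamp is
illustrative):

    import java.text.SimpleDateFormat;
    import java.util.Date;

    public class DateFormatParity {
      public static void main(String[] args) {
        // Stand-in for an NNStartedTimeInMillis value rendered by the UI.
        long startedMillis = 1441647963000L; // 2015-09-07T17:46:03Z
        SimpleDateFormat fmt = new SimpleDateFormat("EEE MMM dd HH:mm:ss Z yyyy");
        // Prints e.g. "Mon Sep 07 17:46:03 +0000 2015", offset depending on zone.
        System.out.println(fmt.format(new Date(startedMillis)));
      }
    }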


[16/50] [abbrv] hadoop git commit: HADOOP-10365. BufferedOutputStream in FileUtil#unpackEntries() should be closed in finally block. Contributed by Kiran Kumar M R and Sanghyun Yun.

Posted by vv...@apache.org.
HADOOP-10365. BufferedOutputStream in FileUtil#unpackEntries() should be closed in finally block. Contributed by Kiran Kumar M R and Sanghyun Yun.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/dd149ade
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/dd149ade
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/dd149ade

Branch: refs/heads/YARN-3926
Commit: dd149adeace8727864371c5a1484c6534f8b450b
Parents: 2e251a7
Author: Tsuyoshi Ozawa <oz...@apache.org>
Authored: Wed Sep 2 02:01:51 2015 +0900
Committer: Tsuyoshi Ozawa <oz...@apache.org>
Committed: Wed Sep 2 02:01:51 2015 +0900

----------------------------------------------------------------------
 hadoop-common-project/hadoop-common/CHANGES.txt       |  3 +++
 .../src/main/java/org/apache/hadoop/fs/FileUtil.java  | 14 +++++++-------
 2 files changed, 10 insertions(+), 7 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/dd149ade/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 4eef964..70252d6 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -1123,6 +1123,9 @@ Release 2.7.2 - UNRELEASED
     HADOOP-12359. hadoop fs -getmerge doc is wrong.
     (Jagadesh Kiran N via aajisaka)
 
+    HADOOP-10365. BufferedOutputStream in FileUtil#unpackEntries() should be
+    closed in finally block. (Kiran Kumar M R and Sanghyun Yun via ozawa)
+
 Release 2.7.1 - 2015-07-06 
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dd149ade/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
index 8abb4eb..3c0e90d 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
@@ -742,15 +742,15 @@ public class FileUtil {
 
     int count;
     byte data[] = new byte[2048];
-    BufferedOutputStream outputStream = new BufferedOutputStream(
-        new FileOutputStream(outputFile));
+    try (BufferedOutputStream outputStream = new BufferedOutputStream(
+        new FileOutputStream(outputFile))) {
 
-    while ((count = tis.read(data)) != -1) {
-      outputStream.write(data, 0, count);
-    }
+      while ((count = tis.read(data)) != -1) {
+        outputStream.write(data, 0, count);
+      }
 
-    outputStream.flush();
-    outputStream.close();
+      outputStream.flush();
+    }
   }
 
   /**
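
For reference, a self-contained sketch of the pattern the patch adopts
(names are illustrative, not taken from FileUtil): try-with-resources closes
the stream on every exit path, including an exception mid-copy, and close()
itself flushes any buffered bytes.

    import java.io.BufferedOutputStream;
    import java.io.File;
    import java.io.FileOutputStream;
    import java.io.IOException;
    import java.io.InputStream;

    public class StreamCopySketch {
      static void copyTo(InputStream in, File outputFile) throws IOException {
        byte[] data = new byte[2048];
        int count;
        try (BufferedOutputStream out = new BufferedOutputStream(
            new FileOutputStream(outputFile))) {
          while ((count = in.read(data)) != -1) {
            out.write(data, 0, count);
          }
          out.flush(); // explicit flush mirrors the patch; close() would also flush
        } // out is closed here even if read() or write() threw
      }
    }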


[37/50] [abbrv] hadoop git commit: HDFS-9002. Move o.a.h.hdfs.net/*Peer classes to hdfs-client. Contributed by Mingliang Liu.

Posted by vv...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ed78b14e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
index 6f16d83..5b60307 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
@@ -20,7 +20,6 @@ package org.apache.hadoop.hdfs.protocolPB;
 import static com.google.common.base.Preconditions.checkNotNull;
 import static org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos
     .EncryptionZoneProto;
-import static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto;
 import static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CryptoProtocolVersionProto;
 
 import java.io.IOException;
@@ -2315,7 +2314,7 @@ public class PBHelper {
     return EncryptionZoneProto.newBuilder()
         .setId(zone.getId())
         .setPath(zone.getPath())
-        .setSuite(convert(zone.getSuite()))
+        .setSuite(PBHelperClient.convert(zone.getSuite()))
         .setCryptoProtocolVersion(convert(zone.getVersion()))
         .setKeyName(zone.getKeyName())
         .build();
@@ -2323,7 +2322,7 @@ public class PBHelper {
 
   public static EncryptionZone convert(EncryptionZoneProto proto) {
     return new EncryptionZone(proto.getId(), proto.getPath(),
-        convert(proto.getSuite()), convert(proto.getCryptoProtocolVersion()),
+        PBHelperClient.convert(proto.getSuite()), convert(proto.getCryptoProtocolVersion()),
         proto.getKeyName());
   }
 
@@ -2624,106 +2623,6 @@ public class PBHelper {
     return GetEditsFromTxidResponseProto.newBuilder().setEventsList(
         builder.build()).build();
   }
-  
-  public static CipherOptionProto convert(CipherOption option) {
-    if (option != null) {
-      CipherOptionProto.Builder builder = CipherOptionProto.
-          newBuilder();
-      if (option.getCipherSuite() != null) {
-        builder.setSuite(convert(option.getCipherSuite()));
-      }
-      if (option.getInKey() != null) {
-        builder.setInKey(ByteString.copyFrom(option.getInKey()));
-      }
-      if (option.getInIv() != null) {
-        builder.setInIv(ByteString.copyFrom(option.getInIv()));
-      }
-      if (option.getOutKey() != null) {
-        builder.setOutKey(ByteString.copyFrom(option.getOutKey()));
-      }
-      if (option.getOutIv() != null) {
-        builder.setOutIv(ByteString.copyFrom(option.getOutIv()));
-      }
-      return builder.build();
-    }
-    return null;
-  }
-  
-  public static CipherOption convert(CipherOptionProto proto) {
-    if (proto != null) {
-      CipherSuite suite = null;
-      if (proto.getSuite() != null) {
-        suite = convert(proto.getSuite());
-      }
-      byte[] inKey = null;
-      if (proto.getInKey() != null) {
-        inKey = proto.getInKey().toByteArray();
-      }
-      byte[] inIv = null;
-      if (proto.getInIv() != null) {
-        inIv = proto.getInIv().toByteArray();
-      }
-      byte[] outKey = null;
-      if (proto.getOutKey() != null) {
-        outKey = proto.getOutKey().toByteArray();
-      }
-      byte[] outIv = null;
-      if (proto.getOutIv() != null) {
-        outIv = proto.getOutIv().toByteArray();
-      }
-      return new CipherOption(suite, inKey, inIv, outKey, outIv);
-    }
-    return null;
-  }
-  
-  public static List<CipherOptionProto> convertCipherOptions(
-      List<CipherOption> options) {
-    if (options != null) {
-      List<CipherOptionProto> protos = 
-          Lists.newArrayListWithCapacity(options.size());
-      for (CipherOption option : options) {
-        protos.add(convert(option));
-      }
-      return protos;
-    }
-    return null;
-  }
-  
-  public static List<CipherOption> convertCipherOptionProtos(
-      List<CipherOptionProto> protos) {
-    if (protos != null) {
-      List<CipherOption> options = 
-          Lists.newArrayListWithCapacity(protos.size());
-      for (CipherOptionProto proto : protos) {
-        options.add(convert(proto));
-      }
-      return options;
-    }
-    return null;
-  }
-
-  public static CipherSuiteProto convert(CipherSuite suite) {
-    switch (suite) {
-    case UNKNOWN:
-      return CipherSuiteProto.UNKNOWN;
-    case AES_CTR_NOPADDING:
-      return CipherSuiteProto.AES_CTR_NOPADDING;
-    default:
-      return null;
-    }
-  }
-
-  public static CipherSuite convert(CipherSuiteProto proto) {
-    switch (proto) {
-    case AES_CTR_NOPADDING:
-      return CipherSuite.AES_CTR_NOPADDING;
-    default:
-      // Set to UNKNOWN and stash the unknown enum value
-      CipherSuite suite = CipherSuite.UNKNOWN;
-      suite.setUnknownValue(proto.getNumber());
-      return suite;
-    }
-  }
 
   public static List<CryptoProtocolVersionProto> convert(
       CryptoProtocolVersion[] versions) {
@@ -2776,7 +2675,7 @@ public class PBHelper {
       return null;
     }
     return HdfsProtos.FileEncryptionInfoProto.newBuilder()
-        .setSuite(convert(info.getCipherSuite()))
+        .setSuite(PBHelperClient.convert(info.getCipherSuite()))
         .setCryptoProtocolVersion(convert(info.getCryptoProtocolVersion()))
         .setKey(getByteString(info.getEncryptedDataEncryptionKey()))
         .setIv(getByteString(info.getIV()))
@@ -2803,7 +2702,7 @@ public class PBHelper {
       return null;
     }
     return HdfsProtos.ZoneEncryptionInfoProto.newBuilder()
-        .setSuite(convert(suite))
+        .setSuite(PBHelperClient.convert(suite))
         .setCryptoProtocolVersion(convert(version))
         .setKeyName(keyName)
         .build();
@@ -2814,7 +2713,7 @@ public class PBHelper {
     if (proto == null) {
       return null;
     }
-    CipherSuite suite = convert(proto.getSuite());
+    CipherSuite suite = PBHelperClient.convert(proto.getSuite());
     CryptoProtocolVersion version = convert(proto.getCryptoProtocolVersion());
     byte[] key = proto.getKey().toByteArray();
     byte[] iv = proto.getIv().toByteArray();
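
The CipherSuite converters deleted above move into PBHelperClient; the
notable part of that pattern is that an unrecognized proto enum degrades to
UNKNOWN while the raw wire number is stashed for diagnostics. A hedged,
self-contained sketch of the same idea (the names and the wire number 1 are
illustrative, not the real proto values):

    public class EnumRoundTripSketch {
      enum Suite {
        UNKNOWN, AES_CTR_NOPADDING;
        private int unknownValue;
        void setUnknownValue(int v) { unknownValue = v; }
        int getUnknownValue() { return unknownValue; }
      }

      static Suite fromWire(int wireNumber) {
        if (wireNumber == 1) {         // assumed wire number for AES_CTR_NOPADDING
          return Suite.AES_CTR_NOPADDING;
        }
        Suite s = Suite.UNKNOWN;       // value this client version doesn't know
        s.setUnknownValue(wireNumber); // preserve the raw number for round-tripping
        return s;
      }

      public static void main(String[] args) {
        System.out.println(fromWire(1));                   // AES_CTR_NOPADDING
        System.out.println(fromWire(7).getUnknownValue()); // 7 is preserved
      }
    }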

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ed78b14e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
index e88fa49..ced085a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
@@ -30,6 +30,7 @@ import org.apache.hadoop.hdfs.XAttrHelper;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos;
 import org.apache.hadoop.hdfs.protocolPB.PBHelper;
+import org.apache.hadoop.hdfs.protocolPB.PBHelperClient;
 import org.apache.hadoop.security.AccessControlException;
 
 import java.io.IOException;
@@ -282,10 +283,9 @@ class FSDirXAttrOp {
         final HdfsProtos.ZoneEncryptionInfoProto ezProto =
             HdfsProtos.ZoneEncryptionInfoProto.parseFrom(xattr.getValue());
         fsd.ezManager.addEncryptionZone(inode.getId(),
-                                        PBHelper.convert(ezProto.getSuite()),
-                                        PBHelper.convert(
-                                            ezProto.getCryptoProtocolVersion()),
-                                        ezProto.getKeyName());
+            PBHelperClient.convert(ezProto.getSuite()),
+            PBHelper.convert(ezProto.getCryptoProtocolVersion()),
+            ezProto.getKeyName());
       }
 
       if (!isFile && SECURITY_XATTR_UNREADABLE_BY_SUPERUSER.equals(xaName)) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ed78b14e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
index 734d3c0..8c74e48 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
@@ -49,6 +49,7 @@ import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
 import org.apache.hadoop.hdfs.protocol.SnapshotAccessControlException;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos;
 import org.apache.hadoop.hdfs.protocolPB.PBHelper;
+import org.apache.hadoop.hdfs.protocolPB.PBHelperClient;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
@@ -1004,7 +1005,7 @@ public class FSDirectory implements Closeable {
                   HdfsProtos.ZoneEncryptionInfoProto.parseFrom(
                       xattr.getValue());
               ezManager.unprotectedAddEncryptionZone(inode.getId(),
-                  PBHelper.convert(ezProto.getSuite()),
+                  PBHelperClient.convert(ezProto.getSuite()),
                   PBHelper.convert(ezProto.getCryptoProtocolVersion()),
                   ezProto.getKeyName());
             } catch (InvalidProtocolBufferException e) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ed78b14e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
index 7ebe859..b5dbc46 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
@@ -45,11 +45,11 @@ import org.apache.hadoop.hdfs.BlockReader;
 import org.apache.hadoop.hdfs.BlockReaderFactory;
 import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdfs.RemotePeerFactory;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.net.Peer;
-import org.apache.hadoop.hdfs.net.TcpPeerServer;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
@@ -884,7 +884,7 @@ public class NamenodeFsck implements DataEncryptionKeyFactory {
                 try {
                   s.connect(addr, HdfsConstants.READ_TIMEOUT);
                   s.setSoTimeout(HdfsConstants.READ_TIMEOUT);
-                  peer = TcpPeerServer.peerFromSocketAndKey(
+                  peer = DFSUtilClient.peerFromSocketAndKey(
                         dfs.getSaslDataTransferClient(), s, NamenodeFsck.this,
                         blockToken, datanodeId);
                 } finally {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ed78b14e/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/BlockReaderTestUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/BlockReaderTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/BlockReaderTestUtil.java
index 6d8bec3..7b0ef02 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/BlockReaderTestUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/BlockReaderTestUtil.java
@@ -32,7 +32,6 @@ import org.apache.commons.io.IOUtils;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.net.Peer;
-import org.apache.hadoop.hdfs.net.TcpPeerServer;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
@@ -204,7 +203,7 @@ public class BlockReaderTestUtil {
           try {
             sock.connect(addr, HdfsConstants.READ_TIMEOUT);
             sock.setSoTimeout(HdfsConstants.READ_TIMEOUT);
-            peer = TcpPeerServer.peerFromSocket(sock);
+            peer = DFSUtilClient.peerFromSocket(sock);
           } finally {
             if (peer == null) {
               IOUtils.closeQuietly(sock);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ed78b14e/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
index 24e0965..40414c8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
@@ -21,13 +21,13 @@ import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_DEFAULT_NAME
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY;
 import static org.apache.hadoop.fs.CommonConfigurationKeys.IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SASL_DEFAULT;
 import static org.apache.hadoop.fs.CommonConfigurationKeys.IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SASL_KEY;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_DATA_TRANSFER_PROTECTION_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCKREPORT_INITIAL_DELAY_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_ADDRESS_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATA_TRANSFER_PROTECTION_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_LOGROLL_PERIOD_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_NAMENODE_ID_KEY;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ed78b14e/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptedTransfer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptedTransfer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptedTransfer.java
index bf19c40..9c02e9f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptedTransfer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptedTransfer.java
@@ -77,7 +77,7 @@ public class TestEncryptedTransfer {
     conf.setBoolean(DFSConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_KEY, true);
     conf.setBoolean(DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY, true);
     if (resolverClazz != null){
-      conf.set(DFSConfigKeys.DFS_TRUSTEDCHANNEL_RESOLVER_CLASS, resolverClazz);
+      conf.set(HdfsClientConfigKeys.DFS_TRUSTEDCHANNEL_RESOLVER_CLASS, resolverClazz);
     }
   }
   
@@ -209,7 +209,7 @@ public class TestEncryptedTransfer {
     MiniDFSCluster cluster = null;
     try {
       Configuration conf = new Configuration();
-      conf.set(DFSConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_CIPHER_SUITES_KEY,
+      conf.set(HdfsClientConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_CIPHER_SUITES_KEY,
           "AES/CTR/NoPadding");
       cluster = new MiniDFSCluster.Builder(conf).build();
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ed78b14e/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslDataTransferTestCase.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslDataTransferTestCase.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslDataTransferTestCase.java
index 403b21e..9e660b2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslDataTransferTestCase.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslDataTransferTestCase.java
@@ -18,8 +18,8 @@
 package org.apache.hadoop.hdfs.protocol.datatransfer.sasl;
 
 import static org.apache.hadoop.fs.CommonConfigurationKeys.IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SASL_KEY;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_DATA_TRANSFER_PROTECTION_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATA_TRANSFER_PROTECTION_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HTTPS_ADDRESS_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_KERBEROS_PRINCIPAL_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_KEYTAB_FILE_KEY;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ed78b14e/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/TestSaslDataTransfer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/TestSaslDataTransfer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/TestSaslDataTransfer.java
index 16ddc75..0cbf266 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/TestSaslDataTransfer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/TestSaslDataTransfer.java
@@ -17,7 +17,7 @@
  */
 package org.apache.hadoop.hdfs.protocol.datatransfer.sasl;
 
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATA_TRANSFER_PROTECTION_KEY;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_DATA_TRANSFER_PROTECTION_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HTTP_POLICY_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.IGNORE_SECURE_PORTS_FOR_TESTING_KEY;
 import static org.junit.Assert.assertArrayEquals;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ed78b14e/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/TestSecureNNWithQJM.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/TestSecureNNWithQJM.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/TestSecureNNWithQJM.java
index 1da92a1..b40e52a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/TestSecureNNWithQJM.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/TestSecureNNWithQJM.java
@@ -20,8 +20,8 @@ package org.apache.hadoop.hdfs.qjournal;
 import static org.junit.Assert.*;
 
 import static org.apache.hadoop.fs.CommonConfigurationKeys.IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SASL_KEY;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_DATA_TRANSFER_PROTECTION_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATA_TRANSFER_PROTECTION_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HTTPS_ADDRESS_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_KERBEROS_PRINCIPAL_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_KEYTAB_FILE_KEY;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ed78b14e/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFS.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFS.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFS.java
index 234eb22..79a15e0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFS.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFS.java
@@ -41,12 +41,12 @@ import org.apache.hadoop.hdfs.ClientContext;
 import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.RemotePeerFactory;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.client.impl.DfsClientConf;
 import org.apache.hadoop.hdfs.net.Peer;
-import org.apache.hadoop.hdfs.net.TcpPeerServer;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
@@ -171,7 +171,7 @@ public class TestBlockTokenWithDFS {
               try {
                 sock.connect(addr, HdfsConstants.READ_TIMEOUT);
                 sock.setSoTimeout(HdfsConstants.READ_TIMEOUT);
-                peer = TcpPeerServer.peerFromSocket(sock);
+                peer = DFSUtilClient.peerFromSocket(sock);
               } finally {
                 if (peer == null) {
                   IOUtils.closeSocket(sock);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ed78b14e/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
index 4e37161..cb50edc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
@@ -43,12 +43,12 @@ import org.apache.hadoop.hdfs.BlockReaderFactory;
 import org.apache.hadoop.hdfs.ClientContext;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.RemotePeerFactory;
 import org.apache.hadoop.hdfs.client.impl.DfsClientConf;
 import org.apache.hadoop.hdfs.net.Peer;
-import org.apache.hadoop.hdfs.net.TcpPeerServer;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
@@ -525,7 +525,7 @@ public class TestDataNodeVolumeFailure {
           try {
             sock.connect(addr, HdfsConstants.READ_TIMEOUT);
             sock.setSoTimeout(HdfsConstants.READ_TIMEOUT);
-            peer = TcpPeerServer.peerFromSocket(sock);
+            peer = DFSUtilClient.peerFromSocket(sock);
           } finally {
             if (peer == null) {
               IOUtils.closeSocket(sock);


[05/50] [abbrv] hadoop git commit: HDFS-8990. Move RemoteBlockReader to hdfs-client module. Contributed by Mingliang Liu.

Posted by vv...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/826ae1c2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader2.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader2.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader2.java
deleted file mode 100644
index 2a77cb6..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader2.java
+++ /dev/null
@@ -1,477 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs;
-
-import java.io.BufferedOutputStream;
-import java.io.DataInputStream;
-import java.io.DataOutputStream;
-import java.io.IOException;
-import java.io.OutputStream;
-import java.net.InetSocketAddress;
-import java.nio.ByteBuffer;
-import java.nio.channels.ReadableByteChannel;
-import java.util.EnumSet;
-import java.util.UUID;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.fs.ReadOption;
-import org.apache.hadoop.hdfs.net.Peer;
-import org.apache.hadoop.hdfs.protocol.DatanodeID;
-import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
-import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil;
-import org.apache.hadoop.hdfs.protocol.datatransfer.PacketHeader;
-import org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver;
-import org.apache.hadoop.hdfs.protocol.datatransfer.Sender;
-import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto;
-import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto;
-import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto;
-import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
-import org.apache.hadoop.hdfs.protocolPB.PBHelperClient;
-import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
-import org.apache.hadoop.hdfs.server.datanode.CachingStrategy;
-import org.apache.hadoop.hdfs.shortcircuit.ClientMmap;
-import org.apache.hadoop.net.NetUtils;
-import org.apache.hadoop.security.token.Token;
-import org.apache.hadoop.util.DataChecksum;
-import org.apache.htrace.Sampler;
-import org.apache.htrace.Trace;
-import org.apache.htrace.TraceScope;
-
-import com.google.common.annotations.VisibleForTesting;
-
-/**
- * This is a wrapper around connection to datanode
- * and understands checksum, offset etc.
- *
- * Terminology:
- * <dl>
- * <dt>block</dt>
- *   <dd>The hdfs block, typically large (~64MB).
- *   </dd>
- * <dt>chunk</dt>
- *   <dd>A block is divided into chunks, each comes with a checksum.
- *       We want transfers to be chunk-aligned, to be able to
- *       verify checksums.
- *   </dd>
- * <dt>packet</dt>
- *   <dd>A grouping of chunks used for transport. It contains a
- *       header, followed by checksum data, followed by real data.
- *   </dd>
- * </dl>
- * Please see DataNode for the RPC specification.
- *
- * This is a new implementation introduced in Hadoop 0.23 which
- * is more efficient and simpler than the older BlockReader
- * implementation. It should be renamed to RemoteBlockReader
- * once we are confident in it.
- */
-@InterfaceAudience.Private
-public class RemoteBlockReader2  implements BlockReader {
-
-  static final Log LOG = LogFactory.getLog(RemoteBlockReader2.class);
-  
-  final private Peer peer;
-  final private DatanodeID datanodeID;
-  final private PeerCache peerCache;
-  final private long blockId;
-  private final ReadableByteChannel in;
-  private DataChecksum checksum;
-  
-  private final PacketReceiver packetReceiver = new PacketReceiver(true);
-  private ByteBuffer curDataSlice = null;
-
-  /** offset in block of the last chunk received */
-  private long lastSeqNo = -1;
-
-  /** offset in block where reader wants to actually read */
-  private long startOffset;
-  private final String filename;
-
-  private final int bytesPerChecksum;
-  private final int checksumSize;
-
-  /**
-   * The total number of bytes we need to transfer from the DN.
-   * This is the amount that the user has requested plus some padding
-   * at the beginning so that the read can begin on a chunk boundary.
-   */
-  private long bytesNeededToFinish;
-
-  /**
-   * True if we are reading from a local DataNode.
-   */
-  private final boolean isLocal;
-
-  private final boolean verifyChecksum;
-
-  private boolean sentStatusCode = false;
-
-  @VisibleForTesting
-  public Peer getPeer() {
-    return peer;
-  }
-  
-  @Override
-  public synchronized int read(byte[] buf, int off, int len) 
-                               throws IOException {
-
-    UUID randomId = null;
-    if (LOG.isTraceEnabled()) {
-      randomId = UUID.randomUUID();
-      LOG.trace(String.format("Starting read #%s file %s from datanode %s",
-        randomId.toString(), this.filename,
-        this.datanodeID.getHostName()));
-    }
-
-    if (curDataSlice == null || curDataSlice.remaining() == 0 && bytesNeededToFinish > 0) {
-      TraceScope scope = Trace.startSpan(
-          "RemoteBlockReader2#readNextPacket(" + blockId + ")", Sampler.NEVER);
-      try {
-        readNextPacket();
-      } finally {
-        scope.close();
-      }
-    }
-
-    if (LOG.isTraceEnabled()) {
-      LOG.trace(String.format("Finishing read #" + randomId));
-    }
-
-    if (curDataSlice.remaining() == 0) {
-      // we're at EOF now
-      return -1;
-    }
-    
-    int nRead = Math.min(curDataSlice.remaining(), len);
-    curDataSlice.get(buf, off, nRead);
-    
-    return nRead;
-  }
-
-
-  @Override
-  public synchronized int read(ByteBuffer buf) throws IOException {
-    if (curDataSlice == null || curDataSlice.remaining() == 0 && bytesNeededToFinish > 0) {
-      TraceScope scope = Trace.startSpan(
-          "RemoteBlockReader2#readNextPacket(" + blockId + ")", Sampler.NEVER);
-      try {
-        readNextPacket();
-      } finally {
-        scope.close();
-      }
-    }
-    if (curDataSlice.remaining() == 0) {
-      // we're at EOF now
-      return -1;
-    }
-
-    int nRead = Math.min(curDataSlice.remaining(), buf.remaining());
-    ByteBuffer writeSlice = curDataSlice.duplicate();
-    writeSlice.limit(writeSlice.position() + nRead);
-    buf.put(writeSlice);
-    curDataSlice.position(writeSlice.position());
-
-    return nRead;
-  }
-
-  private void readNextPacket() throws IOException {
-    //Read packet headers.
-    packetReceiver.receiveNextPacket(in);
-
-    PacketHeader curHeader = packetReceiver.getHeader();
-    curDataSlice = packetReceiver.getDataSlice();
-    assert curDataSlice.capacity() == curHeader.getDataLen();
-    
-    if (LOG.isTraceEnabled()) {
-      LOG.trace("DFSClient readNextPacket got header " + curHeader);
-    }
-
-    // Sanity check the lengths
-    if (!curHeader.sanityCheck(lastSeqNo)) {
-         throw new IOException("BlockReader: error in packet header " +
-                               curHeader);
-    }
-    
-    if (curHeader.getDataLen() > 0) {
-      int chunks = 1 + (curHeader.getDataLen() - 1) / bytesPerChecksum;
-      int checksumsLen = chunks * checksumSize;
-
-      assert packetReceiver.getChecksumSlice().capacity() == checksumsLen :
-        "checksum slice capacity=" + packetReceiver.getChecksumSlice().capacity() + 
-          " checksumsLen=" + checksumsLen;
-      
-      lastSeqNo = curHeader.getSeqno();
-      if (verifyChecksum && curDataSlice.remaining() > 0) {
-        // N.B.: the checksum error offset reported here is actually
-        // relative to the start of the block, not the start of the file.
-        // This is slightly misleading, but preserves the behavior from
-        // the older BlockReader.
-        checksum.verifyChunkedSums(curDataSlice,
-            packetReceiver.getChecksumSlice(),
-            filename, curHeader.getOffsetInBlock());
-      }
-      bytesNeededToFinish -= curHeader.getDataLen();
-    }    
-    
-    // First packet will include some data prior to the first byte
-    // the user requested. Skip it.
-    if (curHeader.getOffsetInBlock() < startOffset) {
-      int newPos = (int) (startOffset - curHeader.getOffsetInBlock());
-      curDataSlice.position(newPos);
-    }
-
-    // If we've now satisfied the whole client read, read one last packet
-    // header, which should be empty
-    if (bytesNeededToFinish <= 0) {
-      readTrailingEmptyPacket();
-      if (verifyChecksum) {
-        sendReadResult(Status.CHECKSUM_OK);
-      } else {
-        sendReadResult(Status.SUCCESS);
-      }
-    }
-  }
-  
-  @Override
-  public synchronized long skip(long n) throws IOException {
-    /* How can we make sure we don't throw a ChecksumException, at least
-     * in majority of the cases?. This one throws. */
-    long skipped = 0;
-    while (skipped < n) {
-      long needToSkip = n - skipped;
-      if (curDataSlice == null || curDataSlice.remaining() == 0 && bytesNeededToFinish > 0) {
-        readNextPacket();
-      }
-      if (curDataSlice.remaining() == 0) {
-        // we're at EOF now
-        break;
-      }
-
-      int skip = (int)Math.min(curDataSlice.remaining(), needToSkip);
-      curDataSlice.position(curDataSlice.position() + skip);
-      skipped += skip;
-    }
-    return skipped;
-  }
-
-  private void readTrailingEmptyPacket() throws IOException {
-    if (LOG.isTraceEnabled()) {
-      LOG.trace("Reading empty packet at end of read");
-    }
-    
-    packetReceiver.receiveNextPacket(in);
-
-    PacketHeader trailer = packetReceiver.getHeader();
-    if (!trailer.isLastPacketInBlock() ||
-       trailer.getDataLen() != 0) {
-      throw new IOException("Expected empty end-of-read packet! Header: " +
-                            trailer);
-    }
-  }
-
-  protected RemoteBlockReader2(String file, String bpid, long blockId,
-      DataChecksum checksum, boolean verifyChecksum,
-      long startOffset, long firstChunkOffset, long bytesToRead, Peer peer,
-      DatanodeID datanodeID, PeerCache peerCache) {
-    this.isLocal = DFSUtilClient.isLocalAddress(NetUtils.
-        createSocketAddr(datanodeID.getXferAddr()));
-    // Path is used only for printing block and file information in debug
-    this.peer = peer;
-    this.datanodeID = datanodeID;
-    this.in = peer.getInputStreamChannel();
-    this.checksum = checksum;
-    this.verifyChecksum = verifyChecksum;
-    this.startOffset = Math.max( startOffset, 0 );
-    this.filename = file;
-    this.peerCache = peerCache;
-    this.blockId = blockId;
-
-    // The total number of bytes that we need to transfer from the DN is
-    // the amount that the user wants (bytesToRead), plus the padding at
-    // the beginning in order to chunk-align. Note that the DN may elect
-    // to send more than this amount if the read starts/ends mid-chunk.
-    this.bytesNeededToFinish = bytesToRead + (startOffset - firstChunkOffset);
-    bytesPerChecksum = this.checksum.getBytesPerChecksum();
-    checksumSize = this.checksum.getChecksumSize();
-  }
-
-
-  @Override
-  public synchronized void close() throws IOException {
-    packetReceiver.close();
-    startOffset = -1;
-    checksum = null;
-    if (peerCache != null && sentStatusCode) {
-      peerCache.put(datanodeID, peer);
-    } else {
-      peer.close();
-    }
-
-    // in will be closed when its Socket is closed.
-  }
-  
-  /**
-   * When the reader reaches end of the read, it sends a status response
-   * (e.g. CHECKSUM_OK) to the DN. Failure to do so could lead to the DN
-   * closing our connection (which we will re-open), but won't affect
-   * data correctness.
-   */
-  void sendReadResult(Status statusCode) {
-    assert !sentStatusCode : "already sent status code to " + peer;
-    try {
-      writeReadResult(peer.getOutputStream(), statusCode);
-      sentStatusCode = true;
-    } catch (IOException e) {
-      // It's ok not to be able to send this. But something is probably wrong.
-      LOG.info("Could not send read status (" + statusCode + ") to datanode " +
-               peer.getRemoteAddressString() + ": " + e.getMessage());
-    }
-  }
-
-  /**
-   * Serialize the actual read result on the wire.
-   */
-  static void writeReadResult(OutputStream out, Status statusCode)
-      throws IOException {
-    
-    ClientReadStatusProto.newBuilder()
-      .setStatus(statusCode)
-      .build()
-      .writeDelimitedTo(out);
-
-    out.flush();
-  }
-  
-  /**
-   * File name to print when accessing a block directly (from servlets)
-   * @param s Address of the block location
-   * @param poolId Block pool ID of the block
-   * @param blockId Block ID of the block
-   * @return string that has a file name for debug purposes
-   */
-  public static String getFileName(final InetSocketAddress s,
-      final String poolId, final long blockId) {
-    return s.toString() + ":" + poolId + ":" + blockId;
-  }
-
-  @Override
-  public int readAll(byte[] buf, int offset, int len) throws IOException {
-    return BlockReaderUtil.readAll(this, buf, offset, len);
-  }
-
-  @Override
-  public void readFully(byte[] buf, int off, int len) throws IOException {
-    BlockReaderUtil.readFully(this, buf, off, len);
-  }
-  
-  /**
-   * Create a new BlockReader specifically to satisfy a read.
-   * This method also sends the OP_READ_BLOCK request.
-   *
-   * @param file  File location
-   * @param block  The block object
-   * @param blockToken  The block token for security
-   * @param startOffset  The read offset, relative to block head
-   * @param len  The number of bytes to read
-   * @param verifyChecksum  Whether to verify checksum
-   * @param clientName  Client name
-   * @param peer  The Peer to use
-   * @param datanodeID  The DatanodeID this peer is connected to
-   * @return New BlockReader instance, or null on error.
-   */
-  public static BlockReader newBlockReader(String file,
-                                     ExtendedBlock block,
-                                     Token<BlockTokenIdentifier> blockToken,
-                                     long startOffset, long len,
-                                     boolean verifyChecksum,
-                                     String clientName,
-                                     Peer peer, DatanodeID datanodeID,
-                                     PeerCache peerCache,
-                                     CachingStrategy cachingStrategy) throws IOException {
-    // in and out will be closed when sock is closed (by the caller)
-    final DataOutputStream out = new DataOutputStream(new BufferedOutputStream(
-          peer.getOutputStream()));
-    new Sender(out).readBlock(block, blockToken, clientName, startOffset, len,
-        verifyChecksum, cachingStrategy);
-
-    //
-    // Get bytes in block
-    //
-    DataInputStream in = new DataInputStream(peer.getInputStream());
-
-    BlockOpResponseProto status = BlockOpResponseProto.parseFrom(
-        PBHelperClient.vintPrefixed(in));
-    checkSuccess(status, peer, block, file);
-    ReadOpChecksumInfoProto checksumInfo =
-      status.getReadOpChecksumInfo();
-    DataChecksum checksum = DataTransferProtoUtil.fromProto(
-        checksumInfo.getChecksum());
-    //Warning when we get CHECKSUM_NULL?
-
-    // Read the first chunk offset.
-    long firstChunkOffset = checksumInfo.getChunkOffset();
-
-    if ( firstChunkOffset < 0 || firstChunkOffset > startOffset ||
-        firstChunkOffset <= (startOffset - checksum.getBytesPerChecksum())) {
-      throw new IOException("BlockReader: error in first chunk offset (" +
-                            firstChunkOffset + ") startOffset is " +
-                            startOffset + " for file " + file);
-    }
-
-    return new RemoteBlockReader2(file, block.getBlockPoolId(), block.getBlockId(),
-        checksum, verifyChecksum, startOffset, firstChunkOffset, len, peer,
-        datanodeID, peerCache);
-  }
-
-  static void checkSuccess(
-      BlockOpResponseProto status, Peer peer,
-      ExtendedBlock block, String file)
-      throws IOException {
-    String logInfo = "for OP_READ_BLOCK"
-      + ", self=" + peer.getLocalAddressString()
-      + ", remote=" + peer.getRemoteAddressString()
-      + ", for file " + file
-      + ", for pool " + block.getBlockPoolId()
-      + " block " + block.getBlockId() + "_" + block.getGenerationStamp();
-    DataTransferProtoUtil.checkBlockOpStatus(status, logInfo);
-  }
-  
-  @Override
-  public int available() throws IOException {
-    // An optimistic estimate of how much data is available
-    // to us without doing network I/O.
-    return DFSClient.TCP_WINDOW_SIZE;
-  }
-  
-  @Override
-  public boolean isLocal() {
-    return isLocal;
-  }
-  
-  @Override
-  public boolean isShortCircuit() {
-    return false;
-  }
-
-  @Override
-  public ClientMmap getClientMmap(EnumSet<ReadOption> opts) {
-    return null;
-  }
-}
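
One detail worth noting from the constructor above: the reader pads the
requested byte count so the transfer starts on a chunk boundary, i.e.
bytesNeededToFinish = bytesToRead + (startOffset - firstChunkOffset). A small
worked example, assuming 512-byte chunks (the concrete numbers are
illustrative):

    public class ChunkAlignmentMath {
      public static void main(String[] args) {
        long startOffset = 1000;     // first byte the caller actually wants
        long firstChunkOffset = 512; // start of the enclosing 512-byte chunk
        long bytesToRead = 500;      // caller-requested length
        // 500 requested bytes plus 488 bytes of leading padding = 988 total.
        long bytesNeededToFinish = bytesToRead + (startOffset - firstChunkOffset);
        System.out.println(bytesNeededToFinish); // 988
      }
    }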

http://git-wip-us.apache.org/repos/asf/hadoop/blob/826ae1c2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/PacketHeader.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/PacketHeader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/PacketHeader.java
deleted file mode 100644
index c9966a7..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/PacketHeader.java
+++ /dev/null
@@ -1,214 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.protocol.datatransfer;
-
-import java.io.DataInputStream;
-import java.io.DataOutputStream;
-import java.io.IOException;
-import java.nio.ByteBuffer;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto;
-import org.apache.hadoop.hdfs.util.ByteBufferOutputStream;
-
-import com.google.common.base.Preconditions;
-import com.google.common.primitives.Shorts;
-import com.google.common.primitives.Ints;
-import com.google.protobuf.InvalidProtocolBufferException;
-
-/**
- * Header data for each packet that goes through the read/write pipelines.
- * Includes all of the information about the packet, excluding checksums and
- * actual data.
- * 
- * This data includes:
- *  - the offset in bytes into the HDFS block of the data in this packet
- *  - the sequence number of this packet in the pipeline
- *  - whether or not this is the last packet in the pipeline
- *  - the length of the data in this packet
- *  - whether or not this packet should be synced by the DNs.
- *  
- * When serialized, this header is written out as a protocol buffer, preceded
- * by a 4-byte integer representing the full packet length, and a 2-byte short
- * representing the header length.
- */
-@InterfaceAudience.Private
-@InterfaceStability.Evolving
-public class PacketHeader {
-  private static final int MAX_PROTO_SIZE = 
-    PacketHeaderProto.newBuilder()
-      .setOffsetInBlock(0)
-      .setSeqno(0)
-      .setLastPacketInBlock(false)
-      .setDataLen(0)
-      .setSyncBlock(false)
-      .build().getSerializedSize();
-  public static final int PKT_LENGTHS_LEN =
-      Ints.BYTES + Shorts.BYTES;
-  public static final int PKT_MAX_HEADER_LEN =
-      PKT_LENGTHS_LEN + MAX_PROTO_SIZE;
-
-  private int packetLen;
-  private PacketHeaderProto proto;
-
-  public PacketHeader() {
-  }
-
-  public PacketHeader(int packetLen, long offsetInBlock, long seqno,
-                      boolean lastPacketInBlock, int dataLen, boolean syncBlock) {
-    this.packetLen = packetLen;
-    Preconditions.checkArgument(packetLen >= Ints.BYTES,
-        "packet len %s should always be at least 4 bytes",
-        packetLen);
-    
-    PacketHeaderProto.Builder builder = PacketHeaderProto.newBuilder()
-      .setOffsetInBlock(offsetInBlock)
-      .setSeqno(seqno)
-      .setLastPacketInBlock(lastPacketInBlock)
-      .setDataLen(dataLen);
-      
-    if (syncBlock) {
-      // Only set syncBlock if it is specified.
-      // This is wire-incompatible with Hadoop 2.0.0-alpha due to HDFS-3721
-      // because it changes the length of the packet header, and BlockReceiver
-      // in that version did not support variable-length headers.
-      builder.setSyncBlock(syncBlock);
-    }
-      
-    proto = builder.build();
-  }
-
-  public int getDataLen() {
-    return proto.getDataLen();
-  }
-
-  public boolean isLastPacketInBlock() {
-    return proto.getLastPacketInBlock();
-  }
-
-  public long getSeqno() {
-    return proto.getSeqno();
-  }
-
-  public long getOffsetInBlock() {
-    return proto.getOffsetInBlock();
-  }
-
-  public int getPacketLen() {
-    return packetLen;
-  }
-
-  public boolean getSyncBlock() {
-    return proto.getSyncBlock();
-  }
-
-  @Override
-  public String toString() {
-    return "PacketHeader with packetLen=" + packetLen +
-      " header data: " + 
-      proto.toString();
-  }
-  
-  public void setFieldsFromData(
-      int packetLen, byte[] headerData) throws InvalidProtocolBufferException {
-    this.packetLen = packetLen;
-    proto = PacketHeaderProto.parseFrom(headerData);
-  }
-  
-  public void readFields(ByteBuffer buf) throws IOException {
-    packetLen = buf.getInt();
-    short protoLen = buf.getShort();
-    byte[] data = new byte[protoLen];
-    buf.get(data);
-    proto = PacketHeaderProto.parseFrom(data);
-  }
-  
-  public void readFields(DataInputStream in) throws IOException {
-    this.packetLen = in.readInt();
-    short protoLen = in.readShort();
-    byte[] data = new byte[protoLen];
-    in.readFully(data);
-    proto = PacketHeaderProto.parseFrom(data);
-  }
-
-  /**
-   * @return the number of bytes necessary to write out this header,
-   * including the length-prefixing of the payload and header
-   */
-  public int getSerializedSize() {
-    return PKT_LENGTHS_LEN + proto.getSerializedSize();
-  }
-
-  /**
-   * Write the header into the buffer.
- * This requires that PKT_MAX_HEADER_LEN bytes are available.
-   */
-  public void putInBuffer(final ByteBuffer buf) {
-    assert proto.getSerializedSize() <= MAX_PROTO_SIZE
-      : "Expected " + (MAX_PROTO_SIZE) + " got: " + proto.getSerializedSize();
-    try {
-      buf.putInt(packetLen);
-      buf.putShort((short) proto.getSerializedSize());
-      proto.writeTo(new ByteBufferOutputStream(buf));
-    } catch (IOException e) {
-      throw new RuntimeException(e);
-    }
-  }
-  
-  public void write(DataOutputStream out) throws IOException {
-    assert proto.getSerializedSize() <= MAX_PROTO_SIZE
-    : "Expected " + (MAX_PROTO_SIZE) + " got: " + proto.getSerializedSize();
-    out.writeInt(packetLen);
-    out.writeShort(proto.getSerializedSize());
-    proto.writeTo(out);
-  }
-  
-  public byte[] getBytes() {
-    ByteBuffer buf = ByteBuffer.allocate(getSerializedSize());
-    putInBuffer(buf);
-    return buf.array();
-  }
-
-  /**
-   * Perform a sanity check on the packet, returning true if it is sane.
-   * @param lastSeqNo the previous sequence number received - we expect the current
-   * sequence number to be larger by 1.
-   */
-  public boolean sanityCheck(long lastSeqNo) {
-    // We should only have a non-positive data length for the last packet
-    if (proto.getDataLen() <= 0 && !proto.getLastPacketInBlock()) return false;
-    // The last packet should not contain data
-    if (proto.getLastPacketInBlock() && proto.getDataLen() != 0) return false;
-    // Seqnos should always increase by 1 with each packet received
-    if (proto.getSeqno() != lastSeqNo + 1) return false;
-    return true;
-  }
-
-  @Override
-  public boolean equals(Object o) {
-    if (!(o instanceof PacketHeader)) return false;
-    PacketHeader other = (PacketHeader)o;
-    return this.proto.equals(other.proto);
-  }
-
-  @Override
-  public int hashCode() {
-    return (int)proto.getSeqno();
-  }
-}

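The javadoc above pins the wire framing down exactly: a 4-byte packet length, then a 2-byte header length, then the protobuf-encoded header. As a minimal standalone sketch of that write path (class and method names here are invented for illustration; this is not the Hadoop implementation):

    import java.nio.ByteBuffer;

    public class PacketFramingSketch {
      // Frame a serialized header the way putInBuffer() does:
      // PLEN (32-bit int), HLEN (16-bit short), then the protobuf bytes.
      static ByteBuffer frame(int packetLen, byte[] protoHeader) {
        ByteBuffer buf = ByteBuffer.allocate(4 + 2 + protoHeader.length);
        buf.putInt(packetLen);
        buf.putShort((short) protoHeader.length);
        buf.put(protoHeader);
        buf.flip();
        return buf;
      }
    }
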
http://git-wip-us.apache.org/repos/asf/hadoop/blob/826ae1c2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/PacketReceiver.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/PacketReceiver.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/PacketReceiver.java
deleted file mode 100644
index 3045a13..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/PacketReceiver.java
+++ /dev/null
@@ -1,310 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.protocol.datatransfer;
-
-import java.io.Closeable;
-import java.io.DataOutputStream;
-import java.io.IOException;
-import java.io.InputStream;
-import java.nio.ByteBuffer;
-import java.nio.channels.ReadableByteChannel;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.util.DirectBufferPool;
-import org.apache.hadoop.io.IOUtils;
-
-import com.google.common.base.Preconditions;
-import com.google.common.primitives.Ints;
-
-/**
- * Class to handle reading packets one-at-a-time from the wire.
- * These packets are used both for reading and writing data to/from
- * DataNodes.
- */
-@InterfaceAudience.Private
-public class PacketReceiver implements Closeable {
-
-  /**
-   * The max size of any single packet. This prevents OOMEs when
-   * invalid data is sent.
-   */
-  private static final int MAX_PACKET_SIZE = 16 * 1024 * 1024;
-
-  static final Log LOG = LogFactory.getLog(PacketReceiver.class);
-  
-  private static final DirectBufferPool bufferPool = new DirectBufferPool();
-  private final boolean useDirectBuffers;
-
-  /**
-   * The entirety of the most recently read packet.
-   * The first PKT_LENGTHS_LEN bytes of this buffer are the
-   * length prefixes.
-   */
-  private ByteBuffer curPacketBuf = null;
-  
-  /**
-   * A slice of {@link #curPacketBuf} which contains just the checksums.
-   */
-  private ByteBuffer curChecksumSlice = null;
-  
-  /**
-   * A slice of {@link #curPacketBuf} which contains just the data.
-   */
-  private ByteBuffer curDataSlice = null;
-
-  /**
-   * The packet header of the most recently read packet.
-   */
-  private PacketHeader curHeader;
-  
-  public PacketReceiver(boolean useDirectBuffers) {
-    this.useDirectBuffers = useDirectBuffers;
-    reallocPacketBuf(PacketHeader.PKT_LENGTHS_LEN);
-  }
-
-  public PacketHeader getHeader() {
-    return curHeader;
-  }
-
-  public ByteBuffer getDataSlice() {
-    return curDataSlice;
-  }
-  
-  public ByteBuffer getChecksumSlice() {
-    return curChecksumSlice;
-  }
-
-  /**
-   * Reads all of the data for the next packet into the appropriate buffers.
-   * 
-   * The data slice and checksum slice members will be set to point to the
-   * user data and corresponding checksums. The header will be parsed and
-   * set.
-   */
-  public void receiveNextPacket(ReadableByteChannel in) throws IOException {
-    doRead(in, null);
-  }
-
-  /**
-   * @see #receiveNextPacket(ReadableByteChannel)
-   */
-  public void receiveNextPacket(InputStream in) throws IOException {
-    doRead(null, in);
-  }
-
-  private void doRead(ReadableByteChannel ch, InputStream in)
-      throws IOException {
-    // Each packet looks like:
-    //   PLEN    HLEN      HEADER     CHECKSUMS  DATA
-    //   32-bit  16-bit   <protobuf>  <variable length>
-    //
-    // PLEN:      Payload length
-    //            = length(PLEN) + length(CHECKSUMS) + length(DATA)
-    //            This length includes its own encoded length in
-    //            the sum for historical reasons.
-    //
-    // HLEN:      Header length
-    //            = length(HEADER)
-    //
-    // HEADER:    the actual packet header fields, encoded in protobuf
-    // CHECKSUMS: the crcs for the data chunk. May be missing if
-    //            checksums were not requested
-    // DATA       the actual block data
-    Preconditions.checkState(curHeader == null || !curHeader.isLastPacketInBlock());
-
-    curPacketBuf.clear();
-    curPacketBuf.limit(PacketHeader.PKT_LENGTHS_LEN);
-    doReadFully(ch, in, curPacketBuf);
-    curPacketBuf.flip();
-    int payloadLen = curPacketBuf.getInt();
-    
-    if (payloadLen < Ints.BYTES) {
-      // The "payload length" includes its own length. Therefore it
-      // should never be less than 4 bytes
-      throw new IOException("Invalid payload length " +
-          payloadLen);
-    }
-    int dataPlusChecksumLen = payloadLen - Ints.BYTES;
-    int headerLen = curPacketBuf.getShort();
-    if (headerLen < 0) {
-      throw new IOException("Invalid header length " + headerLen);
-    }
-    
-    if (LOG.isTraceEnabled()) {
-      LOG.trace("readNextPacket: dataPlusChecksumLen = " + dataPlusChecksumLen +
-          " headerLen = " + headerLen);
-    }
-    
-    // Sanity check the buffer size so we don't allocate too much memory
-    // and OOME.
-    int totalLen = payloadLen + headerLen;
-    if (totalLen < 0 || totalLen > MAX_PACKET_SIZE) {
-      throw new IOException("Incorrect value for packet payload size: " +
-                            payloadLen);
-    }
-
-    // Make sure we have space for the whole packet, and
-    // read it.
-    reallocPacketBuf(PacketHeader.PKT_LENGTHS_LEN +
-        dataPlusChecksumLen + headerLen);
-    curPacketBuf.clear();
-    curPacketBuf.position(PacketHeader.PKT_LENGTHS_LEN);
-    curPacketBuf.limit(PacketHeader.PKT_LENGTHS_LEN +
-        dataPlusChecksumLen + headerLen);
-    doReadFully(ch, in, curPacketBuf);
-    curPacketBuf.flip();
-    curPacketBuf.position(PacketHeader.PKT_LENGTHS_LEN);
-
-    // Extract the header from the front of the buffer (after the length prefixes)
-    byte[] headerBuf = new byte[headerLen];
-    curPacketBuf.get(headerBuf);
-    if (curHeader == null) {
-      curHeader = new PacketHeader();
-    }
-    curHeader.setFieldsFromData(payloadLen, headerBuf);
-    
-    // Compute the sub-slices of the packet
-    int checksumLen = dataPlusChecksumLen - curHeader.getDataLen();
-    if (checksumLen < 0) {
-      throw new IOException("Invalid packet: data length in packet header " + 
-          "exceeds data length received. dataPlusChecksumLen=" +
-          dataPlusChecksumLen + " header: " + curHeader); 
-    }
-    
-    reslicePacket(headerLen, checksumLen, curHeader.getDataLen());
-  }
-  
-  /**
-   * Rewrite the last-read packet on the wire to the given output stream.
-   */
-  public void mirrorPacketTo(DataOutputStream mirrorOut) throws IOException {
-    Preconditions.checkState(!useDirectBuffers,
-        "Currently only supported for non-direct buffers");
-    mirrorOut.write(curPacketBuf.array(),
-        curPacketBuf.arrayOffset(),
-        curPacketBuf.remaining());
-  }
-
-  
-  private static void doReadFully(ReadableByteChannel ch, InputStream in,
-      ByteBuffer buf) throws IOException {
-    if (ch != null) {
-      readChannelFully(ch, buf);
-    } else {
-      Preconditions.checkState(!buf.isDirect(),
-          "Must not use direct buffers with InputStream API");
-      IOUtils.readFully(in, buf.array(),
-          buf.arrayOffset() + buf.position(),
-          buf.remaining());
-      buf.position(buf.position() + buf.remaining());
-    }
-  }
-
-  private void reslicePacket(
-      int headerLen, int checksumsLen, int dataLen) {
-    // Packet structure (refer to doRead() for details):
-    //   PLEN    HLEN      HEADER     CHECKSUMS  DATA
-    //   32-bit  16-bit   <protobuf>  <variable length>
-    //   |--- lenThroughHeader ----|
-    //   |----------- lenThroughChecksums   ----|
-    //   |------------------- lenThroughData    ------| 
-    int lenThroughHeader = PacketHeader.PKT_LENGTHS_LEN + headerLen;
-    int lenThroughChecksums = lenThroughHeader + checksumsLen;
-    int lenThroughData = lenThroughChecksums + dataLen;
-
-    assert dataLen >= 0 : "invalid datalen: " + dataLen;
-    assert curPacketBuf.position() == lenThroughHeader;
-    assert curPacketBuf.limit() == lenThroughData :
-      "headerLen= " + headerLen + " clen=" + checksumsLen + " dlen=" + dataLen +
-      " rem=" + curPacketBuf.remaining();
-
-    // Slice the checksums.
-    curPacketBuf.position(lenThroughHeader);
-    curPacketBuf.limit(lenThroughChecksums);
-    curChecksumSlice = curPacketBuf.slice();
-
-    // Slice the data.
-    curPacketBuf.position(lenThroughChecksums);
-    curPacketBuf.limit(lenThroughData);
-    curDataSlice = curPacketBuf.slice();
-    
-    // Reset buffer to point to the entirety of the packet (including
-    // length prefixes)
-    curPacketBuf.position(0);
-    curPacketBuf.limit(lenThroughData);
-  }
-
-  
-  private static void readChannelFully(ReadableByteChannel ch, ByteBuffer buf)
-      throws IOException {
-    while (buf.remaining() > 0) {
-      int n = ch.read(buf);
-      if (n < 0) {
-        throw new IOException("Premature EOF reading from " + ch);
-      }
-    }
-  }
-  
-  private void reallocPacketBuf(int atLeastCapacity) {
-    // Realloc the buffer if this packet is longer than the previous
-    // one.
-    if (curPacketBuf == null ||
-        curPacketBuf.capacity() < atLeastCapacity) {
-      ByteBuffer newBuf;
-      if (useDirectBuffers) {
-        newBuf = bufferPool.getBuffer(atLeastCapacity);
-      } else {
-        newBuf = ByteBuffer.allocate(atLeastCapacity);
-      }
-      // If reallocing an existing buffer, copy the old packet length
-      // prefixes over
-      if (curPacketBuf != null) {
-        curPacketBuf.flip();
-        newBuf.put(curPacketBuf);
-      }
-      
-      returnPacketBufToPool();
-      curPacketBuf = newBuf;
-    }
-  }
-  
-  private void returnPacketBufToPool() {
-    if (curPacketBuf != null && curPacketBuf.isDirect()) {
-      bufferPool.returnBuffer(curPacketBuf);
-      curPacketBuf = null;
-    }
-  }
-
-  @Override // Closeable
-  public void close() {
-    returnPacketBufToPool();
-  }
-  
-  @Override
-  protected void finalize() throws Throwable {
-    try {
-      // just in case it didn't get closed, we
-      // may as well still try to return the buffer
-      returnPacketBufToPool();
-    } finally {
-      super.finalize();
-    }
-  }
-}

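The comment block in doRead() above is the authoritative description of the packet layout. A hedged sketch of parsing just the two length prefixes (invented names, no reslicing or direct-buffer handling, not the Hadoop implementation):

    import java.io.DataInputStream;
    import java.io.IOException;

    class PacketPrefixSketch {
      static byte[] readHeader(DataInputStream in) throws IOException {
        int payloadLen = in.readInt();    // PLEN; includes its own 4 bytes
        short headerLen = in.readShort(); // HLEN; protobuf header length
        if (payloadLen < 4 || headerLen < 0) {
          throw new IOException("Invalid packet length prefixes");
        }
        byte[] header = new byte[headerLen];
        in.readFully(header);             // CHECKSUMS and DATA would follow
        return header;
      }
    }
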
http://git-wip-us.apache.org/repos/asf/hadoop/blob/826ae1c2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/ByteBufferOutputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/ByteBufferOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/ByteBufferOutputStream.java
deleted file mode 100644
index 31d4dcc..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/ByteBufferOutputStream.java
+++ /dev/null
@@ -1,49 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.util;
-
-import java.io.IOException;
-import java.io.OutputStream;
-import java.nio.ByteBuffer;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-
-/**
- * OutputStream that writes into a {@link ByteBuffer}.
- */
-@InterfaceAudience.Private
-@InterfaceStability.Stable
-public class ByteBufferOutputStream extends OutputStream {
-
-  private final ByteBuffer buf;
-
-  public ByteBufferOutputStream(ByteBuffer buf) {
-    this.buf = buf;
-  }
-
-  @Override
-  public void write(int b) throws IOException {
-    buf.put((byte)b);
-  }
-
-  @Override
-  public void write(byte[] b, int off, int len) throws IOException {
-    buf.put(b, off, len);
-  }
-}
\ No newline at end of file

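This class exists so that protobuf's writeTo(OutputStream) can target a ByteBuffer directly, as PacketHeader.putInBuffer() does above. A trivial usage sketch, assuming the class as shown remains available on the classpath:

    import java.io.IOException;
    import java.nio.ByteBuffer;
    import org.apache.hadoop.hdfs.util.ByteBufferOutputStream;

    class ByteBufferOutputStreamDemo {
      public static void main(String[] args) throws IOException {
        ByteBuffer buf = ByteBuffer.allocate(16);
        new ByteBufferOutputStream(buf).write(new byte[] {1, 2, 3}, 0, 3);
        buf.flip(); // buf now exposes the three written bytes
      }
    }
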
http://git-wip-us.apache.org/repos/asf/hadoop/blob/826ae1c2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientBlockVerification.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientBlockVerification.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientBlockVerification.java
index 8dd3d6f..5ff343a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientBlockVerification.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientBlockVerification.java
@@ -24,10 +24,10 @@ import static org.mockito.Mockito.verify;
 
 import java.util.List;
 
-import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.log4j.Level;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
@@ -41,7 +41,7 @@ public class TestClientBlockVerification {
   static LocatedBlock testBlock = null;
 
   static {
-    ((Log4JLogger)RemoteBlockReader2.LOG).getLogger().setLevel(Level.ALL);
+    GenericTestUtils.setLogLevel(RemoteBlockReader2.LOG, Level.ALL);
   }
   @BeforeClass
   public static void setupCluster() throws Exception {


[34/50] [abbrv] hadoop git commit: HDFS-9009. Send metrics logs to NullAppender by default. (Arpit Agarwal)

Posted by vv...@apache.org.
HDFS-9009. Send metrics logs to NullAppender by default. (Arpit Agarwal)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/524ba870
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/524ba870
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/524ba870

Branch: refs/heads/YARN-3926
Commit: 524ba8708b8e3e17e806748e1f819dec2183bf94
Parents: 0ebc658
Author: Arpit Agarwal <ar...@apache.org>
Authored: Thu Sep 3 11:14:47 2015 -0700
Committer: Arpit Agarwal <ar...@apache.org>
Committed: Thu Sep 3 11:14:47 2015 -0700

----------------------------------------------------------------------
 .../hadoop-common/src/main/conf/log4j.properties                  | 3 ++-
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt                       | 2 ++
 2 files changed, 4 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/524ba870/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties b/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties
index 1012e20..c26fed4 100644
--- a/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties
+++ b/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties
@@ -152,7 +152,8 @@ log4j.appender.RFAAUDIT.MaxBackupIndex=${hdfs.audit.log.maxbackupindex}
 # NameNode metrics logging.
 # The default is to retain two namenode-metrics.log files up to 64MB each.
 #
-log4j.logger.NameNodeMetricsLog=INFO,NNMETRICSRFA
+namenode.metrics.logger=INFO,NullAppender
+log4j.logger.NameNodeMetricsLog=${namenode.metrics.logger}
 log4j.additivity.NameNodeMetricsLog=false
 log4j.appender.NNMETRICSRFA=org.apache.log4j.RollingFileAppender
 log4j.appender.NNMETRICSRFA.File=${hadoop.log.dir}/namenode-metrics.log

http://git-wip-us.apache.org/repos/asf/hadoop/blob/524ba870/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 275beb2..275dce2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1287,6 +1287,8 @@ Release 2.8.0 - UNRELEASED
     HDFS-8885. ByteRangeInputStream used in webhdfs does not override
     available(). (Shradha Revankar via aajisaka)
 
+    HDFS-9009. Send metrics logs to NullAppender by default. (Arpit Agarwal)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES


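Because the NNMETRICSRFA appender definition stays in log4j.properties and only the logger's default target changes, an operator who still wants file-based NameNode metrics logs should be able to restore the old behaviour with a one-line override (untested sketch):

    # Restore pre-HDFS-9009 behaviour: write metrics to namenode-metrics.log
    namenode.metrics.logger=INFO,NNMETRICSRFA
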
[27/50] [abbrv] hadoop git commit: HDFS-8890. Allow admin to specify which blockpools the balancer should run on. (Chris Trezzo via mingma)

Posted by vv...@apache.org.
HDFS-8890. Allow admin to specify which blockpools the balancer should run on. (Chris Trezzo via mingma)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d31a41c3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d31a41c3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d31a41c3

Branch: refs/heads/YARN-3926
Commit: d31a41c35927f02f2fb40d19380b5df4bb2b6d57
Parents: de928d5
Author: Ming Ma <mi...@apache.org>
Authored: Wed Sep 2 15:55:42 2015 -0700
Committer: Ming Ma <mi...@apache.org>
Committed: Wed Sep 2 15:55:42 2015 -0700

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |   3 +
 .../hadoop/hdfs/server/balancer/Balancer.java   |  82 ++++++---
 .../src/site/markdown/HDFSCommands.md           |   2 +
 .../hdfs/server/balancer/TestBalancer.java      |  43 ++++-
 .../TestBalancerWithMultipleNameNodes.java      | 179 ++++++++++++++++---
 5 files changed, 253 insertions(+), 56 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d31a41c3/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 7a685f5..e68c011 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -880,6 +880,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-328. Improve fs -setrep error message for invalid replication factors.
     (Daniel Templeton via wang)
 
+    HDFS-8890. Allow admin to specify which blockpools the balancer should run
+    on. (Chris Trezzo via mingma)
+
   OPTIMIZATIONS
 
     HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d31a41c3/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
index fe6e4c3..259b280 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
@@ -179,6 +179,8 @@ public class Balancer {
       + "\tExcludes the specified datanodes."
       + "\n\t[-include [-f <hosts-file> | <comma-separated list of hosts>]]"
       + "\tIncludes only the specified datanodes."
+      + "\n\t[-blockpools <comma-separated list of blockpool ids>]"
+      + "\tThe balancer will only run on blockpools included in this list."
       + "\n\t[-idleiterations <idleiterations>]"
       + "\tNumber of consecutive idle iterations (-1 for Infinite) before "
       + "exit."
@@ -652,22 +654,27 @@ public class Balancer {
         done = true;
         Collections.shuffle(connectors);
         for(NameNodeConnector nnc : connectors) {
-          final Balancer b = new Balancer(nnc, p, conf);
-          final Result r = b.runOneIteration();
-          r.print(iteration, System.out);
-
-          // clean all lists
-          b.resetData(conf);
-          if (r.exitStatus == ExitStatus.IN_PROGRESS) {
-            done = false;
-          } else if (r.exitStatus != ExitStatus.SUCCESS) {
-            //must be an error status, return.
-            return r.exitStatus.getExitCode();
-          }
-        }
+          if (p.blockpools.size() == 0
+              || p.blockpools.contains(nnc.getBlockpoolID())) {
+            final Balancer b = new Balancer(nnc, p, conf);
+            final Result r = b.runOneIteration();
+            r.print(iteration, System.out);
+
+            // clean all lists
+            b.resetData(conf);
+            if (r.exitStatus == ExitStatus.IN_PROGRESS) {
+              done = false;
+            } else if (r.exitStatus != ExitStatus.SUCCESS) {
+              // must be an error status, return.
+              return r.exitStatus.getExitCode();
+            }
 
-        if (!done) {
-          Thread.sleep(sleeptime);
+            if (!done) {
+              Thread.sleep(sleeptime);
+            }
+          } else {
+            LOG.info("Skipping blockpool " + nnc.getBlockpoolID());
+          }
         }
       }
     } finally {
@@ -699,12 +706,12 @@ public class Balancer {
   }
 
   static class Parameters {
-    static final Parameters DEFAULT = new Parameters(
-        BalancingPolicy.Node.INSTANCE, 10.0,
-        NameNodeConnector.DEFAULT_MAX_IDLE_ITERATIONS,
-        Collections.<String>emptySet(), Collections.<String>emptySet(),
-        Collections.<String>emptySet(),
-        false);
+    static final Parameters DEFAULT =
+        new Parameters(BalancingPolicy.Node.INSTANCE, 10.0,
+            NameNodeConnector.DEFAULT_MAX_IDLE_ITERATIONS,
+            Collections.<String> emptySet(), Collections.<String> emptySet(),
+            Collections.<String> emptySet(), Collections.<String> emptySet(),
+            false);
 
     final BalancingPolicy policy;
     final double threshold;
@@ -718,19 +725,25 @@ public class Balancer {
      */
     final Set<String> sourceNodes;
     /**
+     * A set of block pools to run the balancer on.
+     */
+    final Set<String> blockpools;
+    /**
      * Whether to run the balancer during upgrade.
      */
     final boolean runDuringUpgrade;
 
     Parameters(BalancingPolicy policy, double threshold, int maxIdleIteration,
         Set<String> excludedNodes, Set<String> includedNodes,
-        Set<String> sourceNodes, boolean runDuringUpgrade) {
+        Set<String> sourceNodes, Set<String> blockpools,
+        boolean runDuringUpgrade) {
       this.policy = policy;
       this.threshold = threshold;
       this.maxIdleIteration = maxIdleIteration;
       this.excludedNodes = excludedNodes;
       this.includedNodes = includedNodes;
       this.sourceNodes = sourceNodes;
+      this.blockpools = blockpools;
       this.runDuringUpgrade = runDuringUpgrade;
     }
 
@@ -742,10 +755,11 @@ public class Balancer {
               + " #excluded nodes = %s,"
               + " #included nodes = %s,"
               + " #source nodes = %s,"
+              + " #blockpools = %s,"
               + " run during upgrade = %s]",
-          Balancer.class.getSimpleName(), getClass().getSimpleName(),
-          policy, threshold, maxIdleIteration,
-          excludedNodes.size(), includedNodes.size(), sourceNodes.size(),
+          Balancer.class.getSimpleName(), getClass().getSimpleName(), policy,
+          threshold, maxIdleIteration, excludedNodes.size(),
+          includedNodes.size(), sourceNodes.size(), blockpools.size(),
           runDuringUpgrade);
     }
   }
@@ -789,6 +803,7 @@ public class Balancer {
       Set<String> excludedNodes = Parameters.DEFAULT.excludedNodes;
       Set<String> includedNodes = Parameters.DEFAULT.includedNodes;
       Set<String> sourceNodes = Parameters.DEFAULT.sourceNodes;
+      Set<String> blockpools = Parameters.DEFAULT.blockpools;
       boolean runDuringUpgrade = Parameters.DEFAULT.runDuringUpgrade;
 
       if (args != null) {
@@ -828,6 +843,14 @@ public class Balancer {
             } else if ("-source".equalsIgnoreCase(args[i])) {
               sourceNodes = new HashSet<>();
               i = processHostList(args, i, "source", sourceNodes);
+            } else if ("-blockpools".equalsIgnoreCase(args[i])) {
+              checkArgument(
+                  ++i < args.length,
+                  "blockpools value is missing: args = "
+                      + Arrays.toString(args));
+              blockpools = parseBlockPoolList(args[i]);
+              LOG.info("Balancer will run on the following blockpools: "
+                  + blockpools.toString());
             } else if ("-idleiterations".equalsIgnoreCase(args[i])) {
               checkArgument(++i < args.length,
                   "idleiterations value is missing: args = " + Arrays
@@ -853,8 +876,8 @@ public class Balancer {
         }
       }
       
-      return new Parameters(policy, threshold, maxIdleIteration,
-          excludedNodes, includedNodes, sourceNodes, runDuringUpgrade);
+      return new Parameters(policy, threshold, maxIdleIteration, excludedNodes,
+          includedNodes, sourceNodes, blockpools, runDuringUpgrade);
     }
 
     private static int processHostList(String[] args, int i, String type,
@@ -881,6 +904,11 @@ public class Balancer {
       return i;
     }
 
+    private static Set<String> parseBlockPoolList(String string) {
+      String[] addrs = StringUtils.getTrimmedStrings(string);
+      return new HashSet<String>(Arrays.asList(addrs));
+    }
+
     private static void printUsage(PrintStream out) {
       out.println(USAGE + "\n");
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d31a41c3/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
index 1c2026c..8bbcbb8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
@@ -266,6 +266,7 @@ Usage:
               [-policy <policy>]
               [-exclude [-f <hosts-file> | <comma-separated list of hosts>]]
               [-include [-f <hosts-file> | <comma-separated list of hosts>]]
+              [-blockpools <comma-separated list of blockpool ids>]
               [-idleiterations <idleiterations>]
 
 | COMMAND\_OPTION | Description |
@@ -274,6 +275,7 @@ Usage:
 | `-threshold` \<threshold\> | Percentage of disk capacity. This overwrites the default threshold. |
 | `-exclude -f` \<hosts-file\> \| \<comma-separated list of hosts\> | Excludes the specified datanodes from being balanced by the balancer. |
 | `-include -f` \<hosts-file\> \| \<comma-separated list of hosts\> | Includes only the specified datanodes to be balanced by the balancer. |
+| `-blockpools` \<comma-separated list of blockpool ids\> | The balancer will only run on blockpools included in this list. |
 | `-idleiterations` \<iterations\> | Maximum number of idle iterations before exit. This overwrites the default idleiterations(5). |
 
 Runs a cluster balancing utility. An administrator can simply press Ctrl-C to stop the rebalancing process. See [Balancer](./HdfsUserGuide.html#Balancer) for more details.

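A hypothetical invocation of the new option (the blockpool ids below are placeholders):

    hdfs balancer -blockpools bp-1,bp-2
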
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d31a41c3/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
index c1ed758..b0223d2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
@@ -644,7 +644,7 @@ public class TestBalancer {
             Balancer.Parameters.DEFAULT.maxIdleIteration,
             nodes.getNodesToBeExcluded(), nodes.getNodesToBeIncluded(),
             Balancer.Parameters.DEFAULT.sourceNodes,
-            false);
+            Balancer.Parameters.DEFAULT.blockpools, false);
       }
 
       int expectedExcludedNodes = 0;
@@ -885,7 +885,7 @@ public class TestBalancer {
           Balancer.Parameters.DEFAULT.maxIdleIteration,
           datanodes, Balancer.Parameters.DEFAULT.includedNodes,
           Balancer.Parameters.DEFAULT.sourceNodes,
-          false);
+          Balancer.Parameters.DEFAULT.blockpools, false);
       final int r = Balancer.run(namenodes, p, conf);
       assertEquals(ExitStatus.SUCCESS.getExitCode(), r);
     } finally {
@@ -1067,6 +1067,34 @@ public class TestBalancer {
     } catch (IllegalArgumentException e) {
 
     }
+
+    parameters = new String[] { "-blockpools" };
+    try {
+      Balancer.Cli.parse(parameters);
+      fail("IllegalArgumentException is expected when a value "
+          + "is not specified for the blockpool flag");
+    } catch (IllegalArgumentException e) {
+
+    }
+  }
+
+  @Test
+  public void testBalancerCliParseBlockpools() {
+    String[] parameters = new String[] { "-blockpools", "bp-1,bp-2,bp-3" };
+    Balancer.Parameters p = Balancer.Cli.parse(parameters);
+    assertEquals(3, p.blockpools.size());
+
+    parameters = new String[] { "-blockpools", "bp-1" };
+    p = Balancer.Cli.parse(parameters);
+    assertEquals(1, p.blockpools.size());
+
+    parameters = new String[] { "-blockpools", "bp-1,,bp-2" };
+    p = Balancer.Cli.parse(parameters);
+    assertEquals(3, p.blockpools.size());
+
+    parameters = new String[] { "-blockpools", "bp-1," };
+    p = Balancer.Cli.parse(parameters);
+    assertEquals(1, p.blockpools.size());
   }
 
   /**
@@ -1385,7 +1413,7 @@ public class TestBalancer {
               Parameters.DEFAULT.excludedNodes,
               Parameters.DEFAULT.includedNodes,
               Parameters.DEFAULT.sourceNodes,
-              true);
+              Balancer.Parameters.DEFAULT.blockpools, true);
       assertEquals(ExitStatus.SUCCESS.getExitCode(),
           Balancer.run(namenodes, runDuringUpgrade, conf));
 
@@ -1588,7 +1616,8 @@ public class TestBalancer {
             BalancingPolicy.Node.INSTANCE, 1,
             NameNodeConnector.DEFAULT_MAX_IDLE_ITERATIONS,
             Collections.<String> emptySet(), Collections.<String> emptySet(),
-            Collections.<String> emptySet(), false);
+            Collections.<String> emptySet(),
+            Balancer.Parameters.DEFAULT.blockpools, false);
 
         conf.setLong(DFSConfigKeys.DFS_BALANCER_GETBLOCKS_MIN_BLOCK_SIZE_KEY, 50);
         final int r = Balancer.run(namenodes, p, conf);
@@ -1607,7 +1636,7 @@ public class TestBalancer {
           BalancingPolicy.Node.INSTANCE, 1,
           NameNodeConnector.DEFAULT_MAX_IDLE_ITERATIONS,
           Collections.<String> emptySet(), Collections.<String> emptySet(),
-          sourceNodes, false);
+          sourceNodes, Balancer.Parameters.DEFAULT.blockpools, false);
 
         conf.setLong(DFSConfigKeys.DFS_BALANCER_GETBLOCKS_MIN_BLOCK_SIZE_KEY, 50);
         final int r = Balancer.run(namenodes, p, conf);
@@ -1622,7 +1651,7 @@ public class TestBalancer {
           BalancingPolicy.Node.INSTANCE, 1,
           NameNodeConnector.DEFAULT_MAX_IDLE_ITERATIONS,
           Collections.<String> emptySet(), Collections.<String> emptySet(),
-          sourceNodes, false);
+          sourceNodes, Balancer.Parameters.DEFAULT.blockpools, false);
 
         conf.setLong(DFSConfigKeys.DFS_BALANCER_GETBLOCKS_MIN_BLOCK_SIZE_KEY, 1);
         final int r = Balancer.run(namenodes, p, conf);
@@ -1639,7 +1668,7 @@ public class TestBalancer {
           BalancingPolicy.Node.INSTANCE, 1,
           NameNodeConnector.DEFAULT_MAX_IDLE_ITERATIONS,
           Collections.<String> emptySet(), Collections.<String> emptySet(),
-          sourceNodes, false);
+          sourceNodes, Balancer.Parameters.DEFAULT.blockpools, false);
 
         conf.setLong(DFSConfigKeys.DFS_BALANCER_GETBLOCKS_MIN_BLOCK_SIZE_KEY, 1);
         final int r = Balancer.run(namenodes, p, conf);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d31a41c3/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithMultipleNameNodes.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithMultipleNameNodes.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithMultipleNameNodes.java
index f51757c..b07ad89 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithMultipleNameNodes.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithMultipleNameNodes.java
@@ -21,8 +21,13 @@ import java.io.IOException;
 import java.net.URI;
 import java.util.Arrays;
 import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
 import java.util.List;
+import java.util.Map;
 import java.util.Random;
+import java.util.Set;
 import java.util.concurrent.TimeoutException;
 
 import org.apache.commons.logging.Log;
@@ -42,6 +47,8 @@ import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport;
+import org.apache.hadoop.hdfs.server.protocol.StorageReport;
 import org.apache.log4j.Level;
 import org.junit.Assert;
 import org.junit.Test;
@@ -60,6 +67,7 @@ public class TestBalancerWithMultipleNameNodes {
   private static final long CAPACITY = 500L;
   private static final String RACK0 = "/rack0";
   private static final String RACK1 = "/rack1";
+  private static final String RACK2 = "/rack2";
 
   private static final String FILE_NAME = "/tmp.txt";
   private static final Path FILE_PATH = new Path(FILE_NAME);
@@ -76,16 +84,20 @@ public class TestBalancerWithMultipleNameNodes {
     final MiniDFSCluster cluster;
     final ClientProtocol[] clients;
     final short replication;
-    
+    final Balancer.Parameters parameters;
+
     Suite(MiniDFSCluster cluster, final int nNameNodes, final int nDataNodes,
-        Configuration conf) throws IOException {
+        Balancer.Parameters parameters, Configuration conf) throws IOException {
       this.conf = conf;
       this.cluster = cluster;
       clients = new ClientProtocol[nNameNodes];
       for(int i = 0; i < nNameNodes; i++) {
         clients[i] = cluster.getNameNode(i).getRpcServer();
       }
-      replication = (short)Math.max(1, nDataNodes - 1);
+      // hard coding replication factor to 1 so logical and raw HDFS size are
+      // equal
+      replication = 1;
+      this.parameters = parameters;
     }
   }
 
@@ -104,11 +116,9 @@ public class TestBalancerWithMultipleNameNodes {
       ) throws IOException, InterruptedException, TimeoutException {
     final ExtendedBlock[][] blocks = new ExtendedBlock[s.clients.length][];
     for(int n = 0; n < s.clients.length; n++) {
-      final long fileLen = size/s.replication;
-      createFile(s, n, fileLen);
-
-      final List<LocatedBlock> locatedBlocks = s.clients[n].getBlockLocations(
-          FILE_NAME, 0, fileLen).getLocatedBlocks();
+      createFile(s, n, size);
+      final List<LocatedBlock> locatedBlocks =
+          s.clients[n].getBlockLocations(FILE_NAME, 0, size).getLocatedBlocks();
 
       final int numOfBlocks = locatedBlocks.size();
       blocks[n] = new ExtendedBlock[numOfBlocks];
@@ -151,9 +161,14 @@ public class TestBalancerWithMultipleNameNodes {
     wait(s.clients, totalUsed, totalCapacity);
     LOG.info("BALANCER 1");
 
+    // get storage reports for relevant blockpools so that we can compare
+    // blockpool usages after balancer has run
+    Map<Integer, DatanodeStorageReport[]> preBalancerPoolUsages =
+        getStorageReports(s);
+
     // start rebalancing
     final Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(s.conf);
-    final int r = Balancer.run(namenodes, Balancer.Parameters.DEFAULT, s.conf);
+    final int r = Balancer.run(namenodes, s.parameters, s.conf);
     Assert.assertEquals(ExitStatus.SUCCESS.getExitCode(), r);
 
     LOG.info("BALANCER 2");
@@ -189,7 +204,7 @@ public class TestBalancerWithMultipleNameNodes {
       balanced = true;
       for(int d = 0; d < used.length; d++) {
         final double p = used[d]*100.0/cap[d];
-        balanced = p <= avg + Balancer.Parameters.DEFAULT.threshold;
+        balanced = p <= avg + s.parameters.threshold;
         if (!balanced) {
           if (i % 100 == 0) {
             LOG.warn("datanodes " + d + " is not yet balanced: "
@@ -203,6 +218,89 @@ public class TestBalancerWithMultipleNameNodes {
       }
     }
     LOG.info("BALANCER 6");
+    // cluster is balanced, verify that only selected blockpools were touched
+    Map<Integer, DatanodeStorageReport[]> postBalancerPoolUsages =
+        getStorageReports(s);
+    Assert.assertEquals(preBalancerPoolUsages.size(),
+        postBalancerPoolUsages.size());
+    for (Map.Entry<Integer, DatanodeStorageReport[]> entry
+        : preBalancerPoolUsages.entrySet()) {
+      compareTotalPoolUsage(entry.getValue(),
+          postBalancerPoolUsages.get(entry.getKey()));
+    }
+  }
+
+  /**
+   * Compare the total blockpool usage on each datanode to ensure that nothing
+   * was balanced.
+   *
+   * @param preReports storage reports from pre balancer run
+   * @param postReports storage reports from post balancer run
+   */
+  private static void compareTotalPoolUsage(DatanodeStorageReport[] preReports,
+      DatanodeStorageReport[] postReports) {
+    Assert.assertNotNull(preReports);
+    Assert.assertNotNull(postReports);
+    Assert.assertEquals(preReports.length, postReports.length);
+    for (DatanodeStorageReport preReport : preReports) {
+      String dnUuid = preReport.getDatanodeInfo().getDatanodeUuid();
+      for(DatanodeStorageReport postReport : postReports) {
+        if(postReport.getDatanodeInfo().getDatanodeUuid().equals(dnUuid)) {
+          Assert.assertEquals(getTotalPoolUsage(preReport),
+              getTotalPoolUsage(postReport));
+          LOG.info("Comparision of datanode pool usage pre/post balancer run. "
+              + "PrePoolUsage: " + getTotalPoolUsage(preReport)
+              + ", PostPoolUsage: " + getTotalPoolUsage(postReport));
+          break;
+        }
+      }
+    }
+  }
+
+  private static long getTotalPoolUsage(DatanodeStorageReport report) {
+    long usage = 0L;
+    for (StorageReport sr : report.getStorageReports()) {
+      usage += sr.getBlockPoolUsed();
+    }
+    return usage;
+  }
+
+  /**
+   * Get the storage reports for all blockpools that were not specified by the
+   * balancer blockpool parameters. If none were specified, the parameter
+   * was not set, so no reports are returned.
+   *
+   * @param s suite for the test
+   * @return a map of storage reports where the key is the blockpool index
+   * @throws IOException
+   */
+  private static Map<Integer,
+    DatanodeStorageReport[]> getStorageReports(Suite s) throws IOException {
+    Map<Integer, DatanodeStorageReport[]> reports =
+        new HashMap<Integer, DatanodeStorageReport[]>();
+    if (s.parameters.blockpools.size() == 0) {
+      // the blockpools parameter was not set, so we don't need to track any
+      // blockpools.
+      return Collections.emptyMap();
+    }
+    for (int i = 0; i < s.clients.length; i++) {
+      if (s.parameters.blockpools.contains(s.cluster.getNamesystem(i)
+          .getBlockPoolId())) {
+        // we want to ensure that blockpools not specified by the balancer
+        // parameters were left alone. Therefore, if the pool was specified,
+        // skip it. Note: this code assumes the clients in the suite are ordered
+        // the same way that they are indexed via cluster#getNamesystem(index).
+        continue;
+      } else {
+        LOG.info("Tracking usage of blockpool id: "
+            + s.cluster.getNamesystem(i).getBlockPoolId());
+        reports.put(i,
+            s.clients[i].getDatanodeStorageReport(DatanodeReportType.LIVE));
+      }
+    }
+    LOG.info("Tracking " + reports.size()
+        + " blockpool(s) for pre/post balancer usage.");
+    return reports;
   }
 
   private static void sleep(long ms) {
@@ -220,25 +318,31 @@ public class TestBalancerWithMultipleNameNodes {
   }
 
   /**
-   * First start a cluster and fill the cluster up to a certain size.
-   * Then redistribute blocks according the required distribution.
-   * Finally, balance the cluster.
-   * 
+   * First start a cluster and fill the cluster up to a certain size. Then
+   * redistribute blocks according the required distribution. Finally, balance
+   * the cluster.
+   *
    * @param nNameNodes Number of NameNodes
-   * @param distributionPerNN The distribution for each NameNode. 
+   * @param nNameNodesToBalance Number of NameNodes to run the balancer on
+   * @param distributionPerNN The distribution for each NameNode.
    * @param capacities Capacities of the datanodes
    * @param racks Rack names
    * @param conf Configuration
    */
   private void unevenDistribution(final int nNameNodes,
-      long distributionPerNN[], long capacities[], String[] racks,
-      Configuration conf) throws Exception {
+      final int nNameNodesToBalance, long distributionPerNN[],
+      long capacities[], String[] racks, Configuration conf) throws Exception {
     LOG.info("UNEVEN 0");
     final int nDataNodes = distributionPerNN.length;
     if (capacities.length != nDataNodes || racks.length != nDataNodes) {
       throw new IllegalArgumentException("Array length is not the same");
     }
 
+    if (nNameNodesToBalance > nNameNodes) {
+      throw new IllegalArgumentException("Number of namenodes to balance is "
+          + "greater than the number of namenodes.");
+    }
+
     // calculate total space that need to be filled
     final long usedSpacePerNN = TestBalancer.sum(distributionPerNN);
 
@@ -248,7 +352,7 @@ public class TestBalancerWithMultipleNameNodes {
       LOG.info("UNEVEN 1");
       final MiniDFSCluster cluster = new MiniDFSCluster
           .Builder(new Configuration(conf))
-          .nnTopology(MiniDFSNNTopology.simpleFederatedTopology(2))
+              .nnTopology(MiniDFSNNTopology.simpleFederatedTopology(nNameNodes))
           .numDataNodes(nDataNodes)
           .racks(racks)
           .simulatedCapacities(capacities)
@@ -258,7 +362,7 @@ public class TestBalancerWithMultipleNameNodes {
         cluster.waitActive();
         DFSTestUtil.setFederatedConfiguration(cluster, conf);
         LOG.info("UNEVEN 3");
-        final Suite s = new Suite(cluster, nNameNodes, nDataNodes, conf);
+        final Suite s = new Suite(cluster, nNameNodes, nDataNodes, null, conf);
         blocks = generateBlocks(s, usedSpacePerNN);
         LOG.info("UNEVEN 4");
       } finally {
@@ -280,7 +384,20 @@ public class TestBalancerWithMultipleNameNodes {
       try {
         cluster.waitActive();
         LOG.info("UNEVEN 12");
-        final Suite s = new Suite(cluster, nNameNodes, nDataNodes, conf);
+        Set<String> blockpools = new HashSet<String>();
+        for (int i = 0; i < nNameNodesToBalance; i++) {
+          blockpools.add(cluster.getNamesystem(i).getBlockPoolId());
+        }
+        Balancer.Parameters params =
+            new Balancer.Parameters(Balancer.Parameters.DEFAULT.policy,
+                Balancer.Parameters.DEFAULT.threshold,
+                Balancer.Parameters.DEFAULT.maxIdleIteration,
+                Balancer.Parameters.DEFAULT.excludedNodes,
+                Balancer.Parameters.DEFAULT.includedNodes,
+                Balancer.Parameters.DEFAULT.sourceNodes, blockpools,
+                Balancer.Parameters.DEFAULT.runDuringUpgrade);
+        final Suite s =
+            new Suite(cluster, nNameNodes, nDataNodes, params, conf);
         for(int n = 0; n < nNameNodes; n++) {
           // redistribute blocks
           final Block[][] blocksDN = TestBalancer.distributeBlocks(
@@ -336,7 +453,9 @@ public class TestBalancerWithMultipleNameNodes {
     try {
       cluster.waitActive();
       LOG.info("RUN_TEST 1");
-      final Suite s = new Suite(cluster, nNameNodes, nDataNodes, conf);
+      final Suite s =
+          new Suite(cluster, nNameNodes, nDataNodes,
+              Balancer.Parameters.DEFAULT, conf);
       long totalCapacity = TestBalancer.sum(capacities);
 
       LOG.info("RUN_TEST 2");
@@ -378,10 +497,26 @@ public class TestBalancerWithMultipleNameNodes {
   @Test
   public void testUnevenDistribution() throws Exception {
     final Configuration conf = createConf();
-    unevenDistribution(2,
+    unevenDistribution(2, 2,
         new long[] {30*CAPACITY/100, 5*CAPACITY/100},
         new long[]{CAPACITY, CAPACITY},
         new String[] {RACK0, RACK1},
         conf);
   }
+
+  @Test
+  public void testBalancing1OutOf2Blockpools() throws Exception {
+    final Configuration conf = createConf();
+    unevenDistribution(2, 1, new long[] { 30 * CAPACITY / 100,
+        5 * CAPACITY / 100 }, new long[] { CAPACITY, CAPACITY }, new String[] {
+        RACK0, RACK1 }, conf);
+  }
+
+  @Test
+  public void testBalancing2OutOf3Blockpools() throws Exception {
+    final Configuration conf = createConf();
+    unevenDistribution(3, 2, new long[] { 30 * CAPACITY / 100,
+        5 * CAPACITY / 100, 10 * CAPACITY / 100 }, new long[] { CAPACITY,
+        CAPACITY, CAPACITY }, new String[] { RACK0, RACK1, RACK2 }, conf);
+  }
 }


[11/50] [abbrv] hadoop git commit: HADOOP-12368. Mark ViewFileSystemBaseTest and ViewFsBaseTest as abstract.

Posted by vv...@apache.org.
HADOOP-12368. Mark ViewFileSystemBaseTest and ViewFsBaseTest as abstract.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7ad3556e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7ad3556e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7ad3556e

Branch: refs/heads/YARN-3926
Commit: 7ad3556ed38560585579172aa68356f37b2288c8
Parents: 24f6a7c
Author: Andrew Wang <wa...@apache.org>
Authored: Mon Aug 31 18:17:14 2015 -0700
Committer: Andrew Wang <wa...@apache.org>
Committed: Mon Aug 31 18:17:14 2015 -0700

----------------------------------------------------------------------
 hadoop-common-project/hadoop-common/CHANGES.txt                   | 3 +++
 .../java/org/apache/hadoop/fs/viewfs/ViewFileSystemBaseTest.java  | 2 +-
 .../src/test/java/org/apache/hadoop/fs/viewfs/ViewFsBaseTest.java | 2 +-
 3 files changed, 5 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ad3556e/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 95eb677..0f52d22 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -756,6 +756,9 @@ Release 2.8.0 - UNRELEASED
     HADOOP-12325. RPC Metrics : Add the ability track and log slow RPCs.
     (Anu Engineer via xyao)
 
+    HADOOP-12368. Mark ViewFileSystemBaseTest and ViewFsBaseTest as abstract.
+    (wang)
+
   OPTIMIZATIONS
 
     HADOOP-11785. Reduce the number of listStatus operation in distcp

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ad3556e/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFileSystemBaseTest.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFileSystemBaseTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFileSystemBaseTest.java
index 7fad990..ea4d9b1 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFileSystemBaseTest.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFileSystemBaseTest.java
@@ -77,7 +77,7 @@ import org.junit.Test;
  * </p>
  */
 
-public class ViewFileSystemBaseTest {
+abstract public class ViewFileSystemBaseTest {
   FileSystem fsView;  // the view file system - the mounts are here
   FileSystem fsTarget;  // the target file system - the mount will point here
   Path targetTestRoot;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ad3556e/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFsBaseTest.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFsBaseTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFsBaseTest.java
index d8ab539..ceebb26 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFsBaseTest.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFsBaseTest.java
@@ -76,7 +76,7 @@ import org.mockito.Mockito;
  *     @AfterClass    public static void ClusterShutdownAtEnd()
  * </p>
  */
-public class ViewFsBaseTest {
+abstract public class ViewFsBaseTest {
   FileContext fcView; // the view file system - the mounts are here
   FileContext fcTarget; // the target file system - the mount will point here
   Path targetTestRoot;


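Concrete, filesystem-specific suites extend these bases; with the classes now abstract, JUnit can no longer try to instantiate and run them directly. A hypothetical subclass, purely for illustration (the name is not part of this patch):

    public class TestViewFileSystemOverLocalFs extends ViewFileSystemBaseTest {
      // A concrete subclass supplies fsTarget / targetTestRoot in its own
      // @Before setup; only such subclasses are runnable as tests now.
    }
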
[02/50] [abbrv] hadoop git commit: HDFS-8155. Support OAuth2 in WebHDFS.

Posted by vv...@apache.org.
HDFS-8155. Support OAuth2 in WebHDFS.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/837fb75e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/837fb75e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/837fb75e

Branch: refs/heads/YARN-3926
Commit: 837fb75e8e03b2f016bcea2f4605106a5022491c
Parents: 6ab2d19
Author: Jakob Homan <jg...@gmail.com>
Authored: Sat Aug 29 18:37:05 2015 -0700
Committer: Jakob Homan <jg...@gmail.com>
Committed: Sat Aug 29 18:37:05 2015 -0700

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs-client/pom.xml  |   5 +
 .../hdfs/client/HdfsClientConfigKeys.java       |   8 +
 .../hadoop/hdfs/web/URLConnectionFactory.java   |  30 ++-
 .../hadoop/hdfs/web/WebHdfsFileSystem.java      |  15 +-
 .../hdfs/web/oauth2/AccessTokenProvider.java    |  66 ++++++
 .../hdfs/web/oauth2/AccessTokenTimer.java       | 103 +++++++++
 .../ConfCredentialBasedAccessTokenProvider.java |  62 ++++++
 ...onfRefreshTokenBasedAccessTokenProvider.java | 146 +++++++++++++
 .../CredentialBasedAccessTokenProvider.java     | 135 ++++++++++++
 .../oauth2/OAuth2ConnectionConfigurator.java    |  79 +++++++
 .../hadoop/hdfs/web/oauth2/OAuth2Constants.java |  46 ++++
 .../apache/hadoop/hdfs/web/oauth2/Utils.java    |  63 ++++++
 .../hadoop/hdfs/web/oauth2/package-info.java    |  26 +++
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |   2 +
 hadoop-hdfs-project/hadoop-hdfs/pom.xml         |   6 +
 .../hadoop-hdfs/src/site/markdown/WebHDFS.md    |  25 +++
 .../hadoop/hdfs/web/TestWebHDFSOAuth2.java      | 216 +++++++++++++++++++
 .../hdfs/web/oauth2/TestAccessTokenTimer.java   |  63 ++++++
 ...ClientCredentialTimeBasedTokenRefresher.java | 138 ++++++++++++
 ...TestRefreshTokenTimeBasedTokenRefresher.java | 138 ++++++++++++
 20 files changed, 1369 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/837fb75e/hadoop-hdfs-project/hadoop-hdfs-client/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/pom.xml b/hadoop-hdfs-project/hadoop-hdfs-client/pom.xml
index aeaa980..68bd289 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/pom.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/pom.xml
@@ -32,6 +32,11 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
 
   <dependencies>
     <dependency>
+      <groupId>com.squareup.okhttp</groupId>
+      <artifactId>okhttp</artifactId>
+      <version>2.4.0</version>
+    </dependency>
+    <dependency>
       <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-common</artifactId>
       <scope>provided</scope>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/837fb75e/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
index 7b1e438..96bc8d3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
@@ -36,6 +36,14 @@ public interface HdfsClientConfigKeys {
   String DFS_WEBHDFS_ACL_PERMISSION_PATTERN_DEFAULT =
       "^(default:)?(user|group|mask|other):[[A-Za-z_][A-Za-z0-9._-]]*:([rwx-]{3})?(,(default:)?(user|group|mask|other):[[A-Za-z_][A-Za-z0-9._-]]*:([rwx-]{3})?)*$";
 
+  String DFS_WEBHDFS_OAUTH_ENABLED_KEY = "dfs.webhdfs.oauth2.enabled";
+  boolean DFS_WEBHDFS_OAUTH_ENABLED_DEFAULT = false;
+
+  String OAUTH_CLIENT_ID_KEY = "dfs.webhdfs.oauth2.client.id";
+  String OAUTH_REFRESH_URL_KEY = "dfs.webhdfs.oauth2.refresh.url";
+
+  String ACCESS_TOKEN_PROVIDER_KEY = "dfs.webhdfs.oauth2.access.token.provider";
+
   String PREFIX = "dfs.client.";
   String  DFS_NAMESERVICES = "dfs.nameservices";
   int     DFS_NAMENODE_HTTP_PORT_DEFAULT = 50070;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/837fb75e/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/URLConnectionFactory.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/URLConnectionFactory.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/URLConnectionFactory.java
index a5e02f2..4c23241 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/URLConnectionFactory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/URLConnectionFactory.java
@@ -31,6 +31,7 @@ import javax.net.ssl.SSLSocketFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.web.oauth2.OAuth2ConnectionConfigurator;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.authentication.client.AuthenticatedURL;
 import org.apache.hadoop.security.authentication.client.AuthenticationException;
@@ -77,15 +78,42 @@ public class URLConnectionFactory {
    * try to load SSL certificates when it is specified.
    */
   public static URLConnectionFactory newDefaultURLConnectionFactory(Configuration conf) {
+    ConnectionConfigurator conn = getSSLConnectionConfiguration(conf);
+
+    return new URLConnectionFactory(conn);
+  }
+
+  private static ConnectionConfigurator
+      getSSLConnectionConfiguration(Configuration conf) {
     ConnectionConfigurator conn = null;
     try {
       conn = newSslConnConfigurator(DEFAULT_SOCKET_TIMEOUT, conf);
     } catch (Exception e) {
       LOG.debug(
-          "Cannot load customized ssl related configuration. Fallback to system-generic settings.",
+          "Cannot load customized ssl related configuration. Fallback to" +
+              " system-generic settings.",
           e);
       conn = DEFAULT_TIMEOUT_CONN_CONFIGURATOR;
     }
+
+    return conn;
+  }
+
+  /**
+   * Construct a new URLConnectionFactory that supports OAuth2-based connections.
+   * It will also try to load the SSL configuration when it is specified.
+   */
+  public static URLConnectionFactory
+      newOAuth2URLConnectionFactory(Configuration conf) throws IOException {
+    ConnectionConfigurator conn = null;
+    try {
+      ConnectionConfigurator sslConnConfigurator
+          = newSslConnConfigurator(DEFAULT_SOCKET_TIMEOUT, conf);
+
+      conn = new OAuth2ConnectionConfigurator(conf, sslConnConfigurator);
+    } catch (Exception e) {
+      throw new IOException("Unable to load OAuth2 connection factory.", e);
+    }
     return new URLConnectionFactory(conn);
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/837fb75e/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
index ee5238a..a75e78f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
@@ -149,8 +149,19 @@ public class WebHdfsFileSystem extends FileSystem
         HdfsClientConfigKeys.DFS_WEBHDFS_USER_PATTERN_KEY,
         HdfsClientConfigKeys.DFS_WEBHDFS_USER_PATTERN_DEFAULT));
 
-    connectionFactory = URLConnectionFactory
-        .newDefaultURLConnectionFactory(conf);
+    boolean isOAuth = conf.getBoolean(
+        HdfsClientConfigKeys.DFS_WEBHDFS_OAUTH_ENABLED_KEY,
+        HdfsClientConfigKeys.DFS_WEBHDFS_OAUTH_ENABLED_DEFAULT);
+
+    if(isOAuth) {
+      LOG.info("Enabling OAuth2 in WebHDFS");
+      connectionFactory = URLConnectionFactory
+          .newOAuth2URLConnectionFactory(conf);
+    } else {
+      LOG.info("Not enabling OAuth2 in WebHDFS");
+      connectionFactory = URLConnectionFactory
+          .newDefaultURLConnectionFactory(conf);
+    }
 
 
     ugi = UserGroupInformation.getCurrentUser();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/837fb75e/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/oauth2/AccessTokenProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/oauth2/AccessTokenProvider.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/oauth2/AccessTokenProvider.java
new file mode 100644
index 0000000..99e153d
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/oauth2/AccessTokenProvider.java
@@ -0,0 +1,66 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.hadoop.hdfs.web.oauth2;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.conf.Configurable;
+import org.apache.hadoop.conf.Configuration;
+
+import java.io.IOException;
+
+/**
+ * Provide an OAuth2 access token to be used to authenticate HTTP calls in
+ * WebHDFS.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public abstract class AccessTokenProvider implements Configurable {
+  private Configuration conf;
+
+  /**
+   * Obtain the access token that should be added to http connection's header.
+   * Will be called for each connection, so implementations should be
+   * performant. Implementations are responsible for any refreshing of
+   * the token.
+   * 
+   * @return Access token to be added to connection header.
+   */
+  abstract String getAccessToken() throws IOException;
+
+  /**
+   * Return the conf.
+   *
+   * @return the conf.
+   */
+  @Override
+  public Configuration getConf() {
+    return conf;
+  }
+
+  /**
+   * Set the conf.
+   *
+   * @param configuration  New configuration.
+   */
+  @Override
+  public void setConf(Configuration configuration) {
+    this.conf = configuration;
+  }
+}

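A note on extending this plug point: because getAccessToken() is declared package-private above, a custom implementation currently has to live in the org.apache.hadoop.hdfs.web.oauth2 package. A minimal sketch of such a provider (StaticAccessTokenProvider and the configuration key it reads are hypothetical, for illustration only):

    package org.apache.hadoop.hdfs.web.oauth2;

    import java.io.IOException;

    /**
     * Hypothetical provider returning a fixed token from the configuration.
     * Illustrates the plug point only; real providers must refresh the token.
     */
    public class StaticAccessTokenProvider extends AccessTokenProvider {
      @Override
      String getAccessToken() throws IOException {
        // "example.static.access.token" is a made-up key for this sketch.
        String token = getConf().get("example.static.access.token");
        if (token == null) {
          throw new IOException("No static access token configured");
        }
        return token;
      }
    }
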
http://git-wip-us.apache.org/repos/asf/hadoop/blob/837fb75e/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/oauth2/AccessTokenTimer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/oauth2/AccessTokenTimer.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/oauth2/AccessTokenTimer.java
new file mode 100644
index 0000000..aa05dd6
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/oauth2/AccessTokenTimer.java
@@ -0,0 +1,103 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.hadoop.hdfs.web.oauth2;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.util.Timer;
+
+/**
+ * Access tokens generally expire.  This timer helps keep track of that.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public class AccessTokenTimer {
+  public static final long EXPIRE_BUFFER_MS = 30 * 1000L;
+
+  private final Timer timer;
+
+  /**
+   * When the current access token will expire in milliseconds since
+   * epoch.
+   */
+  private long nextRefreshMSSinceEpoch;
+
+  public AccessTokenTimer() {
+    this(new Timer());
+  }
+
+  /**
+   * 
+   * @param timer Timer instance for unit testing
+   */
+  public AccessTokenTimer(Timer timer) {
+    this.timer = timer;
+    this.nextRefreshMSSinceEpoch = 0;
+  }
+
+  /** 
+   * Set when the access token will expire, as reported by the OAuth server,
+   * i.e. in seconds from now.
+   * @param expiresIn Access token expiration as reported by the OAuth server
+   */
+  public void setExpiresIn(String expiresIn) {
+    this.nextRefreshMSSinceEpoch = convertExpiresIn(timer, expiresIn);
+  }
+
+  /**
+   * Set when the access token will expire, in milliseconds from epoch,
+   * as required by the WebHDFS configuration.  This format is awkward,
+   * but it is what the configuration expects.
+   * 
+   * @param expiresInMSSinceEpoch Access token expiration in ms since epoch.
+   */
+  public void setExpiresInMSSinceEpoch(String expiresInMSSinceEpoch){
+    this.nextRefreshMSSinceEpoch = Long.parseLong(expiresInMSSinceEpoch);
+  }
+
+  /**
+   * Get next time we should refresh the token.
+   * 
+   * @return Next time since epoch we'll need to refresh the token.
+   */
+  public long getNextRefreshMSSinceEpoch() {
+    return nextRefreshMSSinceEpoch;
+  }
+  
+  /**
+   * Return true if the current token has expired or will expire within the
+   * EXPIRE_BUFFER_MS (to give ample wiggle room for the call to be made to
+   * the server).
+   */
+  public boolean shouldRefresh() {
+    long lowerLimit = nextRefreshMSSinceEpoch - EXPIRE_BUFFER_MS;
+    long currTime = timer.now();
+    return currTime > lowerLimit;
+  }
+  
+  /**
+   * The expires_in param from OAuth is in seconds-from-now.  Convert it to
+   * milliseconds-from-epoch.
+   */
+  static Long convertExpiresIn(Timer timer, String expiresInSecs) {
+    long expiresSecs = Long.parseLong(expiresInSecs);
+    long expiresMs = expiresSecs * 1000;
+    return timer.now() + expiresMs;
+  }
+  
+}

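A quick sketch of the timer's semantics, for readers wiring it into a provider: expires_in arrives from the OAuth server as seconds-from-now and is stored as milliseconds-since-epoch, and shouldRefresh() trips EXPIRE_BUFFER_MS (30 seconds) before expiry so the refresh call has time to complete.

    import org.apache.hadoop.hdfs.web.oauth2.AccessTokenTimer;

    public class AccessTokenTimerDemo {
      public static void main(String[] args) {
        AccessTokenTimer timer = new AccessTokenTimer();
        timer.setExpiresIn("3600");  // server says: valid for one hour
        System.out.println("Refresh at (ms since epoch): "
            + timer.getNextRefreshMSSinceEpoch());
        // False now; becomes true 30 seconds before the hour is up.
        System.out.println("Refresh now? " + timer.shouldRefresh());
      }
    }
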
http://git-wip-us.apache.org/repos/asf/hadoop/blob/837fb75e/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/oauth2/ConfCredentialBasedAccessTokenProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/oauth2/ConfCredentialBasedAccessTokenProvider.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/oauth2/ConfCredentialBasedAccessTokenProvider.java
new file mode 100644
index 0000000..b56dbde
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/oauth2/ConfCredentialBasedAccessTokenProvider.java
@@ -0,0 +1,62 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.hadoop.hdfs.web.oauth2;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.util.Timer;
+
+import static org.apache.hadoop.hdfs.web.oauth2.Utils.notNull;
+
+/**
+ * Obtain an access token via a credential (provided through the
+ * Configuration) using the
+ * <a href="https://tools.ietf.org/html/rfc6749#section-4.4">
+ *   Client Credentials Grant workflow</a>.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public class ConfCredentialBasedAccessTokenProvider
+    extends CredentialBasedAccessTokenProvider {
+  private String credential;
+
+  public ConfCredentialBasedAccessTokenProvider() {
+  }
+
+  public ConfCredentialBasedAccessTokenProvider(Timer timer) {
+    super(timer);
+  }
+
+  @Override
+  public void setConf(Configuration conf) {
+    super.setConf(conf);
+    credential = notNull(conf, OAUTH_CREDENTIAL_KEY);
+  }
+
+  @Override
+  public String getCredential() {
+    if(credential == null) {
+      throw new IllegalArgumentException("Credential has not been " +
+          "provided in configuration");
+    }
+    
+    return credential;
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/837fb75e/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/oauth2/ConfRefreshTokenBasedAccessTokenProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/oauth2/ConfRefreshTokenBasedAccessTokenProvider.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/oauth2/ConfRefreshTokenBasedAccessTokenProvider.java
new file mode 100644
index 0000000..1e80451
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/oauth2/ConfRefreshTokenBasedAccessTokenProvider.java
@@ -0,0 +1,146 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.hadoop.hdfs.web.oauth2;
+
+import com.squareup.okhttp.OkHttpClient;
+import com.squareup.okhttp.Request;
+import com.squareup.okhttp.RequestBody;
+import com.squareup.okhttp.Response;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.web.URLConnectionFactory;
+import org.apache.hadoop.util.Timer;
+import org.apache.http.HttpStatus;
+import org.codehaus.jackson.map.ObjectMapper;
+
+import java.io.IOException;
+import java.util.Map;
+import java.util.concurrent.TimeUnit;
+
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.OAUTH_CLIENT_ID_KEY;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.OAUTH_REFRESH_URL_KEY;
+import static org.apache.hadoop.hdfs.web.oauth2.OAuth2Constants.ACCESS_TOKEN;
+import static org.apache.hadoop.hdfs.web.oauth2.OAuth2Constants.CLIENT_ID;
+import static org.apache.hadoop.hdfs.web.oauth2.OAuth2Constants.EXPIRES_IN;
+import static org.apache.hadoop.hdfs.web.oauth2.OAuth2Constants.GRANT_TYPE;
+import static org.apache.hadoop.hdfs.web.oauth2.OAuth2Constants.REFRESH_TOKEN;
+import static org.apache.hadoop.hdfs.web.oauth2.OAuth2Constants.URLENCODED;
+import static org.apache.hadoop.hdfs.web.oauth2.Utils.notNull;
+
+/**
+ * Supply an access token obtained via a refresh token (provided through the
+ * Configuration), using the second half of the
+ * <a href="https://tools.ietf.org/html/rfc6749#section-4.1">
+ *   Authorization Code Grant workflow</a>.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public class ConfRefreshTokenBasedAccessTokenProvider
+    extends AccessTokenProvider {
+  
+  public static final String OAUTH_REFRESH_TOKEN_KEY
+      = "dfs.webhdfs.oauth2.refresh.token";
+  public static final String OAUTH_REFRESH_TOKEN_EXPIRES_KEY
+      = "dfs.webhdfs.oauth2.refresh.token.expires.ms.since.epoch";
+
+  private AccessTokenTimer accessTokenTimer;
+  
+  private String accessToken;
+  
+  private String refreshToken;
+  
+  private String clientId;
+  
+  private String refreshURL;
+
+  
+  public ConfRefreshTokenBasedAccessTokenProvider() {
+    this.accessTokenTimer = new AccessTokenTimer();
+  }
+  
+  public ConfRefreshTokenBasedAccessTokenProvider(Timer timer) {
+    this.accessTokenTimer = new AccessTokenTimer(timer);
+  }
+  
+  @Override
+  public void setConf(Configuration conf) {
+    super.setConf(conf);
+    refreshToken = notNull(conf, (OAUTH_REFRESH_TOKEN_KEY));
+    
+    accessTokenTimer.setExpiresInMSSinceEpoch(
+        notNull(conf, OAUTH_REFRESH_TOKEN_EXPIRES_KEY));
+
+    clientId = notNull(conf, OAUTH_CLIENT_ID_KEY);
+    refreshURL = notNull(conf, OAUTH_REFRESH_URL_KEY);
+    
+  }
+
+  @Override
+  public synchronized String getAccessToken() throws IOException {
+    if(accessTokenTimer.shouldRefresh()) {
+      refresh();
+    }
+    
+    return accessToken;
+  }
+  
+  void refresh() throws IOException {
+    try {
+      OkHttpClient client = new OkHttpClient();
+      client.setConnectTimeout(URLConnectionFactory.DEFAULT_SOCKET_TIMEOUT,
+          TimeUnit.MILLISECONDS);
+      client.setReadTimeout(URLConnectionFactory.DEFAULT_SOCKET_TIMEOUT,
+                TimeUnit.MILLISECONDS);
+
+      String bodyString = Utils.postBody(GRANT_TYPE, REFRESH_TOKEN,
+          REFRESH_TOKEN, refreshToken,
+          CLIENT_ID, clientId);
+
+      RequestBody body = RequestBody.create(URLENCODED, bodyString);
+
+      Request request = new Request.Builder()
+          .url(refreshURL)
+          .post(body)
+          .build();
+      Response responseBody = client.newCall(request).execute();
+
+      if (responseBody.code() != HttpStatus.SC_OK) {
+        throw new IllegalArgumentException("Received invalid http response: "
+            + responseBody.code() + ", text = " + responseBody.toString());
+      }
+
+      ObjectMapper mapper = new ObjectMapper();
+      Map<?, ?> response = mapper.reader(Map.class)
+          .readValue(responseBody.body().string());
+
+
+      String newExpiresIn = response.get(EXPIRES_IN).toString();
+      accessTokenTimer.setExpiresIn(newExpiresIn);
+
+      accessToken = response.get(ACCESS_TOKEN).toString();
+    } catch (Exception e) {
+      throw new IOException("Exception while refreshing access token", e);
+    }
+  }
+  
+  public String getRefreshToken() {
+    return refreshToken;
+  }
+}

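For the refresh-token flow above, the client needs four values in its Configuration. A sketch of the wiring (all values are placeholders; with the expiry set in the past, the provider refreshes on the first getAccessToken() call, per the logic shown above):

    import org.apache.hadoop.conf.Configuration;

    public class RefreshTokenConfigSketch {
      public static Configuration build() {
        Configuration conf = new Configuration();
        conf.setBoolean("dfs.webhdfs.oauth2.enabled", true);
        conf.set("dfs.webhdfs.oauth2.access.token.provider",
            "org.apache.hadoop.hdfs.web.oauth2.ConfRefreshTokenBasedAccessTokenProvider");
        conf.set("dfs.webhdfs.oauth2.client.id", "example-client-id");         // placeholder
        conf.set("dfs.webhdfs.oauth2.refresh.url",
            "https://oauth.example.com/token");                                // placeholder
        conf.set("dfs.webhdfs.oauth2.refresh.token", "example-refresh-token"); // placeholder
        // An expiry in the past forces a refresh on the first getAccessToken() call.
        conf.set("dfs.webhdfs.oauth2.refresh.token.expires.ms.since.epoch", "0");
        return conf;
      }
    }
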
http://git-wip-us.apache.org/repos/asf/hadoop/blob/837fb75e/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/oauth2/CredentialBasedAccessTokenProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/oauth2/CredentialBasedAccessTokenProvider.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/oauth2/CredentialBasedAccessTokenProvider.java
new file mode 100644
index 0000000..c058e05
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/oauth2/CredentialBasedAccessTokenProvider.java
@@ -0,0 +1,135 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.hadoop.hdfs.web.oauth2;
+
+import com.squareup.okhttp.OkHttpClient;
+import com.squareup.okhttp.Request;
+import com.squareup.okhttp.RequestBody;
+import com.squareup.okhttp.Response;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.web.URLConnectionFactory;
+import org.apache.hadoop.util.Timer;
+import org.apache.http.HttpStatus;
+import org.codehaus.jackson.map.ObjectMapper;
+
+import java.io.IOException;
+import java.util.Map;
+import java.util.concurrent.TimeUnit;
+
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.OAUTH_CLIENT_ID_KEY;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.OAUTH_REFRESH_URL_KEY;
+import static org.apache.hadoop.hdfs.web.oauth2.OAuth2Constants.ACCESS_TOKEN;
+import static org.apache.hadoop.hdfs.web.oauth2.OAuth2Constants.CLIENT_CREDENTIALS;
+import static org.apache.hadoop.hdfs.web.oauth2.OAuth2Constants.CLIENT_ID;
+import static org.apache.hadoop.hdfs.web.oauth2.OAuth2Constants.CLIENT_SECRET;
+import static org.apache.hadoop.hdfs.web.oauth2.OAuth2Constants.EXPIRES_IN;
+import static org.apache.hadoop.hdfs.web.oauth2.OAuth2Constants.GRANT_TYPE;
+import static org.apache.hadoop.hdfs.web.oauth2.OAuth2Constants.URLENCODED;
+import static org.apache.hadoop.hdfs.web.oauth2.Utils.notNull;
+
+/**
+ * Obtain an access token via the credential-based OAuth2 workflow.  This
+ * abstract class requires only that implementations provide the credential,
+ * which the class then uses to obtain an access token.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public abstract class CredentialBasedAccessTokenProvider
+    extends AccessTokenProvider {
+  public static final String OAUTH_CREDENTIAL_KEY
+      = "dfs.webhdfs.oauth2.credential";
+  
+  private AccessTokenTimer timer;
+  
+  private String clientId;
+  
+  private String refreshURL;
+  
+  private String accessToken;
+  
+  private boolean initialCredentialObtained = false;
+
+  CredentialBasedAccessTokenProvider() {
+    this.timer = new AccessTokenTimer();
+  }
+  
+  CredentialBasedAccessTokenProvider(Timer timer) {
+    this.timer = new AccessTokenTimer(timer);
+  }
+  
+  abstract String getCredential();
+
+  @Override
+  public void setConf(Configuration conf) {
+    super.setConf(conf);
+    clientId = notNull(conf, OAUTH_CLIENT_ID_KEY);
+    refreshURL = notNull(conf, OAUTH_REFRESH_URL_KEY);
+  }
+
+  @Override
+  public synchronized String getAccessToken() throws IOException {
+    if(timer.shouldRefresh() || !initialCredentialObtained) {
+      refresh();
+      initialCredentialObtained = true;
+    }
+    
+    return accessToken;
+  }
+  
+  void refresh() throws IOException {
+    try {
+      OkHttpClient client = new OkHttpClient();
+      client.setConnectTimeout(URLConnectionFactory.DEFAULT_SOCKET_TIMEOUT,
+          TimeUnit.MILLISECONDS);
+      client.setReadTimeout(URLConnectionFactory.DEFAULT_SOCKET_TIMEOUT,
+          TimeUnit.MILLISECONDS);
+
+      String bodyString = Utils.postBody(CLIENT_SECRET, getCredential(),
+          GRANT_TYPE, CLIENT_CREDENTIALS,
+          CLIENT_ID, clientId);
+
+      RequestBody body = RequestBody.create(URLENCODED, bodyString);
+
+      Request request = new Request.Builder()
+          .url(refreshURL)
+          .post(body)
+          .build();
+      Response responseBody = client.newCall(request).execute();
+
+      if (responseBody.code() != HttpStatus.SC_OK) {
+        throw new IllegalArgumentException("Received invalid http response: "
+            + responseBody.code() + ", text = " + responseBody.toString());
+      }
+
+      ObjectMapper mapper = new ObjectMapper();
+      Map<?, ?> response = mapper.reader(Map.class)
+          .readValue(responseBody.body().string());
+      
+      String newExpiresIn = response.get(EXPIRES_IN).toString();
+      timer.setExpiresIn(newExpiresIn);
+
+      accessToken = response.get(ACCESS_TOKEN).toString();
+
+    } catch (Exception e) {
+      throw new IOException("Unable to obtain access token from credential", e);
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/837fb75e/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/oauth2/OAuth2ConnectionConfigurator.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/oauth2/OAuth2ConnectionConfigurator.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/oauth2/OAuth2ConnectionConfigurator.java
new file mode 100644
index 0000000..f334b24
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/oauth2/OAuth2ConnectionConfigurator.java
@@ -0,0 +1,79 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.hadoop.hdfs.web.oauth2;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.security.authentication.client.ConnectionConfigurator;
+import org.apache.hadoop.util.ReflectionUtils;
+
+import java.io.IOException;
+import java.net.HttpURLConnection;
+
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.ACCESS_TOKEN_PROVIDER_KEY;
+import static org.apache.hadoop.hdfs.web.oauth2.Utils.notNull;
+
+/**
+ * Configure a connection to use OAuth2 authentication.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public class OAuth2ConnectionConfigurator implements ConnectionConfigurator {
+  
+  public static final String HEADER = "Bearer ";
+  
+  private final AccessTokenProvider accessTokenProvider;
+ 
+  private ConnectionConfigurator sslConfigurator = null;
+  
+  public OAuth2ConnectionConfigurator(Configuration conf) {
+    this(conf, null);
+  }
+  
+  @SuppressWarnings("unchecked")
+  public OAuth2ConnectionConfigurator(Configuration conf,
+                                      ConnectionConfigurator sslConfigurator) {
+    this.sslConfigurator = sslConfigurator;
+    
+    notNull(conf, ACCESS_TOKEN_PROVIDER_KEY);
+    
+    Class accessTokenProviderClass = conf.getClass(ACCESS_TOKEN_PROVIDER_KEY,
+        ConfCredentialBasedAccessTokenProvider.class,
+        AccessTokenProvider.class);
+    
+    accessTokenProvider = (AccessTokenProvider) ReflectionUtils
+        .newInstance(accessTokenProviderClass, conf);
+    accessTokenProvider.setConf(conf);
+  }
+  
+  @Override
+  public HttpURLConnection configure(HttpURLConnection conn)
+      throws IOException {
+    if(sslConfigurator != null) {
+      sslConfigurator.configure(conn);
+    }
+    
+    String accessToken = accessTokenProvider.getAccessToken();
+    
+    conn.setRequestProperty("AUTHORIZATION", HEADER + accessToken);
+    
+    return conn;
+  }
+}

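The configurator above is what ultimately stamps the token onto each request. A minimal usage sketch, assuming conf already carries the dfs.webhdfs.oauth2.* keys described in WebHDFS.md below (the URL is a placeholder):

    import java.net.HttpURLConnection;
    import java.net.URL;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.web.oauth2.OAuth2ConnectionConfigurator;

    public class OAuth2ConfiguratorSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Assumes dfs.webhdfs.oauth2.access.token.provider and the keys the
        // chosen provider needs are set; otherwise the constructor throws.
        OAuth2ConnectionConfigurator configurator =
            new OAuth2ConnectionConfigurator(conf);
        HttpURLConnection conn = (HttpURLConnection) new URL(
            "http://namenode.example.com:50070/webhdfs/v1/?op=LISTSTATUS")
            .openConnection();
        // Adds "AUTHORIZATION: Bearer <access token>" before the request is sent.
        configurator.configure(conn);
      }
    }
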
http://git-wip-us.apache.org/repos/asf/hadoop/blob/837fb75e/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/oauth2/OAuth2Constants.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/oauth2/OAuth2Constants.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/oauth2/OAuth2Constants.java
new file mode 100644
index 0000000..190a1f5
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/oauth2/OAuth2Constants.java
@@ -0,0 +1,46 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.hadoop.hdfs.web.oauth2;
+
+import com.squareup.okhttp.MediaType;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+/**
+ * Sundry constants relating to OAuth2 within WebHDFS.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public final class OAuth2Constants {
+  private OAuth2Constants() { /** Private constructor. **/ }
+
+  public static final MediaType URLENCODED
+      = MediaType.parse("application/x-www-form-urlencoded; charset=utf-8");
+  
+  /* Constants for OAuth protocol */ 
+  public static final String ACCESS_TOKEN = "access_token";
+  public static final String BEARER = "bearer";
+  public static final String CLIENT_CREDENTIALS = "client_credentials";
+  public static final String CLIENT_ID = "client_id";
+  public static final String CLIENT_SECRET = "client_secret";
+  public static final String EXPIRES_IN = "expires_in";
+  public static final String GRANT_TYPE = "grant_type";
+  public static final String REFRESH_TOKEN = "refresh_token";
+  public static final String TOKEN_TYPE = "token_type";
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/837fb75e/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/oauth2/Utils.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/oauth2/Utils.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/oauth2/Utils.java
new file mode 100644
index 0000000..939798d
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/oauth2/Utils.java
@@ -0,0 +1,63 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.hadoop.hdfs.web.oauth2;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.conf.Configuration;
+
+import java.io.UnsupportedEncodingException;
+import java.net.URLEncoder;
+
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+final class Utils {
+  private Utils() { /* Private constructor */ }
+
+  public static String notNull(Configuration conf, String key) {
+    String value = conf.get(key);
+
+    if(value == null) {
+      throw new IllegalArgumentException("No value for " + key +
+          " found in conf file.");
+    }
+
+    return value;
+  }
+  
+  public static String postBody(String ... kv)
+      throws UnsupportedEncodingException {
+    if(kv.length % 2 != 0) {
+      throw new IllegalArgumentException("Arguments must be key value pairs");
+    }
+    StringBuilder sb = new StringBuilder();
+    int i = 0;
+    
+    while(i < kv.length) {
+      if(i > 0) {
+        sb.append("&");
+      }
+      sb.append(URLEncoder.encode(kv[i++], "UTF-8"));
+      sb.append("=");
+      sb.append(URLEncoder.encode(kv[i++], "UTF-8"));
+    }
+    
+    return sb.toString();
+  }
+}

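A small demonstration of postBody(); since Utils is package-private, the demo has to sit in the same package. The key/value strings here are illustrative:

    package org.apache.hadoop.hdfs.web.oauth2;

    public class PostBodyDemo {
      public static void main(String[] args) throws Exception {
        // Prints "grant_type=refresh_token&refresh_token=abc%2F123&client_id=my-id":
        // pairs are joined with '&' and '=', each side URL-encoded.
        System.out.println(Utils.postBody(
            "grant_type", "refresh_token",
            "refresh_token", "abc/123",
            "client_id", "my-id"));
      }
    }
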
http://git-wip-us.apache.org/repos/asf/hadoop/blob/837fb75e/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/oauth2/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/oauth2/package-info.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/oauth2/package-info.java
new file mode 100644
index 0000000..aeb581f
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/oauth2/package-info.java
@@ -0,0 +1,26 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/**
+ * OAuth2-based WebHDFS authentication.
+ */
+@InterfaceAudience.Public
+package org.apache.hadoop.hdfs.web.oauth2;
+
+import org.apache.hadoop.classification.InterfaceAudience;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/837fb75e/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 6f46ea5..3382f81 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -357,6 +357,8 @@ Release 2.8.0 - UNRELEASED
     HDFS-8131. Implement a space balanced block placement policy (Liu Shaohui
     via kihwal)
 
+    HDFS-8155. Support OAuth2 in WebHDFS. (jghoman)
+
   IMPROVEMENTS
 
     HDFS-2390. dfsadmin -setBalancerBandwidth does not validate -ve value

http://git-wip-us.apache.org/repos/asf/hadoop/blob/837fb75e/hadoop-hdfs-project/hadoop-hdfs/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/pom.xml b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
index 1a29ad3..d0c2dc7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/pom.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
@@ -213,6 +213,12 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
       <artifactId>leveldbjni-all</artifactId>
       <version>1.8</version>
     </dependency>
+    <dependency>
+      <groupId>org.mock-server</groupId>
+      <artifactId>mockserver-netty</artifactId>
+      <version>3.9.2</version>
+      <scope>test</scope>
+    </dependency>
     <!-- 'mvn dependency:analyze' fails to detect use of this dependency -->
     <dependency>
       <groupId>org.bouncycastle</groupId>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/837fb75e/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md
index 20b9d73..d0a0fe0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md
@@ -221,6 +221,31 @@ Below are examples using the `curl` command tool.
 
 See also: [Authentication for Hadoop HTTP web-consoles](../hadoop-common/HttpAuthentication.html)
 
+Additionally, WebHDFS supports OAuth2 on the client side. The Namenode and Datanodes do not currently support clients using OAuth2, but other backends that implement the WebHDFS REST interface may.
+
+WebHDFS supports two types of OAuth2 code grants (a user-provided refresh and access token, or a user-provided credential) by default and provides a pluggable mechanism for implementing other OAuth2 authentications per the [OAuth2 RFC](https://tools.ietf.org/html/rfc6749), or custom authentications.  When using either of the provided code grant mechanisms, the WebHDFS client will refresh the access token as necessary.
+
+OAuth2 should only be enabled for clients not running with Kerberos SPNEGO.
+
+| OAuth2 code grant mechanism | Description | Value of `dfs.webhdfs.oauth2.access.token.provider` that implements code grant |
+|:---- |:---- |:----|
+| Authorization Code Grant | The user provides an initial access token and refresh token, which are then used to authenticate WebHDFS requests and obtain replacement access tokens, respectively. | org.apache.hadoop.hdfs.web.oauth2.ConfRefreshTokenBasedAccessTokenProvider |
+| Client Credentials Grant | The user provides a credential which is used to obtain access tokens, which are then used to authenticate WebHDFS requests. | org.apache.hadoop.hdfs.web.oauth2.ConfCredentialBasedAccessTokenProvider |
+
+
+The following properties control OAuth2 authentication.
+
+| OAuth2 related property | Description |
+|:---- |:---- |
+| `dfs.webhdfs.oauth2.enabled` | Boolean to enable/disable OAuth2 authentication |
+| `dfs.webhdfs.oauth2.access.token.provider` | Class name of an implementation of `org.apache.hadoop.hdfs.web.oauth2.AccessTokenProvider`.  Two implementations are provided with the code, as described above, or the user may supply a custom implementation. The default value for this configuration key is `ConfCredentialBasedAccessTokenProvider`. |
+| `dfs.webhdfs.oauth2.client.id` | Client id used to obtain access token with either credential or refresh token |
+| `dfs.webhdfs.oauth2.refresh.url` | URL against which to post for obtaining bearer token with either credential or refresh token |
+| `dfs.webhdfs.oauth2.access.token` | (required if using ConfRefreshTokenBasedAccessTokenProvider) Initial access token with which to authenticate |
+| `dfs.webhdfs.oauth2.refresh.token` | (required if using ConfRefreshTokenBasedAccessTokenProvider) Initial refresh token to use to obtain new access tokens  |
+| `dfs.webhdfs.oauth2.refresh.token.expires.ms.since.epoch` | (required if using ConfRefreshTokenBasedAccessTokenProvider) Access token expiration measured in milliseconds since Jan 1, 1970.  *Note this is a different value than the one provided by OAuth providers; it has been converted as described in the interface to be suitable for a client application*  |
+| `dfs.webhdfs.oauth2.credential` | (required if using ConfCredentialBasedAccessTokenProvider).  Credential used to obtain initial and subsequent access tokens. |
+
 Proxy Users
 -----------
 

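Putting the client-side pieces of the documentation above together, a credential-grant WebHDFS client might be configured as in this sketch (it mirrors the test below; hosts and secrets are placeholders, and since ConfCredentialBasedAccessTokenProvider is the default provider, the access.token.provider key may be omitted):

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class OAuth2WebHdfsClientSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        conf.setBoolean("dfs.webhdfs.oauth2.enabled", true);
        conf.set("dfs.webhdfs.oauth2.client.id", "example-client-id");    // placeholder
        conf.set("dfs.webhdfs.oauth2.refresh.url",
            "https://oauth.example.com/token");                           // placeholder
        conf.set("dfs.webhdfs.oauth2.credential", "example-credential");  // placeholder
        FileSystem fs = FileSystem.get(
            new URI("webhdfs://webhdfs.example.com:50070"), conf);        // placeholder
        for (FileStatus stat : fs.listStatus(new Path("/"))) {
          System.out.println(stat.getPath());
        }
        fs.close();
      }
    }
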
http://git-wip-us.apache.org/repos/asf/hadoop/blob/837fb75e/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFSOAuth2.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFSOAuth2.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFSOAuth2.java
new file mode 100644
index 0000000..e2f6230
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFSOAuth2.java
@@ -0,0 +1,216 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.hadoop.hdfs.web;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
+import org.apache.hadoop.hdfs.web.oauth2.ConfCredentialBasedAccessTokenProvider;
+import org.apache.hadoop.hdfs.web.oauth2.CredentialBasedAccessTokenProvider;
+import org.apache.hadoop.hdfs.web.oauth2.OAuth2ConnectionConfigurator;
+import org.apache.http.HttpStatus;
+import org.codehaus.jackson.map.ObjectMapper;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.mockserver.client.server.MockServerClient;
+import org.mockserver.integration.ClientAndServer;
+import org.mockserver.model.Header;
+import org.mockserver.model.HttpRequest;
+import org.mockserver.model.HttpResponse;
+
+import java.io.IOException;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.util.Map;
+import java.util.TreeMap;
+
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.ACCESS_TOKEN_PROVIDER_KEY;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.OAUTH_CLIENT_ID_KEY;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.OAUTH_REFRESH_URL_KEY;
+import static org.apache.hadoop.hdfs.web.oauth2.OAuth2Constants.ACCESS_TOKEN;
+import static org.apache.hadoop.hdfs.web.oauth2.OAuth2Constants.EXPIRES_IN;
+import static org.apache.hadoop.hdfs.web.oauth2.OAuth2Constants.TOKEN_TYPE;
+import static org.junit.Assert.assertEquals;
+import static org.mockserver.integration.ClientAndServer.startClientAndServer;
+import static org.mockserver.matchers.Times.exactly;
+import static org.mockserver.model.HttpRequest.request;
+import static org.mockserver.model.HttpResponse.response;
+
+public class TestWebHDFSOAuth2 {
+  public static final Log LOG = LogFactory.getLog(TestWebHDFSOAuth2.class);
+
+  private ClientAndServer mockWebHDFS;
+  private ClientAndServer mockOAuthServer;
+
+  public final static int WEBHDFS_PORT = 7552;
+  public final static int OAUTH_PORT = 7553;
+
+  public final static Header CONTENT_TYPE_APPLICATION_JSON = new Header("Content-Type", "application/json");
+
+  public final static String AUTH_TOKEN = "0123456789abcdef";
+  public final static Header AUTH_TOKEN_HEADER = new Header("AUTHORIZATION", OAuth2ConnectionConfigurator.HEADER + AUTH_TOKEN);
+
+  @Before
+  public void startMockOAuthServer() {
+    mockOAuthServer = startClientAndServer(OAUTH_PORT);
+  }
+  @Before
+  public void startMockWebHDFSServer() {
+    System.setProperty("hadoop.home.dir", System.getProperty("user.dir"));
+
+    mockWebHDFS = startClientAndServer(WEBHDFS_PORT);
+  }
+
+  @Test
+  public void listStatusReturnsAsExpected() throws URISyntaxException, IOException {
+    MockServerClient mockWebHDFSServerClient = new MockServerClient("localhost", WEBHDFS_PORT);
+    MockServerClient mockOAuthServerClient = new MockServerClient("localhost", OAUTH_PORT);
+
+    HttpRequest oauthServerRequest = getOAuthServerMockRequest(mockOAuthServerClient);
+
+    HttpRequest fileSystemRequest = request()
+        .withMethod("GET")
+        .withPath(WebHdfsFileSystem.PATH_PREFIX + "/test1/test2")
+        .withHeader(AUTH_TOKEN_HEADER);
+
+    try {
+      mockWebHDFSServerClient.when(fileSystemRequest,
+          exactly(1)
+      )
+          .respond(
+              response()
+                  .withStatusCode(HttpStatus.SC_OK)
+                  .withHeaders(
+                      CONTENT_TYPE_APPLICATION_JSON
+                  )
+                  .withBody("{\n" +
+                      "  \"FileStatuses\":\n" +
+                      "  {\n" +
+                      "    \"FileStatus\":\n" +
+                      "    [\n" +
+                      "      {\n" +
+                      "        \"accessTime\"      : 1320171722771,\n" +
+                      "        \"blockSize\"       : 33554432,\n" +
+                      "        \"group\"           : \"supergroup\",\n" +
+                      "        \"length\"          : 24930,\n" +
+                      "        \"modificationTime\": 1320171722771,\n" +
+                      "        \"owner\"           : \"webuser\",\n" +
+                      "        \"pathSuffix\"      : \"a.patch\",\n" +
+                      "        \"permission\"      : \"644\",\n" +
+                      "        \"replication\"     : 1,\n" +
+                      "        \"type\"            : \"FILE\"\n" +
+                      "      },\n" +
+                      "      {\n" +
+                      "        \"accessTime\"      : 0,\n" +
+                      "        \"blockSize\"       : 0,\n" +
+                      "        \"group\"           : \"supergroup\",\n" +
+                      "        \"length\"          : 0,\n" +
+                      "        \"modificationTime\": 1320895981256,\n" +
+                      "        \"owner\"           : \"szetszwo\",\n" +
+                      "        \"pathSuffix\"      : \"bar\",\n" +
+                      "        \"permission\"      : \"711\",\n" +
+                      "        \"replication\"     : 0,\n" +
+                      "        \"type\"            : \"DIRECTORY\"\n" +
+                      "      }\n" +
+                      "    ]\n" +
+                      "  }\n" +
+                      "}\n")
+          );
+
+      FileSystem fs = new WebHdfsFileSystem();
+      Configuration conf = getConfiguration();
+      conf.set(OAUTH_REFRESH_URL_KEY, "http://localhost:" + OAUTH_PORT + "/refresh");
+      conf.set(CredentialBasedAccessTokenProvider.OAUTH_CREDENTIAL_KEY, "credential");
+
+      URI uri = new URI("webhdfs://localhost:" + WEBHDFS_PORT);
+      fs.initialize(uri, conf);
+
+      FileStatus[] ls = fs.listStatus(new Path("/test1/test2"));
+
+      mockOAuthServer.verify(oauthServerRequest);
+      mockWebHDFSServerClient.verify(fileSystemRequest);
+
+      assertEquals(2, ls.length);
+      assertEquals("a.patch", ls[0].getPath().getName());
+      assertEquals("bar", ls[1].getPath().getName());
+
+      fs.close();
+    } finally {
+      mockWebHDFSServerClient.clear(fileSystemRequest);
+      mockOAuthServerClient.clear(oauthServerRequest);
+    }
+  }
+
+  private HttpRequest getOAuthServerMockRequest(MockServerClient mockServerClient) throws IOException {
+    HttpRequest expectedRequest = request()
+        .withMethod("POST")
+        .withPath("/refresh")
+        .withBody("client_secret=credential&grant_type=client_credentials&client_id=MY_CLIENTID");
+    
+    Map<String, Object> map = new TreeMap<>();
+    
+    map.put(EXPIRES_IN, "0987654321");
+    map.put(TOKEN_TYPE, "bearer");
+    map.put(ACCESS_TOKEN, AUTH_TOKEN);
+
+    ObjectMapper mapper = new ObjectMapper();
+    
+    HttpResponse resp = response()
+        .withStatusCode(HttpStatus.SC_OK)
+        .withHeaders(
+            CONTENT_TYPE_APPLICATION_JSON
+        )
+        .withBody(mapper.writeValueAsString(map));
+
+    mockServerClient
+        .when(expectedRequest, exactly(1))
+        .respond(resp);
+
+    return expectedRequest;
+  }
+
+  public Configuration getConfiguration() {
+    Configuration conf = new Configuration();
+
+    // Configs for OAuth2
+    conf.setBoolean(HdfsClientConfigKeys.DFS_WEBHDFS_OAUTH_ENABLED_KEY, true);
+    conf.set(OAUTH_CLIENT_ID_KEY, "MY_CLIENTID");
+
+    conf.set(ACCESS_TOKEN_PROVIDER_KEY,
+        ConfCredentialBasedAccessTokenProvider.class.getName());
+
+    return conf;
+
+  }
+
+  @After
+  public void stopMockWebHDFSServer() {
+      mockWebHDFS.stop();
+  }
+
+  @After
+  public void stopMockOAuthServer() {
+    mockOAuthServer.stop();
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/837fb75e/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/oauth2/TestAccessTokenTimer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/oauth2/TestAccessTokenTimer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/oauth2/TestAccessTokenTimer.java
new file mode 100644
index 0000000..c387b1e
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/oauth2/TestAccessTokenTimer.java
@@ -0,0 +1,63 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.hadoop.hdfs.web.oauth2;
+
+import org.apache.hadoop.util.Timer;
+import org.junit.Test;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
+
+public class TestAccessTokenTimer {
+  @Test
+  public void expireConversionWorks() {
+    Timer mockTimer = mock(Timer.class);
+    when(mockTimer.now())
+        .thenReturn(5l);
+    
+    AccessTokenTimer timer = new AccessTokenTimer(mockTimer);
+    
+    timer.setExpiresIn("3");
+    assertEquals(3005, timer.getNextRefreshMSSinceEpoch());
+    
+    assertTrue(timer.shouldRefresh());
+  }
+  
+  @Test
+  public void shouldRefreshIsCorrect() {
+    Timer mockTimer = mock(Timer.class);
+    when(mockTimer.now())
+        .thenReturn(500l)
+        .thenReturn(1000000l + 500l);
+    
+    AccessTokenTimer timer = new AccessTokenTimer(mockTimer);
+    
+    timer.setExpiresInMSSinceEpoch("1000000");
+    
+    assertFalse(timer.shouldRefresh());
+    assertTrue(timer.shouldRefresh());
+    
+    verify(mockTimer, times(2)).now();
+  } 
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/837fb75e/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/oauth2/TestClientCredentialTimeBasedTokenRefresher.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/oauth2/TestClientCredentialTimeBasedTokenRefresher.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/oauth2/TestClientCredentialTimeBasedTokenRefresher.java
new file mode 100644
index 0000000..c259b30
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/oauth2/TestClientCredentialTimeBasedTokenRefresher.java
@@ -0,0 +1,138 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.hadoop.hdfs.web.oauth2;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.util.Timer;
+import org.apache.http.HttpStatus;
+import org.codehaus.jackson.map.ObjectMapper;
+import org.junit.Test;
+import org.mockserver.client.server.MockServerClient;
+import org.mockserver.integration.ClientAndServer;
+import org.mockserver.model.Header;
+import org.mockserver.model.HttpRequest;
+import org.mockserver.model.HttpResponse;
+import org.mockserver.model.Parameter;
+import org.mockserver.model.ParameterBody;
+
+import java.io.IOException;
+import java.util.Map;
+import java.util.TreeMap;
+
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.ACCESS_TOKEN_PROVIDER_KEY;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.OAUTH_CLIENT_ID_KEY;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.OAUTH_REFRESH_URL_KEY;
+import static org.apache.hadoop.hdfs.web.oauth2.OAuth2Constants.ACCESS_TOKEN;
+import static org.apache.hadoop.hdfs.web.oauth2.OAuth2Constants.CLIENT_CREDENTIALS;
+import static org.apache.hadoop.hdfs.web.oauth2.OAuth2Constants.CLIENT_ID;
+import static org.apache.hadoop.hdfs.web.oauth2.OAuth2Constants.CLIENT_SECRET;
+import static org.apache.hadoop.hdfs.web.oauth2.OAuth2Constants.EXPIRES_IN;
+import static org.apache.hadoop.hdfs.web.oauth2.OAuth2Constants.GRANT_TYPE;
+import static org.apache.hadoop.hdfs.web.oauth2.OAuth2Constants.TOKEN_TYPE;
+import static org.junit.Assert.assertEquals;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+import static org.mockserver.integration.ClientAndServer.startClientAndServer;
+import static org.mockserver.matchers.Times.exactly;
+import static org.mockserver.model.HttpRequest.request;
+import static org.mockserver.model.HttpResponse.response;
+
+public class TestClientCredentialTimeBasedTokenRefresher {
+  public final static Header CONTENT_TYPE_APPLICATION_JSON
+      = new Header("Content-Type", "application/json");
+
+  public final static String CLIENT_ID_FOR_TESTING = "joebob";
+
+  public Configuration buildConf(String credential, String tokenExpires,
+                                 String clientId, String refreshURL) {
+    // Configurations are simple enough that it's not worth mocking them out.
+    Configuration conf = new Configuration();
+    conf.set(CredentialBasedAccessTokenProvider.OAUTH_CREDENTIAL_KEY,
+        credential);
+    conf.set(ACCESS_TOKEN_PROVIDER_KEY,
+        ConfCredentialBasedAccessTokenProvider.class.getName());
+    conf.set(OAUTH_CLIENT_ID_KEY, clientId);
+    conf.set(OAUTH_REFRESH_URL_KEY, refreshURL);
+    return conf;
+  }
+
+  @Test
+  public void refreshUrlIsCorrect() throws IOException {
+    final int PORT = 7552;
+    final String REFRESH_ADDRESS = "http://localhost:" + PORT + "/refresh";
+
+    long tokenExpires = 0;
+
+    Configuration conf = buildConf("myreallycoolcredential",
+        Long.toString(tokenExpires),
+        CLIENT_ID_FOR_TESTING,
+        REFRESH_ADDRESS);
+
+    Timer mockTimer = mock(Timer.class);
+    when(mockTimer.now()).thenReturn(tokenExpires + 1000L);
+
+    AccessTokenProvider credProvider =
+        new ConfCredentialBasedAccessTokenProvider(mockTimer);
+    credProvider.setConf(conf);
+    
+    // Build mock server to receive refresh request
+    ClientAndServer mockServer = startClientAndServer(PORT);
+
+    HttpRequest expectedRequest = request()
+        .withMethod("POST")
+        .withPath("/refresh")
+        .withBody( 
+        // Note, OkHttp does not sort the param values, so we need to do
+        // it ourselves via the ordering provided to ParameterBody...
+            ParameterBody.params(
+                Parameter.param(CLIENT_SECRET, "myreallycoolcredential"),
+                Parameter.param(GRANT_TYPE, CLIENT_CREDENTIALS),
+                Parameter.param(CLIENT_ID, CLIENT_ID_FOR_TESTING)
+                ));
+
+    MockServerClient mockServerClient = new MockServerClient("localhost", PORT);
+
+    // https://tools.ietf.org/html/rfc6749#section-5.1
+    Map<String, Object> map = new TreeMap<>();
+    
+    map.put(EXPIRES_IN, "0987654321");
+    map.put(TOKEN_TYPE, "bearer");
+    map.put(ACCESS_TOKEN, "new access token");
+
+    ObjectMapper mapper = new ObjectMapper();
+    
+    HttpResponse resp = response()
+        .withStatusCode(HttpStatus.SC_OK)
+        .withHeaders(
+            CONTENT_TYPE_APPLICATION_JSON
+        )
+        .withBody(mapper.writeValueAsString(map));
+
+    mockServerClient
+        .when(expectedRequest, exactly(1))
+        .respond(resp);
+
+    assertEquals("new access token", credProvider.getAccessToken());
+
+    mockServerClient.verify(expectedRequest);
+
+    mockServerClient.clear(expectedRequest);
+    mockServer.stop();
+  }
+}

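The test fixes the wire contract rather than the implementation: a form-encoded POST of client_secret, grant_type=client_credentials, and client_id, answered with the RFC 6749 section 5.1 JSON fields expires_in, token_type, and access_token. A generic sketch of that exchange is below, using only java.net and the same org.codehaus.jackson ObjectMapper the test uses; it is illustrative, not the patched provider (which, per the comment above, goes through OkHttp), and omits error handling.

    // Illustrative client_credentials refresh; not the provider from this patch.
    import org.codehaus.jackson.map.ObjectMapper;

    import java.io.OutputStream;
    import java.net.HttpURLConnection;
    import java.net.URL;
    import java.net.URLEncoder;
    import java.util.Map;

    public class ClientCredentialsSketch {
      static String fetchAccessToken(String refreshUrl, String clientId,
          String clientSecret) throws Exception {
        String body =
            "client_secret=" + URLEncoder.encode(clientSecret, "UTF-8")
            + "&grant_type=client_credentials"
            + "&client_id=" + URLEncoder.encode(clientId, "UTF-8");
        HttpURLConnection conn =
            (HttpURLConnection) new URL(refreshUrl).openConnection();
        conn.setRequestMethod("POST");
        conn.setDoOutput(true);
        conn.setRequestProperty("Content-Type",
            "application/x-www-form-urlencoded");
        OutputStream out = conn.getOutputStream();
        out.write(body.getBytes("UTF-8"));
        out.close();
        // RFC 6749 section 5.1 response: expires_in, token_type, access_token.
        Map<?, ?> resp = new ObjectMapper()
            .readValue(conn.getInputStream(), Map.class);
        return (String) resp.get("access_token");
      }
    }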
http://git-wip-us.apache.org/repos/asf/hadoop/blob/837fb75e/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/oauth2/TestRefreshTokenTimeBasedTokenRefresher.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/oauth2/TestRefreshTokenTimeBasedTokenRefresher.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/oauth2/TestRefreshTokenTimeBasedTokenRefresher.java
new file mode 100644
index 0000000..889ad0e
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/oauth2/TestRefreshTokenTimeBasedTokenRefresher.java
@@ -0,0 +1,138 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.hadoop.hdfs.web.oauth2;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.util.Timer;
+import org.apache.http.HttpStatus;
+import org.codehaus.jackson.map.ObjectMapper;
+import org.junit.Test;
+import org.mockserver.client.server.MockServerClient;
+import org.mockserver.integration.ClientAndServer;
+import org.mockserver.model.Header;
+import org.mockserver.model.HttpRequest;
+import org.mockserver.model.HttpResponse;
+import org.mockserver.model.Parameter;
+import org.mockserver.model.ParameterBody;
+
+import java.io.IOException;
+import java.util.Map;
+import java.util.TreeMap;
+
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.OAUTH_CLIENT_ID_KEY;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.OAUTH_REFRESH_URL_KEY;
+import static org.apache.hadoop.hdfs.web.oauth2.ConfRefreshTokenBasedAccessTokenProvider.OAUTH_REFRESH_TOKEN_EXPIRES_KEY;
+import static org.apache.hadoop.hdfs.web.oauth2.ConfRefreshTokenBasedAccessTokenProvider.OAUTH_REFRESH_TOKEN_KEY;
+import static org.apache.hadoop.hdfs.web.oauth2.OAuth2Constants.ACCESS_TOKEN;
+import static org.apache.hadoop.hdfs.web.oauth2.OAuth2Constants.BEARER;
+import static org.apache.hadoop.hdfs.web.oauth2.OAuth2Constants.CLIENT_ID;
+import static org.apache.hadoop.hdfs.web.oauth2.OAuth2Constants.EXPIRES_IN;
+import static org.apache.hadoop.hdfs.web.oauth2.OAuth2Constants.GRANT_TYPE;
+import static org.apache.hadoop.hdfs.web.oauth2.OAuth2Constants.REFRESH_TOKEN;
+import static org.apache.hadoop.hdfs.web.oauth2.OAuth2Constants.TOKEN_TYPE;
+import static org.junit.Assert.assertEquals;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+import static org.mockserver.integration.ClientAndServer.startClientAndServer;
+import static org.mockserver.matchers.Times.exactly;
+import static org.mockserver.model.HttpRequest.request;
+import static org.mockserver.model.HttpResponse.response;
+
+public class TestRefreshTokenTimeBasedTokenRefresher {
+
+  public final static Header CONTENT_TYPE_APPLICATION_JSON
+      = new Header("Content-Type", "application/json");
+
+  public Configuration buildConf(String refreshToken, String tokenExpires,
+                                 String clientId, String refreshURL) {
+    // Configurations are simple enough that it's not worth mocking them out.
+    Configuration conf = new Configuration();
+    conf.set(OAUTH_REFRESH_TOKEN_KEY, refreshToken);
+    conf.set(OAUTH_REFRESH_TOKEN_EXPIRES_KEY, tokenExpires);
+    conf.set(OAUTH_CLIENT_ID_KEY, clientId);
+    conf.set(OAUTH_REFRESH_URL_KEY, refreshURL);
+
+    return conf;
+  }
+
+  @Test
+  public void refreshUrlIsCorrect() throws IOException {
+    final int PORT = 7552;
+    final String REFRESH_ADDRESS = "http://localhost:" + PORT + "/refresh";
+
+    long tokenExpires = 0;
+
+    Configuration conf = buildConf("refresh token key",
+        Long.toString(tokenExpires),
+        "joebob",
+        REFRESH_ADDRESS);
+
+    Timer mockTimer = mock(Timer.class);
+    when(mockTimer.now()).thenReturn(tokenExpires + 1000L);
+
+    AccessTokenProvider tokenProvider =
+        new ConfRefreshTokenBasedAccessTokenProvider(mockTimer);
+    tokenProvider.setConf(conf);
+
+    // Build mock server to receive refresh request
+
+    ClientAndServer mockServer = startClientAndServer(PORT);
+
+    HttpRequest expectedRequest = request()
+        .withMethod("POST")
+        .withPath("/refresh")
+        // Note, OkHttp does not sort the param values, so we need to
+        // do it ourselves via the ordering provided to ParameterBody...
+        .withBody(
+            ParameterBody.params(
+                Parameter.param(CLIENT_ID, "joebob"),
+                Parameter.param(GRANT_TYPE, REFRESH_TOKEN),
+                Parameter.param(REFRESH_TOKEN, "refresh token key")));
+
+    MockServerClient mockServerClient = new MockServerClient("localhost", PORT);
+
+    // https://tools.ietf.org/html/rfc6749#section-5.1
+    Map<String, Object> map = new TreeMap<>();
+
+    map.put(EXPIRES_IN, "0987654321");
+    map.put(TOKEN_TYPE, BEARER);
+    map.put(ACCESS_TOKEN, "new access token");
+
+    ObjectMapper mapper = new ObjectMapper();
+    
+    HttpResponse resp = response()
+        .withStatusCode(HttpStatus.SC_OK)
+        .withHeaders(
+            CONTENT_TYPE_APPLICATION_JSON
+        )
+        .withBody(mapper.writeValueAsString(map));
+
+    mockServerClient
+        .when(expectedRequest, exactly(1))
+        .respond(resp);
+
+    assertEquals("new access token", tokenProvider.getAccessToken());
+
+    mockServerClient.verify(expectedRequest);
+
+    mockServerClient.clear(expectedRequest);
+    mockServer.stop();
+  }
+
+}

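Compared with the client-credentials test earlier, the only difference in the request is the grant; both POST to the same refresh URL. Using the parameter names from the two tests:

    POST /refresh  body: client_secret=...&grant_type=client_credentials&client_id=...
    POST /refresh  body: client_id=...&grant_type=refresh_token&refresh_token=...

The response shape is identical in both cases, which is why both tests share the same RFC 6749 section 5.1 verification of expires_in, token_type, and access_token.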

[03/50] [abbrv] hadoop git commit: YARN-2945. Fixing the CHANGES.txt to have the right JIRA number.

Posted by vv...@apache.org.
YARN-2945. Fixing the CHANGES.txt to have the right JIRA number.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cf831565
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cf831565
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cf831565

Branch: refs/heads/YARN-3926
Commit: cf831565e8344523e1bd0eaf686ed56a2b48b920
Parents: 837fb75
Author: Vinod Kumar Vavilapalli <vi...@apache.org>
Authored: Sun Aug 30 20:01:47 2015 -0700
Committer: Vinod Kumar Vavilapalli <vi...@apache.org>
Committed: Sun Aug 30 20:01:47 2015 -0700

----------------------------------------------------------------------
 hadoop-yarn-project/CHANGES.txt | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cf831565/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 0b733a4..4201b4f 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -1453,7 +1453,7 @@ Release 2.7.0 - 2015-04-20
     YARN-2914. [YARN-1492] Potential race condition in Singleton implementation of 
     SharedCacheUploaderMetrics, CleanerMetrics, ClientSCMMetrics. (Varun Saxena via kasha)
 
-    YARN-2964. FSLeafQueue#assignContainer - document the reason for using both write and
+    YARN-2945. FSLeafQueue#assignContainer - document the reason for using both write and
     read locks. (Tsuyoshi Ozawa via kasha)
 
     YARN-2944. InMemorySCMStore can not be instantiated with ReflectionUtils#newInstance.


[28/50] [abbrv] hadoop git commit: YARN-4101. RM should print alert messages if ZooKeeper and the ResourceManager run into connection issues. Contributed by Xuan Gong

Posted by vv...@apache.org.
YARN-4101. RM should print alert messages if ZooKeeper and the ResourceManager run into connection issues. Contributed by Xuan Gong


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/09c64ba1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/09c64ba1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/09c64ba1

Branch: refs/heads/YARN-3926
Commit: 09c64ba1ba8be7a2ac31f4e42efb8c99b682399f
Parents: d31a41c
Author: Jian He <ji...@apache.org>
Authored: Wed Sep 2 17:45:23 2015 -0700
Committer: Jian He <ji...@apache.org>
Committed: Wed Sep 2 17:45:23 2015 -0700

----------------------------------------------------------------------
 .../java/org/apache/hadoop/ha/ActiveStandbyElector.java     | 4 ++++
 hadoop-yarn-project/CHANGES.txt                             | 3 +++
 .../hadoop/yarn/server/resourcemanager/AdminService.java    | 9 +++++++++
 .../yarn/server/resourcemanager/EmbeddedElectorService.java | 4 ++++
 .../yarn/server/resourcemanager/webapp/AboutBlock.java      | 2 ++
 .../hadoop/yarn/server/resourcemanager/webapp/RMWebApp.java | 5 +++++
 .../yarn/server/resourcemanager/webapp/RMWebAppFilter.java  | 5 ++++-
 .../yarn/server/resourcemanager/webapp/dao/ClusterInfo.java | 6 ++++++
 .../server/resourcemanager/webapp/TestRMWebServices.java    | 8 ++++++--
 9 files changed, 43 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/09c64ba1/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ActiveStandbyElector.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ActiveStandbyElector.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ActiveStandbyElector.java
index e458181..fcbcfdf 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ActiveStandbyElector.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ActiveStandbyElector.java
@@ -1141,4 +1141,8 @@ public class ActiveStandbyElector implements StatCallback, StringCallback {
       ((appData == null) ? "null" : StringUtils.byteToHexString(appData)) + 
       " cb=" + appClient;
   }
+
+  public String getHAZookeeperConnectionState() {
+    return this.zkConnectionState.name();
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/09c64ba1/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 13fe9b0..5e0c43d 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -871,6 +871,9 @@ Release 2.7.2 - UNRELEASED
     YARN-3893. Both RM in active state when Admin#transitionToActive failure 
     from refeshAll() (Bibin A Chundatt via rohithsharmaks)
 
+    YARN-4101. RM should print alert messages if Zookeeper and Resourcemanager
+    gets connection issue. (Xuan Gong via jianhe)
+
 Release 2.7.1 - 2015-07-06
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/09c64ba1/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/AdminService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/AdminService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/AdminService.java
index d96ed8c..a95e22c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/AdminService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/AdminService.java
@@ -782,4 +782,13 @@ public class AdminService extends CompositeService implements
       throw logAndWrapException(e, user.getShortUserName(), argName, msg);
     }
   }
+
+  public String getHAZookeeperConnectionState() {
+    if (!rmContext.isHAEnabled()) {
+      return "ResourceManager HA is not enabled.";
+    } else if (!autoFailoverEnabled) {
+      return "Auto Failover is not enabled.";
+    }
+    return this.embeddedElector.getHAZookeeperConnectionState();
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/09c64ba1/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/EmbeddedElectorService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/EmbeddedElectorService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/EmbeddedElectorService.java
index c7b7768..73bdca0 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/EmbeddedElectorService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/EmbeddedElectorService.java
@@ -205,4 +205,8 @@ public class EmbeddedElectorService extends AbstractService
     elector.quitElection(false);
     elector.joinElection(localActiveNodeInfo);
   }
+
+  public String getHAZookeeperConnectionState() {
+    return elector.getHAZookeeperConnectionState();
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/09c64ba1/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AboutBlock.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AboutBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AboutBlock.java
index ea5c48a..4225afd 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AboutBlock.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AboutBlock.java
@@ -44,6 +44,8 @@ public class AboutBlock extends HtmlBlock {
       _("Cluster ID:", cinfo.getClusterId()).
       _("ResourceManager state:", cinfo.getState()).
       _("ResourceManager HA state:", cinfo.getHAState()).
+      _("ResourceManager HA zookeeper connection state:",
+          cinfo.getHAZookeeperConnectionState()).
       _("ResourceManager RMStateStore:", cinfo.getRMStateStore()).
       _("ResourceManager started on:", Times.format(cinfo.getStartedOn())).
       _("ResourceManager version:", cinfo.getRMBuildVersion() +

http://git-wip-us.apache.org/repos/asf/hadoop/blob/09c64ba1/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebApp.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebApp.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebApp.java
index a86ed4f..91c3d72 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebApp.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebApp.java
@@ -117,4 +117,9 @@ public class RMWebApp extends WebApp implements YarnWebParams {
     }
     return path;
   }
+
+  public String getHAZookeeperConnectionState() {
+    return rm.getRMContext().getRMAdminService()
+      .getHAZookeeperConnectionState();
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/09c64ba1/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebAppFilter.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebAppFilter.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebAppFilter.java
index a8f793a..9f35b6c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebAppFilter.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebAppFilter.java
@@ -58,6 +58,7 @@ public class RMWebAppFilter extends GuiceContainer {
   private String path;
   private static final int BASIC_SLEEP_TIME = 5;
   private static final int MAX_SLEEP_TIME = 5 * 60;
+  private static final Random randnum = new Random();
 
   @Inject
   public RMWebAppFilter(Injector injector, Configuration conf) {
@@ -126,6 +127,8 @@ public class RMWebAppFilter extends GuiceContainer {
         String redirectMsg =
             doRetry ? "Can not find any active RM. Will retry in next " + next
                 + " seconds." : "There is no active RM right now.";
+        redirectMsg += "\nHA Zookeeper Connection State: "
+            + rmWebApp.getHAZookeeperConnectionState();
         PrintWriter out = response.getWriter();
         out.println(redirectMsg);
         if (doRetry) {
@@ -172,6 +175,6 @@ public class RMWebAppFilter extends GuiceContainer {
 
   private static int calculateExponentialTime(int retries) {
     long baseTime = BASIC_SLEEP_TIME * (1L << retries);
-    return (int) (baseTime * ((new Random()).nextDouble() + 0.5));
+    return (int) (baseTime * (randnum.nextDouble() + 0.5));
   }
 }
\ No newline at end of file

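The RMWebAppFilter hunks above also carry a small robustness fix: calculateExponentialTime previously built a new Random() on every call, which under rapid successive calls can yield poorly distributed jitter, so the patch hoists a single shared instance. The full backoff, sketched standalone below; the Math.min clamp against MAX_SLEEP_TIME is an assumption, since the code that applies the cap lies outside the hunk:

    import java.util.Random;

    public class BackoffSketch {
      private static final int BASIC_SLEEP_TIME = 5;    // seconds, as in the patch
      private static final int MAX_SLEEP_TIME = 5 * 60; // seconds, as in the patch
      private static final Random RANDNUM = new Random(); // shared, per the fix

      // Exponential backoff with jitter in [0.5x, 1.5x); the cap is assumed.
      static int calculateExponentialTime(int retries) {
        long baseTime = BASIC_SLEEP_TIME * (1L << retries);
        int jittered = (int) (baseTime * (RANDNUM.nextDouble() + 0.5));
        return Math.min(jittered, MAX_SLEEP_TIME);
      }
    }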
http://git-wip-us.apache.org/repos/asf/hadoop/blob/09c64ba1/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ClusterInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ClusterInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ClusterInfo.java
index b529f21..512a5c4 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ClusterInfo.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ClusterInfo.java
@@ -43,6 +43,7 @@ public class ClusterInfo {
   protected String hadoopVersion;
   protected String hadoopBuildVersion;
   protected String hadoopVersionBuiltOn;
+  protected String haZooKeeperConnectionState;
 
   public ClusterInfo() {
   } // JAXB needs this
@@ -62,6 +63,8 @@ public class ClusterInfo {
     this.hadoopVersion = VersionInfo.getVersion();
     this.hadoopBuildVersion = VersionInfo.getBuildVersion();
     this.hadoopVersionBuiltOn = VersionInfo.getDate();
+    this.haZooKeeperConnectionState =
+        rm.getRMContext().getRMAdminService().getHAZookeeperConnectionState();
   }
 
   public String getState() {
@@ -108,4 +111,7 @@ public class ClusterInfo {
     return this.startedOn;
   }
 
+  public String getHAZookeeperConnectionState() {
+    return this.haZooKeeperConnectionState;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/09c64ba1/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServices.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServices.java
index 752e99b..530c06f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServices.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServices.java
@@ -285,6 +285,8 @@ public class TestRMWebServices extends JerseyTestBase {
           WebServicesTestUtils.getXmlLong(element, "startedOn"),
           WebServicesTestUtils.getXmlString(element, "state"),
           WebServicesTestUtils.getXmlString(element, "haState"),
+          WebServicesTestUtils.getXmlString(
+              element, "haZooKeeperConnectionState"),
           WebServicesTestUtils.getXmlString(element, "hadoopVersionBuiltOn"),
           WebServicesTestUtils.getXmlString(element, "hadoopBuildVersion"),
           WebServicesTestUtils.getXmlString(element, "hadoopVersion"),
@@ -300,9 +302,10 @@ public class TestRMWebServices extends JerseyTestBase {
       Exception {
     assertEquals("incorrect number of elements", 1, json.length());
     JSONObject info = json.getJSONObject("clusterInfo");
-    assertEquals("incorrect number of elements", 11, info.length());
+    assertEquals("incorrect number of elements", 12, info.length());
     verifyClusterGeneric(info.getLong("id"), info.getLong("startedOn"),
         info.getString("state"), info.getString("haState"),
+        info.getString("haZooKeeperConnectionState"),
         info.getString("hadoopVersionBuiltOn"),
         info.getString("hadoopBuildVersion"), info.getString("hadoopVersion"),
         info.getString("resourceManagerVersionBuiltOn"),
@@ -312,7 +315,8 @@ public class TestRMWebServices extends JerseyTestBase {
   }
 
   public void verifyClusterGeneric(long clusterid, long startedon,
-      String state, String haState, String hadoopVersionBuiltOn,
+      String state, String haState, String haZooKeeperConnectionState,
+      String hadoopVersionBuiltOn,
       String hadoopBuildVersion, String hadoopVersion,
       String resourceManagerVersionBuiltOn, String resourceManagerBuildVersion,
       String resourceManagerVersion) {

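Read together, the hunks form a straight delegation chain for the new field, with AdminService short-circuiting when HA or automatic failover is disabled:

    ClusterInfo / RMWebApp
      -> AdminService.getHAZookeeperConnectionState()   // guards HA / auto-failover
        -> EmbeddedElectorService.getHAZookeeperConnectionState()
          -> ActiveStandbyElector.zkConnectionState.name()

so the About page, the web-app redirect message, and the clusterInfo REST payload all surface the same elector state string.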

[08/50] [abbrv] hadoop git commit: Move YARN-4092 to 2.7.2

Posted by vv...@apache.org.
Move YARN-4092 to 2.7.2


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4eaa7fd3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4eaa7fd3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4eaa7fd3

Branch: refs/heads/YARN-3926
Commit: 4eaa7fd3eae4412ac0b964c617b1bbb17a39d8be
Parents: a3fd2cc
Author: Jian He <ji...@apache.org>
Authored: Mon Aug 31 17:43:36 2015 -0700
Committer: Jian He <ji...@apache.org>
Committed: Mon Aug 31 17:43:36 2015 -0700

----------------------------------------------------------------------
 hadoop-yarn-project/CHANGES.txt | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4eaa7fd3/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 19c1082..80cf793 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -422,9 +422,6 @@ Release 2.8.0 - UNRELEASED
     YARN-1556. NPE getting application report with a null appId. (Weiwei Yang via 
     junping_du)
 
-    YARN-4092. Fixed UI redirection to print useful messages when both RMs are
-    in standby mode. (Xuan Gong via jianhe)
-
   OPTIMIZATIONS
 
     YARN-3339. TestDockerContainerExecutor should pull a single image and not
@@ -824,6 +821,8 @@ Release 2.7.2 - UNRELEASED
     YARN-3978. Configurably turn off the saving of container info in Generic AHS
     (Eric Payne via jeagles)
 
+    YARN-4092. Fixed UI redirection to print useful messages when both RMs are
+    in standby mode. (Xuan Gong via jianhe)
 
   OPTIMIZATIONS
 


[49/50] [abbrv] hadoop git commit: MAPREDUCE-6442. Stack trace is missing when error occurs in client protocol provider's constructor. Contributed by Chang Li.

Posted by vv...@apache.org.
MAPREDUCE-6442. Stack trace is missing when error occurs in client protocol provider's constructor. Contributed by Chang Li.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9b685773
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9b685773
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9b685773

Branch: refs/heads/YARN-3926
Commit: 9b685773ec1031e90cc2b8aedb42670721757e22
Parents: bcc85e3
Author: Tsuyoshi Ozawa <oz...@apache.org>
Authored: Sat Sep 5 11:21:03 2015 +0900
Committer: Tsuyoshi Ozawa <oz...@apache.org>
Committed: Sat Sep 5 11:21:03 2015 +0900

----------------------------------------------------------------------
 hadoop-mapreduce-project/CHANGES.txt                              | 3 +++
 .../src/main/java/org/apache/hadoop/mapreduce/Cluster.java        | 2 +-
 2 files changed, 4 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9b685773/hadoop-mapreduce-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt
index 88cca41..5b5724b 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -584,6 +584,9 @@ Release 2.7.2 - UNRELEASED
     MAPREDUCE-6439. AM may fail instead of retrying if RM shuts down during the
     allocate call. (Anubhav Dhoot via kasha)
 
+    MAPREDUCE-6442. Stack trace is missing when error occurs in client protocol
+    provider's constructor (Chang Li via ozawa)
+
 Release 2.7.1 - 2015-07-06 
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9b685773/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Cluster.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Cluster.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Cluster.java
index 34353ac..fc228e6 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Cluster.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Cluster.java
@@ -111,7 +111,7 @@ public class Cluster {
         } 
         catch (Exception e) {
           LOG.info("Failed to use " + provider.getClass().getName()
-              + " due to error: " + e.getMessage());
+              + " due to error: ", e);
         }
       }
     }

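The one-line change above is the entire fix: passing the throwable as the logger's second argument preserves the stack trace (and cause chain), while concatenating e.getMessage() discards it and prints "null" for message-less exceptions. A minimal commons-logging-style illustration (the logger declaration here is a stand-in; Cluster.java's own LOG field is outside the hunk):

    import org.apache.commons.logging.Log;
    import org.apache.commons.logging.LogFactory;

    public class LoggingSketch {
      private static final Log LOG = LogFactory.getLog(LoggingSketch.class);

      static void demo(Exception e) {
        // Before: message text only; the stack trace is lost.
        LOG.info("Failed to use provider due to error: " + e.getMessage());
        // After: the throwable is passed through and logged with its trace.
        LOG.info("Failed to use provider due to error: ", e);
      }
    }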

[25/50] [abbrv] hadoop git commit: HDFS-328. Improve fs -setrep error message for invalid replication factors. Contributed by Daniel Templeton.

Posted by vv...@apache.org.
HDFS-328. Improve fs -setrep error message for invalid replication factors. Contributed by Daniel Templeton.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/afc88b39
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/afc88b39
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/afc88b39

Branch: refs/heads/YARN-3926
Commit: afc88b396f06488c331564e0f6987013bb920d3e
Parents: c006c3a
Author: Andrew Wang <wa...@apache.org>
Authored: Wed Sep 2 13:45:20 2015 -0700
Committer: Andrew Wang <wa...@apache.org>
Committed: Wed Sep 2 13:46:00 2015 -0700

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |  3 ++
 .../server/blockmanagement/BlockManager.java    | 41 ++++++++------
 .../org/apache/hadoop/hdfs/TestDFSShell.java    | 56 ++++++++++++++++++++
 .../src/test/resources/testHDFSConf.xml         | 56 +++++++++++++++++++-
 4 files changed, 140 insertions(+), 16 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/afc88b39/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 0f2d713..78bbf26 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -877,6 +877,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-2070. Add more unit tests for FsShell getmerge (Daniel Templeton via
     Colin P. McCabe)
 
+    HDFS-328. Improve fs -setrep error message for invalid replication factors.
+    (Daniel Templeton via wang)
+
   OPTIMIZATIONS
 
     HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/afc88b39/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 1346ab3..08fbd4f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -953,28 +953,39 @@ public class BlockManager implements BlockStatsMXBean {
 
   /**
    * Check whether the replication parameter is within the range
-   * determined by system configuration.
+   * determined by system configuration and throw an exception if it's not.
+   *
+   * @param src the path to the target file
+   * @param replication the requested replication factor
+   * @param clientName the name of the client node making the request
+   * @throws java.io.IOException thrown if the requested replication factor
+   * is out of bounds
    */
    public void verifyReplication(String src,
                           short replication,
                           String clientName) throws IOException {
 
-    if (replication >= minReplication && replication <= maxReplication) {
-      //common case. avoid building 'text'
-      return;
-    }
-    
-    String text = "file " + src 
-      + ((clientName != null) ? " on client " + clientName : "")
-      + ".\n"
-      + "Requested replication " + replication;
+    if (replication < minReplication || replication > maxReplication) {
+      StringBuilder msg = new StringBuilder("Requested replication factor of ");
 
-    if (replication > maxReplication)
-      throw new IOException(text + " exceeds maximum " + maxReplication);
+      msg.append(replication);
 
-    if (replication < minReplication)
-      throw new IOException(text + " is less than the required minimum " +
-                            minReplication);
+      if (replication > maxReplication) {
+        msg.append(" exceeds maximum of ");
+        msg.append(maxReplication);
+      } else {
+        msg.append(" is less than the required minimum of ");
+        msg.append(minReplication);
+      }
+
+      msg.append(" for ").append(src);
+
+      if (clientName != null) {
+        msg.append(" from ").append(clientName);
+      }
+
+      throw new IOException(msg.toString());
+    }
   }
 
   /**

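The tests below pin the resulting wording at the shell level, for example (taken from the expected outputs):

    setrep: Requested replication factor of 1 is less than the required minimum of 2 for /tmp/TestDFSShell-testSetrepLow/testFileForSetrepLow
    setrep: Requested replication factor of 1025 exceeds maximum of <max> for /dir0/file0

where <max> is the cluster's configured maximum, matched by regex in testHDFSConf.xml; the "from <client>" suffix appears only when a client name is present.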
http://git-wip-us.apache.org/repos/asf/hadoop/blob/afc88b39/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
index 1386124..dda2051 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
@@ -2412,7 +2412,63 @@ public class TestDFSShell {
     }
   }
 
+  /**
+   * Test -setrep with a replication factor that is too low.  We have to test
+   * this here because the mini-cluster used with testHDFSConf.xml uses a
+   * replication factor of 1 (for good reason).
+   */
+  @Test (timeout = 30000)
+  public void testSetrepLow() throws Exception {
+    Configuration conf = new Configuration();
+
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MIN_KEY, 2);
+
+    MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf);
+    MiniDFSCluster cluster = builder.numDataNodes(2).format(true).build();
+    FsShell shell = new FsShell(conf);
+
+    cluster.waitActive();
+
+    final String testdir = "/tmp/TestDFSShell-testSetrepLow";
+    final Path hdfsFile = new Path(testdir, "testFileForSetrepLow");
+    final PrintStream origOut = System.out;
+    final PrintStream origErr = System.err;
+
+    try {
+      final FileSystem fs = cluster.getFileSystem();
+
+      assertTrue("Unable to create test directory",
+          fs.mkdirs(new Path(testdir)));
 
+      fs.create(hdfsFile, true).close();
+
+      // Capture the command output so we can examine it
+      final ByteArrayOutputStream bao = new ByteArrayOutputStream();
+      final PrintStream capture = new PrintStream(bao);
+
+      System.setOut(capture);
+      System.setErr(capture);
+
+      final String[] argv = new String[] { "-setrep", "1", hdfsFile.toString() };
+
+      try {
+        assertEquals("Command did not return the expected exit code",
+            1, shell.run(argv));
+      } finally {
+        System.setOut(origOut);
+        System.setErr(origErr);
+      }
+
+      assertEquals("Error message is not the expected error message",
+          "setrep: Requested replication factor of 1 is less than "
+              + "the required minimum of 2 for /tmp/TestDFSShell-"
+              + "testSetrepLow/testFileForSetrepLow\n",
+          bao.toString());
+    } finally {
+      shell.close();
+      cluster.shutdown();
+    }
+  }
 
   // setrep for file and directory.
   @Test (timeout = 30000)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/afc88b39/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml
index 18c68ca..7c3cac9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml
@@ -6471,7 +6471,61 @@
         </comparator>
       </comparators>
     </test>
-    
+
+    <test> <!-- TESTED -->
+      <description>setrep: invalid replication factor -- too high</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir -p /dir0</command>
+        <command>-fs NAMENODE -touchz /dir0/file0</command>
+        <command>-fs NAMENODE -setrep 1025 /dir0/file0</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm -r /user</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^setrep: Requested replication factor of 1025 exceeds maximum of [0-9]+ for /dir0/file0</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>setrep: invalid replication factor -- 0</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir -p dir0</command>
+        <command>-fs NAMENODE -touchz dir0/file0</command>
+        <command>-fs NAMENODE -setrep 0 dir0/file0</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm -r /user</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-setrep: replication must be >= 1</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>setrep: invalid replication factor -- NaN</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir -p dir0</command>
+        <command>-fs NAMENODE -touchz dir0/file0</command>
+        <command>-fs NAMENODE -setrep three dir0/file0</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm -r /user</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^setrep: Illegal replication, a positive integer expected</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
     <!-- Tests for touchz-->
     <test> <!-- TESTED -->
       <description>touchz: touching file (absolute path) </description>


[04/50] [abbrv] hadoop git commit: HDFS-8980. Remove unnecessary block replacement in INodeFile. Contributed by Jing Zhao.

Posted by vv...@apache.org.
HDFS-8980. Remove unnecessary block replacement in INodeFile. Contributed by Jing Zhao.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/caa04de1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/caa04de1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/caa04de1

Branch: refs/heads/YARN-3926
Commit: caa04de149030691b7bc952b534c6128db217ed2
Parents: cf83156
Author: Jing Zhao <ji...@apache.org>
Authored: Mon Aug 31 11:48:09 2015 -0700
Committer: Jing Zhao <ji...@apache.org>
Committed: Mon Aug 31 11:48:09 2015 -0700

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |  2 +
 .../hdfs/server/blockmanagement/BlockInfo.java  | 19 +------
 .../blockmanagement/BlockInfoContiguous.java    | 15 -----
 .../server/blockmanagement/BlockManager.java    | 58 +++++++-------------
 .../hdfs/server/blockmanagement/BlocksMap.java  | 16 ------
 .../hdfs/server/namenode/FSEditLogLoader.java   |  8 +--
 6 files changed, 29 insertions(+), 89 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/caa04de1/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 3382f81..7b5979e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -865,6 +865,8 @@ Release 2.8.0 - UNRELEASED
 
     HDFS-8983. NameNode support for protected directories. (Arpit Agarwal)
 
+    HDFS-8980. Remove unnecessary block replacement in INodeFile. (jing9)
+
   OPTIMIZATIONS
 
     HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/caa04de1/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
index 706cbcd..810784d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
@@ -36,7 +36,7 @@ import static org.apache.hadoop.hdfs.server.namenode.INodeId.INVALID_INODE_ID;
  * the block are stored.
  */
 @InterfaceAudience.Private
-public abstract class  BlockInfo extends Block
+public abstract class BlockInfo extends Block
     implements LightWeightGSet.LinkedElement {
 
   public static final BlockInfo[] EMPTY_ARRAY = {};
@@ -207,12 +207,6 @@ public abstract class  BlockInfo extends Block
   abstract boolean removeStorage(DatanodeStorageInfo storage);
 
   /**
-   * Replace the current BlockInfo with the new one in corresponding
-   * DatanodeStorageInfo's linked list
-   */
-  abstract void replaceBlock(BlockInfo newBlock);
-
-  /**
    * Find specified DatanodeStorageInfo.
    * @return DatanodeStorageInfo or null if not found.
    */
@@ -372,19 +366,12 @@ public abstract class  BlockInfo extends Block
   }
 
   /**
-   * Convert an under construction block to a complete block.
-   *
-   * @return BlockInfo - a complete block.
-   * @throws IOException if the state of the block
-   * (the generation stamp and the length) has not been committed by
-   * the client or it does not have at least a minimal number of replicas
-   * reported from data-nodes.
+   * Convert an under construction block to complete.
    */
-  BlockInfo convertToCompleteBlock() throws IOException {
+  void convertToCompleteBlock() {
     assert getBlockUCState() != BlockUCState.COMPLETE :
         "Trying to convert a COMPLETE block";
     uc = null;
-    return this;
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/caa04de1/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguous.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguous.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguous.java
index 42934c3..94fb222 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguous.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguous.java
@@ -104,19 +104,4 @@ public class BlockInfoContiguous extends BlockInfo {
     }
     return 0;
   }
-
-  @Override
-  void replaceBlock(BlockInfo newBlock) {
-    assert newBlock instanceof BlockInfoContiguous;
-    for (int i = this.numNodes() - 1; i >= 0; i--) {
-      final DatanodeStorageInfo storage = this.getStorageInfo(i);
-      final boolean removed = storage.removeBlock(this);
-      assert removed : "currentBlock not found.";
-
-      final DatanodeStorageInfo.AddBlockResult result = storage.addBlock(
-          newBlock);
-      assert result == DatanodeStorageInfo.AddBlockResult.ADDED :
-          "newBlock already exists.";
-    }
-  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/caa04de1/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 8f7bb55..1346ab3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -648,37 +648,34 @@ public class BlockManager implements BlockStatsMXBean {
       return false; // already completed (e.g. by syncBlock)
     
     final boolean b = commitBlock(lastBlock, commitBlock);
-    if(countNodes(lastBlock).liveReplicas() >= minReplication)
-      completeBlock(bc, bc.numBlocks()-1, false);
+    if (countNodes(lastBlock).liveReplicas() >= minReplication) {
+      completeBlock(lastBlock, false);
+    }
     return b;
   }
 
   /**
    * Convert a specified block of the file to a complete block.
-   * @param bc file
-   * @param blkIndex  block index in the file
    * @throws IOException if the block does not have at least a minimal number
    * of replicas reported from data-nodes.
    */
-  private BlockInfo completeBlock(final BlockCollection bc,
-      final int blkIndex, boolean force) throws IOException {
-    if(blkIndex < 0)
-      return null;
-    BlockInfo curBlock = bc.getBlocks()[blkIndex];
-    if(curBlock.isComplete())
-      return curBlock;
+  private void completeBlock(BlockInfo curBlock, boolean force)
+      throws IOException {
+    if (curBlock.isComplete()) {
+      return;
+    }
 
     int numNodes = curBlock.numNodes();
-    if (!force && numNodes < minReplication)
-      throw new IOException("Cannot complete block: " +
-          "block does not satisfy minimal replication requirement.");
-    if(!force && curBlock.getBlockUCState() != BlockUCState.COMMITTED)
+    if (!force && numNodes < minReplication) {
+      throw new IOException("Cannot complete block: "
+          + "block does not satisfy minimal replication requirement.");
+    }
+    if (!force && curBlock.getBlockUCState() != BlockUCState.COMMITTED) {
       throw new IOException(
           "Cannot complete block: block has not been COMMITTED by the client");
-    BlockInfo completeBlock = curBlock.convertToCompleteBlock();
-    // replace penultimate block in file
-    bc.setBlock(blkIndex, completeBlock);
-    
+    }
+
+    curBlock.convertToCompleteBlock();
     // Since safe-mode only counts complete blocks, and we now have
     // one more complete block, we need to adjust the total up, and
     // also count it as safe, if we have at least the minimum replica
@@ -688,33 +685,18 @@ public class BlockManager implements BlockStatsMXBean {
     namesystem.adjustSafeModeBlockTotals(0, 1);
     namesystem.incrementSafeBlockCount(
         Math.min(numNodes, minReplication));
-    
-    // replace block in the blocksMap
-    return blocksMap.replaceBlock(completeBlock);
   }
 
-  private BlockInfo completeBlock(final BlockCollection bc,
-      final BlockInfo block, boolean force) throws IOException {
-    BlockInfo[] fileBlocks = bc.getBlocks();
-    for(int idx = 0; idx < fileBlocks.length; idx++)
-      if(fileBlocks[idx] == block) {
-        return completeBlock(bc, idx, force);
-      }
-    return block;
-  }
-  
   /**
    * Force the given block in the given file to be marked as complete,
    * regardless of whether enough replicas are present. This is necessary
    * when tailing edit logs as a Standby.
    */
-  public BlockInfo forceCompleteBlock(final BlockCollection bc,
-      final BlockInfo block) throws IOException {
+  public void forceCompleteBlock(final BlockInfo block) throws IOException {
     block.commitBlock(block);
-    return completeBlock(bc, block, true);
+    completeBlock(block, true);
   }
 
-  
   /**
    * Convert the last block of the file to an under construction block.<p>
    * The block is converted only if the file has blocks and the last one
@@ -2503,7 +2485,7 @@ public class BlockManager implements BlockStatsMXBean {
     int numCurrentReplica = countLiveNodes(storedBlock);
     if (storedBlock.getBlockUCState() == BlockUCState.COMMITTED
         && numCurrentReplica >= minReplication) {
-      completeBlock(getBlockCollection(storedBlock), storedBlock, false);
+      completeBlock(storedBlock, false);
     } else if (storedBlock.isComplete() && result == AddBlockResult.ADDED) {
       // check whether safe replication is reached for the block
       // only complete blocks are counted towards that.
@@ -2577,7 +2559,7 @@ public class BlockManager implements BlockStatsMXBean {
 
     if(storedBlock.getBlockUCState() == BlockUCState.COMMITTED &&
         numLiveReplicas >= minReplication) {
-      storedBlock = completeBlock(bc, storedBlock, false);
+      completeBlock(storedBlock, false);
     } else if (storedBlock.isComplete() && result == AddBlockResult.ADDED) {
       // check whether safe replication is reached for the block
       // only complete blocks are counted towards that

http://git-wip-us.apache.org/repos/asf/hadoop/blob/caa04de1/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlocksMap.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlocksMap.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlocksMap.java
index 33c68f3..9189c32 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlocksMap.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlocksMap.java
@@ -209,20 +209,4 @@ class BlocksMap {
   int getCapacity() {
     return capacity;
   }
-
-  /**
-   * Replace a block in the block map by a new block.
-   * The new block and the old one have the same key.
-   * @param newBlock - block for replacement
-   * @return new block
-   */
-  BlockInfo replaceBlock(BlockInfo newBlock) {
-    BlockInfo currentBlock = blocks.get(newBlock);
-    assert currentBlock != null : "the block is not in blocksMap";
-    // replace block in data-node lists
-    currentBlock.replaceBlock(newBlock);
-    // replace block in the map itself
-    blocks.put(newBlock);
-    return newBlock;
-  }
 }
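
Since replaceBlock() is now gone, it may help to recall what it relied on: the old block and its replacement compare equal (same block id), so re-inserting the new object under the same logical key swapped it in. A rough, self-contained approximation — the real BlocksMap is backed by a GSet where the element is its own key, not a HashMap, so treat this as a sketch only:

import java.util.HashMap;
import java.util.Map;

class ReplaceBlockSketch {

  static class BlockInfo {
    final long blockId;
    BlockInfo(long blockId) { this.blockId = blockId; }

    // Equality is by block id only, so an old object and its
    // replacement are interchangeable as lookup keys.
    @Override public boolean equals(Object o) {
      return o instanceof BlockInfo && ((BlockInfo) o).blockId == blockId;
    }
    @Override public int hashCode() { return Long.hashCode(blockId); }
  }

  private final Map<BlockInfo, BlockInfo> blocks = new HashMap<>();

  // What the deleted method boiled down to: find the entry via the
  // (equal) new key, then overwrite it with the new object.
  BlockInfo replaceBlock(BlockInfo newBlock) {
    BlockInfo currentBlock = blocks.get(newBlock);
    assert currentBlock != null : "the block is not in blocksMap";
    blocks.put(newBlock, newBlock); // same logical key, new object
    return newBlock;
  }
}

One subtlety the sketch papers over: HashMap.put() with an equal key keeps the original key reference and only replaces the value, whereas a GSet replaces the element outright. In-place conversion sidesteps that whole class of pitfalls, which is a large part of why the method could be deleted.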

http://git-wip-us.apache.org/repos/asf/hadoop/blob/caa04de1/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
index f22762c..fc0bb78 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
@@ -506,7 +506,7 @@ public class FSEditLogLoader {
       }
       INodeFile oldFile = INodeFile.valueOf(fsDir.getINode(path), path);
       // add the new block to the INodeFile
-      addNewBlock(fsDir, addBlockOp, oldFile);
+      addNewBlock(addBlockOp, oldFile);
       break;
     }
     case OP_SET_REPLICATION: {
@@ -940,7 +940,7 @@ public class FSEditLogLoader {
   /**
    * Add a new block into the given INodeFile
    */
-  private void addNewBlock(FSDirectory fsDir, AddBlockOp op, INodeFile file)
+  private void addNewBlock(AddBlockOp op, INodeFile file)
       throws IOException {
     BlockInfo[] oldBlocks = file.getBlocks();
     Block pBlock = op.getPenultimateBlock();
@@ -960,7 +960,7 @@ public class FSEditLogLoader {
       
       oldLastBlock.setNumBytes(pBlock.getNumBytes());
       if (!oldLastBlock.isComplete()) {
-        fsNamesys.getBlockManager().forceCompleteBlock(file, oldLastBlock);
+        fsNamesys.getBlockManager().forceCompleteBlock(oldLastBlock);
         fsNamesys.getBlockManager().processQueuedMessagesForBlock(pBlock);
       }
     } else { // the penultimate block is null
@@ -1013,7 +1013,7 @@ public class FSEditLogLoader {
       if (!oldBlock.isComplete() &&
           (!isLastBlock || op.shouldCompleteLastBlock())) {
         changeMade = true;
-        fsNamesys.getBlockManager().forceCompleteBlock(file, oldBlock);
+        fsNamesys.getBlockManager().forceCompleteBlock(oldBlock);
       }
       if (changeMade) {
         // The state or gen-stamp of the block has changed. So, we may be
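
The FSEditLogLoader changes above are purely mechanical: every call site drops the INodeFile argument, and addNewBlock() consequently no longer needs the FSDirectory parameter either. A hedged sketch of the resulting call pattern, again with stand-in types rather than the real HDFS classes:

class EditReplaySketch {

  interface BlockManager {
    // The new, file-free signature from this patch.
    void forceCompleteBlock(BlockInfo block);
  }

  static class BlockInfo {
    private boolean complete;
    boolean isComplete() { return complete; }
    void markComplete() { complete = true; }
  }

  // Mirrors the shape of addNewBlock(): when replaying an add-block
  // edit, a still-under-construction previous last block is forced
  // complete before the new block is appended.
  static void replayAddBlock(BlockManager bm, BlockInfo oldLastBlock) {
    if (!oldLastBlock.isComplete()) {
      bm.forceCompleteBlock(oldLastBlock); // no INodeFile argument now
    }
  }
}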


[40/50] [abbrv] hadoop git commit: HDFS-9021. Use a yellow elephant rather than a blue one in diagram.

Posted by vv...@apache.org.
HDFS-9021. Use a yellow elephant rather than a blue one in diagram.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c83d13c6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c83d13c6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c83d13c6

Branch: refs/heads/YARN-3926
Commit: c83d13c64993c3a7f0f35142cddac19e1074976e
Parents: ed78b14
Author: Andrew Wang <wa...@apache.org>
Authored: Thu Sep 3 16:27:20 2015 -0700
Committer: Andrew Wang <wa...@apache.org>
Committed: Thu Sep 3 16:27:20 2015 -0700

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |   2 ++
 .../site/resources/images/LazyPersistWrites.png | Bin 107161 -> 90224 bytes
 2 files changed, 2 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c83d13c6/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index ce3fbb4..b9b89aa 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -886,6 +886,8 @@ Release 2.8.0 - UNRELEASED
     HDFS-9002. Move o.a.h.hdfs.net/*Peer classes to hdfs-client.
     (Mingliang Liu via wheat9)
 
+    HDFS-9021. Use a yellow elephant rather than a blue one in diagram. (wang)
+
   OPTIMIZATIONS
 
     HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c83d13c6/hadoop-hdfs-project/hadoop-hdfs/src/site/resources/images/LazyPersistWrites.png
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/resources/images/LazyPersistWrites.png b/hadoop-hdfs-project/hadoop-hdfs/src/site/resources/images/LazyPersistWrites.png
index b2bde93..a21e866 100644
Binary files a/hadoop-hdfs-project/hadoop-hdfs/src/site/resources/images/LazyPersistWrites.png and b/hadoop-hdfs-project/hadoop-hdfs/src/site/resources/images/LazyPersistWrites.png differ