Posted to common-commits@hadoop.apache.org by as...@apache.org on 2017/07/12 04:53:17 UTC

[01/50] [abbrv] hadoop git commit: HADOOP-14611. NetworkTopology.DEFAULT_HOST_LEVEL is unused (Contributed by Chen Liang via Daniel Templeton) [Forced Update!]

Repository: hadoop
Updated Branches:
  refs/heads/YARN-5972 6999ad2e0 -> 66b01b343 (forced update)


HADOOP-14611. NetworkTopology.DEFAULT_HOST_LEVEL is unused
(Contributed by Chen Liang via Daniel Templeton)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5a75f738
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5a75f738
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5a75f738

Branch: refs/heads/YARN-5972
Commit: 5a75f73893567151f525950cc1a15b3f1bfeac26
Parents: b08cc97
Author: Daniel Templeton <te...@apache.org>
Authored: Thu Jun 29 12:28:43 2017 -0700
Committer: Daniel Templeton <te...@apache.org>
Committed: Thu Jun 29 12:28:43 2017 -0700

----------------------------------------------------------------------
 .../src/main/java/org/apache/hadoop/net/NetworkTopology.java        | 1 -
 1 file changed, 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5a75f738/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java
index 1018d58..278bf72 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java
@@ -45,7 +45,6 @@ import java.util.concurrent.locks.ReentrantReadWriteLock;
 @InterfaceStability.Unstable
 public class NetworkTopology {
   public final static String DEFAULT_RACK = "/default-rack";
-  public final static int DEFAULT_HOST_LEVEL = 2;
   public static final Logger LOG =
       LoggerFactory.getLogger(NetworkTopology.class);
 




[11/50] [abbrv] hadoop git commit: HADOOP-14443. Azure: Support retry and client side failover for authorization, SASKey and delegation token generation. Contributed by Santhosh G Nayak

Posted by as...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/38996fdc/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemAuthorization.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemAuthorization.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemAuthorization.java
index a0276cb5..fbd7f62 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemAuthorization.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemAuthorization.java
@@ -46,7 +46,7 @@ public class TestNativeAzureFileSystemAuthorization
   protected AzureBlobStorageTestAccount createTestAccount() throws Exception {
     Configuration conf = new Configuration();
     conf.set(NativeAzureFileSystem.KEY_AZURE_AUTHORIZATION, "true");
-    conf.set(RemoteWasbAuthorizerImpl.KEY_REMOTE_AUTH_SERVICE_URL, "http://localhost/");
+    conf.set(RemoteWasbAuthorizerImpl.KEY_REMOTE_AUTH_SERVICE_URLS, "http://localhost/");
     return AzureBlobStorageTestAccount.create(conf);
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38996fdc/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestWasbRemoteCallHelper.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestWasbRemoteCallHelper.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestWasbRemoteCallHelper.java
index 77be1b8..f459b24 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestWasbRemoteCallHelper.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestWasbRemoteCallHelper.java
@@ -21,34 +21,48 @@ package org.apache.hadoop.fs.azure;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.contract.ContractTestUtils;
-import org.apache.http.*;
+import org.apache.hadoop.io.retry.RetryUtils;
+import org.apache.http.Header;
+import org.apache.http.HttpResponse;
+import org.apache.http.HttpEntity;
+import org.apache.http.HttpStatus;
+import org.apache.http.StatusLine;
+import org.apache.http.ProtocolVersion;
+import org.apache.http.ParseException;
+import org.apache.http.HeaderElement;
 import org.apache.http.client.HttpClient;
 import org.apache.http.client.methods.HttpGet;
+import org.hamcrest.Description;
+import org.hamcrest.TypeSafeMatcher;
 import org.junit.Assume;
 import org.junit.Before;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.rules.ExpectedException;
+import org.mockito.ArgumentMatcher;
 import org.mockito.Mockito;
 
 import java.io.ByteArrayInputStream;
-import java.io.UnsupportedEncodingException;
-import java.net.URLEncoder;
 import java.nio.charset.StandardCharsets;
 
 import static org.apache.hadoop.fs.azure.AzureNativeFileSystemStore.KEY_USE_SECURE_MODE;
+import static org.mockito.Matchers.argThat;
+import static org.mockito.Mockito.atLeast;
+import static org.mockito.Mockito.times;
 
 /**
  * Test class to hold all WasbRemoteCallHelper tests
  */
 public class TestWasbRemoteCallHelper
     extends AbstractWasbTestBase {
+  public static final String EMPTY_STRING = "";
+  private static final int INVALID_HTTP_STATUS_CODE_999 = 999;
 
   @Override
   protected AzureBlobStorageTestAccount createTestAccount() throws Exception {
     Configuration conf = new Configuration();
     conf.set(NativeAzureFileSystem.KEY_AZURE_AUTHORIZATION, "true");
-    conf.set(RemoteWasbAuthorizerImpl.KEY_REMOTE_AUTH_SERVICE_URL, "http://localhost/");
+    conf.set(RemoteWasbAuthorizerImpl.KEY_REMOTE_AUTH_SERVICE_URLS, "http://localhost1/,http://localhost2/");
     return AzureBlobStorageTestAccount.create(conf);
   }
 
@@ -80,7 +94,7 @@ public class TestWasbRemoteCallHelper
     HttpClient mockHttpClient = Mockito.mock(HttpClient.class);
     HttpResponse mockHttpResponse = Mockito.mock(HttpResponse.class);
     Mockito.when(mockHttpClient.execute(Mockito.<HttpGet>any())).thenReturn(mockHttpResponse);
-    Mockito.when(mockHttpResponse.getStatusLine()).thenReturn(newStatusLine(999));
+    Mockito.when(mockHttpResponse.getStatusLine()).thenReturn(newStatusLine(INVALID_HTTP_STATUS_CODE_999));
     // finished setting up mocks
 
     performop(mockHttpClient);
@@ -99,7 +113,7 @@ public class TestWasbRemoteCallHelper
     HttpClient mockHttpClient = Mockito.mock(HttpClient.class);
     HttpResponse mockHttpResponse = Mockito.mock(HttpResponse.class);
     Mockito.when(mockHttpClient.execute(Mockito.<HttpGet>any())).thenReturn(mockHttpResponse);
-    Mockito.when(mockHttpResponse.getStatusLine()).thenReturn(newStatusLine(200));
+    Mockito.when(mockHttpResponse.getStatusLine()).thenReturn(newStatusLine(HttpStatus.SC_OK));
     Mockito.when(mockHttpResponse.getFirstHeader("Content-Type"))
         .thenReturn(newHeader("Content-Type", "text/plain"));
     // finished setting up mocks
@@ -120,7 +134,7 @@ public class TestWasbRemoteCallHelper
     HttpClient mockHttpClient = Mockito.mock(HttpClient.class);
     HttpResponse mockHttpResponse = Mockito.mock(HttpResponse.class);
     Mockito.when(mockHttpClient.execute(Mockito.<HttpGet>any())).thenReturn(mockHttpResponse);
-    Mockito.when(mockHttpResponse.getStatusLine()).thenReturn(newStatusLine(200));
+    Mockito.when(mockHttpResponse.getStatusLine()).thenReturn(newStatusLine(HttpStatus.SC_OK));
     Mockito.when(mockHttpResponse.getFirstHeader("Content-Type"))
         .thenReturn(newHeader("Content-Type", "application/json"));
     // finished setting up mocks
@@ -141,7 +155,7 @@ public class TestWasbRemoteCallHelper
     HttpClient mockHttpClient = Mockito.mock(HttpClient.class);
     HttpResponse mockHttpResponse = Mockito.mock(HttpResponse.class);
     Mockito.when(mockHttpClient.execute(Mockito.<HttpGet>any())).thenReturn(mockHttpResponse);
-    Mockito.when(mockHttpResponse.getStatusLine()).thenReturn(newStatusLine(200));
+    Mockito.when(mockHttpResponse.getStatusLine()).thenReturn(newStatusLine(HttpStatus.SC_OK));
     Mockito.when(mockHttpResponse.getFirstHeader("Content-Type"))
         .thenReturn(newHeader("Content-Type", "application/json"));
     Mockito.when(mockHttpResponse.getFirstHeader("Content-Length"))
@@ -164,7 +178,7 @@ public class TestWasbRemoteCallHelper
     HttpClient mockHttpClient = Mockito.mock(HttpClient.class);
     HttpResponse mockHttpResponse = Mockito.mock(HttpResponse.class);
     Mockito.when(mockHttpClient.execute(Mockito.<HttpGet>any())).thenReturn(mockHttpResponse);
-    Mockito.when(mockHttpResponse.getStatusLine()).thenReturn(newStatusLine(200));
+    Mockito.when(mockHttpResponse.getStatusLine()).thenReturn(newStatusLine(HttpStatus.SC_OK));
     Mockito.when(mockHttpResponse.getFirstHeader("Content-Type"))
         .thenReturn(newHeader("Content-Type", "application/json"));
     Mockito.when(mockHttpResponse.getFirstHeader("Content-Length"))
@@ -188,7 +202,7 @@ public class TestWasbRemoteCallHelper
     HttpEntity mockHttpEntity = Mockito.mock(HttpEntity.class);
 
     Mockito.when(mockHttpClient.execute(Mockito.<HttpGet>any())).thenReturn(mockHttpResponse);
-    Mockito.when(mockHttpResponse.getStatusLine()).thenReturn(newStatusLine(200));
+    Mockito.when(mockHttpResponse.getStatusLine()).thenReturn(newStatusLine(HttpStatus.SC_OK));
     Mockito.when(mockHttpResponse.getFirstHeader("Content-Type"))
         .thenReturn(newHeader("Content-Type", "application/json"));
     Mockito.when(mockHttpResponse.getFirstHeader("Content-Length"))
@@ -220,7 +234,7 @@ public class TestWasbRemoteCallHelper
     HttpEntity mockHttpEntity = Mockito.mock(HttpEntity.class);
 
     Mockito.when(mockHttpClient.execute(Mockito.<HttpGet>any())).thenReturn(mockHttpResponse);
-    Mockito.when(mockHttpResponse.getStatusLine()).thenReturn(newStatusLine(200));
+    Mockito.when(mockHttpResponse.getStatusLine()).thenReturn(newStatusLine(HttpStatus.SC_OK));
     Mockito.when(mockHttpResponse.getFirstHeader("Content-Type"))
         .thenReturn(newHeader("Content-Type", "application/json"));
     Mockito.when(mockHttpResponse.getFirstHeader("Content-Length"))
@@ -250,7 +264,7 @@ public class TestWasbRemoteCallHelper
     HttpEntity mockHttpEntity = Mockito.mock(HttpEntity.class);
 
     Mockito.when(mockHttpClient.execute(Mockito.<HttpGet>any())).thenReturn(mockHttpResponse);
-    Mockito.when(mockHttpResponse.getStatusLine()).thenReturn(newStatusLine(200));
+    Mockito.when(mockHttpResponse.getStatusLine()).thenReturn(newStatusLine(HttpStatus.SC_OK));
     Mockito.when(mockHttpResponse.getFirstHeader("Content-Type"))
         .thenReturn(newHeader("Content-Type", "application/json"));
     Mockito.when(mockHttpResponse.getFirstHeader("Content-Length"))
@@ -263,17 +277,155 @@ public class TestWasbRemoteCallHelper
     performop(mockHttpClient);
   }
 
-  private void setupExpectations() throws UnsupportedEncodingException {
+  @Test
+  public void testWhenOneInstanceIsDown() throws Throwable {
+
+    // set up mocks
+    HttpClient mockHttpClient = Mockito.mock(HttpClient.class);
+    HttpEntity mockHttpEntity = Mockito.mock(HttpEntity.class);
+
+    HttpResponse mockHttpResponseService1 = Mockito.mock(HttpResponse.class);
+    Mockito.when(mockHttpResponseService1.getStatusLine())
+        .thenReturn(newStatusLine(HttpStatus.SC_INTERNAL_SERVER_ERROR));
+    Mockito.when(mockHttpResponseService1.getFirstHeader("Content-Type"))
+        .thenReturn(newHeader("Content-Type", "application/json"));
+    Mockito.when(mockHttpResponseService1.getFirstHeader("Content-Length"))
+        .thenReturn(newHeader("Content-Length", "1024"));
+    Mockito.when(mockHttpResponseService1.getEntity())
+        .thenReturn(mockHttpEntity);
+
+    HttpResponse mockHttpResponseService2 = Mockito.mock(HttpResponse.class);
+    Mockito.when(mockHttpResponseService2.getStatusLine())
+        .thenReturn(newStatusLine(HttpStatus.SC_OK));
+    Mockito.when(mockHttpResponseService2.getFirstHeader("Content-Type"))
+        .thenReturn(newHeader("Content-Type", "application/json"));
+    Mockito.when(mockHttpResponseService2.getFirstHeader("Content-Length"))
+        .thenReturn(newHeader("Content-Length", "1024"));
+    Mockito.when(mockHttpResponseService2.getEntity())
+        .thenReturn(mockHttpEntity);
+
+    class HttpGetForService1 extends ArgumentMatcher<HttpGet>{
+      @Override public boolean matches(Object o) {
+        return checkHttpGetMatchHost((HttpGet) o, "localhost1");
+      }
+    }
+    class HttpGetForService2 extends ArgumentMatcher<HttpGet>{
+      @Override public boolean matches(Object o) {
+        return checkHttpGetMatchHost((HttpGet) o, "localhost2");
+      }
+    }
+    Mockito.when(mockHttpClient.execute(argThat(new HttpGetForService1())))
+        .thenReturn(mockHttpResponseService1);
+    Mockito.when(mockHttpClient.execute(argThat(new HttpGetForService2())))
+        .thenReturn(mockHttpResponseService2);
+
+    // Need 3 stubbed responses because performop() does 3 fs operations.
+    Mockito.when(mockHttpEntity.getContent())
+        .thenReturn(new ByteArrayInputStream(validJsonResponse()
+            .getBytes(StandardCharsets.UTF_8)))
+        .thenReturn(new ByteArrayInputStream(validJsonResponse()
+            .getBytes(StandardCharsets.UTF_8)))
+        .thenReturn(new ByteArrayInputStream(validJsonResponse()
+            .getBytes(StandardCharsets.UTF_8)));
+    // finished setting up mocks
+
+    performop(mockHttpClient);
+
+    Mockito.verify(mockHttpClient, times(3)).execute(Mockito.argThat(new HttpGetForService2()));
+  }
+
+  @Test
+  public void testWhenServiceInstancesAreDown() throws Throwable {
+    //expectedEx.expect(WasbAuthorizationException.class);
+    // set up mocks
+    HttpClient mockHttpClient = Mockito.mock(HttpClient.class);
+    HttpEntity mockHttpEntity = Mockito.mock(HttpEntity.class);
+
+    HttpResponse mockHttpResponseService1 = Mockito.mock(HttpResponse.class);
+    Mockito.when(mockHttpResponseService1.getStatusLine())
+        .thenReturn(newStatusLine(HttpStatus.SC_INTERNAL_SERVER_ERROR));
+    Mockito.when(mockHttpResponseService1.getFirstHeader("Content-Type"))
+        .thenReturn(newHeader("Content-Type", "application/json"));
+    Mockito.when(mockHttpResponseService1.getFirstHeader("Content-Length"))
+        .thenReturn(newHeader("Content-Length", "1024"));
+    Mockito.when(mockHttpResponseService1.getEntity())
+        .thenReturn(mockHttpEntity);
+
+    HttpResponse mockHttpResponseService2 = Mockito.mock(HttpResponse.class);
+    Mockito.when(mockHttpResponseService2.getStatusLine())
+        .thenReturn(newStatusLine(
+        HttpStatus.SC_INTERNAL_SERVER_ERROR));
+    Mockito.when(mockHttpResponseService2.getFirstHeader("Content-Type"))
+        .thenReturn(newHeader("Content-Type", "application/json"));
+    Mockito.when(mockHttpResponseService2.getFirstHeader("Content-Length"))
+        .thenReturn(newHeader("Content-Length", "1024"));
+    Mockito.when(mockHttpResponseService2.getEntity())
+        .thenReturn(mockHttpEntity);
+
+    class HttpGetForService1 extends ArgumentMatcher<HttpGet>{
+      @Override public boolean matches(Object o) {
+        return checkHttpGetMatchHost((HttpGet) o, "localhost1");
+      }
+    }
+    class HttpGetForService2 extends ArgumentMatcher<HttpGet>{
+      @Override public boolean matches(Object o) {
+        return checkHttpGetMatchHost((HttpGet) o, "localhost2");
+      }
+    }
+    Mockito.when(mockHttpClient.execute(argThat(new HttpGetForService1())))
+        .thenReturn(mockHttpResponseService1);
+    Mockito.when(mockHttpClient.execute(argThat(new HttpGetForService2())))
+        .thenReturn(mockHttpResponseService2);
 
-    String path = new Path("/").makeQualified(fs.getUri(), fs.getWorkingDirectory()).toString();
-    String pathEncoded = URLEncoder.encode(path, "UTF-8");
+    // Need 3 stubbed responses because performop() does 3 fs operations.
+    Mockito.when(mockHttpEntity.getContent())
+        .thenReturn(new ByteArrayInputStream(
+            validJsonResponse().getBytes(StandardCharsets.UTF_8)))
+        .thenReturn(new ByteArrayInputStream(
+            validJsonResponse().getBytes(StandardCharsets.UTF_8)))
+        .thenReturn(new ByteArrayInputStream(
+            validJsonResponse().getBytes(StandardCharsets.UTF_8)));
+    // finished setting up mocks
+    try {
+      performop(mockHttpClient);
+    } catch (WasbAuthorizationException e) {
+      e.printStackTrace();
+      Mockito.verify(mockHttpClient, atLeast(3))
+          .execute(argThat(new HttpGetForService1()));
+      Mockito.verify(mockHttpClient, atLeast(3))
+          .execute(argThat(new HttpGetForService2()));
+      Mockito.verify(mockHttpClient, times(7)).execute(Mockito.<HttpGet>any());
+    }
+  }
 
-    String requestURI = String.format("http://localhost/CHECK_AUTHORIZATION?wasb_absolute_path=%s&operation_type=write", pathEncoded);
+  private void setupExpectations() {
     expectedEx.expect(WasbAuthorizationException.class);
-    expectedEx.expectMessage("org.apache.hadoop.fs.azure.WasbRemoteCallException: "
-        + requestURI
-        + ":Encountered IOException while making remote call"
-    );
+
+    class MatchesPattern extends TypeSafeMatcher<String> {
+      private String pattern;
+
+      MatchesPattern(String pattern) {
+        this.pattern = pattern;
+      }
+
+      @Override protected boolean matchesSafely(String item) {
+        return item.matches(pattern);
+      }
+
+      @Override public void describeTo(Description description) {
+        description.appendText("matches pattern ").appendValue(pattern);
+      }
+
+      @Override protected void describeMismatchSafely(String item,
+          Description mismatchDescription) {
+        mismatchDescription.appendText("does not match");
+      }
+    }
+
+    expectedEx.expectMessage(new MatchesPattern(
+        "org\\.apache\\.hadoop\\.fs\\.azure\\.WasbRemoteCallException: "
+            + "Encountered error while making remote call to "
+            + "http:\\/\\/localhost1\\/,http:\\/\\/localhost2\\/ retried 6 time\\(s\\)\\."));
   }
 
   private void performop(HttpClient mockHttpClient) throws Throwable {
@@ -282,7 +434,10 @@ public class TestWasbRemoteCallHelper
 
     RemoteWasbAuthorizerImpl authorizer = new RemoteWasbAuthorizerImpl();
     authorizer.init(fs.getConf());
-    WasbRemoteCallHelper mockWasbRemoteCallHelper = new WasbRemoteCallHelper();
+    WasbRemoteCallHelper mockWasbRemoteCallHelper = new WasbRemoteCallHelper(
+        RetryUtils.getMultipleLinearRandomRetry(new Configuration(),
+            EMPTY_STRING, true,
+            EMPTY_STRING, "1000,3,10000,2"));
     mockWasbRemoteCallHelper.updateHttpClient(mockHttpClient);
     authorizer.updateWasbRemoteCallHelper(mockWasbRemoteCallHelper);
     fs.updateWasbAuthorizer(authorizer);
@@ -293,21 +448,26 @@ public class TestWasbRemoteCallHelper
   }
 
   private String validJsonResponse() {
-    return new String(
-        "{\"responseCode\": 0, \"authorizationResult\": true, \"responseMessage\": \"Authorized\"}"
-    );
+    return "{"
+        + "\"responseCode\": 0,"
+        + "\"authorizationResult\": true,"
+        + "\"responseMessage\": \"Authorized\""
+        + "}";
   }
 
   private String malformedJsonResponse() {
-    return new String(
-        "{\"responseCode\": 0, \"authorizationResult\": true, \"responseMessage\":"
-    );
+    return "{"
+        + "\"responseCode\": 0,"
+        + "\"authorizationResult\": true,"
+        + "\"responseMessage\":";
   }
 
   private String failureCodeJsonResponse() {
-    return new String(
-        "{\"responseCode\": 1, \"authorizationResult\": false, \"responseMessage\": \"Unauthorized\"}"
-    );
+    return "{"
+        + "\"responseCode\": 1,"
+        + "\"authorizationResult\": false,"
+        + "\"responseMessage\": \"Unauthorized\""
+        + "}";
   }
 
   private StatusLine newStatusLine(int statusCode) {
@@ -347,4 +507,10 @@ public class TestWasbRemoteCallHelper
       }
     };
   }
-}
\ No newline at end of file
+
+  /** Check whether an HttpGet request targets the given remote host. */
+  private static boolean checkHttpGetMatchHost(HttpGet g, String h) {
+    return g != null && g.getURI().getHost().equals(h);
+  }
+
+}
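
A note on the retry specification exercised above: RetryUtils.getMultipleLinearRandomRetry(conf, enabledKey, defaultEnabled, specKey, defaultSpec) builds a MultipleLinearRandomRetry policy from a comma-separated list of "sleepMillis,numRetries" pairs, so the "1000,3,10000,2" passed to the new WasbRemoteCallHelper constructor means roughly three retries with ~1 second pauses followed by two more with ~10 second pauses. With two service URLs configured, the helper fails over between instances while retrying, which is why testWhenServiceInstancesAreDown expects seven HTTP calls in total (one initial attempt plus the "retried 6 time(s)" from the expected message). A minimal standalone sketch of how the spec behaves (not part of the patch; the demo class name is invented):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.io.retry.RetryPolicy;
    import org.apache.hadoop.io.retry.RetryUtils;

    public class RetrySpecDemo {
      public static void main(String[] args) throws Exception {
        // Same call pattern as the test: empty config keys fall through to
        // the supplied defaults (enabled=true, spec="1000,3,10000,2").
        RetryPolicy policy = RetryUtils.getMultipleLinearRandomRetry(
            new Configuration(), "", true, "", "1000,3,10000,2");
        // Walk the spec: retries 0-2 sleep ~1000 ms, retries 3-4 ~10000 ms
        // (randomized around the base value), retry 5 reports FAIL.
        for (int retries = 0; retries <= 5; retries++) {
          RetryPolicy.RetryAction action = policy.shouldRetry(
              new java.io.IOException("simulated failure"), retries, 0, true);
          System.out.println("retry #" + retries + ": " + action.action
              + " delay=" + action.delayMillis + " ms");
        }
      }
    }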




[17/50] [abbrv] hadoop git commit: HDFS-12078. Add time unit to the description of property dfs.namenode.stale.datanode.interval in hdfs-default.xml. Contributed by Weiwei Yang.

Posted by as...@apache.org.
HDFS-12078. Add time unit to the description of property dfs.namenode.stale.datanode.interval in hdfs-default.xml. Contributed by Weiwei Yang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/186650d2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/186650d2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/186650d2

Branch: refs/heads/YARN-5972
Commit: 186650d21d482e2f3bc4523ae989ebe76081b0e3
Parents: b0560e0
Author: Akira Ajisaka <aa...@apache.org>
Authored: Tue Jul 4 14:51:52 2017 +0900
Committer: Akira Ajisaka <aa...@apache.org>
Committed: Tue Jul 4 14:51:52 2017 +0900

----------------------------------------------------------------------
 .../hadoop-hdfs/src/main/resources/hdfs-default.xml              | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/186650d2/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
index 96c04f0..4caee9e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
@@ -1811,8 +1811,8 @@
   <name>dfs.namenode.stale.datanode.interval</name>
   <value>30000</value>
   <description>
-    Default time interval for marking a datanode as "stale", i.e., if 
-    the namenode has not received heartbeat msg from a datanode for 
+    Default time interval in milliseconds for marking a datanode as "stale",
+    i.e., if the namenode has not received heartbeat msg from a datanode for
     more than this time interval, the datanode will be marked and treated 
     as "stale" by default. The stale interval cannot be too small since 
     otherwise this may cause too frequent change of stale states. 




[33/50] [abbrv] hadoop git commit: YARN-6752. Display reserved resources in web UI per application (Contributed by Abdullah Yousufi via Daniel Templeton)

Posted by as...@apache.org.
YARN-6752. Display reserved resources in web UI per application
(Contributed by Abdullah Yousufi via Daniel Templeton)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/06159858
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/06159858
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/06159858

Branch: refs/heads/YARN-5972
Commit: 06159858868a9cbeefc31bc6be5ae48a59cc8a6c
Parents: f484a6f
Author: Daniel Templeton <da...@dt-MBP15.local>
Authored: Sun Jul 9 19:02:31 2017 +0900
Committer: Daniel Templeton <da...@dt-MBP15.local>
Committed: Sun Jul 9 19:02:31 2017 +0900

----------------------------------------------------------------------
 .../apache/hadoop/yarn/server/webapp/dao/AppInfo.java | 14 ++++++++++++++
 .../webapp/FairSchedulerAppsBlock.java                |  6 ++++++
 .../server/resourcemanager/webapp/RMAppsBlock.java    |  8 ++++++++
 .../server/resourcemanager/webapp/dao/AppInfo.java    | 13 +++++++++++++
 .../resourcemanager/webapp/TestRMWebServicesApps.java |  2 +-
 5 files changed, 42 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/06159858/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/dao/AppInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/dao/AppInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/dao/AppInfo.java
index ff4519d..ac2f8da 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/dao/AppInfo.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/dao/AppInfo.java
@@ -61,6 +61,8 @@ public class AppInfo {
   protected int priority;
   private long allocatedCpuVcores;
   private long allocatedMemoryMB;
+  private long reservedCpuVcores;
+  private long reservedMemoryMB;
   protected boolean unmanagedApplication;
   private String appNodeLabelExpression;
   private String amNodeLabelExpression;
@@ -101,6 +103,10 @@ public class AppInfo {
             .getUsedResources().getVirtualCores();
         allocatedMemoryMB = app.getApplicationResourceUsageReport()
             .getUsedResources().getMemorySize();
+        reservedCpuVcores = app.getApplicationResourceUsageReport()
+            .getReservedResources().getVirtualCores();
+        reservedMemoryMB = app.getApplicationResourceUsageReport()
+            .getReservedResources().getMemorySize();
       }
     }
     progress = app.getProgress() * 100; // in percent
@@ -160,6 +166,14 @@ public class AppInfo {
     return allocatedMemoryMB;
   }
 
+  public long getReservedCpuVcores() {
+    return reservedCpuVcores;
+  }
+
+  public long getReservedMemoryMB() {
+    return reservedMemoryMB;
+  }
+
   public float getProgress() {
     return progress;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/06159858/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/FairSchedulerAppsBlock.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/FairSchedulerAppsBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/FairSchedulerAppsBlock.java
index 41b5fe7..b7a7a93 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/FairSchedulerAppsBlock.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/FairSchedulerAppsBlock.java
@@ -95,6 +95,8 @@ public class FairSchedulerAppsBlock extends HtmlBlock {
             th(".runningcontainer", "Running Containers").
             th(".allocatedCpu", "Allocated CPU VCores").
             th(".allocatedMemory", "Allocated Memory MB").
+            th(".reservedCpu", "Reserved CPU VCores").
+            th(".reservedMemory", "Reserved Memory MB").
             th(".progress", "Progress").
             th(".ui", "Tracking UI")._()._().
         tbody();
@@ -142,6 +144,10 @@ public class FairSchedulerAppsBlock extends HtmlBlock {
         .valueOf(appInfo.getAllocatedVCores())).append("\",\"")
       .append(appInfo.getAllocatedMB() == -1 ? "N/A" : String
         .valueOf(appInfo.getAllocatedMB())).append("\",\"")
+      .append(appInfo.getReservedVCores() == -1 ? "N/A" : String
+        .valueOf(appInfo.getReservedVCores())).append("\",\"")
+      .append(appInfo.getReservedMB() == -1 ? "N/A" : String
+        .valueOf(appInfo.getReservedMB())).append("\",\"")
       // Progress bar
       .append("<br title='").append(percent)
       .append("'> <div class='").append(C_PROGRESSBAR).append("' title='")

http://git-wip-us.apache.org/repos/asf/hadoop/blob/06159858/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMAppsBlock.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMAppsBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMAppsBlock.java
index 6a18296..61674d2 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMAppsBlock.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMAppsBlock.java
@@ -66,6 +66,8 @@ public class RMAppsBlock extends AppsBlock {
           .th(".runningcontainer", "Running Containers")
           .th(".allocatedCpu", "Allocated CPU VCores")
           .th(".allocatedMemory", "Allocated Memory MB")
+          .th(".reservedCpu", "Reserved CPU VCores")
+          .th(".reservedMemory", "Reserved Memory MB")
           .th(".queuePercentage", "% of Queue")
           .th(".clusterPercentage", "% of Cluster")
           .th(".progress", "Progress")
@@ -146,6 +148,12 @@ public class RMAppsBlock extends AppsBlock {
         .append(app.getAllocatedMemoryMB() == -1 ? "N/A" :
             String.valueOf(app.getAllocatedMemoryMB()))
         .append("\",\"")
+        .append(app.getReservedCpuVcores() == -1 ? "N/A" : String
+            .valueOf(app.getReservedCpuVcores()))
+        .append("\",\"")
+        .append(app.getReservedMemoryMB() == -1 ? "N/A" :
+            String.valueOf(app.getReservedMemoryMB()))
+        .append("\",\"")
         .append(queuePercent)
         .append("\",\"")
         .append(clusterPercent)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/06159858/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppInfo.java
index 45ecced..f11939a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppInfo.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppInfo.java
@@ -93,6 +93,8 @@ public class AppInfo {
   private String amRPCAddress;
   protected long allocatedMB;
   protected long allocatedVCores;
+  protected long reservedMB;
+  protected long reservedVCores;
   protected int runningContainers;
   protected long memorySeconds;
   protected long vcoreSeconds;
@@ -196,8 +198,11 @@ public class AppInfo {
               .getApplicationResourceUsageReport();
           if (resourceReport != null) {
             Resource usedResources = resourceReport.getUsedResources();
+            Resource reservedResources = resourceReport.getReservedResources();
             allocatedMB = usedResources.getMemorySize();
             allocatedVCores = usedResources.getVirtualCores();
+            reservedMB = reservedResources.getMemorySize();
+            reservedVCores = reservedResources.getVirtualCores();
             runningContainers = resourceReport.getNumUsedContainers();
             queueUsagePercentage = resourceReport.getQueueUsagePercentage();
             clusterUsagePercentage = resourceReport.getClusterUsagePercentage();
@@ -404,6 +409,14 @@ public class AppInfo {
     return this.allocatedVCores;
   }
   
+  public long getReservedMB() {
+    return this.reservedMB;
+  }
+
+  public long getReservedVCores() {
+    return this.reservedVCores;
+  }
+
   public long getPreemptedMB() {
     return preemptedResourceMB;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/06159858/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesApps.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesApps.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesApps.java
index 056f1dd..1cbdec3 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesApps.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesApps.java
@@ -1528,7 +1528,7 @@ public class TestRMWebServicesApps extends JerseyTestBase {
   public void verifyAppInfo(JSONObject info, RMApp app, boolean hasResourceReqs)
       throws JSONException, Exception {
 
-    int expectedNumberOfElements = 34 + (hasResourceReqs ? 2 : 0);
+    int expectedNumberOfElements = 36 + (hasResourceReqs ? 2 : 0);
     String appNodeLabelExpression = null;
     String amNodeLabelExpression = null;
     if (app.getApplicationSubmissionContext()
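
The expected element count rises from 34 to 36 because the two new getters in the dao surface as two extra fields in each app's JSON object. A hedged illustration of the kind of check verifyAppInfo could now include (field names inferred from the getter names, not taken from the patch):

    // The REST payload should now carry the reserved-resource fields.
    long reservedMB = info.getLong("reservedMB");
    long reservedVCores = info.getLong("reservedVCores");
    assertTrue("reservedMB should be non-negative", reservedMB >= 0);
    assertTrue("reservedVCores should be non-negative", reservedVCores >= 0);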




[40/50] [abbrv] hadoop git commit: YARN-6776. Refactor ApplicationMasterService to move actual processing logic to a separate class. (asuresh)

Posted by as...@apache.org.
YARN-6776. Refactor ApplicationMasterService to move actual processing logic to a separate class. (asuresh)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5496a34c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5496a34c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5496a34c

Branch: refs/heads/YARN-5972
Commit: 5496a34c0cb2b1a83cfa6b0aba5a77b05ff2d8f0
Parents: 09653ea
Author: Arun Suresh <as...@apache.org>
Authored: Mon Jul 10 14:34:58 2017 -0700
Committer: Arun Suresh <as...@apache.org>
Committed: Mon Jul 10 14:34:58 2017 -0700

----------------------------------------------------------------------
 .../ams/ApplicationMasterServiceProcessor.java  |  71 +++
 .../yarn/ams/ApplicationMasterServiceUtils.java |  89 ++++
 .../apache/hadoop/yarn/ams/package-info.java    |  24 +
 .../ApplicationMasterService.java               | 425 +----------------
 .../resourcemanager/DefaultAMSProcessor.java    | 455 +++++++++++++++++++
 ...pportunisticContainerAllocatorAMService.java | 163 +++----
 ...pportunisticContainerAllocatorAMService.java |   8 +
 7 files changed, 754 insertions(+), 481 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5496a34c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/ams/ApplicationMasterServiceProcessor.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/ams/ApplicationMasterServiceProcessor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/ams/ApplicationMasterServiceProcessor.java
new file mode 100644
index 0000000..b426f48
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/ams/ApplicationMasterServiceProcessor.java
@@ -0,0 +1,71 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.ams;
+
+import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
+import org.apache.hadoop.yarn.api.protocolrecords
+    .FinishApplicationMasterRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterResponse;
+import org.apache.hadoop.yarn.api.protocolrecords
+    .RegisterApplicationMasterRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterResponse;
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.exceptions.YarnException;
+
+import java.io.IOException;
+
+/**
+ * Interface to abstract out the actual processing logic of the
+ * Application Master Service.
+ */
+public interface ApplicationMasterServiceProcessor {
+
+  /**
+   * Register AM attempt.
+   * @param applicationAttemptId applicationAttemptId.
+   * @param request Register Request.
+   * @return Register Response.
+   * @throws IOException IOException.
+   */
+  RegisterApplicationMasterResponse registerApplicationMaster(
+      ApplicationAttemptId applicationAttemptId,
+      RegisterApplicationMasterRequest request) throws IOException;
+
+  /**
+   * Allocate call.
+   * @param appAttemptId appAttemptId.
+   * @param request Allocate Request.
+   * @return Allocate Response.
+   * @throws YarnException YarnException.
+   */
+  AllocateResponse allocate(ApplicationAttemptId appAttemptId,
+      AllocateRequest request) throws YarnException;
+
+  /**
+   * Finish AM.
+   * @param applicationAttemptId applicationAttemptId.
+   * @param request Finish AM Request.
+   * @return Finish AM response.
+   */
+  FinishApplicationMasterResponse finishApplicationMaster(
+      ApplicationAttemptId applicationAttemptId,
+      FinishApplicationMasterRequest request);
+
+}
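
To show how the new hook composes, here is a hypothetical decorator (not part of this commit) that wraps any processor to time allocate() calls; a subclass of ApplicationMasterService could return such a wrapper around the DefaultAMSProcessor from the createProcessor() override introduced further below:

    import java.io.IOException;

    import org.apache.hadoop.yarn.ams.ApplicationMasterServiceProcessor;
    import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest;
    import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
    import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterRequest;
    import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterResponse;
    import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterRequest;
    import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterResponse;
    import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
    import org.apache.hadoop.yarn.exceptions.YarnException;

    /** Illustrative decorator: logs the latency of every allocate() call. */
    public class TimingAMSProcessor implements ApplicationMasterServiceProcessor {
      private final ApplicationMasterServiceProcessor delegate;

      public TimingAMSProcessor(ApplicationMasterServiceProcessor delegate) {
        this.delegate = delegate;
      }

      @Override
      public RegisterApplicationMasterResponse registerApplicationMaster(
          ApplicationAttemptId applicationAttemptId,
          RegisterApplicationMasterRequest request) throws IOException {
        return delegate.registerApplicationMaster(applicationAttemptId, request);
      }

      @Override
      public AllocateResponse allocate(ApplicationAttemptId appAttemptId,
          AllocateRequest request) throws YarnException {
        long startNanos = System.nanoTime();
        try {
          return delegate.allocate(appAttemptId, request);
        } finally {
          long elapsedMs = (System.nanoTime() - startNanos) / 1_000_000;
          System.out.println("allocate for " + appAttemptId + " took "
              + elapsedMs + " ms");
        }
      }

      @Override
      public FinishApplicationMasterResponse finishApplicationMaster(
          ApplicationAttemptId applicationAttemptId,
          FinishApplicationMasterRequest request) {
        return delegate.finishApplicationMaster(applicationAttemptId, request);
      }
    }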

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5496a34c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/ams/ApplicationMasterServiceUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/ams/ApplicationMasterServiceUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/ams/ApplicationMasterServiceUtils.java
new file mode 100644
index 0000000..476da8b
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/ams/ApplicationMasterServiceUtils.java
@@ -0,0 +1,89 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.ams;
+
+import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
+import org.apache.hadoop.yarn.api.records.Container;
+import org.apache.hadoop.yarn.api.records.ContainerUpdateType;
+import org.apache.hadoop.yarn.api.records.UpdateContainerError;
+import org.apache.hadoop.yarn.api.records.UpdatedContainer;
+
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ * Utility methods to be used by {@link ApplicationMasterServiceProcessor}.
+ */
+public final class ApplicationMasterServiceUtils {
+
+  private ApplicationMasterServiceUtils() { }
+
+  /**
+   * Add update container errors to {@link AllocateResponse}.
+   * @param allocateResponse Allocate Response.
+   * @param updateContainerErrors Errors.
+   */
+  public static void addToUpdateContainerErrors(
+      AllocateResponse allocateResponse,
+      List<UpdateContainerError> updateContainerErrors) {
+    if (!updateContainerErrors.isEmpty()) {
+      if (allocateResponse.getUpdateErrors() != null
+          && !allocateResponse.getUpdateErrors().isEmpty()) {
+        updateContainerErrors.addAll(allocateResponse.getUpdateErrors());
+      }
+      allocateResponse.setUpdateErrors(updateContainerErrors);
+    }
+  }
+
+  /**
+   * Add updated containers to {@link AllocateResponse}.
+   * @param allocateResponse Allocate Response.
+   * @param updateType Update Type.
+   * @param updatedContainers Updated Containers.
+   */
+  public static void addToUpdatedContainers(AllocateResponse allocateResponse,
+      ContainerUpdateType updateType, List<Container> updatedContainers) {
+    if (updatedContainers != null && updatedContainers.size() > 0) {
+      ArrayList<UpdatedContainer> containersToSet = new ArrayList<>();
+      if (allocateResponse.getUpdatedContainers() != null &&
+          !allocateResponse.getUpdatedContainers().isEmpty()) {
+        containersToSet.addAll(allocateResponse.getUpdatedContainers());
+      }
+      for (Container updatedContainer : updatedContainers) {
+        containersToSet.add(
+            UpdatedContainer.newInstance(updateType, updatedContainer));
+      }
+      allocateResponse.setUpdatedContainers(containersToSet);
+    }
+  }
+
+  /**
+   * Add allocated containers to {@link AllocateResponse}.
+   * @param allocateResponse Allocate Response.
+   * @param allocatedContainers Allocated Containers.
+   */
+  public static void addToAllocatedContainers(AllocateResponse allocateResponse,
+      List<Container> allocatedContainers) {
+    if (allocateResponse.getAllocatedContainers() != null
+        && !allocateResponse.getAllocatedContainers().isEmpty()) {
+      allocatedContainers.addAll(allocateResponse.getAllocatedContainers());
+    }
+    allocateResponse.setAllocatedContainers(allocatedContainers);
+  }
+}
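
A brief usage sketch (variable names assumed): several callers may contribute to the same AllocateResponse, and each helper merges into, rather than overwrites, what is already set:

    // The first merge keeps whatever the response already holds; the second
    // unions in a further batch without clobbering the first.
    ApplicationMasterServiceUtils.addToAllocatedContainers(
        allocateResponse, containersFromScheduler);
    ApplicationMasterServiceUtils.addToAllocatedContainers(
        allocateResponse, opportunisticContainers);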

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5496a34c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/ams/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/ams/package-info.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/ams/package-info.java
new file mode 100644
index 0000000..b23534e
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/ams/package-info.java
@@ -0,0 +1,24 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ * Public api for Application Master Service interceptors.
+ */
+@InterfaceAudience.Public
+package org.apache.hadoop.yarn.ams;
+import org.apache.hadoop.classification.InterfaceAudience;
+

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5496a34c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterService.java
index 55b8fbb..fe8b83c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterService.java
@@ -21,12 +21,8 @@ package org.apache.hadoop.yarn.server.resourcemanager;
 import java.io.IOException;
 import java.io.InputStream;
 import java.net.InetSocketAddress;
-import java.net.UnknownHostException;
 import java.util.ArrayList;
-import java.util.Collections;
-import java.util.HashSet;
 import java.util.List;
-import java.util.Set;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ConcurrentMap;
 
@@ -37,10 +33,10 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.ipc.Server;
 import org.apache.hadoop.security.SaslRpcServer;
-import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.authorize.PolicyProvider;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.service.AbstractService;
+import org.apache.hadoop.yarn.ams.ApplicationMasterServiceProcessor;
 import org.apache.hadoop.yarn.api.ApplicationMasterProtocol;
 import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
@@ -52,30 +48,11 @@ import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterRespo
 
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
-import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
 import org.apache.hadoop.yarn.api.records.Container;
-import org.apache.hadoop.yarn.api.records.ContainerId;
-import org.apache.hadoop.yarn.api.records.ContainerUpdateType;
-import org.apache.hadoop.yarn.api.records.NMToken;
-import org.apache.hadoop.yarn.api.records.NodeId;
-import org.apache.hadoop.yarn.api.records.NodeReport;
-import org.apache.hadoop.yarn.api.records.PreemptionContainer;
-import org.apache.hadoop.yarn.api.records.PreemptionContract;
-import org.apache.hadoop.yarn.api.records.PreemptionMessage;
-import org.apache.hadoop.yarn.api.records.PreemptionResourceRequest;
-import org.apache.hadoop.yarn.api.records.Resource;
-import org.apache.hadoop.yarn.api.records.ResourceBlacklistRequest;
-import org.apache.hadoop.yarn.api.records.ResourceRequest;
-import org.apache.hadoop.yarn.api.records.StrictPreemptionContract;
-import org.apache.hadoop.yarn.api.records.UpdateContainerError;
-import org.apache.hadoop.yarn.api.records.UpdatedContainer;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.exceptions.ApplicationAttemptNotFoundException;
 import org.apache.hadoop.yarn.exceptions.ApplicationMasterNotRegisteredException;
 import org.apache.hadoop.yarn.exceptions.InvalidApplicationMasterRequestException;
-import org.apache.hadoop.yarn.exceptions.InvalidContainerReleaseException;
-import org.apache.hadoop.yarn.exceptions.InvalidResourceBlacklistRequestException;
-import org.apache.hadoop.yarn.exceptions.InvalidResourceRequestException;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.factories.RecordFactory;
 import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
@@ -86,23 +63,12 @@ import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.AMLivelinessMonitor;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptImpl;
-import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptState;
-import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.event.RMAppAttemptRegistrationEvent;
-import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.event.RMAppAttemptStatusupdateEvent;
-import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.event.RMAppAttemptUnregistrationEvent;
-
-import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
-import org.apache.hadoop.yarn.server.resourcemanager.scheduler
-    .AbstractYarnScheduler;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.Allocation;
-import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ContainerUpdates;
-import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNodeReport;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.YarnScheduler;
 import org.apache.hadoop.yarn.server.resourcemanager.security
     .AMRMTokenSecretManager;
 import org.apache.hadoop.yarn.server.resourcemanager.security.authorize.RMPolicyProvider;
 import org.apache.hadoop.yarn.server.security.MasterKeyData;
-import org.apache.hadoop.yarn.server.utils.BuilderUtils;
 import org.apache.hadoop.yarn.server.utils.YarnServerSecurityUtils;
 import org.apache.hadoop.yarn.util.resource.Resources;
 
@@ -122,6 +88,12 @@ public class ApplicationMasterService extends AbstractService implements
   private final ConcurrentMap<ApplicationAttemptId, AllocateResponseLock> responseMap =
       new ConcurrentHashMap<ApplicationAttemptId, AllocateResponseLock>();
   protected final RMContext rmContext;
+  private final ApplicationMasterServiceProcessor amsProcessor;
+
+  public ApplicationMasterService(RMContext rmContext,
+      YarnScheduler scheduler) {
+    this(ApplicationMasterService.class.getName(), rmContext, scheduler);
+  }
 
   public ApplicationMasterService(String name, RMContext rmContext,
       YarnScheduler scheduler) {
@@ -129,11 +101,11 @@ public class ApplicationMasterService extends AbstractService implements
     this.amLivelinessMonitor = rmContext.getAMLivelinessMonitor();
     this.rScheduler = scheduler;
     this.rmContext = rmContext;
+    this.amsProcessor = createProcessor();
   }
 
-  public ApplicationMasterService(RMContext rmContext,
-      YarnScheduler scheduler) {
-    this(ApplicationMasterService.class.getName(), rmContext, scheduler);
+  protected ApplicationMasterServiceProcessor createProcessor() {
+    return new DefaultAMSProcessor(rmContext, rScheduler);
   }
 
   @Override
@@ -228,82 +200,22 @@ public class ApplicationMasterService extends AbstractService implements
                 + appID;
         LOG.warn(message);
         RMAuditLogger.logFailure(
-          this.rmContext.getRMApps()
-            .get(appID).getUser(),
-          AuditConstants.REGISTER_AM, "", "ApplicationMasterService", message,
-          appID, applicationAttemptId);
+            this.rmContext.getRMApps()
+                .get(appID).getUser(),
+            AuditConstants.REGISTER_AM, "", "ApplicationMasterService", message,
+            appID, applicationAttemptId);
         throw new InvalidApplicationMasterRequestException(message);
       }
-      
+
       this.amLivelinessMonitor.receivedPing(applicationAttemptId);
-      RMApp app = this.rmContext.getRMApps().get(appID);
-      
+
       // Setting the response id to 0 to identify if the
       // application master is register for the respective attemptid
       lastResponse.setResponseId(0);
       lock.setAllocateResponse(lastResponse);
-      LOG.info("AM registration " + applicationAttemptId);
-      this.rmContext
-        .getDispatcher()
-        .getEventHandler()
-        .handle(
-          new RMAppAttemptRegistrationEvent(applicationAttemptId, request
-            .getHost(), request.getRpcPort(), request.getTrackingUrl()));
-      RMAuditLogger.logSuccess(app.getUser(), AuditConstants.REGISTER_AM,
-        "ApplicationMasterService", appID, applicationAttemptId);
-
-      // Pick up min/max resource from scheduler...
-      RegisterApplicationMasterResponse response = recordFactory
-          .newRecordInstance(RegisterApplicationMasterResponse.class);
-      response.setMaximumResourceCapability(rScheduler
-          .getMaximumResourceCapability(app.getQueue()));
-      response.setApplicationACLs(app.getRMAppAttempt(applicationAttemptId)
-          .getSubmissionContext().getAMContainerSpec().getApplicationACLs());
-      response.setQueue(app.getQueue());
-      if (UserGroupInformation.isSecurityEnabled()) {
-        LOG.info("Setting client token master key");
-        response.setClientToAMTokenMasterKey(java.nio.ByteBuffer.wrap(rmContext
-            .getClientToAMTokenSecretManager()
-            .getMasterKey(applicationAttemptId).getEncoded()));        
-      }
-
-      // For work-preserving AM restart, retrieve previous attempts' containers
-      // and corresponding NM tokens.
-      if (app.getApplicationSubmissionContext()
-          .getKeepContainersAcrossApplicationAttempts()) {
-        List<Container> transferredContainers = rScheduler
-            .getTransferredContainers(applicationAttemptId);
-        if (!transferredContainers.isEmpty()) {
-          response.setContainersFromPreviousAttempts(transferredContainers);
-          List<NMToken> nmTokens = new ArrayList<NMToken>();
-          for (Container container : transferredContainers) {
-            try {
-              NMToken token = rmContext.getNMTokenSecretManager()
-                  .createAndGetNMToken(app.getUser(), applicationAttemptId,
-                      container);
-              if (null != token) {
-                nmTokens.add(token);
-              }
-            } catch (IllegalArgumentException e) {
-              // if it's a DNS issue, throw UnknowHostException directly and
-              // that
-              // will be automatically retried by RMProxy in RPC layer.
-              if (e.getCause() instanceof UnknownHostException) {
-                throw (UnknownHostException) e.getCause();
-              }
-            }
-          }
-          response.setNMTokensFromPreviousAttempts(nmTokens);
-          LOG.info("Application " + appID + " retrieved "
-              + transferredContainers.size() + " containers from previous"
-              + " attempts and " + nmTokens.size() + " NM tokens.");
-        }
-      }
 
-      response.setSchedulerResourceTypes(rScheduler
-        .getSchedulingResourceTypes());
-
-      return response;
+      return this.amsProcessor.registerApplicationMaster(
+          amrmTokenIdentifier.getApplicationAttemptId(), request);
     }
   }
 
@@ -353,15 +265,8 @@ public class ApplicationMasterService extends AbstractService implements
       }
 
       this.amLivelinessMonitor.receivedPing(applicationAttemptId);
-
-      rmContext.getDispatcher().getEventHandler().handle(
-          new RMAppAttemptUnregistrationEvent(applicationAttemptId, request
-              .getTrackingUrl(), request.getFinalApplicationStatus(), request
-              .getDiagnostics()));
-
-      // For UnmanagedAMs, return true so they don't retry
-      return FinishApplicationMasterResponse.newInstance(
-          rmApp.getApplicationSubmissionContext().getUnmanagedAM());
+      return this.amsProcessor.finishApplicationMaster(
+          applicationAttemptId, request);
     }
   }
 
@@ -441,10 +346,8 @@ public class ApplicationMasterService extends AbstractService implements
         throw new InvalidApplicationMasterRequestException(message);
       }
 
-      AllocateResponse response =
-          recordFactory.newRecordInstance(AllocateResponse.class);
-      allocateInternal(amrmTokenIdentifier.getApplicationAttemptId(),
-          request, response);
+      AllocateResponse response = this.amsProcessor.allocate(
+          amrmTokenIdentifier.getApplicationAttemptId(), request);
 
       // update AMRMToken if the token is rolled-up
       MasterKeyData nextMasterKey =
@@ -480,291 +383,7 @@ public class ApplicationMasterService extends AbstractService implements
       response.setResponseId(lastResponse.getResponseId() + 1);
       lock.setAllocateResponse(response);
       return response;
-    }    
-  }
-
-  protected void allocateInternal(ApplicationAttemptId appAttemptId,
-      AllocateRequest request, AllocateResponse allocateResponse)
-      throws YarnException {
-
-    //filter illegal progress values
-    float filteredProgress = request.getProgress();
-    if (Float.isNaN(filteredProgress) ||
-        filteredProgress == Float.NEGATIVE_INFINITY ||
-        filteredProgress < 0) {
-      request.setProgress(0);
-    } else if (filteredProgress > 1 ||
-        filteredProgress == Float.POSITIVE_INFINITY) {
-      request.setProgress(1);
-    }
-
-    // Send the status update to the appAttempt.
-    this.rmContext.getDispatcher().getEventHandler().handle(
-        new RMAppAttemptStatusupdateEvent(appAttemptId, request
-            .getProgress()));
-
-    List<ResourceRequest> ask = request.getAskList();
-    List<ContainerId> release = request.getReleaseList();
-
-    ResourceBlacklistRequest blacklistRequest =
-        request.getResourceBlacklistRequest();
-    List<String> blacklistAdditions =
-        (blacklistRequest != null) ?
-            blacklistRequest.getBlacklistAdditions() : Collections.EMPTY_LIST;
-    List<String> blacklistRemovals =
-        (blacklistRequest != null) ?
-            blacklistRequest.getBlacklistRemovals() : Collections.EMPTY_LIST;
-    RMApp app =
-        this.rmContext.getRMApps().get(appAttemptId.getApplicationId());
-
-    // set label expression for Resource Requests if resourceName=ANY
-    ApplicationSubmissionContext asc = app.getApplicationSubmissionContext();
-    for (ResourceRequest req : ask) {
-      if (null == req.getNodeLabelExpression()
-          && ResourceRequest.ANY.equals(req.getResourceName())) {
-        req.setNodeLabelExpression(asc.getNodeLabelExpression());
-      }
-    }
-
-    Resource maximumCapacity = rScheduler.getMaximumResourceCapability();
-
-    // sanity check
-    try {
-      RMServerUtils.normalizeAndValidateRequests(ask,
-          maximumCapacity, app.getQueue(),
-          rScheduler, rmContext);
-    } catch (InvalidResourceRequestException e) {
-      LOG.warn("Invalid resource ask by application " + appAttemptId, e);
-      throw e;
-    }
-
-    try {
-      RMServerUtils.validateBlacklistRequest(blacklistRequest);
-    }  catch (InvalidResourceBlacklistRequestException e) {
-      LOG.warn("Invalid blacklist request by application " + appAttemptId, e);
-      throw e;
-    }
-
-    // In the case of work-preserving AM restart, it's possible for the
-    // AM to release containers from the earlier attempt.
-    if (!app.getApplicationSubmissionContext()
-        .getKeepContainersAcrossApplicationAttempts()) {
-      try {
-        RMServerUtils.validateContainerReleaseRequest(release, appAttemptId);
-      } catch (InvalidContainerReleaseException e) {
-        LOG.warn("Invalid container release by application " + appAttemptId,
-            e);
-        throw e;
-      }
-    }
-
-    // Split Update Resource Requests into increase and decrease.
-    // No Exceptions are thrown here. All update errors are aggregated
-    // and returned to the AM.
-    List<UpdateContainerError> updateErrors = new ArrayList<>();
-    ContainerUpdates containerUpdateRequests =
-        RMServerUtils.validateAndSplitUpdateResourceRequests(
-        rmContext, request, maximumCapacity, updateErrors);
-
-    // Send new requests to appAttempt.
-    Allocation allocation;
-    RMAppAttemptState state =
-        app.getRMAppAttempt(appAttemptId).getAppAttemptState();
-    if (state.equals(RMAppAttemptState.FINAL_SAVING) ||
-        state.equals(RMAppAttemptState.FINISHING) ||
-        app.isAppFinalStateStored()) {
-      LOG.warn(appAttemptId + " is in " + state +
-               " state, ignore container allocate request.");
-      allocation = EMPTY_ALLOCATION;
-    } else {
-      allocation =
-          this.rScheduler.allocate(appAttemptId, ask, release,
-              blacklistAdditions, blacklistRemovals,
-              containerUpdateRequests);
-    }
-
-    if (!blacklistAdditions.isEmpty() || !blacklistRemovals.isEmpty()) {
-      LOG.info("blacklist are updated in Scheduler." +
-          "blacklistAdditions: " + blacklistAdditions + ", " +
-          "blacklistRemovals: " + blacklistRemovals);
-    }
-    RMAppAttempt appAttempt = app.getRMAppAttempt(appAttemptId);
-
-    if (allocation.getNMTokens() != null &&
-        !allocation.getNMTokens().isEmpty()) {
-      allocateResponse.setNMTokens(allocation.getNMTokens());
     }
-
-    // Notify the AM of container update errors
-    addToUpdateContainerErrors(allocateResponse, updateErrors);
-
-    // update the response with the deltas of node status changes
-    List<RMNode> updatedNodes = new ArrayList<RMNode>();
-    if(app.pullRMNodeUpdates(updatedNodes) > 0) {
-      List<NodeReport> updatedNodeReports = new ArrayList<NodeReport>();
-      for(RMNode rmNode: updatedNodes) {
-        SchedulerNodeReport schedulerNodeReport =
-            rScheduler.getNodeReport(rmNode.getNodeID());
-        Resource used = BuilderUtils.newResource(0, 0);
-        int numContainers = 0;
-        if (schedulerNodeReport != null) {
-          used = schedulerNodeReport.getUsedResource();
-          numContainers = schedulerNodeReport.getNumContainers();
-        }
-        NodeId nodeId = rmNode.getNodeID();
-        NodeReport report =
-            BuilderUtils.newNodeReport(nodeId, rmNode.getState(),
-                rmNode.getHttpAddress(), rmNode.getRackName(), used,
-                rmNode.getTotalCapability(), numContainers,
-                rmNode.getHealthReport(), rmNode.getLastHealthReportTime(),
-                rmNode.getNodeLabels());
-
-        updatedNodeReports.add(report);
-      }
-      allocateResponse.setUpdatedNodes(updatedNodeReports);
-    }
-
-    addToAllocatedContainers(allocateResponse, allocation.getContainers());
-
-    allocateResponse.setCompletedContainersStatuses(appAttempt
-        .pullJustFinishedContainers());
-    allocateResponse.setAvailableResources(allocation.getResourceLimit());
-
-    addToContainerUpdates(appAttemptId, allocateResponse, allocation);
-
-    allocateResponse.setNumClusterNodes(this.rScheduler.getNumClusterNodes());
-
-    // add collector address for this application
-    if (YarnConfiguration.timelineServiceV2Enabled(getConfig())) {
-      allocateResponse.setCollectorAddr(
-          this.rmContext.getRMApps().get(appAttemptId.getApplicationId())
-              .getCollectorAddr());
-    }
-
-    // add preemption to the allocateResponse message (if any)
-    allocateResponse
-        .setPreemptionMessage(generatePreemptionMessage(allocation));
-
-    // Set application priority
-    allocateResponse.setApplicationPriority(app
-        .getApplicationPriority());
-  }
-
-  private void addToContainerUpdates(ApplicationAttemptId appAttemptId,
-      AllocateResponse allocateResponse, Allocation allocation) {
-    // Handling increased containers
-    addToUpdatedContainers(
-        allocateResponse, ContainerUpdateType.INCREASE_RESOURCE,
-        allocation.getIncreasedContainers());
-
-    // Handling decreased containers
-    addToUpdatedContainers(
-        allocateResponse, ContainerUpdateType.DECREASE_RESOURCE,
-        allocation.getDecreasedContainers());
-
-    // Handling promoted containers
-    addToUpdatedContainers(
-        allocateResponse, ContainerUpdateType.PROMOTE_EXECUTION_TYPE,
-        allocation.getPromotedContainers());
-
-    // Handling demoted containers
-    addToUpdatedContainers(
-        allocateResponse, ContainerUpdateType.DEMOTE_EXECUTION_TYPE,
-        allocation.getDemotedContainers());
-
-    addToUpdateContainerErrors(allocateResponse,
-        ((AbstractYarnScheduler)rScheduler)
-            .getApplicationAttempt(appAttemptId).pullUpdateContainerErrors());
-  }
-
-  protected void addToUpdateContainerErrors(AllocateResponse allocateResponse,
-      List<UpdateContainerError> updateContainerErrors) {
-    if (!updateContainerErrors.isEmpty()) {
-      if (allocateResponse.getUpdateErrors() != null
-          && !allocateResponse.getUpdateErrors().isEmpty()) {
-        updateContainerErrors = new ArrayList<>(updateContainerErrors);
-        updateContainerErrors.addAll(allocateResponse.getUpdateErrors());
-      }
-      allocateResponse.setUpdateErrors(updateContainerErrors);
-    }
-  }
-
-  protected void addToUpdatedContainers(AllocateResponse allocateResponse,
-      ContainerUpdateType updateType, List<Container> updatedContainers) {
-    if (updatedContainers != null && updatedContainers.size() > 0) {
-      ArrayList<UpdatedContainer> containersToSet = new ArrayList<>();
-      if (allocateResponse.getUpdatedContainers() != null &&
-          !allocateResponse.getUpdatedContainers().isEmpty()) {
-        containersToSet.addAll(allocateResponse.getUpdatedContainers());
-      }
-      for (Container updatedContainer : updatedContainers) {
-        containersToSet.add(
-            UpdatedContainer.newInstance(updateType, updatedContainer));
-      }
-      allocateResponse.setUpdatedContainers(containersToSet);
-    }
-  }
-
-  protected void addToAllocatedContainers(AllocateResponse allocateResponse,
-      List<Container> allocatedContainers) {
-    if (allocateResponse.getAllocatedContainers() != null
-        && !allocateResponse.getAllocatedContainers().isEmpty()) {
-      allocatedContainers = new ArrayList<>(allocatedContainers);
-      allocatedContainers.addAll(allocateResponse.getAllocatedContainers());
-    }
-    allocateResponse.setAllocatedContainers(allocatedContainers);
-  }
-
-  private PreemptionMessage generatePreemptionMessage(Allocation allocation){
-    PreemptionMessage pMsg = null;
-    // assemble strict preemption request
-    if (allocation.getStrictContainerPreemptions() != null) {
-       pMsg =
-        recordFactory.newRecordInstance(PreemptionMessage.class);
-      StrictPreemptionContract pStrict =
-          recordFactory.newRecordInstance(StrictPreemptionContract.class);
-      Set<PreemptionContainer> pCont = new HashSet<PreemptionContainer>();
-      for (ContainerId cId : allocation.getStrictContainerPreemptions()) {
-        PreemptionContainer pc =
-            recordFactory.newRecordInstance(PreemptionContainer.class);
-        pc.setId(cId);
-        pCont.add(pc);
-      }
-      pStrict.setContainers(pCont);
-      pMsg.setStrictContract(pStrict);
-    }
-
-    // assemble negotiable preemption request
-    if (allocation.getResourcePreemptions() != null &&
-        allocation.getResourcePreemptions().size() > 0 &&
-        allocation.getContainerPreemptions() != null &&
-        allocation.getContainerPreemptions().size() > 0) {
-      if (pMsg == null) {
-        pMsg =
-            recordFactory.newRecordInstance(PreemptionMessage.class);
-      }
-      PreemptionContract contract =
-          recordFactory.newRecordInstance(PreemptionContract.class);
-      Set<PreemptionContainer> pCont = new HashSet<PreemptionContainer>();
-      for (ContainerId cId : allocation.getContainerPreemptions()) {
-        PreemptionContainer pc =
-            recordFactory.newRecordInstance(PreemptionContainer.class);
-        pc.setId(cId);
-        pCont.add(pc);
-      }
-      List<PreemptionResourceRequest> pRes = new ArrayList<PreemptionResourceRequest>();
-      for (ResourceRequest crr : allocation.getResourcePreemptions()) {
-        PreemptionResourceRequest prr =
-            recordFactory.newRecordInstance(PreemptionResourceRequest.class);
-        prr.setResourceRequest(crr);
-        pRes.add(prr);
-      }
-      contract.setContainers(pCont);
-      contract.setResourceRequest(pRes);
-      pMsg.setContract(contract);
-    }
-    
-    return pMsg;
   }
 
   public void registerAppAttempt(ApplicationAttemptId attemptId) {
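
For context on the refactoring above: ApplicationMasterService no longer
inlines the register/allocate/finish logic; it delegates to whatever
ApplicationMasterServiceProcessor the new createProcessor() hook returns.
A minimal sketch of a same-package subclass plugging in its own processor
(the Auditing* names are hypothetical and not part of this commit; it
assumes the rmContext/rScheduler fields remain visible to subclasses, as
OpportunisticContainerAllocatorAMService below relies on):

package org.apache.hadoop.yarn.server.resourcemanager;

import org.apache.hadoop.yarn.ams.ApplicationMasterServiceProcessor;
import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest;
import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.YarnScheduler;

// Hypothetical processor that decorates the default pipeline.
class AuditingAMSProcessor extends DefaultAMSProcessor {
  AuditingAMSProcessor(RMContext rmContext, YarnScheduler scheduler) {
    super(rmContext, scheduler);
  }

  @Override
  public AllocateResponse allocate(ApplicationAttemptId appAttemptId,
      AllocateRequest request) throws YarnException {
    // Custom pre-processing (auditing, throttling, ...) would go here,
    // then delegate to the default behavior.
    return super.allocate(appAttemptId, request);
  }
}

// Hypothetical service that wires the processor in via the new hook.
// Note createProcessor() is invoked from the superclass constructor, so it
// must only touch state that is already initialized there.
class AuditingAMService extends ApplicationMasterService {
  AuditingAMService(RMContext rmContext, YarnScheduler scheduler) {
    super(AuditingAMService.class.getName(), rmContext, scheduler);
  }

  @Override
  protected ApplicationMasterServiceProcessor createProcessor() {
    return new AuditingAMSProcessor(rmContext, rScheduler);
  }
}

OpportunisticContainerAllocatorAMService, further down in this commit, uses
exactly this extension point.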

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5496a34c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/DefaultAMSProcessor.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/DefaultAMSProcessor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/DefaultAMSProcessor.java
new file mode 100644
index 0000000..6eb1fba
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/DefaultAMSProcessor.java
@@ -0,0 +1,455 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.yarn.ams.ApplicationMasterServiceUtils;
+import org.apache.hadoop.yarn.ams.ApplicationMasterServiceProcessor;
+import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterResponse;
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
+import org.apache.hadoop.yarn.api.records.Container;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.ContainerUpdateType;
+import org.apache.hadoop.yarn.api.records.NMToken;
+import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.api.records.NodeReport;
+import org.apache.hadoop.yarn.api.records.PreemptionContainer;
+import org.apache.hadoop.yarn.api.records.PreemptionContract;
+import org.apache.hadoop.yarn.api.records.PreemptionMessage;
+import org.apache.hadoop.yarn.api.records.PreemptionResourceRequest;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.api.records.ResourceBlacklistRequest;
+import org.apache.hadoop.yarn.api.records.ResourceRequest;
+import org.apache.hadoop.yarn.api.records.StrictPreemptionContract;
+import org.apache.hadoop.yarn.api.records.UpdateContainerError;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.exceptions.InvalidContainerReleaseException;
+import org.apache.hadoop.yarn.exceptions.InvalidResourceBlacklistRequestException;
+import org.apache.hadoop.yarn.exceptions.InvalidResourceRequestException;
+import org.apache.hadoop.yarn.exceptions.YarnException;
+import org.apache.hadoop.yarn.factories.RecordFactory;
+import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptState;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.event.RMAppAttemptRegistrationEvent;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.event.RMAppAttemptStatusupdateEvent;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.event.RMAppAttemptUnregistrationEvent;
+import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.AbstractYarnScheduler;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.Allocation;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ContainerUpdates;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNodeReport;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.YarnScheduler;
+import org.apache.hadoop.yarn.server.utils.BuilderUtils;
+import org.apache.hadoop.yarn.util.resource.Resources;
+
+import java.io.IOException;
+import java.net.UnknownHostException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+
+class DefaultAMSProcessor implements ApplicationMasterServiceProcessor {
+
+  private static final Log LOG = LogFactory.getLog(DefaultAMSProcessor.class);
+
+  private final static List<Container> EMPTY_CONTAINER_LIST =
+      new ArrayList<Container>();
+  protected static final Allocation EMPTY_ALLOCATION = new Allocation(
+      EMPTY_CONTAINER_LIST, Resources.createResource(0), null, null, null);
+
+  private final RecordFactory recordFactory =
+      RecordFactoryProvider.getRecordFactory(null);
+
+  private final RMContext rmContext;
+  private final YarnScheduler scheduler;
+
+  DefaultAMSProcessor(RMContext rmContext, YarnScheduler scheduler) {
+    this.rmContext = rmContext;
+    this.scheduler = scheduler;
+  }
+
+  public RegisterApplicationMasterResponse registerApplicationMaster(
+      ApplicationAttemptId applicationAttemptId,
+      RegisterApplicationMasterRequest request) throws IOException {
+
+    RMApp app = getRmContext().getRMApps().get(
+        applicationAttemptId.getApplicationId());
+    LOG.info("AM registration " + applicationAttemptId);
+    getRmContext().getDispatcher().getEventHandler()
+        .handle(
+            new RMAppAttemptRegistrationEvent(applicationAttemptId, request
+                .getHost(), request.getRpcPort(), request.getTrackingUrl()));
+    RMAuditLogger.logSuccess(app.getUser(),
+        RMAuditLogger.AuditConstants.REGISTER_AM,
+        "ApplicationMasterService", app.getApplicationId(),
+        applicationAttemptId);
+    RegisterApplicationMasterResponse response = recordFactory
+        .newRecordInstance(RegisterApplicationMasterResponse.class);
+    response.setMaximumResourceCapability(getScheduler()
+        .getMaximumResourceCapability(app.getQueue()));
+    response.setApplicationACLs(app.getRMAppAttempt(applicationAttemptId)
+        .getSubmissionContext().getAMContainerSpec().getApplicationACLs());
+    response.setQueue(app.getQueue());
+    if (UserGroupInformation.isSecurityEnabled()) {
+      LOG.info("Setting client token master key");
+      response.setClientToAMTokenMasterKey(java.nio.ByteBuffer.wrap(
+          getRmContext().getClientToAMTokenSecretManager()
+          .getMasterKey(applicationAttemptId).getEncoded()));
+    }
+
+    // For work-preserving AM restart, retrieve previous attempts' containers
+    // and corresponding NM tokens.
+    if (app.getApplicationSubmissionContext()
+        .getKeepContainersAcrossApplicationAttempts()) {
+      List<Container> transferredContainers = getScheduler()
+          .getTransferredContainers(applicationAttemptId);
+      if (!transferredContainers.isEmpty()) {
+        response.setContainersFromPreviousAttempts(transferredContainers);
+        List<NMToken> nmTokens = new ArrayList<NMToken>();
+        for (Container container : transferredContainers) {
+          try {
+            NMToken token = getRmContext().getNMTokenSecretManager()
+                .createAndGetNMToken(app.getUser(), applicationAttemptId,
+                    container);
+            if (null != token) {
+              nmTokens.add(token);
+            }
+          } catch (IllegalArgumentException e) {
+            // If it's a DNS issue, throw UnknownHostException directly;
+            // that will be automatically retried by RMProxy in the
+            // RPC layer.
+            if (e.getCause() instanceof UnknownHostException) {
+              throw (UnknownHostException) e.getCause();
+            }
+          }
+        }
+        response.setNMTokensFromPreviousAttempts(nmTokens);
+        LOG.info("Application " + app.getApplicationId() + " retrieved "
+            + transferredContainers.size() + " containers from previous"
+            + " attempts and " + nmTokens.size() + " NM tokens.");
+      }
+    }
+
+    response.setSchedulerResourceTypes(getScheduler()
+        .getSchedulingResourceTypes());
+    return response;
+  }
+
+  public AllocateResponse allocate(ApplicationAttemptId appAttemptId,
+      AllocateRequest request) throws YarnException {
+
+    handleProgress(appAttemptId, request);
+
+    List<ResourceRequest> ask = request.getAskList();
+    List<ContainerId> release = request.getReleaseList();
+
+    ResourceBlacklistRequest blacklistRequest =
+        request.getResourceBlacklistRequest();
+    List<String> blacklistAdditions =
+        (blacklistRequest != null) ?
+            blacklistRequest.getBlacklistAdditions() : Collections.EMPTY_LIST;
+    List<String> blacklistRemovals =
+        (blacklistRequest != null) ?
+            blacklistRequest.getBlacklistRemovals() : Collections.EMPTY_LIST;
+    RMApp app =
+        getRmContext().getRMApps().get(appAttemptId.getApplicationId());
+
+    // set label expression for Resource Requests if resourceName=ANY
+    ApplicationSubmissionContext asc = app.getApplicationSubmissionContext();
+    for (ResourceRequest req : ask) {
+      if (null == req.getNodeLabelExpression()
+          && ResourceRequest.ANY.equals(req.getResourceName())) {
+        req.setNodeLabelExpression(asc.getNodeLabelExpression());
+      }
+    }
+
+    Resource maximumCapacity = getScheduler().getMaximumResourceCapability();
+
+    // sanity check
+    try {
+      RMServerUtils.normalizeAndValidateRequests(ask,
+          maximumCapacity, app.getQueue(),
+          getScheduler(), getRmContext());
+    } catch (InvalidResourceRequestException e) {
+      LOG.warn("Invalid resource ask by application " + appAttemptId, e);
+      throw e;
+    }
+
+    try {
+      RMServerUtils.validateBlacklistRequest(blacklistRequest);
+    }  catch (InvalidResourceBlacklistRequestException e) {
+      LOG.warn("Invalid blacklist request by application " + appAttemptId, e);
+      throw e;
+    }
+
+    // In the case of work-preserving AM restart, it's possible for the
+    // AM to release containers from the earlier attempt.
+    if (!app.getApplicationSubmissionContext()
+        .getKeepContainersAcrossApplicationAttempts()) {
+      try {
+        RMServerUtils.validateContainerReleaseRequest(release, appAttemptId);
+      } catch (InvalidContainerReleaseException e) {
+        LOG.warn("Invalid container release by application " + appAttemptId,
+            e);
+        throw e;
+      }
+    }
+
+    // Split Update Resource Requests into increase and decrease.
+    // No Exceptions are thrown here. All update errors are aggregated
+    // and returned to the AM.
+    List<UpdateContainerError> updateErrors = new ArrayList<>();
+    ContainerUpdates containerUpdateRequests =
+        RMServerUtils.validateAndSplitUpdateResourceRequests(
+            getRmContext(), request, maximumCapacity, updateErrors);
+
+    // Send new requests to appAttempt.
+    Allocation allocation;
+    RMAppAttemptState state =
+        app.getRMAppAttempt(appAttemptId).getAppAttemptState();
+    if (state.equals(RMAppAttemptState.FINAL_SAVING) ||
+        state.equals(RMAppAttemptState.FINISHING) ||
+        app.isAppFinalStateStored()) {
+      LOG.warn(appAttemptId + " is in " + state +
+          " state, ignore container allocate request.");
+      allocation = EMPTY_ALLOCATION;
+    } else {
+      allocation =
+          getScheduler().allocate(appAttemptId, ask, release,
+              blacklistAdditions, blacklistRemovals,
+              containerUpdateRequests);
+    }
+
+    if (!blacklistAdditions.isEmpty() || !blacklistRemovals.isEmpty()) {
+      LOG.info("blacklist are updated in Scheduler." +
+          "blacklistAdditions: " + blacklistAdditions + ", " +
+          "blacklistRemovals: " + blacklistRemovals);
+    }
+    RMAppAttempt appAttempt = app.getRMAppAttempt(appAttemptId);
+    AllocateResponse allocateResponse =
+        recordFactory.newRecordInstance(AllocateResponse.class);
+
+    if (allocation.getNMTokens() != null &&
+        !allocation.getNMTokens().isEmpty()) {
+      allocateResponse.setNMTokens(allocation.getNMTokens());
+    }
+
+    // Notify the AM of container update errors
+    ApplicationMasterServiceUtils.addToUpdateContainerErrors(
+        allocateResponse, updateErrors);
+
+    // update the response with the deltas of node status changes
+    handleNodeUpdates(app, allocateResponse);
+
+    ApplicationMasterServiceUtils.addToAllocatedContainers(
+        allocateResponse, allocation.getContainers());
+
+    allocateResponse.setCompletedContainersStatuses(appAttempt
+        .pullJustFinishedContainers());
+    allocateResponse.setAvailableResources(allocation.getResourceLimit());
+
+    addToContainerUpdates(allocateResponse, allocation,
+        ((AbstractYarnScheduler)getScheduler())
+            .getApplicationAttempt(appAttemptId).pullUpdateContainerErrors());
+
+    allocateResponse.setNumClusterNodes(getScheduler().getNumClusterNodes());
+
+    // add collector address for this application
+    if (YarnConfiguration.timelineServiceV2Enabled(
+        getRmContext().getYarnConfiguration())) {
+      allocateResponse.setCollectorAddr(
+          getRmContext().getRMApps().get(appAttemptId.getApplicationId())
+              .getCollectorAddr());
+    }
+
+    // add preemption to the allocateResponse message (if any)
+    allocateResponse
+        .setPreemptionMessage(generatePreemptionMessage(allocation));
+
+    // Set application priority
+    allocateResponse.setApplicationPriority(app
+        .getApplicationPriority());
+    return allocateResponse;
+  }
+
+  private void handleNodeUpdates(RMApp app, AllocateResponse allocateResponse) {
+    List<RMNode> updatedNodes = new ArrayList<>();
+    if (app.pullRMNodeUpdates(updatedNodes) > 0) {
+      List<NodeReport> updatedNodeReports = new ArrayList<>();
+      for (RMNode rmNode : updatedNodes) {
+        SchedulerNodeReport schedulerNodeReport =
+            getScheduler().getNodeReport(rmNode.getNodeID());
+        Resource used = BuilderUtils.newResource(0, 0);
+        int numContainers = 0;
+        if (schedulerNodeReport != null) {
+          used = schedulerNodeReport.getUsedResource();
+          numContainers = schedulerNodeReport.getNumContainers();
+        }
+        NodeId nodeId = rmNode.getNodeID();
+        NodeReport report =
+            BuilderUtils.newNodeReport(nodeId, rmNode.getState(),
+                rmNode.getHttpAddress(), rmNode.getRackName(), used,
+                rmNode.getTotalCapability(), numContainers,
+                rmNode.getHealthReport(), rmNode.getLastHealthReportTime(),
+                rmNode.getNodeLabels());
+
+        updatedNodeReports.add(report);
+      }
+      allocateResponse.setUpdatedNodes(updatedNodeReports);
+    }
+  }
+
+  private void handleProgress(ApplicationAttemptId appAttemptId,
+      AllocateRequest request) {
+    //filter illegal progress values
+    float filteredProgress = request.getProgress();
+    if (Float.isNaN(filteredProgress) ||
+        filteredProgress == Float.NEGATIVE_INFINITY ||
+        filteredProgress < 0) {
+      request.setProgress(0);
+    } else if (filteredProgress > 1 ||
+        filteredProgress == Float.POSITIVE_INFINITY) {
+      request.setProgress(1);
+    }
+
+    // Send the status update to the appAttempt.
+    getRmContext().getDispatcher().getEventHandler().handle(
+        new RMAppAttemptStatusupdateEvent(appAttemptId, request
+            .getProgress()));
+  }
+
+  public FinishApplicationMasterResponse finishApplicationMaster(
+      ApplicationAttemptId applicationAttemptId,
+      FinishApplicationMasterRequest request) {
+    RMApp app =
+        getRmContext().getRMApps().get(applicationAttemptId.getApplicationId());
+    // For UnmanagedAMs, return true so they don't retry
+    FinishApplicationMasterResponse response =
+        FinishApplicationMasterResponse.newInstance(
+            app.getApplicationSubmissionContext().getUnmanagedAM());
+    getRmContext().getDispatcher().getEventHandler().handle(
+        new RMAppAttemptUnregistrationEvent(applicationAttemptId, request
+            .getTrackingUrl(), request.getFinalApplicationStatus(), request
+            .getDiagnostics()));
+    return response;
+  }
+
+  private PreemptionMessage generatePreemptionMessage(Allocation allocation) {
+    PreemptionMessage pMsg = null;
+    // assemble strict preemption request
+    if (allocation.getStrictContainerPreemptions() != null) {
+      pMsg =
+          recordFactory.newRecordInstance(PreemptionMessage.class);
+      StrictPreemptionContract pStrict =
+          recordFactory.newRecordInstance(StrictPreemptionContract.class);
+      Set<PreemptionContainer> pCont = new HashSet<>();
+      for (ContainerId cId : allocation.getStrictContainerPreemptions()) {
+        PreemptionContainer pc =
+            recordFactory.newRecordInstance(PreemptionContainer.class);
+        pc.setId(cId);
+        pCont.add(pc);
+      }
+      pStrict.setContainers(pCont);
+      pMsg.setStrictContract(pStrict);
+    }
+
+    // assemble negotiable preemption request
+    if (allocation.getResourcePreemptions() != null &&
+        allocation.getResourcePreemptions().size() > 0 &&
+        allocation.getContainerPreemptions() != null &&
+        allocation.getContainerPreemptions().size() > 0) {
+      if (pMsg == null) {
+        pMsg =
+            recordFactory.newRecordInstance(PreemptionMessage.class);
+      }
+      PreemptionContract contract =
+          recordFactory.newRecordInstance(PreemptionContract.class);
+      Set<PreemptionContainer> pCont = new HashSet<>();
+      for (ContainerId cId : allocation.getContainerPreemptions()) {
+        PreemptionContainer pc =
+            recordFactory.newRecordInstance(PreemptionContainer.class);
+        pc.setId(cId);
+        pCont.add(pc);
+      }
+      List<PreemptionResourceRequest> pRes = new ArrayList<>();
+      for (ResourceRequest crr : allocation.getResourcePreemptions()) {
+        PreemptionResourceRequest prr =
+            recordFactory.newRecordInstance(PreemptionResourceRequest.class);
+        prr.setResourceRequest(crr);
+        pRes.add(prr);
+      }
+      contract.setContainers(pCont);
+      contract.setResourceRequest(pRes);
+      pMsg.setContract(contract);
+    }
+
+    return pMsg;
+  }
+
+  protected RMContext getRmContext() {
+    return rmContext;
+  }
+
+  protected YarnScheduler getScheduler() {
+    return scheduler;
+  }
+
+  private static void addToContainerUpdates(AllocateResponse allocateResponse,
+      Allocation allocation, List<UpdateContainerError> updateContainerErrors) {
+    // Handling increased containers
+    ApplicationMasterServiceUtils.addToUpdatedContainers(
+        allocateResponse, ContainerUpdateType.INCREASE_RESOURCE,
+        allocation.getIncreasedContainers());
+
+    // Handling decreased containers
+    ApplicationMasterServiceUtils.addToUpdatedContainers(
+        allocateResponse, ContainerUpdateType.DECREASE_RESOURCE,
+        allocation.getDecreasedContainers());
+
+    // Handling promoted containers
+    ApplicationMasterServiceUtils.addToUpdatedContainers(
+        allocateResponse, ContainerUpdateType.PROMOTE_EXECUTION_TYPE,
+        allocation.getPromotedContainers());
+
+    // Handling demoted containers
+    ApplicationMasterServiceUtils.addToUpdatedContainers(
+        allocateResponse, ContainerUpdateType.DEMOTE_EXECUTION_TYPE,
+        allocation.getDemotedContainers());
+
+    ApplicationMasterServiceUtils.addToUpdateContainerErrors(
+        allocateResponse, updateContainerErrors);
+  }
+}
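
A behavioral detail worth calling out from the new file: handleProgress()
clamps whatever progress value the AM reports into [0, 1] before forwarding
it to the attempt. The same rule, restated as a standalone pure function for
illustration (filterProgress is a hypothetical name, not in the patch):

// NaN, -Infinity and negative values collapse to 0; anything above 1,
// including +Infinity, caps at 1; values already in [0, 1] pass through.
static float filterProgress(float progress) {
  if (Float.isNaN(progress)
      || progress == Float.NEGATIVE_INFINITY
      || progress < 0) {
    return 0;
  } else if (progress > 1
      || progress == Float.POSITIVE_INFINITY) {
    return 1;
  }
  return progress;
}

// filterProgress(Float.NaN) -> 0.0    filterProgress(-0.3f) -> 0.0
// filterProgress(0.42f)     -> 0.42   filterProgress(7.0f)  -> 1.0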

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5496a34c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/OpportunisticContainerAllocatorAMService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/OpportunisticContainerAllocatorAMService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/OpportunisticContainerAllocatorAMService.java
index 8f3a888..e03d944 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/OpportunisticContainerAllocatorAMService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/OpportunisticContainerAllocatorAMService.java
@@ -23,6 +23,8 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ipc.Server;
+import org.apache.hadoop.yarn.ams.ApplicationMasterServiceProcessor;
+import org.apache.hadoop.yarn.ams.ApplicationMasterServiceUtils;
 import org.apache.hadoop.yarn.api.ApplicationMasterProtocolPB;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.Container;
@@ -37,8 +39,6 @@ import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
 import org.apache.hadoop.yarn.server.api.protocolrecords.DistributedSchedulingAllocateRequest;
 import org.apache.hadoop.yarn.server.api.protocolrecords.DistributedSchedulingAllocateResponse;
 import org.apache.hadoop.yarn.server.api.protocolrecords.RegisterDistributedSchedulingAMResponse;
-import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterRequest;
-import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterResponse;
 import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterResponse;
 
@@ -51,7 +51,6 @@ import org.apache.hadoop.yarn.server.api.protocolrecords.RemoteNode;
 import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer;
 import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerEventType;
-import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerImpl;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.AbstractYarnScheduler;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApplicationAttempt;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNode;
@@ -70,7 +69,6 @@ import org.apache.hadoop.yarn.server.resourcemanager.security.AMRMTokenSecretMan
 
 import org.apache.hadoop.yarn.server.scheduler.OpportunisticContainerAllocator;
 import org.apache.hadoop.yarn.server.scheduler.OpportunisticContainerContext;
-import org.apache.hadoop.yarn.server.scheduler.SchedulerRequestKey;
 import org.apache.hadoop.yarn.server.utils.YarnServerSecurityUtils;
 
 import java.io.IOException;
@@ -103,6 +101,84 @@ public class OpportunisticContainerAllocatorAMService
   private volatile List<RemoteNode> cachedNodes;
   private volatile long lastCacheUpdateTime;
 
+  class OpportunisticAMSProcessor extends DefaultAMSProcessor {
+
+    OpportunisticAMSProcessor(RMContext rmContext,
+        YarnScheduler scheduler) {
+      super(rmContext, scheduler);
+    }
+
+    @Override
+    public RegisterApplicationMasterResponse registerApplicationMaster(
+        ApplicationAttemptId applicationAttemptId,
+        RegisterApplicationMasterRequest request) throws IOException {
+      SchedulerApplicationAttempt appAttempt = ((AbstractYarnScheduler)
+          getScheduler()).getApplicationAttempt(applicationAttemptId);
+      if (appAttempt.getOpportunisticContainerContext() == null) {
+        OpportunisticContainerContext opCtx =
+            new OpportunisticContainerContext();
+        opCtx.setContainerIdGenerator(new OpportunisticContainerAllocator
+            .ContainerIdGenerator() {
+          @Override
+          public long generateContainerId() {
+            return appAttempt.getAppSchedulingInfo().getNewContainerId();
+          }
+        });
+        int tokenExpiryInterval = getConfig()
+            .getInt(YarnConfiguration.RM_CONTAINER_ALLOC_EXPIRY_INTERVAL_MS,
+                YarnConfiguration.DEFAULT_RM_CONTAINER_ALLOC_EXPIRY_INTERVAL_MS);
+        opCtx.updateAllocationParams(
+            getScheduler().getMinimumResourceCapability(),
+            getScheduler().getMaximumResourceCapability(),
+            getScheduler().getMinimumResourceCapability(),
+            tokenExpiryInterval);
+        appAttempt.setOpportunisticContainerContext(opCtx);
+      }
+      return super.registerApplicationMaster(applicationAttemptId, request);
+    }
+
+    @Override
+    public AllocateResponse allocate(ApplicationAttemptId appAttemptId,
+        AllocateRequest request) throws YarnException {
+      // Partition requests to GUARANTEED and OPPORTUNISTIC.
+      OpportunisticContainerAllocator.PartitionedResourceRequests
+          partitionedAsks =
+          oppContainerAllocator.partitionAskList(request.getAskList());
+
+      // Allocate OPPORTUNISTIC containers.
+      SchedulerApplicationAttempt appAttempt =
+          ((AbstractYarnScheduler)rmContext.getScheduler())
+              .getApplicationAttempt(appAttemptId);
+
+      OpportunisticContainerContext oppCtx =
+          appAttempt.getOpportunisticContainerContext();
+      oppCtx.updateNodeList(getLeastLoadedNodes());
+
+      List<Container> oppContainers =
+          oppContainerAllocator.allocateContainers(
+              request.getResourceBlacklistRequest(),
+              partitionedAsks.getOpportunistic(), appAttemptId, oppCtx,
+              ResourceManager.getClusterTimeStamp(), appAttempt.getUser());
+
+      // Create RMContainers and update the NMTokens.
+      if (!oppContainers.isEmpty()) {
+        handleNewContainers(oppContainers, false);
+        appAttempt.updateNMTokens(oppContainers);
+      }
+
+      // Allocate GUARANTEED containers.
+      request.setAskList(partitionedAsks.getGuaranteed());
+
+      AllocateResponse response = super.allocate(appAttemptId, request);
+      if (!oppContainers.isEmpty()) {
+        ApplicationMasterServiceUtils.addToAllocatedContainers(
+            response, oppContainers);
+      }
+      return response;
+    }
+  }
+
   public OpportunisticContainerAllocatorAMService(RMContext rmContext,
       YarnScheduler scheduler) {
     super(OpportunisticContainerAllocatorAMService.class.getName(),
@@ -161,6 +237,11 @@ public class OpportunisticContainerAllocatorAMService
   }
 
   @Override
+  protected ApplicationMasterServiceProcessor createProcessor() {
+    return new OpportunisticAMSProcessor(rmContext, rmContext.getScheduler());
+  }
+
+  @Override
   public Server getServer(YarnRPC rpc, Configuration serverConf,
       InetSocketAddress addr, AMRMTokenSecretManager secretManager) {
     if (YarnConfiguration.isDistSchedulingEnabled(serverConf)) {
@@ -181,80 +262,6 @@ public class OpportunisticContainerAllocatorAMService
   }
 
   @Override
-  public RegisterApplicationMasterResponse registerApplicationMaster
-      (RegisterApplicationMasterRequest request) throws YarnException,
-      IOException {
-    final ApplicationAttemptId appAttemptId = getAppAttemptId();
-    SchedulerApplicationAttempt appAttempt = ((AbstractYarnScheduler)
-        rmContext.getScheduler()).getApplicationAttempt(appAttemptId);
-    if (appAttempt.getOpportunisticContainerContext() == null) {
-      OpportunisticContainerContext opCtx = new OpportunisticContainerContext();
-      opCtx.setContainerIdGenerator(new OpportunisticContainerAllocator
-          .ContainerIdGenerator() {
-        @Override
-        public long generateContainerId() {
-          return appAttempt.getAppSchedulingInfo().getNewContainerId();
-        }
-      });
-      int tokenExpiryInterval = getConfig()
-          .getInt(YarnConfiguration.RM_CONTAINER_ALLOC_EXPIRY_INTERVAL_MS,
-              YarnConfiguration.DEFAULT_RM_CONTAINER_ALLOC_EXPIRY_INTERVAL_MS);
-      opCtx.updateAllocationParams(
-          rmContext.getScheduler().getMinimumResourceCapability(),
-          rmContext.getScheduler().getMaximumResourceCapability(),
-          rmContext.getScheduler().getMinimumResourceCapability(),
-          tokenExpiryInterval);
-      appAttempt.setOpportunisticContainerContext(opCtx);
-    }
-    return super.registerApplicationMaster(request);
-  }
-
-  @Override
-  public FinishApplicationMasterResponse finishApplicationMaster
-      (FinishApplicationMasterRequest request) throws YarnException,
-      IOException {
-    return super.finishApplicationMaster(request);
-  }
-
-  @Override
-  protected void allocateInternal(ApplicationAttemptId appAttemptId,
-      AllocateRequest request, AllocateResponse allocateResponse)
-      throws YarnException {
-
-    // Partition requests to GUARANTEED and OPPORTUNISTIC.
-    OpportunisticContainerAllocator.PartitionedResourceRequests
-        partitionedAsks =
-        oppContainerAllocator.partitionAskList(request.getAskList());
-
-    // Allocate OPPORTUNISTIC containers.
-    SchedulerApplicationAttempt appAttempt =
-        ((AbstractYarnScheduler)rmContext.getScheduler())
-            .getApplicationAttempt(appAttemptId);
-
-    OpportunisticContainerContext oppCtx =
-        appAttempt.getOpportunisticContainerContext();
-    oppCtx.updateNodeList(getLeastLoadedNodes());
-
-    List<Container> oppContainers =
-        oppContainerAllocator.allocateContainers(
-            request.getResourceBlacklistRequest(),
-            partitionedAsks.getOpportunistic(), appAttemptId, oppCtx,
-            ResourceManager.getClusterTimeStamp(), appAttempt.getUser());
-
-    // Create RMContainers and update the NMTokens.
-    if (!oppContainers.isEmpty()) {
-      handleNewContainers(oppContainers, false);
-      appAttempt.updateNMTokens(oppContainers);
-      addToAllocatedContainers(allocateResponse, oppContainers);
-    }
-
-    // Allocate GUARANTEED containers.
-    request.setAskList(partitionedAsks.getGuaranteed());
-
-    super.allocateInternal(appAttemptId, request, allocateResponse);
-  }
-
-  @Override
   public RegisterDistributedSchedulingAMResponse
       registerApplicationMasterForDistributedScheduling(
       RegisterApplicationMasterRequest request) throws YarnException,
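
To make the split concrete: with OpportunisticAMSProcessor in front of
DefaultAMSProcessor, an AM's mixed ask list is partitioned before the
scheduler ever sees it. A hedged client-side sketch (imports omitted;
lastResponseId and applicationMasterProtocol are assumed to exist in the
caller; the factory methods are the standard YARN record constructors):

// One OPPORTUNISTIC ask; the processor above peels this off and serves it
// from the least-loaded nodes, while GUARANTEED asks fall through to
// DefaultAMSProcessor and the scheduler.
ResourceRequest oppAsk = ResourceRequest.newInstance(
    Priority.newInstance(1), ResourceRequest.ANY,
    Resource.newInstance(1024, 1), 1, true, null,
    ExecutionTypeRequest.newInstance(ExecutionType.OPPORTUNISTIC, true));

AllocateRequest request = AllocateRequest.newInstance(
    lastResponseId, 0.5f,
    Collections.singletonList(oppAsk),
    Collections.<ContainerId>emptyList(), null);

// The opportunistic containers come back merged into the same response via
// ApplicationMasterServiceUtils.addToAllocatedContainers().
AllocateResponse response = applicationMasterProtocol.allocate(request);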

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5496a34c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestOpportunisticContainerAllocatorAMService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestOpportunisticContainerAllocatorAMService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestOpportunisticContainerAllocatorAMService.java
index b083642..6819395 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestOpportunisticContainerAllocatorAMService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestOpportunisticContainerAllocatorAMService.java
@@ -79,6 +79,9 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.Capacity
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeAddedSchedulerEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeRemovedSchedulerEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeUpdateSchedulerEvent;
+
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler;
 import org.apache.hadoop.yarn.server.resourcemanager.security.RMContainerTokenSecretManager;
 import org.apache.hadoop.yarn.server.scheduler.OpportunisticContainerContext;
 import org.apache.hadoop.yarn.util.resource.Resources;
@@ -652,6 +655,11 @@ public class TestOpportunisticContainerAllocatorAMService {
       public RMContainerTokenSecretManager getContainerTokenSecretManager() {
         return new RMContainerTokenSecretManager(conf);
       }
+
+      @Override
+      public ResourceScheduler getScheduler() {
+        return new FifoScheduler();
+      }
     };
     Container c = factory.newRecordInstance(Container.class);
     c.setExecutionType(ExecutionType.OPPORTUNISTIC);




[07/50] [abbrv] hadoop git commit: YARN-6694. Add certain envs to the default yarn.nodemanager.env-whitelist. Contributed by Jian He

Posted by as...@apache.org.
YARN-6694. Add certain envs to the default yarn.nodemanager.env-whitelist. Contributed by Jian He


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3be2659f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3be2659f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3be2659f

Branch: refs/heads/YARN-5972
Commit: 3be2659f83965a312d1095f03b7a95c7781c10af
Parents: af2773f
Author: Xuan <xg...@apache.org>
Authored: Thu Jun 29 20:10:35 2017 -0700
Committer: Xuan <xg...@apache.org>
Committed: Thu Jun 29 20:10:35 2017 -0700

----------------------------------------------------------------------
 .../hadoop-yarn-common/src/main/resources/yarn-default.xml         | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3be2659f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index cbd5345..81c9cb2 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -1013,7 +1013,7 @@
   <property>
     <description>Environment variables that containers may override rather than use NodeManager's default.</description>
     <name>yarn.nodemanager.env-whitelist</name>
-    <value>JAVA_HOME,HADOOP_COMMON_HOME,HADOOP_HDFS_HOME,HADOOP_CONF_DIR,CLASSPATH_PREPEND_DISTCACHE,HADOOP_YARN_HOME</value>
+    <value>JAVA_HOME,HADOOP_COMMON_HOME,HADOOP_HDFS_HOME,HADOOP_CONF_DIR,CLASSPATH_PREPEND_DISTCACHE,HADOOP_YARN_HOME,HADOOP_HOME,PATH,LANG,TZ</value>
   </property>
 
   <property>
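
Deployments that prefer the previous, narrower whitelist can override the
new default in yarn-site.xml; a sketch, with the value copied from the
pre-patch default above:

<property>
  <name>yarn.nodemanager.env-whitelist</name>
  <value>JAVA_HOME,HADOOP_COMMON_HOME,HADOOP_HDFS_HOME,HADOOP_CONF_DIR,CLASSPATH_PREPEND_DISTCACHE,HADOOP_YARN_HOME</value>
</property>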




[25/50] [abbrv] hadoop git commit: HADOOP-14587. Use GenericTestUtils.setLogLevel when available in hadoop-common. Contributed by Wenxin He.

Posted by as...@apache.org.
HADOOP-14587. Use GenericTestUtils.setLogLevel when available in hadoop-common. Contributed by Wenxin He.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/82cb2a64
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/82cb2a64
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/82cb2a64

Branch: refs/heads/YARN-5972
Commit: 82cb2a6497caa7c5e693aa41ad18e92f1c7eb16a
Parents: 7576a68
Author: Akira Ajisaka <aa...@apache.org>
Authored: Fri Jul 7 14:55:46 2017 +0900
Committer: Akira Ajisaka <aa...@apache.org>
Committed: Fri Jul 7 15:00:47 2017 +0900

----------------------------------------------------------------------
 .../fs/FileContextCreateMkdirBaseTest.java      |  2 +-
 .../hadoop/fs/FileContextPermissionBase.java    |  5 ++--
 .../apache/hadoop/fs/FileContextUtilBase.java   |  5 ++--
 .../org/apache/hadoop/fs/TestListFiles.java     |  4 +--
 .../fs/TestLocalFileSystemPermission.java       |  2 +-
 .../ha/TestActiveStandbyElectorRealZK.java      |  7 ++---
 .../apache/hadoop/ha/TestSshFenceByTcpPort.java |  6 ++--
 .../hadoop/ha/TestZKFailoverController.java     |  5 ++--
 .../io/serializer/TestSerializationFactory.java |  6 ++--
 .../org/apache/hadoop/ipc/MiniRPCBenchmark.java | 13 ++++-----
 .../java/org/apache/hadoop/ipc/TestIPC.java     |  5 ++--
 .../apache/hadoop/ipc/TestMiniRPCBenchmark.java |  2 +-
 .../java/org/apache/hadoop/ipc/TestRPC.java     |  2 +-
 .../java/org/apache/hadoop/ipc/TestSaslRPC.java | 16 +++++------
 .../hadoop/security/TestGroupFallback.java      | 12 ++++----
 .../hadoop/security/TestUGIWithMiniKdc.java     |  2 +-
 .../security/TestUserGroupInformation.java      |  2 +-
 .../hadoop/security/ssl/TestSSLFactory.java     |  2 +-
 .../delegation/web/TestWebDelegationToken.java  |  2 +-
 .../apache/hadoop/test/GenericTestUtils.java    | 30 ++++++++++++++++++++
 .../hadoop/test/TestGenericTestUtils.java       | 10 +++++++
 .../hadoop/util/Crc32PerformanceTest.java       | 11 +++----
 hadoop-common-project/hadoop-nfs/pom.xml        |  6 ++++
 .../apache/hadoop/oncrpc/TestFrameDecoder.java  |  6 ++--
 .../TestDFSStripedOutputStreamWithFailure.java  | 28 ++++++++++++++++++
 25 files changed, 132 insertions(+), 59 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/82cb2a64/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextCreateMkdirBaseTest.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextCreateMkdirBaseTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextCreateMkdirBaseTest.java
index c1de27a..fbd598c 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextCreateMkdirBaseTest.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextCreateMkdirBaseTest.java
@@ -20,7 +20,6 @@ package org.apache.hadoop.fs;
 
 import java.io.IOException;
 
-import org.apache.log4j.Level;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
@@ -30,6 +29,7 @@ import static org.apache.hadoop.fs.contract.ContractTestUtils.assertIsDirectory;
 import static org.apache.hadoop.fs.contract.ContractTestUtils.assertIsFile;
 
 import org.apache.hadoop.test.GenericTestUtils;
+import org.slf4j.event.Level;
 
 /**
  * <p>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/82cb2a64/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextPermissionBase.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextPermissionBase.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextPermissionBase.java
index dff89f9..240989e 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextPermissionBase.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextPermissionBase.java
@@ -23,6 +23,7 @@ import java.util.ArrayList;
 import java.util.List;
 import java.util.StringTokenizer;
 
+import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.Assert;
 
 import org.apache.hadoop.fs.permission.FsPermission;
@@ -32,6 +33,7 @@ import org.apache.hadoop.util.StringUtils;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
+import org.slf4j.event.Level;
 
 import static org.apache.hadoop.fs.FileContextTestHelper.*;
 import static org.apache.hadoop.test.PlatformAssumptions.assumeNotWindows;
@@ -61,8 +63,7 @@ public abstract class FileContextPermissionBase {
   
   {
     try {
-      ((org.apache.commons.logging.impl.Log4JLogger)FileSystem.LOG).getLogger()
-      .setLevel(org.apache.log4j.Level.DEBUG);
+      GenericTestUtils.setLogLevel(FileSystem.LOG, Level.DEBUG);
     }
     catch(Exception e) {
       System.out.println("Cannot change log level\n"

http://git-wip-us.apache.org/repos/asf/hadoop/blob/82cb2a64/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextUtilBase.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextUtilBase.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextUtilBase.java
index bebf4bf..0a96d3e 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextUtilBase.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextUtilBase.java
@@ -23,10 +23,12 @@ import static org.junit.Assert.assertTrue;
 
 import java.util.Arrays;
 
+import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.StringUtils;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
+import org.slf4j.event.Level;
 
 /**
  * <p>
@@ -48,8 +50,7 @@ public abstract class FileContextUtilBase {
   
   {
     try {
-      ((org.apache.commons.logging.impl.Log4JLogger)FileSystem.LOG).getLogger()
-      .setLevel(org.apache.log4j.Level.DEBUG);
+      GenericTestUtils.setLogLevel(FileSystem.LOG, Level.DEBUG);
     } catch(Exception e) {
       System.out.println("Cannot change log level\n"
           + StringUtils.stringifyException(e));

http://git-wip-us.apache.org/repos/asf/hadoop/blob/82cb2a64/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestListFiles.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestListFiles.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestListFiles.java
index 010754f..44308ea 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestListFiles.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestListFiles.java
@@ -24,18 +24,18 @@ import java.util.Set;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.log4j.Level;
 
 import static org.junit.Assert.*;
 import org.junit.Test;
 import org.junit.BeforeClass;
+import org.slf4j.event.Level;
 
 /**
  * This class tests the FileStatus API.
  */
 public class TestListFiles {
   static {
-    GenericTestUtils.setLogLevel(FileSystem.LOG, Level.ALL);
+    GenericTestUtils.setLogLevel(FileSystem.LOG, Level.TRACE);
   }
 
   static final long seed = 0xDEADBEEFL;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/82cb2a64/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystemPermission.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystemPermission.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystemPermission.java
index 1478111..81756f9 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystemPermission.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystemPermission.java
@@ -21,10 +21,10 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.Shell;
-import org.apache.log4j.Level;
 import org.junit.Test;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
+import org.slf4j.event.Level;
 
 import java.io.IOException;
 import java.util.ArrayList;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/82cb2a64/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestActiveStandbyElectorRealZK.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestActiveStandbyElectorRealZK.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestActiveStandbyElectorRealZK.java
index 59c0886..bbef9ef 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestActiveStandbyElectorRealZK.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestActiveStandbyElectorRealZK.java
@@ -24,12 +24,11 @@ import static org.junit.Assert.assertTrue;
 import java.util.Collections;
 import java.util.UUID;
 
-import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.ha.ActiveStandbyElector.ActiveStandbyElectorCallback;
 import org.apache.hadoop.ha.ActiveStandbyElector.State;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.ZKUtil.ZKAuthInfo;
-import org.apache.log4j.Level;
 import org.apache.zookeeper.CreateMode;
 import org.apache.zookeeper.ZooDefs.Ids;
 import org.apache.zookeeper.ZooKeeper;
@@ -39,6 +38,7 @@ import org.mockito.AdditionalMatchers;
 import org.mockito.Mockito;
 
 import com.google.common.primitives.Ints;
+import org.slf4j.event.Level;
 
 /**
  * Test for {@link ActiveStandbyElector} using real zookeeper.
@@ -47,8 +47,7 @@ public class TestActiveStandbyElectorRealZK extends ClientBaseWithFixes {
   static final int NUM_ELECTORS = 2;
   
   static {
-    ((Log4JLogger)ActiveStandbyElector.LOG).getLogger().setLevel(
-        Level.ALL);
+    GenericTestUtils.setLogLevel(ActiveStandbyElector.LOG, Level.TRACE);
   }
   
   static final String PARENT_DIR = "/" + UUID.randomUUID();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/82cb2a64/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestSshFenceByTcpPort.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestSshFenceByTcpPort.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestSshFenceByTcpPort.java
index 51fb829..f0ebc1e 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestSshFenceByTcpPort.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestSshFenceByTcpPort.java
@@ -21,18 +21,18 @@ import static org.junit.Assert.*;
 
 import java.net.InetSocketAddress;
 
-import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
 import org.apache.hadoop.ha.SshFenceByTcpPort.Args;
-import org.apache.log4j.Level;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.Assume;
 import org.junit.Test;
+import org.slf4j.event.Level;
 
 public class TestSshFenceByTcpPort {
 
   static {
-    ((Log4JLogger)SshFenceByTcpPort.LOG).getLogger().setLevel(Level.ALL);
+    GenericTestUtils.setLogLevel(SshFenceByTcpPort.LOG, Level.TRACE);
   }
 
   private static String TEST_FENCING_HOST = System.getProperty(

http://git-wip-us.apache.org/repos/asf/hadoop/blob/82cb2a64/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestZKFailoverController.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestZKFailoverController.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestZKFailoverController.java
index 846c8ae..0fa8e86 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestZKFailoverController.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestZKFailoverController.java
@@ -22,7 +22,6 @@ import static org.junit.Assert.*;
 import java.security.NoSuchAlgorithmException;
 
 import com.google.common.base.Supplier;
-import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
 import org.apache.hadoop.ha.HAServiceProtocol.StateChangeRequestInfo;
@@ -30,7 +29,6 @@ import org.apache.hadoop.ha.HealthMonitor.State;
 import org.apache.hadoop.ha.MiniZKFCCluster.DummyZKFC;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.Time;
-import org.apache.log4j.Level;
 import org.apache.zookeeper.KeeperException;
 import org.apache.zookeeper.ZooKeeper;
 import org.apache.zookeeper.data.Stat;
@@ -41,6 +39,7 @@ import org.junit.Rule;
 import org.junit.Test;
 import org.junit.rules.Timeout;
 import org.mockito.Mockito;
+import org.slf4j.event.Level;
 
 public class TestZKFailoverController extends ClientBaseWithFixes {
   private Configuration conf;
@@ -71,7 +70,7 @@ public class TestZKFailoverController extends ClientBaseWithFixes {
     "digest:" + DIGEST_USER_HASH + ":rwcda";
   
   static {
-    ((Log4JLogger)ActiveStandbyElector.LOG).getLogger().setLevel(Level.ALL);
+    GenericTestUtils.setLogLevel(ActiveStandbyElector.LOG, Level.TRACE);
   }
   
   @Before

http://git-wip-us.apache.org/repos/asf/hadoop/blob/82cb2a64/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/serializer/TestSerializationFactory.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/serializer/TestSerializationFactory.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/serializer/TestSerializationFactory.java
index 6774155..2cde3e3 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/serializer/TestSerializationFactory.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/serializer/TestSerializationFactory.java
@@ -18,21 +18,21 @@
 package org.apache.hadoop.io.serializer;
 
 import org.apache.hadoop.io.LongWritable;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.BeforeClass;
 import org.junit.Test;
 import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertNotNull;
 
-import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.io.Writable;
-import org.apache.log4j.Level;
+import org.slf4j.event.Level;
 
 public class TestSerializationFactory {
 
   static {
-    ((Log4JLogger) SerializationFactory.LOG).getLogger().setLevel(Level.ALL);
+    GenericTestUtils.setLogLevel(SerializationFactory.LOG, Level.TRACE);
   }
 
   static Configuration conf;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/82cb2a64/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/MiniRPCBenchmark.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/MiniRPCBenchmark.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/MiniRPCBenchmark.java
index cdbd557..2290270 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/MiniRPCBenchmark.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/MiniRPCBenchmark.java
@@ -26,9 +26,9 @@ import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Enumeration;
 
+import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.Assert;
 
-import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.net.NetUtils;
@@ -43,8 +43,7 @@ import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSelect
 import org.apache.hadoop.security.token.delegation.TestDelegationToken.TestDelegationTokenIdentifier;
 import org.apache.hadoop.security.token.delegation.TestDelegationToken.TestDelegationTokenSecretManager;
 import org.apache.hadoop.util.Time;
-import org.apache.log4j.Level;
-import org.apache.log4j.LogManager;
+import org.slf4j.event.Level;
 
 /**
  * MiniRPCBenchmark measures time to establish an RPC connection 
@@ -255,9 +254,9 @@ public class MiniRPCBenchmark {
   }
 
   static void setLoggingLevel(Level level) {
-    LogManager.getLogger(Server.class.getName()).setLevel(level);
-    ((Log4JLogger)Server.AUDITLOG).getLogger().setLevel(level);
-    LogManager.getLogger(Client.class.getName()).setLevel(level);
+    GenericTestUtils.setLogLevel(Server.LOG, level);
+    GenericTestUtils.setLogLevel(Server.AUDITLOG, level);
+    GenericTestUtils.setLogLevel(Client.LOG, level);
   }
 
   /**
@@ -370,7 +369,7 @@ public class MiniRPCBenchmark {
       useDelegationToken = args[3].equalsIgnoreCase("useToken");
     Level l = Level.ERROR;
     if(args.length > 4)
-      l = Level.toLevel(args[4]);
+      l = GenericTestUtils.toLevel(args[4]);
 
     MiniRPCBenchmark mb = new MiniRPCBenchmark(l);
     long elapsedTime = 0;

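One behavioral note on the hunk above: log4j 1.x Level.toLevel(String) silently falls back to DEBUG on unrecognized input, and the new GenericTestUtils.toLevel keeps that same default, so the benchmark's command line behaves exactly as before. A condensed restatement as a sketch (the class name LevelArgSketch is invented here, not the benchmark's literal code):

    import org.apache.hadoop.test.GenericTestUtils;
    import org.slf4j.event.Level;

    public class LevelArgSketch {
      static Level parseLevel(String[] args) {
        // args[4] optionally names a log level; unrecognized input falls
        // back to DEBUG in both the old log4j parser and the new helper.
        return args.length > 4
            ? GenericTestUtils.toLevel(args[4])
            : Level.ERROR;                     // the benchmark's default
      }

      public static void main(String[] args) {
        System.out.println(parseLevel(args));
      }
    }
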
http://git-wip-us.apache.org/repos/asf/hadoop/blob/82cb2a64/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java
index 1d47473..4bda637 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java
@@ -60,7 +60,6 @@ import javax.net.SocketFactory;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
@@ -85,7 +84,6 @@ import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
 import org.apache.hadoop.security.token.SecretManager.InvalidToken;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.StringUtils;
-import org.apache.log4j.Level;
 import org.junit.Assert;
 import org.junit.Assume;
 import org.junit.Before;
@@ -98,6 +96,7 @@ import org.mockito.stubbing.Answer;
 import com.google.common.base.Supplier;
 import com.google.common.primitives.Bytes;
 import com.google.common.primitives.Ints;
+import org.slf4j.event.Level;
 
 /** Unit tests for IPC. */
 public class TestIPC {
@@ -864,7 +863,7 @@ public class TestIPC {
 
   @Test(timeout=30000)
   public void testConnectionIdleTimeouts() throws Exception {
-    ((Log4JLogger)Server.LOG).getLogger().setLevel(Level.DEBUG);
+    GenericTestUtils.setLogLevel(Server.LOG, Level.DEBUG);
     final int maxIdle = 1000;
     final int cleanupInterval = maxIdle*3/4; // stagger cleanups
     final int killMax = 3;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/82cb2a64/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestMiniRPCBenchmark.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestMiniRPCBenchmark.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestMiniRPCBenchmark.java
index 0f34be8..a130fa9 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestMiniRPCBenchmark.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestMiniRPCBenchmark.java
@@ -18,8 +18,8 @@
 package org.apache.hadoop.ipc;
 
 import org.apache.hadoop.conf.Configuration;
-import org.apache.log4j.Level;
 import org.junit.Test;
+import org.slf4j.event.Level;
 
 /**
  * Test {@link MiniRPCBenchmark}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/82cb2a64/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java
index 166b205..8725cf4 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java
@@ -50,11 +50,11 @@ import org.apache.hadoop.security.token.TokenIdentifier;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.MetricsAsserts;
 import org.apache.hadoop.test.MockitoUtil;
-import org.apache.log4j.Level;
 import org.junit.Before;
 import org.junit.Test;
 import org.mockito.Mockito;
 import org.mockito.internal.util.reflection.Whitebox;
+import org.slf4j.event.Level;
 
 import javax.net.SocketFactory;
 import java.io.Closeable;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/82cb2a64/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestSaslRPC.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestSaslRPC.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestSaslRPC.java
index c48ff2e..7608cb4 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestSaslRPC.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestSaslRPC.java
@@ -22,7 +22,6 @@ import com.google.protobuf.ServiceException;
 import org.apache.commons.lang.StringUtils;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
@@ -36,7 +35,7 @@ import org.apache.hadoop.security.SaslRpcServer.QualityOfProtection;
 import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
 import org.apache.hadoop.security.token.*;
 import org.apache.hadoop.security.token.SecretManager.InvalidToken;
-import org.apache.log4j.Level;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.BeforeClass;
@@ -44,6 +43,7 @@ import org.junit.Test;
 import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
 import org.junit.runners.Parameterized.Parameters;
+import org.slf4j.event.Level;
 
 import javax.security.auth.callback.Callback;
 import javax.security.auth.callback.CallbackHandler;
@@ -186,12 +186,12 @@ public class TestSaslRPC extends TestRpcBase {
   }
 
   static {
-    ((Log4JLogger) Client.LOG).getLogger().setLevel(Level.ALL);
-    ((Log4JLogger) Server.LOG).getLogger().setLevel(Level.ALL);
-    ((Log4JLogger) SaslRpcClient.LOG).getLogger().setLevel(Level.ALL);
-    ((Log4JLogger) SaslRpcServer.LOG).getLogger().setLevel(Level.ALL);
-    ((Log4JLogger) SaslInputStream.LOG).getLogger().setLevel(Level.ALL);
-    ((Log4JLogger) SecurityUtil.LOG).getLogger().setLevel(Level.ALL);
+    GenericTestUtils.setLogLevel(Client.LOG, Level.TRACE);
+    GenericTestUtils.setLogLevel(Server.LOG, Level.TRACE);
+    GenericTestUtils.setLogLevel(SaslRpcClient.LOG, Level.TRACE);
+    GenericTestUtils.setLogLevel(SaslRpcServer.LOG, Level.TRACE);
+    GenericTestUtils.setLogLevel(SaslInputStream.LOG, Level.TRACE);
+    GenericTestUtils.setLogLevel(SecurityUtil.LOG, Level.TRACE);
   }
 
   public static class BadTokenSecretManager extends TestTokenSecretManager {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/82cb2a64/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestGroupFallback.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestGroupFallback.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestGroupFallback.java
index a61eee6..85f17b1 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestGroupFallback.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestGroupFallback.java
@@ -25,16 +25,16 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
-import org.apache.log4j.Level;
-import org.apache.log4j.Logger;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.Test;
+import org.slf4j.event.Level;
 
 public class TestGroupFallback {
   public static final Log LOG = LogFactory.getLog(TestGroupFallback.class);
 
   @Test
   public void testGroupShell() throws Exception {
-    Logger.getRootLogger().setLevel(Level.DEBUG);
+    GenericTestUtils.setRootLogLevel(Level.DEBUG);
     Configuration conf = new Configuration();
     conf.set(CommonConfigurationKeys.HADOOP_SECURITY_GROUP_MAPPING,
         "org.apache.hadoop.security.ShellBasedUnixGroupsMapping");
@@ -50,7 +50,7 @@ public class TestGroupFallback {
 
   @Test
   public void testNetgroupShell() throws Exception {
-    Logger.getRootLogger().setLevel(Level.DEBUG);
+    GenericTestUtils.setRootLogLevel(Level.DEBUG);
     Configuration conf = new Configuration();
     conf.set(CommonConfigurationKeys.HADOOP_SECURITY_GROUP_MAPPING,
         "org.apache.hadoop.security.ShellBasedUnixGroupsNetgroupMapping");
@@ -69,7 +69,7 @@ public class TestGroupFallback {
     LOG.info("running 'mvn -Pnative -DTestGroupFallback clear test' will " +
         "test the normal path and 'mvn -DTestGroupFallback clear test' will" +
         " test the fall back functionality");
-    Logger.getRootLogger().setLevel(Level.DEBUG);
+    GenericTestUtils.setRootLogLevel(Level.DEBUG);
     Configuration conf = new Configuration();
     conf.set(CommonConfigurationKeys.HADOOP_SECURITY_GROUP_MAPPING,
         "org.apache.hadoop.security.JniBasedUnixGroupsMappingWithFallback");
@@ -88,7 +88,7 @@ public class TestGroupFallback {
     LOG.info("running 'mvn -Pnative -DTestGroupFallback clear test' will " +
         "test the normal path and 'mvn -DTestGroupFallback clear test' will" +
         " test the fall back functionality");
-    Logger.getRootLogger().setLevel(Level.DEBUG);
+    GenericTestUtils.setRootLogLevel(Level.DEBUG);
     Configuration conf = new Configuration();
     conf.set(CommonConfigurationKeys.HADOOP_SECURITY_GROUP_MAPPING,
         "org.apache.hadoop.security.JniBasedUnixGroupsNetgroupMappingWithFallback");

http://git-wip-us.apache.org/repos/asf/hadoop/blob/82cb2a64/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUGIWithMiniKdc.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUGIWithMiniKdc.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUGIWithMiniKdc.java
index 2c6c7e4..6c94b1d 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUGIWithMiniKdc.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUGIWithMiniKdc.java
@@ -23,9 +23,9 @@ import org.apache.hadoop.security.authentication.util.KerberosUtil;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.LambdaTestUtils;
 import org.apache.hadoop.util.PlatformName;
-import org.apache.log4j.Level;
 import org.junit.After;
 import org.junit.Test;
+import org.slf4j.event.Level;
 
 import javax.security.auth.Subject;
 import javax.security.auth.kerberos.KerberosPrincipal;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/82cb2a64/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUserGroupInformation.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUserGroupInformation.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUserGroupInformation.java
index 00062c0..bcb2126 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUserGroupInformation.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUserGroupInformation.java
@@ -33,7 +33,6 @@ import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.Shell;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Time;
-import org.apache.log4j.Level;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
@@ -41,6 +40,7 @@ import org.junit.BeforeClass;
 import org.junit.Test;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
+import org.slf4j.event.Level;
 
 import javax.security.auth.Subject;
 import javax.security.auth.kerberos.KerberosPrincipal;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/82cb2a64/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestSSLFactory.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestSSLFactory.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestSSLFactory.java
index 5369c9d..4f1aca0 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestSSLFactory.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestSSLFactory.java
@@ -26,7 +26,6 @@ import org.apache.hadoop.security.alias.CredentialProviderFactory;
 import org.apache.hadoop.security.alias.JavaKeyStoreProvider;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.StringUtils;
-import org.apache.log4j.Level;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
@@ -34,6 +33,7 @@ import org.junit.BeforeClass;
 import org.junit.Test;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
+import org.slf4j.event.Level;
 
 import javax.net.ssl.HttpsURLConnection;
 import javax.net.ssl.SSLEngine;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/82cb2a64/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/web/TestWebDelegationToken.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/web/TestWebDelegationToken.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/web/TestWebDelegationToken.java
index 7319e4c..c564b97 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/web/TestWebDelegationToken.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/web/TestWebDelegationToken.java
@@ -32,7 +32,6 @@ import org.apache.hadoop.security.authentication.server.PseudoAuthenticationHand
 import org.apache.hadoop.security.authentication.util.KerberosUtil;
 import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSecretManager;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.log4j.Level;
 import org.eclipse.jetty.server.Server;
 import org.eclipse.jetty.server.ServerConnector;
 import org.eclipse.jetty.servlet.ServletContextHandler;
@@ -42,6 +41,7 @@ import org.junit.Before;
 import org.junit.Test;
 import org.eclipse.jetty.servlet.FilterHolder;
 import org.eclipse.jetty.servlet.ServletHolder;
+import org.slf4j.event.Level;
 
 import javax.security.auth.Subject;
 import javax.security.auth.kerberos.KerberosPrincipal;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/82cb2a64/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
index 77a79ff..82a5e08 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
@@ -141,6 +141,20 @@ public abstract class GenericTestUtils {
   }
 
   /**
+   * A helper used during the log4j2 migration to accept the legacy
+   * org.apache.commons.logging APIs.
+   * <p>
+   * It will be removed once the migration is complete.
+   *
+   * @param log   a log
+   * @param level level to be set
+   */
+  @Deprecated
+  public static void setLogLevel(Log log, org.slf4j.event.Level level) {
+    setLogLevel(log, Level.toLevel(level.toString()));
+  }
+
+  /**
    * @deprecated
    * use {@link #setLogLevel(org.slf4j.Logger, org.slf4j.event.Level)} instead
    */
@@ -172,6 +186,22 @@ public abstract class GenericTestUtils {
     setLogLevel(toLog4j(logger), Level.toLevel(level.toString()));
   }
 
+  public static void setRootLogLevel(org.slf4j.event.Level level) {
+    setLogLevel(LogManager.getRootLogger(), Level.toLevel(level.toString()));
+  }
+
+  public static org.slf4j.event.Level toLevel(String level) {
+    return toLevel(level, org.slf4j.event.Level.DEBUG);
+  }
+
+  public static org.slf4j.event.Level toLevel(
+      String level, org.slf4j.event.Level defaultLevel) {
+    try {
+      return org.slf4j.event.Level.valueOf(level);
+    } catch (IllegalArgumentException e) {
+      return defaultLevel;
+    }
+  }
   /**
    * Extracts the name of the method where the invocation has happened
    * @return String name of the invoking method

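Taken together, the new helpers let test code stay on the slf4j Level enum end to end. A minimal usage sketch, assuming the hadoop-common test-jar is on the classpath; the class name LogLevelDemo is invented for illustration, but every call matches a signature added in the hunks above:

    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.test.GenericTestUtils;
    import org.slf4j.event.Level;

    public class LogLevelDemo {
      public static void main(String[] args) {
        // The deprecated Log overload bridges commons-logging loggers
        // (such as FileSystem.LOG) until the migration finishes.
        GenericTestUtils.setLogLevel(FileSystem.LOG, Level.DEBUG);

        // Raise the root logger in one call, e.g. for a whole test run.
        GenericTestUtils.setRootLogLevel(Level.TRACE);

        // Defensive parsing: unknown names fall back instead of throwing.
        System.out.println(GenericTestUtils.toLevel("INFO"));           // INFO
        System.out.println(GenericTestUtils.toLevel("NoSuchLevel"));    // DEBUG
        System.out.println(
            GenericTestUtils.toLevel("NoSuchLevel", Level.TRACE));      // TRACE
      }
    }
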
http://git-wip-us.apache.org/repos/asf/hadoop/blob/82cb2a64/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/TestGenericTestUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/TestGenericTestUtils.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/TestGenericTestUtils.java
index b3fc836..c1d45cc 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/TestGenericTestUtils.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/TestGenericTestUtils.java
@@ -27,7 +27,9 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import com.google.common.base.Supplier;
+import org.slf4j.event.Level;
 
+import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
@@ -151,4 +153,12 @@ public class TestGenericTestUtils extends GenericTestUtils {
       assertExceptionContains(GenericTestUtils.ERROR_INVALID_ARGUMENT, e);
     }
   }
+
+  @Test
+  public void testToLevel() throws Throwable {
+    assertEquals(Level.INFO, toLevel("INFO"));
+    assertEquals(Level.DEBUG, toLevel("NonExistLevel"));
+    assertEquals(Level.INFO, toLevel("INFO", Level.TRACE));
+    assertEquals(Level.TRACE, toLevel("NonExistLevel", Level.TRACE));
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/82cb2a64/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/Crc32PerformanceTest.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/Crc32PerformanceTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/Crc32PerformanceTest.java
index 34dfc3a..ce28f50 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/Crc32PerformanceTest.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/Crc32PerformanceTest.java
@@ -27,10 +27,11 @@ import java.util.Random;
 import java.util.zip.CRC32;
 import java.util.zip.Checksum;
 
-import org.apache.commons.logging.LogFactory;
-import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.fs.ChecksumException;
-import org.apache.log4j.Level;
+import org.apache.hadoop.test.GenericTestUtils;
+import org.slf4j.event.Level;
+
+import static org.slf4j.LoggerFactory.getLogger;
 
 /**
  * Performance tests to compare performance of Crc32|Crc32C implementations
@@ -176,8 +177,8 @@ public class Crc32PerformanceTest {
         crcs.add(Crc32.Native.class);
       }
       crcs.add(Crc32.NativeC.class);
-      ((Log4JLogger)LogFactory.getLog(NativeCodeLoader.class))
-          .getLogger().setLevel(Level.ALL);
+      GenericTestUtils.setLogLevel(getLogger(NativeCodeLoader.class),
+          Level.TRACE);
     }
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/82cb2a64/hadoop-common-project/hadoop-nfs/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-nfs/pom.xml b/hadoop-common-project/hadoop-nfs/pom.xml
index 5fdaf44..bd5ab92 100644
--- a/hadoop-common-project/hadoop-nfs/pom.xml
+++ b/hadoop-common-project/hadoop-nfs/pom.xml
@@ -48,6 +48,12 @@
       <scope>provided</scope>
     </dependency>
     <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-common</artifactId>
+      <scope>test</scope>
+      <type>test-jar</type>
+    </dependency>
+    <dependency>
       <groupId>junit</groupId>
       <artifactId>junit</artifactId>
     </dependency>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/82cb2a64/hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/oncrpc/TestFrameDecoder.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/oncrpc/TestFrameDecoder.java b/hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/oncrpc/TestFrameDecoder.java
index 9d0fe0f..0e416b3 100644
--- a/hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/oncrpc/TestFrameDecoder.java
+++ b/hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/oncrpc/TestFrameDecoder.java
@@ -28,8 +28,7 @@ import java.util.Random;
 import org.apache.hadoop.oncrpc.RpcUtil.RpcFrameDecoder;
 import org.apache.hadoop.oncrpc.security.CredentialsNone;
 import org.apache.hadoop.oncrpc.security.VerifierNone;
-import org.apache.log4j.Level;
-import org.apache.commons.logging.impl.Log4JLogger;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.jboss.netty.buffer.ByteBufferBackedChannelBuffer;
 import org.jboss.netty.buffer.ChannelBuffer;
 import org.jboss.netty.buffer.ChannelBuffers;
@@ -38,11 +37,12 @@ import org.jboss.netty.channel.ChannelException;
 import org.jboss.netty.channel.ChannelHandlerContext;
 import org.junit.Test;
 import org.mockito.Mockito;
+import org.slf4j.event.Level;
 
 public class TestFrameDecoder {
   
   static {
-    ((Log4JLogger) RpcProgram.LOG).getLogger().setLevel(Level.ALL);
+    GenericTestUtils.setLogLevel(RpcProgram.LOG, Level.TRACE);
   }
 
   private static int resultSize;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/82cb2a64/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure.java
index 9915a2f..5b99d45 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure.java
@@ -391,6 +391,34 @@ public class TestDFSStripedOutputStreamWithFailure {
   }
 
   /**
+   * Tests the case where both DataNodes holding a partial data block fail.
+   */
+  @Test
+  public void runTestWithMultipleFailure2() throws Exception {
+    final HdfsConfiguration conf = newHdfsConfiguration();
+    // two DNs have cellSize and the other DNs have cellSize*2
+    final int length = cellSize * (dataBlocks * 2 - 2);
+    // select the two DNs with partial block to kill
+    final int[] dnIndex = {dataBlocks - 2, dataBlocks - 1};
+    final int[] killPos = getKillPositions(length, dnIndex.length);
+
+    try {
+      LOG.info("runTestWithMultipleFailure2: length==" + length + ", killPos="
+          + Arrays.toString(killPos) + ", dnIndex="
+          + Arrays.toString(dnIndex));
+      setup(conf);
+      runTest(length, killPos, dnIndex, false);
+    } catch (Throwable e) {
+      final String err = "failed, killPos=" + Arrays.toString(killPos)
+          + ", dnIndex=" + Arrays.toString(dnIndex) + ", length=" + length;
+      LOG.error(err);
+      throw e;
+    } finally {
+      tearDown();
+    }
+  }
+
+  /**
    * runTest implementation.
    * @param length file length
    * @param killPos killing positions in ascending order

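For concreteness, a quick sanity check of the cell layout the new test relies on. The RS(6,3) schema (dataBlocks = 6) below is an assumption for illustration, not something the hunk states:

    public class StripedLayoutSketch {
      public static void main(String[] args) {
        int dataBlocks = 6;                  // assumed RS(6,3) schema
        int cells = dataBlocks * 2 - 2;      // 10 cells, written round-robin
        int[] perDn = new int[dataBlocks];
        for (int c = 0; c < cells; c++) {
          perDn[c % dataBlocks]++;           // cell c lands on DN c % dataBlocks
        }
        // Prints [2, 2, 2, 2, 1, 1]: DNs 4 and 5 hold the single-cell
        // (partial) blocks, matching dnIndex = {dataBlocks-2, dataBlocks-1}.
        System.out.println(java.util.Arrays.toString(perDn));
      }
    }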



[08/50] [abbrv] hadoop git commit: Revert "HDFS-12043. Add counters for block re-replication. Contributed by Chen Liang."

Posted by as...@apache.org.
Revert "HDFS-12043. Add counters for block re-replication. Contributed by Chen Liang."

Accidentally committed the wrong patch version, reverting to fix that.

This reverts commit 900221f95ea9fe1936b4d5f277e6047ee8734eca.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a2f0cbd9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a2f0cbd9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a2f0cbd9

Branch: refs/heads/YARN-5972
Commit: a2f0cbd92f7e90909cf817c261a5fae13a9695b4
Parents: 3be2659
Author: Arpit Agarwal <ar...@apache.org>
Authored: Fri Jun 30 10:19:27 2017 -0700
Committer: Arpit Agarwal <ar...@apache.org>
Committed: Fri Jun 30 10:19:27 2017 -0700

----------------------------------------------------------------------
 .../server/blockmanagement/BlockManager.java    | 13 +--
 .../PendingReconstructionBlocks.java            |  8 +-
 .../namenode/metrics/NameNodeMetrics.java       | 18 ----
 .../TestPendingReconstruction.java              | 86 +-------------------
 4 files changed, 7 insertions(+), 118 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a2f0cbd9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index a5ee30b..a0c4698 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -1851,7 +1851,7 @@ public class BlockManager implements BlockStatsMXBean {
         (pendingReplicaNum > 0 || isPlacementPolicySatisfied(block));
   }
 
-  BlockReconstructionWork scheduleReconstruction(BlockInfo block,
+  private BlockReconstructionWork scheduleReconstruction(BlockInfo block,
       int priority) {
     // skip abandoned block or block reopened for append
     if (block.isDeleted() || !block.isCompleteOrCommitted()) {
@@ -1873,7 +1873,6 @@ public class BlockManager implements BlockStatsMXBean {
     if(srcNodes == null || srcNodes.length == 0) {
       // block can not be reconstructed from any node
       LOG.debug("Block {} cannot be reconstructed from any node", block);
-      NameNode.getNameNodeMetrics().incNumTimesReReplicationNotScheduled();
       return null;
     }
 
@@ -1886,7 +1885,6 @@ public class BlockManager implements BlockStatsMXBean {
       neededReconstruction.remove(block, priority);
       blockLog.debug("BLOCK* Removing {} from neededReconstruction as" +
           " it has enough replicas", block);
-      NameNode.getNameNodeMetrics().incNumTimesReReplicationNotScheduled();
       return null;
     }
 
@@ -1902,7 +1900,6 @@ public class BlockManager implements BlockStatsMXBean {
     if (block.isStriped()) {
       if (pendingNum > 0) {
         // Wait the previous reconstruction to finish.
-        NameNode.getNameNodeMetrics().incNumTimesReReplicationNotScheduled();
         return null;
       }
 
@@ -3730,8 +3727,8 @@ public class BlockManager implements BlockStatsMXBean {
    * The given node is reporting that it received a certain block.
    */
   @VisibleForTesting
-  public void addBlock(DatanodeStorageInfo storageInfo, Block block,
-      String delHint) throws IOException {
+  void addBlock(DatanodeStorageInfo storageInfo, Block block, String delHint)
+      throws IOException {
     DatanodeDescriptor node = storageInfo.getDatanodeDescriptor();
     // Decrement number of blocks scheduled to this datanode.
     // for a retry request (of DatanodeProtocol#blockReceivedAndDeleted with 
@@ -3754,9 +3751,7 @@ public class BlockManager implements BlockStatsMXBean {
     BlockInfo storedBlock = getStoredBlock(block);
     if (storedBlock != null &&
         block.getGenerationStamp() == storedBlock.getGenerationStamp()) {
-      if (pendingReconstruction.decrement(storedBlock, node)) {
-        NameNode.getNameNodeMetrics().incSuccessfulReReplications();
-      }
+      pendingReconstruction.decrement(storedBlock, node);
     }
     processAndHandleReportedBlock(storageInfo, block, ReplicaState.FINALIZED,
         delHintNode);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a2f0cbd9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/PendingReconstructionBlocks.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/PendingReconstructionBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/PendingReconstructionBlocks.java
index 0f20daa..2221d1d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/PendingReconstructionBlocks.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/PendingReconstructionBlocks.java
@@ -30,7 +30,6 @@ import java.util.List;
 import java.util.Map;
 
 import org.apache.hadoop.hdfs.protocol.Block;
-import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.util.Daemon;
 import org.slf4j.Logger;
 
@@ -98,10 +97,8 @@ class PendingReconstructionBlocks {
    * for this block.
    *
    * @param dn The DataNode that finishes the reconstruction
-   * @return true if the block is decremented to 0 and got removed.
    */
-  boolean decrement(BlockInfo block, DatanodeDescriptor dn) {
-    boolean removed = false;
+  void decrement(BlockInfo block, DatanodeDescriptor dn) {
     synchronized (pendingReconstructions) {
       PendingBlockInfo found = pendingReconstructions.get(block);
       if (found != null) {
@@ -109,11 +106,9 @@ class PendingReconstructionBlocks {
         found.decrementReplicas(dn);
         if (found.getNumReplicas() <= 0) {
           pendingReconstructions.remove(block);
-          removed = true;
         }
       }
     }
-    return removed;
   }
 
   /**
@@ -268,7 +263,6 @@ class PendingReconstructionBlocks {
               timedOutItems.add(block);
             }
             LOG.warn("PendingReconstructionMonitor timed out " + block);
-            NameNode.getNameNodeMetrics().incTimeoutReReplications();
             iter.remove();
           }
         }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a2f0cbd9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/NameNodeMetrics.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/NameNodeMetrics.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/NameNodeMetrics.java
index f2534e4..cb81f5a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/NameNodeMetrics.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/NameNodeMetrics.java
@@ -58,12 +58,6 @@ public class NameNodeMetrics {
   @Metric MutableCounterLong createSymlinkOps;
   @Metric MutableCounterLong getLinkTargetOps;
   @Metric MutableCounterLong filesInGetListingOps;
-  @Metric ("Number of successful re-replications")
-  MutableCounterLong successfulReReplications;
-  @Metric ("Number of times we failed to schedule a block re-replication.")
-  MutableCounterLong numTimesReReplicationNotScheduled;
-  @Metric("Number of timed out block re-replications")
-  MutableCounterLong timeoutReReplications;
   @Metric("Number of allowSnapshot operations")
   MutableCounterLong allowSnapshotOps;
   @Metric("Number of disallowSnapshot operations")
@@ -306,18 +300,6 @@ public class NameNodeMetrics {
     transactionsBatchedInSync.incr(count);
   }
 
-  public void incSuccessfulReReplications() {
-    successfulReReplications.incr();
-  }
-
-  public void incNumTimesReReplicationNotScheduled() {
-    numTimesReReplicationNotScheduled.incr();
-  }
-
-  public void incTimeoutReReplications() {
-    timeoutReReplications.incr();
-  }
-
   public void addSync(long elapsed) {
     syncs.add(elapsed);
     for (MutableQuantiles q : syncsQuantiles) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a2f0cbd9/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingReconstruction.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingReconstruction.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingReconstruction.java
index 042eae7..7679f9d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingReconstruction.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingReconstruction.java
@@ -17,10 +17,6 @@
  */
 package org.apache.hadoop.hdfs.server.blockmanagement;
 
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RECONSTRUCTION_PENDING_TIMEOUT_SEC_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY;
-import static org.apache.hadoop.test.MetricsAsserts.assertCounter;
-import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertNull;
@@ -48,7 +44,6 @@ import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo;
 import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo.BlockStatus;
 import org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks;
-import org.apache.hadoop.metrics2.MetricsRecordBuilder;
 import org.junit.Test;
 import org.mockito.Mockito;
 
@@ -183,7 +178,7 @@ public class TestPendingReconstruction {
   public void testProcessPendingReconstructions() throws Exception {
     final Configuration conf = new HdfsConfiguration();
     conf.setLong(
-        DFS_NAMENODE_RECONSTRUCTION_PENDING_TIMEOUT_SEC_KEY, TIMEOUT);
+        DFSConfigKeys.DFS_NAMENODE_RECONSTRUCTION_PENDING_TIMEOUT_SEC_KEY, TIMEOUT);
     MiniDFSCluster cluster = null;
     Block block;
     BlockInfo blockInfo;
@@ -423,7 +418,7 @@ public class TestPendingReconstruction {
     CONF.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 1024);
     CONF.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY,
         DFS_REPLICATION_INTERVAL);
-    CONF.setInt(DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY,
+    CONF.setInt(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY,
         DFS_REPLICATION_INTERVAL);
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(CONF).numDataNodes(
         DATANODE_COUNT).build();
@@ -476,81 +471,4 @@ public class TestPendingReconstruction {
       cluster.shutdown();
     }
   }
-
-  @Test
-  public void testReplicationCounter() throws Exception {
-    HdfsConfiguration conf = new HdfsConfiguration();
-    conf.setInt(DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY, 1);
-    conf.setInt(DFS_NAMENODE_RECONSTRUCTION_PENDING_TIMEOUT_SEC_KEY, 2);
-    MiniDFSCluster tmpCluster = new MiniDFSCluster.Builder(conf).numDataNodes(
-        DATANODE_COUNT).build();
-    tmpCluster.waitActive();
-    FSNamesystem fsn = tmpCluster.getNamesystem(0);
-    fsn.writeLock();
-
-    try {
-      BlockManager bm = fsn.getBlockManager();
-      BlocksMap blocksMap = bm.blocksMap;
-
-      // create three blockInfo below, blockInfo0 will success, blockInfo1 will
-      // time out, blockInfo2 will fail the replication.
-      BlockCollection bc0 = Mockito.mock(BlockCollection.class);
-      BlockInfo blockInfo0 = new BlockInfoContiguous((short) 3);
-      blockInfo0.setBlockId(0);
-
-      BlockCollection bc1 = Mockito.mock(BlockCollection.class);
-      BlockInfo blockInfo1 = new BlockInfoContiguous((short) 3);
-      blockInfo1.setBlockId(1);
-
-      BlockCollection bc2 = Mockito.mock(BlockCollection.class);
-      Mockito.when(bc2.getId()).thenReturn((2L));
-      BlockInfo blockInfo2 = new BlockInfoContiguous((short) 3);
-      blockInfo2.setBlockId(2);
-
-      blocksMap.addBlockCollection(blockInfo0, bc0);
-      blocksMap.addBlockCollection(blockInfo1, bc1);
-      blocksMap.addBlockCollection(blockInfo2, bc2);
-
-      PendingReconstructionBlocks pending = bm.pendingReconstruction;
-
-      MetricsRecordBuilder rb = getMetrics("NameNodeActivity");
-      assertCounter("SuccessfulReReplications", 0L, rb);
-      assertCounter("NumTimesReReplicationNotScheduled", 0L, rb);
-      assertCounter("TimeoutReReplications", 0L, rb);
-
-      // add block0 and block1 to pending queue.
-      pending.increment(blockInfo0);
-      pending.increment(blockInfo1);
-
-      Thread.sleep(2000);
-
-      rb = getMetrics("NameNodeActivity");
-      assertCounter("SuccessfulReReplications", 0L, rb);
-      assertCounter("NumTimesReReplicationNotScheduled", 0L, rb);
-      assertCounter("TimeoutReReplications", 0L, rb);
-
-      // call addBlock on block0 will make it successfully replicated.
-      // not calling addBlock on block1 will make it timeout later.
-      DatanodeStorageInfo[] storageInfos =
-          DFSTestUtil.createDatanodeStorageInfos(1);
-      bm.addBlock(storageInfos[0], blockInfo0, null);
-
-      // call schedule replication on blockInfo2 will fail the re-replication.
-      // because there is no source data to replicate from.
-      bm.scheduleReconstruction(blockInfo2, 0);
-
-      Thread.sleep(2000);
-
-      rb = getMetrics("NameNodeActivity");
-      assertCounter("SuccessfulReReplications", 1L, rb);
-      assertCounter("NumTimesReReplicationNotScheduled", 1L, rb);
-      assertCounter("TimeoutReReplications", 1L, rb);
-
-    } finally {
-      tmpCluster.shutdown();
-      fsn.writeUnlock();
-    }
-  }
-
-
 }




[06/50] [abbrv] hadoop git commit: Updating version for 3.0.0-beta1 development

Posted by as...@apache.org.
Updating version for 3.0.0-beta1 development


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/af2773f6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/af2773f6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/af2773f6

Branch: refs/heads/YARN-5972
Commit: af2773f609ba930825bab5d30767757c0e59aac7
Parents: 900221f
Author: Andrew Wang <wa...@apache.org>
Authored: Thu Jun 29 17:57:40 2017 -0700
Committer: Andrew Wang <wa...@apache.org>
Committed: Thu Jun 29 17:57:40 2017 -0700

----------------------------------------------------------------------
 hadoop-assemblies/pom.xml                                        | 4 ++--
 hadoop-build-tools/pom.xml                                       | 2 +-
 hadoop-client-modules/hadoop-client-api/pom.xml                  | 4 ++--
 hadoop-client-modules/hadoop-client-check-invariants/pom.xml     | 4 ++--
 .../hadoop-client-check-test-invariants/pom.xml                  | 4 ++--
 hadoop-client-modules/hadoop-client-integration-tests/pom.xml    | 4 ++--
 hadoop-client-modules/hadoop-client-minicluster/pom.xml          | 4 ++--
 hadoop-client-modules/hadoop-client-runtime/pom.xml              | 4 ++--
 hadoop-client-modules/hadoop-client/pom.xml                      | 4 ++--
 hadoop-client-modules/pom.xml                                    | 2 +-
 hadoop-cloud-storage-project/hadoop-cloud-storage/pom.xml        | 4 ++--
 hadoop-cloud-storage-project/pom.xml                             | 4 ++--
 hadoop-common-project/hadoop-annotations/pom.xml                 | 4 ++--
 hadoop-common-project/hadoop-auth-examples/pom.xml               | 4 ++--
 hadoop-common-project/hadoop-auth/pom.xml                        | 4 ++--
 hadoop-common-project/hadoop-common/pom.xml                      | 4 ++--
 hadoop-common-project/hadoop-kms/pom.xml                         | 4 ++--
 hadoop-common-project/hadoop-minikdc/pom.xml                     | 4 ++--
 hadoop-common-project/hadoop-nfs/pom.xml                         | 4 ++--
 hadoop-common-project/pom.xml                                    | 4 ++--
 hadoop-dist/pom.xml                                              | 4 ++--
 hadoop-hdfs-project/hadoop-hdfs-client/pom.xml                   | 4 ++--
 hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml                   | 4 ++--
 hadoop-hdfs-project/hadoop-hdfs-native-client/pom.xml            | 4 ++--
 hadoop-hdfs-project/hadoop-hdfs-nfs/pom.xml                      | 4 ++--
 hadoop-hdfs-project/hadoop-hdfs/pom.xml                          | 4 ++--
 hadoop-hdfs-project/pom.xml                                      | 4 ++--
 .../hadoop-mapreduce-client/hadoop-mapreduce-client-app/pom.xml  | 4 ++--
 .../hadoop-mapreduce-client-common/pom.xml                       | 4 ++--
 .../hadoop-mapreduce-client/hadoop-mapreduce-client-core/pom.xml | 4 ++--
 .../hadoop-mapreduce-client-hs-plugins/pom.xml                   | 4 ++--
 .../hadoop-mapreduce-client/hadoop-mapreduce-client-hs/pom.xml   | 4 ++--
 .../hadoop-mapreduce-client-jobclient/pom.xml                    | 4 ++--
 .../hadoop-mapreduce-client-nativetask/pom.xml                   | 4 ++--
 .../hadoop-mapreduce-client-shuffle/pom.xml                      | 4 ++--
 hadoop-mapreduce-project/hadoop-mapreduce-client/pom.xml         | 4 ++--
 hadoop-mapreduce-project/hadoop-mapreduce-examples/pom.xml       | 4 ++--
 hadoop-mapreduce-project/pom.xml                                 | 4 ++--
 hadoop-maven-plugins/pom.xml                                     | 2 +-
 hadoop-minicluster/pom.xml                                       | 4 ++--
 hadoop-project-dist/pom.xml                                      | 4 ++--
 hadoop-project/pom.xml                                           | 4 ++--
 hadoop-tools/hadoop-aliyun/pom.xml                               | 2 +-
 hadoop-tools/hadoop-archive-logs/pom.xml                         | 4 ++--
 hadoop-tools/hadoop-archives/pom.xml                             | 4 ++--
 hadoop-tools/hadoop-aws/pom.xml                                  | 4 ++--
 hadoop-tools/hadoop-azure-datalake/pom.xml                       | 2 +-
 hadoop-tools/hadoop-azure/pom.xml                                | 2 +-
 hadoop-tools/hadoop-datajoin/pom.xml                             | 4 ++--
 hadoop-tools/hadoop-distcp/pom.xml                               | 4 ++--
 hadoop-tools/hadoop-extras/pom.xml                               | 4 ++--
 hadoop-tools/hadoop-gridmix/pom.xml                              | 4 ++--
 hadoop-tools/hadoop-kafka/pom.xml                                | 4 ++--
 hadoop-tools/hadoop-openstack/pom.xml                            | 4 ++--
 hadoop-tools/hadoop-pipes/pom.xml                                | 4 ++--
 hadoop-tools/hadoop-rumen/pom.xml                                | 4 ++--
 hadoop-tools/hadoop-sls/pom.xml                                  | 4 ++--
 hadoop-tools/hadoop-streaming/pom.xml                            | 4 ++--
 hadoop-tools/hadoop-tools-dist/pom.xml                           | 4 ++--
 hadoop-tools/pom.xml                                             | 4 ++--
 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/pom.xml          | 4 ++--
 .../hadoop-yarn-applications-distributedshell/pom.xml            | 4 ++--
 .../hadoop-yarn-applications-unmanaged-am-launcher/pom.xml       | 4 ++--
 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/pom.xml | 4 ++--
 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/pom.xml       | 4 ++--
 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml       | 4 ++--
 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/pom.xml     | 4 ++--
 .../hadoop-yarn-server-applicationhistoryservice/pom.xml         | 4 ++--
 .../hadoop-yarn-server/hadoop-yarn-server-common/pom.xml         | 4 ++--
 .../hadoop-yarn-server/hadoop-yarn-server-nodemanager/pom.xml    | 4 ++--
 .../hadoop-yarn-server-resourcemanager/pom.xml                   | 4 ++--
 .../hadoop-yarn-server-sharedcachemanager/pom.xml                | 4 ++--
 .../hadoop-yarn-server/hadoop-yarn-server-tests/pom.xml          | 4 ++--
 .../hadoop-yarn-server-timeline-pluginstorage/pom.xml            | 4 ++--
 .../hadoop-yarn-server-timelineservice-hbase-tests/pom.xml       | 4 ++--
 .../hadoop-yarn-server-timelineservice-hbase/pom.xml             | 2 +-
 .../hadoop-yarn-server-timelineservice/pom.xml                   | 4 ++--
 .../hadoop-yarn-server/hadoop-yarn-server-web-proxy/pom.xml      | 4 ++--
 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/pom.xml       | 4 ++--
 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/pom.xml         | 4 ++--
 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml           | 4 ++--
 hadoop-yarn-project/hadoop-yarn/pom.xml                          | 4 ++--
 hadoop-yarn-project/pom.xml                                      | 4 ++--
 pom.xml                                                          | 2 +-
 84 files changed, 160 insertions(+), 160 deletions(-)
----------------------------------------------------------------------
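For context: each module pom carries the release version in two places,
the <version> inside its <parent> block and the module's own <version>
element, which is why most files in the summary above show 4 changed
lines (two removals, two additions). Poms listed with only 2 changed
lines, such as hadoop-client-modules/pom.xml, declare no <version> of
their own and inherit it from the parent, so only the parent reference
is bumped. A minimal sketch of the two-place pattern follows;
"hadoop-example-module" is an illustrative name, not a real module:

    <project>
      <parent>
        <groupId>org.apache.hadoop</groupId>
        <artifactId>hadoop-project</artifactId>
        <!-- bumped: version of the parent POM being referenced -->
        <version>3.0.0-beta1-SNAPSHOT</version>
        <relativePath>../hadoop-project</relativePath>
      </parent>
      <artifactId>hadoop-example-module</artifactId>
      <!-- bumped: the module's own version -->
      <version>3.0.0-beta1-SNAPSHOT</version>
    </project>

Version bumps of this kind are typically automated, for example with the
versions-maven-plugin ("mvn versions:set -DnewVersion=3.0.0-beta1-SNAPSHOT"
run from the repository root); the exact mechanism used for this commit is
an assumption rather than something recorded in the diff.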


http://git-wip-us.apache.org/repos/asf/hadoop/blob/af2773f6/hadoop-assemblies/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-assemblies/pom.xml b/hadoop-assemblies/pom.xml
index 5d0dc12..7a38421 100644
--- a/hadoop-assemblies/pom.xml
+++ b/hadoop-assemblies/pom.xml
@@ -23,11 +23,11 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.0.0-alpha4-SNAPSHOT</version>
+    <version>3.0.0-beta1-SNAPSHOT</version>
     <relativePath>../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-assemblies</artifactId>
-  <version>3.0.0-alpha4-SNAPSHOT</version>
+  <version>3.0.0-beta1-SNAPSHOT</version>
   <name>Apache Hadoop Assemblies</name>
   <description>Apache Hadoop Assemblies</description>
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/af2773f6/hadoop-build-tools/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-build-tools/pom.xml b/hadoop-build-tools/pom.xml
index 9de3e50..8933495 100644
--- a/hadoop-build-tools/pom.xml
+++ b/hadoop-build-tools/pom.xml
@@ -18,7 +18,7 @@
   <parent>
     <artifactId>hadoop-main</artifactId>
     <groupId>org.apache.hadoop</groupId>
-    <version>3.0.0-alpha4-SNAPSHOT</version>
+    <version>3.0.0-beta1-SNAPSHOT</version>
   </parent>
   <modelVersion>4.0.0</modelVersion>
   <artifactId>hadoop-build-tools</artifactId>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/af2773f6/hadoop-client-modules/hadoop-client-api/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-client-modules/hadoop-client-api/pom.xml b/hadoop-client-modules/hadoop-client-api/pom.xml
index 135ffb8..2d3548b 100644
--- a/hadoop-client-modules/hadoop-client-api/pom.xml
+++ b/hadoop-client-modules/hadoop-client-api/pom.xml
@@ -18,11 +18,11 @@
 <parent>
    <groupId>org.apache.hadoop</groupId>
    <artifactId>hadoop-project</artifactId>
-   <version>3.0.0-alpha4-SNAPSHOT</version>
+   <version>3.0.0-beta1-SNAPSHOT</version>
    <relativePath>../../hadoop-project</relativePath>
 </parent>
   <artifactId>hadoop-client-api</artifactId>
-  <version>3.0.0-alpha4-SNAPSHOT</version>
+  <version>3.0.0-beta1-SNAPSHOT</version>
   <packaging>jar</packaging>
 
   <description>Apache Hadoop Client</description>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/af2773f6/hadoop-client-modules/hadoop-client-check-invariants/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-client-modules/hadoop-client-check-invariants/pom.xml b/hadoop-client-modules/hadoop-client-check-invariants/pom.xml
index 4adbc92..e495a69 100644
--- a/hadoop-client-modules/hadoop-client-check-invariants/pom.xml
+++ b/hadoop-client-modules/hadoop-client-check-invariants/pom.xml
@@ -18,11 +18,11 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.0.0-alpha4-SNAPSHOT</version>
+    <version>3.0.0-beta1-SNAPSHOT</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-client-check-invariants</artifactId>
-  <version>3.0.0-alpha4-SNAPSHOT</version>
+  <version>3.0.0-beta1-SNAPSHOT</version>
   <packaging>pom</packaging>
 
   <description>Enforces our invariants for the api and runtime client modules.</description>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/af2773f6/hadoop-client-modules/hadoop-client-check-test-invariants/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-client-modules/hadoop-client-check-test-invariants/pom.xml b/hadoop-client-modules/hadoop-client-check-test-invariants/pom.xml
index 0509967..68d1f5b 100644
--- a/hadoop-client-modules/hadoop-client-check-test-invariants/pom.xml
+++ b/hadoop-client-modules/hadoop-client-check-test-invariants/pom.xml
@@ -18,11 +18,11 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.0.0-alpha4-SNAPSHOT</version>
+    <version>3.0.0-beta1-SNAPSHOT</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-client-check-test-invariants</artifactId>
-  <version>3.0.0-alpha4-SNAPSHOT</version>
+  <version>3.0.0-beta1-SNAPSHOT</version>
   <packaging>pom</packaging>
 
   <description>Enforces our invariants for the testing client modules.</description>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/af2773f6/hadoop-client-modules/hadoop-client-integration-tests/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-client-modules/hadoop-client-integration-tests/pom.xml b/hadoop-client-modules/hadoop-client-integration-tests/pom.xml
index eaecbc0..c180965 100644
--- a/hadoop-client-modules/hadoop-client-integration-tests/pom.xml
+++ b/hadoop-client-modules/hadoop-client-integration-tests/pom.xml
@@ -18,11 +18,11 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.0.0-alpha4-SNAPSHOT</version>
+    <version>3.0.0-beta1-SNAPSHOT</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-client-integration-tests</artifactId>
-  <version>3.0.0-alpha4-SNAPSHOT</version>
+  <version>3.0.0-beta1-SNAPSHOT</version>
 
   <description>Checks that we can use the generated artifacts</description>
   <name>Apache Hadoop Client Packaging Integration Tests</name>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/af2773f6/hadoop-client-modules/hadoop-client-minicluster/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-client-modules/hadoop-client-minicluster/pom.xml b/hadoop-client-modules/hadoop-client-minicluster/pom.xml
index ac95bf5..4512906 100644
--- a/hadoop-client-modules/hadoop-client-minicluster/pom.xml
+++ b/hadoop-client-modules/hadoop-client-minicluster/pom.xml
@@ -18,11 +18,11 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.0.0-alpha4-SNAPSHOT</version>
+    <version>3.0.0-beta1-SNAPSHOT</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-client-minicluster</artifactId>
-  <version>3.0.0-alpha4-SNAPSHOT</version>
+  <version>3.0.0-beta1-SNAPSHOT</version>
   <packaging>jar</packaging>
 
   <description>Apache Hadoop Minicluster for Clients</description>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/af2773f6/hadoop-client-modules/hadoop-client-runtime/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-client-modules/hadoop-client-runtime/pom.xml b/hadoop-client-modules/hadoop-client-runtime/pom.xml
index 3c8364c..2f64152 100644
--- a/hadoop-client-modules/hadoop-client-runtime/pom.xml
+++ b/hadoop-client-modules/hadoop-client-runtime/pom.xml
@@ -18,11 +18,11 @@
 <parent>
    <groupId>org.apache.hadoop</groupId>
    <artifactId>hadoop-project</artifactId>
-   <version>3.0.0-alpha4-SNAPSHOT</version>
+   <version>3.0.0-beta1-SNAPSHOT</version>
    <relativePath>../../hadoop-project</relativePath>
 </parent>
   <artifactId>hadoop-client-runtime</artifactId>
-  <version>3.0.0-alpha4-SNAPSHOT</version>
+  <version>3.0.0-beta1-SNAPSHOT</version>
   <packaging>jar</packaging>
 
   <description>Apache Hadoop Client</description>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/af2773f6/hadoop-client-modules/hadoop-client/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-client-modules/hadoop-client/pom.xml b/hadoop-client-modules/hadoop-client/pom.xml
index 9ecfd78..629f9fa 100644
--- a/hadoop-client-modules/hadoop-client/pom.xml
+++ b/hadoop-client-modules/hadoop-client/pom.xml
@@ -18,11 +18,11 @@
 <parent>
    <groupId>org.apache.hadoop</groupId>
    <artifactId>hadoop-project-dist</artifactId>
-   <version>3.0.0-alpha4-SNAPSHOT</version>
+   <version>3.0.0-beta1-SNAPSHOT</version>
    <relativePath>../../hadoop-project-dist</relativePath>
 </parent>
   <artifactId>hadoop-client</artifactId>
-  <version>3.0.0-alpha4-SNAPSHOT</version>
+  <version>3.0.0-beta1-SNAPSHOT</version>
 
   <description>Apache Hadoop Client aggregation pom with dependencies exposed</description>
   <name>Apache Hadoop Client Aggregator</name>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/af2773f6/hadoop-client-modules/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-client-modules/pom.xml b/hadoop-client-modules/pom.xml
index 8a90a58..c60b7e1 100644
--- a/hadoop-client-modules/pom.xml
+++ b/hadoop-client-modules/pom.xml
@@ -18,7 +18,7 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.0.0-alpha4-SNAPSHOT</version>
+    <version>3.0.0-beta1-SNAPSHOT</version>
     <relativePath>../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-client-modules</artifactId>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/af2773f6/hadoop-cloud-storage-project/hadoop-cloud-storage/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-cloud-storage-project/hadoop-cloud-storage/pom.xml b/hadoop-cloud-storage-project/hadoop-cloud-storage/pom.xml
index 59ef3ca..829316e 100644
--- a/hadoop-cloud-storage-project/hadoop-cloud-storage/pom.xml
+++ b/hadoop-cloud-storage-project/hadoop-cloud-storage/pom.xml
@@ -18,11 +18,11 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.0.0-alpha4-SNAPSHOT</version>
+    <version>3.0.0-beta1-SNAPSHOT</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-cloud-storage</artifactId>
-  <version>3.0.0-alpha4-SNAPSHOT</version>
+  <version>3.0.0-beta1-SNAPSHOT</version>
   <packaging>jar</packaging>
 
   <description>Apache Hadoop Cloud Storage</description>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/af2773f6/hadoop-cloud-storage-project/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-cloud-storage-project/pom.xml b/hadoop-cloud-storage-project/pom.xml
index 73baa47..0907d2f 100644
--- a/hadoop-cloud-storage-project/pom.xml
+++ b/hadoop-cloud-storage-project/pom.xml
@@ -20,11 +20,11 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.0.0-alpha4-SNAPSHOT</version>
+    <version>3.0.0-beta1-SNAPSHOT</version>
     <relativePath>../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-cloud-storage-project</artifactId>
-  <version>3.0.0-alpha4-SNAPSHOT</version>
+  <version>3.0.0-beta1-SNAPSHOT</version>
   <description>Apache Hadoop Cloud Storage Project</description>
   <name>Apache Hadoop Cloud Storage Project</name>
   <packaging>pom</packaging>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/af2773f6/hadoop-common-project/hadoop-annotations/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-annotations/pom.xml b/hadoop-common-project/hadoop-annotations/pom.xml
index 4c01494..0b41f78 100644
--- a/hadoop-common-project/hadoop-annotations/pom.xml
+++ b/hadoop-common-project/hadoop-annotations/pom.xml
@@ -20,11 +20,11 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.0.0-alpha4-SNAPSHOT</version>
+    <version>3.0.0-beta1-SNAPSHOT</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-annotations</artifactId>
-  <version>3.0.0-alpha4-SNAPSHOT</version>
+  <version>3.0.0-beta1-SNAPSHOT</version>
   <description>Apache Hadoop Annotations</description>
   <name>Apache Hadoop Annotations</name>
   <packaging>jar</packaging>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/af2773f6/hadoop-common-project/hadoop-auth-examples/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-auth-examples/pom.xml b/hadoop-common-project/hadoop-auth-examples/pom.xml
index a895bad..83cee15 100644
--- a/hadoop-common-project/hadoop-auth-examples/pom.xml
+++ b/hadoop-common-project/hadoop-auth-examples/pom.xml
@@ -20,11 +20,11 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.0.0-alpha4-SNAPSHOT</version>
+    <version>3.0.0-beta1-SNAPSHOT</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-auth-examples</artifactId>
-  <version>3.0.0-alpha4-SNAPSHOT</version>
+  <version>3.0.0-beta1-SNAPSHOT</version>
   <packaging>war</packaging>
 
   <name>Apache Hadoop Auth Examples</name>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/af2773f6/hadoop-common-project/hadoop-auth/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-auth/pom.xml b/hadoop-common-project/hadoop-auth/pom.xml
index 61bc8b4..36a70ae 100644
--- a/hadoop-common-project/hadoop-auth/pom.xml
+++ b/hadoop-common-project/hadoop-auth/pom.xml
@@ -20,11 +20,11 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.0.0-alpha4-SNAPSHOT</version>
+    <version>3.0.0-beta1-SNAPSHOT</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-auth</artifactId>
-  <version>3.0.0-alpha4-SNAPSHOT</version>
+  <version>3.0.0-beta1-SNAPSHOT</version>
   <packaging>jar</packaging>
 
   <name>Apache Hadoop Auth</name>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/af2773f6/hadoop-common-project/hadoop-common/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/pom.xml b/hadoop-common-project/hadoop-common/pom.xml
index 87b4dc9..f74290d 100644
--- a/hadoop-common-project/hadoop-common/pom.xml
+++ b/hadoop-common-project/hadoop-common/pom.xml
@@ -20,11 +20,11 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project-dist</artifactId>
-    <version>3.0.0-alpha4-SNAPSHOT</version>
+    <version>3.0.0-beta1-SNAPSHOT</version>
     <relativePath>../../hadoop-project-dist</relativePath>
   </parent>
   <artifactId>hadoop-common</artifactId>
-  <version>3.0.0-alpha4-SNAPSHOT</version>
+  <version>3.0.0-beta1-SNAPSHOT</version>
   <description>Apache Hadoop Common</description>
   <name>Apache Hadoop Common</name>
   <packaging>jar</packaging>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/af2773f6/hadoop-common-project/hadoop-kms/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-kms/pom.xml b/hadoop-common-project/hadoop-kms/pom.xml
index d84efa4..27be05a 100644
--- a/hadoop-common-project/hadoop-kms/pom.xml
+++ b/hadoop-common-project/hadoop-kms/pom.xml
@@ -22,11 +22,11 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.0.0-alpha4-SNAPSHOT</version>
+    <version>3.0.0-beta1-SNAPSHOT</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-kms</artifactId>
-  <version>3.0.0-alpha4-SNAPSHOT</version>
+  <version>3.0.0-beta1-SNAPSHOT</version>
   <packaging>jar</packaging>
 
   <name>Apache Hadoop KMS</name>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/af2773f6/hadoop-common-project/hadoop-minikdc/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-minikdc/pom.xml b/hadoop-common-project/hadoop-minikdc/pom.xml
index d9e01ec..759786c 100644
--- a/hadoop-common-project/hadoop-minikdc/pom.xml
+++ b/hadoop-common-project/hadoop-minikdc/pom.xml
@@ -18,12 +18,12 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.0.0-alpha4-SNAPSHOT</version>
+    <version>3.0.0-beta1-SNAPSHOT</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <modelVersion>4.0.0</modelVersion>
   <artifactId>hadoop-minikdc</artifactId>
-  <version>3.0.0-alpha4-SNAPSHOT</version>
+  <version>3.0.0-beta1-SNAPSHOT</version>
   <description>Apache Hadoop MiniKDC</description>
   <name>Apache Hadoop MiniKDC</name>
   <packaging>jar</packaging>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/af2773f6/hadoop-common-project/hadoop-nfs/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-nfs/pom.xml b/hadoop-common-project/hadoop-nfs/pom.xml
index 9340128..5fdaf44 100644
--- a/hadoop-common-project/hadoop-nfs/pom.xml
+++ b/hadoop-common-project/hadoop-nfs/pom.xml
@@ -20,11 +20,11 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.0.0-alpha4-SNAPSHOT</version>
+    <version>3.0.0-beta1-SNAPSHOT</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-nfs</artifactId>
-  <version>3.0.0-alpha4-SNAPSHOT</version>
+  <version>3.0.0-beta1-SNAPSHOT</version>
   <packaging>jar</packaging>
 
   <name>Apache Hadoop NFS</name>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/af2773f6/hadoop-common-project/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-common-project/pom.xml b/hadoop-common-project/pom.xml
index 63abb60..1b3a176 100644
--- a/hadoop-common-project/pom.xml
+++ b/hadoop-common-project/pom.xml
@@ -20,11 +20,11 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.0.0-alpha4-SNAPSHOT</version>
+    <version>3.0.0-beta1-SNAPSHOT</version>
     <relativePath>../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-common-project</artifactId>
-  <version>3.0.0-alpha4-SNAPSHOT</version>
+  <version>3.0.0-beta1-SNAPSHOT</version>
   <description>Apache Hadoop Common Project</description>
   <name>Apache Hadoop Common Project</name>
   <packaging>pom</packaging>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/af2773f6/hadoop-dist/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-dist/pom.xml b/hadoop-dist/pom.xml
index 0f90069..84d5ab3 100644
--- a/hadoop-dist/pom.xml
+++ b/hadoop-dist/pom.xml
@@ -20,11 +20,11 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.0.0-alpha4-SNAPSHOT</version>
+    <version>3.0.0-beta1-SNAPSHOT</version>
     <relativePath>../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-dist</artifactId>
-  <version>3.0.0-alpha4-SNAPSHOT</version>
+  <version>3.0.0-beta1-SNAPSHOT</version>
   <description>Apache Hadoop Distribution</description>
   <name>Apache Hadoop Distribution</name>
   <packaging>jar</packaging>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/af2773f6/hadoop-hdfs-project/hadoop-hdfs-client/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/pom.xml b/hadoop-hdfs-project/hadoop-hdfs-client/pom.xml
index 5a82f42..cc5481f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/pom.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/pom.xml
@@ -20,11 +20,11 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project-dist</artifactId>
-    <version>3.0.0-alpha4-SNAPSHOT</version>
+    <version>3.0.0-beta1-SNAPSHOT</version>
     <relativePath>../../hadoop-project-dist</relativePath>
   </parent>
   <artifactId>hadoop-hdfs-client</artifactId>
-  <version>3.0.0-alpha4-SNAPSHOT</version>
+  <version>3.0.0-beta1-SNAPSHOT</version>
   <description>Apache Hadoop HDFS Client</description>
   <name>Apache Hadoop HDFS Client</name>
   <packaging>jar</packaging>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/af2773f6/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml b/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml
index 64f08ca..b7adda0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml
@@ -22,11 +22,11 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.0.0-alpha4-SNAPSHOT</version>
+    <version>3.0.0-beta1-SNAPSHOT</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-hdfs-httpfs</artifactId>
-  <version>3.0.0-alpha4-SNAPSHOT</version>
+  <version>3.0.0-beta1-SNAPSHOT</version>
   <packaging>jar</packaging>
 
   <name>Apache Hadoop HttpFS</name>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/af2773f6/hadoop-hdfs-project/hadoop-hdfs-native-client/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/pom.xml b/hadoop-hdfs-project/hadoop-hdfs-native-client/pom.xml
index d139f7a..db17c22 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/pom.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/pom.xml
@@ -20,11 +20,11 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project-dist</artifactId>
-    <version>3.0.0-alpha4-SNAPSHOT</version>
+    <version>3.0.0-beta1-SNAPSHOT</version>
     <relativePath>../../hadoop-project-dist</relativePath>
   </parent>
   <artifactId>hadoop-hdfs-native-client</artifactId>
-  <version>3.0.0-alpha4-SNAPSHOT</version>
+  <version>3.0.0-beta1-SNAPSHOT</version>
   <description>Apache Hadoop HDFS Native Client</description>
   <name>Apache Hadoop HDFS Native Client</name>
   <packaging>jar</packaging>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/af2773f6/hadoop-hdfs-project/hadoop-hdfs-nfs/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/pom.xml b/hadoop-hdfs-project/hadoop-hdfs-nfs/pom.xml
index 0268067..a70b9b1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-nfs/pom.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/pom.xml
@@ -20,11 +20,11 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.0.0-alpha4-SNAPSHOT</version>
+    <version>3.0.0-beta1-SNAPSHOT</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-hdfs-nfs</artifactId>
-  <version>3.0.0-alpha4-SNAPSHOT</version>
+  <version>3.0.0-beta1-SNAPSHOT</version>
   <description>Apache Hadoop HDFS-NFS</description>
   <name>Apache Hadoop HDFS-NFS</name>
   <packaging>jar</packaging>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/af2773f6/hadoop-hdfs-project/hadoop-hdfs/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/pom.xml b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
index 3deea50..1c50d31 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/pom.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
@@ -20,11 +20,11 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project-dist</artifactId>
-    <version>3.0.0-alpha4-SNAPSHOT</version>
+    <version>3.0.0-beta1-SNAPSHOT</version>
     <relativePath>../../hadoop-project-dist</relativePath>
   </parent>
   <artifactId>hadoop-hdfs</artifactId>
-  <version>3.0.0-alpha4-SNAPSHOT</version>
+  <version>3.0.0-beta1-SNAPSHOT</version>
   <description>Apache Hadoop HDFS</description>
   <name>Apache Hadoop HDFS</name>
   <packaging>jar</packaging>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/af2773f6/hadoop-hdfs-project/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/pom.xml b/hadoop-hdfs-project/pom.xml
index d18cc7c..a3fdd02 100644
--- a/hadoop-hdfs-project/pom.xml
+++ b/hadoop-hdfs-project/pom.xml
@@ -20,11 +20,11 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.0.0-alpha4-SNAPSHOT</version>
+    <version>3.0.0-beta1-SNAPSHOT</version>
     <relativePath>../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-hdfs-project</artifactId>
-  <version>3.0.0-alpha4-SNAPSHOT</version>
+  <version>3.0.0-beta1-SNAPSHOT</version>
   <description>Apache Hadoop HDFS Project</description>
   <name>Apache Hadoop HDFS Project</name>
   <packaging>pom</packaging>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/af2773f6/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/pom.xml b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/pom.xml
index dcf93bf..e5fb153 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/pom.xml
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/pom.xml
@@ -19,11 +19,11 @@
   <parent>
     <artifactId>hadoop-mapreduce-client</artifactId>
     <groupId>org.apache.hadoop</groupId>
-    <version>3.0.0-alpha4-SNAPSHOT</version>
+    <version>3.0.0-beta1-SNAPSHOT</version>
   </parent>
   <modelVersion>4.0.0</modelVersion>
   <artifactId>hadoop-mapreduce-client-app</artifactId>
-  <version>3.0.0-alpha4-SNAPSHOT</version>
+  <version>3.0.0-beta1-SNAPSHOT</version>
   <name>Apache Hadoop MapReduce App</name>
 
   <properties>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/af2773f6/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/pom.xml b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/pom.xml
index 60f213c..db8ae49 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/pom.xml
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/pom.xml
@@ -19,11 +19,11 @@
   <parent>
     <artifactId>hadoop-mapreduce-client</artifactId>
     <groupId>org.apache.hadoop</groupId>
-    <version>3.0.0-alpha4-SNAPSHOT</version>
+    <version>3.0.0-beta1-SNAPSHOT</version>
   </parent>
   <modelVersion>4.0.0</modelVersion>
   <artifactId>hadoop-mapreduce-client-common</artifactId>
-  <version>3.0.0-alpha4-SNAPSHOT</version>
+  <version>3.0.0-beta1-SNAPSHOT</version>
   <name>Apache Hadoop MapReduce Common</name>
 
   <properties>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/af2773f6/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/pom.xml b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/pom.xml
index dc1fee8..a23827d 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/pom.xml
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/pom.xml
@@ -19,11 +19,11 @@
   <parent>
     <artifactId>hadoop-mapreduce-client</artifactId>
     <groupId>org.apache.hadoop</groupId>
-    <version>3.0.0-alpha4-SNAPSHOT</version>
+    <version>3.0.0-beta1-SNAPSHOT</version>
   </parent>
   <modelVersion>4.0.0</modelVersion>
   <artifactId>hadoop-mapreduce-client-core</artifactId>
-  <version>3.0.0-alpha4-SNAPSHOT</version>
+  <version>3.0.0-beta1-SNAPSHOT</version>
   <name>Apache Hadoop MapReduce Core</name>
 
   <properties>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/af2773f6/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs-plugins/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs-plugins/pom.xml b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs-plugins/pom.xml
index 9210596..577d92d 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs-plugins/pom.xml
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs-plugins/pom.xml
@@ -19,11 +19,11 @@
   <parent>
     <artifactId>hadoop-mapreduce-client</artifactId>
     <groupId>org.apache.hadoop</groupId>
-    <version>3.0.0-alpha4-SNAPSHOT</version>
+    <version>3.0.0-beta1-SNAPSHOT</version>
   </parent>
   <modelVersion>4.0.0</modelVersion>
   <artifactId>hadoop-mapreduce-client-hs-plugins</artifactId>
-  <version>3.0.0-alpha4-SNAPSHOT</version>
+  <version>3.0.0-beta1-SNAPSHOT</version>
   <name>Apache Hadoop MapReduce HistoryServer Plugins</name>
 
   <properties>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/af2773f6/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/pom.xml b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/pom.xml
index 0b026ed..b185d45 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/pom.xml
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/pom.xml
@@ -19,11 +19,11 @@
   <parent>
     <artifactId>hadoop-mapreduce-client</artifactId>
     <groupId>org.apache.hadoop</groupId>
-    <version>3.0.0-alpha4-SNAPSHOT</version>
+    <version>3.0.0-beta1-SNAPSHOT</version>
   </parent>
   <modelVersion>4.0.0</modelVersion>
   <artifactId>hadoop-mapreduce-client-hs</artifactId>
-  <version>3.0.0-alpha4-SNAPSHOT</version>
+  <version>3.0.0-beta1-SNAPSHOT</version>
   <name>Apache Hadoop MapReduce HistoryServer</name>
 
   <properties>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/af2773f6/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/pom.xml b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/pom.xml
index 527e061..d618325 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/pom.xml
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/pom.xml
@@ -19,11 +19,11 @@
   <parent>
     <artifactId>hadoop-mapreduce-client</artifactId>
     <groupId>org.apache.hadoop</groupId>
-    <version>3.0.0-alpha4-SNAPSHOT</version>
+    <version>3.0.0-beta1-SNAPSHOT</version>
   </parent>
   <modelVersion>4.0.0</modelVersion>
   <artifactId>hadoop-mapreduce-client-jobclient</artifactId>
-  <version>3.0.0-alpha4-SNAPSHOT</version>
+  <version>3.0.0-beta1-SNAPSHOT</version>
   <name>Apache Hadoop MapReduce JobClient</name>
 
   <properties>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/af2773f6/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/pom.xml b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/pom.xml
index 721e28a..9fd5d1e 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/pom.xml
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/pom.xml
@@ -19,11 +19,11 @@
   <parent>
     <artifactId>hadoop-mapreduce-client</artifactId>
     <groupId>org.apache.hadoop</groupId>
-    <version>3.0.0-alpha4-SNAPSHOT</version>
+    <version>3.0.0-beta1-SNAPSHOT</version>
   </parent>
   <modelVersion>4.0.0</modelVersion>
   <artifactId>hadoop-mapreduce-client-nativetask</artifactId>
-  <version>3.0.0-alpha4-SNAPSHOT</version>
+  <version>3.0.0-beta1-SNAPSHOT</version>
   <name>Apache Hadoop MapReduce NativeTask</name>
 
   <properties>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/af2773f6/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/pom.xml b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/pom.xml
index 60adf00..3589350 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/pom.xml
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/pom.xml
@@ -19,11 +19,11 @@
   <parent>
     <artifactId>hadoop-mapreduce-client</artifactId>
     <groupId>org.apache.hadoop</groupId>
-    <version>3.0.0-alpha4-SNAPSHOT</version>
+    <version>3.0.0-beta1-SNAPSHOT</version>
   </parent>
   <modelVersion>4.0.0</modelVersion>
   <artifactId>hadoop-mapreduce-client-shuffle</artifactId>
-  <version>3.0.0-alpha4-SNAPSHOT</version>
+  <version>3.0.0-beta1-SNAPSHOT</version>
   <name>Apache Hadoop MapReduce Shuffle</name>
 
   <properties>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/af2773f6/hadoop-mapreduce-project/hadoop-mapreduce-client/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/pom.xml b/hadoop-mapreduce-project/hadoop-mapreduce-client/pom.xml
index 0f453a8..4e7a0ae 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/pom.xml
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/pom.xml
@@ -20,11 +20,11 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.0.0-alpha4-SNAPSHOT</version>
+    <version>3.0.0-beta1-SNAPSHOT</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-mapreduce-client</artifactId>
-  <version>3.0.0-alpha4-SNAPSHOT</version>
+  <version>3.0.0-beta1-SNAPSHOT</version>
   <name>Apache Hadoop MapReduce Client</name>
   <packaging>pom</packaging>
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/af2773f6/hadoop-mapreduce-project/hadoop-mapreduce-examples/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-examples/pom.xml b/hadoop-mapreduce-project/hadoop-mapreduce-examples/pom.xml
index 948ebbc..13dc340 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-examples/pom.xml
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-examples/pom.xml
@@ -20,11 +20,11 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.0.0-alpha4-SNAPSHOT</version>
+    <version>3.0.0-beta1-SNAPSHOT</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-mapreduce-examples</artifactId>
-  <version>3.0.0-alpha4-SNAPSHOT</version>
+  <version>3.0.0-beta1-SNAPSHOT</version>
   <description>Apache Hadoop MapReduce Examples</description>
   <name>Apache Hadoop MapReduce Examples</name>
   <packaging>jar</packaging>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/af2773f6/hadoop-mapreduce-project/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/pom.xml b/hadoop-mapreduce-project/pom.xml
index 060fe4b..c2816e5 100644
--- a/hadoop-mapreduce-project/pom.xml
+++ b/hadoop-mapreduce-project/pom.xml
@@ -18,11 +18,11 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.0.0-alpha4-SNAPSHOT</version>
+    <version>3.0.0-beta1-SNAPSHOT</version>
     <relativePath>../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-mapreduce</artifactId>
-  <version>3.0.0-alpha4-SNAPSHOT</version>
+  <version>3.0.0-beta1-SNAPSHOT</version>
   <packaging>pom</packaging>
   <name>Apache Hadoop MapReduce</name>
   <url>http://hadoop.apache.org/mapreduce/</url>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/af2773f6/hadoop-maven-plugins/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-maven-plugins/pom.xml b/hadoop-maven-plugins/pom.xml
index b10ee1e..9831456 100644
--- a/hadoop-maven-plugins/pom.xml
+++ b/hadoop-maven-plugins/pom.xml
@@ -19,7 +19,7 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.0.0-alpha4-SNAPSHOT</version>
+    <version>3.0.0-beta1-SNAPSHOT</version>
     <relativePath>../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-maven-plugins</artifactId>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/af2773f6/hadoop-minicluster/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-minicluster/pom.xml b/hadoop-minicluster/pom.xml
index 6c39f6c..7529f9e 100644
--- a/hadoop-minicluster/pom.xml
+++ b/hadoop-minicluster/pom.xml
@@ -18,11 +18,11 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.0.0-alpha4-SNAPSHOT</version>
+    <version>3.0.0-beta1-SNAPSHOT</version>
     <relativePath>../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-minicluster</artifactId>
-  <version>3.0.0-alpha4-SNAPSHOT</version>
+  <version>3.0.0-beta1-SNAPSHOT</version>
   <packaging>jar</packaging>
 
   <description>Apache Hadoop Mini-Cluster</description>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/af2773f6/hadoop-project-dist/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-project-dist/pom.xml b/hadoop-project-dist/pom.xml
index a4abbd6..6e73c0e 100644
--- a/hadoop-project-dist/pom.xml
+++ b/hadoop-project-dist/pom.xml
@@ -20,11 +20,11 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.0.0-alpha4-SNAPSHOT</version>
+    <version>3.0.0-beta1-SNAPSHOT</version>
     <relativePath>../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-project-dist</artifactId>
-  <version>3.0.0-alpha4-SNAPSHOT</version>
+  <version>3.0.0-beta1-SNAPSHOT</version>
   <description>Apache Hadoop Project Dist POM</description>
   <name>Apache Hadoop Project Dist POM</name>
   <packaging>pom</packaging>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/af2773f6/hadoop-project/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index 7909442..a84070b 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -20,10 +20,10 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-main</artifactId>
-    <version>3.0.0-alpha4-SNAPSHOT</version>
+    <version>3.0.0-beta1-SNAPSHOT</version>
   </parent>
   <artifactId>hadoop-project</artifactId>
-  <version>3.0.0-alpha4-SNAPSHOT</version>
+  <version>3.0.0-beta1-SNAPSHOT</version>
   <description>Apache Hadoop Project POM</description>
   <name>Apache Hadoop Project POM</name>
   <packaging>pom</packaging>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/af2773f6/hadoop-tools/hadoop-aliyun/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aliyun/pom.xml b/hadoop-tools/hadoop-aliyun/pom.xml
index e48cbf4..7ffcc3d 100644
--- a/hadoop-tools/hadoop-aliyun/pom.xml
+++ b/hadoop-tools/hadoop-aliyun/pom.xml
@@ -18,7 +18,7 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.0.0-alpha4-SNAPSHOT</version>
+    <version>3.0.0-beta1-SNAPSHOT</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-aliyun</artifactId>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/af2773f6/hadoop-tools/hadoop-archive-logs/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-archive-logs/pom.xml b/hadoop-tools/hadoop-archive-logs/pom.xml
index a6ea650..0ddd448 100644
--- a/hadoop-tools/hadoop-archive-logs/pom.xml
+++ b/hadoop-tools/hadoop-archive-logs/pom.xml
@@ -20,11 +20,11 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.0.0-alpha4-SNAPSHOT</version>
+    <version>3.0.0-beta1-SNAPSHOT</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-archive-logs</artifactId>
-  <version>3.0.0-alpha4-SNAPSHOT</version>
+  <version>3.0.0-beta1-SNAPSHOT</version>
   <description>Apache Hadoop Archive Logs</description>
   <name>Apache Hadoop Archive Logs</name>
   <packaging>jar</packaging>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/af2773f6/hadoop-tools/hadoop-archives/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-archives/pom.xml b/hadoop-tools/hadoop-archives/pom.xml
index 7d7131a..3389057 100644
--- a/hadoop-tools/hadoop-archives/pom.xml
+++ b/hadoop-tools/hadoop-archives/pom.xml
@@ -20,11 +20,11 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.0.0-alpha4-SNAPSHOT</version>
+    <version>3.0.0-beta1-SNAPSHOT</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-archives</artifactId>
-  <version>3.0.0-alpha4-SNAPSHOT</version>
+  <version>3.0.0-beta1-SNAPSHOT</version>
   <description>Apache Hadoop Archives</description>
   <name>Apache Hadoop Archives</name>
   <packaging>jar</packaging>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/af2773f6/hadoop-tools/hadoop-aws/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/pom.xml b/hadoop-tools/hadoop-aws/pom.xml
index ca3c355..c995ca6 100644
--- a/hadoop-tools/hadoop-aws/pom.xml
+++ b/hadoop-tools/hadoop-aws/pom.xml
@@ -19,11 +19,11 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.0.0-alpha4-SNAPSHOT</version>
+    <version>3.0.0-beta1-SNAPSHOT</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-aws</artifactId>
-  <version>3.0.0-alpha4-SNAPSHOT</version>
+  <version>3.0.0-beta1-SNAPSHOT</version>
   <name>Apache Hadoop Amazon Web Services support</name>
   <description>
     This module contains code to support integration with Amazon Web Services.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/af2773f6/hadoop-tools/hadoop-azure-datalake/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure-datalake/pom.xml b/hadoop-tools/hadoop-azure-datalake/pom.xml
index c6b6350..3aed5e1 100644
--- a/hadoop-tools/hadoop-azure-datalake/pom.xml
+++ b/hadoop-tools/hadoop-azure-datalake/pom.xml
@@ -19,7 +19,7 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.0.0-alpha4-SNAPSHOT</version>
+    <version>3.0.0-beta1-SNAPSHOT</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-azure-datalake</artifactId>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/af2773f6/hadoop-tools/hadoop-azure/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/pom.xml b/hadoop-tools/hadoop-azure/pom.xml
index 37c37a3..03b531b 100644
--- a/hadoop-tools/hadoop-azure/pom.xml
+++ b/hadoop-tools/hadoop-azure/pom.xml
@@ -19,7 +19,7 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.0.0-alpha4-SNAPSHOT</version>
+    <version>3.0.0-beta1-SNAPSHOT</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-azure</artifactId>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/af2773f6/hadoop-tools/hadoop-datajoin/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-datajoin/pom.xml b/hadoop-tools/hadoop-datajoin/pom.xml
index c8a91ed..3c71309 100644
--- a/hadoop-tools/hadoop-datajoin/pom.xml
+++ b/hadoop-tools/hadoop-datajoin/pom.xml
@@ -20,11 +20,11 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.0.0-alpha4-SNAPSHOT</version>
+    <version>3.0.0-beta1-SNAPSHOT</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-datajoin</artifactId>
-  <version>3.0.0-alpha4-SNAPSHOT</version>
+  <version>3.0.0-beta1-SNAPSHOT</version>
   <description>Apache Hadoop Data Join</description>
   <name>Apache Hadoop Data Join</name>
   <packaging>jar</packaging>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/af2773f6/hadoop-tools/hadoop-distcp/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-distcp/pom.xml b/hadoop-tools/hadoop-distcp/pom.xml
index bccaac1..ce78fb7 100644
--- a/hadoop-tools/hadoop-distcp/pom.xml
+++ b/hadoop-tools/hadoop-distcp/pom.xml
@@ -20,11 +20,11 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.0.0-alpha4-SNAPSHOT</version>
+    <version>3.0.0-beta1-SNAPSHOT</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-distcp</artifactId>
-  <version>3.0.0-alpha4-SNAPSHOT</version>
+  <version>3.0.0-beta1-SNAPSHOT</version>
   <description>Apache Hadoop Distributed Copy</description>
   <name>Apache Hadoop Distributed Copy</name>
   <packaging>jar</packaging>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/af2773f6/hadoop-tools/hadoop-extras/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-extras/pom.xml b/hadoop-tools/hadoop-extras/pom.xml
index d08ad39..5968a71 100644
--- a/hadoop-tools/hadoop-extras/pom.xml
+++ b/hadoop-tools/hadoop-extras/pom.xml
@@ -20,11 +20,11 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.0.0-alpha4-SNAPSHOT</version>
+    <version>3.0.0-beta1-SNAPSHOT</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-extras</artifactId>
-  <version>3.0.0-alpha4-SNAPSHOT</version>
+  <version>3.0.0-beta1-SNAPSHOT</version>
   <description>Apache Hadoop Extras</description>
   <name>Apache Hadoop Extras</name>
   <packaging>jar</packaging>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/af2773f6/hadoop-tools/hadoop-gridmix/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-gridmix/pom.xml b/hadoop-tools/hadoop-gridmix/pom.xml
index 45ee039..1494c7b 100644
--- a/hadoop-tools/hadoop-gridmix/pom.xml
+++ b/hadoop-tools/hadoop-gridmix/pom.xml
@@ -20,11 +20,11 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.0.0-alpha4-SNAPSHOT</version>
+    <version>3.0.0-beta1-SNAPSHOT</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-gridmix</artifactId>
-  <version>3.0.0-alpha4-SNAPSHOT</version>
+  <version>3.0.0-beta1-SNAPSHOT</version>
   <description>Apache Hadoop Gridmix</description>
   <name>Apache Hadoop Gridmix</name>
   <packaging>jar</packaging>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/af2773f6/hadoop-tools/hadoop-kafka/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-kafka/pom.xml b/hadoop-tools/hadoop-kafka/pom.xml
index 8f41de1..3b61650 100644
--- a/hadoop-tools/hadoop-kafka/pom.xml
+++ b/hadoop-tools/hadoop-kafka/pom.xml
@@ -19,11 +19,11 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.0.0-alpha4-SNAPSHOT</version>
+    <version>3.0.0-beta1-SNAPSHOT</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-kafka</artifactId>
-  <version>3.0.0-alpha4-SNAPSHOT</version>
+  <version>3.0.0-beta1-SNAPSHOT</version>
   <name>Apache Hadoop Kafka Library support</name>
   <description>
     This module contains code to support integration with Kafka.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/af2773f6/hadoop-tools/hadoop-openstack/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-openstack/pom.xml b/hadoop-tools/hadoop-openstack/pom.xml
index 5169ec5..00e6f43 100644
--- a/hadoop-tools/hadoop-openstack/pom.xml
+++ b/hadoop-tools/hadoop-openstack/pom.xml
@@ -19,11 +19,11 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.0.0-alpha4-SNAPSHOT</version>
+    <version>3.0.0-beta1-SNAPSHOT</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-openstack</artifactId>
-  <version>3.0.0-alpha4-SNAPSHOT</version>
+  <version>3.0.0-beta1-SNAPSHOT</version>
   <name>Apache Hadoop OpenStack support</name>
   <description>
     This module contains code to support integration with OpenStack.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/af2773f6/hadoop-tools/hadoop-pipes/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-pipes/pom.xml b/hadoop-tools/hadoop-pipes/pom.xml
index 457f3d3..1a951a0 100644
--- a/hadoop-tools/hadoop-pipes/pom.xml
+++ b/hadoop-tools/hadoop-pipes/pom.xml
@@ -20,11 +20,11 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.0.0-alpha4-SNAPSHOT</version>
+    <version>3.0.0-beta1-SNAPSHOT</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-pipes</artifactId>
-  <version>3.0.0-alpha4-SNAPSHOT</version>
+  <version>3.0.0-beta1-SNAPSHOT</version>
   <description>Apache Hadoop Pipes</description>
   <name>Apache Hadoop Pipes</name>
   <packaging>pom</packaging>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/af2773f6/hadoop-tools/hadoop-rumen/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-rumen/pom.xml b/hadoop-tools/hadoop-rumen/pom.xml
index 99e4367..864ad2f 100644
--- a/hadoop-tools/hadoop-rumen/pom.xml
+++ b/hadoop-tools/hadoop-rumen/pom.xml
@@ -20,11 +20,11 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.0.0-alpha4-SNAPSHOT</version>
+    <version>3.0.0-beta1-SNAPSHOT</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-rumen</artifactId>
-  <version>3.0.0-alpha4-SNAPSHOT</version>
+  <version>3.0.0-beta1-SNAPSHOT</version>
   <description>Apache Hadoop Rumen</description>
   <name>Apache Hadoop Rumen</name>
   <packaging>jar</packaging>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/af2773f6/hadoop-tools/hadoop-sls/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-sls/pom.xml b/hadoop-tools/hadoop-sls/pom.xml
index 4089473..7dd3718 100644
--- a/hadoop-tools/hadoop-sls/pom.xml
+++ b/hadoop-tools/hadoop-sls/pom.xml
@@ -19,11 +19,11 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.0.0-alpha4-SNAPSHOT</version>
+    <version>3.0.0-beta1-SNAPSHOT</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-sls</artifactId>
-  <version>3.0.0-alpha4-SNAPSHOT</version>
+  <version>3.0.0-beta1-SNAPSHOT</version>
   <description>Apache Hadoop Scheduler Load Simulator</description>
   <name>Apache Hadoop Scheduler Load Simulator</name>
   <packaging>jar</packaging>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/af2773f6/hadoop-tools/hadoop-streaming/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-streaming/pom.xml b/hadoop-tools/hadoop-streaming/pom.xml
index 1eb4eae..5ad8f4d 100644
--- a/hadoop-tools/hadoop-streaming/pom.xml
+++ b/hadoop-tools/hadoop-streaming/pom.xml
@@ -20,11 +20,11 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.0.0-alpha4-SNAPSHOT</version>
+    <version>3.0.0-beta1-SNAPSHOT</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-streaming</artifactId>
-  <version>3.0.0-alpha4-SNAPSHOT</version>
+  <version>3.0.0-beta1-SNAPSHOT</version>
   <description>Apache Hadoop MapReduce Streaming</description>
   <name>Apache Hadoop MapReduce Streaming</name>
   <packaging>jar</packaging>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/af2773f6/hadoop-tools/hadoop-tools-dist/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-tools-dist/pom.xml b/hadoop-tools/hadoop-tools-dist/pom.xml
index 8204123..4e399a8 100644
--- a/hadoop-tools/hadoop-tools-dist/pom.xml
+++ b/hadoop-tools/hadoop-tools-dist/pom.xml
@@ -20,11 +20,11 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project-dist</artifactId>
-    <version>3.0.0-alpha4-SNAPSHOT</version>
+    <version>3.0.0-beta1-SNAPSHOT</version>
     <relativePath>../../hadoop-project-dist</relativePath>
   </parent>
   <artifactId>hadoop-tools-dist</artifactId>
-  <version>3.0.0-alpha4-SNAPSHOT</version>
+  <version>3.0.0-beta1-SNAPSHOT</version>
   <description>Apache Hadoop Tools Dist</description>
   <name>Apache Hadoop Tools Dist</name>
   <packaging>jar</packaging>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/af2773f6/hadoop-tools/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-tools/pom.xml b/hadoop-tools/pom.xml
index 55ee414..a6b925f 100644
--- a/hadoop-tools/pom.xml
+++ b/hadoop-tools/pom.xml
@@ -20,11 +20,11 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.0.0-alpha4-SNAPSHOT</version>
+    <version>3.0.0-beta1-SNAPSHOT</version>
     <relativePath>../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-tools</artifactId>
-  <version>3.0.0-alpha4-SNAPSHOT</version>
+  <version>3.0.0-beta1-SNAPSHOT</version>
   <description>Apache Hadoop Tools</description>
   <name>Apache Hadoop Tools</name>
   <packaging>pom</packaging>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/af2773f6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/pom.xml
index 33f16f9..f8f2973 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/pom.xml
@@ -19,11 +19,11 @@
   <parent>
     <artifactId>hadoop-yarn</artifactId>
     <groupId>org.apache.hadoop</groupId>
-    <version>3.0.0-alpha4-SNAPSHOT</version>
+    <version>3.0.0-beta1-SNAPSHOT</version>
   </parent>
   <modelVersion>4.0.0</modelVersion>
   <artifactId>hadoop-yarn-api</artifactId>
-  <version>3.0.0-alpha4-SNAPSHOT</version>
+  <version>3.0.0-beta1-SNAPSHOT</version>
   <name>Apache Hadoop YARN API</name>
 
   <properties>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/af2773f6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/pom.xml
index 9aa8951..1fea954 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/pom.xml
@@ -19,11 +19,11 @@
   <parent>
     <artifactId>hadoop-yarn-applications</artifactId>
     <groupId>org.apache.hadoop</groupId>
-    <version>3.0.0-alpha4-SNAPSHOT</version>
+    <version>3.0.0-beta1-SNAPSHOT</version>
   </parent>
   <modelVersion>4.0.0</modelVersion>
   <artifactId>hadoop-yarn-applications-distributedshell</artifactId>
-  <version>3.0.0-alpha4-SNAPSHOT</version>
+  <version>3.0.0-beta1-SNAPSHOT</version>
   <name>Apache Hadoop YARN DistributedShell</name>
 
   <properties>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/af2773f6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-unmanaged-am-launcher/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-unmanaged-am-launcher/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-unmanaged-am-launcher/pom.xml
index 99edc0a..ef0c7f9 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-unmanaged-am-launcher/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-unmanaged-am-launcher/pom.xml
@@ -19,11 +19,11 @@
   <parent>
     <artifactId>hadoop-yarn-applications</artifactId>
     <groupId>org.apache.hadoop</groupId>
-    <version>3.0.0-alpha4-SNAPSHOT</version>
+    <version>3.0.0-beta1-SNAPSHOT</version>
   </parent>
   <modelVersion>4.0.0</modelVersion>
   <artifactId>hadoop-yarn-applications-unmanaged-am-launcher</artifactId>
-  <version>3.0.0-alpha4-SNAPSHOT</version>
+  <version>3.0.0-beta1-SNAPSHOT</version>
   <name>Apache Hadoop YARN Unmanaged Am Launcher</name>
 
   <properties>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/af2773f6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/pom.xml
index d6090be..4c11d4d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/pom.xml
@@ -19,11 +19,11 @@
   <parent>
     <artifactId>hadoop-yarn</artifactId>
     <groupId>org.apache.hadoop</groupId>
-    <version>3.0.0-alpha4-SNAPSHOT</version>
+    <version>3.0.0-beta1-SNAPSHOT</version>
   </parent>
   <modelVersion>4.0.0</modelVersion>
   <artifactId>hadoop-yarn-applications</artifactId>
-  <version>3.0.0-alpha4-SNAPSHOT</version>
+  <version>3.0.0-beta1-SNAPSHOT</version>
   <name>Apache Hadoop YARN Applications</name>
   <packaging>pom</packaging>
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/af2773f6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/pom.xml
index 2501109..b83bff8 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/pom.xml
@@ -17,10 +17,10 @@
   <parent>
     <artifactId>hadoop-yarn</artifactId>
     <groupId>org.apache.hadoop</groupId>
-    <version>3.0.0-alpha4-SNAPSHOT</version>
+    <version>3.0.0-beta1-SNAPSHOT</version>
   </parent>
   <artifactId>hadoop-yarn-client</artifactId>
-  <version>3.0.0-alpha4-SNAPSHOT</version>
+  <version>3.0.0-beta1-SNAPSHOT</version>
   <name>Apache Hadoop YARN Client</name>
 
   <properties>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/af2773f6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml
index f9cfb11..505e20f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml
@@ -19,11 +19,11 @@
   <parent>
     <artifactId>hadoop-yarn</artifactId>
     <groupId>org.apache.hadoop</groupId>
-    <version>3.0.0-alpha4-SNAPSHOT</version>
+    <version>3.0.0-beta1-SNAPSHOT</version>
   </parent>
   <modelVersion>4.0.0</modelVersion>
   <artifactId>hadoop-yarn-common</artifactId>
-  <version>3.0.0-alpha4-SNAPSHOT</version>
+  <version>3.0.0-beta1-SNAPSHOT</version>
   <name>Apache Hadoop YARN Common</name>
 
   <properties>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/af2773f6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/pom.xml
index 09cf9c7..1435007 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/pom.xml
@@ -19,11 +19,11 @@
   <parent>
     <artifactId>hadoop-yarn</artifactId>
     <groupId>org.apache.hadoop</groupId>
-    <version>3.0.0-alpha4-SNAPSHOT</version>
+    <version>3.0.0-beta1-SNAPSHOT</version>
   </parent>
   <modelVersion>4.0.0</modelVersion>
   <artifactId>hadoop-yarn-registry</artifactId>
-  <version>3.0.0-alpha4-SNAPSHOT</version>
+  <version>3.0.0-beta1-SNAPSHOT</version>
   <name>Apache Hadoop YARN Registry</name>
 
   <properties>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/af2773f6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/pom.xml
index 641d5fe..d732af4 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/pom.xml
@@ -22,11 +22,11 @@
   <parent>
     <artifactId>hadoop-yarn-server</artifactId>
     <groupId>org.apache.hadoop</groupId>
-    <version>3.0.0-alpha4-SNAPSHOT</version>
+    <version>3.0.0-beta1-SNAPSHOT</version>
   </parent>
   <modelVersion>4.0.0</modelVersion>
   <artifactId>hadoop-yarn-server-applicationhistoryservice</artifactId>
-  <version>3.0.0-alpha4-SNAPSHOT</version>
+  <version>3.0.0-beta1-SNAPSHOT</version>
   <name>Apache Hadoop YARN ApplicationHistoryService</name>
 
   <properties>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/af2773f6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/pom.xml
index 465f71a..ea0f32e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/pom.xml
@@ -19,11 +19,11 @@
   <parent>
     <artifactId>hadoop-yarn-server</artifactId>
     <groupId>org.apache.hadoop</groupId>
-    <version>3.0.0-alpha4-SNAPSHOT</version>
+    <version>3.0.0-beta1-SNAPSHOT</version>
   </parent>
   <modelVersion>4.0.0</modelVersion>
   <artifactId>hadoop-yarn-server-common</artifactId>
-  <version>3.0.0-alpha4-SNAPSHOT</version>
+  <version>3.0.0-beta1-SNAPSHOT</version>
   <name>Apache Hadoop YARN Server Common</name>
 
   <properties>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/af2773f6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/pom.xml
index 75ec0a2..a0f4ef7 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/pom.xml
@@ -19,11 +19,11 @@
   <parent>
     <artifactId>hadoop-yarn-server</artifactId>
     <groupId>org.apache.hadoop</groupId>
-    <version>3.0.0-alpha4-SNAPSHOT</version>
+    <version>3.0.0-beta1-SNAPSHOT</version>
   </parent>
   <modelVersion>4.0.0</modelVersion>
   <artifactId>hadoop-yarn-server-nodemanager</artifactId>
-  <version>3.0.0-alpha4-SNAPSHOT</version>
+  <version>3.0.0-beta1-SNAPSHOT</version>
   <name>Apache Hadoop YARN NodeManager</name>
 
   <properties>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/af2773f6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/pom.xml
index c40dfa3..9b8f8af 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/pom.xml
@@ -19,11 +19,11 @@
   <parent>
     <artifactId>hadoop-yarn-server</artifactId>
     <groupId>org.apache.hadoop</groupId>
-    <version>3.0.0-alpha4-SNAPSHOT</version>
+    <version>3.0.0-beta1-SNAPSHOT</version>
   </parent>
   <modelVersion>4.0.0</modelVersion>
   <artifactId>hadoop-yarn-server-resourcemanager</artifactId>
-  <version>3.0.0-alpha4-SNAPSHOT</version>
+  <version>3.0.0-beta1-SNAPSHOT</version>
   <name>Apache Hadoop YARN ResourceManager</name>
 
   <properties>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/af2773f6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-sharedcachemanager/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-sharedcachemanager/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-sharedcachemanager/pom.xml
index fb59131..a3b7879 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-sharedcachemanager/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-sharedcachemanager/pom.xml
@@ -17,10 +17,10 @@
   <parent>
     <artifactId>hadoop-yarn-server</artifactId>
     <groupId>org.apache.hadoop</groupId>
-    <version>3.0.0-alpha4-SNAPSHOT</version>
+    <version>3.0.0-beta1-SNAPSHOT</version>
   </parent>
   <artifactId>hadoop-yarn-server-sharedcachemanager</artifactId>
-  <version>3.0.0-alpha4-SNAPSHOT</version>
+  <version>3.0.0-beta1-SNAPSHOT</version>
   <name>Apache Hadoop YARN SharedCacheManager</name>
 
   <properties>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/af2773f6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/pom.xml
index e57442c..f182236 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/pom.xml
@@ -19,10 +19,10 @@
   <parent>
     <artifactId>hadoop-yarn-server</artifactId>
     <groupId>org.apache.hadoop</groupId>
-    <version>3.0.0-alpha4-SNAPSHOT</version>
+    <version>3.0.0-beta1-SNAPSHOT</version>
   </parent>
   <artifactId>hadoop-yarn-server-tests</artifactId>
-  <version>3.0.0-alpha4-SNAPSHOT</version>
+  <version>3.0.0-beta1-SNAPSHOT</version>
   <name>Apache Hadoop YARN Server Tests</name>
 
   <properties>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/af2773f6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timeline-pluginstorage/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timeline-pluginstorage/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timeline-pluginstorage/pom.xml
index f536df6..8b57dcd 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timeline-pluginstorage/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timeline-pluginstorage/pom.xml
@@ -22,11 +22,11 @@
   <parent>
     <artifactId>hadoop-yarn-server</artifactId>
     <groupId>org.apache.hadoop</groupId>
-    <version>3.0.0-alpha4-SNAPSHOT</version>
+    <version>3.0.0-beta1-SNAPSHOT</version>
   </parent>
   <modelVersion>4.0.0</modelVersion>
   <artifactId>hadoop-yarn-server-timeline-pluginstorage</artifactId>
-  <version>3.0.0-alpha4-SNAPSHOT</version>
+  <version>3.0.0-beta1-SNAPSHOT</version>
   <name>Apache Hadoop YARN Timeline Plugin Storage</name>
 
   <properties>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/af2773f6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/pom.xml
index bacd017..b090954 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/pom.xml
@@ -22,11 +22,11 @@
   <parent>
     <artifactId>hadoop-yarn-server</artifactId>
     <groupId>org.apache.hadoop</groupId>
-    <version>3.0.0-alpha4-SNAPSHOT</version>
+    <version>3.0.0-beta1-SNAPSHOT</version>
   </parent>
   <modelVersion>4.0.0</modelVersion>
   <artifactId>hadoop-yarn-server-timelineservice-hbase-tests</artifactId>
-  <version>3.0.0-alpha4-SNAPSHOT</version>
+  <version>3.0.0-beta1-SNAPSHOT</version>
   <name>Apache Hadoop YARN Timeline Service HBase tests</name>
 
   <properties>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/af2773f6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/pom.xml
index a2a5c2e..acea867 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/pom.xml
@@ -22,7 +22,7 @@
   <parent>
     <artifactId>hadoop-yarn-server</artifactId>
     <groupId>org.apache.hadoop</groupId>
-    <version>3.0.0-alpha4-SNAPSHOT</version>
+    <version>3.0.0-beta1-SNAPSHOT</version>
   </parent>
   <modelVersion>4.0.0</modelVersion>
   <artifactId>hadoop-yarn-server-timelineservice-hbase</artifactId>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/af2773f6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/pom.xml
index a205e65..3dc6222 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/pom.xml
@@ -22,11 +22,11 @@
   <parent>
     <artifactId>hadoop-yarn-server</artifactId>
     <groupId>org.apache.hadoop</groupId>
-    <version>3.0.0-alpha4-SNAPSHOT</version>
+    <version>3.0.0-beta1-SNAPSHOT</version>
   </parent>
   <modelVersion>4.0.0</modelVersion>
   <artifactId>hadoop-yarn-server-timelineservice</artifactId>
-  <version>3.0.0-alpha4-SNAPSHOT</version>
+  <version>3.0.0-beta1-SNAPSHOT</version>
   <name>Apache Hadoop YARN Timeline Service</name>
 
   <properties>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/af2773f6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/pom.xml
index 34ffb08..0951528 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/pom.xml
@@ -19,11 +19,11 @@
   <parent>
     <artifactId>hadoop-yarn-server</artifactId>
     <groupId>org.apache.hadoop</groupId>
-    <version>3.0.0-alpha4-SNAPSHOT</version>
+    <version>3.0.0-beta1-SNAPSHOT</version>
   </parent>
   <modelVersion>4.0.0</modelVersion>
   <artifactId>hadoop-yarn-server-web-proxy</artifactId>
-  <version>3.0.0-alpha4-SNAPSHOT</version>
+  <version>3.0.0-beta1-SNAPSHOT</version>
   <name>Apache Hadoop YARN Web Proxy</name>
 
   <properties>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/af2773f6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/pom.xml
index a0b0082..517326b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/pom.xml
@@ -19,11 +19,11 @@
   <parent>
     <artifactId>hadoop-yarn</artifactId>
     <groupId>org.apache.hadoop</groupId>
-    <version>3.0.0-alpha4-SNAPSHOT</version>
+    <version>3.0.0-beta1-SNAPSHOT</version>
   </parent>
   <modelVersion>4.0.0</modelVersion>
   <artifactId>hadoop-yarn-server</artifactId>
-  <version>3.0.0-alpha4-SNAPSHOT</version>
+  <version>3.0.0-beta1-SNAPSHOT</version>
   <name>Apache Hadoop YARN Server</name>
   <packaging>pom</packaging>
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/af2773f6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/pom.xml
index d4fcf90..7576f76 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/pom.xml
@@ -19,11 +19,11 @@
   <parent>
     <artifactId>hadoop-yarn</artifactId>
     <groupId>org.apache.hadoop</groupId>
-    <version>3.0.0-alpha4-SNAPSHOT</version>
+    <version>3.0.0-beta1-SNAPSHOT</version>
   </parent>
   <modelVersion>4.0.0</modelVersion>
   <artifactId>hadoop-yarn-site</artifactId>
-  <version>3.0.0-alpha4-SNAPSHOT</version>
+  <version>3.0.0-beta1-SNAPSHOT</version>
   <name>Apache Hadoop YARN Site</name>
   <packaging>pom</packaging>
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/af2773f6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml
index 34f7d0b..1d1b1b0 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml
@@ -20,11 +20,11 @@
   <parent>
     <artifactId>hadoop-yarn</artifactId>
     <groupId>org.apache.hadoop</groupId>
-    <version>3.0.0-alpha4-SNAPSHOT</version>
+    <version>3.0.0-beta1-SNAPSHOT</version>
   </parent>
   <modelVersion>4.0.0</modelVersion>
   <artifactId>hadoop-yarn-ui</artifactId>
-  <version>3.0.0-alpha4-SNAPSHOT</version>
+  <version>3.0.0-beta1-SNAPSHOT</version>
   <name>Apache Hadoop YARN UI</name>
   <packaging>${packagingType}</packaging>
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/af2773f6/hadoop-yarn-project/hadoop-yarn/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/pom.xml b/hadoop-yarn-project/hadoop-yarn/pom.xml
index 725b133..52d9028 100644
--- a/hadoop-yarn-project/hadoop-yarn/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/pom.xml
@@ -16,11 +16,11 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.0.0-alpha4-SNAPSHOT</version>
+    <version>3.0.0-beta1-SNAPSHOT</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-yarn</artifactId>
-  <version>3.0.0-alpha4-SNAPSHOT</version>
+  <version>3.0.0-beta1-SNAPSHOT</version>
   <packaging>pom</packaging>
   <name>Apache Hadoop YARN</name>
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/af2773f6/hadoop-yarn-project/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/pom.xml b/hadoop-yarn-project/pom.xml
index 2e3c095..df87417 100644
--- a/hadoop-yarn-project/pom.xml
+++ b/hadoop-yarn-project/pom.xml
@@ -18,11 +18,11 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.0.0-alpha4-SNAPSHOT</version>
+    <version>3.0.0-beta1-SNAPSHOT</version>
     <relativePath>../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-yarn-project</artifactId>
-  <version>3.0.0-alpha4-SNAPSHOT</version>
+  <version>3.0.0-beta1-SNAPSHOT</version>
   <packaging>pom</packaging>
   <name>Apache Hadoop YARN Project</name>
   <url>http://hadoop.apache.org/yarn/</url>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/af2773f6/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index 17458bc..e7f6eeb 100644
--- a/pom.xml
+++ b/pom.xml
@@ -18,7 +18,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs
   <modelVersion>4.0.0</modelVersion>
   <groupId>org.apache.hadoop</groupId>
   <artifactId>hadoop-main</artifactId>
-  <version>3.0.0-alpha4-SNAPSHOT</version>
+  <version>3.0.0-beta1-SNAPSHOT</version>
   <description>Apache Hadoop Main</description>
   <name>Apache Hadoop Main</name>
   <packaging>pom</packaging>
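
The version strings above change in lockstep across every module POM; nothing else in these files moves. A sweep like this is normally generated rather than hand-edited, for example with the Maven Versions plugin ("mvn versions:set -DnewVersion=3.0.0-beta1-SNAPSHOT", then "mvn versions:commit" to drop the pom.xml.versionsBackup files). That is offered as a plausible workflow, not a claim about how this particular commit was produced.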




[22/50] [abbrv] hadoop git commit: HDFS-12089. Fix ambiguous NN retry log message in WebHDFS. Contributed by Eric Badger

Posted by as...@apache.org.
HDFS-12089. Fix ambiguous NN retry log message in WebHDFS. Contributed by Eric Badger


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6436768b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6436768b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6436768b

Branch: refs/heads/YARN-5972
Commit: 6436768baf1b2ac05f6786edcd76fd3a66c03eaa
Parents: a180ba4
Author: Mingliang Liu <li...@apache.org>
Authored: Wed Jul 5 11:10:57 2017 -0700
Committer: Mingliang Liu <li...@apache.org>
Committed: Wed Jul 5 11:10:57 2017 -0700

----------------------------------------------------------------------
 .../main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java    | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6436768b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
index a9bc795..3861cba 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
@@ -792,7 +792,7 @@ public class WebHdfsFileSystem extends FileSystem
               a.action == RetryPolicy.RetryAction.RetryDecision.FAILOVER_AND_RETRY;
 
           if (isRetry || isFailoverAndRetry) {
-            LOG.info("Retrying connect to namenode: {}. Already tried {}"
+            LOG.info("Retrying connect to namenode: {}. Already retried {}"
                     + " time(s); retry policy is {}, delay {}ms.",
                 nnAddr, retry, retryPolicy, a.delayMillis);
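
The one-word change matters because the old message left it ambiguous whether the count included the initial attempt. A minimal, self-contained sketch of the corrected logging call follows; the namenode address, retry count, policy name, and delay are placeholder values, not output from a real cluster.

  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;

  public class RetryLogDemo {
    private static final Logger LOG =
        LoggerFactory.getLogger(RetryLogDemo.class);

    public static void main(String[] args) {
      // Placeholder state; WebHdfsFileSystem derives these from its retry loop.
      String nnAddr = "nn1.example.com:8020";
      int retry = 2;                      // retries already completed
      String retryPolicy = "FixedSleep";  // hypothetical policy name
      long delayMillis = 1000;
      LOG.info("Retrying connect to namenode: {}. Already retried {}"
              + " time(s); retry policy is {}, delay {}ms.",
          nnAddr, retry, retryPolicy, delayMillis);
    }
  }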
 




[31/50] [abbrv] hadoop git commit: Add release notes, changes, jdiff for 3.0.0-alpha4

Posted by as...@apache.org.
Add release notes, changes, jdiff for 3.0.0-alpha4


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f10864a8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f10864a8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f10864a8

Branch: refs/heads/YARN-5972
Commit: f10864a820c5104d748378aa1c2c408e4aad8a6c
Parents: 7cd0952
Author: Andrew Wang <wa...@apache.org>
Authored: Fri Jul 7 11:01:59 2017 -0700
Committer: Andrew Wang <wa...@apache.org>
Committed: Fri Jul 7 11:01:59 2017 -0700

----------------------------------------------------------------------
 .../3.0.0-alpha4/CHANGES.3.0.0-alpha4.md        | 880 +++++++++++++++++++
 .../3.0.0-alpha4/RELEASENOTES.3.0.0-alpha4.md   | 492 +++++++++++
 .../jdiff/Apache_Hadoop_HDFS_3.0.0-alpha4.xml   | 322 +++++++
 3 files changed, 1694 insertions(+)
----------------------------------------------------------------------





[16/50] [abbrv] hadoop git commit: HDFS-12079. Description of dfs.block.invalidate.limit is incorrect in hdfs-default.xml. Contributed by Weiwei Yang.

Posted by as...@apache.org.
HDFS-12079. Description of dfs.block.invalidate.limit is incorrect in hdfs-default.xml. Contributed by Weiwei Yang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b0560e06
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b0560e06
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b0560e06

Branch: refs/heads/YARN-5972
Commit: b0560e0624756e2b3ce7b6bc741eee3c18d2a873
Parents: bf1f599
Author: Akira Ajisaka <aa...@apache.org>
Authored: Tue Jul 4 14:02:14 2017 +0900
Committer: Akira Ajisaka <aa...@apache.org>
Committed: Tue Jul 4 14:02:14 2017 +0900

----------------------------------------------------------------------
 .../hadoop-hdfs/src/main/resources/hdfs-default.xml             | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b0560e06/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
index be345af..96c04f0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
@@ -3375,7 +3375,10 @@
   <name>dfs.block.invalidate.limit</name>
   <value>1000</value>
   <description>
-    Limit on the list of invalidated block list kept by the Namenode.
+    The maximum number of invalidate blocks sent by namenode to a datanode
+    per heartbeat deletion command. This property works with
+    "dfs.namenode.invalidate.work.pct.per.iteration" to throttle block
+    deletions.
   </description>
 </property>
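
The rewritten description also names the companion knob, so the throttle only makes sense as a pair. As an illustration, the two properties might sit together in hdfs-site.xml as below; 1000 matches the default shown above, while 0.32 is this sketch's assumption for the companion default, not tuning advice.

  <property>
    <name>dfs.block.invalidate.limit</name>
    <value>1000</value>
    <!-- Cap on invalidated blocks sent to one datanode per heartbeat
         deletion command. -->
  </property>
  <property>
    <name>dfs.namenode.invalidate.work.pct.per.iteration</name>
    <value>0.32</value>
    <!-- Fraction of the pending-deletion work the namenode schedules per
         iteration; combines with the limit above to throttle deletions. -->
  </property>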
 




[45/50] [abbrv] hadoop git commit: HDFS-12052. Set SWEBHDFS delegation token kind when ssl is enabled in HttpFS. Contributed by Zoran Dimitrijevic.

Posted by as...@apache.org.
HDFS-12052. Set SWEBHDFS delegation token kind when ssl is enabled in HttpFS. Contributed by Zoran Dimitrijevic.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/12c8fdce
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/12c8fdce
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/12c8fdce

Branch: refs/heads/YARN-5972
Commit: 12c8fdceaf263425661169cba25402df89d444c1
Parents: 3a7f02b
Author: John Zhuge <jz...@apache.org>
Authored: Tue Jul 11 11:19:08 2017 -0700
Committer: John Zhuge <jz...@apache.org>
Committed: Tue Jul 11 11:19:08 2017 -0700

----------------------------------------------------------------------
 .../http/server/HttpFSAuthenticationFilter.java |   6 +-
 .../fs/http/server/HttpFSServerWebServer.java   |   2 +-
 ...KerberosAuthenticationHandlerForTesting.java |   1 -
 .../hadoop/fs/http/server/TestHttpFSServer.java | 264 +++++++++++--------
 4 files changed, 154 insertions(+), 119 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/12c8fdce/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSAuthenticationFilter.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSAuthenticationFilter.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSAuthenticationFilter.java
index 37640aa..a4e4385 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSAuthenticationFilter.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSAuthenticationFilter.java
@@ -95,8 +95,12 @@ public class HttpFSAuthenticationFilter
       throw new RuntimeException("Could not read HttpFS signature secret file: " + signatureSecretFile);
     }
     setAuthHandlerClass(props);
+    String dtkind = WebHdfsConstants.WEBHDFS_TOKEN_KIND.toString();
+    if (conf.getBoolean(HttpFSServerWebServer.SSL_ENABLED_KEY, false)) {
+      dtkind = WebHdfsConstants.SWEBHDFS_TOKEN_KIND.toString();
+    }
     props.setProperty(KerberosDelegationTokenAuthenticationHandler.TOKEN_KIND,
-        WebHdfsConstants.WEBHDFS_TOKEN_KIND.toString());
+                      dtkind);
     return props;
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/12c8fdce/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServerWebServer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServerWebServer.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServerWebServer.java
index 6bd0d12..d8706c5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServerWebServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServerWebServer.java
@@ -52,7 +52,7 @@ public class HttpFSServerWebServer {
   private static final String HTTP_HOST_DEFAULT = "0.0.0.0";
 
   // SSL properties
-  private static final String SSL_ENABLED_KEY = "hadoop.httpfs.ssl.enabled";
+  static final String SSL_ENABLED_KEY = "httpfs.ssl.enabled";
   private static final boolean SSL_ENABLED_DEFAULT = false;
 
   private static final String HTTP_ADMINS_KEY =

http://git-wip-us.apache.org/repos/asf/hadoop/blob/12c8fdce/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/HttpFSKerberosAuthenticationHandlerForTesting.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/HttpFSKerberosAuthenticationHandlerForTesting.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/HttpFSKerberosAuthenticationHandlerForTesting.java
index 9a51bd3..8ac7da3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/HttpFSKerberosAuthenticationHandlerForTesting.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/HttpFSKerberosAuthenticationHandlerForTesting.java
@@ -28,7 +28,6 @@ public class HttpFSKerberosAuthenticationHandlerForTesting
   @Override
   public void init(Properties config) throws ServletException {
     //NOP overwrite to avoid Kerberos initialization
-    config.setProperty(TOKEN_KIND, "t");
     initTokenManager(config);
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/12c8fdce/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServer.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServer.java
index 91d22c8..7cdb39c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServer.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.fs.http.server;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.security.authentication.util.SignerSecretProvider;
 import org.apache.hadoop.security.authentication.util.StringSignerSecretProviderCreator;
+import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdentifier;
 import org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticator;
 import org.apache.hadoop.security.token.delegation.web.KerberosDelegationTokenAuthenticationHandler;
 import org.json.simple.JSONArray;
@@ -46,12 +47,14 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.XAttrCodec;
+import org.apache.hadoop.hdfs.web.WebHdfsConstants;
 import org.apache.hadoop.lib.server.Service;
 import org.apache.hadoop.lib.server.ServiceException;
 import org.apache.hadoop.lib.service.Groups;
 import org.apache.hadoop.security.authentication.client.AuthenticatedURL;
 import org.apache.hadoop.security.authentication.server.AuthenticationToken;
 import org.apache.hadoop.security.authentication.util.Signer;
+import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.test.HFSTestCase;
 import org.apache.hadoop.test.HadoopUsersConfTestHelper;
 import org.apache.hadoop.test.TestDir;
@@ -120,8 +123,8 @@ public class TestHttpFSServer extends HFSTestCase {
 
   }
 
-  private void createHttpFSServer(boolean addDelegationTokenAuthHandler)
-    throws Exception {
+  private Configuration createHttpFSConf(boolean addDelegationTokenAuthHandler,
+                                         boolean sslEnabled) throws Exception {
     File homeDir = TestDirHelper.getTestDir();
     Assert.assertTrue(new File(homeDir, "conf").mkdir());
     Assert.assertTrue(new File(homeDir, "log").mkdir());
@@ -133,10 +136,11 @@ public class TestHttpFSServer extends HFSTestCase {
     w.write("secret");
     w.close();
 
-    //HDFS configuration
+    // HDFS configuration
     File hadoopConfDir = new File(new File(homeDir, "conf"), "hadoop-conf");
     hadoopConfDir.mkdirs();
     Configuration hdfsConf = TestHdfsHelper.getHdfsConf();
+
     // Http Server's conf should be based on HDFS's conf
     Configuration conf = new Configuration(hdfsConf);
     conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
@@ -146,26 +150,39 @@ public class TestHttpFSServer extends HFSTestCase {
     conf.writeXml(os);
     os.close();
 
-    //HTTPFS configuration
+    // HTTPFS configuration
     conf = new Configuration(false);
     if (addDelegationTokenAuthHandler) {
-     conf.set("httpfs.authentication.type",
-              HttpFSKerberosAuthenticationHandlerForTesting.class.getName());
+      conf.set("httpfs.authentication.type",
+               HttpFSKerberosAuthenticationHandlerForTesting.class.getName());
     }
     conf.set("httpfs.services.ext", MockGroups.class.getName());
     conf.set("httpfs.admin.group", HadoopUsersConfTestHelper.
-      getHadoopUserGroups(HadoopUsersConfTestHelper.getHadoopUsers()[0])[0]);
-    conf.set("httpfs.proxyuser." + HadoopUsersConfTestHelper.getHadoopProxyUser() + ".groups",
+        getHadoopUserGroups(HadoopUsersConfTestHelper.getHadoopUsers()[0])[0]);
+    conf.set("httpfs.proxyuser." +
+             HadoopUsersConfTestHelper.getHadoopProxyUser() + ".groups",
              HadoopUsersConfTestHelper.getHadoopProxyUserGroups());
-    conf.set("httpfs.proxyuser." + HadoopUsersConfTestHelper.getHadoopProxyUser() + ".hosts",
+    conf.set("httpfs.proxyuser." +
+             HadoopUsersConfTestHelper.getHadoopProxyUser() + ".hosts",
              HadoopUsersConfTestHelper.getHadoopProxyUserHosts());
-    conf.set("httpfs.authentication.signature.secret.file", secretFile.getAbsolutePath());
+    conf.set("httpfs.authentication.signature.secret.file",
+             secretFile.getAbsolutePath());
     conf.set("httpfs.hadoop.config.dir", hadoopConfDir.toString());
+    if (sslEnabled) {
+      conf.set("httpfs.ssl.enabled", "true");
+    }
     File httpfsSite = new File(new File(homeDir, "conf"), "httpfs-site.xml");
     os = new FileOutputStream(httpfsSite);
     conf.writeXml(os);
     os.close();
+    return conf;
+  }
 
+  private void createHttpFSServer(boolean addDelegationTokenAuthHandler,
+                                  boolean sslEnabled)
+      throws Exception {
+    Configuration conf = createHttpFSConf(addDelegationTokenAuthHandler,
+                                          sslEnabled);
     ClassLoader cl = Thread.currentThread().getContextClassLoader();
     URL url = cl.getResource("webapp");
     WebAppContext context = new WebAppContext(url.getPath(), "/webhdfs");
@@ -177,12 +194,110 @@ public class TestHttpFSServer extends HFSTestCase {
     }
   }
 
+  private String getSignedTokenString()
+      throws Exception {
+    AuthenticationToken token = new AuthenticationToken("u", "p",
+        new KerberosDelegationTokenAuthenticationHandler().getType());
+    token.setExpires(System.currentTimeMillis() + 100000000);
+    SignerSecretProvider secretProvider =
+        StringSignerSecretProviderCreator.newStringSignerSecretProvider();
+    Properties secretProviderProps = new Properties();
+    secretProviderProps.setProperty(
+        AuthenticationFilter.SIGNATURE_SECRET, "secret");
+    secretProvider.init(secretProviderProps, null, -1);
+    Signer signer = new Signer(secretProvider);
+    return signer.sign(token.toString());
+  }
+
+  private void delegationTokenCommonTests(boolean sslEnabled) throws Exception {
+    URL url = new URL(TestJettyHelper.getJettyURL(),
+                      "/webhdfs/v1/?op=GETHOMEDIRECTORY");
+    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
+    Assert.assertEquals(HttpURLConnection.HTTP_UNAUTHORIZED,
+                        conn.getResponseCode());
+
+    String tokenSigned = getSignedTokenString();
+
+    url = new URL(TestJettyHelper.getJettyURL(),
+                  "/webhdfs/v1/?op=GETDELEGATIONTOKEN");
+    conn = (HttpURLConnection) url.openConnection();
+    conn.setRequestProperty("Cookie",
+                            AuthenticatedURL.AUTH_COOKIE  + "=" + tokenSigned);
+    Assert.assertEquals(HttpURLConnection.HTTP_OK,
+                        conn.getResponseCode());
+
+    JSONObject json = (JSONObject)new JSONParser().parse(
+        new InputStreamReader(conn.getInputStream()));
+    json = (JSONObject)
+      json.get(DelegationTokenAuthenticator.DELEGATION_TOKEN_JSON);
+    String tokenStr = (String)json.get(
+        DelegationTokenAuthenticator.DELEGATION_TOKEN_URL_STRING_JSON);
+
+    Token<AbstractDelegationTokenIdentifier> dToken =
+        new Token<AbstractDelegationTokenIdentifier>();
+    dToken.decodeFromUrlString(tokenStr);
+    Assert.assertEquals(sslEnabled ?
+        WebHdfsConstants.SWEBHDFS_TOKEN_KIND :
+        WebHdfsConstants.WEBHDFS_TOKEN_KIND,
+        dToken.getKind());
+
+    url = new URL(TestJettyHelper.getJettyURL(),
+                  "/webhdfs/v1/?op=GETHOMEDIRECTORY&delegation=" + tokenStr);
+    conn = (HttpURLConnection) url.openConnection();
+    Assert.assertEquals(HttpURLConnection.HTTP_OK,
+                        conn.getResponseCode());
+
+    url = new URL(TestJettyHelper.getJettyURL(),
+                  "/webhdfs/v1/?op=RENEWDELEGATIONTOKEN&token=" + tokenStr);
+    conn = (HttpURLConnection) url.openConnection();
+    conn.setRequestMethod("PUT");
+    Assert.assertEquals(HttpURLConnection.HTTP_UNAUTHORIZED,
+                        conn.getResponseCode());
+
+    url = new URL(TestJettyHelper.getJettyURL(),
+                  "/webhdfs/v1/?op=RENEWDELEGATIONTOKEN&token=" + tokenStr);
+    conn = (HttpURLConnection) url.openConnection();
+    conn.setRequestMethod("PUT");
+    conn.setRequestProperty("Cookie",
+                            AuthenticatedURL.AUTH_COOKIE  + "=" + tokenSigned);
+    Assert.assertEquals(HttpURLConnection.HTTP_OK,
+                        conn.getResponseCode());
+
+    url = new URL(TestJettyHelper.getJettyURL(),
+                  "/webhdfs/v1/?op=CANCELDELEGATIONTOKEN&token=" + tokenStr);
+    conn = (HttpURLConnection) url.openConnection();
+    conn.setRequestMethod("PUT");
+    Assert.assertEquals(HttpURLConnection.HTTP_OK,
+                        conn.getResponseCode());
+
+    url = new URL(TestJettyHelper.getJettyURL(),
+                  "/webhdfs/v1/?op=GETHOMEDIRECTORY&delegation=" + tokenStr);
+    conn = (HttpURLConnection) url.openConnection();
+    Assert.assertEquals(HttpURLConnection.HTTP_FORBIDDEN,
+                        conn.getResponseCode());
+
+    // getTrash test with delegation
+    url = new URL(TestJettyHelper.getJettyURL(),
+        "/webhdfs/v1/?op=GETTRASHROOT&delegation=" + tokenStr);
+    conn = (HttpURLConnection) url.openConnection();
+    Assert.assertEquals(HttpURLConnection.HTTP_FORBIDDEN,
+        conn.getResponseCode());
+
+    url = new URL(TestJettyHelper.getJettyURL(),
+        "/webhdfs/v1/?op=GETTRASHROOT");
+    conn = (HttpURLConnection) url.openConnection();
+    conn.setRequestProperty("Cookie",
+        AuthenticatedURL.AUTH_COOKIE  + "=" + tokenSigned);
+    Assert.assertEquals(HttpURLConnection.HTTP_OK,
+        conn.getResponseCode());
+  }
+
   @Test
   @TestDir
   @TestJetty
   @TestHdfs
   public void instrumentation() throws Exception {
-    createHttpFSServer(false);
+    createHttpFSServer(false, false);
 
     URL url = new URL(TestJettyHelper.getJettyURL(),
                       MessageFormat.format("/webhdfs/v1?user.name={0}&op=instrumentation", "nobody"));
@@ -211,7 +326,7 @@ public class TestHttpFSServer extends HFSTestCase {
   @TestJetty
   @TestHdfs
   public void testHdfsAccess() throws Exception {
-    createHttpFSServer(false);
+    createHttpFSServer(false, false);
 
     String user = HadoopUsersConfTestHelper.getHadoopUsers()[0];
     URL url = new URL(TestJettyHelper.getJettyURL(),
@@ -228,7 +343,7 @@ public class TestHttpFSServer extends HFSTestCase {
   @TestJetty
   @TestHdfs
   public void testMkdirs() throws Exception {
-    createHttpFSServer(false);
+    createHttpFSServer(false, false);
 
     String user = HadoopUsersConfTestHelper.getHadoopUsers()[0];
     URL url = new URL(TestJettyHelper.getJettyURL(), MessageFormat.format(
@@ -246,7 +361,7 @@ public class TestHttpFSServer extends HFSTestCase {
   @TestJetty
   @TestHdfs
   public void testGlobFilter() throws Exception {
-    createHttpFSServer(false);
+    createHttpFSServer(false, false);
 
     FileSystem fs = FileSystem.get(TestHdfsHelper.getHdfsConf());
     fs.mkdirs(new Path("/tmp"));
@@ -438,7 +553,7 @@ public class TestHttpFSServer extends HFSTestCase {
   @TestJetty
   @TestHdfs
   public void testPerms() throws Exception {
-    createHttpFSServer(false);
+    createHttpFSServer(false, false);
 
     FileSystem fs = FileSystem.get(TestHdfsHelper.getHdfsConf());
     fs.mkdirs(new Path("/perm"));
@@ -474,9 +589,9 @@ public class TestHttpFSServer extends HFSTestCase {
     final byte[] value2 = new byte[]{0x41, 0x42, 0x43};
     final String dir = "/xattrTest";
     final String path = dir + "/file";
-    
-    createHttpFSServer(false);
-    
+
+    createHttpFSServer(false, false);
+
     FileSystem fs = FileSystem.get(TestHdfsHelper.getHdfsConf());
     fs.mkdirs(new Path(dir));
     
@@ -544,7 +659,7 @@ public class TestHttpFSServer extends HFSTestCase {
     String statusJson;
     List<String> aclEntries;
 
-    createHttpFSServer(false);
+    createHttpFSServer(false, false);
 
     FileSystem fs = FileSystem.get(TestHdfsHelper.getHdfsConf());
     fs.mkdirs(new Path(dir));
@@ -627,7 +742,7 @@ public class TestHttpFSServer extends HFSTestCase {
     String statusJson;
     List<String> aclEntries;
 
-    createHttpFSServer(false);
+    createHttpFSServer(false, false);
 
     FileSystem fs = FileSystem.get(TestHdfsHelper.getHdfsConf());
     fs.mkdirs(new Path(dir));
@@ -665,7 +780,7 @@ public class TestHttpFSServer extends HFSTestCase {
   @TestJetty
   @TestHdfs
   public void testOpenOffsetLength() throws Exception {
-    createHttpFSServer(false);
+    createHttpFSServer(false, false);
 
     byte[] array = new byte[]{0, 1, 2, 3};
     FileSystem fs = FileSystem.get(TestHdfsHelper.getHdfsConf());
@@ -690,7 +805,7 @@ public class TestHttpFSServer extends HFSTestCase {
   @TestJetty
   @TestHdfs
   public void testPutNoOperation() throws Exception {
-    createHttpFSServer(false);
+    createHttpFSServer(false, false);
 
     String user = HadoopUsersConfTestHelper.getHadoopUsers()[0];
     URL url = new URL(TestJettyHelper.getJettyURL(),
@@ -708,7 +823,7 @@ public class TestHttpFSServer extends HFSTestCase {
   @TestHdfs
   public void testGetTrashRoot() throws Exception {
     String user = HadoopUsersConfTestHelper.getHadoopUsers()[0];
-    createHttpFSServer(false);
+    createHttpFSServer(false, false);
     String trashJson = getStatus("/", "GETTRASHROOT");
     String trashPath = getPath(trashJson);
 
@@ -741,99 +856,16 @@ public class TestHttpFSServer extends HFSTestCase {
   @TestJetty
   @TestHdfs
   public void testDelegationTokenOperations() throws Exception {
-    createHttpFSServer(true);
-
-    URL url = new URL(TestJettyHelper.getJettyURL(),
-                      "/webhdfs/v1/?op=GETHOMEDIRECTORY");
-    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
-    Assert.assertEquals(HttpURLConnection.HTTP_UNAUTHORIZED,
-                        conn.getResponseCode());
-
-
-    AuthenticationToken token =
-      new AuthenticationToken("u", "p",
-          new KerberosDelegationTokenAuthenticationHandler().getType());
-    token.setExpires(System.currentTimeMillis() + 100000000);
-    SignerSecretProvider secretProvider =
-        StringSignerSecretProviderCreator.newStringSignerSecretProvider();
-    Properties secretProviderProps = new Properties();
-    secretProviderProps.setProperty(AuthenticationFilter.SIGNATURE_SECRET, "secret");
-    secretProvider.init(secretProviderProps, null, -1);
-    Signer signer = new Signer(secretProvider);
-    String tokenSigned = signer.sign(token.toString());
-
-    url = new URL(TestJettyHelper.getJettyURL(),
-                  "/webhdfs/v1/?op=GETHOMEDIRECTORY");
-    conn = (HttpURLConnection) url.openConnection();
-    conn.setRequestProperty("Cookie",
-                            AuthenticatedURL.AUTH_COOKIE  + "=" + tokenSigned);
-    Assert.assertEquals(HttpURLConnection.HTTP_OK,
-                        conn.getResponseCode());
-
-    url = new URL(TestJettyHelper.getJettyURL(),
-                  "/webhdfs/v1/?op=GETDELEGATIONTOKEN");
-    conn = (HttpURLConnection) url.openConnection();
-    conn.setRequestProperty("Cookie",
-                            AuthenticatedURL.AUTH_COOKIE  + "=" + tokenSigned);
-    Assert.assertEquals(HttpURLConnection.HTTP_OK,
-                        conn.getResponseCode());
-
-    JSONObject json = (JSONObject)
-      new JSONParser().parse(new InputStreamReader(conn.getInputStream()));
-    json = (JSONObject)
-      json.get(DelegationTokenAuthenticator.DELEGATION_TOKEN_JSON);
-    String tokenStr = (String)
-        json.get(DelegationTokenAuthenticator.DELEGATION_TOKEN_URL_STRING_JSON);
-
-    url = new URL(TestJettyHelper.getJettyURL(),
-                  "/webhdfs/v1/?op=GETHOMEDIRECTORY&delegation=" + tokenStr);
-    conn = (HttpURLConnection) url.openConnection();
-    Assert.assertEquals(HttpURLConnection.HTTP_OK,
-                        conn.getResponseCode());
-
-    url = new URL(TestJettyHelper.getJettyURL(),
-                  "/webhdfs/v1/?op=RENEWDELEGATIONTOKEN&token=" + tokenStr);
-    conn = (HttpURLConnection) url.openConnection();
-    conn.setRequestMethod("PUT");
-    Assert.assertEquals(HttpURLConnection.HTTP_UNAUTHORIZED,
-                        conn.getResponseCode());
-
-    url = new URL(TestJettyHelper.getJettyURL(),
-                  "/webhdfs/v1/?op=RENEWDELEGATIONTOKEN&token=" + tokenStr);
-    conn = (HttpURLConnection) url.openConnection();
-    conn.setRequestMethod("PUT");
-    conn.setRequestProperty("Cookie",
-                            AuthenticatedURL.AUTH_COOKIE  + "=" + tokenSigned);
-    Assert.assertEquals(HttpURLConnection.HTTP_OK,
-                        conn.getResponseCode());
-
-    url = new URL(TestJettyHelper.getJettyURL(),
-                  "/webhdfs/v1/?op=CANCELDELEGATIONTOKEN&token=" + tokenStr);
-    conn = (HttpURLConnection) url.openConnection();
-    conn.setRequestMethod("PUT");
-    Assert.assertEquals(HttpURLConnection.HTTP_OK,
-                        conn.getResponseCode());
-
-    url = new URL(TestJettyHelper.getJettyURL(),
-                  "/webhdfs/v1/?op=GETHOMEDIRECTORY&delegation=" + tokenStr);
-    conn = (HttpURLConnection) url.openConnection();
-    Assert.assertEquals(HttpURLConnection.HTTP_FORBIDDEN,
-                        conn.getResponseCode());
-
-    // getTrash test with delegation
-    url = new URL(TestJettyHelper.getJettyURL(),
-        "/webhdfs/v1/?op=GETTRASHROOT&delegation=" + tokenStr);
-    conn = (HttpURLConnection) url.openConnection();
-    Assert.assertEquals(HttpURLConnection.HTTP_FORBIDDEN,
-        conn.getResponseCode());
-
-    url = new URL(TestJettyHelper.getJettyURL(),
-        "/webhdfs/v1/?op=GETTRASHROOT");
-    conn = (HttpURLConnection) url.openConnection();
-    conn.setRequestProperty("Cookie",
-        AuthenticatedURL.AUTH_COOKIE  + "=" + tokenSigned);
-    Assert.assertEquals(HttpURLConnection.HTTP_OK,
-        conn.getResponseCode());
+    createHttpFSServer(true, false);
+    delegationTokenCommonTests(false);
   }
 
+  @Test
+  @TestDir
+  @TestJetty
+  @TestHdfs
+  public void testDelegationTokenOperationsSsl() throws Exception {
+    createHttpFSServer(true, true);
+    delegationTokenCommonTests(true);
+  }
 }
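
In deployment terms, the advertised token kind now tracks the transport, so swebhdfs:// clients can match the tokens HttpFS hands out. A minimal sketch of the switch in httpfs-site.xml, assuming keystores and the rest of the SSL setup are configured separately:

  <property>
    <name>httpfs.ssl.enabled</name>
    <value>true</value>
    <!-- With SSL on, HttpFS issues SWEBHDFS-kind delegation tokens
         instead of WEBHDFS-kind ones. -->
  </property>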




[46/50] [abbrv] hadoop git commit: HADOOP-14535 wasb: implement high-performance random access and seek of block blobs. Contributed by Thomas Marquardt

Posted by as...@apache.org.
HADOOP-14535 wasb: implement high-performance random access and seek of block blobs.
Contributed by Thomas Marquardt


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d670c3a4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d670c3a4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d670c3a4

Branch: refs/heads/YARN-5972
Commit: d670c3a4da7dd80dccf6c6308603bb3bb013b3b0
Parents: 12c8fdc
Author: Steve Loughran <st...@apache.org>
Authored: Tue Jul 11 21:34:27 2017 +0100
Committer: Steve Loughran <st...@apache.org>
Committed: Tue Jul 11 21:34:27 2017 +0100

----------------------------------------------------------------------
 .../hadoop/fs/contract/ContractTestUtils.java   |   8 +
 .../fs/azure/AzureNativeFileSystemStore.java    |  78 +-
 .../hadoop/fs/azure/BlockBlobInputStream.java   | 396 ++++++++++
 .../hadoop/fs/azure/NativeAzureFileSystem.java  |  38 +-
 .../fs/azure/NativeAzureFileSystemHelper.java   |  28 +
 .../hadoop/fs/azure/NativeFileSystemStore.java  |   6 +-
 .../fs/azure/SecureStorageInterfaceImpl.java    |   5 +
 .../hadoop/fs/azure/StorageInterface.java       |  11 +-
 .../hadoop/fs/azure/StorageInterfaceImpl.java   |   5 +
 .../fs/azure/AzureBlobStorageTestAccount.java   |  40 +-
 .../hadoop/fs/azure/MockStorageInterface.java   |  36 +-
 .../azure/TestAzureConcurrentOutOfBandIo.java   |   2 +-
 .../fs/azure/TestBlockBlobInputStream.java      | 756 +++++++++++++++++++
 13 files changed, 1325 insertions(+), 84 deletions(-)
----------------------------------------------------------------------
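
Before the per-file diffs, a minimal sketch (not part of this commit) of how
a test might pin the stream implementation before opening a WASB file system.
The configuration key is marked @VisibleForTesting in the change below and is
intended for internal use only; the account URI and path are placeholders.

    import java.io.InputStream;
    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class InputStreamVersionSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // 1 = original BlobInputStream, 2 = new BlockBlobInputStream (default)
        conf.setInt("fs.azure.input.stream.version.for.internal.use.only", 2);
        FileSystem fs = FileSystem.get(
            new URI("wasb://container@account.blob.core.windows.net"), conf);
        try (InputStream in = fs.open(new Path("/example.txt"))) {
          System.out.println("first byte: " + in.read());
        }
      }
    }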


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d670c3a4/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/ContractTestUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/ContractTestUtils.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/ContractTestUtils.java
index fd77045..39c6d18 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/ContractTestUtils.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/ContractTestUtils.java
@@ -1486,6 +1486,14 @@ public class ContractTestUtils extends Assert {
       return now() - startTime;
     }
 
+    /**
+     * Elapsed time in milliseconds; no rounding.
+     * @return elapsed time in milliseconds
+     */
+    public long elapsedTimeMs() {
+      return elapsedTime() / 1000000;
+    }
+
     public double bandwidth(long bytes) {
       return bandwidthMBs(bytes, duration());
     }
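
The new elapsedTimeMs() helper is exercised by the WASB tests later in this
commit. A hedged, self-contained sketch of its use (the sleep is a stand-in
for the work being measured):

    import org.apache.hadoop.fs.contract.ContractTestUtils.NanoTimer;

    public class TimerSketch {
      public static void main(String[] args) throws Exception {
        NanoTimer timer = new NanoTimer();
        Thread.sleep(50);                 // stand-in for the measured work
        long ms = timer.elapsedTimeMs();  // elapsedTime() nanoseconds / 1,000,000
        timer.end("sample operation (%d ms)", ms);
      }
    }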

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d670c3a4/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
index 5fa964a..6b6f07a 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
@@ -19,7 +19,6 @@
 package org.apache.hadoop.fs.azure;
 import static org.apache.hadoop.fs.azure.NativeAzureFileSystem.PATH_DELIMITER;
 
-import java.io.DataInputStream;
 import java.io.DataOutputStream;
 import java.io.IOException;
 import java.io.InputStream;
@@ -121,6 +120,8 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore {
   private static final String KEY_STREAM_MIN_READ_SIZE = "fs.azure.read.request.size";
   private static final String KEY_STORAGE_CONNECTION_TIMEOUT = "fs.azure.storage.timeout";
   private static final String KEY_WRITE_BLOCK_SIZE = "fs.azure.write.request.size";
+  @VisibleForTesting
+  static final String KEY_INPUT_STREAM_VERSION = "fs.azure.input.stream.version.for.internal.use.only";
 
   // Property controlling whether to allow reads on blob which are concurrently
   // appended out-of-band.
@@ -222,6 +223,8 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore {
   public static final int DEFAULT_DOWNLOAD_BLOCK_SIZE = 4 * 1024 * 1024;
   public static final int DEFAULT_UPLOAD_BLOCK_SIZE = 4 * 1024 * 1024;
 
+  private static final int DEFAULT_INPUT_STREAM_VERSION = 2;
+
   // Retry parameter defaults.
   //
 
@@ -280,6 +283,7 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore {
 
   private int downloadBlockSizeBytes = DEFAULT_DOWNLOAD_BLOCK_SIZE;
   private int uploadBlockSizeBytes = DEFAULT_UPLOAD_BLOCK_SIZE;
+  private int inputStreamVersion = DEFAULT_INPUT_STREAM_VERSION;
 
   // Bandwidth throttling exponential back-off parameters
   //
@@ -691,6 +695,9 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore {
     this.uploadBlockSizeBytes = sessionConfiguration.getInt(
         KEY_WRITE_BLOCK_SIZE, DEFAULT_UPLOAD_BLOCK_SIZE);
 
+    this.inputStreamVersion = sessionConfiguration.getInt(
+        KEY_INPUT_STREAM_VERSION, DEFAULT_INPUT_STREAM_VERSION);
+
     // The job may want to specify a timeout to use when engaging the
     // storage service. The default is currently 90 seconds. It may
     // be necessary to increase this value for long latencies in larger
@@ -1417,8 +1424,18 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore {
   private InputStream openInputStream(CloudBlobWrapper blob)
       throws StorageException, IOException {
     if (blob instanceof CloudBlockBlobWrapper) {
-      return blob.openInputStream(getDownloadOptions(),
-          getInstrumentedContext(isConcurrentOOBAppendAllowed()));
+      LOG.debug("Using stream seek algorithm {}", inputStreamVersion);
+      switch(inputStreamVersion) {
+      case 1:
+        return blob.openInputStream(getDownloadOptions(),
+            getInstrumentedContext(isConcurrentOOBAppendAllowed()));
+      case 2:
+        return new BlockBlobInputStream((CloudBlockBlobWrapper) blob,
+            getDownloadOptions(),
+            getInstrumentedContext(isConcurrentOOBAppendAllowed()));
+      default:
+        throw new IOException("Unknown seek algorithm: " + inputStreamVersion);
+      }
     } else {
       return new PageBlobInputStream(
           (CloudPageBlobWrapper) blob, getInstrumentedContext(
@@ -2023,32 +2040,12 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore {
   }
 
   @Override
-  public DataInputStream retrieve(String key) throws AzureException, IOException {
-      try {
-        // Check if a session exists, if not create a session with the
-        // Azure storage server.
-        if (null == storageInteractionLayer) {
-          final String errMsg = String.format(
-              "Storage session expected for URI '%s' but does not exist.",
-              sessionUri);
-          throw new AssertionError(errMsg);
-        }
-        checkContainer(ContainerAccessType.PureRead);
-
-        // Get blob reference and open the input buffer stream.
-        CloudBlobWrapper blob = getBlobReference(key);
-
-        // Return a data input stream.
-        DataInputStream inDataStream = new DataInputStream(openInputStream(blob));
-        return inDataStream;
-    } catch (Exception e) {
-      // Re-throw as an Azure storage exception.
-      throw new AzureException(e);
-    }
+  public InputStream retrieve(String key) throws AzureException, IOException {
+    return retrieve(key, 0);
   }
 
   @Override
-  public DataInputStream retrieve(String key, long startByteOffset)
+  public InputStream retrieve(String key, long startByteOffset)
       throws AzureException, IOException {
       try {
         // Check if a session exists, if not create a session with the
@@ -2061,24 +2058,19 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore {
         }
         checkContainer(ContainerAccessType.PureRead);
 
-        // Get blob reference and open the input buffer stream.
-        CloudBlobWrapper blob = getBlobReference(key);
-
-        // Open input stream and seek to the start offset.
-        InputStream in = blob.openInputStream(
-          getDownloadOptions(), getInstrumentedContext(isConcurrentOOBAppendAllowed()));
-
-        // Create a data input stream.
-	    DataInputStream inDataStream = new DataInputStream(in);
-	    
-	    // Skip bytes and ignore return value. This is okay
-	    // because if you try to skip too far you will be positioned
-	    // at the end and reads will not return data.
-	    inDataStream.skip(startByteOffset);
-        return inDataStream;
+        InputStream inputStream = openInputStream(getBlobReference(key));
+        if (startByteOffset > 0) {
+          // Skip bytes and ignore return value. This is okay
+          // because if you try to skip too far you will be positioned
+          // at the end and reads will not return data.
+          inputStream.skip(startByteOffset);
+        }
+        return inputStream;
+    } catch (IOException e) {
+        throw e;
     } catch (Exception e) {
-      // Re-throw as an Azure storage exception.
-      throw new AzureException(e);
+        // Re-throw as an Azure storage exception.
+        throw new AzureException(e);
     }
   }
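
The "skip bytes and ignore return value" comment above leans on standard
InputStream.skip() semantics: skipping past the end is not an error, and
later reads simply return -1. A self-contained, JDK-only illustration:

    import java.io.ByteArrayInputStream;
    import java.io.InputStream;

    public class SkipSemanticsSketch {
      public static void main(String[] args) throws Exception {
        InputStream in = new ByteArrayInputStream(new byte[] {1, 2, 3});
        long skipped = in.skip(10);    // asked for 10, only 3 available
        System.out.println(skipped);   // 3 -- skipping "too far" is not an error
        System.out.println(in.read()); // -1 -- subsequent reads just hit EOF
      }
    }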
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d670c3a4/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/BlockBlobInputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/BlockBlobInputStream.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/BlockBlobInputStream.java
new file mode 100644
index 0000000..2ed0686
--- /dev/null
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/BlockBlobInputStream.java
@@ -0,0 +1,396 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.azure;
+
+import java.io.EOFException;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+
+import com.microsoft.azure.storage.OperationContext;
+import com.microsoft.azure.storage.StorageException;
+import com.microsoft.azure.storage.blob.BlobRequestOptions;
+
+import org.apache.hadoop.fs.FSExceptionMessages;
+import org.apache.hadoop.fs.Seekable;
+import org.apache.hadoop.fs.azure.StorageInterface.CloudBlockBlobWrapper;
+
+/**
+ * Encapsulates the BlobInputStream used by block blobs and adds support for
+ * random access and seek. Random access performance is improved by several
+ * orders of magnitude.
+ */
+final class BlockBlobInputStream extends InputStream implements Seekable {
+  private final CloudBlockBlobWrapper blob;
+  private final BlobRequestOptions options;
+  private final OperationContext opContext;
+  private InputStream blobInputStream = null;
+  private int minimumReadSizeInBytes = 0;
+  private long streamPositionAfterLastRead = -1;
+  private long streamPosition = 0;
+  private long streamLength = 0;
+  private boolean closed = false;
+  private byte[] streamBuffer;
+  private int streamBufferPosition;
+  private int streamBufferLength;
+
+  /**
+   * Creates a seek-able stream for reading from block blobs.
+   * @param blob a block blob reference.
+   * @param options the blob request options.
+   * @param opContext the blob operation context.
+   * @throws IOException IO failure
+   */
+  BlockBlobInputStream(CloudBlockBlobWrapper blob,
+      BlobRequestOptions options,
+      OperationContext opContext) throws IOException {
+    this.blob = blob;
+    this.options = options;
+    this.opContext = opContext;
+
+    this.minimumReadSizeInBytes = blob.getStreamMinimumReadSizeInBytes();
+
+    try {
+      this.blobInputStream = blob.openInputStream(options, opContext);
+    } catch (StorageException e) {
+      throw new IOException(e);
+    }
+
+    this.streamLength = blob.getProperties().getLength();
+  }
+
+  private void checkState() throws IOException {
+    if (closed) {
+      throw new IOException(FSExceptionMessages.STREAM_IS_CLOSED);
+    }
+  }
+
+  /**
+   * Gets the read position of the stream.
+   * @return the zero-based byte offset of the read position.
+   * @throws IOException IO failure
+   */
+  @Override
+  public synchronized long getPos() throws IOException {
+    checkState();
+    return streamPosition;
+  }
+
+  /**
+   * Sets the read position of the stream.
+   * @param pos a zero-based byte offset in the stream.
+   * @throws EOFException if read is out of range
+   */
+  @Override
+  public synchronized void seek(long pos) throws IOException {
+    checkState();
+    if (pos < 0) {
+      throw new EOFException(FSExceptionMessages.NEGATIVE_SEEK + " " + pos);
+    }
+    if (pos > streamLength) {
+      throw new EOFException(
+          FSExceptionMessages.CANNOT_SEEK_PAST_EOF + " " + pos);
+    }
+    if (pos == getPos()) {
+      // no-op, no state change
+      return;
+    }
+
+    if (streamBuffer != null) {
+      long offset = streamPosition - pos;
+      if (offset > 0 && offset < streamBufferLength) {
+        streamBufferPosition = streamBufferLength - (int) offset;
+      } else {
+        streamBufferPosition = streamBufferLength;
+      }
+    }
+
+    streamPosition = pos;
+    // close BlobInputStream after seek is invoked because BlobInputStream
+    // does not support seek
+    closeBlobInputStream();
+  }
+
+  /**
+   * Seeks a secondary copy of the data. This method is not supported.
+   * @param targetPos a zero-based byte offset in the stream.
+   * @return false
+   * @throws IOException IO failure
+   */
+  @Override
+  public boolean seekToNewSource(long targetPos) throws IOException {
+    return false;
+  }
+
+  /**
+   * Gets the number of bytes that can be read (or skipped over) without
+   * performing a network operation.
+   * @throws IOException IO failure
+   */
+  @Override
+  public synchronized int available() throws IOException {
+    checkState();
+    if (blobInputStream != null) {
+      return blobInputStream.available();
+    } else {
+      return (streamBuffer == null)
+          ? 0
+          : streamBufferLength - streamBufferPosition;
+    }
+  }
+
+  private void closeBlobInputStream() throws IOException {
+    if (blobInputStream != null) {
+      try {
+        blobInputStream.close();
+      } finally {
+        blobInputStream = null;
+      }
+    }
+  }
+
+  /**
+   * Closes this stream and releases any system resources associated with it.
+   * @throws IOException IO failure
+   */
+  @Override
+  public synchronized void close() throws IOException {
+    closed = true;
+    closeBlobInputStream();
+    streamBuffer = null;
+    streamBufferPosition = 0;
+    streamBufferLength = 0;
+  }
+
+  private int doNetworkRead(byte[] buffer, int offset, int len)
+      throws IOException {
+    MemoryOutputStream outputStream;
+    boolean needToCopy = false;
+
+    if (streamPositionAfterLastRead == streamPosition) {
+      // caller is reading sequentially, so initialize the stream buffer
+      if (streamBuffer == null) {
+        streamBuffer = new byte[(int) Math.min(minimumReadSizeInBytes,
+            streamLength)];
+      }
+      streamBufferPosition = 0;
+      streamBufferLength = 0;
+      outputStream = new MemoryOutputStream(streamBuffer, streamBufferPosition,
+          streamBuffer.length);
+      needToCopy = true;
+    } else {
+      outputStream = new MemoryOutputStream(buffer, offset, len);
+    }
+
+    long bytesToRead = Math.min(
+        minimumReadSizeInBytes,
+        Math.min(
+            outputStream.capacity(),
+            streamLength - streamPosition));
+
+    try {
+      blob.downloadRange(streamPosition, bytesToRead, outputStream, options,
+          opContext);
+    } catch (StorageException e) {
+      throw new IOException(e);
+    }
+
+    int bytesRead = outputStream.size();
+    if (bytesRead > 0) {
+      streamPosition += bytesRead;
+      streamPositionAfterLastRead = streamPosition;
+      int count = Math.min(bytesRead, len);
+      if (needToCopy) {
+        streamBufferLength = bytesRead;
+        System.arraycopy(streamBuffer, streamBufferPosition, buffer, offset,
+            count);
+        streamBufferPosition += count;
+      }
+      return count;
+    } else {
+      // This may happen if the blob was modified after the length was obtained.
+      throw new EOFException("End of stream reached unexpectedly.");
+    }
+  }
+
+  /**
+   * Reads up to <code>len</code> bytes of data from the input stream into an
+   * array of bytes.
+   * @param b a buffer into which the data is written.
+   * @param offset a start offset into {@code b} where the data is written.
+   * @param len the maximum number of bytes to be read.
+   * @return the number of bytes written into {@code b}, or -1.
+   * @throws IOException IO failure
+   */
+  @Override
+  public synchronized int read(byte[] b, int offset, int len)
+      throws IOException {
+    checkState();
+    NativeAzureFileSystemHelper.validateReadArgs(b, offset, len);
+    if (blobInputStream != null) {
+      int numberOfBytesRead = blobInputStream.read(b, offset, len);
+      if (numberOfBytesRead > 0) {
+        // advance only on a successful read; -1 signals EOF
+        streamPosition += numberOfBytesRead;
+      }
+      return numberOfBytesRead;
+    } else {
+      if (offset < 0 || len < 0 || len > b.length - offset) {
+        throw new IndexOutOfBoundsException("read arguments out of range");
+      }
+      if (len == 0) {
+        return 0;
+      }
+
+      int bytesRead = 0;
+      int available = available();
+      if (available > 0) {
+        bytesRead = Math.min(available, len);
+        System.arraycopy(streamBuffer, streamBufferPosition, b, offset,
+            bytesRead);
+        streamBufferPosition += bytesRead;
+      }
+
+      if (len == bytesRead) {
+        return len;
+      }
+      if (streamPosition >= streamLength) {
+        return (bytesRead > 0) ? bytesRead : -1;
+      }
+
+      offset += bytesRead;
+      len -= bytesRead;
+
+      return bytesRead + doNetworkRead(b, offset, len);
+    }
+  }
+
+  /**
+   * Reads the next byte of data from the stream.
+   * @return the next byte of data, or -1
+   * @throws IOException IO failure
+   */
+  @Override
+  public int read() throws IOException {
+    byte[] buffer = new byte[1];
+    int numberOfBytesRead = read(buffer, 0, 1);
+    // mask to 0..255 as required by the InputStream.read() contract
+    return (numberOfBytesRead < 1) ? -1 : (buffer[0] & 0xFF);
+  }
+
+  /**
+   * Skips over and discards n bytes of data from this input stream.
+   * @param n the number of bytes to be skipped.
+   * @return the actual number of bytes skipped.
+   * @throws IOException IO failure
+   */
+  @Override
+  public synchronized long skip(long n) throws IOException {
+    checkState();
+
+    if (blobInputStream != null) {
+      // keep streamPosition in sync with the underlying stream
+      long skipped = blobInputStream.skip(n);
+      streamPosition += skipped;
+      return skipped;
+    } else {
+      if (n < 0 || streamPosition + n > streamLength) {
+        throw new IndexOutOfBoundsException("skip range");
+      }
+
+      if (streamBuffer != null) {
+        streamBufferPosition = (n < streamBufferLength - streamBufferPosition)
+            ? streamBufferPosition + (int) n
+            : streamBufferLength;
+      }
+
+      streamPosition += n;
+      return n;
+    }
+  }
+
+  /**
+   * An <code>OutputStream</code> backed by a user-supplied buffer.
+   */
+  static class MemoryOutputStream extends OutputStream {
+    private final byte[] buffer;
+    private final int offset;
+    private final int length;
+    private int writePosition;
+
+    /**
+     * Creates a <code>MemoryOutputStream</code> from a user-supplied buffer.
+     * @param buffer an array of bytes.
+     * @param offset a starting offset in <code>buffer</code> where the data
+     * will be written.
+     * @param length the maximum number of bytes to be written to the stream.
+     */
+    MemoryOutputStream(byte[] buffer, int offset, int length) {
+      if (buffer == null) {
+        throw new NullPointerException("buffer");
+      }
+      if (offset < 0 || length < 0 || length > buffer.length - offset) {
+        throw new IndexOutOfBoundsException("offset out of range of buffer");
+      }
+      this.buffer = buffer;
+      this.offset = offset;
+      this.length = length;
+      this.writePosition = offset;
+    }
+
+    /**
+     * Gets the current size of the stream.
+     */
+    public synchronized int size() {
+      return writePosition - offset;
+    }
+
+    /**
+     * Gets the current capacity of the stream.
+     */
+    public synchronized int capacity() {
+      // the writable region is buffer[offset .. offset + length), so the
+      // total capacity is length, independent of the starting offset
+      return length;
+    }
+
+    /**
+     * Writes the next byte to the stream.
+     * @param b the byte to be written.
+     * @throws IOException IO failure
+     */
+    public synchronized void write(int b) throws IOException {
+      if (size() > length - 1) {
+        throw new IOException("No space for more writes");
+      }
+      buffer[writePosition++] = (byte) b;
+    }
+
+    /**
+     * Writes a range of bytes to the stream.
+     * @param b a byte array.
+     * @param off the start offset in <code>b</code> from which the data
+     * is read.
+     * @param length the number of bytes to be written.
+     * @throws IOException IO failure
+     */
+    public synchronized void write(byte[] b, int off, int length)
+        throws IOException {
+      if (b == null) {
+        throw new NullPointerException("Null buffer argument");
+      }
+      if (off < 0 || length < 0 || length > b.length - off) {
+        throw new IndexOutOfBoundsException("array write offset");
+      }
+      System.arraycopy(b, off, buffer, writePosition, length);
+      writePosition += length;
+    }
+  }
+}
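
BlockBlobInputStream is package-private; applications reach it through
FileSystem.open() and then use the standard Seekable API. A hedged sketch
(the path, the offsets, and the assumption that fs.defaultFS points at a
wasb:// URI are all placeholders):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataInputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class RandomAccessSketch {
      public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        try (FSDataInputStream in = fs.open(new Path("/data/blob.bin"))) {
          byte[] buf = new byte[4096];
          in.seek(10 * 1024 * 1024);  // forward seek: no blob reopen needed
          int n = in.read(buf, 0, buf.length);
          System.out.println("read " + n + " bytes at 10 MB");
          in.seek(0);                 // reverse seek: served from the stream buffer when possible
          n = in.read(buf, 0, buf.length);
          System.out.println("read " + n + " bytes at 0");
        }
      }
    }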

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d670c3a4/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
index f999992..a45ea81 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
@@ -18,7 +18,6 @@
 
 package org.apache.hadoop.fs.azure;
 
-import java.io.DataInputStream;
 import java.io.DataOutputStream;
 import java.io.EOFException;
 import java.io.FileNotFoundException;
@@ -60,6 +59,7 @@ import org.apache.hadoop.fs.FileAlreadyExistsException;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.Seekable;
 import org.apache.hadoop.fs.azure.metrics.AzureFileSystemInstrumentation;
 import org.apache.hadoop.fs.azure.metrics.AzureFileSystemMetricsSystem;
 import org.apache.hadoop.fs.azure.security.Constants;
@@ -743,7 +743,7 @@ public class NativeAzureFileSystem extends FileSystem {
     // File length, valid only for streams over block blobs.
     private long fileLength;
 
-    public NativeAzureFsInputStream(DataInputStream in, String key, long fileLength) {
+    NativeAzureFsInputStream(InputStream in, String key, long fileLength) {
       this.in = in;
       this.key = key;
       this.isPageBlob = store.isPageBlobKey(key);
@@ -817,27 +817,6 @@ public class NativeAzureFileSystem extends FileSystem {
       }
     }
 
-    @Override
-    public synchronized  void readFully(long position, byte[] buffer, int offset, int length)
-        throws IOException {
-      validatePositionedReadArgs(position, buffer, offset, length);
-
-      int nread = 0;
-      while (nread < length) {
-        // In case BlobInputStream is used, mark() can act as a hint to read ahead only this
-        // length instead of 4 MB boundary.
-        in.mark(length - nread);
-        int nbytes = read(position + nread,
-            buffer,
-            offset + nread,
-            length - nread);
-        if (nbytes < 0) {
-          throw new EOFException(FSExceptionMessages.EOF_IN_READ_FULLY);
-        }
-        nread += nbytes;
-      }
-    }
-
     /*
      * Reads up to len bytes of data from the input stream into an array of
      * bytes. An attempt is made to read as many as len bytes, but a smaller
@@ -909,9 +888,14 @@ public class NativeAzureFileSystem extends FileSystem {
           throw new EOFException(FSExceptionMessages.NEGATIVE_SEEK);
         }
         if (this.pos > pos) {
-          IOUtils.closeStream(in);
-          in = store.retrieve(key);
-          this.pos = in.skip(pos);
+          if (in instanceof Seekable) {
+            ((Seekable) in).seek(pos);
+            this.pos = pos;
+          } else {
+            IOUtils.closeStream(in);
+            in = store.retrieve(key);
+            this.pos = in.skip(pos);
+          }
         } else {
           this.pos += in.skip(pos - this.pos);
         }
@@ -2538,7 +2522,7 @@ public class NativeAzureFileSystem extends FileSystem {
           + " is a directory not a file.");
     }
 
-    DataInputStream inputStream = null;
+    InputStream inputStream;
     try {
       inputStream = store.retrieve(key);
     } catch(Exception ex) {
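
A hedged stand-in for the Seekable fast path added above: only a stream that
cannot seek pays for a close-and-reopen. The reopen callable substitutes for
store.retrieve(key); this is a simplified sketch, not the committed code.

    import java.io.InputStream;
    import java.util.concurrent.Callable;
    import org.apache.hadoop.fs.Seekable;

    public class SeekFastPathSketch {
      static InputStream seekTo(InputStream in, long pos,
          Callable<InputStream> reopen) throws Exception {
        if (in instanceof Seekable) {
          ((Seekable) in).seek(pos);   // fast path: no new blob request
          return in;
        }
        in.close();                    // slow path: reopen and skip forward
        InputStream fresh = reopen.call();
        fresh.skip(pos);               // may stop at EOF if pos is too large
        return fresh;
      }
    }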

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d670c3a4/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystemHelper.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystemHelper.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystemHelper.java
index 40efdc6..57af1f8 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystemHelper.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystemHelper.java
@@ -18,9 +18,11 @@
 
 package org.apache.hadoop.fs.azure;
 
+import java.io.EOFException;
 import java.io.IOException;
 import java.util.Map;
 
+import com.google.common.base.Preconditions;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -29,6 +31,8 @@ import com.microsoft.azure.storage.StorageErrorCodeStrings;
 import com.microsoft.azure.storage.StorageException;
 
 import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.fs.FSExceptionMessages;
+
 /**
  * Utility class that has helper methods.
  *
@@ -104,4 +108,28 @@ final class NativeAzureFileSystemHelper {
       }
     }
   }
+
+  /**
+   * Validation code, based on
+   * {@code FSInputStream.validatePositionedReadArgs()}.
+   * @param buffer destination buffer
+   * @param offset offset within the buffer
+   * @param length length of bytes to read
+   * @throws EOFException if the position is negative
+   * @throws IndexOutOfBoundsException if there isn't space for the amount of
+   * data requested.
+   * @throws IllegalArgumentException other arguments are invalid.
+   */
+  static void validateReadArgs(byte[] buffer, int offset, int length)
+      throws EOFException {
+    Preconditions.checkArgument(length >= 0, "length is negative");
+    Preconditions.checkArgument(buffer != null, "Null buffer");
+    if (buffer.length - offset < length) {
+      throw new IndexOutOfBoundsException(
+          FSExceptionMessages.TOO_MANY_BYTES_FOR_DEST_BUFFER
+              + ": request length=" + length
+              + ", with offset =" + offset
+              + "; buffer capacity =" + (buffer.length - offset));
+    }
+  }
 }
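
A hedged illustration of the new helper's contract; the class below is
hypothetical and must live in the org.apache.hadoop.fs.azure package because
validateReadArgs() is package-private.

    package org.apache.hadoop.fs.azure;

    public class ValidateReadArgsSketch {
      public static void main(String[] args) throws Exception {
        byte[] buf = new byte[8];
        NativeAzureFileSystemHelper.validateReadArgs(buf, 0, 8);   // fits: no exception
        try {
          // 8 bytes requested, only 4 remain after the offset
          NativeAzureFileSystemHelper.validateReadArgs(buf, 4, 8);
        } catch (IndexOutOfBoundsException expected) {
          System.out.println(expected.getMessage());
        }
      }
    }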

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d670c3a4/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeFileSystemStore.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeFileSystemStore.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeFileSystemStore.java
index 611fe1a..1c7309f 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeFileSystemStore.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeFileSystemStore.java
@@ -18,9 +18,9 @@
 
 package org.apache.hadoop.fs.azure;
 
-import java.io.DataInputStream;
 import java.io.DataOutputStream;
 import java.io.IOException;
+import java.io.InputStream;
 import java.net.URI;
 import java.util.Date;
 
@@ -46,9 +46,9 @@ interface NativeFileSystemStore {
 
   FileMetadata retrieveMetadata(String key) throws IOException;
 
-  DataInputStream retrieve(String key) throws IOException;
+  InputStream retrieve(String key) throws IOException;
 
-  DataInputStream retrieve(String key, long byteRangeStart) throws IOException;
+  InputStream retrieve(String key, long byteRangeStart) throws IOException;
 
   DataOutputStream storefile(String key, PermissionStatus permissionStatus)
       throws AzureException;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d670c3a4/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SecureStorageInterfaceImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SecureStorageInterfaceImpl.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SecureStorageInterfaceImpl.java
index 810aacf..3d33453 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SecureStorageInterfaceImpl.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SecureStorageInterfaceImpl.java
@@ -466,6 +466,11 @@ public class SecureStorageInterfaceImpl extends StorageInterface {
     }
 
     @Override
+    public int getStreamMinimumReadSizeInBytes() {
+        return getBlob().getStreamMinimumReadSizeInBytes();
+    }
+
+    @Override
     public void setStreamMinimumReadSizeInBytes(int minimumReadSizeBytes) {
       getBlob().setStreamMinimumReadSizeInBytes(minimumReadSizeBytes);
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d670c3a4/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/StorageInterface.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/StorageInterface.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/StorageInterface.java
index aef9fc3..8b6b082 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/StorageInterface.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/StorageInterface.java
@@ -582,10 +582,17 @@ abstract class StorageInterface {
         throws StorageException;
 
     SelfRenewingLease acquireLease() throws StorageException;
-    
+
+    /**
+     * Gets the minimum read block size to use with this Blob.
+     *
+     * @return The minimum block size, in bytes, for reading from a block blob.
+     */
+    int getStreamMinimumReadSizeInBytes();
+
     /**
      * Sets the minimum read block size to use with this Blob.
-     * 
+     *
      * @param minimumReadSizeBytes
      *          The maximum block size, in bytes, for reading from a block blob
      *          while using a {@link BlobInputStream} object, ranging from 512

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d670c3a4/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/StorageInterfaceImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/StorageInterfaceImpl.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/StorageInterfaceImpl.java
index 491a0d0..d3d0370 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/StorageInterfaceImpl.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/StorageInterfaceImpl.java
@@ -399,6 +399,11 @@ class StorageInterfaceImpl extends StorageInterface {
     }
 
     @Override
+    public int getStreamMinimumReadSizeInBytes() {
+        return getBlob().getStreamMinimumReadSizeInBytes();
+    }
+
+    @Override
     public void setStreamMinimumReadSizeInBytes(int minimumReadSizeBytes) {
       getBlob().setStreamMinimumReadSizeInBytes(minimumReadSizeBytes);
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d670c3a4/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/AzureBlobStorageTestAccount.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/AzureBlobStorageTestAccount.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/AzureBlobStorageTestAccount.java
index 2cdc2e7..7fa59ce 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/AzureBlobStorageTestAccount.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/AzureBlobStorageTestAccount.java
@@ -82,13 +82,22 @@ public final class AzureBlobStorageTestAccount {
   private static final ConcurrentLinkedQueue<MetricsRecord> allMetrics =
       new ConcurrentLinkedQueue<MetricsRecord>();
   private static boolean metricsConfigSaved = false;
+  private boolean skipContainerDelete = false;
 
   private AzureBlobStorageTestAccount(NativeAzureFileSystem fs,
       CloudStorageAccount account,
       CloudBlobContainer container) {
+    this(fs, account, container, false);
+  }
+
+  private AzureBlobStorageTestAccount(NativeAzureFileSystem fs,
+      CloudStorageAccount account,
+      CloudBlobContainer container,
+      boolean skipContainerDelete) {
     this.account = account;
     this.container = container;
     this.fs = fs;
+    this.skipContainerDelete = skipContainerDelete;
   }
 
   /**
@@ -524,8 +533,19 @@ public final class AzureBlobStorageTestAccount {
     return create(containerNameSuffix, createOptions, null);
   }
 
-  public static AzureBlobStorageTestAccount create(String containerNameSuffix,
-      EnumSet<CreateOptions> createOptions, Configuration initialConfiguration)
+  public static AzureBlobStorageTestAccount create(
+      String containerNameSuffix,
+      EnumSet<CreateOptions> createOptions,
+      Configuration initialConfiguration)
+      throws Exception {
+    return create(containerNameSuffix, createOptions, initialConfiguration, false);
+  }
+
+  public static AzureBlobStorageTestAccount create(
+      String containerNameSuffix,
+      EnumSet<CreateOptions> createOptions,
+      Configuration initialConfiguration,
+      boolean useContainerSuffixAsContainerName)
       throws Exception {
     saveMetricsConfigFile();
     NativeAzureFileSystem fs = null;
@@ -538,12 +558,17 @@ public final class AzureBlobStorageTestAccount {
       return null;
     }
     fs = new NativeAzureFileSystem();
-    String containerName = String.format("wasbtests-%s-%tQ%s",
-        System.getProperty("user.name"), new Date(), containerNameSuffix);
+    String containerName = useContainerSuffixAsContainerName
+        ? containerNameSuffix
+        : String.format(
+            "wasbtests-%s-%tQ%s",
+            System.getProperty("user.name"),
+            new Date(),
+            containerNameSuffix);
     container = account.createCloudBlobClient().getContainerReference(
         containerName);
     if (createOptions.contains(CreateOptions.CreateContainer)) {
-      container.create();
+      container.createIfNotExists();
     }
     String accountName = conf.get(TEST_ACCOUNT_NAME_PROPERTY_NAME);
     if (createOptions.contains(CreateOptions.UseSas)) {
@@ -578,7 +603,8 @@ public final class AzureBlobStorageTestAccount {
     // Create test account initializing the appropriate member variables.
     //
     AzureBlobStorageTestAccount testAcct =
-        new AzureBlobStorageTestAccount(fs, account, container);
+        new AzureBlobStorageTestAccount(fs, account, container,
+            useContainerSuffixAsContainerName);
 
     return testAcct;
   }
@@ -824,7 +850,7 @@ public final class AzureBlobStorageTestAccount {
       fs.close();
       fs = null;
     }
-    if (container != null) {
+    if (!skipContainerDelete && container != null) {
       container.deleteIfExists();
       container = null;
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d670c3a4/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/MockStorageInterface.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/MockStorageInterface.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/MockStorageInterface.java
index 4fda017..4f26d9f 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/MockStorageInterface.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/MockStorageInterface.java
@@ -23,6 +23,7 @@ import java.io.IOException;
 import java.io.InputStream;
 import java.io.OutputStream;
 import java.lang.reflect.Method;
+import java.net.HttpURLConnection;
 import java.net.URI;
 import java.net.URISyntaxException;
 import java.util.ArrayList;
@@ -474,12 +475,30 @@ public class MockStorageInterface extends StorageInterface {
     public void downloadRange(long offset, long length, OutputStream os,
         BlobRequestOptions options, OperationContext opContext)
         throws StorageException {
-      throw new NotImplementedException();
+      if (offset < 0 || length <= 0) {
+        throw new IndexOutOfBoundsException();
+      }
+      if (!backingStore.exists(convertUriToDecodedString(uri))) {
+        throw new StorageException("BlobNotFound",
+            "Resource does not exist.",
+            HttpURLConnection.HTTP_NOT_FOUND,
+            null,
+            null);
+      }
+      byte[] content = backingStore.getContent(convertUriToDecodedString(uri));
+      try {
+        os.write(content, (int) offset, (int) length);
+      } catch (IOException e) {
+        throw new StorageException("Unknown error", "Unexpected error", e);
+      }
     }
   }
 
   class MockCloudBlockBlobWrapper extends MockCloudBlobWrapper
     implements CloudBlockBlobWrapper {
+
+    int minimumReadSize = AzureNativeFileSystemStore.DEFAULT_DOWNLOAD_BLOCK_SIZE;
+
     public MockCloudBlockBlobWrapper(URI uri, HashMap<String, String> metadata,
         int length) {
       super(uri, metadata, length);
@@ -493,7 +512,13 @@ public class MockStorageInterface extends StorageInterface {
     }
 
     @Override
+    public int getStreamMinimumReadSizeInBytes() {
+      return this.minimumReadSize;
+    }
+
+    @Override
     public void setStreamMinimumReadSizeInBytes(int minimumReadSizeBytes) {
+        this.minimumReadSize = minimumReadSizeBytes;
     }
 
     @Override
@@ -546,6 +571,9 @@ public class MockStorageInterface extends StorageInterface {
 
   class MockCloudPageBlobWrapper extends MockCloudBlobWrapper
     implements CloudPageBlobWrapper {
+
+    int minimumReadSize = AzureNativeFileSystemStore.DEFAULT_DOWNLOAD_BLOCK_SIZE;
+
     public MockCloudPageBlobWrapper(URI uri, HashMap<String, String> metadata,
         int length) {
       super(uri, metadata, length);
@@ -571,7 +599,13 @@ public class MockStorageInterface extends StorageInterface {
     }
 
     @Override
+    public int getStreamMinimumReadSizeInBytes() {
+      return this.minimumReadSize;
+    }
+
+    @Override
     public void setStreamMinimumReadSizeInBytes(int minimumReadSize) {
+        this.minimumReadSize = minimumReadSize;
     }
 
     @Override

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d670c3a4/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestAzureConcurrentOutOfBandIo.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestAzureConcurrentOutOfBandIo.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestAzureConcurrentOutOfBandIo.java
index a311a29..7ea7534 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestAzureConcurrentOutOfBandIo.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestAzureConcurrentOutOfBandIo.java
@@ -155,7 +155,7 @@ public class TestAzureConcurrentOutOfBandIo {
         "WASB_String.txt");
    writeBlockTask.startWriting();
    int count = 0;
-   DataInputStream inputStream = null;
+   InputStream inputStream = null;
 
    for (int i = 0; i < 5; i++) {
      try {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d670c3a4/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestBlockBlobInputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestBlockBlobInputStream.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestBlockBlobInputStream.java
new file mode 100644
index 0000000..2db063b
--- /dev/null
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestBlockBlobInputStream.java
@@ -0,0 +1,756 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.azure;
+
+import java.io.EOFException;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.util.EnumSet;
+import java.util.Random;
+import java.util.concurrent.Callable;
+
+import org.junit.FixMethodOrder;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.Timeout;
+import org.junit.runners.MethodSorters;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FSExceptionMessages;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.contract.ContractTestUtils;
+import org.apache.hadoop.fs.contract.ContractTestUtils.NanoTimer;
+
+import static org.junit.Assert.*;
+import static org.junit.Assume.*;
+
+import static org.apache.hadoop.test.LambdaTestUtils.*;
+
+/**
+ * Test semantics and performance of the original block blob input stream
+ * (KEY_INPUT_STREAM_VERSION=1) and the new
+ * <code>BlockBlobInputStream</code> (KEY_INPUT_STREAM_VERSION=2).
+ */
+@FixMethodOrder(MethodSorters.NAME_ASCENDING)
+
+public class TestBlockBlobInputStream extends AbstractWasbTestBase {
+  private static final Logger LOG = LoggerFactory.getLogger(
+      TestBlockBlobInputStream.class);
+  private static final int KILOBYTE = 1024;
+  private static final int MEGABYTE = KILOBYTE * KILOBYTE;
+  private static final int TEST_FILE_SIZE = 6 * MEGABYTE;
+  private static final Path TEST_FILE_PATH = new Path(
+      "TestBlockBlobInputStream.txt");
+
+  private AzureBlobStorageTestAccount accountUsingInputStreamV1;
+  private AzureBlobStorageTestAccount accountUsingInputStreamV2;
+  private long testFileLength;
+
+  /**
+   * Long test timeout.
+   */
+  @Rule
+  public Timeout testTimeout = new Timeout(10 * 60 * 1000);
+  private FileStatus testFileStatus;
+  private Path hugefile;
+
+  @Override
+  public void setUp() throws Exception {
+    super.setUp();
+    Configuration conf = new Configuration();
+    conf.setInt(AzureNativeFileSystemStore.KEY_INPUT_STREAM_VERSION, 1);
+
+    accountUsingInputStreamV1 = AzureBlobStorageTestAccount.create(
+        "testblockblobinputstream",
+        EnumSet.of(AzureBlobStorageTestAccount.CreateOptions.CreateContainer),
+        conf,
+        true);
+
+    accountUsingInputStreamV2 = AzureBlobStorageTestAccount.create(
+        "testblockblobinputstream",
+        EnumSet.noneOf(AzureBlobStorageTestAccount.CreateOptions.class),
+        null,
+        true);
+
+    assumeNotNull(accountUsingInputStreamV1);
+    assumeNotNull(accountUsingInputStreamV2);
+    hugefile = fs.makeQualified(TEST_FILE_PATH);
+    try {
+      testFileStatus = fs.getFileStatus(TEST_FILE_PATH);
+      testFileLength = testFileStatus.getLen();
+    } catch (FileNotFoundException e) {
+      // file doesn't exist
+      testFileLength = 0;
+    }
+  }
+
+  @Override
+  protected AzureBlobStorageTestAccount createTestAccount() throws Exception {
+    Configuration conf = new Configuration();
+    conf.setInt(AzureNativeFileSystemStore.KEY_INPUT_STREAM_VERSION, 1);
+
+    accountUsingInputStreamV1 = AzureBlobStorageTestAccount.create(
+        "testblockblobinputstream",
+        EnumSet.of(AzureBlobStorageTestAccount.CreateOptions.CreateContainer),
+        conf,
+        true);
+
+    accountUsingInputStreamV2 = AzureBlobStorageTestAccount.create(
+        "testblockblobinputstream",
+        EnumSet.noneOf(AzureBlobStorageTestAccount.CreateOptions.class),
+        null,
+        true);
+
+    assumeNotNull(accountUsingInputStreamV1);
+    assumeNotNull(accountUsingInputStreamV2);
+    return accountUsingInputStreamV1;
+  }
+
+  /**
+   * Create a test file by repeating the characters in the alphabet.
+   * @throws IOException
+   */
+  private void createTestFileAndSetLength() throws IOException {
+    FileSystem fs = accountUsingInputStreamV1.getFileSystem();
+
+    // To reduce test run time, the test file can be reused.
+    if (fs.exists(TEST_FILE_PATH)) {
+      testFileStatus = fs.getFileStatus(TEST_FILE_PATH);
+      testFileLength = testFileStatus.getLen();
+      LOG.info("Reusing test file: {}", testFileStatus);
+      return;
+    }
+
+    int sizeOfAlphabet = ('z' - 'a' + 1);
+    byte[] buffer = new byte[26 * KILOBYTE];
+    char character = 'a';
+    for (int i = 0; i < buffer.length; i++) {
+      buffer[i] = (byte) character;
+      character = (character == 'z') ? 'a' : (char) ((int) character + 1);
+    }
+
+    LOG.info("Creating test file {} of size: {}", TEST_FILE_PATH,
+        TEST_FILE_SIZE);
+    ContractTestUtils.NanoTimer timer = new ContractTestUtils.NanoTimer();
+
+    try(FSDataOutputStream outputStream = fs.create(TEST_FILE_PATH)) {
+      int bytesWritten = 0;
+      while (bytesWritten < TEST_FILE_SIZE) {
+        outputStream.write(buffer);
+        bytesWritten += buffer.length;
+      }
+      LOG.info("Closing stream {}", outputStream);
+      ContractTestUtils.NanoTimer closeTimer
+          = new ContractTestUtils.NanoTimer();
+      outputStream.close();
+      closeTimer.end("time to close() output stream");
+    }
+    timer.end("time to write %d KB", TEST_FILE_SIZE / 1024);
+    testFileLength = fs.getFileStatus(TEST_FILE_PATH).getLen();
+  }
+
+  void assumeHugeFileExists() throws IOException {
+    ContractTestUtils.assertPathExists(fs, "huge file not created", hugefile);
+    FileStatus status = fs.getFileStatus(hugefile);
+    ContractTestUtils.assertIsFile(hugefile, status);
+    assertTrue("File " + hugefile + " is empty", status.getLen() > 0);
+  }
+
+  /**
+   * Calculate megabits per second from the specified values for bytes and
+   * milliseconds.
+   * @param bytes The number of bytes.
+   * @param milliseconds The number of milliseconds.
+   * @return The number of megabits per second.
+   */
+  private static double toMbps(long bytes, long milliseconds) {
+    return bytes / 1000.0 * 8 / milliseconds;
+  }
+
+  @Test
+  public void test_0100_CreateHugeFile() throws IOException {
+    createTestFileAndSetLength();
+  }
+
+  /**
+   * Validates the implementation of InputStream.markSupported.
+   * @throws IOException
+   */
+  @Test
+  public void test_0301_MarkSupportedV1() throws IOException {
+    validateMarkSupported(accountUsingInputStreamV1.getFileSystem());
+  }
+
+  /**
+   * Validates the implementation of InputStream.markSupported.
+   * @throws IOException
+   */
+  @Test
+  public void test_0302_MarkSupportedV2() throws IOException {
+    validateMarkSupported(accountUsingInputStreamV2.getFileSystem());
+  }
+
+  private void validateMarkSupported(FileSystem fs) throws IOException {
+    assumeHugeFileExists();
+    try (FSDataInputStream inputStream = fs.open(TEST_FILE_PATH)) {
+      assertTrue("mark is not supported", inputStream.markSupported());
+    }
+  }
+
+  /**
+   * Validates the implementation of InputStream.mark and reset
+   * for version 1 of the block blob input stream.
+   * @throws Exception
+   */
+  @Test
+  public void test_0303_MarkAndResetV1() throws Exception {
+    validateMarkAndReset(accountUsingInputStreamV1.getFileSystem());
+  }
+
+  /**
+   * Validates the implementation of InputStream.mark and reset
+   * for version 2 of the block blob input stream.
+   * @throws Exception
+   */
+  @Test
+  public void test_0304_MarkAndResetV2() throws Exception {
+    validateMarkAndReset(accountUsingInputStreamV2.getFileSystem());
+  }
+
+  private void validateMarkAndReset(FileSystem fs) throws Exception {
+    assumeHugeFileExists();
+    try (FSDataInputStream inputStream = fs.open(TEST_FILE_PATH)) {
+      inputStream.mark(KILOBYTE - 1);
+
+      byte[] buffer = new byte[KILOBYTE];
+      int bytesRead = inputStream.read(buffer);
+      assertEquals(buffer.length, bytesRead);
+
+      inputStream.reset();
+      assertEquals("rest -> pos 0", 0, inputStream.getPos());
+
+      inputStream.mark(8 * KILOBYTE - 1);
+
+      buffer = new byte[8 * KILOBYTE];
+      bytesRead = inputStream.read(buffer);
+      assertEquals(buffer.length, bytesRead);
+
+      intercept(IOException.class,
+          "Resetting to invalid mark",
+          new Callable<FSDataInputStream>() {
+            @Override
+            public FSDataInputStream call() throws Exception {
+              inputStream.reset();
+              return inputStream;
+            }
+          }
+      );
+    }
+  }
+
+  /**
+   * Validates the implementation of Seekable.seekToNewSource, which should
+   * return false for version 1 of the block blob input stream.
+   * @throws IOException
+   */
+  @Test
+  public void test_0305_SeekToNewSourceV1() throws IOException {
+    validateSeekToNewSource(accountUsingInputStreamV1.getFileSystem());
+  }
+
+  /**
+   * Validates the implementation of Seekable.seekToNewSource, which should
+   * return false for version 2 of the block blob input stream.
+   * @throws IOException
+   */
+  @Test
+  public void test_0306_SeekToNewSourceV2() throws IOException {
+    validateSeekToNewSource(accountUsingInputStreamV2.getFileSystem());
+  }
+
+  private void validateSeekToNewSource(FileSystem fs) throws IOException {
+    assumeHugeFileExists();
+    try (FSDataInputStream inputStream = fs.open(TEST_FILE_PATH)) {
+      assertFalse(inputStream.seekToNewSource(0));
+    }
+  }
+
+  /**
+   * Validates the implementation of InputStream.skip and ensures there is no
+   * network I/O for version 1 of the block blob input stream.
+   * @throws Exception
+   */
+  @Test
+  public void test_0307_SkipBoundsV1() throws Exception {
+    validateSkipBounds(accountUsingInputStreamV1.getFileSystem());
+  }
+
+  /**
+   * Validates the implementation of InputStream.skip and ensures there is no
+   * network I/O for version 2 of the block blob input stream.
+   * @throws Exception
+   */
+  @Test
+  public void test_0308_SkipBoundsV2() throws Exception {
+    validateSkipBounds(accountUsingInputStreamV2.getFileSystem());
+  }
+
+  private void validateSkipBounds(FileSystem fs) throws Exception {
+    assumeHugeFileExists();
+    try (FSDataInputStream inputStream = fs.open(TEST_FILE_PATH)) {
+      NanoTimer timer = new NanoTimer();
+
+      long skipped = inputStream.skip(-1);
+      assertEquals(0, skipped);
+
+      skipped = inputStream.skip(0);
+      assertEquals(0, skipped);
+
+      assertTrue(testFileLength > 0);
+
+      skipped = inputStream.skip(testFileLength);
+      assertEquals(testFileLength, skipped);
+
+      intercept(EOFException.class,
+          new Callable<Long>() {
+            @Override
+            public Long call() throws Exception {
+              return inputStream.skip(1);
+            }
+          }
+      );
+      long elapsedTimeMs = timer.elapsedTimeMs();
+      assertTrue(
+          String.format(
+              "There should not be any network I/O (elapsedTimeMs=%1$d).",
+              elapsedTimeMs),
+          elapsedTimeMs < 20);
+    }
+  }
+
+  /**
+   * Validates the implementation of Seekable.seek and ensures there is no
+   * network I/O for forward seek.
+   * @throws Exception
+   */
+  @Test
+  public void test_0309_SeekBoundsV1() throws Exception {
+    validateSeekBounds(accountUsingInputStreamV1.getFileSystem());
+  }
+
+  /**
+   * Validates the implementation of Seekable.seek and ensures there is no
+   * network I/O for forward seek.
+   * @throws Exception
+   */
+  @Test
+  public void test_0310_SeekBoundsV2() throws Exception {
+    validateSeekBounds(accountUsingInputStreamV2.getFileSystem());
+  }
+
+  private void validateSeekBounds(FileSystem fs) throws Exception {
+    assumeHugeFileExists();
+    try (
+        FSDataInputStream inputStream = fs.open(TEST_FILE_PATH);
+    ) {
+      NanoTimer timer = new NanoTimer();
+
+      inputStream.seek(0);
+      assertEquals(0, inputStream.getPos());
+
+      intercept(EOFException.class,
+          FSExceptionMessages.NEGATIVE_SEEK,
+          new Callable<FSDataInputStream>() {
+            @Override
+            public FSDataInputStream call() throws Exception {
+              inputStream.seek(-1);
+              return inputStream;
+            }
+          }
+      );
+
+      assertTrue("Test file length only " + testFileLength, testFileLength > 0);
+      inputStream.seek(testFileLength);
+      assertEquals(testFileLength, inputStream.getPos());
+
+      intercept(EOFException.class,
+          FSExceptionMessages.CANNOT_SEEK_PAST_EOF,
+          new Callable<FSDataInputStream>() {
+            @Override
+            public FSDataInputStream call() throws Exception {
+              inputStream.seek(testFileLength + 1);
+              return inputStream;
+            }
+          }
+      );
+
+      long elapsedTimeMs = timer.elapsedTimeMs();
+      assertTrue(
+          String.format(
+              "There should not be any network I/O (elapsedTimeMs=%1$d).",
+              elapsedTimeMs),
+          elapsedTimeMs < 20);
+    }
+  }
+
+  /**
+   * Validates the implementation of Seekable.seek, Seekable.getPos,
+   * and InputStream.available.
+   * @throws Exception
+   */
+  @Test
+  public void test_0311_SeekAndAvailableAndPositionV1() throws Exception {
+    validateSeekAndAvailableAndPosition(
+        accountUsingInputStreamV1.getFileSystem());
+  }
+
+  /**
+   * Validates the implementation of Seekable.seek, Seekable.getPos,
+   * and InputStream.available.
+   * @throws Exception
+   */
+  @Test
+  public void test_0312_SeekAndAvailableAndPositionV2() throws Exception {
+    validateSeekAndAvailableAndPosition(
+        accountUsingInputStreamV2.getFileSystem());
+  }
+
+  private void validateSeekAndAvailableAndPosition(FileSystem fs)
+      throws Exception {
+    assumeHugeFileExists();
+    try (FSDataInputStream inputStream = fs.open(TEST_FILE_PATH)) {
+      byte[] expected1 = {(byte) 'a', (byte) 'b', (byte) 'c'};
+      byte[] expected2 = {(byte) 'd', (byte) 'e', (byte) 'f'};
+      byte[] expected3 = {(byte) 'b', (byte) 'c', (byte) 'd'};
+      byte[] expected4 = {(byte) 'g', (byte) 'h', (byte) 'i'};
+      byte[] buffer = new byte[3];
+
+      int bytesRead = inputStream.read(buffer);
+      assertEquals(buffer.length, bytesRead);
+      assertArrayEquals(expected1, buffer);
+      assertEquals(buffer.length, inputStream.getPos());
+      assertEquals(testFileLength - inputStream.getPos(),
+          inputStream.available());
+
+      bytesRead = inputStream.read(buffer);
+      assertEquals(buffer.length, bytesRead);
+      assertArrayEquals(expected2, buffer);
+      assertEquals(2 * buffer.length, inputStream.getPos());
+      assertEquals(testFileLength - inputStream.getPos(),
+          inputStream.available());
+
+      // reverse seek
+      int seekPos = 0;
+      inputStream.seek(seekPos);
+
+      bytesRead = inputStream.read(buffer);
+      assertEquals(buffer.length, bytesRead);
+      assertArrayEquals(expected1, buffer);
+      assertEquals(buffer.length + seekPos, inputStream.getPos());
+      assertEquals(testFileLength - inputStream.getPos(),
+          inputStream.available());
+
+      // reverse seek
+      seekPos = 1;
+      inputStream.seek(seekPos);
+
+      bytesRead = inputStream.read(buffer);
+      assertEquals(buffer.length, bytesRead);
+      assertArrayEquals(expected3, buffer);
+      assertEquals(buffer.length + seekPos, inputStream.getPos());
+      assertEquals(testFileLength - inputStream.getPos(),
+          inputStream.available());
+
+      // forward seek
+      seekPos = 6;
+      inputStream.seek(seekPos);
+
+      bytesRead = inputStream.read(buffer);
+      assertEquals(buffer.length, bytesRead);
+      assertArrayEquals(expected4, buffer);
+      assertEquals(buffer.length + seekPos, inputStream.getPos());
+      assertEquals(testFileLength - inputStream.getPos(),
+          inputStream.available());
+    }
+  }
+
+  /**
+   * Validates the implementation of InputStream.skip, Seekable.getPos,
+   * and InputStream.available.
+   * @throws IOException
+   */
+  @Test
+  public void test_0313_SkipAndAvailableAndPositionV1() throws IOException {
+    validateSkipAndAvailableAndPosition(
+        accountUsingInputStreamV1.getFileSystem());
+  }
+
+  /**
+   * Validates the implementation of InputStream.skip, Seekable.getPos,
+   * and InputStream.available.
+   * @throws IOException
+   */
+  @Test
+  public void test_0314_SkipAndAvailableAndPositionV2() throws IOException {
+    validateSkipAndAvailableAndPosition(
+        accountUsingInputStreamV2.getFileSystem());
+  }
+
+  private void validateSkipAndAvailableAndPosition(FileSystem fs)
+      throws IOException {
+    assumeHugeFileExists();
+    try (
+        FSDataInputStream inputStream = fs.open(TEST_FILE_PATH);
+    ) {
+      byte[] expected1 = {(byte) 'a', (byte) 'b', (byte) 'c'};
+      byte[] expected2 = {(byte) 'd', (byte) 'e', (byte) 'f'};
+      byte[] expected3 = {(byte) 'b', (byte) 'c', (byte) 'd'};
+      byte[] expected4 = {(byte) 'g', (byte) 'h', (byte) 'i'};
+
+      assertEquals(testFileLength, inputStream.available());
+      assertEquals(0, inputStream.getPos());
+
+      int n = 3;
+      long skipped = inputStream.skip(n);
+
+      assertEquals(skipped, inputStream.getPos());
+      assertEquals(testFileLength - inputStream.getPos(),
+          inputStream.available());
+      assertEquals(n, skipped);
+
+      byte[] buffer = new byte[3];
+      int bytesRead = inputStream.read(buffer);
+      assertEquals(buffer.length, bytesRead);
+      assertArrayEquals(expected2, buffer);
+      assertEquals(buffer.length + skipped, inputStream.getPos());
+      assertEquals(testFileLength - inputStream.getPos(),
+          inputStream.available());
+
+      // does skip still work after seek?
+      int seekPos = 1;
+      inputStream.seek(seekPos);
+
+      bytesRead = inputStream.read(buffer);
+      assertEquals(buffer.length, bytesRead);
+      assertArrayEquals(expected3, buffer);
+      assertEquals(buffer.length + seekPos, inputStream.getPos());
+      assertEquals(testFileLength - inputStream.getPos(),
+          inputStream.available());
+
+      long currentPosition = inputStream.getPos();
+      n = 2;
+      skipped = inputStream.skip(n);
+
+      assertEquals(currentPosition + skipped, inputStream.getPos());
+      assertEquals(testFileLength - inputStream.getPos(),
+          inputStream.available());
+      assertEquals(n, skipped);
+
+      bytesRead = inputStream.read(buffer);
+      assertEquals(buffer.length, bytesRead);
+      assertArrayEquals(expected4, buffer);
+      assertEquals(buffer.length + skipped + currentPosition,
+          inputStream.getPos());
+      assertEquals(testFileLength - inputStream.getPos(),
+          inputStream.available());
+    }
+  }
+
+  /**
+   * Ensures parity in the performance of sequential read for
+   * version 1 and version 2 of the block blob input stream.
+   * @throws IOException
+   */
+  @Test
+  public void test_0315_SequentialReadPerformance() throws IOException {
+    assumeHugeFileExists();
+    final int maxAttempts = 10;
+    final double maxAcceptableRatio = 1.01;
+    double v1ElapsedMs = 0, v2ElapsedMs = 0;
+    double ratio = Double.MAX_VALUE;
+    for (int i = 0; i < maxAttempts && ratio >= maxAcceptableRatio; i++) {
+      v1ElapsedMs = sequentialRead(1,
+          accountUsingInputStreamV1.getFileSystem(), false);
+      v2ElapsedMs = sequentialRead(2,
+          accountUsingInputStreamV2.getFileSystem(), false);
+      ratio = v2ElapsedMs / v1ElapsedMs;
+      LOG.info(String.format(
+          "v1ElapsedMs=%1$d, v2ElapsedMs=%2$d, ratio=%3$.2f",
+          (long) v1ElapsedMs,
+          (long) v2ElapsedMs,
+          ratio));
+    }
+    assertTrue(String.format(
+        "Performance of version 2 is not acceptable: v1ElapsedMs=%1$d,"
+            + " v2ElapsedMs=%2$d, ratio=%3$.2f",
+        (long) v1ElapsedMs,
+        (long) v2ElapsedMs,
+        ratio),
+        ratio < maxAcceptableRatio);
+  }
+
+  /**
+   * Ensures parity in the performance of sequential read after reverse seek for
+   * version 2 of the block blob input stream.
+   * @throws IOException
+   */
+  @Test
+  public void test_0316_SequentialReadAfterReverseSeekPerformanceV2()
+      throws IOException {
+    assumeHugeFileExists();
+    final int maxAttempts = 10;
+    final double maxAcceptableRatio = 1.01;
+    double beforeSeekElapsedMs = 0, afterSeekElapsedMs = 0;
+    double ratio = Double.MAX_VALUE;
+    for (int i = 0; i < maxAttempts && ratio >= maxAcceptableRatio; i++) {
+      beforeSeekElapsedMs = sequentialRead(2,
+          accountUsingInputStreamV2.getFileSystem(), false);
+      afterSeekElapsedMs = sequentialRead(2,
+          accountUsingInputStreamV2.getFileSystem(), true);
+      ratio = afterSeekElapsedMs / beforeSeekElapsedMs;
+      LOG.info(String.format(
+          "beforeSeekElapsedMs=%1$d, afterSeekElapsedMs=%2$d, ratio=%3$.2f",
+          (long) beforeSeekElapsedMs,
+          (long) afterSeekElapsedMs,
+          ratio));
+    }
+    assertTrue(String.format(
+        "Performance of version 2 after reverse seek is not acceptable:"
+            + " beforeSeekElapsedMs=%1$d, afterSeekElapsedMs=%2$d,"
+            + " ratio=%3$.2f",
+        (long) beforeSeekElapsedMs,
+        (long) afterSeekElapsedMs,
+        ratio),
+        ratio < maxAcceptableRatio);
+  }
+
+  private long sequentialRead(int version,
+      FileSystem fs,
+      boolean afterReverseSeek) throws IOException {
+    byte[] buffer = new byte[16 * KILOBYTE];
+    long totalBytesRead = 0;
+    long bytesRead = 0;
+
+    try (FSDataInputStream inputStream = fs.open(TEST_FILE_PATH)) {
+      if (afterReverseSeek) {
+        // Read forward first so that seek(0) below is a reverse seek. A
+        // do-while guarantees at least one read happens; bytesRead starts
+        // at 0, so a plain while loop would never execute.
+        do {
+          bytesRead = inputStream.read(buffer);
+          totalBytesRead += bytesRead;
+        } while (bytesRead > 0 && totalBytesRead < 4 * MEGABYTE);
+        totalBytesRead = 0;
+        inputStream.seek(0);
+      }
+
+      NanoTimer timer = new NanoTimer();
+      while ((bytesRead = inputStream.read(buffer)) > 0) {
+        totalBytesRead += bytesRead;
+      }
+      long elapsedTimeMs = timer.elapsedTimeMs();
+
+      LOG.info(String.format(
+          "v%1$d: bytesRead=%2$d, elapsedMs=%3$d, Mbps=%4$.2f,"
+              + " afterReverseSeek=%5$s",
+          version,
+          totalBytesRead,
+          elapsedTimeMs,
+          toMbps(totalBytesRead, elapsedTimeMs),
+          afterReverseSeek));
+
+      assertEquals(testFileLength, totalBytesRead);
+      return elapsedTimeMs;
+    }
+  }
+
+  /**
+   * Ensures the random read performance of version 2 of the block blob
+   * input stream is well ahead of version 1.
+   * @throws IOException
+   */
+  @Test
+  public void test_0317_RandomReadPerformance() throws IOException {
+    assumeHugeFileExists();
+    final int maxAttempts = 10;
+    final double maxAcceptableRatio = 0.10;
+    double v1ElapsedMs = 0, v2ElapsedMs = 0;
+    double ratio = Double.MAX_VALUE;
+    for (int i = 0; i < maxAttempts && ratio >= maxAcceptableRatio; i++) {
+      v1ElapsedMs = randomRead(1,
+          accountUsingInputStreamV1.getFileSystem());
+      v2ElapsedMs = randomRead(2,
+          accountUsingInputStreamV2.getFileSystem());
+      ratio = v2ElapsedMs / v1ElapsedMs;
+      LOG.info(String.format(
+          "v1ElapsedMs=%1$d, v2ElapsedMs=%2$d, ratio=%3$.2f",
+          (long) v1ElapsedMs,
+          (long) v2ElapsedMs,
+          ratio));
+    }
+    assertTrue(String.format(
+        "Performance of version 2 is not acceptable: v1ElapsedMs=%1$d,"
+            + " v2ElapsedMs=%2$d, ratio=%3$.2f",
+        (long) v1ElapsedMs,
+        (long) v2ElapsedMs,
+        ratio),
+        ratio < maxAcceptableRatio);
+  }
+
+  private long randomRead(int version, FileSystem fs) throws IOException {
+    assumeHugeFileExists();
+    final int minBytesToRead = 2 * MEGABYTE;
+    Random random = new Random();
+    byte[] buffer = new byte[8 * KILOBYTE];
+    long totalBytesRead = 0;
+    long bytesRead = 0;
+    try(FSDataInputStream inputStream = fs.open(TEST_FILE_PATH)) {
+      NanoTimer timer = new NanoTimer();
+
+      do {
+        bytesRead = inputStream.read(buffer);
+        totalBytesRead += bytesRead;
+        inputStream.seek(random.nextInt(
+            (int) (testFileLength - buffer.length)));
+      } while (bytesRead > 0 && totalBytesRead < minBytesToRead);
+
+      long elapsedTimeMs = timer.elapsedTimeMs();
+
+      LOG.info(String.format(
+          "v%1$d: totalBytesRead=%2$d, elapsedTimeMs=%3$d, Mbps=%4$.2f",
+          version,
+          totalBytesRead,
+          elapsedTimeMs,
+          toMbps(totalBytesRead, elapsedTimeMs)));
+
+      assertTrue(minBytesToRead <= totalBytesRead);
+
+      return elapsedTimeMs;
+    }
+  }
+
+  @Test
+  public void test_999_DeleteHugeFiles() throws IOException {
+    ContractTestUtils.NanoTimer timer = new ContractTestUtils.NanoTimer();
+    fs.delete(TEST_FILE_PATH, false);
+    timer.end("time to delete %s", TEST_FILE_PATH);
+  }
+
+}
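
The Mbps column in the LOG.info output above comes from a toMbps helper
defined earlier in this test class, outside this hunk. A minimal sketch of
its likely shape, stated as an assumption rather than the committed
definition:

    public class ToMbpsSketch {
      // Hypothetical stand-in for the helper referenced by the tests above.
      static double toMbps(long bytes, long milliseconds) {
        // bytes -> bits, then bits per millisecond -> megabits per second.
        return bytes * 8.0 / milliseconds / 1000.0;
      }

      public static void main(String[] args) {
        // 1,000,000 bytes read in 1000 ms is 8.0 Mbps.
        System.out.println(toMbps(1_000_000L, 1000L));
      }
    }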


[21/50] [abbrv] hadoop git commit: HADOOP-13414. Hide Jetty Server version header in HTTP responses. Contributed by Surendra Singth Lilhore.

Posted by as...@apache.org.
HADOOP-13414. Hide Jetty Server version header in HTTP responses. Contributed by Surendra Singth Lilhore.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a180ba40
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a180ba40
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a180ba40

Branch: refs/heads/YARN-5972
Commit: a180ba408128b2d916822e78deb979bbcd1894da
Parents: b17e655
Author: Vinayakumar B <vi...@apache.org>
Authored: Wed Jul 5 16:05:18 2017 +0530
Committer: Vinayakumar B <vi...@apache.org>
Committed: Wed Jul 5 16:05:18 2017 +0530

----------------------------------------------------------------------
 .../src/main/java/org/apache/hadoop/http/HttpServer2.java           | 1 +
 1 file changed, 1 insertion(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a180ba40/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
index 0891e8e..d7436b2 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
@@ -431,6 +431,7 @@ public final class HttpServer2 implements FilterContainer {
       HttpConfiguration httpConfig = new HttpConfiguration();
       httpConfig.setRequestHeaderSize(requestHeaderSize);
       httpConfig.setResponseHeaderSize(responseHeaderSize);
+      httpConfig.setSendServerVersion(false);
 
       for (URI ep : endpoints) {
         final ServerConnector connector;
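
With setSendServerVersion(false), responses served by HttpServer2 should no
longer carry a "Server: Jetty(...)" header. A minimal sketch of how one might
spot-check this against a running daemon; the host and port are placeholders,
not something this patch defines:

    import java.net.HttpURLConnection;
    import java.net.URL;

    public class ServerHeaderCheck {
      public static void main(String[] args) throws Exception {
        // Placeholder endpoint; point at any web UI backed by HttpServer2.
        HttpURLConnection conn = (HttpURLConnection)
            new URL("http://localhost:50070/").openConnection();
        conn.connect();
        // Expect null once Jetty stops sending its version header.
        System.out.println("Server header: " + conn.getHeaderField("Server"));
        conn.disconnect();
      }
    }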


[09/50] [abbrv] hadoop git commit: HDFS-12043. Add counters for block re-replication. Contributed by Chen Liang.

Posted by as...@apache.org.
HDFS-12043. Add counters for block re-replication. Contributed by Chen Liang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6a9dc5f4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6a9dc5f4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6a9dc5f4

Branch: refs/heads/YARN-5972
Commit: 6a9dc5f44b0c7945e3e9a56248cd4ff80d5c8f0f
Parents: a2f0cbd
Author: Arpit Agarwal <ar...@apache.org>
Authored: Fri Jun 30 10:20:12 2017 -0700
Committer: Arpit Agarwal <ar...@apache.org>
Committed: Fri Jun 30 10:20:12 2017 -0700

----------------------------------------------------------------------
 .../server/blockmanagement/BlockManager.java    | 13 ++-
 .../PendingReconstructionBlocks.java            |  8 +-
 .../namenode/metrics/NameNodeMetrics.java       | 18 ++++
 .../TestPendingReconstruction.java              | 90 +++++++++++++++++++-
 4 files changed, 122 insertions(+), 7 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6a9dc5f4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index a0c4698..a5ee30b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -1851,7 +1851,7 @@ public class BlockManager implements BlockStatsMXBean {
         (pendingReplicaNum > 0 || isPlacementPolicySatisfied(block));
   }
 
-  private BlockReconstructionWork scheduleReconstruction(BlockInfo block,
+  BlockReconstructionWork scheduleReconstruction(BlockInfo block,
       int priority) {
     // skip abandoned block or block reopened for append
     if (block.isDeleted() || !block.isCompleteOrCommitted()) {
@@ -1873,6 +1873,7 @@ public class BlockManager implements BlockStatsMXBean {
     if(srcNodes == null || srcNodes.length == 0) {
       // block can not be reconstructed from any node
       LOG.debug("Block {} cannot be reconstructed from any node", block);
+      NameNode.getNameNodeMetrics().incNumTimesReReplicationNotScheduled();
       return null;
     }
 
@@ -1885,6 +1886,7 @@ public class BlockManager implements BlockStatsMXBean {
       neededReconstruction.remove(block, priority);
       blockLog.debug("BLOCK* Removing {} from neededReconstruction as" +
           " it has enough replicas", block);
+      NameNode.getNameNodeMetrics().incNumTimesReReplicationNotScheduled();
       return null;
     }
 
@@ -1900,6 +1902,7 @@ public class BlockManager implements BlockStatsMXBean {
     if (block.isStriped()) {
       if (pendingNum > 0) {
         // Wait the previous reconstruction to finish.
+        NameNode.getNameNodeMetrics().incNumTimesReReplicationNotScheduled();
         return null;
       }
 
@@ -3727,8 +3730,8 @@ public class BlockManager implements BlockStatsMXBean {
    * The given node is reporting that it received a certain block.
    */
   @VisibleForTesting
-  void addBlock(DatanodeStorageInfo storageInfo, Block block, String delHint)
-      throws IOException {
+  public void addBlock(DatanodeStorageInfo storageInfo, Block block,
+      String delHint) throws IOException {
     DatanodeDescriptor node = storageInfo.getDatanodeDescriptor();
     // Decrement number of blocks scheduled to this datanode.
     // for a retry request (of DatanodeProtocol#blockReceivedAndDeleted with 
@@ -3751,7 +3754,9 @@ public class BlockManager implements BlockStatsMXBean {
     BlockInfo storedBlock = getStoredBlock(block);
     if (storedBlock != null &&
         block.getGenerationStamp() == storedBlock.getGenerationStamp()) {
-      pendingReconstruction.decrement(storedBlock, node);
+      if (pendingReconstruction.decrement(storedBlock, node)) {
+        NameNode.getNameNodeMetrics().incSuccessfulReReplications();
+      }
     }
     processAndHandleReportedBlock(storageInfo, block, ReplicaState.FINALIZED,
         delHintNode);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6a9dc5f4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/PendingReconstructionBlocks.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/PendingReconstructionBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/PendingReconstructionBlocks.java
index 2221d1d..0f20daa 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/PendingReconstructionBlocks.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/PendingReconstructionBlocks.java
@@ -30,6 +30,7 @@ import java.util.List;
 import java.util.Map;
 
 import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.util.Daemon;
 import org.slf4j.Logger;
 
@@ -97,8 +98,10 @@ class PendingReconstructionBlocks {
    * for this block.
    *
    * @param dn The DataNode that finishes the reconstruction
+   * @return true if the block is decremented to 0 and got removed.
    */
-  void decrement(BlockInfo block, DatanodeDescriptor dn) {
+  boolean decrement(BlockInfo block, DatanodeDescriptor dn) {
+    boolean removed = false;
     synchronized (pendingReconstructions) {
       PendingBlockInfo found = pendingReconstructions.get(block);
       if (found != null) {
@@ -106,9 +109,11 @@ class PendingReconstructionBlocks {
         found.decrementReplicas(dn);
         if (found.getNumReplicas() <= 0) {
           pendingReconstructions.remove(block);
+          removed = true;
         }
       }
     }
+    return removed;
   }
 
   /**
@@ -263,6 +268,7 @@ class PendingReconstructionBlocks {
               timedOutItems.add(block);
             }
             LOG.warn("PendingReconstructionMonitor timed out " + block);
+            NameNode.getNameNodeMetrics().incTimeoutReReplications();
             iter.remove();
           }
         }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6a9dc5f4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/NameNodeMetrics.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/NameNodeMetrics.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/NameNodeMetrics.java
index cb81f5a..f2534e4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/NameNodeMetrics.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/NameNodeMetrics.java
@@ -58,6 +58,12 @@ public class NameNodeMetrics {
   @Metric MutableCounterLong createSymlinkOps;
   @Metric MutableCounterLong getLinkTargetOps;
   @Metric MutableCounterLong filesInGetListingOps;
+  @Metric ("Number of successful re-replications")
+  MutableCounterLong successfulReReplications;
+  @Metric ("Number of times we failed to schedule a block re-replication.")
+  MutableCounterLong numTimesReReplicationNotScheduled;
+  @Metric("Number of timed out block re-replications")
+  MutableCounterLong timeoutReReplications;
   @Metric("Number of allowSnapshot operations")
   MutableCounterLong allowSnapshotOps;
   @Metric("Number of disallowSnapshot operations")
@@ -300,6 +306,18 @@ public class NameNodeMetrics {
     transactionsBatchedInSync.incr(count);
   }
 
+  public void incSuccessfulReReplications() {
+    successfulReReplications.incr();
+  }
+
+  public void incNumTimesReReplicationNotScheduled() {
+    numTimesReReplicationNotScheduled.incr();
+  }
+
+  public void incTimeoutReReplications() {
+    timeoutReReplications.incr();
+  }
+
   public void addSync(long elapsed) {
     syncs.add(elapsed);
     for (MutableQuantiles q : syncsQuantiles) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6a9dc5f4/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingReconstruction.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingReconstruction.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingReconstruction.java
index 7679f9d..29ee953 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingReconstruction.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingReconstruction.java
@@ -17,13 +17,21 @@
  */
 package org.apache.hadoop.hdfs.server.blockmanagement;
 
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RECONSTRUCTION_PENDING_TIMEOUT_SEC_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY;
+import static org.apache.hadoop.test.MetricsAsserts.assertCounter;
+import static org.apache.hadoop.test.MetricsAsserts.getLongCounter;
+import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
 
+import java.io.IOException;
 import java.util.ArrayList;
+import java.util.concurrent.TimeoutException;
 
+import com.google.common.base.Supplier;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
@@ -44,6 +52,8 @@ import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo;
 import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo.BlockStatus;
 import org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks;
+import org.apache.hadoop.metrics2.MetricsRecordBuilder;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.Test;
 import org.mockito.Mockito;
 
@@ -178,7 +188,7 @@ public class TestPendingReconstruction {
   public void testProcessPendingReconstructions() throws Exception {
     final Configuration conf = new HdfsConfiguration();
     conf.setLong(
-        DFSConfigKeys.DFS_NAMENODE_RECONSTRUCTION_PENDING_TIMEOUT_SEC_KEY, TIMEOUT);
+        DFS_NAMENODE_RECONSTRUCTION_PENDING_TIMEOUT_SEC_KEY, TIMEOUT);
     MiniDFSCluster cluster = null;
     Block block;
     BlockInfo blockInfo;
@@ -418,7 +428,7 @@ public class TestPendingReconstruction {
     CONF.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 1024);
     CONF.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY,
         DFS_REPLICATION_INTERVAL);
-    CONF.setInt(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY,
+    CONF.setInt(DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY,
         DFS_REPLICATION_INTERVAL);
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(CONF).numDataNodes(
         DATANODE_COUNT).build();
@@ -471,4 +481,80 @@ public class TestPendingReconstruction {
       cluster.shutdown();
     }
   }
+
+  /**
+   * Test the metric counters of the re-replication process.
+   * @throws IOException
+   * @throws InterruptedException
+   * @throws TimeoutException
+   */
+  @Test (timeout = 300000)
+  public void testReplicationCounter() throws IOException,
+      InterruptedException, TimeoutException {
+    HdfsConfiguration conf = new HdfsConfiguration();
+    conf.setInt(DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY, 1);
+    conf.setInt(DFS_NAMENODE_RECONSTRUCTION_PENDING_TIMEOUT_SEC_KEY, 1);
+    MiniDFSCluster tmpCluster = new MiniDFSCluster.Builder(conf).numDataNodes(
+        DATANODE_COUNT).build();
+    tmpCluster.waitActive();
+    FSNamesystem fsn = tmpCluster.getNamesystem(0);
+    fsn.writeLock();
+
+    try {
+      BlockManager bm = fsn.getBlockManager();
+      BlocksMap blocksMap = bm.blocksMap;
+
+      // create three blockInfo below, blockInfo0 will success, blockInfo1 will
+      // time out, blockInfo2 will fail the replication.
+      BlockCollection bc0 = Mockito.mock(BlockCollection.class);
+      BlockInfo blockInfo0 = new BlockInfoContiguous((short) 3);
+      blockInfo0.setBlockId(0);
+
+      BlockCollection bc1 = Mockito.mock(BlockCollection.class);
+      BlockInfo blockInfo1 = new BlockInfoContiguous((short) 3);
+      blockInfo1.setBlockId(1);
+
+      BlockCollection bc2 = Mockito.mock(BlockCollection.class);
+      BlockInfo blockInfo2 = new BlockInfoContiguous((short) 3);
+      blockInfo2.setBlockId(2);
+
+      blocksMap.addBlockCollection(blockInfo0, bc0);
+      blocksMap.addBlockCollection(blockInfo1, bc1);
+      blocksMap.addBlockCollection(blockInfo2, bc2);
+
+      PendingReconstructionBlocks pending = bm.pendingReconstruction;
+
+      MetricsRecordBuilder rb = getMetrics("NameNodeActivity");
+      assertCounter("SuccessfulReReplications", 0L, rb);
+      assertCounter("NumTimesReReplicationNotScheduled", 0L, rb);
+      assertCounter("TimeoutReReplications", 0L, rb);
+
+      // add block0 and block1 to pending queue.
+      pending.increment(blockInfo0);
+      pending.increment(blockInfo1);
+
+      // call addBlock on block0 will make it successfully replicated.
+      // not calling addBlock on block1 will make it timeout later.
+      DatanodeStorageInfo[] storageInfos =
+          DFSTestUtil.createDatanodeStorageInfos(1);
+      bm.addBlock(storageInfos[0], blockInfo0, null);
+
+      // call schedule replication on blockInfo2 will fail the re-replication.
+      // because there is no source data to replicate from.
+      bm.scheduleReconstruction(blockInfo2, 0);
+
+      GenericTestUtils.waitFor(new Supplier<Boolean>() {
+        @Override
+        public Boolean get() {
+          MetricsRecordBuilder rb = getMetrics("NameNodeActivity");
+          return getLongCounter("SuccessfulReReplications", rb) == 1 &&
+              getLongCounter("NumTimesReReplicationNotScheduled", rb) == 1 &&
+              getLongCounter("TimeoutReReplications", rb) == 1;
+        }
+      }, 100, 60000);
+    } finally {
+      tmpCluster.shutdown();
+      fsn.writeUnlock();
+    }
+  }
 }
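
The three counters exercised above are registered on the "NameNodeActivity"
metrics record, so they should also surface through the usual metrics2 sinks
and JMX. A hedged sketch of reading them over JMX; the ObjectName follows
Hadoop's customary metrics naming and is an assumption here, and a remote
NameNode would need a JMXConnector instead of the in-process platform server:

    import java.lang.management.ManagementFactory;
    import javax.management.MBeanServer;
    import javax.management.ObjectName;

    public class ReReplicationCounters {
      public static void main(String[] args) throws Exception {
        MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
        ObjectName name =
            new ObjectName("Hadoop:service=NameNode,name=NameNodeActivity");
        for (String counter : new String[] {"SuccessfulReReplications",
            "NumTimesReReplicationNotScheduled", "TimeoutReReplications"}) {
          System.out.println(counter + " = " + mbs.getAttribute(name, counter));
        }
      }
    }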


[19/50] [abbrv] hadoop git commit: HADOOP-14571. Deprecate public APIs related to log4j1

Posted by as...@apache.org.
HADOOP-14571. Deprecate public APIs related to log4j1

This closes #244

Signed-off-by: Akira Ajisaka <aa...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f2aba1da
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f2aba1da
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f2aba1da

Branch: refs/heads/YARN-5972
Commit: f2aba1da30aae482a2d65696493b609948f9b904
Parents: 1aaa7f1
Author: Wenxin He <we...@gmail.com>
Authored: Tue Jun 27 11:51:34 2017 +0800
Committer: Akira Ajisaka <aa...@apache.org>
Committed: Tue Jul 4 18:55:20 2017 +0900

----------------------------------------------------------------------
 .../main/java/org/apache/hadoop/io/IOUtils.java |  3 ++
 .../java/org/apache/hadoop/util/LogAdapter.java |  4 +++
 .../apache/hadoop/test/GenericTestUtils.java    | 33 ++++++++++++++++++++
 3 files changed, 40 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f2aba1da/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/IOUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/IOUtils.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/IOUtils.java
index e24f196..a3bccef 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/IOUtils.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/IOUtils.java
@@ -246,7 +246,10 @@ public class IOUtils {
    *
    * @param log the log to record problems to at debug level. Can be null.
    * @param closeables the objects to close
+   * @deprecated use {@link #cleanupWithLogger(Logger, java.io.Closeable...)}
+   * instead
    */
+  @Deprecated
   public static void cleanup(Log log, java.io.Closeable... closeables) {
     for (java.io.Closeable c : closeables) {
       if (c != null) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f2aba1da/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/LogAdapter.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/LogAdapter.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/LogAdapter.java
index 6ef9093..b2bcbf5 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/LogAdapter.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/LogAdapter.java
@@ -32,6 +32,10 @@ class LogAdapter {
     this.LOGGER = LOGGER;
   }
 
+  /**
+   * @deprecated use {@link #create(Logger)} instead
+   */
+  @Deprecated
   public static LogAdapter create(Log LOG) {
     return new LogAdapter(LOG);
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f2aba1da/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
index 00dc7f2..77a79ff 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
@@ -94,20 +94,33 @@ public abstract class GenericTestUtils {
   public static final String ERROR_INVALID_ARGUMENT =
       "Total wait time should be greater than check interval time";
 
+  /**
+   * @deprecated use {@link #disableLog(org.slf4j.Logger)} instead
+   */
+  @Deprecated
   @SuppressWarnings("unchecked")
   public static void disableLog(Log log) {
     // We expect that commons-logging is a wrapper around Log4j.
     disableLog((Log4JLogger) log);
   }
 
+  @Deprecated
   public static Logger toLog4j(org.slf4j.Logger logger) {
     return LogManager.getLogger(logger.getName());
   }
 
+  /**
+   * @deprecated use {@link #disableLog(org.slf4j.Logger)} instead
+   */
+  @Deprecated
   public static void disableLog(Log4JLogger log) {
     log.getLogger().setLevel(Level.OFF);
   }
 
+  /**
+   * @deprecated use {@link #disableLog(org.slf4j.Logger)} instead
+   */
+  @Deprecated
   public static void disableLog(Logger logger) {
     logger.setLevel(Level.OFF);
   }
@@ -116,20 +129,40 @@ public abstract class GenericTestUtils {
     disableLog(toLog4j(logger));
   }
 
+  /**
+   * @deprecated
+   * use {@link #setLogLevel(org.slf4j.Logger, org.slf4j.event.Level)} instead
+   */
+  @Deprecated
   @SuppressWarnings("unchecked")
   public static void setLogLevel(Log log, Level level) {
     // We expect that commons-logging is a wrapper around Log4j.
     setLogLevel((Log4JLogger) log, level);
   }
 
+  /**
+   * @deprecated
+   * use {@link #setLogLevel(org.slf4j.Logger, org.slf4j.event.Level)} instead
+   */
+  @Deprecated
   public static void setLogLevel(Log4JLogger log, Level level) {
     log.getLogger().setLevel(level);
   }
 
+  /**
+   * @deprecated
+   * use {@link #setLogLevel(org.slf4j.Logger, org.slf4j.event.Level)} instead
+   */
+  @Deprecated
   public static void setLogLevel(Logger logger, Level level) {
     logger.setLevel(level);
   }
 
+  /**
+   * @deprecated
+   * use {@link #setLogLevel(org.slf4j.Logger, org.slf4j.event.Level)} instead
+   */
+  @Deprecated
   public static void setLogLevel(org.slf4j.Logger logger, Level level) {
     setLogLevel(toLog4j(logger), level);
   }
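
For callers migrating off the deprecated log4j1/commons-logging entry points,
the @deprecated javadoc above names the slf4j replacements. A small sketch of
the before/after, where the LOG field and the stream are illustrative:

    import java.io.InputStream;
    import org.apache.hadoop.io.IOUtils;
    import org.apache.hadoop.test.GenericTestUtils;
    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class Log4j1Migration {
      private static final Logger LOG =
          LoggerFactory.getLogger(Log4j1Migration.class);

      static void example(InputStream in) {
        // Before: IOUtils.cleanup(commonsLog, in);        // now @Deprecated
        IOUtils.cleanupWithLogger(LOG, in);
        // Before: GenericTestUtils.setLogLevel(log4jLog, Level.DEBUG);
        GenericTestUtils.setLogLevel(LOG, org.slf4j.event.Level.DEBUG);
      }
    }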


[12/50] [abbrv] hadoop git commit: HADOOP-14443. Azure: Support retry and client side failover for authorization, SASKey and delegation token generation. Contributed by Santhosh G Nayak

Posted by as...@apache.org.
HADOOP-14443. Azure: Support retry and client side failover for authorization, SASKey and delegation token generation. Contributed by Santhosh G Nayak


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/38996fdc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/38996fdc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/38996fdc

Branch: refs/heads/YARN-5972
Commit: 38996fdcf0987d1da00ce46f8284d8fcdce57329
Parents: bcba844
Author: Mingliang Liu <li...@apache.org>
Authored: Thu Jun 29 16:13:04 2017 -0700
Committer: Mingliang Liu <li...@apache.org>
Committed: Fri Jun 30 16:53:48 2017 -0700

----------------------------------------------------------------------
 .../hadoop/fs/azure/NativeAzureFileSystem.java  |  39 +--
 .../fs/azure/RemoteSASKeyGeneratorImpl.java     | 268 +++++++------------
 .../fs/azure/RemoteWasbAuthorizerImpl.java      | 225 ++++++----------
 .../fs/azure/SecureWasbRemoteCallHelper.java    | 210 +++++++++++++++
 .../hadoop/fs/azure/WasbRemoteCallHelper.java   | 259 +++++++++++++-----
 .../hadoop/fs/azure/security/Constants.java     |  20 +-
 .../hadoop/fs/azure/security/JsonUtils.java     |  52 ++++
 .../RemoteWasbDelegationTokenManager.java       | 162 +++++++++++
 .../hadoop/fs/azure/security/SecurityUtils.java |  86 ------
 .../hadoop/fs/azure/security/TokenUtils.java    |  60 +++++
 .../security/WasbDelegationTokenManager.java    |  54 ++++
 .../fs/azure/security/WasbTokenRenewer.java     |  77 +-----
 .../hadoop-azure/src/site/markdown/index.md     |  44 ++-
 .../TestNativeAzureFileSystemAuthorization.java |   2 +-
 .../fs/azure/TestWasbRemoteCallHelper.java      | 228 +++++++++++++---
 15 files changed, 1170 insertions(+), 616 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/38996fdc/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
index 22f79ff..f999992 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
@@ -27,9 +27,7 @@ import java.io.InputStream;
 import java.io.OutputStream;
 import java.net.URI;
 import java.net.URISyntaxException;
-import java.net.URL;
 import java.nio.charset.Charset;
-import java.security.PrivilegedExceptionAction;
 import java.text.SimpleDateFormat;
 import java.util.ArrayList;
 import java.util.Date;
@@ -65,15 +63,14 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.azure.metrics.AzureFileSystemInstrumentation;
 import org.apache.hadoop.fs.azure.metrics.AzureFileSystemMetricsSystem;
 import org.apache.hadoop.fs.azure.security.Constants;
-import org.apache.hadoop.fs.azure.security.SecurityUtils;
+import org.apache.hadoop.fs.azure.security.RemoteWasbDelegationTokenManager;
+import org.apache.hadoop.fs.azure.security.WasbDelegationTokenManager;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticatedURL;
-import org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticator;
-import org.apache.hadoop.security.token.delegation.web.KerberosDelegationTokenAuthenticator;
 import org.apache.hadoop.util.Progressable;
 import org.apache.hadoop.util.Time;
 import org.slf4j.Logger;
@@ -1177,7 +1174,7 @@ public class NativeAzureFileSystem extends FileSystem {
 
   private UserGroupInformation ugi;
 
-  private String delegationToken = null;
+  private WasbDelegationTokenManager wasbDelegationTokenManager;
 
   public NativeAzureFileSystem() {
     // set store in initialize()
@@ -1327,9 +1324,7 @@ public class NativeAzureFileSystem extends FileSystem {
     }
 
     if (UserGroupInformation.isSecurityEnabled() && kerberosSupportEnabled) {
-      DelegationTokenAuthenticator authenticator = new KerberosDelegationTokenAuthenticator();
-      authURL = new DelegationTokenAuthenticatedURL(authenticator);
-      credServiceUrl = SecurityUtils.getCredServiceUrls(conf);
+      this.wasbDelegationTokenManager = new RemoteWasbDelegationTokenManager(conf);
     }
   }
 
@@ -3002,31 +2997,7 @@ public class NativeAzureFileSystem extends FileSystem {
   @Override
   public synchronized Token<?> getDelegationToken(final String renewer) throws IOException {
     if (kerberosSupportEnabled) {
-      try {
-        final UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
-        UserGroupInformation connectUgi = ugi.getRealUser();
-        final UserGroupInformation proxyUser = connectUgi;
-        if (connectUgi == null) {
-          connectUgi = ugi;
-        }
-        connectUgi.checkTGTAndReloginFromKeytab();
-        return connectUgi.doAs(new PrivilegedExceptionAction<Token<?>>() {
-          @Override
-          public Token<?> run() throws Exception {
-            return authURL.getDelegationToken(new URL(credServiceUrl
-                    + Constants.DEFAULT_DELEGATION_TOKEN_MANAGER_ENDPOINT),
-                authToken, renewer, (proxyUser != null)? ugi.getShortUserName(): null);
-          }
-        });
-      } catch (Exception ex) {
-        LOG.error("Error in fetching the delegation token from remote service",
-            ex);
-        if (ex instanceof IOException) {
-          throw (IOException) ex;
-        } else {
-          throw new IOException(ex);
-        }
-      }
+      return wasbDelegationTokenManager.getDelegationToken(renewer);
     } else {
       return super.getDelegationToken(renewer);
     }
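
From a client's point of view the behavior of getDelegationToken is
unchanged; the retry and failover logic now lives inside
RemoteWasbDelegationTokenManager. A hedged usage sketch, with placeholder
account, container, and renewer values:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.security.token.Token;

    public class WasbTokenExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Placeholder wasb URI; kerberosSupportEnabled must be on for the
        // remote delegation token manager to be used.
        FileSystem fs = FileSystem.get(
            new Path("wasb://container@account.blob.core.windows.net/").toUri(),
            conf);
        Token<?> token = fs.getDelegationToken("yarn"); // renewer illustrative
        System.out.println(token == null ? "no token" : token.getKind().toString());
      }
    }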

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38996fdc/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/RemoteSASKeyGeneratorImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/RemoteSASKeyGeneratorImpl.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/RemoteSASKeyGeneratorImpl.java
index 0e9c700..87f3b0b 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/RemoteSASKeyGeneratorImpl.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/RemoteSASKeyGeneratorImpl.java
@@ -21,20 +21,16 @@ package org.apache.hadoop.fs.azure;
 import java.io.IOException;
 import java.net.URI;
 import java.net.URISyntaxException;
-import java.net.UnknownHostException;
-import java.security.PrivilegedExceptionAction;
+import java.util.List;
 
 import com.fasterxml.jackson.databind.ObjectReader;
-import org.apache.commons.lang.StringUtils;
-import org.apache.commons.lang.Validate;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.azure.security.Constants;
-import org.apache.hadoop.fs.azure.security.SecurityUtils;
+import org.apache.hadoop.io.retry.RetryPolicy;
+import org.apache.hadoop.io.retry.RetryUtils;
 import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.security.authentication.client.AuthenticatedURL;
-import org.apache.hadoop.security.authentication.client.AuthenticationException;
-import org.apache.hadoop.security.authentication.client.Authenticator;
-import org.apache.hadoop.security.token.delegation.web.KerberosDelegationTokenAuthenticator;
+
+import org.apache.http.NameValuePair;
 import org.apache.http.client.methods.HttpGet;
 import org.apache.http.client.utils.URIBuilder;
 
@@ -56,56 +52,65 @@ public class RemoteSASKeyGeneratorImpl extends SASKeyGeneratorImpl {
 
   public static final Logger LOG =
       LoggerFactory.getLogger(AzureNativeFileSystemStore.class);
-
   private static final ObjectReader RESPONSE_READER = new ObjectMapper()
       .readerFor(RemoteSASKeyGenerationResponse.class);
 
   /**
+   * Configuration parameter name expected in the Configuration
+   * object to provide the url of the remote service {@value}
+   */
+  public static final String KEY_CRED_SERVICE_URLS =
+      "fs.azure.cred.service.urls";
+  /**
+   * Configuration key to enable http retry policy for SAS Key generation. {@value}
+   */
+  public static final String
+      SAS_KEY_GENERATOR_HTTP_CLIENT_RETRY_POLICY_ENABLED_KEY =
+      "fs.azure.saskeygenerator.http.retry.policy.enabled";
+  /**
+   * Configuration key for SAS Key Generation http retry policy spec. {@value}
+   */
+  public static final String
+      SAS_KEY_GENERATOR_HTTP_CLIENT_RETRY_POLICY_SPEC_KEY =
+      "fs.azure.saskeygenerator.http.retry.policy.spec";
+  /**
    * Container SAS Key generation OP name. {@value}
    */
   private static final String CONTAINER_SAS_OP = "GET_CONTAINER_SAS";
-
   /**
    * Relative Blob SAS Key generation OP name. {@value}
    */
   private static final String BLOB_SAS_OP = "GET_RELATIVE_BLOB_SAS";
-
   /**
    * Query parameter specifying the expiry period to be used for sas key
    * {@value}
    */
   private static final String SAS_EXPIRY_QUERY_PARAM_NAME = "sas_expiry";
-
   /**
    * Query parameter name for the storage account. {@value}
    */
   private static final String STORAGE_ACCOUNT_QUERY_PARAM_NAME =
       "storage_account";
-
   /**
    * Query parameter name for the storage account container. {@value}
    */
-  private static final String CONTAINER_QUERY_PARAM_NAME =
-      "container";
-
-  /**
-   * Query parameter name for user info {@value}
-   */
-  private static final String DELEGATION_TOKEN_QUERY_PARAM_NAME =
-      "delegation";
-
+  private static final String CONTAINER_QUERY_PARAM_NAME = "container";
   /**
    * Query parameter name for the relative path inside the storage
    * account container. {@value}
    */
-  private static final String RELATIVE_PATH_QUERY_PARAM_NAME =
-      "relative_path";
+  private static final String RELATIVE_PATH_QUERY_PARAM_NAME = "relative_path";
+  /**
+   * SAS Key Generation Remote http client retry policy spec. {@value}
+   */
+  private static final String
+      SAS_KEY_GENERATOR_HTTP_CLIENT_RETRY_POLICY_SPEC_DEFAULT =
+      "1000,3,10000,2";
 
-  private String delegationToken;
-  private String credServiceUrl = "";
   private WasbRemoteCallHelper remoteCallHelper = null;
-  private boolean isSecurityEnabled;
   private boolean isKerberosSupportEnabled;
+  private RetryPolicy retryPolicy;
+  private String[] commaSeparatedUrls;
 
   public RemoteSASKeyGeneratorImpl(Configuration conf) {
     super(conf);
@@ -114,180 +119,111 @@ public class RemoteSASKeyGeneratorImpl extends SASKeyGeneratorImpl {
   public void initialize(Configuration conf) throws IOException {
 
     LOG.debug("Initializing RemoteSASKeyGeneratorImpl instance");
-    setDelegationToken();
-    try {
-      credServiceUrl = SecurityUtils.getCredServiceUrls(conf);
-    } catch (UnknownHostException e) {
-      final String msg = "Invalid CredService Url, configure it correctly";
-      LOG.error(msg, e);
-      throw new IOException(msg, e);
-    }
 
-    if (credServiceUrl == null || credServiceUrl.isEmpty()) {
-      final String msg = "CredService Url not found in configuration to "
-          + "initialize RemoteSASKeyGenerator";
-      LOG.error(msg);
-      throw new IOException(msg);
+    this.retryPolicy = RetryUtils.getMultipleLinearRandomRetry(conf,
+        SAS_KEY_GENERATOR_HTTP_CLIENT_RETRY_POLICY_ENABLED_KEY, true,
+        SAS_KEY_GENERATOR_HTTP_CLIENT_RETRY_POLICY_SPEC_KEY,
+        SAS_KEY_GENERATOR_HTTP_CLIENT_RETRY_POLICY_SPEC_DEFAULT);
+
+    this.isKerberosSupportEnabled =
+        conf.getBoolean(Constants.AZURE_KERBEROS_SUPPORT_PROPERTY_NAME, false);
+    this.commaSeparatedUrls = conf.getTrimmedStrings(KEY_CRED_SERVICE_URLS);
+    if (this.commaSeparatedUrls == null || this.commaSeparatedUrls.length <= 0) {
+      throw new IOException(
+          KEY_CRED_SERVICE_URLS + " config not set" + " in configuration.");
+    }
+    if (isKerberosSupportEnabled && UserGroupInformation.isSecurityEnabled()) {
+      this.remoteCallHelper = new SecureWasbRemoteCallHelper(retryPolicy, false);
+    } else {
+      this.remoteCallHelper = new WasbRemoteCallHelper(retryPolicy);
     }
-
-    remoteCallHelper = new WasbRemoteCallHelper();
-    this.isSecurityEnabled = UserGroupInformation.isSecurityEnabled();
-    this.isKerberosSupportEnabled = conf.getBoolean(
-        Constants.AZURE_KERBEROS_SUPPORT_PROPERTY_NAME, false);
     LOG.debug("Initialization of RemoteSASKeyGenerator instance successful");
   }
 
   @Override
-  public URI getContainerSASUri(String storageAccount, String container)
-      throws SASKeyGenerationException {
+  public URI getContainerSASUri(String storageAccount,
+      String container) throws SASKeyGenerationException {
+    RemoteSASKeyGenerationResponse sasKeyResponse = null;
     try {
-      LOG.debug("Generating Container SAS Key for Container {} "
-          + "inside Storage Account {} ", container, storageAccount);
-      setDelegationToken();
-      URIBuilder uriBuilder = new URIBuilder(credServiceUrl);
+      URIBuilder uriBuilder = new URIBuilder();
       uriBuilder.setPath("/" + CONTAINER_SAS_OP);
-      uriBuilder.addParameter(STORAGE_ACCOUNT_QUERY_PARAM_NAME,
-          storageAccount);
-      uriBuilder.addParameter(CONTAINER_QUERY_PARAM_NAME,
-          container);
-      uriBuilder.addParameter(SAS_EXPIRY_QUERY_PARAM_NAME, ""
-          + getSasKeyExpiryPeriod());
-      if (isSecurityEnabled && StringUtils.isNotEmpty(delegationToken)) {
-        uriBuilder.addParameter(DELEGATION_TOKEN_QUERY_PARAM_NAME,
-            this.delegationToken);
-      }
+      uriBuilder.addParameter(STORAGE_ACCOUNT_QUERY_PARAM_NAME, storageAccount);
+      uriBuilder.addParameter(CONTAINER_QUERY_PARAM_NAME, container);
+      uriBuilder.addParameter(SAS_EXPIRY_QUERY_PARAM_NAME,
+          "" + getSasKeyExpiryPeriod());
 
-      UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
-      UserGroupInformation connectUgi = ugi.getRealUser();
-      if (connectUgi == null) {
-        connectUgi = ugi;
+      sasKeyResponse = makeRemoteRequest(commaSeparatedUrls, uriBuilder.getPath(),
+              uriBuilder.getQueryParams());
+
+      if (sasKeyResponse.getResponseCode() == REMOTE_CALL_SUCCESS_CODE) {
+        return new URI(sasKeyResponse.getSasKey());
       } else {
-        uriBuilder.addParameter(Constants.DOAS_PARAM, ugi.getShortUserName());
+        throw new SASKeyGenerationException(
+            "Remote Service encountered error in SAS Key generation : "
+                + sasKeyResponse.getResponseMessage());
       }
-      return getSASKey(uriBuilder.build(), connectUgi);
     } catch (URISyntaxException uriSyntaxEx) {
-      throw new SASKeyGenerationException("Encountered URISyntaxException "
-          + "while building the HttpGetRequest to remote cred service",
+      throw new SASKeyGenerationException("Encountered URISyntaxException"
+          + " while building the HttpGetRequest to remote service for ",
           uriSyntaxEx);
-    } catch (IOException e) {
-      throw new SASKeyGenerationException("Encountered IOException"
-          + " while building the HttpGetRequest to remote service", e);
     }
   }
 
   @Override
-  public URI getRelativeBlobSASUri(String storageAccount, String container,
-      String relativePath) throws SASKeyGenerationException {
+  public URI getRelativeBlobSASUri(String storageAccount,
+      String container, String relativePath) throws SASKeyGenerationException {
+
     try {
-      LOG.debug("Generating RelativePath SAS Key for relativePath {} inside"
-              + " Container {} inside Storage Account {} ",
-          relativePath, container, storageAccount);
-      setDelegationToken();
-      URIBuilder uriBuilder = new URIBuilder(credServiceUrl);
+      URIBuilder uriBuilder = new URIBuilder();
       uriBuilder.setPath("/" + BLOB_SAS_OP);
-      uriBuilder.addParameter(STORAGE_ACCOUNT_QUERY_PARAM_NAME,
-          storageAccount);
-      uriBuilder.addParameter(CONTAINER_QUERY_PARAM_NAME,
-          container);
-      uriBuilder.addParameter(RELATIVE_PATH_QUERY_PARAM_NAME,
-          relativePath);
-      uriBuilder.addParameter(SAS_EXPIRY_QUERY_PARAM_NAME, ""
-          + getSasKeyExpiryPeriod());
-
-      if (isSecurityEnabled && StringUtils.isNotEmpty(
-          delegationToken)) {
-        uriBuilder.addParameter(DELEGATION_TOKEN_QUERY_PARAM_NAME,
-            this.delegationToken);
-      }
-
-      UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
-      UserGroupInformation connectUgi = ugi.getRealUser();
-      if (connectUgi == null) {
-        connectUgi = ugi;
+      uriBuilder.addParameter(STORAGE_ACCOUNT_QUERY_PARAM_NAME, storageAccount);
+      uriBuilder.addParameter(CONTAINER_QUERY_PARAM_NAME, container);
+      uriBuilder.addParameter(RELATIVE_PATH_QUERY_PARAM_NAME, relativePath);
+      uriBuilder.addParameter(SAS_EXPIRY_QUERY_PARAM_NAME,
+          "" + getSasKeyExpiryPeriod());
+
+      RemoteSASKeyGenerationResponse sasKeyResponse =
+          makeRemoteRequest(commaSeparatedUrls, uriBuilder.getPath(),
+              uriBuilder.getQueryParams());
+      if (sasKeyResponse.getResponseCode() == REMOTE_CALL_SUCCESS_CODE) {
+        return new URI(sasKeyResponse.getSasKey());
       } else {
-        uriBuilder.addParameter(Constants.DOAS_PARAM, ugi.getShortUserName());
+        throw new SASKeyGenerationException(
+            "Remote Service encountered error in SAS Key generation : "
+                + sasKeyResponse.getResponseMessage());
       }
-      return getSASKey(uriBuilder.build(), connectUgi);
     } catch (URISyntaxException uriSyntaxEx) {
       throw new SASKeyGenerationException("Encountered URISyntaxException"
           + " while building the HttpGetRequest to " + " remote service",
           uriSyntaxEx);
-    } catch (IOException e) {
-      throw new SASKeyGenerationException("Encountered IOException"
-          + " while building the HttpGetRequest to remote service", e);
-    }
-  }
-
-  private URI getSASKey(final URI uri, UserGroupInformation connectUgi)
-      throws URISyntaxException, SASKeyGenerationException {
-    final RemoteSASKeyGenerationResponse sasKeyResponse;
-    try {
-      sasKeyResponse = connectUgi.doAs(
-          new PrivilegedExceptionAction<RemoteSASKeyGenerationResponse>() {
-            @Override
-            public RemoteSASKeyGenerationResponse run() throws Exception {
-              AuthenticatedURL.Token token = null;
-              if (isKerberosSupportEnabled && UserGroupInformation
-                  .isSecurityEnabled() && (delegationToken == null
-                  || delegationToken.isEmpty())) {
-                token = new AuthenticatedURL.Token();
-                final Authenticator kerberosAuthenticator =
-                    new KerberosDelegationTokenAuthenticator();
-                try {
-                  kerberosAuthenticator.authenticate(uri.toURL(), token);
-                  Validate.isTrue(token.isSet(),
-                      "Authenticated Token is NOT present. "
-                          + "The request cannot proceed.");
-                } catch (AuthenticationException e) {
-                  throw new IOException(
-                      "Authentication failed in check authorization", e);
-                }
-              }
-              return makeRemoteRequest(uri,
-                  (token != null ? token.toString() : null));
-            }
-          });
-    } catch (InterruptedException | IOException e) {
-      final String msg = "Error fetching SAS Key from Remote Service: " + uri;
-      LOG.error(msg, e);
-      if (e instanceof InterruptedException) {
-        Thread.currentThread().interrupt();
-      }
-      throw new SASKeyGenerationException(msg, e);
-    }
-
-    if (sasKeyResponse.getResponseCode() == REMOTE_CALL_SUCCESS_CODE) {
-      return new URI(sasKeyResponse.getSasKey());
-    } else {
-      throw new SASKeyGenerationException(
-          "Remote Service encountered error in SAS Key generation : "
-              + sasKeyResponse.getResponseMessage());
     }
   }
 
   /**
    * Helper method to make a remote request.
-   * @param uri - Uri to use for the remote request
-   * @param token - hadoop.auth token for the remote request
+   *
+   * @param urls        - Urls to use for the remote request
+   * @param path        - hadoop.auth token for the remote request
+   * @param queryParams - queryParams to be used.
    * @return RemoteSASKeyGenerationResponse
    */
-  private RemoteSASKeyGenerationResponse makeRemoteRequest(URI uri,
-      String token) throws SASKeyGenerationException {
+  private RemoteSASKeyGenerationResponse makeRemoteRequest(String[] urls,
+      String path, List<NameValuePair> queryParams)
+      throws SASKeyGenerationException {
 
     try {
-      HttpGet httpGet = new HttpGet(uri);
-      if (token != null) {
-        httpGet.setHeader("Cookie", AuthenticatedURL.AUTH_COOKIE + "=" + token);
-      }
-      String responseBody = remoteCallHelper.makeRemoteGetRequest(httpGet);
+      String responseBody = remoteCallHelper
+          .makeRemoteRequest(urls, path, queryParams, HttpGet.METHOD_NAME);
       return RESPONSE_READER.readValue(responseBody);
+
     } catch (WasbRemoteCallException remoteCallEx) {
       throw new SASKeyGenerationException("Encountered RemoteCallException"
           + " while retrieving SAS key from remote service", remoteCallEx);
     } catch (JsonParseException jsonParserEx) {
       throw new SASKeyGenerationException("Encountered JsonParseException "
           + "while parsing the response from remote"
-          + " service into RemoteSASKeyGenerationResponse object", jsonParserEx);
+          + " service into RemoteSASKeyGenerationResponse object",
+          jsonParserEx);
     } catch (JsonMappingException jsonMappingEx) {
       throw new SASKeyGenerationException("Encountered JsonMappingException"
           + " while mapping the response from remote service into "
@@ -297,10 +233,6 @@ public class RemoteSASKeyGeneratorImpl extends SASKeyGeneratorImpl {
           + "accessing remote service to retrieve SAS Key", ioEx);
     }
   }
-
-  private void setDelegationToken() throws IOException {
-    this.delegationToken = SecurityUtils.getDelegationTokenFromCredentials();
-  }
 }
 
 /**
@@ -309,9 +241,9 @@ public class RemoteSASKeyGeneratorImpl extends SASKeyGeneratorImpl {
  * The remote SAS Key generation service is expected to
  * return SAS key in json format:
  * {
- *    "responseCode" : 0 or non-zero <int>,
- *    "responseMessage" : relavant message on failure <String>,
- *    "sasKey" : Requested SAS Key <String>
+ *   "responseCode" : 0 or non-zero <int>,
+ *   "responseMessage" : relavant message on failure <String>,
+ *   "sasKey" : Requested SAS Key <String>
  * }
  */
 class RemoteSASKeyGenerationResponse {
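
For context, here is a minimal decoding sketch, not part of the patch, showing how the JSON contract above maps onto the response POJO via the same Jackson reader the generator uses. It assumes it compiles in package org.apache.hadoop.fs.azure (the POJO is package-private) and that the POJO has setters matching the field names; the sasKey value is a made-up placeholder.

    package org.apache.hadoop.fs.azure;

    import com.fasterxml.jackson.databind.ObjectMapper;
    import com.fasterxml.jackson.databind.ObjectReader;

    public class SasKeyResponseDecodeSketch {
      // Same reader construction as RemoteSASKeyGeneratorImpl's RESPONSE_READER.
      private static final ObjectReader READER =
          new ObjectMapper().readerFor(RemoteSASKeyGenerationResponse.class);

      public static void main(String[] args) throws Exception {
        String body = "{\"responseCode\":0,\"responseMessage\":null,"
            + "\"sasKey\":\"/container/blob?sv=2015-04-05\"}";  // placeholder
        RemoteSASKeyGenerationResponse resp = READER.readValue(body);
        if (resp.getResponseCode() == WasbRemoteCallHelper.REMOTE_CALL_SUCCESS_CODE) {
          System.out.println("SAS key: " + resp.getSasKey());
        } else {
          System.err.println("Failure: " + resp.getResponseMessage());
        }
      }
    }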

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38996fdc/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/RemoteWasbAuthorizerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/RemoteWasbAuthorizerImpl.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/RemoteWasbAuthorizerImpl.java
index b1e671d..e2d515c 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/RemoteWasbAuthorizerImpl.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/RemoteWasbAuthorizerImpl.java
@@ -24,23 +24,17 @@ import com.fasterxml.jackson.databind.ObjectMapper;
 import com.fasterxml.jackson.databind.ObjectReader;
 import com.google.common.annotations.VisibleForTesting;
 import org.apache.commons.lang.StringUtils;
-import org.apache.commons.lang.Validate;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.azure.security.Constants;
-import org.apache.hadoop.fs.azure.security.SecurityUtils;
+import org.apache.hadoop.io.retry.RetryPolicy;
+import org.apache.hadoop.io.retry.RetryUtils;
 import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.security.authentication.client.AuthenticatedURL;
-import org.apache.hadoop.security.authentication.client.AuthenticationException;
-import org.apache.hadoop.security.authentication.client.Authenticator;
-import org.apache.hadoop.security.token.delegation.web.KerberosDelegationTokenAuthenticator;
 import org.apache.http.client.methods.HttpGet;
 import org.apache.http.client.utils.URIBuilder;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import java.io.IOException;
-import java.net.URISyntaxException;
-import java.security.PrivilegedExceptionAction;
 
 import static org.apache.hadoop.fs.azure.WasbRemoteCallHelper.REMOTE_CALL_SUCCESS_CODE;
 
@@ -55,54 +49,59 @@ public class RemoteWasbAuthorizerImpl implements WasbAuthorizerInterface {
   public static final Logger LOG = LoggerFactory
       .getLogger(RemoteWasbAuthorizerImpl.class);
   private static final ObjectReader RESPONSE_READER = new ObjectMapper()
-      .readerFor(RemoteAuthorizerResponse.class);
-
-  private String remoteAuthorizerServiceUrl = null;
+      .readerFor(RemoteWasbAuthorizerResponse.class);
 
   /**
    * Configuration parameter name expected in the Configuration object to
-   * provide the url of the remote service. {@value}
+   * provide the urls of the remote service instances. {@value}
    */
-  public static final String KEY_REMOTE_AUTH_SERVICE_URL =
-      "fs.azure.authorization.remote.service.url";
-
+  public static final String KEY_REMOTE_AUTH_SERVICE_URLS =
+      "fs.azure.authorization.remote.service.urls";
   /**
    * Authorization operation OP name in the remote service {@value}
    */
-  private static final String CHECK_AUTHORIZATION_OP =
-      "CHECK_AUTHORIZATION";
-
+  private static final String CHECK_AUTHORIZATION_OP = "CHECK_AUTHORIZATION";
   /**
    * Query parameter specifying the access operation type. {@value}
    */
   private static final String ACCESS_OPERATION_QUERY_PARAM_NAME =
       "operation_type";
-
   /**
    * Query parameter specifying the wasb absolute path. {@value}
    */
   private static final String WASB_ABSOLUTE_PATH_QUERY_PARAM_NAME =
       "wasb_absolute_path";
+  /**
+   * Query parameter name for sending the owner of the specific resource {@value}
+   */
+  private static final String WASB_RESOURCE_OWNER_QUERY_PARAM_NAME =
+      "wasb_resource_owner";
 
   /**
-   * Query parameter name for user info {@value}
+   * Authorization Remote http client retry policy enabled configuration key. {@value}
    */
-  private static final String DELEGATION_TOKEN_QUERY_PARAM_NAME =
-      "delegation";
+  private static final String AUTHORIZER_HTTP_CLIENT_RETRY_POLICY_ENABLED_KEY =
+      "fs.azure.authorizer.http.retry.policy.enabled";
 
   /**
-   *  Query parameter name for sending owner of the specific resource {@value}
+   * Authorization Remote http client retry policy spec. {@value}
    */
-  private static final String WASB_RESOURCE_OWNER_QUERY_PARAM_NAME =
-      "wasb_resource_owner";
+  private static final String AUTHORIZER_HTTP_CLIENT_RETRY_POLICY_SPEC_SPEC =
+      "fs.azure.authorizer.http.retry.policy.spec";
+
+  /**
+   * Authorization Remote http client retry policy spec default value. {@value}
+   */
+  private static final String AUTHORIZER_HTTP_CLIENT_RETRY_POLICY_SPEC_DEFAULT =
+      "1000,3,10000,2";
 
   private WasbRemoteCallHelper remoteCallHelper = null;
-  private String delegationToken;
-  private boolean isSecurityEnabled;
   private boolean isKerberosSupportEnabled;
+  private RetryPolicy retryPolicy;
+  private String[] commaSeparatedUrls = null;
 
-  @VisibleForTesting
-  public void updateWasbRemoteCallHelper(WasbRemoteCallHelper helper) {
+  @VisibleForTesting public void updateWasbRemoteCallHelper(
+      WasbRemoteCallHelper helper) {
     this.remoteCallHelper = helper;
   }
 
@@ -110,114 +109,63 @@ public class RemoteWasbAuthorizerImpl implements WasbAuthorizerInterface {
   public void init(Configuration conf)
       throws WasbAuthorizationException, IOException {
     LOG.debug("Initializing RemoteWasbAuthorizerImpl instance");
-    setDelegationToken();
-    remoteAuthorizerServiceUrl = SecurityUtils
-        .getRemoteAuthServiceUrls(conf);
-
-    if (remoteAuthorizerServiceUrl == null
-          || remoteAuthorizerServiceUrl.isEmpty()) {
-      throw new WasbAuthorizationException(
-          "fs.azure.authorization.remote.service.url config not set"
-              + " in configuration.");
+    this.isKerberosSupportEnabled =
+        conf.getBoolean(Constants.AZURE_KERBEROS_SUPPORT_PROPERTY_NAME, false);
+    this.commaSeparatedUrls =
+        conf.getTrimmedStrings(KEY_REMOTE_AUTH_SERVICE_URLS);
+    if (this.commaSeparatedUrls == null
+        || this.commaSeparatedUrls.length <= 0) {
+      throw new IOException(KEY_REMOTE_AUTH_SERVICE_URLS + " config not set"
+          + " in configuration.");
+    }
+    this.retryPolicy = RetryUtils.getMultipleLinearRandomRetry(conf,
+        AUTHORIZER_HTTP_CLIENT_RETRY_POLICY_ENABLED_KEY, true,
+        AUTHORIZER_HTTP_CLIENT_RETRY_POLICY_SPEC_SPEC,
+        AUTHORIZER_HTTP_CLIENT_RETRY_POLICY_SPEC_DEFAULT);
+    if (isKerberosSupportEnabled && UserGroupInformation.isSecurityEnabled()) {
+      this.remoteCallHelper = new SecureWasbRemoteCallHelper(retryPolicy, false);
+    } else {
+      this.remoteCallHelper = new WasbRemoteCallHelper(retryPolicy);
     }
-
-    this.remoteCallHelper = new WasbRemoteCallHelper();
-    this.isSecurityEnabled = UserGroupInformation.isSecurityEnabled();
-    this.isKerberosSupportEnabled = conf
-        .getBoolean(Constants.AZURE_KERBEROS_SUPPORT_PROPERTY_NAME, false);
   }
 
   @Override
   public boolean authorize(String wasbAbsolutePath, String accessType, String resourceOwner)
       throws WasbAuthorizationException, IOException {
 
-      try {
-
+    try {
         /* Make an exception for the internal -RenamePending files */
-        if (wasbAbsolutePath.endsWith(NativeAzureFileSystem.FolderRenamePending.SUFFIX)) {
-          return true;
-        }
-
-        setDelegationToken();
-        URIBuilder uriBuilder = new URIBuilder(remoteAuthorizerServiceUrl);
-        uriBuilder.setPath("/" + CHECK_AUTHORIZATION_OP);
-        uriBuilder.addParameter(WASB_ABSOLUTE_PATH_QUERY_PARAM_NAME,
-            wasbAbsolutePath);
-        uriBuilder.addParameter(ACCESS_OPERATION_QUERY_PARAM_NAME,
-            accessType);
-        if (isSecurityEnabled && StringUtils.isNotEmpty(delegationToken)) {
-          uriBuilder.addParameter(DELEGATION_TOKEN_QUERY_PARAM_NAME,
-              delegationToken);
-        }
-        if (resourceOwner != null && StringUtils.isNotEmpty(resourceOwner)) {
-          uriBuilder.addParameter(WASB_RESOURCE_OWNER_QUERY_PARAM_NAME,
-              resourceOwner);
-        }
-
-        String responseBody = null;
-        UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
-        UserGroupInformation connectUgi = ugi.getRealUser();
-        if (connectUgi == null) {
-          connectUgi = ugi;
-        } else {
-          uriBuilder.addParameter(Constants.DOAS_PARAM, ugi.getShortUserName());
-        }
-
-        try {
-          responseBody = connectUgi
-              .doAs(new PrivilegedExceptionAction<String>() {
-                @Override
-                public String run() throws Exception {
-                  AuthenticatedURL.Token token = null;
-                  HttpGet httpGet = new HttpGet(uriBuilder.build());
-                  if (isKerberosSupportEnabled && UserGroupInformation
-                      .isSecurityEnabled() && (delegationToken == null
-                      || delegationToken.isEmpty())) {
-                    token = new AuthenticatedURL.Token();
-                    final Authenticator kerberosAuthenticator = new KerberosDelegationTokenAuthenticator();
-                    try {
-                      kerberosAuthenticator
-                          .authenticate(uriBuilder.build().toURL(), token);
-                      Validate.isTrue(token.isSet(),
-                          "Authenticated Token is NOT present. The request cannot proceed.");
-                    } catch (AuthenticationException e){
-                      throw new IOException("Authentication failed in check authorization", e);
-                    }
-                    if (token != null) {
-                      httpGet.setHeader("Cookie",
-                          AuthenticatedURL.AUTH_COOKIE + "=" + token);
-                    }
-                  }
-                  return remoteCallHelper.makeRemoteGetRequest(httpGet);
-                }
-              });
-        } catch (InterruptedException e) {
-          LOG.error("Error in check authorization", e);
-          throw new WasbAuthorizationException("Error in check authorize", e);
-        }
-
-        RemoteAuthorizerResponse authorizerResponse =
-            RESPONSE_READER.readValue(responseBody);
-
-        if (authorizerResponse == null) {
-          throw new WasbAuthorizationException(
-              "RemoteAuthorizerResponse object null from remote call");
-        } else if (authorizerResponse.getResponseCode()
-            == REMOTE_CALL_SUCCESS_CODE) {
-          return authorizerResponse.getAuthorizationResult();
-        } else {
-          throw new WasbAuthorizationException("Remote authorization"
-              + " service encountered an error "
-              + authorizerResponse.getResponseMessage());
-        }
-      } catch (URISyntaxException | WasbRemoteCallException
-          | JsonParseException | JsonMappingException ex) {
-        throw new WasbAuthorizationException(ex);
+      final URIBuilder uriBuilder = new URIBuilder();
+      uriBuilder.setPath("/" + CHECK_AUTHORIZATION_OP);
+      uriBuilder
+          .addParameter(WASB_ABSOLUTE_PATH_QUERY_PARAM_NAME, wasbAbsolutePath);
+      uriBuilder.addParameter(ACCESS_OPERATION_QUERY_PARAM_NAME, accessType);
+      if (resourceOwner != null && StringUtils.isNotEmpty(resourceOwner)) {
+        uriBuilder.addParameter(WASB_RESOURCE_OWNER_QUERY_PARAM_NAME,
+            resourceOwner);
       }
-  }
 
-  private void setDelegationToken() throws IOException {
-    this.delegationToken = SecurityUtils.getDelegationTokenFromCredentials();
+      String responseBody = remoteCallHelper
+          .makeRemoteRequest(commaSeparatedUrls, uriBuilder.getPath(),
+              uriBuilder.getQueryParams(), HttpGet.METHOD_NAME);
+
+      RemoteWasbAuthorizerResponse authorizerResponse = RESPONSE_READER
+          .readValue(responseBody);
+
+      if (authorizerResponse == null) {
+        throw new WasbAuthorizationException(
+            "RemoteWasbAuthorizerResponse object null from remote call");
+      } else if (authorizerResponse.getResponseCode()
+          == REMOTE_CALL_SUCCESS_CODE) {
+        return authorizerResponse.getAuthorizationResult();
+      } else {
+        throw new WasbAuthorizationException(
+            "Remote authorization" + " service encountered an error "
+                + authorizerResponse.getResponseMessage());
+      }
+    } catch (WasbRemoteCallException | JsonParseException | JsonMappingException ex) {
+      throw new WasbAuthorizationException(ex);
+    }
   }
 }
 
@@ -227,30 +175,19 @@ public class RemoteWasbAuthorizerImpl implements WasbAuthorizerInterface {
  * The remote service is expected to return the authorization
  * response in the following JSON format
  * {
- *    "responseCode" : 0 or non-zero <int>,
- *    "responseMessage" : relevant message of failure <String>
- *    "authorizationResult" : authorization result <boolean>
- *                            true - if auhorization allowed
- *                            false - otherwise.
- *
+ *   "responseCode" : 0 or non-zero <int>,
+ *   "responseMessage" : relevant message of failure <String>
+ *   "authorizationResult" : authorization result <boolean>
+ *   true - if authorization allowed
+ *   false - otherwise.
  * }
  */
-class RemoteAuthorizerResponse {
+class RemoteWasbAuthorizerResponse {
 
   private int responseCode;
   private boolean authorizationResult;
   private String responseMessage;
 
-  public RemoteAuthorizerResponse(int responseCode,
-      boolean authorizationResult, String message) {
-    this.responseCode = responseCode;
-    this.authorizationResult = authorizationResult;
-    this.responseMessage = message;
-  }
-
-  public RemoteAuthorizerResponse() {
-  }
-
   public int getResponseCode() {
     return responseCode;
   }
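
To make the new configuration surface concrete, here is a hedged sketch, not part of the patch, of wiring the comma-separated authorizer URLs and the retry knobs; the host names are placeholders. The spec string is parsed by RetryPolicies.MultipleLinearRandomRetry as (sleepMillis, numberOfRetries) pairs, so the default "1000,3,10000,2" means roughly three retries about one second apart, then two more about ten seconds apart.

    import org.apache.hadoop.conf.Configuration;

    public class AuthorizerRetryConfigSketch {
      public static Configuration build() {
        Configuration conf = new Configuration();
        // Two authorizer instances; the call helper fails over between them.
        conf.set("fs.azure.authorization.remote.service.urls",
            "http://authz1.example.com:50911,http://authz2.example.com:50911");
        conf.setBoolean("fs.azure.authorizer.http.retry.policy.enabled", true);
        conf.set("fs.azure.authorizer.http.retry.policy.spec", "1000,3,10000,2");
        return conf;
      }
    }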

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38996fdc/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SecureWasbRemoteCallHelper.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SecureWasbRemoteCallHelper.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SecureWasbRemoteCallHelper.java
new file mode 100644
index 0000000..7f8bc0e
--- /dev/null
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SecureWasbRemoteCallHelper.java
@@ -0,0 +1,210 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.azure;
+
+import org.apache.commons.lang.Validate;
+import org.apache.hadoop.fs.azure.security.Constants;
+import org.apache.hadoop.fs.azure.security.WasbDelegationTokenIdentifier;
+import org.apache.hadoop.io.retry.RetryPolicy;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.authentication.client.AuthenticatedURL;
+import org.apache.hadoop.security.authentication.client.AuthenticationException;
+import org.apache.hadoop.security.authentication.client.Authenticator;
+import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.security.token.TokenIdentifier;
+import org.apache.hadoop.security.token.delegation.web.KerberosDelegationTokenAuthenticator;
+import org.apache.http.NameValuePair;
+import org.apache.http.client.methods.HttpGet;
+import org.apache.http.client.methods.HttpPost;
+import org.apache.http.client.methods.HttpPut;
+import org.apache.http.client.methods.HttpUriRequest;
+import org.apache.http.client.utils.URIBuilder;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.net.URISyntaxException;
+import java.security.PrivilegedExceptionAction;
+import java.util.List;
+
+/**
+ * Helper class that has constants and helper methods
+ * used in WASB when integrating with a remote HTTP cred
+ * service which uses Kerberos and delegation tokens.
+ * Currently, the remote service is used for SAS key generation,
+ * authorization, and delegation token operations.
+ */
+public class SecureWasbRemoteCallHelper extends WasbRemoteCallHelper {
+
+  public static final Logger LOG =
+      LoggerFactory.getLogger(SecureWasbRemoteCallHelper.class);
+  /**
+   * Delegation token query parameter to be used when making rest call.
+   */
+  private static final String DELEGATION_TOKEN_QUERY_PARAM_NAME = "delegation";
+
+  /**
+   * Delegation token to be used for making the remote call.
+   */
+  private Token<?> delegationToken = null;
+
+  /**
+   * Whether the remote HTTP call always requires Kerberos authentication, even if the delegation token is present.
+   */
+  private boolean alwaysRequiresKerberosAuth;
+
+  public SecureWasbRemoteCallHelper(RetryPolicy retryPolicy,
+      boolean alwaysRequiresKerberosAuth) {
+    super(retryPolicy);
+    this.alwaysRequiresKerberosAuth = alwaysRequiresKerberosAuth;
+  }
+
+  @Override
+  public String makeRemoteRequest(final String[] urls,
+      final String path, final List<NameValuePair> queryParams,
+      final String httpMethod) throws IOException {
+    final UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
+    UserGroupInformation connectUgi = ugi.getRealUser();
+    if (connectUgi == null) {
+      connectUgi = ugi;
+    }
+    if (delegationToken == null) {
+      connectUgi.checkTGTAndReloginFromKeytab();
+    }
+    String s = null;
+    try {
+      s = connectUgi.doAs(new PrivilegedExceptionAction<String>() {
+        @Override public String run() throws Exception {
+          return retryableRequest(urls, path, queryParams, httpMethod);
+        }
+      });
+    } catch (InterruptedException e) {
+      Thread.currentThread().interrupt();
+      throw new IOException(e.getMessage(), e);
+    }
+    return s;
+  }
+
+  @Override
+  public HttpUriRequest getHttpRequest(String[] urls, String path,
+      List<NameValuePair> queryParams, int urlIndex, String httpMethod)
+      throws URISyntaxException, IOException {
+    final UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
+    UserGroupInformation connectUgi = ugi.getRealUser();
+    if (connectUgi != null) {
+      queryParams.add(new NameValuePair() {
+        @Override public String getName() {
+          return Constants.DOAS_PARAM;
+        }
+
+        @Override public String getValue() {
+          return ugi.getShortUserName();
+        }
+      });
+    }
+
+    final Token delegationToken = getDelegationToken(ugi);
+    if (!alwaysRequiresKerberosAuth && delegationToken != null) {
+      final String delegationTokenEncodedUrlString =
+          delegationToken.encodeToUrlString();
+      queryParams.add(new NameValuePair() {
+        @Override public String getName() {
+          return DELEGATION_TOKEN_QUERY_PARAM_NAME;
+        }
+
+        @Override public String getValue() {
+          return delegationTokenEncodedUrlString;
+        }
+      });
+    }
+
+    URIBuilder uriBuilder =
+        new URIBuilder(urls[urlIndex]).setPath(path).setParameters(queryParams);
+    HttpUriRequest httpUriRequest = null;
+    switch (httpMethod) {
+    case HttpPut.METHOD_NAME:
+      httpUriRequest = new HttpPut(uriBuilder.build());
+      break;
+    case HttpPost.METHOD_NAME:
+      httpUriRequest = new HttpPost(uriBuilder.build());
+      break;
+    default:
+      httpUriRequest = new HttpGet(uriBuilder.build());
+      break;
+    }
+
+    LOG.debug("SecureWasbRemoteCallHelper#getHttpRequest() {}",
+        uriBuilder.build().toURL());
+    if (alwaysRequiresKerberosAuth || delegationToken == null) {
+      AuthenticatedURL.Token token = new AuthenticatedURL.Token();
+      final Authenticator kerberosAuthenticator =
+          new KerberosDelegationTokenAuthenticator();
+      try {
+        kerberosAuthenticator.authenticate(uriBuilder.build().toURL(), token);
+      } catch (AuthenticationException e) {
+        throw new WasbRemoteCallException(
+            Constants.AUTHENTICATION_FAILED_ERROR_MESSAGE, e);
+      }
+      Validate.isTrue(token.isSet(),
+          "Authenticated Token is NOT present. The request cannot proceed.");
+
+      httpUriRequest.setHeader("Cookie",
+          AuthenticatedURL.AUTH_COOKIE + "=" + token);
+    }
+    return httpUriRequest;
+  }
+
+  private synchronized Token<?> getDelegationToken(
+      UserGroupInformation userGroupInformation) throws IOException {
+    if (this.delegationToken == null) {
+      Token<?> token = null;
+      for (Token iterToken : userGroupInformation.getTokens()) {
+        if (iterToken.getKind()
+            .equals(WasbDelegationTokenIdentifier.TOKEN_KIND)) {
+          token = iterToken;
+          LOG.debug("{} token found in cache : {}",
+              WasbDelegationTokenIdentifier.TOKEN_KIND, iterToken);
+          break;
+        }
+      }
+      LOG.debug("UGI Information: {}", userGroupInformation.toString());
+
+      // ugi tokens are usually indicative of a task which can't
+      // refetch tokens.  even if ugi has credentials, don't attempt
+      // to get another token to match hdfs/rpc behavior
+      if (token != null) {
+        LOG.debug("Using UGI token: {}", token);
+        setDelegationToken(token);
+      }
+    }
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Delegation token from cache - {}", delegationToken != null
+          ? delegationToken.encodeToUrlString()
+          : "null");
+    }
+    return this.delegationToken;
+  }
+
+  private <T extends TokenIdentifier> void setDelegationToken(
+      final Token<T> token) {
+    synchronized (this) {
+      this.delegationToken = token;
+    }
+  }
+}
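
A usage sketch, not part of the patch, mirroring what RemoteWasbAuthorizerImpl#init now does: pick the secure helper only when Kerberos is enabled, then issue a retried, failover-aware GET. The URL, path, and query values are illustrative placeholders.

    import java.util.ArrayList;
    import java.util.List;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.azure.SecureWasbRemoteCallHelper;
    import org.apache.hadoop.fs.azure.WasbRemoteCallHelper;
    import org.apache.hadoop.io.retry.RetryPolicy;
    import org.apache.hadoop.io.retry.RetryUtils;
    import org.apache.hadoop.security.UserGroupInformation;
    import org.apache.http.NameValuePair;
    import org.apache.http.client.methods.HttpGet;
    import org.apache.http.message.BasicNameValuePair;

    public class RemoteCallSketch {
      public static String check(Configuration conf) throws Exception {
        RetryPolicy policy = RetryUtils.getMultipleLinearRandomRetry(conf,
            "fs.azure.authorizer.http.retry.policy.enabled", true,
            "fs.azure.authorizer.http.retry.policy.spec", "1000,3,10000,2");
        // Secure helper only when Kerberos is in play; false means a cached
        // delegation token may be used instead of authenticating every call.
        WasbRemoteCallHelper helper = UserGroupInformation.isSecurityEnabled()
            ? new SecureWasbRemoteCallHelper(policy, false)
            : new WasbRemoteCallHelper(policy);
        List<NameValuePair> params = new ArrayList<>();
        params.add(new BasicNameValuePair("operation_type", "read"));
        params.add(new BasicNameValuePair("wasb_absolute_path", "/data/file"));
        return helper.makeRemoteRequest(
            new String[] {"http://authz1.example.com:50911"},
            "/CHECK_AUTHORIZATION", params, HttpGet.METHOD_NAME);
      }
    }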

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38996fdc/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/WasbRemoteCallHelper.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/WasbRemoteCallHelper.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/WasbRemoteCallHelper.java
index b43e5ae..7c26e8a 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/WasbRemoteCallHelper.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/WasbRemoteCallHelper.java
@@ -6,9 +6,9 @@
  * to you under the Apache License, Version 2.0 (the
  * "License"); you may not use this file except in compliance
  * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
  * Unless required by applicable law or agreed to in writing, software
  * distributed under the License is distributed on an "AS IS" BASIS,
  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@@ -19,19 +19,31 @@
 package org.apache.hadoop.fs.azure;
 
 import com.google.common.annotations.VisibleForTesting;
+import org.apache.hadoop.fs.azure.security.Constants;
+import org.apache.hadoop.io.retry.RetryPolicy;
 import org.apache.http.Header;
 import org.apache.http.HttpResponse;
 import org.apache.http.HttpStatus;
+import org.apache.http.NameValuePair;
 import org.apache.http.StatusLine;
-import org.apache.http.client.ClientProtocolException;
 import org.apache.http.client.HttpClient;
 import org.apache.http.client.methods.HttpGet;
+import org.apache.http.client.methods.HttpPost;
+import org.apache.http.client.methods.HttpPut;
+import org.apache.http.client.methods.HttpUriRequest;
+import org.apache.http.client.utils.URIBuilder;
 import org.apache.http.impl.client.HttpClientBuilder;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import java.io.BufferedReader;
 import java.io.IOException;
 import java.io.InputStreamReader;
+import java.io.InterruptedIOException;
+import java.net.URISyntaxException;
 import java.nio.charset.StandardCharsets;
+import java.util.List;
+import java.util.Random;
 
 /**
  * Helper class the has constants and helper methods
@@ -39,101 +51,212 @@ import java.nio.charset.StandardCharsets;
  * service. Currently, remote service will be used to generate
  * SAS keys.
  */
-class WasbRemoteCallHelper {
+public class WasbRemoteCallHelper {
 
+  public static final Logger LOG =
+      LoggerFactory.getLogger(WasbRemoteCallHelper.class);
   /**
    * Return code when the remote call is successful. {@value}
    */
   public static final int REMOTE_CALL_SUCCESS_CODE = 0;
 
   /**
+   * Application Json content type.
+   */
+  private static final String APPLICATION_JSON = "application/json";
+
+  /**
+   * Max content length of the response.
+   */
+  private static final int MAX_CONTENT_LENGTH = 1024;
+
+  /**
    * Client instance to be used for making the remote call.
    */
   private HttpClient client = null;
 
+  private Random random = new Random();
+
+  private RetryPolicy retryPolicy = null;
+
+  public WasbRemoteCallHelper(RetryPolicy retryPolicy) {
+    this.client = HttpClientBuilder.create().build();
+    this.retryPolicy = retryPolicy;
+  }
+
   @VisibleForTesting
   public void updateHttpClient(HttpClient client) {
     this.client = client;
   }
 
-  public WasbRemoteCallHelper() {
-    this.client = HttpClientBuilder.create().build();
-  }
-
   /**
    * Helper method to make remote HTTP Get request.
-   * @param getRequest - HttpGet request object constructed by caller.
+   *
+   * @param urls        - Service URLs to be used; if one fails, try another.
+   * @param path        - URL endpoint for the resource.
+   * @param queryParams - list of query parameters
+   * @param httpMethod  - HTTP method to be used.
    * @return Http Response body returned as a string. The caller
-   *  is expected to semantically understand the response.
-   * @throws WasbRemoteCallException
-   * @throws IOException
+   * is expected to semantically understand the response.
+   * @throws IOException when there is an error executing the remote HTTP request.
    */
-  public String makeRemoteGetRequest(HttpGet getRequest)
-      throws WasbRemoteCallException, IOException {
+  public String makeRemoteRequest(String[] urls, String path,
+      List<NameValuePair> queryParams, String httpMethod) throws IOException {
 
-    try {
+    return retryableRequest(urls, path, queryParams, httpMethod);
+  }
 
-      final String APPLICATION_JSON = "application/json";
-      final int MAX_CONTENT_LENGTH = 1024;
+  protected String retryableRequest(String[] urls, String path,
+      List<NameValuePair> queryParams, String httpMethod) throws IOException {
+    HttpResponse response = null;
+    HttpUriRequest httpRequest = null;
 
-      getRequest.setHeader("Accept", APPLICATION_JSON);
+    for (int retry = 0, index =
+         random.nextInt(urls.length);; retry++, index++) {
+      if (index >= urls.length) {
+        index = index % urls.length;
+      }
 
-      HttpResponse response = client.execute(getRequest);
+      try {
+        httpRequest =
+            getHttpRequest(urls, path, queryParams, index, httpMethod);
 
-      StatusLine statusLine = response.getStatusLine();
-      if (statusLine == null || statusLine.getStatusCode() != HttpStatus.SC_OK) {
-        throw new WasbRemoteCallException(getRequest.getURI().toString() + ":" +
-            ((statusLine!=null) ? statusLine.toString() : "NULL")
-        );
-      }
+        httpRequest.setHeader("Accept", APPLICATION_JSON);
+        response = client.execute(httpRequest);
+        StatusLine statusLine = response.getStatusLine();
+        if (statusLine == null
+            || statusLine.getStatusCode() != HttpStatus.SC_OK) {
+          throw new WasbRemoteCallException(
+              httpRequest.getURI().toString() + ":" + ((statusLine != null)
+                                                       ? statusLine.toString()
+                                                       : "NULL"));
+        }
 
-      Header contentTypeHeader = response.getFirstHeader("Content-Type");
-      if (contentTypeHeader == null
-          || !APPLICATION_JSON.equals(contentTypeHeader.getValue())) {
-        throw new WasbRemoteCallException(getRequest.getURI().toString() + ":" +
-            "Content-Type mismatch: expected: " + APPLICATION_JSON +
-            ", got " + ((contentTypeHeader!=null) ? contentTypeHeader.getValue() : "NULL")
-        );
-      }
+        Header contentTypeHeader = response.getFirstHeader("Content-Type");
+        if (contentTypeHeader == null || !APPLICATION_JSON
+            .equals(contentTypeHeader.getValue())) {
+          throw new WasbRemoteCallException(
+              httpRequest.getURI().toString() + ":"
+                  + "Content-Type mismatch: expected: " + APPLICATION_JSON
+                  + ", got " + ((contentTypeHeader != null) ? contentTypeHeader
+                  .getValue() : "NULL"));
+        }
 
-      Header contentLengthHeader = response.getFirstHeader("Content-Length");
-      if (contentLengthHeader == null) {
-        throw new WasbRemoteCallException(getRequest.getURI().toString() + ":" +
-            "Content-Length header missing"
-        );
-      }
+        Header contentLengthHeader = response.getFirstHeader("Content-Length");
+        if (contentLengthHeader == null) {
+          throw new WasbRemoteCallException(
+              httpRequest.getURI().toString() + ":"
+                  + "Content-Length header missing");
+        }
 
-      try {
-        if (Integer.parseInt(contentLengthHeader.getValue()) > MAX_CONTENT_LENGTH) {
-          throw new WasbRemoteCallException(getRequest.getURI().toString() + ":" +
-              "Content-Length:" + contentLengthHeader.getValue() +
-              "exceeded max:" + MAX_CONTENT_LENGTH
-          );
+        try {
+          if (Integer.parseInt(contentLengthHeader.getValue())
+              > MAX_CONTENT_LENGTH) {
+            throw new WasbRemoteCallException(
+                httpRequest.getURI().toString() + ":" + "Content-Length:"
+                    + contentLengthHeader.getValue() + "exceeded max:"
+                    + MAX_CONTENT_LENGTH);
+          }
+        } catch (NumberFormatException nfe) {
+          throw new WasbRemoteCallException(
+              httpRequest.getURI().toString() + ":"
+                  + "Invalid Content-Length value :" + contentLengthHeader
+                  .getValue());
+        }
+
+        BufferedReader rd = null;
+        StringBuilder responseBody = new StringBuilder();
+        try {
+          rd = new BufferedReader(
+              new InputStreamReader(response.getEntity().getContent(),
+                  StandardCharsets.UTF_8));
+          String responseLine = "";
+          while ((responseLine = rd.readLine()) != null) {
+            responseBody.append(responseLine);
+          }
+        } finally {
+          rd.close();
+        }
+        return responseBody.toString();
+      } catch (URISyntaxException uriSyntaxEx) {
+        throw new WasbRemoteCallException("Encountered URISyntaxException "
+            + "while building the HttpGetRequest to remote service",
+            uriSyntaxEx);
+      } catch (IOException e) {
+        LOG.debug(e.getMessage(), e);
+        try {
+          shouldRetry(e, retry, (httpRequest != null)
+                                ? httpRequest.getURI().toString()
+                                : urls[index]);
+        } catch (IOException ioex) {
+          String message =
+              "Encountered error while making remote call to " + String
+                  .join(",", urls) + " retried " + retry + " time(s).";
+          LOG.error(message, ioex);
+          throw new WasbRemoteCallException(message, ioex);
         }
       }
-      catch (NumberFormatException nfe) {
-        throw new WasbRemoteCallException(getRequest.getURI().toString() + ":" +
-            "Invalid Content-Length value :" + contentLengthHeader.getValue()
-        );
-      }
+    }
+  }
+
+  protected HttpUriRequest getHttpRequest(String[] urls, String path,
+      List<NameValuePair> queryParams, int urlIndex, String httpMethod)
+      throws URISyntaxException, IOException {
+    URIBuilder uriBuilder = null;
+    uriBuilder =
+        new URIBuilder(urls[urlIndex]).setPath(path).setParameters(queryParams);
+    HttpUriRequest httpUriRequest = null;
+    switch (httpMethod) {
+    case HttpPut.METHOD_NAME:
+      httpUriRequest = new HttpPut(uriBuilder.build());
+      break;
+    case HttpPost.METHOD_NAME:
+      httpUriRequest = new HttpPost(uriBuilder.build());
+      break;
+    default:
+      httpUriRequest = new HttpGet(uriBuilder.build());
+      break;
+    }
+    return httpUriRequest;
+  }
+
+  private void shouldRetry(final IOException ioe, final int retry,
+      final String url) throws IOException {
+    CharSequence authenticationExceptionMessage =
+        Constants.AUTHENTICATION_FAILED_ERROR_MESSAGE;
+    if (ioe instanceof WasbRemoteCallException && ioe.getMessage()
+        .equals(authenticationExceptionMessage)) {
+      throw ioe;
+    }
+    try {
+      final RetryPolicy.RetryAction a = (retryPolicy != null)
+                                        ? retryPolicy
+                                            .shouldRetry(ioe, retry, 0, true)
+                                        : RetryPolicy.RetryAction.FAIL;
+
+      boolean isRetry = a.action == RetryPolicy.RetryAction.RetryDecision.RETRY;
+      boolean isFailoverAndRetry =
+          a.action == RetryPolicy.RetryAction.RetryDecision.FAILOVER_AND_RETRY;
+
+      if (isRetry || isFailoverAndRetry) {
+        LOG.debug("Retrying connect to Remote service:{}. Already tried {}"
+                + " time(s); retry policy is {}, " + "delay {}ms.", url, retry,
+            retryPolicy, a.delayMillis);
 
-      BufferedReader rd = new BufferedReader(
-          new InputStreamReader(response.getEntity().getContent(),
-              StandardCharsets.UTF_8));
-      StringBuilder responseBody = new StringBuilder();
-      String responseLine = "";
-      while ((responseLine = rd.readLine()) != null) {
-        responseBody.append(responseLine);
+        Thread.sleep(a.delayMillis);
+        return;
       }
-      rd.close();
-      return responseBody.toString();
-
-    } catch (ClientProtocolException clientProtocolEx) {
-      throw new WasbRemoteCallException(getRequest.getURI().toString() + ":" +
-          "Encountered ClientProtocolException while making remote call", clientProtocolEx);
-    } catch (IOException ioEx) {
-      throw new WasbRemoteCallException(getRequest.getURI().toString() + ":" +
-          "Encountered IOException while making remote call", ioEx);
+    } catch(InterruptedIOException e) {
+      LOG.warn(e.getMessage(), e);
+      Thread.currentThread().interrupt();
+      return;
+    } catch (Exception e) {
+      LOG.warn("Original exception is ", ioe);
+      throw new WasbRemoteCallException(e.getMessage(), e);
     }
+    LOG.debug("Not retrying anymore, already retried the urls {} time(s)",
+        retry);
+    throw new WasbRemoteCallException(
+        url + ":" + "Encountered IOException while making remote call", ioe);
   }
-}
\ No newline at end of file
+}
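
To make the failover order concrete, here is an illustrative standalone sketch, not part of the patch, of how retryableRequest() walks the configured URLs: a random starting index, advancing circularly by one URL per retry. Note that authentication failures (message Constants.AUTHENTICATION_FAILED_ERROR_MESSAGE) are deliberately never retried.

    import java.util.Random;

    public class FailoverOrderSketch {
      public static void main(String[] args) {
        String[] urls = {"http://a:50911", "http://b:50911", "http://c:50911"};
        Random random = new Random();
        // Same index arithmetic as WasbRemoteCallHelper#retryableRequest,
        // bounded here to five attempts for demonstration.
        for (int retry = 0, index = random.nextInt(urls.length);
             retry < 5; retry++, index++) {
          if (index >= urls.length) {
            index = index % urls.length;  // wrap around to the first URL
          }
          System.out.println("attempt " + retry + " -> " + urls[index]);
        }
      }
    }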

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38996fdc/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/security/Constants.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/security/Constants.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/security/Constants.java
index 79531a9..cacdfc5 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/security/Constants.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/security/Constants.java
@@ -27,21 +27,6 @@ public final class Constants {
   }
 
   /**
-   * Configuration parameter name expected in the Configuration
-   * object to provide the url of the remote service {@value}
-   */
-  public static final String KEY_CRED_SERVICE_URL = "fs.azure.cred.service.url";
-  /**
-   * Default port of the remote service used as delegation token manager and Azure storage SAS key generator.
-   */
-  public static final int DEFAULT_CRED_SERVICE_PORT = 50911;
-
-  /**
-   * Default remote delegation token manager endpoint.
-   */
-  public static final String DEFAULT_DELEGATION_TOKEN_MANAGER_ENDPOINT = "/tokenmanager/v1";
-
-  /**
    * The configuration property to enable Kerberos support.
    */
 
@@ -51,4 +36,9 @@ public final class Constants {
    * Parameter to be used for impersonation.
    */
   public static final String DOAS_PARAM = "doas";
+
+  /**
+   * Error message for Authentication failures.
+   */
+  public static final String AUTHENTICATION_FAILED_ERROR_MESSAGE = "Authentication Failed ";
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38996fdc/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/security/JsonUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/security/JsonUtils.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/security/JsonUtils.java
new file mode 100644
index 0000000..20dd470
--- /dev/null
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/security/JsonUtils.java
@@ -0,0 +1,52 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.azure.security;
+
+import com.fasterxml.jackson.databind.ObjectMapper;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.util.Locale;
+import java.util.Map;
+
+/**
+ * Utility class to parse JSON.
+ */
+public final class JsonUtils {
+  public static final Logger LOG = LoggerFactory.getLogger(JsonUtils.class);
+
+  private JsonUtils() {
+  }
+
+  public static Map<?, ?> parse(final String jsonString) throws IOException {
+    try {
+      ObjectMapper mapper = new ObjectMapper();
+      return mapper.readerFor(Map.class).readValue(jsonString);
+    } catch (Exception e) {
+      LOG.debug("JSON Parsing exception: {} while parsing {}", e.getMessage(),
+          jsonString);
+      if (jsonString.toLowerCase(Locale.ENGLISH).contains("server error")) {
+        LOG.error(
+            "Internal Server Error was encountered while making a request");
+      }
+      throw new IOException("JSON Parsing Error: " + e.getMessage(), e);
+    }
+  }
+}
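
A hypothetical usage of JsonUtils#parse, not part of the patch; the payload shape matches what RemoteWasbDelegationTokenManager (below) reads back from a renew call.

    import java.util.Map;
    import org.apache.hadoop.fs.azure.security.JsonUtils;

    public class JsonUtilsSketch {
      public static void main(String[] args) throws Exception {
        // The renew response carries the new expiration time under "long".
        Map<?, ?> parsed = JsonUtils.parse("{\"long\": 1499836397000}");
        long newExpiry = ((Number) parsed.get("long")).longValue();
        System.out.println("renewed until " + newExpiry);
      }
    }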

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38996fdc/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/security/RemoteWasbDelegationTokenManager.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/security/RemoteWasbDelegationTokenManager.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/security/RemoteWasbDelegationTokenManager.java
new file mode 100644
index 0000000..1078f88
--- /dev/null
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/security/RemoteWasbDelegationTokenManager.java
@@ -0,0 +1,162 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.azure.security;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.azure.SecureWasbRemoteCallHelper;
+import org.apache.hadoop.fs.azure.WasbRemoteCallHelper;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.io.retry.RetryPolicy;
+import org.apache.hadoop.io.retry.RetryUtils;
+import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.security.token.delegation.web.DelegationTokenIdentifier;
+import org.apache.http.client.methods.HttpGet;
+import org.apache.http.client.methods.HttpPut;
+import org.apache.http.client.utils.URIBuilder;
+
+import java.io.IOException;
+import java.util.Map;
+
+/**
+ * Class to manage delegation token operations by making REST calls to a remote service.
+ */
+public class RemoteWasbDelegationTokenManager
+    implements WasbDelegationTokenManager {
+
+  /**
+   * Configuration parameter name expected in the configuration
+   * object to provide the URLs of the delegation token service used to fetch delegation tokens.
+   */
+  public static final String KEY_DELEGATION_TOKEN_SERVICE_URLS =
+      "fs.azure.delegation.token.service.urls";
+  /**
+   * Configuration key to enable http retry policy for delegation token service calls.
+   */
+  public static final String DT_MANAGER_HTTP_CLIENT_RETRY_POLICY_ENABLED_KEY =
+      "fs.azure.delegationtokenservice.http.retry.policy.enabled";
+  /**
+   * Configuration key for delegation token service http retry policy spec.
+   */
+  public static final String DT_MANAGER_HTTP_CLIENT_RETRY_POLICY_SPEC_KEY =
+      "fs.azure.delegationtokenservice.http.retry.policy.spec";
+  /**
+   * Default remote delegation token manager endpoint.
+   */
+  private static final String DEFAULT_DELEGATION_TOKEN_MANAGER_ENDPOINT =
+      "/tokenmanager/v1";
+  /**
+   * Default for delegation token service http retry policy spec.
+   */
+  private static final String DT_MANAGER_HTTP_CLIENT_RETRY_POLICY_SPEC_DEFAULT =
+      "1000,3,10000,2";
+
+  private static final boolean
+      DT_MANAGER_HTTP_CLIENT_RETRY_POLICY_ENABLED_DEFAULT = true;
+
+  private static final Text WASB_DT_SERVICE_NAME = new Text("WASB_DT_SERVICE");
+  /**
+   * Query parameter value for the get delegation token HTTP request.
+   */
+  private static final String GET_DELEGATION_TOKEN_OP = "GETDELEGATIONTOKEN";
+  /**
+   * Query parameter value for the renew delegation token HTTP request.
+   */
+  private static final String RENEW_DELEGATION_TOKEN_OP = "RENEWDELEGATIONTOKEN";
+  /**
+   * Query parameter value for the cancel delegation token HTTP request.
+   */
+  private static final String CANCEL_DELEGATION_TOKEN_OP = "CANCELDELEGATIONTOKEN";
+  /**
+   * op parameter to represent the operation.
+   */
+  private static final String OP_PARAM_KEY_NAME = "op";
+  /**
+   * renewer parameter to represent the renewer of the delegation token.
+   */
+  private static final String RENEWER_PARAM_KEY_NAME = "renewer";
+  /**
+   * service parameter to represent the service which returns delegation tokens.
+   */
+  private static final String SERVICE_PARAM_KEY_NAME = "service";
+  /**
+   * token parameter to represent the delegation token.
+   */
+  private static final String TOKEN_PARAM_KEY_NAME = "token";
+  private WasbRemoteCallHelper remoteCallHelper;
+  private String[] dtServiceUrls;
+
+  public RemoteWasbDelegationTokenManager(Configuration conf)
+      throws IOException {
+    RetryPolicy retryPolicy = RetryUtils.getMultipleLinearRandomRetry(conf,
+        DT_MANAGER_HTTP_CLIENT_RETRY_POLICY_ENABLED_KEY,
+        DT_MANAGER_HTTP_CLIENT_RETRY_POLICY_ENABLED_DEFAULT,
+        DT_MANAGER_HTTP_CLIENT_RETRY_POLICY_SPEC_KEY,
+        DT_MANAGER_HTTP_CLIENT_RETRY_POLICY_SPEC_DEFAULT);
+
+    remoteCallHelper = new SecureWasbRemoteCallHelper(retryPolicy, true);
+    this.dtServiceUrls =
+        conf.getTrimmedStrings(KEY_DELEGATION_TOKEN_SERVICE_URLS);
+    if (this.dtServiceUrls == null || this.dtServiceUrls.length <= 0) {
+      throw new IOException(
+          KEY_DELEGATION_TOKEN_SERVICE_URLS + " config not set"
+              + " in configuration.");
+    }
+  }
+
+  @Override
+  public Token<DelegationTokenIdentifier> getDelegationToken(
+      String renewer) throws IOException {
+    URIBuilder uriBuilder =
+        new URIBuilder().setPath(DEFAULT_DELEGATION_TOKEN_MANAGER_ENDPOINT)
+            .addParameter(OP_PARAM_KEY_NAME, GET_DELEGATION_TOKEN_OP)
+            .addParameter(RENEWER_PARAM_KEY_NAME, renewer)
+            .addParameter(SERVICE_PARAM_KEY_NAME, WASB_DT_SERVICE_NAME.toString());
+    String responseBody = remoteCallHelper
+        .makeRemoteRequest(dtServiceUrls, uriBuilder.getPath(),
+            uriBuilder.getQueryParams(), HttpGet.METHOD_NAME);
+    return TokenUtils.toDelegationToken(JsonUtils.parse(responseBody));
+  }
+
+  @Override
+  public long renewDelegationToken(Token<?> token)
+      throws IOException {
+    URIBuilder uriBuilder =
+        new URIBuilder().setPath(DEFAULT_DELEGATION_TOKEN_MANAGER_ENDPOINT)
+            .addParameter(OP_PARAM_KEY_NAME, RENEW_DELEGATION_TOKEN_OP)
+            .addParameter(TOKEN_PARAM_KEY_NAME, token.encodeToUrlString());
+
+    String responseBody = remoteCallHelper
+        .makeRemoteRequest(dtServiceUrls, uriBuilder.getPath(),
+            uriBuilder.getQueryParams(), HttpPut.METHOD_NAME);
+
+    Map<?, ?> parsedResp = JsonUtils.parse(responseBody);
+    return ((Number) parsedResp.get("long")).longValue();
+  }
+
+  @Override
+  public void cancelDelegationToken(Token<?> token)
+      throws IOException {
+    URIBuilder uriBuilder =
+        new URIBuilder().setPath(DEFAULT_DELEGATION_TOKEN_MANAGER_ENDPOINT)
+            .addParameter(OP_PARAM_KEY_NAME, CANCEL_DELEGATION_TOKEN_OP)
+            .addParameter(TOKEN_PARAM_KEY_NAME, token.encodeToUrlString());
+    remoteCallHelper.makeRemoteRequest(dtServiceUrls, uriBuilder.getPath(),
+        uriBuilder.getQueryParams(), HttpPut.METHOD_NAME);
+  }
+}
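
A hedged lifecycle sketch, not part of the patch: fetch, renew, and cancel a delegation token through the new manager. The service URLs are placeholders, and a Kerberos login is assumed to already be in place, since this manager always authenticates via Kerberos.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.azure.security.RemoteWasbDelegationTokenManager;
    import org.apache.hadoop.security.token.Token;
    import org.apache.hadoop.security.token.delegation.web.DelegationTokenIdentifier;

    public class DtManagerSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        conf.set("fs.azure.delegation.token.service.urls",
            "http://dt1.example.com:50911,http://dt2.example.com:50911");
        RemoteWasbDelegationTokenManager manager =
            new RemoteWasbDelegationTokenManager(conf);
        Token<DelegationTokenIdentifier> token =
            manager.getDelegationToken("yarn");         // renewer principal
        long expiry = manager.renewDelegationToken(token);
        System.out.println("renewed until " + expiry);
        manager.cancelDelegationToken(token);           // invalidate when done
      }
    }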

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38996fdc/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/security/SecurityUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/security/SecurityUtils.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/security/SecurityUtils.java
deleted file mode 100644
index 61bf846..0000000
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/security/SecurityUtils.java
+++ /dev/null
@@ -1,86 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.azure.security;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.azure.RemoteWasbAuthorizerImpl;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.security.token.Token;
-import org.apache.hadoop.security.token.TokenIdentifier;
-
-import java.io.IOException;
-import java.net.InetAddress;
-import java.net.UnknownHostException;
-import java.util.Iterator;
-
-/**
- * Security Utils class for WASB.
- */
-public final class SecurityUtils {
-
-  private SecurityUtils() {
-  }
-
-  /**
-   * Utility method to get remote service URLs from the configuration.
-   * @param conf configuration object.
-   * @return remote service URL
-   * @throws UnknownHostException thrown when getting the default value.
-   */
-  public static String getCredServiceUrls(Configuration conf)
-      throws UnknownHostException {
-    return conf.get(Constants.KEY_CRED_SERVICE_URL, String
-        .format("http://%s:%s",
-            InetAddress.getLocalHost().getCanonicalHostName(),
-            Constants.DEFAULT_CRED_SERVICE_PORT));
-  }
-
-  /**
-   * Utility method to get remote Authorization service URLs from the configuration.
-   * @param conf Configuration object.
-   * @return remote Authorization server URL
-   * @throws UnknownHostException thrown when getting the default value.
-   */
-  public static String getRemoteAuthServiceUrls(Configuration conf)
-      throws UnknownHostException {
-    return conf.get(RemoteWasbAuthorizerImpl.KEY_REMOTE_AUTH_SERVICE_URL, String
-        .format("http://%s:%s",
-            InetAddress.getLocalHost().getCanonicalHostName(),
-            Constants.DEFAULT_CRED_SERVICE_PORT));
-  }
-
-  /**
-   * Utility method to get delegation token from the UGI credentials.
-   * @return delegation token
-   * @throws IOException thrown when getting the current user.
-   */
-  public static String getDelegationTokenFromCredentials() throws IOException {
-    String delegationToken = null;
-    Iterator<Token<? extends TokenIdentifier>> tokenIterator = UserGroupInformation
-        .getCurrentUser().getCredentials().getAllTokens().iterator();
-    while (tokenIterator.hasNext()) {
-      Token<? extends TokenIdentifier> iteratedToken = tokenIterator.next();
-      if (iteratedToken.getKind()
-          .equals(WasbDelegationTokenIdentifier.TOKEN_KIND)) {
-        delegationToken = iteratedToken.encodeToUrlString();
-      }
-    }
-    return delegationToken;
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38996fdc/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/security/TokenUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/security/TokenUtils.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/security/TokenUtils.java
new file mode 100644
index 0000000..90b9082
--- /dev/null
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/security/TokenUtils.java
@@ -0,0 +1,60 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.azure.security;
+
+import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.security.token.TokenIdentifier;
+import org.apache.hadoop.security.token.delegation.web.DelegationTokenIdentifier;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.util.Map;
+
+/**
+ * Utility methods common to token management.
+ */
+public final class TokenUtils {
+  public static final Logger LOG = LoggerFactory.getLogger(TokenUtils.class);
+  public static final String URL_STRING = "urlString";
+
+  private TokenUtils() {
+  }
+
+  public static Token<DelegationTokenIdentifier> toDelegationToken(
+      final Map<?, ?> inputMap) throws IOException {
+    final Map<?, ?> m = (Map<?, ?>) inputMap.get(Token.class.getSimpleName());
+    return (Token<DelegationTokenIdentifier>) toToken(m);
+  }
+
+  public static Token<? extends TokenIdentifier> toToken(final Map<?, ?> m)
+      throws IOException {
+    if (m == null) {
+      return null;
+    }
+    String urlString = (String) m.get(URL_STRING);
+    if (urlString != null) {
+      final Token<DelegationTokenIdentifier> token = new Token<>();
+      LOG.debug("Read url string param - {}", urlString);
+      token.decodeFromUrlString(urlString);
+      return token;
+    }
+    return null;
+  }
+}
\ No newline at end of file
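
A round-trip sketch, not part of the patch: encode a token to its URL-safe string, wrap it in the {"urlString": ...} map shape that toToken() expects, and decode it back. The empty Token here is only a stand-in for a real delegation token.

    import java.util.HashMap;
    import java.util.Map;
    import org.apache.hadoop.fs.azure.security.TokenUtils;
    import org.apache.hadoop.security.token.Token;
    import org.apache.hadoop.security.token.TokenIdentifier;
    import org.apache.hadoop.security.token.delegation.web.DelegationTokenIdentifier;

    public class TokenUtilsSketch {
      public static void main(String[] args) throws Exception {
        Token<DelegationTokenIdentifier> original = new Token<>();
        Map<String, String> m = new HashMap<>();
        m.put(TokenUtils.URL_STRING, original.encodeToUrlString());
        Token<? extends TokenIdentifier> decoded = TokenUtils.toToken(m);
        System.out.println("decoded kind: " + decoded.getKind());
      }
    }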

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38996fdc/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/security/WasbDelegationTokenManager.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/security/WasbDelegationTokenManager.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/security/WasbDelegationTokenManager.java
new file mode 100644
index 0000000..1d73416
--- /dev/null
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/security/WasbDelegationTokenManager.java
@@ -0,0 +1,54 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.azure.security;
+
+import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.security.token.delegation.web.DelegationTokenIdentifier;
+
+import java.io.IOException;
+
+/**
+ * Interface for managing delegation tokens.
+ */
+public interface WasbDelegationTokenManager {
+
+  /**
+   * Get a delegation token.
+   * @param renewer delegation token renewer
+   * @return delegation token
+   * @throws IOException if an error occurs while getting the delegation token
+   */
+  Token<DelegationTokenIdentifier> getDelegationToken(String renewer)
+      throws IOException;
+
+  /**
+   * Renew the delegation token.
+   * @param token delegation token.
+   * @return renewed time.
+   * @throws IOException if an error occurs while renewing the delegation token
+   */
+  long renewDelegationToken(Token<?> token) throws IOException;
+
+  /**
+   * Cancel the delegation token.
+   * @param token delegation token.
+   * @throws IOException if an error occurs while cancelling the delegation token.
+   */
+  void cancelDelegationToken(Token<?> token) throws IOException;
+}
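
For illustration, a skeletal class satisfying this interface; it is a hypothetical stub, not the RemoteWasbDelegationTokenManager this patch actually wires in, and the kind/service strings are placeholders:

```java
import java.io.IOException;

import org.apache.hadoop.fs.azure.security.WasbDelegationTokenManager;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.delegation.web.DelegationTokenIdentifier;

/** Hypothetical stub; a real manager would call out to a remote service. */
public class StubWasbDelegationTokenManager
    implements WasbDelegationTokenManager {

  @Override
  public Token<DelegationTokenIdentifier> getDelegationToken(String renewer)
      throws IOException {
    // Hand back an empty token tagged with placeholder kind and service.
    Token<DelegationTokenIdentifier> token = new Token<>();
    token.setKind(new Text("WASB delegation"));
    token.setService(new Text("stub-service"));
    return token;
  }

  @Override
  public long renewDelegationToken(Token<?> token) throws IOException {
    // Pretend the token stays valid for another hour.
    return System.currentTimeMillis() + 60 * 60 * 1000L;
  }

  @Override
  public void cancelDelegationToken(Token<?> token) throws IOException {
    // Nothing to revoke in this stub.
  }
}
```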

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38996fdc/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/security/WasbTokenRenewer.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/security/WasbTokenRenewer.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/security/WasbTokenRenewer.java
index 7994bde..6df7647 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/security/WasbTokenRenewer.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/security/WasbTokenRenewer.java
@@ -6,9 +6,9 @@
  * to you under the Apache License, Version 2.0 (the
  * "License"); you may not use this file except in compliance
  * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
  * Unless required by applicable law or agreed to in writing, software
  * distributed under the License is distributed on an "AS IS" BASIS,
  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@@ -20,27 +20,19 @@ package org.apache.hadoop.fs.azure.security;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.io.Text;
-import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.TokenRenewer;
-import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdentifier;
-import org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticatedURL;
-import org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticator;
-import org.apache.hadoop.security.token.delegation.web.KerberosDelegationTokenAuthenticator;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import java.io.IOException;
-import java.net.InetAddress;
-import java.net.URL;
-import java.security.PrivilegedExceptionAction;
 
 /**
  * Token Renewer for renewing WASB delegation tokens with remote service.
  */
 public class WasbTokenRenewer extends TokenRenewer {
-  public static final Logger LOG = LoggerFactory
-      .getLogger(WasbTokenRenewer.class);
+  public static final Logger LOG =
+      LoggerFactory.getLogger(WasbTokenRenewer.class);
 
   /**
    * Checks if this particular object handles the Kind of token passed.
@@ -75,32 +67,7 @@ public class WasbTokenRenewer extends TokenRenewer {
   public long renew(final Token<?> token, Configuration conf)
       throws IOException, InterruptedException {
     LOG.debug("Renewing the delegation token");
-    final UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
-    UserGroupInformation connectUgi = ugi.getRealUser();
-    final UserGroupInformation proxyUser = connectUgi;
-    if (connectUgi == null) {
-      connectUgi = ugi;
-    }
-    connectUgi.checkTGTAndReloginFromKeytab();
-    final DelegationTokenAuthenticatedURL.Token authToken = new DelegationTokenAuthenticatedURL.Token();
-    authToken
-        .setDelegationToken((Token<AbstractDelegationTokenIdentifier>) token);
-    final String credServiceUrl = conf.get(Constants.KEY_CRED_SERVICE_URL,
-        String.format("http://%s:%s",
-            InetAddress.getLocalHost().getCanonicalHostName(),
-            Constants.DEFAULT_CRED_SERVICE_PORT));
-    DelegationTokenAuthenticator authenticator = new KerberosDelegationTokenAuthenticator();
-    final DelegationTokenAuthenticatedURL authURL = new DelegationTokenAuthenticatedURL(
-        authenticator);
-
-    return connectUgi.doAs(new PrivilegedExceptionAction<Long>() {
-      @Override
-      public Long run() throws Exception {
-        return authURL.renewDelegationToken(new URL(credServiceUrl
-                + Constants.DEFAULT_DELEGATION_TOKEN_MANAGER_ENDPOINT),
-            authToken, (proxyUser != null) ? ugi.getShortUserName() : null);
-      }
-    });
+    return getInstance(conf).renewDelegationToken(token);
   }
 
   /**
@@ -114,31 +81,11 @@ public class WasbTokenRenewer extends TokenRenewer {
   public void cancel(final Token<?> token, Configuration conf)
       throws IOException, InterruptedException {
     LOG.debug("Cancelling the delegation token");
-    final UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
-    UserGroupInformation connectUgi = ugi.getRealUser();
-    final UserGroupInformation proxyUser = connectUgi;
-    if (connectUgi == null) {
-      connectUgi = ugi;
-    }
-    connectUgi.checkTGTAndReloginFromKeytab();
-    final DelegationTokenAuthenticatedURL.Token authToken = new DelegationTokenAuthenticatedURL.Token();
-    authToken
-        .setDelegationToken((Token<AbstractDelegationTokenIdentifier>) token);
-    final String credServiceUrl = conf.get(Constants.KEY_CRED_SERVICE_URL,
-        String.format("http://%s:%s",
-            InetAddress.getLocalHost().getCanonicalHostName(),
-            Constants.DEFAULT_CRED_SERVICE_PORT));
-    DelegationTokenAuthenticator authenticator = new KerberosDelegationTokenAuthenticator();
-    final DelegationTokenAuthenticatedURL authURL = new DelegationTokenAuthenticatedURL(
-        authenticator);
-    connectUgi.doAs(new PrivilegedExceptionAction<Void>() {
-      @Override
-      public Void run() throws Exception {
-        authURL.cancelDelegationToken(new URL(credServiceUrl
-                + Constants.DEFAULT_DELEGATION_TOKEN_MANAGER_ENDPOINT),
-            authToken, (proxyUser != null) ? ugi.getShortUserName() : null);
-        return null;
-      }
-    });
+    getInstance(conf).cancelDelegationToken(token);
+  }
+
+  private WasbDelegationTokenManager getInstance(Configuration conf)
+      throws IOException {
+    return new RemoteWasbDelegationTokenManager(conf);
   }
 }
\ No newline at end of file
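
With the remote plumbing factored out, the renewer is now a thin shim behind the generic `Token` API, which locates a matching `TokenRenewer` through service discovery. A hedged usage sketch, assuming `token` is a WASB delegation token obtained elsewhere:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.token.Token;

public class WasbTokenRenewalSketch {
  /** 'token' is assumed to be a WASB delegation token obtained elsewhere. */
  public static void renewAndCancel(Token<?> token, Configuration conf)
      throws Exception {
    // Token#renew finds the TokenRenewer whose handleKind() matches
    // (WasbTokenRenewer here) and calls its renew(), which now delegates
    // to a WasbDelegationTokenManager.
    long expiryTime = token.renew(conf);
    System.out.println("Token renewed until " + expiryTime);

    // Likewise dispatches to WasbTokenRenewer#cancel.
    token.cancel(conf);
  }
}
```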

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38996fdc/hadoop-tools/hadoop-azure/src/site/markdown/index.md
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/site/markdown/index.md b/hadoop-tools/hadoop-azure/src/site/markdown/index.md
index 9c57e60..740be52 100644
--- a/hadoop-tools/hadoop-azure/src/site/markdown/index.md
+++ b/hadoop-tools/hadoop-azure/src/site/markdown/index.md
@@ -316,12 +316,12 @@ To enable SAS key generation locally following property needs to be set to true.
 </property>
 ```
 
-To use the remote SAS key generation mode, an external REST service is expected to provided required SAS keys.
+To use the remote SAS key generation mode, a comma-separated list of external REST services is expected to provide the required SAS keys.
 The following property can be used to provide the end points to use for remote SAS Key generation:
 
 ```xml
 <property>
-  <name>fs.azure.cred.service.url</name>
+  <name>fs.azure.cred.service.urls</name>
   <value>{URL}</value>
 </property>
 ```
@@ -354,11 +354,11 @@ Authorization support can be enabled in WASB using the following configuration:
 ```
 
 The current implementation of authorization relies on the presence of an external service that can enforce
-the authorization. The service is expected to be running on a URL provided by the following config.
+the authorization. The service is expected to be running on the comma-separated URLs provided by the following config.
 
 ```xml
 <property>
-  <name>fs.azure.authorization.remote.service.url</name>
+  <name>fs.azure.authorization.remote.service.urls</name>
   <value>{URL}</value>
 </property>
 ```
@@ -377,6 +377,42 @@ The service is expected to return a response in JSON format:
 }
 ```
 
+### Delegation token support in WASB
+
+Delegation token support can be enabled in WASB using the following configuration:
+
+```xml
+<property>
+  <name>fs.azure.enable.kerberos.support</name>
+  <value>true</value>
+</property>
+```
+
+The current implementation of delegation token support relies on the presence of external service instances that can generate and manage delegation tokens. The service is expected to be running on the comma-separated URLs provided by the following config.
+
+```xml
+<property>
+  <name>fs.azure.delegation.token.service.urls</name>
+  <value>{URL}</value>
+</property>
+```
+
+The remote service is expected to provide support for the following REST calls: ```{URL}?op=GETDELEGATIONTOKEN```, ```{URL}?op=RENEWDELEGATIONTOKEN``` and ```{URL}?op=CANCELDELEGATIONTOKEN```.
+Example requests:
+  ```{URL}?op=GETDELEGATIONTOKEN&renewer=<renewer>```
+  ```{URL}?op=RENEWDELEGATIONTOKEN&token=<delegation token>```
+  ```{URL}?op=CANCELDELEGATIONTOKEN&token=<delegation token>```
+
+The service is expected to return a response in JSON format for the GETDELEGATIONTOKEN request:
+
+```json
+{
+    "Token" : {
+        "urlString": URL string of delegation token.
+    }
+}
+```
+
 ## Testing the hadoop-azure Module
 
 The hadoop-azure module includes a full suite of unit tests.  Most of the tests
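
As a rough sketch of a client for the REST contract documented above, one could use Hadoop's `DelegationTokenAuthenticatedURL` (the same API the WASB code builds on); the endpoint URL and renewer name below are placeholders, and a Kerberos login is assumed:

```java
import java.net.URL;

import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticatedURL;
import org.apache.hadoop.security.token.delegation.web.KerberosDelegationTokenAuthenticator;

public class DelegationTokenClientSketch {
  public static void main(String[] args) throws Exception {
    // Placeholder endpoint; real deployments would configure this via
    // fs.azure.delegation.token.service.urls.
    URL serviceUrl = new URL("http://token-service.example.com:8080/");

    DelegationTokenAuthenticatedURL.Token authToken =
        new DelegationTokenAuthenticatedURL.Token();
    DelegationTokenAuthenticatedURL authUrl =
        new DelegationTokenAuthenticatedURL(
            new KerberosDelegationTokenAuthenticator());

    // Issues {URL}?op=GETDELEGATIONTOKEN&renewer=<renewer> under the hood.
    Token<?> token =
        authUrl.getDelegationToken(serviceUrl, authToken, "yarn");
    System.out.println("Got token: " + token);

    // Issues {URL}?op=RENEWDELEGATIONTOKEN&token=<delegation token>.
    long expiry = authUrl.renewDelegationToken(serviceUrl, authToken);
    System.out.println("Renewed until " + expiry);

    // Issues {URL}?op=CANCELDELEGATIONTOKEN&token=<delegation token>.
    authUrl.cancelDelegationToken(serviceUrl, authToken);
  }
}
```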


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[37/50] [abbrv] hadoop git commit: YARN-6770. A small mistake in the example of TimelineClient. Contributed by Jinjiang Ling.

Posted by as...@apache.org.
YARN-6770. A small mistake in the example of TimelineClient. Contributed by Jinjiang Ling.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ba5b056e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ba5b056e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ba5b056e

Branch: refs/heads/YARN-5972
Commit: ba5b056e8a151fad9d7573c6e62cffc40cde7da3
Parents: 626d730
Author: Naganarasimha <na...@apache.org>
Authored: Sun Jul 9 21:28:24 2017 +0530
Committer: Naganarasimha <na...@apache.org>
Committed: Sun Jul 9 21:28:24 2017 +0530

----------------------------------------------------------------------
 .../hadoop-yarn-site/src/site/markdown/TimelineServer.md         | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ba5b056e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServer.md
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServer.md b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServer.md
index ac9b2ec..f610cde 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServer.md
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServer.md
@@ -274,7 +274,7 @@ Here is an example:
 
     try {
       TimelineDomain myDomain = new TimelineDomain();
-      myDomain.setID("MyDomain");
+      myDomain.setId("MyDomain");
       // Compose other Domain info ....
 
       client.putDomain(myDomain);
@@ -282,7 +282,7 @@ Here is an example:
       TimelineEntity myEntity = new TimelineEntity();
       myEntity.setDomainId(myDomain.getId());
       myEntity.setEntityType("APPLICATION");
-      myEntity.setEntityID("MyApp1")
+      myEntity.setEntityId("MyApp1");
       // Compose other entity info
 
       TimelinePutResponse response = client.putEntities(entity);
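
Putting the corrected calls in context, a hedged, self-contained version of the example might look as follows (the client lifecycle boilerplate is assumed from the surrounding TimelineServer document, and the entity variable is passed through consistently):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.api.records.timeline.TimelineDomain;
import org.apache.hadoop.yarn.api.records.timeline.TimelineEntity;
import org.apache.hadoop.yarn.api.records.timeline.TimelinePutResponse;
import org.apache.hadoop.yarn.client.api.TimelineClient;

public class TimelineClientSketch {
  public static void main(String[] args) throws Exception {
    TimelineClient client = TimelineClient.createTimelineClient();
    client.init(new Configuration());
    client.start();
    try {
      TimelineDomain myDomain = new TimelineDomain();
      myDomain.setId("MyDomain");        // setId, not setID
      client.putDomain(myDomain);

      TimelineEntity myEntity = new TimelineEntity();
      myEntity.setDomainId(myDomain.getId());
      myEntity.setEntityType("APPLICATION");
      myEntity.setEntityId("MyApp1");    // setEntityId, not setEntityID
      TimelinePutResponse response = client.putEntities(myEntity);
      System.out.println("Put errors: " + response.getErrors());
    } finally {
      client.stop();
    }
  }
}
```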


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[15/50] [abbrv] hadoop git commit: MAPREDUCE-6905. HADOOP_JOB_HISTORY_OPTS should be HADOOP_JOB_HISTORYSERVER_OPTS in mapred-config.sh. Contributed by LiXin Ge.

Posted by as...@apache.org.
MAPREDUCE-6905. HADOOP_JOB_HISTORY_OPTS should be HADOOP_JOB_HISTORYSERVER_OPTS in mapred-config.sh. Contributed by LiXin Ge.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bf1f5993
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bf1f5993
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bf1f5993

Branch: refs/heads/YARN-5972
Commit: bf1f59937dd5d860c9ed7fefce203d6a9f645182
Parents: fa1aaee
Author: Naganarasimha <na...@apache.org>
Authored: Sun Jul 2 15:56:27 2017 +0530
Committer: Naganarasimha <na...@apache.org>
Committed: Sun Jul 2 15:56:27 2017 +0530

----------------------------------------------------------------------
 .../src/test/java/org/apache/hadoop/fs/TestDFSIO.java | 14 ++++++++++++--
 1 file changed, 12 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bf1f5993/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/TestDFSIO.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/TestDFSIO.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/TestDFSIO.java
index 34eac83..12fbdad 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/TestDFSIO.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/TestDFSIO.java
@@ -294,8 +294,17 @@ public class TestDFSIO implements Tool {
                                   int nrFiles
                                 ) throws IOException {
     LOG.info("creating control file: "+nrBytes+" bytes, "+nrFiles+" files");
-
+    final int maxDirItems = config.getInt(
+        DFSConfigKeys.DFS_NAMENODE_MAX_DIRECTORY_ITEMS_KEY,
+        DFSConfigKeys.DFS_NAMENODE_MAX_DIRECTORY_ITEMS_DEFAULT);
     Path controlDir = getControlDir(config);
+
+    if (nrFiles > maxDirItems) {
+      final String message = "The directory item limit of " + controlDir +
+          " is exceeded: limit=" + maxDirItems + " items=" + nrFiles;
+      throw new IOException(message);
+    }
+
     fs.delete(controlDir, true);
 
     for(int i=0; i < nrFiles; i++) {
@@ -310,8 +319,9 @@ public class TestDFSIO implements Tool {
       } catch(Exception e) {
         throw new IOException(e.getLocalizedMessage());
       } finally {
-        if (writer != null)
+        if (writer != null) {
           writer.close();
+        }
         writer = null;
       }
     }
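
The brace fix keeps the existing try/finally shape; since `SequenceFile.Writer` is `Closeable`, an equivalent and arguably more idiomatic form would be try-with-resources. A sketch under that assumption (the class and parameter names mirror, but are not, the surrounding method):

```java
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.SequenceFile.CompressionType;
import org.apache.hadoop.io.Text;

public class ControlFileSketch {
  /** Hypothetical helper mirroring the loop body above. */
  static void writeControlFile(FileSystem fs, Configuration config,
      Path controlFile, String name, long nrBytes) throws IOException {
    // try-with-resources closes the writer even if append() throws.
    try (SequenceFile.Writer writer = SequenceFile.createWriter(
        fs, config, controlFile,
        Text.class, LongWritable.class, CompressionType.NONE)) {
      writer.append(new Text(name), new LongWritable(nrBytes));
    }
  }
}
```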


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[04/50] [abbrv] hadoop git commit: HADOOP-14596. AWS SDK 1.11+ aborts() on close() if > 0 bytes in stream; logs error. Contributed by Steve Loughran

Posted by as...@apache.org.
HADOOP-14596. AWS SDK 1.11+ aborts() on close() if > 0 bytes in stream; logs error. Contributed by Steve Loughran

Change-Id: I49173bf6163796903d64594a8ca8a4bd26ad2bfc


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/72993b33
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/72993b33
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/72993b33

Branch: refs/heads/YARN-5972
Commit: 72993b33b704991f2a0bf743f31b164e58a2dabc
Parents: ec97519
Author: Mingliang Liu <li...@apache.org>
Authored: Thu Jun 29 17:00:25 2017 -0700
Committer: Mingliang Liu <li...@apache.org>
Committed: Thu Jun 29 17:07:52 2017 -0700

----------------------------------------------------------------------
 .../apache/hadoop/fs/s3a/S3AInputStream.java    | 26 +++++++++++++++++---
 1 file changed, 22 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/72993b33/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AInputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AInputStream.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AInputStream.java
index 7d322a5..b88b7c1 100644
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AInputStream.java
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AInputStream.java
@@ -33,6 +33,7 @@ import org.apache.hadoop.fs.FSInputStream;
 import org.apache.hadoop.fs.FileSystem;
 
 import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import java.io.EOFException;
 import java.io.IOException;
@@ -78,7 +79,8 @@ public class S3AInputStream extends FSInputStream implements CanSetReadahead {
   private final String key;
   private final long contentLength;
   private final String uri;
-  public static final Logger LOG = S3AFileSystem.LOG;
+  private static final Logger LOG =
+      LoggerFactory.getLogger(S3AInputStream.class);
   private final S3AInstrumentation.InputStreamStatistics streamStatistics;
   private S3AEncryptionMethods serverSideEncryptionAlgorithm;
   private String serverSideEncryptionKey;
@@ -451,13 +453,27 @@ public class S3AInputStream extends FSInputStream implements CanSetReadahead {
       // if the amount of data remaining in the current request is greater
       // than the readahead value: abort.
       long remaining = remainingInCurrentRequest();
+      LOG.debug("Closing stream {}: {}", reason,
+          forceAbort ? "abort" : "soft");
       boolean shouldAbort = forceAbort || remaining > readahead;
       if (!shouldAbort) {
         try {
           // clean close. This will read to the end of the stream,
           // so, while cleaner, can be pathological on a multi-GB object
+
+          // explicitly drain the stream
+          long drained = 0;
+          while (wrappedStream.read() >= 0) {
+            drained++;
+          }
+          LOG.debug("Drained stream of {} bytes", drained);
+
+          // now close it
           wrappedStream.close();
-          streamStatistics.streamClose(false, remaining);
+          // this MUST come after the close, so that if the IO operations fail
+          // and an abort is triggered, the initial attempt's statistics
+          // aren't collected.
+          streamStatistics.streamClose(false, drained);
         } catch (IOException e) {
           // exception escalates to an abort
           LOG.debug("When closing {} stream for {}", uri, reason, e);
@@ -467,13 +483,15 @@ public class S3AInputStream extends FSInputStream implements CanSetReadahead {
       if (shouldAbort) {
         // Abort, rather than just close, the underlying stream.  Otherwise, the
         // remaining object payload is read from S3 while closing the stream.
+        LOG.debug("Aborting stream");
         wrappedStream.abort();
         streamStatistics.streamClose(true, remaining);
       }
-      LOG.debug("Stream {} {}: {}; streamPos={}, nextReadPos={}," +
+      LOG.debug("Stream {} {}: {}; remaining={} streamPos={},"
+              + " nextReadPos={}," +
           " request range {}-{} length={}",
           uri, (shouldAbort ? "aborted" : "closed"), reason,
-          pos, nextReadPos,
+          remaining, pos, nextReadPos,
           contentRangeStart, contentRangeFinish,
           length);
       wrappedStream = null;
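
The drain-versus-abort policy above generalizes to any HTTP-backed stream: draining to the end lets the underlying connection be pooled and reused, while abort discards it. A hedged, stream-agnostic sketch of the same decision (names are illustrative, not the S3A internals):

```java
import java.io.IOException;
import java.io.InputStream;

public class DrainOrAbortSketch {
  /**
   * Hypothetical helper: abort when too many bytes remain to read cheaply,
   * otherwise drain and close so the connection can be reused.
   * @return true if the stream was aborted
   */
  static boolean closeOrAbort(InputStream in, Runnable abort,
      long remaining, long readahead) throws IOException {
    if (remaining > readahead) {
      // Too much left: reading it all would cost more than a new connection.
      abort.run();
      return true;
    }
    // Cheap enough: drain the remaining bytes, then close normally.
    while (in.read() >= 0) {
      // discard
    }
    in.close();
    return false;
  }
}
```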


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[30/50] [abbrv] hadoop git commit: Add release notes, changes, jdiff for 3.0.0-alpha4

Posted by as...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f10864a8/hadoop-common-project/hadoop-common/src/site/markdown/release/3.0.0-alpha4/CHANGES.3.0.0-alpha4.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/3.0.0-alpha4/CHANGES.3.0.0-alpha4.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/3.0.0-alpha4/CHANGES.3.0.0-alpha4.md
new file mode 100644
index 0000000..4d4d0bc
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/3.0.0-alpha4/CHANGES.3.0.0-alpha4.md
@@ -0,0 +1,880 @@
+
+<!---
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+-->
+# "Apache Hadoop" Changelog
+
+## Release 3.0.0-alpha4 - 2017-06-30
+
+### INCOMPATIBLE CHANGES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+| [HDFS-10860](https://issues.apache.org/jira/browse/HDFS-10860) | Switch HttpFS from Tomcat to Jetty |  Blocker | httpfs | John Zhuge | John Zhuge |
+| [HADOOP-13929](https://issues.apache.org/jira/browse/HADOOP-13929) | ADLS connector should not check in contract-test-options.xml |  Major | fs/adl, test | John Zhuge | John Zhuge |
+| [HDFS-11100](https://issues.apache.org/jira/browse/HDFS-11100) | Recursively deleting file protected by sticky bit should fail |  Critical | fs | John Zhuge | John Zhuge |
+| [HADOOP-13805](https://issues.apache.org/jira/browse/HADOOP-13805) | UGI.getCurrentUser() fails if user does not have a keytab associated |  Major | security | Alejandro Abdelnur | Xiao Chen |
+| [HDFS-11405](https://issues.apache.org/jira/browse/HDFS-11405) | Rename "erasurecode" CLI subcommand to "ec" |  Blocker | erasure-coding | Andrew Wang | Manoj Govindassamy |
+| [HDFS-11426](https://issues.apache.org/jira/browse/HDFS-11426) | Refactor EC CLI to be similar to storage policies CLI |  Major | erasure-coding, shell | Andrew Wang | Andrew Wang |
+| [HDFS-11427](https://issues.apache.org/jira/browse/HDFS-11427) | Rename "rs-default" to "rs" |  Major | erasure-coding | Andrew Wang | Andrew Wang |
+| [HDFS-11382](https://issues.apache.org/jira/browse/HDFS-11382) | Persist Erasure Coding Policy ID in a new optional field in INodeFile in FSImage |  Major | hdfs | Manoj Govindassamy | Manoj Govindassamy |
+| [HDFS-11428](https://issues.apache.org/jira/browse/HDFS-11428) | Change setErasureCodingPolicy to take a required string EC policy name |  Major | erasure-coding | Andrew Wang | Andrew Wang |
+| [HADOOP-14138](https://issues.apache.org/jira/browse/HADOOP-14138) | Remove S3A ref from META-INF service discovery, rely on existing core-default entry |  Critical | fs/s3 | Steve Loughran | Steve Loughran |
+| [HDFS-11152](https://issues.apache.org/jira/browse/HDFS-11152) | Start erasure coding policy ID number from 1 instead of 0 to void potential unexpected errors |  Blocker | erasure-coding | SammiChen | SammiChen |
+| [HDFS-11314](https://issues.apache.org/jira/browse/HDFS-11314) | Enforce set of enabled EC policies on the NameNode |  Blocker | erasure-coding | Andrew Wang | Andrew Wang |
+| [HDFS-11505](https://issues.apache.org/jira/browse/HDFS-11505) | Do not enable any erasure coding policies by default |  Major | erasure-coding | Andrew Wang | Manoj Govindassamy |
+| [HADOOP-10101](https://issues.apache.org/jira/browse/HADOOP-10101) | Update guava dependency to the latest version |  Major | . | Rakesh R | Tsuyoshi Ozawa |
+| [HADOOP-14267](https://issues.apache.org/jira/browse/HADOOP-14267) | Make DistCpOptions class immutable |  Major | tools/distcp | Mingliang Liu | Mingliang Liu |
+| [HADOOP-14202](https://issues.apache.org/jira/browse/HADOOP-14202) | fix jsvc/secure user var inconsistencies |  Major | scripts | Allen Wittenauer | Allen Wittenauer |
+| [HADOOP-14174](https://issues.apache.org/jira/browse/HADOOP-14174) | Set default ADLS access token provider type to ClientCredential |  Major | fs/adl | John Zhuge | John Zhuge |
+| [YARN-6298](https://issues.apache.org/jira/browse/YARN-6298) | Metric preemptCall is not used in new preemption |  Blocker | fairscheduler | Yufei Gu | Yufei Gu |
+| [HADOOP-14285](https://issues.apache.org/jira/browse/HADOOP-14285) | Update minimum version of Maven from 3.0 to 3.3 |  Major | . | Akira Ajisaka | Akira Ajisaka |
+| [HADOOP-14225](https://issues.apache.org/jira/browse/HADOOP-14225) | Remove xmlenc dependency |  Minor | . | Chris Douglas | Chris Douglas |
+| [HADOOP-13665](https://issues.apache.org/jira/browse/HADOOP-13665) | Erasure Coding codec should support fallback coder |  Blocker | io | Wei-Chiu Chuang | Kai Sasaki |
+| [HADOOP-14248](https://issues.apache.org/jira/browse/HADOOP-14248) | Retire SharedInstanceProfileCredentialsProvider in trunk. |  Major | fs/s3 | Mingliang Liu | Mingliang Liu |
+| [HDFS-11565](https://issues.apache.org/jira/browse/HDFS-11565) | Use compact identifiers for built-in ECPolicies in HdfsFileStatus |  Blocker | erasure-coding | Andrew Wang | Andrew Wang |
+| [YARN-3427](https://issues.apache.org/jira/browse/YARN-3427) | Remove deprecated methods from ResourceCalculatorProcessTree |  Blocker | . | Karthik Kambatla | Miklos Szegedi |
+| [HDFS-11402](https://issues.apache.org/jira/browse/HDFS-11402) | HDFS Snapshots should capture point-in-time copies of OPEN files |  Major | hdfs | Manoj Govindassamy | Manoj Govindassamy |
+| [HADOOP-10105](https://issues.apache.org/jira/browse/HADOOP-10105) | remove httpclient dependency |  Blocker | build | Colin P. McCabe | Akira Ajisaka |
+| [HADOOP-13200](https://issues.apache.org/jira/browse/HADOOP-13200) | Implement customizable and configurable erasure coders |  Blocker | . | Kai Zheng | Tim Yao |
+| [YARN-2962](https://issues.apache.org/jira/browse/YARN-2962) | ZKRMStateStore: Limit the number of znodes under a znode |  Critical | resourcemanager | Karthik Kambatla | Varun Saxena |
+| [HADOOP-14386](https://issues.apache.org/jira/browse/HADOOP-14386) | Rewind trunk from Guava 21.0 back to Guava 11.0.2 |  Blocker | . | Andrew Wang | Andrew Wang |
+| [HADOOP-14401](https://issues.apache.org/jira/browse/HADOOP-14401) | maven-project-info-reports-plugin can be removed |  Major | . | Andras Bokor | Andras Bokor |
+| [HADOOP-14375](https://issues.apache.org/jira/browse/HADOOP-14375) | Remove tomcat support from hadoop-functions.sh |  Minor | scripts | Allen Wittenauer | John Zhuge |
+| [HADOOP-14419](https://issues.apache.org/jira/browse/HADOOP-14419) | Remove findbugs report from docs profile |  Minor | . | Andras Bokor | Andras Bokor |
+| [HADOOP-14426](https://issues.apache.org/jira/browse/HADOOP-14426) | Upgrade Kerby version from 1.0.0-RC2 to 1.0.0 |  Blocker | security | Jiajia Li | Jiajia Li |
+| [HADOOP-13921](https://issues.apache.org/jira/browse/HADOOP-13921) | Remove Log4j classes from JobConf |  Critical | conf | Sean Busbey | Sean Busbey |
+| [HADOOP-8143](https://issues.apache.org/jira/browse/HADOOP-8143) | Change distcp to have -pb on by default |  Minor | . | Dave Thompson | Mithun Radhakrishnan |
+| [HADOOP-14502](https://issues.apache.org/jira/browse/HADOOP-14502) | Confusion/name conflict between NameNodeActivity#BlockReportNumOps and RpcDetailedActivity#BlockReportNumOps |  Minor | metrics | Erik Krogen | Erik Krogen |
+| [HDFS-11067](https://issues.apache.org/jira/browse/HDFS-11067) | DFS#listStatusIterator(..) should throw FileNotFoundException if the directory deleted before fetching next batch of entries |  Major | hdfs-client | Vinayakumar B | Vinayakumar B |
+
+
+### NEW FEATURES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+| [YARN-5910](https://issues.apache.org/jira/browse/YARN-5910) | Support for multi-cluster delegation tokens |  Minor | security | Clay B. | Jian He |
+| [YARN-5864](https://issues.apache.org/jira/browse/YARN-5864) | YARN Capacity Scheduler - Queue Priorities |  Major | . | Wangda Tan | Wangda Tan |
+| [HDFS-11194](https://issues.apache.org/jira/browse/HDFS-11194) | Maintain aggregated peer performance metrics on NameNode |  Major | namenode | Xiaobing Zhou | Arpit Agarwal |
+| [HADOOP-14049](https://issues.apache.org/jira/browse/HADOOP-14049) | Honour AclBit flag associated to file/folder permission for Azure datalake account |  Major | fs/adl | Vishwajeet Dusane | Vishwajeet Dusane |
+| [YARN-5280](https://issues.apache.org/jira/browse/YARN-5280) | Allow YARN containers to run with Java Security Manager |  Minor | nodemanager, yarn | Greg Phillips | Greg Phillips |
+| [HADOOP-14048](https://issues.apache.org/jira/browse/HADOOP-14048) | REDO operation of WASB#AtomicRename should create placeholder blob for destination folder |  Critical | fs/azure | NITIN VERMA | NITIN VERMA |
+| [YARN-6451](https://issues.apache.org/jira/browse/YARN-6451) | Add RM monitor validating metrics invariants |  Major | . | Carlo Curino | Carlo Curino |
+| [MAPREDUCE-6871](https://issues.apache.org/jira/browse/MAPREDUCE-6871) | Allow users to specify racks and nodes for strict locality for AMs |  Major | client | Robert Kanter | Robert Kanter |
+| [HDFS-11417](https://issues.apache.org/jira/browse/HDFS-11417) | Add datanode admin command to get the storage info. |  Major | . | Surendra Singh Lilhore | Surendra Singh Lilhore |
+| [YARN-679](https://issues.apache.org/jira/browse/YARN-679) | add an entry point that can start any Yarn service |  Major | api | Steve Loughran | Steve Loughran |
+
+
+### IMPROVEMENTS:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+| [HADOOP-14002](https://issues.apache.org/jira/browse/HADOOP-14002) | Document -DskipShade property in BUILDING.txt |  Minor | build, documentation | Hanisha Koneru | Hanisha Koneru |
+| [HADOOP-13956](https://issues.apache.org/jira/browse/HADOOP-13956) | Read ADLS credentials from Credential Provider |  Critical | fs/adl | John Zhuge | John Zhuge |
+| [HADOOP-13962](https://issues.apache.org/jira/browse/HADOOP-13962) | Update ADLS SDK to 2.1.4 |  Major | fs/adl | John Zhuge | John Zhuge |
+| [YARN-5547](https://issues.apache.org/jira/browse/YARN-5547) | NMLeveldbStateStore should be more tolerant of unknown keys |  Major | nodemanager | Jason Lowe | Ajith S |
+| [HADOOP-13990](https://issues.apache.org/jira/browse/HADOOP-13990) | Document KMS usage of CredentialProvider API |  Minor | documentation, kms | John Zhuge | John Zhuge |
+| [HDFS-10534](https://issues.apache.org/jira/browse/HDFS-10534) | NameNode WebUI should display DataNode usage histogram |  Major | namenode, ui | Zhe Zhang | Kai Sasaki |
+| [MAPREDUCE-6829](https://issues.apache.org/jira/browse/MAPREDUCE-6829) | Add peak memory usage counter for each task |  Major | mrv2 | Yufei Gu | Miklos Szegedi |
+| [HDFS-11374](https://issues.apache.org/jira/browse/HDFS-11374) | Skip FSync in Test util CreateEditsLog to speed up edit log generation |  Minor | hdfs | Hanisha Koneru | Hanisha Koneru |
+| [HDFS-9884](https://issues.apache.org/jira/browse/HDFS-9884) | Use doxia macro to generate in-page TOC of HDFS site documentation |  Major | documentation | Masatake Iwasaki | Masatake Iwasaki |
+| [YARN-6131](https://issues.apache.org/jira/browse/YARN-6131) | FairScheduler: Lower update interval for faster tests |  Major | fairscheduler | Karthik Kambatla | Karthik Kambatla |
+| [YARN-6106](https://issues.apache.org/jira/browse/YARN-6106) | Document FairScheduler 'allowPreemptionFrom' queue property |  Minor | fairscheduler | Yufei Gu | Yufei Gu |
+| [YARN-4658](https://issues.apache.org/jira/browse/YARN-4658) | Typo in o.a.h.yarn.server.resourcemanager.scheduler.fair.TestFairScheduler comment |  Major | . | Daniel Templeton | Udai Kiran Potluri |
+| [MAPREDUCE-6644](https://issues.apache.org/jira/browse/MAPREDUCE-6644) | Use doxia macro to generate in-page TOC of MapReduce site documentation |  Major | documentation | Masatake Iwasaki | Masatake Iwasaki |
+| [HDFS-11370](https://issues.apache.org/jira/browse/HDFS-11370) | Optimize NamenodeFsck#getReplicaInfo |  Minor | namenode | Takanobu Asanuma | Takanobu Asanuma |
+| [HDFS-11112](https://issues.apache.org/jira/browse/HDFS-11112) | Journal Nodes should refuse to format non-empty directories |  Major | . | Arpit Agarwal | Yiqun Lin |
+| [HDFS-11353](https://issues.apache.org/jira/browse/HDFS-11353) | Improve the unit tests relevant to DataNode volume failure testing |  Major | . | Yiqun Lin | Yiqun Lin |
+| [HADOOP-14053](https://issues.apache.org/jira/browse/HADOOP-14053) | Update the link to HTrace SpanReceivers |  Minor | documentation | Akira Ajisaka | Yiqun Lin |
+| [HADOOP-12097](https://issues.apache.org/jira/browse/HADOOP-12097) | Allow port range to be specified while starting webapp |  Major | . | Varun Saxena | Varun Saxena |
+| [HDFS-10219](https://issues.apache.org/jira/browse/HDFS-10219) | Change the default value for dfs.namenode.reconstruction.pending.timeout-sec from -1 to 300 |  Minor | . | Akira Ajisaka | Yiqun Lin |
+| [MAPREDUCE-6404](https://issues.apache.org/jira/browse/MAPREDUCE-6404) | Allow AM to specify a port range for starting its webapp |  Major | applicationmaster | Varun Saxena | Varun Saxena |
+| [MAPREDUCE-6842](https://issues.apache.org/jira/browse/MAPREDUCE-6842) | Update the links in PiEstimator document |  Minor | documentation | Akira Ajisaka | Jung Yoo |
+| [HDFS-11210](https://issues.apache.org/jira/browse/HDFS-11210) | Enhance key rolling to guarantee new KeyVersion is returned from generateEncryptedKeys after a key is rolled |  Major | encryption, kms | Xiao Chen | Xiao Chen |
+| [HDFS-11409](https://issues.apache.org/jira/browse/HDFS-11409) | DatanodeInfo getNetworkLocation and setNetworkLocation shoud use volatile instead of synchronized |  Minor | performance | Chen Liang | Chen Liang |
+| [YARN-6061](https://issues.apache.org/jira/browse/YARN-6061) | Add an UncaughtExceptionHandler for critical threads in RM |  Major | resourcemanager | Yufei Gu | Yufei Gu |
+| [YARN-4753](https://issues.apache.org/jira/browse/YARN-4753) | Use doxia macro to generate in-page TOC of YARN site documentation |  Major | documentation | Masatake Iwasaki | Masatake Iwasaki |
+| [HDFS-11333](https://issues.apache.org/jira/browse/HDFS-11333) | Print a user friendly error message when plugins are not found |  Minor | namenode | Wei-Chiu Chuang | Wei-Chiu Chuang |
+| [YARN-6174](https://issues.apache.org/jira/browse/YARN-6174) | Log files pattern should be same for both running and finished container |  Major | yarn | Sumana Sathish | Xuan Gong |
+| [HDFS-11375](https://issues.apache.org/jira/browse/HDFS-11375) | Display the volume storage type in datanode UI |  Minor | datanode, ui | Surendra Singh Lilhore | Surendra Singh Lilhore |
+| [YARN-6125](https://issues.apache.org/jira/browse/YARN-6125) | The application attempt's diagnostic message should have a maximum size |  Critical | resourcemanager | Daniel Templeton | Andras Piros |
+| [HADOOP-14077](https://issues.apache.org/jira/browse/HADOOP-14077) | Improve the patch of HADOOP-13119 |  Major | security | Yuanbo Liu | Yuanbo Liu |
+| [HDFS-11406](https://issues.apache.org/jira/browse/HDFS-11406) | Remove unused getStartInstance and getFinalizeInstance in FSEditLogOp |  Trivial | . | Andrew Wang | Alison Yu |
+| [HDFS-11438](https://issues.apache.org/jira/browse/HDFS-11438) | Fix typo in error message of StoragePolicyAdmin tool |  Trivial | . | Alison Yu | Alison Yu |
+| [YARN-6194](https://issues.apache.org/jira/browse/YARN-6194) | Cluster capacity in SchedulingPolicy is updated only on allocation file reload |  Major | fairscheduler | Karthik Kambatla | Yufei Gu |
+| [HADOOP-13321](https://issues.apache.org/jira/browse/HADOOP-13321) | Deprecate FileSystem APIs that promote inefficient call patterns. |  Major | fs | Chris Nauroth | Mingliang Liu |
+| [HADOOP-14097](https://issues.apache.org/jira/browse/HADOOP-14097) | Remove Java6 specific code from GzipCodec.java |  Minor | . | Akira Ajisaka | Elek, Marton |
+| [HADOOP-13817](https://issues.apache.org/jira/browse/HADOOP-13817) | Add a finite shell command timeout to ShellBasedUnixGroupsMapping |  Minor | security | Harsh J | Harsh J |
+| [HDFS-11295](https://issues.apache.org/jira/browse/HDFS-11295) | Check storage remaining instead of node remaining in BlockPlacementPolicyDefault.chooseReplicaToDelete() |  Major | namenode | Xiao Liang | Elek, Marton |
+| [HADOOP-14127](https://issues.apache.org/jira/browse/HADOOP-14127) | Add log4j configuration to enable logging in hadoop-distcp's tests |  Minor | test | Xiao Chen | Xiao Chen |
+| [HDFS-11466](https://issues.apache.org/jira/browse/HDFS-11466) | Change dfs.namenode.write-lock-reporting-threshold-ms default from 1000ms to 5000ms |  Major | namenode | Andrew Wang | Andrew Wang |
+| [YARN-6189](https://issues.apache.org/jira/browse/YARN-6189) | Improve application status log message when RM restarted when app is in NEW state |  Major | . | Yesha Vora | Junping Du |
+| [HDFS-11432](https://issues.apache.org/jira/browse/HDFS-11432) | Federation : Support fully qualified path for Quota/Snapshot/cacheadmin/cryptoadmin commands |  Major | federation | Brahma Reddy Battula | Brahma Reddy Battula |
+| [HDFS-11461](https://issues.apache.org/jira/browse/HDFS-11461) | DataNode Disk Outlier Detection |  Major | hdfs | Hanisha Koneru | Hanisha Koneru |
+| [HDFS-11416](https://issues.apache.org/jira/browse/HDFS-11416) | Refactor out system default erasure coding policy |  Major | erasure-coding | Andrew Wang | Andrew Wang |
+| [HADOOP-13930](https://issues.apache.org/jira/browse/HADOOP-13930) | Azure: Add Authorization support to WASB |  Major | fs/azure | Dushyanth | Sivaguru Sankaridurg |
+| [HDFS-11494](https://issues.apache.org/jira/browse/HDFS-11494) | Log message when DN is not selected for block replication |  Minor | . | Yiqun Lin | Yiqun Lin |
+| [HDFS-8741](https://issues.apache.org/jira/browse/HDFS-8741) | Proper error msg to be printed when invalid operation type is given to WebHDFS operations |  Minor | webhdfs | Archana T | Surendra Singh Lilhore |
+| [HADOOP-14108](https://issues.apache.org/jira/browse/HADOOP-14108) | CLI MiniCluster: add an option to specify NameNode HTTP port |  Minor | . | Takanobu Asanuma | Takanobu Asanuma |
+| [HDFS-10838](https://issues.apache.org/jira/browse/HDFS-10838) | Last full block report received time for each DN should be easily discoverable |  Major | ui | Arpit Agarwal | Surendra Singh Lilhore |
+| [HDFS-11477](https://issues.apache.org/jira/browse/HDFS-11477) | Simplify file IO profiling configuration |  Minor | . | Hanisha Koneru | Hanisha Koneru |
+| [YARN-6287](https://issues.apache.org/jira/browse/YARN-6287) | RMCriticalThreadUncaughtExceptionHandler.rmContext should be final |  Minor | resourcemanager | Daniel Templeton | Corey Barker |
+| [HADOOP-14150](https://issues.apache.org/jira/browse/HADOOP-14150) | Implement getHomeDirectory() method in NativeAzureFileSystem |  Critical | fs/azure | Namit Maheshwari | Santhosh G Nayak |
+| [YARN-6300](https://issues.apache.org/jira/browse/YARN-6300) | NULL\_UPDATE\_REQUESTS is redundant in TestFairScheduler |  Minor | . | Daniel Templeton | Yuanbo Liu |
+| [HDFS-11506](https://issues.apache.org/jira/browse/HDFS-11506) | Move ErasureCodingPolicyManager#getSystemDefaultPolicy to test code |  Major | erasure-coding | Andrew Wang | Manoj Govindassamy |
+| [HADOOP-13946](https://issues.apache.org/jira/browse/HADOOP-13946) | Document how HDFS updates timestamps in the FS spec; compare with object stores |  Minor | documentation, fs | Steve Loughran | Steve Loughran |
+| [YARN-6042](https://issues.apache.org/jira/browse/YARN-6042) | Dump scheduler and queue state information into FairScheduler DEBUG log |  Major | fairscheduler | Yufei Gu | Yufei Gu |
+| [HDFS-11511](https://issues.apache.org/jira/browse/HDFS-11511) | Support Timeout when checking single disk |  Major | hdfs | Hanisha Koneru | Hanisha Koneru |
+| [HDFS-10601](https://issues.apache.org/jira/browse/HDFS-10601) | Improve log message to include hostname when the NameNode is in safemode |  Minor | . | Kuhu Shukla | Kuhu Shukla |
+| [HDFS-11517](https://issues.apache.org/jira/browse/HDFS-11517) | Expose slow disks via DataNode JMX |  Major | hdfs | Hanisha Koneru | Hanisha Koneru |
+| [HADOOP-14169](https://issues.apache.org/jira/browse/HADOOP-14169) | Implement listStatusIterator, listLocatedStatus for ViewFs |  Minor | viewfs | Erik Krogen | Erik Krogen |
+| [HDFS-11547](https://issues.apache.org/jira/browse/HDFS-11547) | Add logs for slow BlockReceiver while writing data to disk |  Major | datanode | Xiaobing Zhou | Xiaobing Zhou |
+| [MAPREDUCE-6865](https://issues.apache.org/jira/browse/MAPREDUCE-6865) | Fix typo in javadoc for DistributedCache |  Trivial | . | Attila Sasvari | Attila Sasvari |
+| [YARN-6309](https://issues.apache.org/jira/browse/YARN-6309) | Fair scheduler docs should have the queue and queuePlacementPolicy elements listed in bold so that they're easier to see |  Minor | fairscheduler | Daniel Templeton | esmaeil mirzaee |
+| [HADOOP-13945](https://issues.apache.org/jira/browse/HADOOP-13945) | Azure: Add Kerberos and Delegation token support to WASB client. |  Major | fs/azure | Santhosh G Nayak | Santhosh G Nayak |
+| [HDFS-11545](https://issues.apache.org/jira/browse/HDFS-11545) | Propagate DataNode's slow disks info to the NameNode via Heartbeat |  Major | . | Hanisha Koneru | Hanisha Koneru |
+| [YARN-6284](https://issues.apache.org/jira/browse/YARN-6284) | hasAlreadyRun should be final in ResourceManager.StandByTransitionRunnable |  Major | resourcemanager | Daniel Templeton | Laura Adams |
+| [HADOOP-14213](https://issues.apache.org/jira/browse/HADOOP-14213) | Move Configuration runtime check for hadoop-site.xml to initialization |  Major | . | Jonathan Eagles | Jonathan Eagles |
+| [HDFS-10649](https://issues.apache.org/jira/browse/HDFS-10649) | Remove unused PermissionStatus#applyUMask |  Trivial | . | John Zhuge | Chen Liang |
+| [HDFS-11574](https://issues.apache.org/jira/browse/HDFS-11574) | Spelling mistakes in the Java source |  Trivial | . | hu xiaodong | hu xiaodong |
+| [HDFS-11534](https://issues.apache.org/jira/browse/HDFS-11534) | Add counters for number of blocks in pending IBR |  Major | hdfs | Xiaobing Zhou | Xiaobing Zhou |
+| [YARN-5956](https://issues.apache.org/jira/browse/YARN-5956) | Refactor ClientRMService to unify error handling across apis |  Minor | resourcemanager | Kai Sasaki | Kai Sasaki |
+| [YARN-6379](https://issues.apache.org/jira/browse/YARN-6379) | Remove unused argument in ClientRMService |  Trivial | . | Kai Sasaki | Kai Sasaki |
+| [HADOOP-14233](https://issues.apache.org/jira/browse/HADOOP-14233) | Delay construction of PreCondition.check failure message in Configuration#set |  Major | . | Jonathan Eagles | Jonathan Eagles |
+| [HADOOP-14240](https://issues.apache.org/jira/browse/HADOOP-14240) | Configuration#get return value optimization |  Major | . | Jonathan Eagles | Jonathan Eagles |
+| [YARN-6339](https://issues.apache.org/jira/browse/YARN-6339) | Improve performance for createAndGetApplicationReport |  Major | . | yunjiong zhao | yunjiong zhao |
+| [HDFS-11170](https://issues.apache.org/jira/browse/HDFS-11170) | Add builder-based create API to FileSystem |  Major | . | SammiChen | SammiChen |
+| [YARN-6329](https://issues.apache.org/jira/browse/YARN-6329) | Remove unnecessary TODO comment from AppLogAggregatorImpl.java |  Minor | . | Akira Ajisaka | victor bertschinger |
+| [HDFS-9705](https://issues.apache.org/jira/browse/HDFS-9705) | Refine the behaviour of getFileChecksum when length = 0 |  Minor | . | Kai Zheng | SammiChen |
+| [HADOOP-14250](https://issues.apache.org/jira/browse/HADOOP-14250) | Correct spelling of 'separate' and variants |  Minor | . | Doris Gu | Doris Gu |
+| [HDFS-10974](https://issues.apache.org/jira/browse/HDFS-10974) | Document replication factor for EC files. |  Major | documentation, erasure-coding | Wei-Chiu Chuang | Yiqun Lin |
+| [HDFS-11551](https://issues.apache.org/jira/browse/HDFS-11551) | Handle SlowDiskReport from DataNode at the NameNode |  Major | hdfs | Hanisha Koneru | Hanisha Koneru |
+| [HDFS-11603](https://issues.apache.org/jira/browse/HDFS-11603) | Improve slow mirror/disk warnings in BlockReceiver |  Major | datanode | Arpit Agarwal | Arpit Agarwal |
+| [HDFS-11560](https://issues.apache.org/jira/browse/HDFS-11560) | Expose slow disks via NameNode JMX |  Major | namenode | Hanisha Koneru | Hanisha Koneru |
+| [HDFS-11598](https://issues.apache.org/jira/browse/HDFS-11598) | Improve -setrep for Erasure Coded files |  Major | shell | Wei-Chiu Chuang | Yiqun Lin |
+| [HDFS-9651](https://issues.apache.org/jira/browse/HDFS-9651) | All web UIs should include a robots.txt file |  Minor | . | Lars Francke | Lars Francke |
+| [HADOOP-14280](https://issues.apache.org/jira/browse/HADOOP-14280) | Fix compilation of TestKafkaMetrics |  Major | tools | Andrew Wang | Andrew Wang |
+| [HDFS-11628](https://issues.apache.org/jira/browse/HDFS-11628) | Clarify the behavior of HDFS Mover in documentation |  Major | documentation | Xiaobing Zhou | Xiaobing Zhou |
+| [YARN-6381](https://issues.apache.org/jira/browse/YARN-6381) | FSAppAttempt has several variables that should be final |  Major | fairscheduler | Daniel Templeton | Ameet Zaveri |
+| [HDFS-11302](https://issues.apache.org/jira/browse/HDFS-11302) | Improve Logging for SSLHostnameVerifier |  Major | security | Xiaoyu Yao | Chen Liang |
+| [HADOOP-14104](https://issues.apache.org/jira/browse/HADOOP-14104) | Client should always ask namenode for kms provider path. |  Major | kms | Rushabh S Shah | Rushabh S Shah |
+| [YARN-5797](https://issues.apache.org/jira/browse/YARN-5797) | Add metrics to the node manager for cleaning the PUBLIC and PRIVATE caches |  Major | . | Chris Trezzo | Chris Trezzo |
+| [HADOOP-14276](https://issues.apache.org/jira/browse/HADOOP-14276) | Add a nanosecond API to Time/Timer/FakeTimer |  Minor | util | Erik Krogen | Erik Krogen |
+| [HDFS-11623](https://issues.apache.org/jira/browse/HDFS-11623) | Move system erasure coding policies into hadoop-hdfs-client |  Major | erasure-coding | Andrew Wang | Andrew Wang |
+| [HADOOP-14008](https://issues.apache.org/jira/browse/HADOOP-14008) | Upgrade to Apache Yetus 0.4.0 |  Major | build, documentation, test | Allen Wittenauer | Allen Wittenauer |
+| [YARN-6195](https://issues.apache.org/jira/browse/YARN-6195) | Export UsedCapacity and AbsoluteUsedCapacity to JMX |  Major | capacityscheduler, metrics, yarn | Benson Qiu | Benson Qiu |
+| [HDFS-11558](https://issues.apache.org/jira/browse/HDFS-11558) | BPServiceActor thread name is too long |  Minor | datanode | Tsz Wo Nicholas Sze | Xiaobing Zhou |
+| [HADOOP-14246](https://issues.apache.org/jira/browse/HADOOP-14246) | Authentication Tokens should use SecureRandom instead of Random and 256 bit secrets |  Major | security | Robert Kanter | Robert Kanter |
+| [HDFS-11645](https://issues.apache.org/jira/browse/HDFS-11645) | DataXceiver thread should log the actual error when getting InvalidMagicNumberException |  Minor | datanode | Chen Liang | Chen Liang |
+| [HDFS-11648](https://issues.apache.org/jira/browse/HDFS-11648) | Lazy construct the IIP pathname |  Major | . | Daryn Sharp | Daryn Sharp |
+| [HADOOP-14274](https://issues.apache.org/jira/browse/HADOOP-14274) | Azure: Simplify Ranger-WASB policy model |  Major | fs/azure | Sivaguru Sankaridurg | Sivaguru Sankaridurg |
+| [MAPREDUCE-6673](https://issues.apache.org/jira/browse/MAPREDUCE-6673) | Add a test example job that grows in memory usage over time |  Major | test | Karthik Kambatla | Karthik Kambatla |
+| [HADOOP-11794](https://issues.apache.org/jira/browse/HADOOP-11794) | Enable distcp to copy blocks in parallel |  Major | tools/distcp | dhruba borthakur | Yongjun Zhang |
+| [YARN-6406](https://issues.apache.org/jira/browse/YARN-6406) | Remove SchedulerRequestKeys when no more pending ResourceRequest |  Major | . | Arun Suresh | Arun Suresh |
+| [HDFS-11652](https://issues.apache.org/jira/browse/HDFS-11652) | Improve ECSchema and ErasureCodingPolicy toString, hashCode, equals |  Minor | erasure-coding | Andrew Wang | Andrew Wang |
+| [HDFS-11634](https://issues.apache.org/jira/browse/HDFS-11634) | Optimize BlockIterator when iterating starts in the middle. |  Major | . | Konstantin Shvachko | Konstantin Shvachko |
+| [HDFS-11531](https://issues.apache.org/jira/browse/HDFS-11531) | Expose hedged read metrics via libHDFS API |  Major | libhdfs | Sailesh Mukil | Sailesh Mukil |
+| [HADOOP-14316](https://issues.apache.org/jira/browse/HADOOP-14316) | Switch from Findbugs to Spotbugs |  Major | build | Allen Wittenauer | Allen Wittenauer |
+| [YARN-6164](https://issues.apache.org/jira/browse/YARN-6164) | Expose Queue Configurations per Node Label through YARN client api |  Major | . | Benson Qiu | Benson Qiu |
+| [YARN-6392](https://issues.apache.org/jira/browse/YARN-6392) | Add submit time to Application Summary log |  Minor | resourcemanager | zhihai xu | zhihai xu |
+| [HADOOP-12856](https://issues.apache.org/jira/browse/HADOOP-12856) | FileUtil.checkDest() and RawLocalFileSystem.mkdirs() to throw stricter IOEs; RawLocalFS contract tests to verify |  Minor | fs | Steve Loughran | Steve Loughran |
+| [HADOOP-14340](https://issues.apache.org/jira/browse/HADOOP-14340) | Enable KMS and HttpFS to exclude SSL ciphers |  Minor | kms | John Zhuge | John Zhuge |
+| [HDFS-11384](https://issues.apache.org/jira/browse/HDFS-11384) | Add option for balancer to disperse getBlocks calls to avoid NameNode's rpc.CallQueueLength spike |  Major | balancer & mover | yunjiong zhao | Konstantin Shvachko |
+| [HADOOP-14309](https://issues.apache.org/jira/browse/HADOOP-14309) | Add PowerShell NodeFencer |  Minor | ha | Inigo Goiri | Inigo Goiri |
+| [HADOOP-14359](https://issues.apache.org/jira/browse/HADOOP-14359) | Remove unnecessary shading of commons-httpclient |  Minor | . | Akira Ajisaka | Wei-Chiu Chuang |
+| [HADOOP-14367](https://issues.apache.org/jira/browse/HADOOP-14367) | Remove unused setting from pom.xml |  Minor | build | Akira Ajisaka | Chen Liang |
+| [HADOOP-14352](https://issues.apache.org/jira/browse/HADOOP-14352) | Make some HttpServer2 SSL properties optional |  Minor | kms | John Zhuge | John Zhuge |
+| [HDFS-11722](https://issues.apache.org/jira/browse/HDFS-11722) | Change Datanode file IO profiling sampling to percentage |  Major | hdfs | Hanisha Koneru | Hanisha Koneru |
+| [HDFS-11687](https://issues.apache.org/jira/browse/HDFS-11687) | Add new public encryption APIs required by Hive |  Major | encryption | Andrew Wang | Lei (Eddy) Xu |
+| [HADOOP-14382](https://issues.apache.org/jira/browse/HADOOP-14382) | Remove usages of MoreObjects.toStringHelper |  Minor | metrics | Andrew Wang | Andrew Wang |
+| [HDFS-9807](https://issues.apache.org/jira/browse/HDFS-9807) | Add an optional StorageID to writes |  Major | . | Chris Douglas | Ewan Higgs |
+| [HADOOP-14390](https://issues.apache.org/jira/browse/HADOOP-14390) | Correct spelling of 'succeed' and variants |  Trivial | . | Dongtao Zhang | Dongtao Zhang |
+| [HADOOP-14383](https://issues.apache.org/jira/browse/HADOOP-14383) | Implement FileSystem that reads from HTTP / HTTPS endpoints |  Major | fs | Haohui Mai | Haohui Mai |
+| [YARN-6457](https://issues.apache.org/jira/browse/YARN-6457) | Allow custom SSL configuration to be supplied in WebApps |  Major | webapp, yarn | Sanjay M Pujare | Sanjay M Pujare |
+| [HADOOP-14216](https://issues.apache.org/jira/browse/HADOOP-14216) | Improve Configuration XML Parsing Performance |  Major | . | Jonathan Eagles | Jonathan Eagles |
+| [MAPREDUCE-6883](https://issues.apache.org/jira/browse/MAPREDUCE-6883) | AuditLogger and TestAuditLogger are dead code |  Minor | client | Daniel Templeton | Vrushali C |
+| [HDFS-11800](https://issues.apache.org/jira/browse/HDFS-11800) | Document output of 'hdfs count -u' should contain PATHNAME |  Minor | hdfs | Xiaobing Zhou | Xiaobing Zhou |
+| [HADOOP-14413](https://issues.apache.org/jira/browse/HADOOP-14413) | Add Javadoc comment for jitter parameter on CachingGetSpaceUsed |  Trivial | . | Erik Krogen | Erik Krogen |
+| [HDFS-11757](https://issues.apache.org/jira/browse/HDFS-11757) | Query StreamCapabilities when creating balancer's lock file |  Major | balancer & mover | Andrew Wang | SammiChen |
+| [HDFS-11641](https://issues.apache.org/jira/browse/HDFS-11641) | Reduce cost of audit logging by using FileStatus instead of HdfsFileStatus |  Major | hdfs | Daryn Sharp | Daryn Sharp |
+| [YARN-6447](https://issues.apache.org/jira/browse/YARN-6447) | Provide container sandbox policies for groups |  Minor | nodemanager, yarn | Greg Phillips | Greg Phillips |
+| [HADOOP-14415](https://issues.apache.org/jira/browse/HADOOP-14415) | Use java.lang.AssertionError instead of junit.framework.AssertionFailedError |  Minor | . | Akira Ajisaka | Chen Liang |
+| [HDFS-11803](https://issues.apache.org/jira/browse/HDFS-11803) | Add -v option for du command to show header line |  Major | hdfs | Xiaobing Zhou | Xiaobing Zhou |
+| [YARN-6493](https://issues.apache.org/jira/browse/YARN-6493) | Print requested node partition in assignContainer logs |  Major | . | Jonathan Hung | Jonathan Hung |
+| [HDFS-11793](https://issues.apache.org/jira/browse/HDFS-11793) | Allow to enable user defined erasure coding policy |  Major | erasure-coding | SammiChen | SammiChen |
+| [HADOOP-14407](https://issues.apache.org/jira/browse/HADOOP-14407) | DistCp - Introduce a configurable copy buffer size |  Major | tools/distcp | Omkar Aradhya K S | Omkar Aradhya K S |
+| [YARN-6582](https://issues.apache.org/jira/browse/YARN-6582) | FSAppAttempt demand can be updated atomically in updateDemand() |  Major | . | Karthik Kambatla | Karthik Kambatla |
+| [HDFS-11421](https://issues.apache.org/jira/browse/HDFS-11421) | Make WebHDFS' ACLs RegEx configurable |  Major | webhdfs | Harsh J | Harsh J |
+| [HDFS-11891](https://issues.apache.org/jira/browse/HDFS-11891) | DU#refresh should print the path of the directory when an exception is caught |  Minor | . | Chen Liang | Chen Liang |
+| [HADOOP-14442](https://issues.apache.org/jira/browse/HADOOP-14442) | Owner support for ranger-wasb integration |  Major | fs, fs/azure | Varada Hemeswari | Varada Hemeswari |
+| [HDFS-11832](https://issues.apache.org/jira/browse/HDFS-11832) | Switch leftover logs to slf4j format in BlockManager.java |  Minor | namenode | Hui Xu | Chen Liang |
+| [YARN-6477](https://issues.apache.org/jira/browse/YARN-6477) | Dispatcher no longer needs the raw types suppression |  Minor | . | Daniel Templeton | Maya Wexler |
+| [YARN-6497](https://issues.apache.org/jira/browse/YARN-6497) | Method length of ResourceManager#serviceInit() is too long |  Minor | resourcemanager | Yufei Gu | Gergely Novák |
+| [HDFS-11383](https://issues.apache.org/jira/browse/HDFS-11383) | Intern strings in BlockLocation and ExtendedBlock |  Major | . | Misha Dmitriev | Misha Dmitriev |
+| [YARN-6208](https://issues.apache.org/jira/browse/YARN-6208) | Improve the log when FinishAppEvent sent to the NodeManager which didn't run the application |  Minor | . | Akira Ajisaka | Akira Ajisaka |
+| [HADOOP-14440](https://issues.apache.org/jira/browse/HADOOP-14440) | Add metrics for connections dropped |  Major | . | Eric Badger | Eric Badger |
+| [HADOOP-14485](https://issues.apache.org/jira/browse/HADOOP-14485) | Redundant 'final' modifier in try-with-resources statement |  Minor | . | Wenxin He | Wenxin He |
+| [HADOOP-14491](https://issues.apache.org/jira/browse/HADOOP-14491) | Azure has messed-up doc structure |  Major | documentation, fs/azure | Mingliang Liu | Mingliang Liu |
+| [HDFS-11914](https://issues.apache.org/jira/browse/HDFS-11914) | Add more diagnosis info for fsimage transfer failure. |  Major | . | Yongjun Zhang | Yongjun Zhang |
+| [HDFS-11840](https://issues.apache.org/jira/browse/HDFS-11840) | Log HDFS Mover exit exception message to its own log |  Minor | balancer & mover | LiXin Ge | LiXin Ge |
+| [HDFS-11861](https://issues.apache.org/jira/browse/HDFS-11861) | ipc.Client.Connection#sendRpcRequest should log request name |  Trivial | ipc | John Zhuge | John Zhuge |
+| [HADOOP-14465](https://issues.apache.org/jira/browse/HADOOP-14465) | LdapGroupsMapping - support user and group search base |  Major | common, security | Shwetha G S | Shwetha G S |
+| [HADOOP-14310](https://issues.apache.org/jira/browse/HADOOP-14310) | RolloverSignerSecretProvider.LOG should be @VisibleForTesting |  Minor | security | Daniel Templeton | Arun Shanmugam Kumar |
+| [HDFS-11907](https://issues.apache.org/jira/browse/HDFS-11907) | Add metric for time taken by NameNode resource check |  Major | . | Chen Liang | Chen Liang |
+| [HADOOP-14503](https://issues.apache.org/jira/browse/HADOOP-14503) | Make RollingAverages a mutable metric |  Major | common | Hanisha Koneru | Hanisha Koneru |
+| [HADOOP-14506](https://issues.apache.org/jira/browse/HADOOP-14506) | Add create() contract test that verifies ancestor dir creation |  Minor | fs | Aaron Fabbri | Sean Mackrory |
+| [HADOOP-14523](https://issues.apache.org/jira/browse/HADOOP-14523) | OpensslAesCtrCryptoCodec.finalize() holds excessive amounts of memory |  Major | . | Misha Dmitriev | Misha Dmitriev |
+| [HADOOP-14524](https://issues.apache.org/jira/browse/HADOOP-14524) | Make CryptoCodec Closeable so it can be cleaned up proactively |  Major | . | Xiao Chen | Xiao Chen |
+| [HADOOP-14424](https://issues.apache.org/jira/browse/HADOOP-14424) | Add CRC32C performance test. |  Minor | common | LiXin Ge | LiXin Ge |
+| [HDFS-10480](https://issues.apache.org/jira/browse/HDFS-10480) | Add an admin command to list currently open files |  Major | . | Kihwal Lee | Manoj Govindassamy |
+| [HDFS-11345](https://issues.apache.org/jira/browse/HDFS-11345) | Document the configuration key for FSNamesystem lock fairness |  Minor | documentation, namenode | Zhe Zhang | Erik Krogen |
+| [HDFS-11647](https://issues.apache.org/jira/browse/HDFS-11647) | Add -E option in hdfs "count" command to show erasure policy summarization |  Major | . | SammiChen | luhuichun |
+| [HDFS-11789](https://issues.apache.org/jira/browse/HDFS-11789) | Maintain Short-Circuit Read Statistics |  Major | hdfs-client | Hanisha Koneru | Hanisha Koneru |
+| [HDFS-11943](https://issues.apache.org/jira/browse/HDFS-11943) | [Erasure coding] Warn log frequently prints to screen in doEncode/doDecode functions |  Major | erasure-coding, native | liaoyuxiangqin | liaoyuxiangqin |
+| [HDFS-11992](https://issues.apache.org/jira/browse/HDFS-11992) | Replace commons-logging APIs with slf4j in FsDatasetImpl |  Major | . | Akira Ajisaka | hu xiaodong |
+| [HDFS-11993](https://issues.apache.org/jira/browse/HDFS-11993) | Add log info when connecting to a datanode socket address fails |  Major | hdfs-client | chencan | chencan |
+| [YARN-6634](https://issues.apache.org/jira/browse/YARN-6634) | [API] Refactor ResourceManager WebServices to make API explicit |  Critical | resourcemanager | Subru Krishnan | Giovanni Matteo Fumarola |
+| [HDFS-12045](https://issues.apache.org/jira/browse/HDFS-12045) | Add log when Diskbalancer volume is transient storage type |  Major | diskbalancer | steven-wugang | steven-wugang |
+| [HADOOP-14536](https://issues.apache.org/jira/browse/HADOOP-14536) | Update azure-storage sdk to version 5.3.0 |  Major | fs/azure | Mingliang Liu | Georgi Chalakov |
+| [YARN-6738](https://issues.apache.org/jira/browse/YARN-6738) | LevelDBCacheTimelineStore should reuse ObjectMapper instances |  Major | timelineserver | Zoltan Haindrich | Zoltan Haindrich |
+| [HADOOP-14515](https://issues.apache.org/jira/browse/HADOOP-14515) | Specifically configure zookeeper-related log levels in KMS log4j |  Major | kms | Xiao Chen | Xiao Chen |
+| [HDFS-11646](https://issues.apache.org/jira/browse/HDFS-11646) | Add -E option in 'ls' to list erasure coding policy of each file and directory if applicable |  Major | erasure-coding | SammiChen | luhuichun |
+| [HDFS-11881](https://issues.apache.org/jira/browse/HDFS-11881) | NameNode consumes a lot of memory for snapshot diff report generation |  Major | hdfs, snapshots | Manoj Govindassamy | Manoj Govindassamy |
+| [HADOOP-14602](https://issues.apache.org/jira/browse/HADOOP-14602) | allow custom release notes/changelog during create-release |  Minor | build, scripts | Allen Wittenauer | Allen Wittenauer |
+| [HADOOP-14611](https://issues.apache.org/jira/browse/HADOOP-14611) | NetworkTopology.DEFAULT\_HOST\_LEVEL is unused |  Trivial | . | Daniel Templeton | Chen Liang |
+| [YARN-6751](https://issues.apache.org/jira/browse/YARN-6751) | Display reserved resources in web UI per queue |  Major | fairscheduler, webapp | Abdullah Yousufi | Abdullah Yousufi |
+| [YARN-5892](https://issues.apache.org/jira/browse/YARN-5892) | Support user-specific minimum user limit percentage in Capacity Scheduler |  Major | capacityscheduler | Eric Payne | Eric Payne |
+| [YARN-6280](https://issues.apache.org/jira/browse/YARN-6280) | Introduce deselect query param to skip ResourceRequest from getApp/getApps REST API |  Major | resourcemanager, restapi | Lantao Jin | Lantao Jin |
+
+
+### BUG FIXES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+| [HADOOP-13858](https://issues.apache.org/jira/browse/HADOOP-13858) | TestGridmixMemoryEmulation and TestResourceUsageEmulators fail on environments other than Linux or Windows |  Major | test | Akira Ajisaka | Akira Ajisaka |
+| [YARN-6012](https://issues.apache.org/jira/browse/YARN-6012) | Remove node label (removeFromClusterNodeLabels) documentation is missing |  Major | documentation | Weiwei Yang | Ying Zhang |
+| [YARN-6117](https://issues.apache.org/jira/browse/YARN-6117) | SharedCacheManager does not start up |  Major | . | Chris Trezzo | Chris Trezzo |
+| [YARN-6082](https://issues.apache.org/jira/browse/YARN-6082) | Invalid REST api response for getApps since queueUsagePercentage is coming as INF |  Critical | . | Sunil G | Sunil G |
+| [HDFS-11365](https://issues.apache.org/jira/browse/HDFS-11365) | Log port number in PrivilegedNfsGatewayStarter |  Minor | nfs | Mukul Kumar Singh | Mukul Kumar Singh |
+| [MAPREDUCE-6808](https://issues.apache.org/jira/browse/MAPREDUCE-6808) | Log map attempts as part of shuffle handler audit log |  Major | . | Jonathan Eagles | Gergő Pásztor |
+| [HADOOP-13989](https://issues.apache.org/jira/browse/HADOOP-13989) | Remove erroneous source jar option from hadoop-client shade configuration |  Minor | build | Joe Pallas | Joe Pallas |
+| [HDFS-11369](https://issues.apache.org/jira/browse/HDFS-11369) | Change exception message in StorageLocationChecker |  Minor | datanode | Arpit Agarwal | Arpit Agarwal |
+| [YARN-4975](https://issues.apache.org/jira/browse/YARN-4975) | Fair Scheduler: exception thrown when a parent queue marked 'parent' has configured child queues |  Major | fairscheduler | Ashwin Shankar | Yufei Gu |
+| [HDFS-11364](https://issues.apache.org/jira/browse/HDFS-11364) | Add a test to verify Audit log entries for setfacl/getfacl commands over FS shell |  Major | hdfs, test | Manoj Govindassamy | Manoj Govindassamy |
+| [HDFS-11376](https://issues.apache.org/jira/browse/HDFS-11376) | Revert HDFS-8377 Support HTTP/2 in datanode |  Major | datanode | Andrew Wang | Xiao Chen |
+| [HADOOP-13988](https://issues.apache.org/jira/browse/HADOOP-13988) | KMSClientProvider does not work with WebHDFS and Apache Knox w/ProxyUser |  Major | common, kms | Greg Senia | Xiaoyu Yao |
+| [HADOOP-14029](https://issues.apache.org/jira/browse/HADOOP-14029) | Fix KMSClientProvider for non-secure proxyuser use case |  Major | common, kms | Xiaoyu Yao | Xiaoyu Yao |
+| [YARN-5641](https://issues.apache.org/jira/browse/YARN-5641) | Localizer leaves behind tarballs after container is complete |  Major | . | Eric Badger | Eric Badger |
+| [HADOOP-13992](https://issues.apache.org/jira/browse/HADOOP-13992) | KMS should load SSL configuration the same way as SSLFactory |  Major | kms, security | John Zhuge | John Zhuge |
+| [HDFS-11378](https://issues.apache.org/jira/browse/HDFS-11378) | Verify multiple DataNodes can be decommissioned/maintenance at the same time |  Major | hdfs | Manoj Govindassamy | Manoj Govindassamy |
+| [YARN-6103](https://issues.apache.org/jira/browse/YARN-6103) | Log updates for ZKRMStateStore |  Trivial | . | Bibin A Chundatt | Daniel Sturman |
+| [HADOOP-14018](https://issues.apache.org/jira/browse/HADOOP-14018) | shaded jars of hadoop-client modules are missing hadoop's root LICENSE and NOTICE files |  Critical | . | John Zhuge | Elek, Marton |
+| [HDFS-11335](https://issues.apache.org/jira/browse/HDFS-11335) | Remove HdfsClientConfigKeys.DFS\_CLIENT\_SLOW\_IO\_WARNING\_THRESHOLD\_KEY usage from DNConf |  Major | . | Manoj Govindassamy | Manoj Govindassamy |
+| [HADOOP-13895](https://issues.apache.org/jira/browse/HADOOP-13895) | Make FileStatus Serializable |  Minor | fs | Chris Douglas | Chris Douglas |
+| [HADOOP-14045](https://issues.apache.org/jira/browse/HADOOP-14045) | Aliyun OSS documentation missing from website |  Major | documentation, fs/oss | Andrew Wang | Yiqun Lin |
+| [HDFS-11363](https://issues.apache.org/jira/browse/HDFS-11363) | Need more diagnosis info when seeing Slow waitForAckedSeqno |  Major | . | Yongjun Zhang | Xiao Chen |
+| [HDFS-11387](https://issues.apache.org/jira/browse/HDFS-11387) | Socket reuse address option is not honored in PrivilegedNfsGatewayStarter |  Major | nfs | Mukul Kumar Singh | Mukul Kumar Singh |
+| [HADOOP-14044](https://issues.apache.org/jira/browse/HADOOP-14044) | Synchronization issue in delegation token cancel functionality |  Major | . | Hrishikesh Gadre | Hrishikesh Gadre |
+| [HDFS-11371](https://issues.apache.org/jira/browse/HDFS-11371) | Document missing metrics of erasure coding |  Minor | documentation, erasure-coding | Yiqun Lin | Yiqun Lin |
+| [MAPREDUCE-6338](https://issues.apache.org/jira/browse/MAPREDUCE-6338) | MR AppMaster does not honor ephemeral port range |  Major | mr-am, mrv2 | Frank Nguyen | Frank Nguyen |
+| [HDFS-11377](https://issues.apache.org/jira/browse/HDFS-11377) | Balancer hung due to no available mover threads |  Major | balancer & mover | yunjiong zhao | yunjiong zhao |
+| [HADOOP-14047](https://issues.apache.org/jira/browse/HADOOP-14047) | Require admin to access KMS instrumentation servlets |  Minor | kms | John Zhuge | John Zhuge |
+| [YARN-6135](https://issues.apache.org/jira/browse/YARN-6135) | Node manager REST API documentation is not up to date |  Trivial | nodemanager, restapi | Miklos Szegedi | Miklos Szegedi |
+| [YARN-6145](https://issues.apache.org/jira/browse/YARN-6145) | Improve log message on fail over |  Major | . | Jian He | Jian He |
+| [YARN-6031](https://issues.apache.org/jira/browse/YARN-6031) | Application recovery has failed when node label feature is turned off during RM recovery |  Minor | scheduler | Ying Zhang | Ying Zhang |
+| [YARN-6137](https://issues.apache.org/jira/browse/YARN-6137) | Yarn client implicitly invokes ATS client which accesses HDFS |  Major | . | Yesha Vora | Li Lu |
+| [HADOOP-13433](https://issues.apache.org/jira/browse/HADOOP-13433) | Race in UGI.reloginFromKeytab |  Major | security | Duo Zhang | Duo Zhang |
+| [YARN-6112](https://issues.apache.org/jira/browse/YARN-6112) | UpdateCallDuration is calculated only when debug logging is enabled |  Major | fairscheduler | Yufei Gu | Yufei Gu |
+| [YARN-6144](https://issues.apache.org/jira/browse/YARN-6144) | FairScheduler: preempted resources can become negative |  Blocker | fairscheduler, resourcemanager | Miklos Szegedi | Miklos Szegedi |
+| [YARN-6118](https://issues.apache.org/jira/browse/YARN-6118) | Add javadoc for Resources.isNone |  Minor | scheduler | Karthik Kambatla | Andres Perez |
+| [HADOOP-13119](https://issues.apache.org/jira/browse/HADOOP-13119) | Add ability to secure log servlet using proxy users |  Major | . | Jeffrey E  Rodriguez | Yuanbo Liu |
+| [YARN-6166](https://issues.apache.org/jira/browse/YARN-6166) | Unnecessary INFO logs in AMRMClientAsyncImpl$CallbackHandlerThread.run |  Trivial | . | Grant W | Grant W |
+| [HADOOP-14055](https://issues.apache.org/jira/browse/HADOOP-14055) | SwiftRestClient includes pass length in exception if auth fails |  Minor | security | Marcell Hegedus | Marcell Hegedus |
+| [HDFS-11403](https://issues.apache.org/jira/browse/HDFS-11403) | ZooKeeper ACLs on NN HA enabled clusters to be handled consistently |  Major | hdfs | Laszlo Puskas | Hanisha Koneru |
+| [HADOOP-13233](https://issues.apache.org/jira/browse/HADOOP-13233) | help of stat is confusing |  Trivial | documentation, fs | Xiaohe Lan | Attila Bukor |
+| [YARN-3933](https://issues.apache.org/jira/browse/YARN-3933) | FairScheduler: Multiple calls to completedContainer are not safe |  Major | fairscheduler | Lavkesh Lahngir | Shiwei Guo |
+| [HDFS-11407](https://issues.apache.org/jira/browse/HDFS-11407) | Document the missing usages of OfflineImageViewer processors |  Minor | documentation, tools | Yiqun Lin | Yiqun Lin |
+| [HDFS-11408](https://issues.apache.org/jira/browse/HDFS-11408) | The config name of balance bandwidth is out of date |  Minor | balancer & mover, documentation | Yiqun Lin | Yiqun Lin |
+| [HADOOP-14058](https://issues.apache.org/jira/browse/HADOOP-14058) | Fix NativeS3FileSystemContractBaseTest#testDirWithDifferentMarkersWorks |  Major | fs/s3, test | Akira Ajisaka | Yiqun Lin |
+| [HDFS-11084](https://issues.apache.org/jira/browse/HDFS-11084) | Add a regression test for sticky bit support of OIV ReverseXML processor |  Major | tools | Wei-Chiu Chuang | Wei-Chiu Chuang |
+| [HDFS-11391](https://issues.apache.org/jira/browse/HDFS-11391) | Numeric usernames do not work with WebHDFS FS (write access) |  Major | webhdfs | Pierre Villard | Pierre Villard |
+| [HADOOP-13924](https://issues.apache.org/jira/browse/HADOOP-13924) | Update checkstyle and checkstyle plugin version to handle indentation of JDK8 Lambdas |  Major | . | Xiaoyu Yao | Akira Ajisaka |
+| [HDFS-11238](https://issues.apache.org/jira/browse/HDFS-11238) | Fix checkstyle warnings in NameNode#createNameNode |  Trivial | namenode | Ethan Li | Ethan Li |
+| [YARN-4212](https://issues.apache.org/jira/browse/YARN-4212) | FairScheduler: Can't create a DRF queue under a FAIR policy queue |  Major | . | Arun Suresh | Yufei Gu |
+| [YARN-6177](https://issues.apache.org/jira/browse/YARN-6177) | Yarn client should exit with an informative error message if an incompatible Jersey library is used at client |  Major | . | Weiwei Yang | Weiwei Yang |
+| [YARN-6171](https://issues.apache.org/jira/browse/YARN-6171) | ConcurrentModificationException on FSAppAttempt.containersToPreempt |  Major | fairscheduler | Miklos Szegedi | Miklos Szegedi |
+| [HDFS-11410](https://issues.apache.org/jira/browse/HDFS-11410) | Use the cached instance when edit logging SetAclOp, SetXAttrOp and RemoveXAttrOp |  Major | namenode | Xiao Chen | Xiao Chen |
+| [YARN-6188](https://issues.apache.org/jira/browse/YARN-6188) | Fix OOM issue with decommissioningNodesWatcher in the case of clusters with a large number of nodes |  Major | resourcemanager | Ajay Jadhav | Ajay Jadhav |
+| [HDFS-11379](https://issues.apache.org/jira/browse/HDFS-11379) | DFSInputStream may infinite loop requesting block locations |  Critical | hdfs-client | Daryn Sharp | Daryn Sharp |
+| [HADOOP-14092](https://issues.apache.org/jira/browse/HADOOP-14092) | Typo in hadoop-aws index.md |  Trivial | fs/s3 | John Zhuge | John Zhuge |
+| [HDFS-11177](https://issues.apache.org/jira/browse/HDFS-11177) | 'storagepolicies -getStoragePolicy' command should accept URI based path. |  Major | shell | Surendra Singh Lilhore | Surendra Singh Lilhore |
+| [HDFS-11404](https://issues.apache.org/jira/browse/HDFS-11404) | Increase timeout on TestShortCircuitLocalRead.testDeprecatedGetBlockLocalPathInfoRpc |  Major | . | Eric Badger | Eric Badger |
+| [HADOOP-13826](https://issues.apache.org/jira/browse/HADOOP-13826) | S3A Deadlock in multipart copy due to thread pool limits. |  Critical | fs/s3 | Sean Mackrory | Sean Mackrory |
+| [HADOOP-14017](https://issues.apache.org/jira/browse/HADOOP-14017) | User friendly name for ADLS user and group |  Major | fs/adl | John Zhuge | Vishwajeet Dusane |
+| [MAPREDUCE-6825](https://issues.apache.org/jira/browse/MAPREDUCE-6825) | YARNRunner#createApplicationSubmissionContext method is longer than 150 lines |  Trivial | . | Chris Trezzo | Gergely Novák |
+| [YARN-6210](https://issues.apache.org/jira/browse/YARN-6210) | FS: Node reservations can interfere with preemption |  Major | fairscheduler | Karthik Kambatla | Karthik Kambatla |
+| [YARN-6211](https://issues.apache.org/jira/browse/YARN-6211) | Synchronization improvement for moveApplicationAcrossQueues and updateApplicationPriority |  Major | . | Bibin A Chundatt | Bibin A Chundatt |
+| [YARN-6222](https://issues.apache.org/jira/browse/YARN-6222) | TestFairScheduler.testReservationMetrics is flaky |  Major | fairscheduler | Yufei Gu | Yufei Gu |
+| [HADOOP-14114](https://issues.apache.org/jira/browse/HADOOP-14114) | S3A can no longer handle unencoded + in URIs |  Minor | fs/s3 | Sean Mackrory | Sean Mackrory |
+| [HADOOP-14116](https://issues.apache.org/jira/browse/HADOOP-14116) | FailoverOnNetworkExceptionRetry does not wait when failing over on certain exceptions |  Major | . | Jian He | Jian He |
+| [HDFS-11433](https://issues.apache.org/jira/browse/HDFS-11433) | Document missing usages of OfflineEditsViewer processors |  Minor | documentation, tools | Yiqun Lin | Yiqun Lin |
+| [HDFS-11462](https://issues.apache.org/jira/browse/HDFS-11462) | Fix occasional BindException in TestNameNodeMetricsLogger |  Major | test | Arpit Agarwal | Arpit Agarwal |
+| [HADOOP-14028](https://issues.apache.org/jira/browse/HADOOP-14028) | S3A BlockOutputStreams doesn't delete temporary files in multipart uploads or handle part upload failures |  Critical | fs/s3 | Seth Fitzsimmons | Steve Loughran |
+| [YARN-6172](https://issues.apache.org/jira/browse/YARN-6172) | FSLeafQueue demand update needs to be atomic |  Major | resourcemanager | Varun Saxena | Miklos Szegedi |
+| [HADOOP-14119](https://issues.apache.org/jira/browse/HADOOP-14119) | Remove unused imports from GzipCodec.java |  Minor | . | Akira Ajisaka | Yiqun Lin |
+| [MAPREDUCE-6841](https://issues.apache.org/jira/browse/MAPREDUCE-6841) | Fix dead link in MapReduce tutorial document |  Minor | documentation | Akira Ajisaka | Victor Nee |
+| [YARN-6231](https://issues.apache.org/jira/browse/YARN-6231) | FairSchedulerTestBase helper methods should call scheduler.update to avoid flakiness |  Major | . | Arun Suresh | Karthik Kambatla |
+| [YARN-6239](https://issues.apache.org/jira/browse/YARN-6239) | Fix javac warnings in YARN that caused by deprecated FileSystem APIs |  Minor | . | Yiqun Lin | Yiqun Lin |
+| [YARN-1728](https://issues.apache.org/jira/browse/YARN-1728) | Workaround guice3x-undecoded pathInfo in YARN WebApp |  Major | . | Abraham Elmahrek | Yuanbo Liu |
+| [HADOOP-12556](https://issues.apache.org/jira/browse/HADOOP-12556) | KafkaSink jar files are created but not copied to target dist |  Major | . | Babak Behzad | Babak Behzad |
+| [HDFS-11479](https://issues.apache.org/jira/browse/HDFS-11479) | Socket re-use address option should be used in SimpleUdpServer |  Major | nfs | Mukul Kumar Singh | Mukul Kumar Singh |
+| [HDFS-11478](https://issues.apache.org/jira/browse/HDFS-11478) | Update EC commands in HDFSCommands.md |  Minor | documentation, erasure-coding | Yiqun Lin | Yiqun Lin |
+| [MAPREDUCE-6852](https://issues.apache.org/jira/browse/MAPREDUCE-6852) | Job#updateStatus() failed with NPE due to race condition |  Major | . | Junping Du | Junping Du |
+| [MAPREDUCE-6753](https://issues.apache.org/jira/browse/MAPREDUCE-6753) | Variable in byte printed directly in mapreduce client |  Major | client | Nemo Chen | Kai Sasaki |
+| [HADOOP-6801](https://issues.apache.org/jira/browse/HADOOP-6801) | io.sort.mb and io.sort.factor were renamed and moved to mapreduce but are still in CommonConfigurationKeysPublic.java and used in SequenceFile.java |  Minor | . | Erik Steffl | Harsh J |
+| [YARN-6263](https://issues.apache.org/jira/browse/YARN-6263) | NMTokenSecretManagerInRM.createAndGetNMToken is not thread safe |  Major | yarn | Haibo Chen | Haibo Chen |
+| [YARN-6218](https://issues.apache.org/jira/browse/YARN-6218) | Fix TestAMRMClient when using FairScheduler |  Minor | . | Miklos Szegedi | Miklos Szegedi |
+| [HDFS-11476](https://issues.apache.org/jira/browse/HDFS-11476) | Fix NPE in FsDatasetImpl#checkAndUpdate |  Major | datanode | Xiaobing Zhou | Xiaobing Zhou |
+| [YARN-6271](https://issues.apache.org/jira/browse/YARN-6271) | yarn rmadmin -getGroups returns information from standby RM |  Critical | yarn | Sumana Sathish | Jian He |
+| [YARN-6270](https://issues.apache.org/jira/browse/YARN-6270) | WebUtils.getRMWebAppURLWithScheme() needs to honor RM HA setting |  Major | . | Sumana Sathish | Xuan Gong |
+| [YARN-6248](https://issues.apache.org/jira/browse/YARN-6248) | user is not removed from UsersManager when app is killed with pending container requests. |  Major | . | Eric Payne | Eric Payne |
+| [HADOOP-14026](https://issues.apache.org/jira/browse/HADOOP-14026) | start-build-env.sh: invalid docker image name |  Major | build | Gergő Pásztor | Gergő Pásztor |
+| [HDFS-11441](https://issues.apache.org/jira/browse/HDFS-11441) | Add escaping to error message in KMS web UI |  Minor | security | Aaron T. Myers | Aaron T. Myers |
+| [YARN-5665](https://issues.apache.org/jira/browse/YARN-5665) | Enhance documentation for yarn.resourcemanager.scheduler.class property |  Trivial | documentation | Miklos Szegedi | Yufei Gu |
+| [HDFS-11498](https://issues.apache.org/jira/browse/HDFS-11498) | Make RestCsrfPreventionHandler and WebHdfsHandler compatible with Netty 4.0 |  Major | . | Andrew Wang | Andrew Wang |
+| [MAPREDUCE-6855](https://issues.apache.org/jira/browse/MAPREDUCE-6855) | Specify charset when create String in CredentialsTestJob |  Minor | . | Akira Ajisaka | Kai Sasaki |
+| [HADOOP-14087](https://issues.apache.org/jira/browse/HADOOP-14087) | S3A typo in pom.xml test exclusions |  Major | fs/s3 | Aaron Fabbri | Aaron Fabbri |
+| [HDFS-11508](https://issues.apache.org/jira/browse/HDFS-11508) | Fix bind failure in SimpleTCPServer & Portmap where bind fails because socket is in TIME\_WAIT state |  Major | nfs | Mukul Kumar Singh | Mukul Kumar Singh |
+| [MAPREDUCE-6839](https://issues.apache.org/jira/browse/MAPREDUCE-6839) | TestRecovery.testCrashed failed |  Major | test | Gergő Pásztor | Gergő Pásztor |
+| [YARN-6207](https://issues.apache.org/jira/browse/YARN-6207) | Move application across queues should handle delayed event processing |  Major | capacity scheduler | Bibin A Chundatt | Bibin A Chundatt |
+| [MAPREDUCE-6859](https://issues.apache.org/jira/browse/MAPREDUCE-6859) | hadoop-mapreduce-client-jobclient.jar sets a main class that isn't in the JAR |  Minor | client | Daniel Templeton | Daniel Templeton |
+| [YARN-6297](https://issues.apache.org/jira/browse/YARN-6297) | TestAppLogAggregatorImp.verifyFilesUploaded() should check # of files uploaded with that of files expected |  Major | . | Haibo Chen | Haibo Chen |
+| [YARN-6165](https://issues.apache.org/jira/browse/YARN-6165) | Intra-queue preemption occurs even when preemption is turned off for a specific queue. |  Major | capacity scheduler, scheduler preemption | Eric Payne | Eric Payne |
+| [HADOOP-14052](https://issues.apache.org/jira/browse/HADOOP-14052) | Fix dead link in KMS document |  Minor | documentation | Akira Ajisaka | Christina Vu |
+| [YARN-6264](https://issues.apache.org/jira/browse/YARN-6264) | AM not launched when a single vcore is available on the cluster |  Major | fairscheduler | Yufei Gu | Yufei Gu |
+| [YARN-6310](https://issues.apache.org/jira/browse/YARN-6310) | OutputStreams in AggregatedLogFormat.LogWriter can be left open upon exceptions |  Major | yarn | Haibo Chen | Haibo Chen |
+| [HADOOP-14062](https://issues.apache.org/jira/browse/HADOOP-14062) | ApplicationMasterProtocolPBClientImpl.allocate fails with EOFException when RPC privacy is enabled |  Critical | . | Steven Rand | Steven Rand |
+| [YARN-6321](https://issues.apache.org/jira/browse/YARN-6321) | TestResources test timeouts are too aggressive |  Major | test | Jason Lowe | Eric Badger |
+| [HDFS-11340](https://issues.apache.org/jira/browse/HDFS-11340) | DataNode reconfigure for disks doesn't remove the failed volumes |  Major | . | Manoj Govindassamy | Manoj Govindassamy |
+| [HADOOP-14156](https://issues.apache.org/jira/browse/HADOOP-14156) | Fix grammar error in ConfTest.java |  Trivial | test | Andrey Dyatlov | Andrey Dyatlov |
+| [HDFS-11512](https://issues.apache.org/jira/browse/HDFS-11512) | Increase timeout on TestShortCircuitLocalRead#testSkipWithVerifyChecksum |  Minor | . | Eric Badger | Eric Badger |
+| [HDFS-11499](https://issues.apache.org/jira/browse/HDFS-11499) | Decommissioning stuck because of failing recovery |  Major | hdfs, namenode | Lukas Majercak | Lukas Majercak |
+| [HDFS-11395](https://issues.apache.org/jira/browse/HDFS-11395) | RequestHedgingProxyProvider#RequestHedgingInvocationHandler hides the Exception thrown from NameNode |  Major | ha | Nandakumar | Nandakumar |
+| [HDFS-11526](https://issues.apache.org/jira/browse/HDFS-11526) | Fix confusing block recovery message |  Minor | datanode | Wei-Chiu Chuang | Yiqun Lin |
+| [YARN-6327](https://issues.apache.org/jira/browse/YARN-6327) | Removing queues from CapacitySchedulerQueueManager and ParentQueue should be done with iterator |  Major | capacityscheduler | Jonathan Hung | Jonathan Hung |
+| [HADOOP-14170](https://issues.apache.org/jira/browse/HADOOP-14170) | FileSystemContractBaseTest is not cleaning up the test directory completely |  Major | fs | Mingliang Liu | Mingliang Liu |
+| [YARN-6331](https://issues.apache.org/jira/browse/YARN-6331) | Fix flakiness in TestFairScheduler#testDumpState |  Major | fairscheduler | Yufei Gu | Yufei Gu |
+| [YARN-6328](https://issues.apache.org/jira/browse/YARN-6328) | Fix a spelling mistake in CapacityScheduler |  Trivial | capacity scheduler | Jin Yibo | Jin Yibo |
+| [HDFS-11420](https://issues.apache.org/jira/browse/HDFS-11420) | Edit file should not be processed by the same type processor in OfflineEditsViewer |  Major | tools | Yiqun Lin | Yiqun Lin |
+| [YARN-6294](https://issues.apache.org/jira/browse/YARN-6294) | ATS client should better handle Socket closed case |  Major | timelineclient | Sumana Sathish | Li Lu |
+| [YARN-6332](https://issues.apache.org/jira/browse/YARN-6332) | Make RegistrySecurity use short user names for ZK ACLs |  Major | . | Billie Rinaldi | Billie Rinaldi |
+| [YARN-4051](https://issues.apache.org/jira/browse/YARN-4051) | ContainerKillEvent lost when container is still recovering and application finishes |  Critical | nodemanager | sandflee | sandflee |
+| [HDFS-11533](https://issues.apache.org/jira/browse/HDFS-11533) | reuseAddress option should be used for child channels in Portmap and SimpleTcpServer |  Major | nfs | Mukul Kumar Singh | Mukul Kumar Singh |
+| [HADOOP-14191](https://issues.apache.org/jira/browse/HADOOP-14191) | Duplicate hadoop-minikdc dependency in hadoop-common module |  Minor | build | Akira Ajisaka | Xiaobing Zhou |
+| [HDFS-10394](https://issues.apache.org/jira/browse/HDFS-10394) | move declaration of okhttp version from hdfs-client to hadoop-project POM |  Minor | build | Steve Loughran | Xiaobing Zhou |
+| [HDFS-11516](https://issues.apache.org/jira/browse/HDFS-11516) | Admin command line should print message to stderr in failure case |  Minor | . | Kai Sasaki | Kai Sasaki |
+| [YARN-6217](https://issues.apache.org/jira/browse/YARN-6217) | TestLocalCacheDirectoryManager test timeout is too aggressive |  Major | test | Jason Lowe | Miklos Szegedi |
+| [YARN-6353](https://issues.apache.org/jira/browse/YARN-6353) | Clean up OrderingPolicy javadoc |  Minor | resourcemanager | Daniel Templeton | Daniel Templeton |
+| [HADOOP-14059](https://issues.apache.org/jira/browse/HADOOP-14059) | typo in s3a rename(self, subdir) error message |  Minor | . | Steve Loughran | Steve Loughran |
+| [HDFS-6648](https://issues.apache.org/jira/browse/HDFS-6648) | Order of namenodes in ConfiguredFailoverProxyProvider is undefined |  Major | ha, hdfs-client | Rafal Wojdyla | Inigo Goiri |
+| [HADOOP-14204](https://issues.apache.org/jira/browse/HADOOP-14204) | S3A multipart commit failing, "UnsupportedOperationException at java.util.Collections$UnmodifiableList.sort" |  Critical | fs/s3 | Steve Loughran | Steve Loughran |
+| [HADOOP-14187](https://issues.apache.org/jira/browse/HADOOP-14187) | Update ZooKeeper dependency to 3.4.9 and Curator dependency to 2.12.0 |  Major | . | Tsuyoshi Ozawa | Tsuyoshi Ozawa |
+| [YARN-5934](https://issues.apache.org/jira/browse/YARN-5934) | Fix TestTimelineWebServices.testPrimaryFilterNumericString |  Major | test | Akira Ajisaka | Akira Ajisaka |
+| [HDFS-11561](https://issues.apache.org/jira/browse/HDFS-11561) | HttpFS doc errors |  Trivial | documentation, httpfs, test | Yuanbo Liu | Yuanbo Liu |
+| [HADOOP-9631](https://issues.apache.org/jira/browse/HADOOP-9631) | ViewFs should use underlying FileSystem's server side defaults |  Major | fs, viewfs | Lohit Vijayarenu | Erik Krogen |
+| [HADOOP-14214](https://issues.apache.org/jira/browse/HADOOP-14214) | DomainSocketWatcher::add()/delete() should not self interrupt while looping await() |  Critical | hdfs-client | Mingliang Liu | Mingliang Liu |
+| [HADOOP-14195](https://issues.apache.org/jira/browse/HADOOP-14195) | CredentialProviderFactory$getProviders is not thread-safe |  Major | security | Vihang Karajgaonkar | Vihang Karajgaonkar |
+| [HADOOP-14211](https://issues.apache.org/jira/browse/HADOOP-14211) | FilterFs and ChRootedFs are too aggressive about enforcing "authorityNeeded" |  Major | viewfs | Erik Krogen | Erik Krogen |
+| [YARN-6360](https://issues.apache.org/jira/browse/YARN-6360) | Prevent FS state dump logger from cramming other log files |  Major | fairscheduler | Yufei Gu | Yufei Gu |
+| [YARN-6334](https://issues.apache.org/jira/browse/YARN-6334) | TestRMFailover#testAutomaticFailover always passes even when it should fail |  Major | . | Yufei Gu | Yufei Gu |
+| [MAPREDUCE-6866](https://issues.apache.org/jira/browse/MAPREDUCE-6866) | Fix getNumMapTasks() documentation in JobConf |  Minor | documentation | Joe Mészáros | Joe Mészáros |
+| [MAPREDUCE-6868](https://issues.apache.org/jira/browse/MAPREDUCE-6868) | License check for jdiff output files should be ignored |  Major | build | Akira Ajisaka | Akira Ajisaka |
+| [HDFS-11555](https://issues.apache.org/jira/browse/HDFS-11555) | Fix typos in class OfflineImageReconstructor |  Trivial | . | Yiqun Lin | Yiqun Lin |
+| [HDFS-10506](https://issues.apache.org/jira/browse/HDFS-10506) | OIV's ReverseXML processor cannot reconstruct some snapshot details |  Major | tools | Colin P. McCabe | Akira Ajisaka |
+| [HDFS-11486](https://issues.apache.org/jira/browse/HDFS-11486) | Client close() should not fail fast if the last block is being decommissioned |  Major | . | Wei-Chiu Chuang | Wei-Chiu Chuang |
+| [YARN-6359](https://issues.apache.org/jira/browse/YARN-6359) | TestRM#testApplicationKillAtAcceptedState fails rarely due to race condition |  Major | test | Robert Kanter | Robert Kanter |
+| [YARN-5368](https://issues.apache.org/jira/browse/YARN-5368) | Memory leak in timeline server |  Critical | timelineserver | Wataru Yukawa | Jonathan Eagles |
+| [YARN-6050](https://issues.apache.org/jira/browse/YARN-6050) | AMs can't be scheduled on racks or nodes |  Major | . | Robert Kanter | Robert Kanter |
+| [HDFS-11571](https://issues.apache.org/jira/browse/HDFS-11571) | Typo in DataStorage exception message |  Minor | datanode | Daniel Templeton | Anna Budai |
+| [YARN-5685](https://issues.apache.org/jira/browse/YARN-5685) | RM configuration allows all failover methods to disabled when automatic failover is enabled |  Critical | resourcemanager | Daniel Templeton | Daniel Templeton |
+| [HADOOP-14223](https://issues.apache.org/jira/browse/HADOOP-14223) | Extend FileStatus#toString() to include details like Erasure Coding and Encryption |  Major | fs | Manoj Govindassamy | Manoj Govindassamy |
+| [HADOOP-14247](https://issues.apache.org/jira/browse/HADOOP-14247) | FileContextMainOperationsBaseTest should clean up test root path |  Minor | fs, test | Mingliang Liu | Mingliang Liu |
+| [MAPREDUCE-6862](https://issues.apache.org/jira/browse/MAPREDUCE-6862) | Fragments are not handled correctly by resource limit checking |  Minor | . | Chris Trezzo | Chris Trezzo |
+| [MAPREDUCE-6873](https://issues.apache.org/jira/browse/MAPREDUCE-6873) | MR Job Submission Fails if MR framework application path not on defaultFS |  Minor | mrv2 | Erik Krogen | Erik Krogen |
+| [HADOOP-14256](https://issues.apache.org/jira/browse/HADOOP-14256) | [S3A DOC] Correct the format for "Seoul" example |  Minor | documentation, s3 | Brahma Reddy Battula | Brahma Reddy Battula |
+| [MAPREDUCE-6850](https://issues.apache.org/jira/browse/MAPREDUCE-6850) | Shuffle Handler keep-alive connections are closed from the server side |  Major | . | Jonathan Eagles | Jonathan Eagles |
+| [MAPREDUCE-6836](https://issues.apache.org/jira/browse/MAPREDUCE-6836) | exception thrown when accessing the job configuration web UI |  Minor | webapps | Sangjin Lee | Haibo Chen |
+| [HDFS-11592](https://issues.apache.org/jira/browse/HDFS-11592) | Closing a file has a wasteful precondition check in NameNode |  Major | namenode | Eric Badger | Eric Badger |
+| [YARN-6354](https://issues.apache.org/jira/browse/YARN-6354) | LeveldbRMStateStore can parse invalid keys when recovering reservations |  Major | resourcemanager | Jason Lowe | Jason Lowe |
+| [YARN-5703](https://issues.apache.org/jira/browse/YARN-5703) | ReservationAgents are not correctly configured |  Major | capacity scheduler, resourcemanager | Sean Po | Manikandan R |
+| [HADOOP-14268](https://issues.apache.org/jira/browse/HADOOP-14268) | Fix markdown itemization in hadoop-aws documents |  Minor | documentation, fs/s3 | Akira Ajisaka | Akira Ajisaka |
+| [HADOOP-14271](https://issues.apache.org/jira/browse/HADOOP-14271) | Correct spelling of 'occurred' and variants |  Trivial | . | Yeliang Cang | Yeliang Cang |
+| [HADOOP-14272](https://issues.apache.org/jira/browse/HADOOP-14272) | Azure: WasbRemoteCallHelper should use String equals for comparison. |  Major | fs/azure | Santhosh G Nayak | Santhosh G Nayak |
+| [HADOOP-14273](https://issues.apache.org/jira/browse/HADOOP-14273) | Azure: NativeAzureFileSystem should respect config for kerberosSupportEnabled flag |  Major | fs/azure | Santhosh G Nayak | Santhosh G Nayak |
+| [YARN-6436](https://issues.apache.org/jira/browse/YARN-6436) | TestSchedulingPolicy#testParseSchedulingPolicy timeout is too low |  Major | test | Jason Lowe | Eric Badger |
+| [YARN-6004](https://issues.apache.org/jira/browse/YARN-6004) | Refactor TestResourceLocalizationService#testDownloadingResourcesOnContainer so that it is less than 150 lines |  Trivial | test | Chris Trezzo | Chris Trezzo |
+| [YARN-6420](https://issues.apache.org/jira/browse/YARN-6420) | RM startup failure due to wrong order in nodelabel editlog |  Critical | . | Bibin A Chundatt | Bibin A Chundatt |
+| [MAPREDUCE-6824](https://issues.apache.org/jira/browse/MAPREDUCE-6824) | TaskAttemptImpl#createCommonContainerLaunchContext is longer than 150 lines |  Trivial | . | Chris Trezzo | Chris Trezzo |
+| [YARN-6403](https://issues.apache.org/jira/browse/YARN-6403) | Invalid local resource request can raise NPE and make NM exit |  Major | nodemanager | Tao Yang | Tao Yang |
+| [HDFS-11538](https://issues.apache.org/jira/browse/HDFS-11538) | Move ClientProtocol HA proxies into hadoop-hdfs-client |  Blocker | hdfs-client | Andrew Wang | Huafeng Wang |
+| [YARN-6437](https://issues.apache.org/jira/browse/YARN-6437) | TestSignalContainer#testSignalRequestDeliveryToNM fails intermittently |  Major | test | Jason Lowe | Jason Lowe |
+| [YARN-6448](https://issues.apache.org/jira/browse/YARN-6448) | Continuous scheduling thread crashes while sorting nodes |  Major | . | Yufei Gu | Yufei Gu |
+| [HDFS-11596](https://issues.apache.org/jira/browse/HDFS-11596) | hadoop-hdfs-client jar is in the wrong directory in release tarball |  Critical | build | Andrew Wang | Yuanbo Liu |
+| [MAPREDUCE-6846](https://issues.apache.org/jira/browse/MAPREDUCE-6846) | Fragments specified for libjar paths are not handled correctly |  Minor | . | Chris Trezzo | Chris Trezzo |
+| [HDFS-11131](https://issues.apache.org/jira/browse/HDFS-11131) | TestThrottledAsyncChecker#testCancellation is flaky |  Major | test | Arpit Agarwal | Arpit Agarwal |
+| [HDFS-11362](https://issues.apache.org/jira/browse/HDFS-11362) | StorageDirectory should initialize a non-null default StorageDirType |  Minor | hdfs | Hanisha Koneru | Hanisha Koneru |
+| [HDFS-11608](https://issues.apache.org/jira/browse/HDFS-11608) | HDFS write crashed with block size greater than 2 GB |  Critical | hdfs-client | Xiaobing Zhou | Xiaobing Zhou |
+| [MAPREDUCE-6201](https://issues.apache.org/jira/browse/MAPREDUCE-6201) | TestNetworkedJob fails on trunk |  Major | . | Robert Kanter | Peter Bacsko |
+| [YARN-6288](https://issues.apache.org/jira/browse/YARN-6288) | Exceptions during aggregated log writes are mishandled |  Critical | log-aggregation | Akira Ajisaka | Akira Ajisaka |
+| [HADOOP-14287](https://issues.apache.org/jira/browse/HADOOP-14287) | Compiling trunk with -DskipShade fails |  Major | build | Arpit Agarwal | Arun Suresh |
+| [YARN-6368](https://issues.apache.org/jira/browse/YARN-6368) | Decommissioning an NM results in a -1 exit code |  Minor | . | Miklos Szegedi | Miklos Szegedi |
+| [HDFS-11633](https://issues.apache.org/jira/browse/HDFS-11633) | FSImage failover disables all erasure coding policies |  Critical | erasure-coding, namenode | Wei-Chiu Chuang | Wei-Chiu Chuang |
+| [HADOOP-14066](https://issues.apache.org/jira/browse/HADOOP-14066) | VersionInfo should be marked as public API |  Critical | common | Thejas M Nair | Akira Ajisaka |
+| [YARN-6343](https://issues.apache.org/jira/browse/YARN-6343) | Docker docs MR example is broken |  Major | nodemanager | Daniel Templeton | Prashant Jha |
+| [HADOOP-14293](https://issues.apache.org/jira/browse/HADOOP-14293) | Initialize FakeTimer with a less trivial value |  Major | test | Andrew Wang | Andrew Wang |
+| [HADOOP-13545](https://issues.apache.org/jira/browse/HADOOP-13545) | Upgrade HSQLDB to 2.3.4 |  Minor | build | Giovanni Matteo Fumarola | Giovanni Matteo Fumarola |
+| [HDFS-11637](https://issues.apache.org/jira/browse/HDFS-11637) | Fix javac warning caused by the deprecated key used in TestDFSClientRetries#testFailuresArePerOperation |  Minor | . | Yiqun Lin | Yiqun Lin |
+| [YARN-6461](https://issues.apache.org/jira/browse/YARN-6461) | TestRMAdminCLI has very low test timeouts |  Major | test | Jason Lowe | Eric Badger |
+| [YARN-6463](https://issues.apache.org/jira/browse/YARN-6463) | correct spelling mistake in FileSystemRMStateStore |  Trivial | . | Yeliang Cang | Yeliang Cang |
+| [YARN-6439](https://issues.apache.org/jira/browse/YARN-6439) | Fix ReservationSystem creation of default ReservationQueue |  Major | . | Carlo Curino | Carlo Curino |
+| [HDFS-11630](https://issues.apache.org/jira/browse/HDFS-11630) | TestThrottledAsyncCheckerTimeout fails intermittently in Jenkins builds |  Major | hdfs | Hanisha Koneru | Hanisha Koneru |
+| [HDFS-11163](https://issues.apache.org/jira/browse/HDFS-11163) | Mover should move the file blocks to default storage once policy is unset |  Major | balancer & mover | Surendra Singh Lilhore | Surendra Singh Lilhore |
+| [YARN-6421](https://issues.apache.org/jira/browse/YARN-6421) | Upgrade frontend-maven-plugin to 1.1 to fix new YARN UI build error in ppc64le |  Major | yarn-ui-v2 | Sonia Garudi | Sonia Garudi |
+| [YARN-6450](https://issues.apache.org/jira/browse/YARN-6450) | TestContainerManagerWithLCE requires override for each new test added to ContainerManagerTest |  Major | test | Jason Lowe | Jason Lowe |
+| [YARN-3760](https://issues.apache.org/jira/browse/YARN-3760) | FSDataOutputStream leak in AggregatedLogFormat.LogWriter.close() |  Critical | nodemanager | Daryn Sharp | Haibo Chen |
+| [YARN-6216](https://issues.apache.org/jira/browse/YARN-6216) | Unify Container Resizing code paths with Container Updates making it scheduler agnostic |  Major | capacity scheduler, fairscheduler, resourcemanager | Arun Suresh | Arun Suresh |
+| [YARN-5994](https://issues.apache.org/jira/browse/YARN-5994) | TestCapacityScheduler.testAMLimitUsage fails intermittently |  Major | . | Eric Badger | Eric Badger |
+| [YARN-6433](https://issues.apache.org/jira/browse/YARN-6433) | Only accessible cgroup mount directories should be selected for a controller |  Major | nodemanager | Miklos Szegedi | Miklos Szegedi |
+| [YARN-6480](https://issues.apache.org/jira/browse/YARN-6480) | Timeout is too aggressive for TestAMRestart.testPreemptedAMRestartOnRMRestart |  Major | . | Eric Badger | Eric Badger |
+| [HADOOP-14311](https://issues.apache.org/jira/browse/HADOOP-14311) | Add python2.7-dev to Dockerfile |  Major | . | Allen Wittenauer | Allen Wittenauer |
+| [MAPREDUCE-6875](https://issues.apache.org/jira/browse/MAPREDUCE-6875) | Rename mapred-site.xml.template to mapred-site.xml |  Minor | build | Allen Wittenauer | Yuanbo Liu |
+| [YARN-6304](https://issues.apache.org/jira/browse/YARN-6304) | Skip rm.transitionToActive call to RM if RM is already active. |  Major | resourcemanager | Rohith Sharma K S | Rohith Sharma K S |
+| [HDFS-11615](https://issues.apache.org/jira/browse/HDFS-11615) | FSNamesystemLock metrics can be inaccurate due to millisecond precision |  Major | hdfs | Erik Krogen | Erik Krogen |
+| [HADOOP-14318](https://issues.apache.org/jira/browse/HADOOP-14318) | Remove non-existent setfattr command option from FileSystemShell.md |  Minor | documentation | Doris Gu | Doris Gu |
+| [HADOOP-14315](https://issues.apache.org/jira/browse/HADOOP-14315) | Python example in the rack awareness document doesn't work due to bad indentation |  Minor | documentation | Kengo Seki | Kengo Seki |
+| [HDFS-11665](https://issues.apache.org/jira/browse/HDFS-11665) | HttpFSServerWebServer$deprecateEnv may leak secret |  Major | httpfs, security | John Zhuge | John Zhuge |
+| [HADOOP-14317](https://issues.apache.org/jira/browse/HADOOP-14317) | KMSWebServer$deprecateEnv may leak secret |  Major | kms, security | John Zhuge | John Zhuge |
+| [HADOOP-13997](https://issues.apache.org/jira/browse/HADOOP-13997) | Typo in metrics docs |  Trivial | documentation | Daniel Templeton | Ana Krasteva |
+| [YARN-6438](https://issues.apache.org/jira/browse/YARN-6438) | Code can be improved in ContainersMonitorImpl.java |  Minor | nodemanager | Miklos Szegedi | Miklos Szegedi |
+| [YARN-6365](https://issues.apache.org/jira/browse/YARN-6365) | Get static SLS html resources from classpath |  Blocker | scheduler-load-simulator | Allen Wittenauer | Yufei Gu |
+| [YARN-6202](https://issues.apache.org/jira/browse/YARN-6202) | Configuration item Dispatcher.DISPATCHER\_EXIT\_ON\_ERROR\_KEY is disregarded |  Major | nodemanager, resourcemanager | Yufei Gu | Yufei Gu |
+| [YARN-6302](https://issues.apache.org/jira/browse/YARN-6302) | Fail the node if Linux Container Executor is not configured properly |  Minor | . | Miklos Szegedi | Miklos Szegedi |
+| [HDFS-11671](https://issues.apache.org/jira/browse/HDFS-11671) | TestReconstructStripedBlocks#test2RecoveryTasksForSameBlockGroup fails |  Major | erasure-coding, test | Andrew Wang | Andrew Wang |
+| [HDFS-11660](https://issues.apache.org/jira/browse/HDFS-11660) | TestFsDatasetCache#testPageRounder fails intermittently with AssertionError |  Major | test | Andrew Wang | Andrew Wang |
+| [YARN-6453](https://issues.apache.org/jira/browse/YARN-6453) | fairscheduler-statedump.log gets generated regardless of service |  Blocker | fairscheduler, scheduler | Allen Wittenauer | Yufei Gu |
+| [YARN-6363](https://issues.apache.org/jira/browse/YARN-6363) | Extending SLS: Synthetic Load Generator |  Major | . | Carlo Curino | Carlo Curino |
+| [YARN-6153](https://issues.apache.org/jira/browse/YARN-6153) | keepContainer does not work when AM retry window is set |  Major | resourcemanager | kyungwan nam | kyungwan nam |
+| [HDFS-11689](https://issues.apache.org/jira/browse/HDFS-11689) | New exception thrown by DFSClient#isHDFSEncryptionEnabled broke hacky hive code |  Major | . | Yongjun Zhang | Yongjun Zhang |
+| [YARN-5889](https://issues.apache.org/jira/browse/YARN-5889) | Improve and refactor user-limit calculation in capacity scheduler |  Major | capacity scheduler | Sunil G | Sunil G |
+| [HDFS-11529](https://issues.apache.org/jira/browse/HDFS-11529) | Add libHDFS API to return last exception |  Critical | libhdfs | Sailesh Mukil | Sailesh Mukil |
+| [YARN-6500](https://issues.apache.org/jira/browse/YARN-6500) | Do not mount inaccessible cgroups directories in CgroupsLCEResourcesHandler |  Major | nodemanager | Miklos Szegedi | Miklos Szegedi |
+| [HDFS-11691](https://issues.apache.org/jira/browse/HDFS-11691) | Add a proper scheme to the datanode links in NN web UI |  Major | . | Kihwal Lee | Kihwal Lee |
+| [HADOOP-14341](https://issues.apache.org/jira/browse/HADOOP-14341) | Support multi-line value for ssl.server.exclude.cipher.list |  Major | . | John Zhuge | John Zhuge |
+| [YARN-5617](https://issues.apache.org/jira/browse/YARN-5617) | AMs only intended to run one attempt can be run more than once |  Major | resourcemanager | Jason Lowe | Jason Lowe |
+| [YARN-6510](https://issues.apache.org/jira/browse/YARN-6510) | Fix procfs stat file warning caused by process names that include parentheses |  Major | . | Wilfred Spiegelenburg | Wilfred Spiegelenburg |
+| [HADOOP-14351](https://issues.apache.org/jira/browse/HADOOP-14351) | Azure: RemoteWasbAuthorizerImpl and RemoteSASKeyGeneratorImpl should not use Kerberos interactive user cache |  Major | fs/azure | Santhosh G Nayak | Santhosh G Nayak |
+| [MAPREDUCE-6881](https://issues.apache.org/jira/browse/MAPREDUCE-6881) | Fix warnings from Spotbugs in hadoop-mapreduce |  Major | . | Weiwei Yang | Weiwei Yang |
+| [HADOOP-14346](https://issues.apache.org/jira/browse/HADOOP-14346) | CryptoOutputStream throws IOException on flush() if stream is closed |  Major | . | Pierre Lacave | Pierre Lacave |
+| [HDFS-11709](https://issues.apache.org/jira/browse/HDFS-11709) | StandbyCheckpointer should handle an non-existing legacyOivImageDir gracefully |  Critical | ha, namenode | Zhe Zhang | Erik Krogen |
+| [YARN-6534](https://issues.apache.org/jira/browse/YARN-6534) | ResourceManager failed due to TimelineClient try to init SSLFactory even https is not enabled |  Blocker | . | Junping Du | Rohith Sharma K S |
+| [HADOOP-14354](https://issues.apache.org/jira/browse/HADOOP-14354) | SysInfoWindows is not thread safe |  Major | . | Inigo Goiri | Inigo Goiri |
+| [YARN-5894](https://issues.apache.org/jira/browse/YARN-5894) | fixed license warning caused by de.ruedigermoeller:fst:jar:2.24 |  Blocker | yarn | Haibo Chen | Haibo Chen |
+| [YARN-6472](https://issues.apache.org/jira/browse/YARN-6472) | Improve Java sandbox regex |  Major | . | Miklos Szegedi | Greg Phillips |
+| [HADOOP-14320](https://issues.apache.org/jira/browse/HADOOP-14320) | TestIPC.testIpcWithReaderQueuing fails intermittently |  Major | . | Eric Badger | Eric Badger |
+| [YARN-6536](https://issues.apache.org/jira/browse/YARN-6536) | TestAMRMClient.testAMRMClientWithSaslEncryption fails intermittently |  Major | . | Eric Badger | Jason Lowe |
+| [HDFS-11718](https://issues.apache.org/jira/browse/HDFS-11718) | DFSStripedOutputStream hsync/hflush should not throw UnsupportedOperationException |  Blocker | erasure-coding | Manoj Govindassamy | Manoj Govindassamy |
+| [HADOOP-14363](https://issues.apache.org/jira/browse/HADOOP-14363) | Inconsistent default block location in FileSystem javadoc |  Trivial | fs | Mingliang Liu | Chen Liang |
+| [HADOOP-13901](https://issues.apache.org/jira/browse/HADOOP-13901) | Fix ASF License warnings |  Major | build | Akira Ajisaka | Akira Ajisaka |
+| [YARN-6518](https://issues.apache.org/jira/browse/YARN-6518) | Fix warnings from Spotbugs in hadoop-yarn-server-timelineservice |  Major | . | Weiwei Yang | Weiwei Yang |
+| [YARN-6520](https://issues.apache.org/jira/browse/YARN-6520) | Fix warnings from Spotbugs in hadoop-yarn-client |  Major | . | Weiwei Yang | Weiwei Yang |
+| [HDFS-11609](https://issues.apache.org/jira/browse/HDFS-11609) | Some blocks can be permanently lost if nodes are decommissioned while dead |  Blocker | namenode | Kihwal Lee | Kihwal Lee |
+| [HDFS-11724](https://issues.apache.org/jira/browse/HDFS-11724) | libhdfs compilation is broken on OS X |  Blocker | libhdfs | Allen Wittenauer | John Zhuge |
+| [HDFS-8498](https://issues.apache.org/jira/browse/HDFS-8498) | Blocks can be committed with wrong size |  Critical | hdfs-client | Daryn Sharp | Jing Zhao |
+| [HDFS-11714](https://issues.apache.org/jira/browse/HDFS-11714) | Newly added NN storage directory won't get initialized and cause space exhaustion |  Critical | . | Kihwal Lee | Kihwal Lee |
+| [HADOOP-14366](https://issues.apache.org/jira/browse/HADOOP-14366) | maven upgrade broke start-build-env.sh |  Blocker | build | Allen Wittenauer | Akira Ajisaka |
+| [HDFS-11593](https://issues.apache.org/jira/browse/HDFS-11593) | Change SimpleHttpProxyHandler#exceptionCaught log level from info to debug |  Minor | datanode | Xiaoyu Yao | Xiaobing Zhou |
+| [HDFS-11710](https://issues.apache.org/jira/browse/HDFS-11710) | hadoop-hdfs-native-client build fails in trunk in Windows after HDFS-11529 |  Blocker | native | Vinayakumar B | Sailesh Mukil |
+| [HADOOP-14371](https://issues.apache.org/jira/browse/HADOOP-14371) | License error in TestLoadBalancingKMSClientProvider.java |  Major | . | hu xiaodong | hu xiaodong |
+| [HADOOP-14369](https://issues.apache.org/jira/browse/HADOOP-14369) | NetworkTopology calls expensive toString() when logging |  Major | . | Inigo Goiri | Inigo Goiri |
+| [HADOOP-14281](https://issues.apache.org/jira/browse/HADOOP-14281) | Fix TestKafkaMetrics#testPutMetrics |  Major | metrics | Akira Ajisaka | Alison Yu |
+| [YARN-6519](https://issues.apache.org/jira/browse/YARN-6519) | Fix warnings from Spotbugs in hadoop-yarn-server-resourcemanager |  Major | resourcemanager | Weiwei Yang | Weiwei Yang |
+| [YARN-6481](https://issues.apache.org/jira/browse/YARN-6481) | Yarn top shows negative container number in FS |  Major | yarn | Yufei Gu | Tao Jie |
+| [HADOOP-14306](https://issues.apache.org/jira/browse/HADOOP-14306) | TestLocalFileSystem tests have very low timeouts |  Major | . | Eric Badger | Eric Badger |
+| [HADOOP-14372](https://issues.apache.org/jira/browse/HADOOP-14372) | TestSymlinkLocalFS timeouts are too low |  Major | . | Eric Badger | Eric Badger |
+| [HDFS-11739](https://issues.apache.org/jira/browse/HDFS-11739) | Fix regression in tests caused by YARN-679 |  Major | test | Steve Loughran | Steve Loughran |
+| [HDFS-11643](https://issues.apache.org/jira/browse/HDFS-11643) | Add shouldReplicate option to create builder |  Blocker | balancer & mover, erasure-coding | Andrew Wang | SammiChen |
+| [HADOOP-14380](https://issues.apache.org/jira/browse/HADOOP-14380) | Make the Guava version that Hadoop builds with configurable |  Major | build | Steve Loughran | Steve Loughran |
+| [HDFS-11448](https://issues.apache.org/jira/browse/HDFS-11448) | JN log segment syncing should support HA upgrade |  Major | hdfs | Hanisha Koneru | Hanisha Koneru |
+| [HADOOP-14207](https://issues.apache.org/jira/browse/HADOOP-14207) | "dfsadmin -refreshCallQueue" fails with DecayRpcScheduler |  Blocker | rpc-server | Surendra Singh Lilhore | Surendra Singh Lilhore |
+| [HADOOP-14298](https://issues.apache.org/jira/browse/HADOOP-14298) | TestHadoopArchiveLogsRunner fails |  Major | test | Akira Ajisaka | Akira Ajisaka |
+| [HDFS-11702](https://issues.apache.org/jira/browse/HDFS-11702) | Remove indefinite caching of key provider uri in DFSClient |  Major | hdfs-client | Rushabh S Shah | Rushabh S Shah |
+| [YARN-3839](https://issues.apache.org/jira/browse/YARN-3839) | Quit throwing NMNotYetReadyException |  Major | nodemanager | Karthik Kambatla | Manikandan R |
+| [HADOOP-14374](https://issues.apache.org

<TRUNCATED>



[10/50] [abbrv] hadoop git commit: HDFS-12042. Lazy initialize AbstractINodeDiffList#diffs for snapshots to reduce memory consumption. Contributed by Misha Dmitriev.

Posted by as...@apache.org.
HDFS-12042. Lazy initialize AbstractINodeDiffList#diffs for snapshots to reduce memory consumption. Contributed by Misha Dmitriev.
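
The change is a straightforward lazy-initialization pattern: the per-inode diff
list stays null until the first diff is added, and is reset to null once the
last diff is removed, so the many inodes that never accumulate snapshot diffs
carry no ArrayList at all. A minimal sketch of the idea follows; LazyDiffList
and its members are illustrative stand-ins, not the actual Hadoop classes in
the patch below.

    import java.util.ArrayList;
    import java.util.Collections;
    import java.util.List;

    class LazyDiffList<D> {
      // null means "no diffs yet"; allocated only on first write.
      private List<D> diffs;

      private void createDiffsIfNeeded() {
        if (diffs == null) {
          // Small initial capacity, since most lists stay tiny.
          diffs = new ArrayList<>(2);
        }
      }

      void addLast(D diff) {
        createDiffsIfNeeded();
        diffs.add(diff);
      }

      void removeFirst() {
        if (diffs == null) {
          return;               // nothing to remove
        }
        diffs.remove(0);
        if (diffs.isEmpty()) {
          diffs = null;         // drop the empty list to reclaim memory
        }
      }

      // Readers must tolerate the null state.
      List<D> asList() {
        return diffs != null
            ? Collections.unmodifiableList(diffs)
            : Collections.emptyList();
      }
    }

The cost of the trick is that every reader needs a null fast path, which is
what most of the added lines in deleteSnapshotDiff(), getLast() and getPrior()
below do.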


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bcba844d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bcba844d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bcba844d

Branch: refs/heads/YARN-5972
Commit: bcba844d1144cc334e2babbc34c9d42eac1c203a
Parents: 6a9dc5f
Author: Wei-Chiu Chuang <we...@apache.org>
Authored: Fri Jun 30 10:28:01 2017 -0700
Committer: Wei-Chiu Chuang <we...@apache.org>
Committed: Fri Jun 30 10:28:01 2017 -0700

----------------------------------------------------------------------
 .../hdfs/server/namenode/INodeDirectory.java    |  7 ++-
 .../snapshot/AbstractINodeDiffList.java         | 53 +++++++++++++++-----
 .../namenode/TestTruncateQuotaUpdate.java       |  1 +
 3 files changed, 46 insertions(+), 15 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bcba844d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
index a29a118..4012783 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
@@ -65,8 +65,11 @@ public class INodeDirectory extends INodeWithAdditionalFields
     return inode.asDirectory(); 
   }
 
-  protected static final int DEFAULT_FILES_PER_DIRECTORY = 5;
-  final static byte[] ROOT_NAME = DFSUtil.string2Bytes("");
+  // Profiling shows that most of the file lists are between 1 and 4 elements.
+  // Thus allocate the corresponding ArrayLists with a small initial capacity.
+  public static final int DEFAULT_FILES_PER_DIRECTORY = 2;
+
+  static final byte[] ROOT_NAME = DFSUtil.string2Bytes("");
 
   private List<INode> children = null;
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bcba844d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/AbstractINodeDiffList.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/AbstractINodeDiffList.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/AbstractINodeDiffList.java
index 64825f1..98d8c53 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/AbstractINodeDiffList.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/AbstractINodeDiffList.java
@@ -24,6 +24,7 @@ import java.util.List;
 
 import org.apache.hadoop.hdfs.server.namenode.INode;
 import org.apache.hadoop.hdfs.server.namenode.INodeAttributes;
+import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
 
 /**
  * A list of snapshot diffs for storing snapshot data.
@@ -35,17 +36,19 @@ abstract class AbstractINodeDiffList<N extends INode,
                                      A extends INodeAttributes,
                                      D extends AbstractINodeDiff<N, A, D>> 
     implements Iterable<D> {
-  /** Diff list sorted by snapshot IDs, i.e. in chronological order. */
-  private final List<D> diffs = new ArrayList<D>();
+  /** Diff list sorted by snapshot IDs, i.e. in chronological order.
+   * Created lazily to avoid wasting memory on empty lists. */
+  private List<D> diffs;
 
   /** @return this list as an unmodifiable {@link List}. */
   public final List<D> asList() {
-    return Collections.unmodifiableList(diffs);
+    return diffs != null ?
+        Collections.unmodifiableList(diffs) : Collections.emptyList();
   }
   
-  /** Get the size of the list and then clear it. */
+  /** Clear the list. */
   public void clear() {
-    diffs.clear();
+    diffs = null;
   }
 
   /** @return an {@link AbstractINodeDiff}. */
@@ -66,6 +69,9 @@ abstract class AbstractINodeDiffList<N extends INode,
    */
   public final void deleteSnapshotDiff(INode.ReclaimContext reclaimContext,
       final int snapshot, final int prior, final N currentINode) {
+    if (diffs == null) {
+      return;
+    }
     int snapshotIndex = Collections.binarySearch(diffs, snapshot);
 
     D removed;
@@ -75,6 +81,9 @@ abstract class AbstractINodeDiffList<N extends INode,
         diffs.get(snapshotIndex).setSnapshotId(prior);
       } else { // there is no snapshot before
         removed = diffs.remove(0);
+        if (diffs.isEmpty()) {
+          diffs = null;
+        }
         removed.destroyDiffAndCollectBlocks(reclaimContext, currentINode);
       }
     } else if (snapshotIndex > 0) {
@@ -103,6 +112,7 @@ abstract class AbstractINodeDiffList<N extends INode,
 
   /** Append the diff at the end of the list. */
   private D addLast(D diff) {
+    createDiffsIfNeeded();
     final D last = getLast();
     diffs.add(diff);
     if (last != null) {
@@ -113,15 +123,25 @@ abstract class AbstractINodeDiffList<N extends INode,
   
   /** Add the diff to the beginning of the list. */
   final void addFirst(D diff) {
-    final D first = diffs.isEmpty()? null: diffs.get(0);
+    createDiffsIfNeeded();
+    final D first = diffs.isEmpty()? null : diffs.get(0);
     diffs.add(0, diff);
     diff.setPosterior(first);
   }
 
   /** @return the last diff. */
   public final D getLast() {
-    final int n = diffs.size();
-    return n == 0? null: diffs.get(n - 1);
+    if (diffs == null) {
+      return null;
+    }
+    int n = diffs.size();
+    return n == 0 ? null : diffs.get(n - 1);
+  }
+
+  private void createDiffsIfNeeded() {
+    if (diffs == null) {
+      diffs = new ArrayList<>(INodeDirectory.DEFAULT_FILES_PER_DIRECTORY);
+    }
   }
 
   /** @return the id of the last snapshot. */
@@ -139,10 +159,14 @@ abstract class AbstractINodeDiffList<N extends INode,
    * @return The id of the latest snapshot before the given snapshot.
    */
   public final int getPrior(int anchorId, boolean exclusive) {
+    if (diffs == null) {
+      return Snapshot.NO_SNAPSHOT_ID;
+    }
     if (anchorId == Snapshot.CURRENT_STATE_ID) {
       int last = getLastSnapshotId();
-      if(exclusive && last == anchorId)
+      if (exclusive && last == anchorId) {
         return Snapshot.NO_SNAPSHOT_ID;
+      }
       return last;
     }
     final int i = Collections.binarySearch(diffs, anchorId);
@@ -181,7 +205,7 @@ abstract class AbstractINodeDiffList<N extends INode,
   }
   
   public final D getDiffById(final int snapshotId) {
-    if (snapshotId == Snapshot.CURRENT_STATE_ID) {
+    if (snapshotId == Snapshot.CURRENT_STATE_ID || diffs == null) {
       return null;
     }
     final int i = Collections.binarySearch(diffs, snapshotId);
@@ -193,7 +217,7 @@ abstract class AbstractINodeDiffList<N extends INode,
       // given snapshot and the next state so that the diff for the given
       // snapshot was not recorded. Thus, return the next state.
       final int j = -i - 1;
-      return j < diffs.size()? diffs.get(j): null;
+      return j < diffs.size() ? diffs.get(j) : null;
     }
   }
   
@@ -207,6 +231,9 @@ abstract class AbstractINodeDiffList<N extends INode,
   }
 
   final int[] changedBetweenSnapshots(Snapshot from, Snapshot to) {
+    if (diffs == null) {
+      return null;
+    }
     Snapshot earlier = from;
     Snapshot later = to;
     if (Snapshot.ID_COMPARATOR.compare(from, to) > 0) {
@@ -275,11 +302,11 @@ abstract class AbstractINodeDiffList<N extends INode,
   
   @Override
   public Iterator<D> iterator() {
-    return diffs.iterator();
+    return diffs != null ? diffs.iterator() : Collections.emptyIterator();
   }
 
   @Override
   public String toString() {
-    return getClass().getSimpleName() + ": " + diffs;
+    return getClass().getSimpleName() + ": " + (diffs != null ? diffs : "[]");
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bcba844d/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestTruncateQuotaUpdate.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestTruncateQuotaUpdate.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestTruncateQuotaUpdate.java
index 106edad..fcdd650 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestTruncateQuotaUpdate.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestTruncateQuotaUpdate.java
@@ -156,6 +156,7 @@ public class TestTruncateQuotaUpdate {
     FileDiff diff = mock(FileDiff.class);
     when(diff.getBlocks()).thenReturn(blocks);
     FileDiffList diffList = new FileDiffList();
+    Whitebox.setInternalState(diffList, "diffs", new ArrayList<FileDiff>());
     @SuppressWarnings("unchecked")
     ArrayList<FileDiff> diffs = ((ArrayList<FileDiff>)Whitebox.getInternalState
         (diffList, "diffs"));
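
The change above is the standard lazy-collection pattern: keep the field null until the first write, and make every read path tolerate the null state. A minimal self-contained sketch of that pattern (LazyList is an invented name for illustration, not a Hadoop class):

    import java.util.ArrayList;
    import java.util.Collections;
    import java.util.List;

    /** Illustrative holder that allocates its backing list only on first add. */
    class LazyList<T> {
      // Most instances never receive an element, so start with no allocation.
      private List<T> items;

      void add(T item) {
        if (items == null) {
          // Small initial capacity, mirroring DEFAULT_FILES_PER_DIRECTORY = 2.
          items = new ArrayList<>(2);
        }
        items.add(item);
      }

      /** Read paths must tolerate the never-written (null) state. */
      List<T> asList() {
        return items == null
            ? Collections.<T>emptyList()
            : Collections.unmodifiableList(items);
      }
    }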




[18/50] [abbrv] hadoop git commit: HADOOP-14615. Add ServiceOperations.stopQuietly that accept slf4j logger API. Contributed by Wenxin He.

Posted by as...@apache.org.
HADOOP-14615. Add ServiceOperations.stopQuietly that accept slf4j logger API.
Contributed by Wenxin He.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1aaa7f1e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1aaa7f1e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1aaa7f1e

Branch: refs/heads/YARN-5972
Commit: 1aaa7f1eacab20d1c27f410333a536033cad1aab
Parents: 186650d
Author: Steve Loughran <st...@apache.org>
Authored: Tue Jul 4 10:48:02 2017 +0100
Committer: Steve Loughran <st...@apache.org>
Committed: Tue Jul 4 10:48:02 2017 +0100

----------------------------------------------------------------------
 .../hadoop/service/ServiceOperations.java       | 20 ++++++
 .../hadoop/service/TestServiceOperations.java   | 65 ++++++++++++++++++++
 2 files changed, 85 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1aaa7f1e/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/ServiceOperations.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/ServiceOperations.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/ServiceOperations.java
index 6c03e25..a0a77ce 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/ServiceOperations.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/ServiceOperations.java
@@ -25,6 +25,7 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience.Public;
 import org.apache.hadoop.classification.InterfaceStability.Evolving;
+import org.slf4j.Logger;
 
 /**
  * This class contains a set of methods to work with services, especially
@@ -87,6 +88,25 @@ public final class ServiceOperations {
     return null;
   }
 
+  /**
+   * Stop a service; if it is null, do nothing. Exceptions (but not
+   * Throwables) are caught and logged at warn level. This operation is
+   * intended to be used in cleanup operations.
+   *
+   * @param log the log to warn at
+   * @param service a service; may be null
+   * @return any exception that was caught; null if none was.
+   * @see ServiceOperations#stopQuietly(Service)
+   */
+  public static Exception stopQuietly(Logger log, Service service) {
+    try {
+      stop(service);
+    } catch (Exception e) {
+      log.warn("When stopping the service {} : {}", service.getName(), e, e);
+      return e;
+    }
+    return null;
+  }
 
   /**
    * Class to manage a list of {@link ServiceStateChangeListener} instances,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1aaa7f1e/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/service/TestServiceOperations.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/service/TestServiceOperations.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/service/TestServiceOperations.java
new file mode 100644
index 0000000..5df973d
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/service/TestServiceOperations.java
@@ -0,0 +1,65 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ *  or more contributor license agreements.  See the NOTICE file
+ *  distributed with this work for additional information
+ *  regarding copyright ownership.  The ASF licenses this file
+ *  to you under the Apache License, Version 2.0 (the
+ *  "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *       http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+package org.apache.hadoop.service;
+
+import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.mockito.Mock;
+import org.mockito.Mockito;
+import org.mockito.runners.MockitoJUnitRunner;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.PrintWriter;
+
+import static org.apache.hadoop.test.GenericTestUtils.LogCapturer.captureLogs;
+import static org.hamcrest.CoreMatchers.containsString;
+import static org.junit.Assert.assertThat;
+import static org.mockito.Mockito.doThrow;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+
+/**
+ * Test miscellaneous service operations through mocked failures.
+ */
+@RunWith(MockitoJUnitRunner.class)
+public class TestServiceOperations {
+
+  @Mock
+  private Service service;
+
+  @Mock
+  private RuntimeException e;
+
+  @Test
+  public void testStopQuietlyWhenServiceStopThrowsException() throws Exception {
+    Logger logger = LoggerFactory.getLogger(TestServiceOperations.class);
+    LogCapturer logCapturer = captureLogs(logger);
+    doThrow(e).when(service).stop();
+
+    ServiceOperations.stopQuietly(logger, service);
+
+    assertThat(logCapturer.getOutput(),
+        containsString("When stopping the service " + service.getName()
+            + " : " + e));
+    verify(e, times(1)).printStackTrace(Mockito.any(PrintWriter.class));
+  }
+
+}
\ No newline at end of file
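
As a usage illustration, the new overload slots into cleanup code as sketched below; ServiceCleanup and stopAll are invented for the example, while stopQuietly(Logger, Service) is the method added above:

    import org.apache.hadoop.service.Service;
    import org.apache.hadoop.service.ServiceOperations;
    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    final class ServiceCleanup {
      private static final Logger LOG =
          LoggerFactory.getLogger(ServiceCleanup.class);

      /** Stop every service, logging failures instead of propagating them. */
      static void stopAll(Service... services) {
        for (Service s : services) {
          // Null entries are ignored; any exception is logged at warn level
          // and returned rather than thrown, so cleanup keeps going.
          ServiceOperations.stopQuietly(LOG, s);
        }
      }
    }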




[32/50] [abbrv] hadoop git commit: MAPREDUCE-6246. DBOutputFormat.java appending extra semicolon to query which is incompatible with DB2. Contributed by ramtin and Gergely Novák.

Posted by as...@apache.org.
MAPREDUCE-6246. DBOutputFormat.java appending extra semicolon to query which is incompatible with DB2. Contributed by ramtin and Gergely Novák.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f484a6ff
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f484a6ff
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f484a6ff

Branch: refs/heads/YARN-5972
Commit: f484a6ff602d48413556a1d046670e2003c71c2e
Parents: f10864a
Author: Junping Du <ju...@apache.org>
Authored: Fri Jul 7 13:23:43 2017 -0700
Committer: Junping Du <ju...@apache.org>
Committed: Fri Jul 7 13:26:16 2017 -0700

----------------------------------------------------------------------
 .../hadoop/mapreduce/lib/db/DBOutputFormat.java | 15 ++++++-
 .../mapreduce/lib/db/TestDBOutputFormat.java    | 45 ++++++++++++++++++++
 2 files changed, 58 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f484a6ff/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/db/DBOutputFormat.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/db/DBOutputFormat.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/db/DBOutputFormat.java
index 2e3a9d8..c222bf5 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/db/DBOutputFormat.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/db/DBOutputFormat.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.mapreduce.lib.db;
 
 import java.io.IOException;
 import java.sql.Connection;
+import java.sql.DatabaseMetaData;
 import java.sql.PreparedStatement;
 import java.sql.SQLException;
 
@@ -51,6 +52,8 @@ public class DBOutputFormat<K  extends DBWritable, V>
 extends OutputFormat<K,V> {
 
   private static final Log LOG = LogFactory.getLog(DBOutputFormat.class);
+  public String dbProductName = "DEFAULT";
+
   public void checkOutputSpecs(JobContext context) 
       throws IOException, InterruptedException {}
 
@@ -158,7 +161,12 @@ extends OutputFormat<K,V> {
         query.append(",");
       }
     }
-    query.append(");");
+
+    if (dbProductName.startsWith("DB2") || dbProductName.startsWith("ORACLE")) {
+      query.append(")");
+    } else {
+      query.append(");");
+    }
 
     return query.toString();
   }
@@ -177,7 +185,10 @@ extends OutputFormat<K,V> {
     try {
       Connection connection = dbConf.getConnection();
       PreparedStatement statement = null;
-  
+
+      DatabaseMetaData dbMeta = connection.getMetaData();
+      this.dbProductName = dbMeta.getDatabaseProductName().toUpperCase();
+
       statement = connection.prepareStatement(
                     constructQuery(tableName, fieldNames));
       return new DBRecordWriter(connection, statement);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f484a6ff/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/db/TestDBOutputFormat.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/db/TestDBOutputFormat.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/db/TestDBOutputFormat.java
index 014855f..e547c8a 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/db/TestDBOutputFormat.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/db/TestDBOutputFormat.java
@@ -18,7 +18,9 @@
 package org.apache.hadoop.mapreduce.lib.db;
 
 import java.io.IOException;
+import java.lang.reflect.Field;
 
+import org.apache.commons.lang.StringUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.io.NullWritable;
 import org.apache.hadoop.mapreduce.Job;
@@ -26,6 +28,7 @@ import org.junit.Test;
 
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNull;
+import static org.junit.Assert.fail;
 
 public class TestDBOutputFormat {
   private String[] fieldNames = new String[] { "id", "name", "value" };
@@ -47,6 +50,48 @@ public class TestDBOutputFormat {
   }
 
   @Test
+  public void testDB2ConstructQuery() {
+    String db2expected = StringUtils.removeEnd(expected, ";");
+    String db2nullExpected = StringUtils.removeEnd(nullExpected, ";");
+
+    try {
+      Class<?> clazz = this.format.getClass();
+      Field field = clazz.getDeclaredField("dbProductName");
+      field.setAccessible(true);
+      field.set(format, "DB2");
+    } catch (IllegalAccessException | NoSuchFieldException e) {
+      fail(e.getMessage());
+    }
+
+    String actual = format.constructQuery("hadoop_output", fieldNames);
+    assertEquals(db2expected, actual);
+
+    actual = format.constructQuery("hadoop_output", nullFieldNames);
+    assertEquals(db2nullExpected, actual);
+  }
+
+  @Test
+  public void testORACLEConstructQuery() {
+    String oracleExpected = StringUtils.removeEnd(expected, ";");
+    String oracleNullExpected = StringUtils.removeEnd(nullExpected, ";");
+
+    try {
+      Class<?> clazz = this.format.getClass();
+      Field field = clazz.getDeclaredField("dbProductName");
+      field.setAccessible(true);
+      field.set(format, "ORACLE");
+    } catch (IllegalAccessException | NoSuchFieldException e) {
+      fail(e.getMessage());
+    }
+
+    String actual = format.constructQuery("hadoop_output", fieldNames);
+    assertEquals(oracleExpected, actual);
+
+    actual = format.constructQuery("hadoop_output", nullFieldNames);
+    assertEquals(oracleNullExpected, actual);
+  }
+
+  @Test
   public void testSetOutput() throws IOException {
     Job job = Job.getInstance(new Configuration());
     DBOutputFormat.setOutput(job, "hadoop_output", fieldNames);
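
Restating the behavioral change: only the terminator of the generated INSERT statement differs by database product, since a trailing semicolon in a prepared statement is rejected by DB2 (and, per the tests above, Oracle). A hedged restatement of that branch (QueryTerminator and finishQuery are invented names, not the real method):

    final class QueryTerminator {
      /** Illustrative restatement of the terminator rule in constructQuery. */
      static String finishQuery(StringBuilder query, String dbProductName) {
        // DB2 and Oracle prepared statements must not end with ';',
        // so the statement is closed without one for those products.
        if (dbProductName.startsWith("DB2")
            || dbProductName.startsWith("ORACLE")) {
          return query.append(")").toString();   // ... VALUES (?,?,?)
        }
        return query.append(");").toString();    // ... VALUES (?,?,?);
      }
    }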




[49/50] [abbrv] hadoop git commit: YARN-5292. NM Container lifecycle and state transitions to support for PAUSED container state. (Hitesh Sharma via asuresh)

Posted by as...@apache.org.
YARN-5292. NM Container lifecycle and state transitions to support for PAUSED container state. (Hitesh Sharma via asuresh)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0f796e77
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0f796e77
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0f796e77

Branch: refs/heads/YARN-5972
Commit: 0f796e779b477fa4d423861950b271dcf7df25f7
Parents: ac0a04a
Author: Arun Suresh <as...@apache.org>
Authored: Fri Dec 9 07:51:03 2016 -0800
Committer: Arun Suresh <as...@apache.org>
Committed: Tue Jul 11 21:29:30 2017 -0700

----------------------------------------------------------------------
 .../hadoop/yarn/api/records/ContainerState.java |   7 +-
 .../src/main/proto/yarn_protos.proto            |   1 +
 .../server/nodemanager/ContainerExecutor.java   |  22 +++
 .../container/ContainerEventType.java           |   6 +-
 .../container/ContainerImpl.java                | 170 ++++++++++++++++++-
 .../container/ContainerPauseEvent.java          |  40 +++++
 .../container/ContainerResumeEvent.java         |  39 +++++
 .../container/ContainerState.java               |   3 +-
 .../launcher/ContainerLaunch.java               |  90 +++++++++-
 .../launcher/ContainersLauncher.java            |  32 ++++
 .../launcher/ContainersLauncherEventType.java   |   3 +
 .../scheduler/ContainerSchedulerEventType.java  |   1 +
 .../container/TestContainer.java                |  51 ++++++
 13 files changed, 454 insertions(+), 11 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0f796e77/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerState.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerState.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerState.java
index 696fe06..45e5bd4 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerState.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerState.java
@@ -33,11 +33,14 @@ public enum ContainerState {
   
   /** Running container */
   RUNNING, 
-  
+
   /** Completed container */
   COMPLETE,
 
   /** Scheduled (awaiting resources) at the NM. */
   @InterfaceStability.Unstable
-  SCHEDULED
+  SCHEDULED,
+
+  /** Paused at the NM. */
+  PAUSED
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0f796e77/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
index 81ebd79..b299c23 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
@@ -83,6 +83,7 @@ enum ContainerStateProto {
   C_RUNNING = 2;
   C_COMPLETE = 3;
   C_SCHEDULED = 4;
+  C_PAUSED = 5;
 }
 
 message ContainerProto {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0f796e77/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java
index 0581878..732c4c2 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java
@@ -694,6 +694,28 @@ public abstract class ContainerExecutor implements Configurable {
   }
 
   /**
+   * Pause the container. The default implementation logs a warning and
+   * throws an UnsupportedOperationException; executor implementations
+   * that support pausing override this behavior.
+   * @param container
+   *          the Container
+   */
+  public void pauseContainer(Container container) {
+    LOG.warn(container.getContainerId() + " doesn't support pausing.");
+    throw new UnsupportedOperationException();
+  }
+
+  /**
+   * Resume the container from the paused state. The default implementation
+   * logs a warning and throws an UnsupportedOperationException; executor
+   * implementations that support resuming override this behavior.
+   * @param container
+   *          the Container
+   */
+  public void resumeContainer(Container container) {
+    LOG.warn(container.getContainerId() + " doesn't support resume.");
+    throw new UnsupportedOperationException();
+  }
+
+  /**
    * Get the process-identifier for the container.
    *
    * @param containerID the container ID
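
The base class deliberately throws, leaving pause semantics to concrete executors. Purely as an illustration of what an override might do on Linux (none of this is in the patch; SignalPauser and signalProcessGroup are invented names), pause and resume could be mapped onto SIGSTOP/SIGCONT for the container's process group:

    import java.io.IOException;

    /** Illustrative Linux-only pause/resume via signals; not a Hadoop class. */
    final class SignalPauser {

      /** SIGSTOP freezes the process group without terminating it. */
      static void pause(String pid) throws IOException, InterruptedException {
        signalProcessGroup(pid, "STOP");
      }

      /** SIGCONT lets a stopped process group run again. */
      static void resume(String pid) throws IOException, InterruptedException {
        signalProcessGroup(pid, "CONT");
      }

      private static void signalProcessGroup(String pid, String sig)
          throws IOException, InterruptedException {
        // "--" ends option parsing; "-<pid>" targets the whole process group.
        new ProcessBuilder("kill", "-s", sig, "--", "-" + pid)
            .inheritIO().start().waitFor();
      }
    }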

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0f796e77/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerEventType.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerEventType.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerEventType.java
index afea0e6..1475435 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerEventType.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerEventType.java
@@ -27,6 +27,8 @@ public enum ContainerEventType {
   CONTAINER_DONE,
   REINITIALIZE_CONTAINER,
   ROLLBACK_REINIT,
+  PAUSE_CONTAINER,
+  RESUME_CONTAINER,
 
   // DownloadManager
   CONTAINER_INITED,
@@ -38,5 +40,7 @@ public enum ContainerEventType {
   CONTAINER_LAUNCHED,
   CONTAINER_EXITED_WITH_SUCCESS,
   CONTAINER_EXITED_WITH_FAILURE,
-  CONTAINER_KILLED_ON_REQUEST
+  CONTAINER_KILLED_ON_REQUEST,
+  CONTAINER_PAUSED,
+  CONTAINER_RESUMED
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0f796e77/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
index 46f8fa0..44068be 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
@@ -302,6 +302,8 @@ public class ContainerImpl implements Container {
         UPDATE_DIAGNOSTICS_TRANSITION)
     .addTransition(ContainerState.NEW, ContainerState.DONE,
         ContainerEventType.KILL_CONTAINER, new KillOnNewTransition())
+    .addTransition(ContainerState.NEW, ContainerState.DONE,
+        ContainerEventType.PAUSE_CONTAINER, new KillOnPauseTransition())
 
     // From LOCALIZING State
     .addTransition(ContainerState.LOCALIZING,
@@ -317,6 +319,8 @@ public class ContainerImpl implements Container {
     .addTransition(ContainerState.LOCALIZING, ContainerState.KILLING,
         ContainerEventType.KILL_CONTAINER,
         new KillBeforeRunningTransition())
+    .addTransition(ContainerState.LOCALIZING, ContainerState.KILLING,
+        ContainerEventType.PAUSE_CONTAINER, new KillOnPauseTransition())
 
     // From LOCALIZATION_FAILED State
     .addTransition(ContainerState.LOCALIZATION_FAILED,
@@ -330,7 +334,8 @@ public class ContainerImpl implements Container {
     // container not launched so kill is a no-op
     .addTransition(ContainerState.LOCALIZATION_FAILED,
         ContainerState.LOCALIZATION_FAILED,
-        ContainerEventType.KILL_CONTAINER)
+        EnumSet.of(ContainerEventType.KILL_CONTAINER,
+            ContainerEventType.PAUSE_CONTAINER))
     // container cleanup triggers a release of all resources
     // regardless of whether they were localized or not
     // LocalizedResource handles release event in all states
@@ -386,6 +391,76 @@ public class ContainerImpl implements Container {
         ContainerState.EXITED_WITH_FAILURE,
         ContainerEventType.CONTAINER_KILLED_ON_REQUEST,
         new KilledExternallyTransition())
+    .addTransition(ContainerState.RUNNING, ContainerState.PAUSING,
+        ContainerEventType.PAUSE_CONTAINER, new PauseContainerTransition())
+
+    // From PAUSING State
+    .addTransition(ContainerState.PAUSING, ContainerState.KILLING,
+        ContainerEventType.KILL_CONTAINER, new KillTransition())
+    .addTransition(ContainerState.PAUSING, ContainerState.PAUSING,
+        ContainerEventType.UPDATE_DIAGNOSTICS_MSG,
+        UPDATE_DIAGNOSTICS_TRANSITION)
+    .addTransition(ContainerState.PAUSING, ContainerState.PAUSED,
+        ContainerEventType.CONTAINER_PAUSED, new PausedContainerTransition())
+    // In case something goes wrong then container will exit from the
+    // PAUSING state
+    .addTransition(ContainerState.PAUSING,
+        ContainerState.EXITED_WITH_SUCCESS,
+        ContainerEventType.CONTAINER_EXITED_WITH_SUCCESS)
+    .addTransition(ContainerState.PAUSING,
+        ContainerState.EXITED_WITH_FAILURE,
+        ContainerEventType.CONTAINER_EXITED_WITH_FAILURE,
+        new ExitedWithFailureTransition(true))
+    .addTransition(ContainerState.PAUSING, ContainerState.EXITED_WITH_FAILURE,
+        ContainerEventType.CONTAINER_KILLED_ON_REQUEST,
+        new KilledExternallyTransition())
+
+    // From PAUSED State
+    .addTransition(ContainerState.PAUSED, ContainerState.KILLING,
+        ContainerEventType.KILL_CONTAINER, new KillTransition())
+    .addTransition(ContainerState.PAUSED, ContainerState.PAUSED,
+        ContainerEventType.UPDATE_DIAGNOSTICS_MSG,
+        UPDATE_DIAGNOSTICS_TRANSITION)
+    .addTransition(ContainerState.PAUSED, ContainerState.PAUSED,
+        ContainerEventType.PAUSE_CONTAINER)
+    .addTransition(ContainerState.PAUSED, ContainerState.RESUMING,
+        ContainerEventType.RESUME_CONTAINER, new ResumeContainerTransition())
+    // In case something goes wrong then container will exit from the
+    // PAUSED state
+    .addTransition(ContainerState.PAUSED,
+        ContainerState.EXITED_WITH_FAILURE,
+        ContainerEventType.CONTAINER_EXITED_WITH_FAILURE,
+        new ExitedWithFailureTransition(true))
+    .addTransition(ContainerState.PAUSED, ContainerState.EXITED_WITH_FAILURE,
+        ContainerEventType.CONTAINER_KILLED_ON_REQUEST,
+        new KilledExternallyTransition())
+    .addTransition(ContainerState.PAUSED,
+        ContainerState.EXITED_WITH_SUCCESS,
+        ContainerEventType.CONTAINER_EXITED_WITH_SUCCESS,
+        new ExitedWithSuccessTransition(true))
+
+    // From RESUMING State
+    .addTransition(ContainerState.RESUMING, ContainerState.KILLING,
+        ContainerEventType.KILL_CONTAINER, new KillTransition())
+    .addTransition(ContainerState.RESUMING, ContainerState.RUNNING,
+        ContainerEventType.CONTAINER_RESUMED)
+    .addTransition(ContainerState.RESUMING, ContainerState.RESUMING,
+        ContainerEventType.UPDATE_DIAGNOSTICS_MSG,
+        UPDATE_DIAGNOSTICS_TRANSITION)
+    // In case something goes wrong then container will exit from the
+    // RESUMING state
+    .addTransition(ContainerState.RESUMING,
+        ContainerState.EXITED_WITH_FAILURE,
+        ContainerEventType.CONTAINER_EXITED_WITH_FAILURE,
+        new ExitedWithFailureTransition(true))
+    .addTransition(ContainerState.RESUMING,
+        ContainerState.EXITED_WITH_FAILURE,
+        ContainerEventType.CONTAINER_KILLED_ON_REQUEST,
+        new KilledExternallyTransition())
+    .addTransition(ContainerState.RESUMING,
+        ContainerState.EXITED_WITH_SUCCESS,
+        ContainerEventType.CONTAINER_EXITED_WITH_SUCCESS,
+        new ExitedWithSuccessTransition(true))
 
     // From REINITIALIZING State
     .addTransition(ContainerState.REINITIALIZING,
@@ -409,6 +484,8 @@ public class ContainerImpl implements Container {
         UPDATE_DIAGNOSTICS_TRANSITION)
     .addTransition(ContainerState.REINITIALIZING, ContainerState.KILLING,
         ContainerEventType.KILL_CONTAINER, new KillTransition())
+    .addTransition(ContainerState.REINITIALIZING, ContainerState.KILLING,
+        ContainerEventType.PAUSE_CONTAINER, new KillOnPauseTransition())
     .addTransition(ContainerState.REINITIALIZING,
         ContainerState.SCHEDULED,
         ContainerEventType.CONTAINER_KILLED_ON_REQUEST,
@@ -426,6 +503,8 @@ public class ContainerImpl implements Container {
         UPDATE_DIAGNOSTICS_TRANSITION)
     .addTransition(ContainerState.RELAUNCHING, ContainerState.KILLING,
         ContainerEventType.KILL_CONTAINER, new KillTransition())
+    .addTransition(ContainerState.RELAUNCHING, ContainerState.KILLING,
+        ContainerEventType.PAUSE_CONTAINER, new KillOnPauseTransition())
 
     // From CONTAINER_EXITED_WITH_SUCCESS State
     .addTransition(ContainerState.EXITED_WITH_SUCCESS, ContainerState.DONE,
@@ -437,7 +516,8 @@ public class ContainerImpl implements Container {
         UPDATE_DIAGNOSTICS_TRANSITION)
     .addTransition(ContainerState.EXITED_WITH_SUCCESS,
         ContainerState.EXITED_WITH_SUCCESS,
-        ContainerEventType.KILL_CONTAINER)
+        EnumSet.of(ContainerEventType.KILL_CONTAINER,
+            ContainerEventType.PAUSE_CONTAINER))
 
     // From EXITED_WITH_FAILURE State
     .addTransition(ContainerState.EXITED_WITH_FAILURE, ContainerState.DONE,
@@ -449,7 +529,8 @@ public class ContainerImpl implements Container {
         UPDATE_DIAGNOSTICS_TRANSITION)
     .addTransition(ContainerState.EXITED_WITH_FAILURE,
                    ContainerState.EXITED_WITH_FAILURE,
-                   ContainerEventType.KILL_CONTAINER)
+        EnumSet.of(ContainerEventType.KILL_CONTAINER,
+            ContainerEventType.PAUSE_CONTAINER))
 
     // From KILLING State.
     .addTransition(ContainerState.KILLING,
@@ -483,7 +564,8 @@ public class ContainerImpl implements Container {
     // in the container launcher
     .addTransition(ContainerState.KILLING,
         ContainerState.KILLING,
-        ContainerEventType.CONTAINER_LAUNCHED)
+        EnumSet.of(ContainerEventType.CONTAINER_LAUNCHED,
+            ContainerEventType.PAUSE_CONTAINER))
 
     // From CONTAINER_CLEANEDUP_AFTER_KILL State.
     .addTransition(ContainerState.CONTAINER_CLEANEDUP_AFTER_KILL,
@@ -499,11 +581,13 @@ public class ContainerImpl implements Container {
         EnumSet.of(ContainerEventType.KILL_CONTAINER,
             ContainerEventType.RESOURCE_FAILED,
             ContainerEventType.CONTAINER_EXITED_WITH_SUCCESS,
-            ContainerEventType.CONTAINER_EXITED_WITH_FAILURE))
+            ContainerEventType.CONTAINER_EXITED_WITH_FAILURE,
+            ContainerEventType.PAUSE_CONTAINER))
 
     // From DONE
     .addTransition(ContainerState.DONE, ContainerState.DONE,
-        ContainerEventType.KILL_CONTAINER)
+        EnumSet.of(ContainerEventType.KILL_CONTAINER,
+            ContainerEventType.PAUSE_CONTAINER))
     .addTransition(ContainerState.DONE, ContainerState.DONE,
         ContainerEventType.INIT_CONTAINER)
     .addTransition(ContainerState.DONE, ContainerState.DONE,
@@ -529,6 +613,8 @@ public class ContainerImpl implements Container {
     case LOCALIZING:
     case LOCALIZATION_FAILED:
     case SCHEDULED:
+    case PAUSED:
+    case RESUMING:
       return org.apache.hadoop.yarn.api.records.ContainerState.SCHEDULED;
     case RUNNING:
     case RELAUNCHING:
@@ -538,6 +624,7 @@ public class ContainerImpl implements Container {
     case KILLING:
     case CONTAINER_CLEANEDUP_AFTER_KILL:
     case CONTAINER_RESOURCES_CLEANINGUP:
+    case PAUSING:
       return org.apache.hadoop.yarn.api.records.ContainerState.RUNNING;
     case DONE:
     default:
@@ -1480,6 +1567,26 @@ public class ContainerImpl implements Container {
   }
 
   /**
+   * Transitions upon receiving PAUSE_CONTAINER.
+   * - LOCALIZING -> KILLING.
+   * - REINITIALIZING -> KILLING.
+   */
+  @SuppressWarnings("unchecked") // dispatcher not typed
+  static class KillOnPauseTransition implements
+      SingleArcTransition<ContainerImpl, ContainerEvent> {
+
+    @SuppressWarnings("unchecked")
+    @Override
+    public void transition(ContainerImpl container, ContainerEvent event) {
+      // Kill the process/process-grp
+      container.setIsReInitializing(false);
+      container.dispatcher.getEventHandler().handle(
+          new ContainersLauncherEvent(container,
+              ContainersLauncherEventType.CLEANUP_CONTAINER));
+    }
+  }
+
+  /**
    * Transition from KILLING to CONTAINER_CLEANEDUP_AFTER_KILL
    * upon receiving CONTAINER_KILLED_ON_REQUEST.
    */
@@ -1668,6 +1775,57 @@ public class ContainerImpl implements Container {
     }
   }
 
+  /**
+   * Transitions upon receiving PAUSE_CONTAINER.
+   * - RUNNING -> PAUSING
+   */
+  @SuppressWarnings("unchecked") // dispatcher not typed
+  static class PauseContainerTransition implements
+      SingleArcTransition<ContainerImpl, ContainerEvent> {
+    @Override
+    public void transition(ContainerImpl container, ContainerEvent event) {
+      // Pause the process/process-grp if it is supported by the container
+      container.dispatcher.getEventHandler().handle(
+          new ContainersLauncherEvent(container,
+              ContainersLauncherEventType.PAUSE_CONTAINER));
+      ContainerPauseEvent pauseEvent = (ContainerPauseEvent) event;
+      container.addDiagnostics(pauseEvent.getDiagnostic(), "\n");
+    }
+  }
+
+  /**
+   * Transitions upon receiving PAUSED_CONTAINER.
+   */
+  @SuppressWarnings("unchecked") // dispatcher not typed
+  static class PausedContainerTransition implements
+      SingleArcTransition<ContainerImpl, ContainerEvent> {
+    @Override
+    public void transition(ContainerImpl container, ContainerEvent event) {
+      // Container was PAUSED so tell the scheduler
+      container.dispatcher.getEventHandler().handle(
+          new ContainerSchedulerEvent(container,
+              ContainerSchedulerEventType.CONTAINER_PAUSED));
+    }
+  }
+
+  /**
+   * Transitions upon receiving RESUME_CONTAINER.
+   * - PAUSED -> RESUMING
+   */
+  @SuppressWarnings("unchecked") // dispatcher not typed
+  static class ResumeContainerTransition implements
+      SingleArcTransition<ContainerImpl, ContainerEvent> {
+    @Override
+    public void transition(ContainerImpl container, ContainerEvent event) {
+      // Resume the process/process-grp if it is supported by the container
+      container.dispatcher.getEventHandler().handle(
+          new ContainersLauncherEvent(container,
+              ContainersLauncherEventType.RESUME_CONTAINER));
+      ContainerResumeEvent resumeEvent = (ContainerResumeEvent) event;
+      container.addDiagnostics(resumeEvent.getDiagnostic(), "\n");
+    }
+  }
+
   @Override
   public void handle(ContainerEvent event) {
     try {
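
All of the arcs registered above share one shape: a SingleArcTransition whose transition() body raises follow-up events on the dispatcher instead of doing the work inline. A stripped-down sketch of that shape with invented names (State, Event, and PauseArc are not YARN classes):

    /** Illustrative single-arc transition over a toy pause lifecycle. */
    final class PauseArc {
      enum State { RUNNING, PAUSING, PAUSED }
      enum Event { PAUSE, PAUSED_ACK }

      static State apply(State state, Event event) {
        // RUNNING --PAUSE--> PAUSING; the real code also fires a
        // ContainersLauncherEventType.PAUSE_CONTAINER event here.
        if (state == State.RUNNING && event == Event.PAUSE) {
          return State.PAUSING;
        }
        // PAUSING --PAUSED_ACK--> PAUSED; the real code notifies the
        // container scheduler at this point.
        if (state == State.PAUSING && event == Event.PAUSED_ACK) {
          return State.PAUSED;
        }
        return state; // all other pairs: stay put
      }
    }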

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0f796e77/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerPauseEvent.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerPauseEvent.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerPauseEvent.java
new file mode 100644
index 0000000..898304e
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerPauseEvent.java
@@ -0,0 +1,40 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.nodemanager.containermanager.container;
+
+import org.apache.hadoop.yarn.api.records.ContainerId;
+
+/**
+ * ContainerEvent for ContainerEventType.PAUSE_CONTAINER.
+ */
+public class ContainerPauseEvent extends ContainerEvent {
+
+  private final String diagnostic;
+
+  public ContainerPauseEvent(ContainerId cId,
+      String diagnostic) {
+    super(cId, ContainerEventType.PAUSE_CONTAINER);
+    this.diagnostic = diagnostic;
+  }
+
+  public String getDiagnostic() {
+    return this.diagnostic;
+  }
+}
+

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0f796e77/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerResumeEvent.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerResumeEvent.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerResumeEvent.java
new file mode 100644
index 0000000..d7c9e9a
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerResumeEvent.java
@@ -0,0 +1,39 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.nodemanager.containermanager.container;
+
+import org.apache.hadoop.yarn.api.records.ContainerId;
+
+/**
+ * ContainerEvent for ContainerEventType.RESUME_CONTAINER.
+ */
+public class ContainerResumeEvent extends ContainerEvent {
+
+  private final String diagnostic;
+
+  public ContainerResumeEvent(ContainerId cId,
+      String diagnostic) {
+    super(cId, ContainerEventType.RESUME_CONTAINER);
+    this.diagnostic = diagnostic;
+  }
+
+  public String getDiagnostic() {
+    return this.diagnostic;
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0f796e77/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerState.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerState.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerState.java
index 91d1356..7c3fea8 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerState.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerState.java
@@ -21,5 +21,6 @@ package org.apache.hadoop.yarn.server.nodemanager.containermanager.container;
 public enum ContainerState {
   NEW, LOCALIZING, LOCALIZATION_FAILED, SCHEDULED, RUNNING, RELAUNCHING,
   REINITIALIZING, EXITED_WITH_SUCCESS, EXITED_WITH_FAILURE, KILLING,
-  CONTAINER_CLEANEDUP_AFTER_KILL, CONTAINER_RESOURCES_CLEANINGUP, DONE
+  CONTAINER_CLEANEDUP_AFTER_KILL, CONTAINER_RESOURCES_CLEANINGUP, DONE,
+  PAUSING, PAUSED, RESUMING
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0f796e77/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java
index 0b599a8..872f666 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java
@@ -75,6 +75,7 @@ import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Cont
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerEvent;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerEventType;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerExitEvent;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerKillEvent;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerState;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.ContainerLocalizer;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.ResourceLocalizationService;
@@ -86,6 +87,7 @@ import org.apache.hadoop.yarn.util.Apps;
 import org.apache.hadoop.yarn.util.AuxiliaryServiceHelper;
 
 import com.google.common.annotations.VisibleForTesting;
+import org.apache.hadoop.yarn.util.ConverterUtils;
 
 public class ContainerLaunch implements Callable<Integer> {
 
@@ -105,8 +107,10 @@ public class ContainerLaunch implements Callable<Integer> {
   private final Configuration conf;
   private final Context context;
   private final ContainerManagerImpl containerManager;
-  
+
   protected AtomicBoolean containerAlreadyLaunched = new AtomicBoolean(false);
+  protected AtomicBoolean shouldPauseContainer = new AtomicBoolean(false);
+
   protected AtomicBoolean completed = new AtomicBoolean(false);
 
   private volatile boolean killedBeforeStart = false;
@@ -776,6 +780,90 @@ public class ContainerLaunch implements Callable<Integer> {
   }
 
   /**
+   * Pause the container.
+   * The pause event is only delivered while the container is running, so
+   * this asks the executor to pause the container.
+   * @throws IOException in case of errors.
+   */
+  @SuppressWarnings("unchecked") // dispatcher not typed
+  public void pauseContainer() throws IOException {
+    ContainerId containerId = container.getContainerId();
+    String containerIdStr = containerId.toString();
+    LOG.info("Pausing the container " + containerIdStr);
+
+    // The pause event is only handled if the container is in the running state
+    // (the container state machine), so we don't check for
+    // shouldLaunchContainer here.
+
+    if (!shouldPauseContainer.compareAndSet(false, true)) {
+      LOG.info("Container " + containerId + " not paused as "
+          + "resume already called");
+      return;
+    }
+
+    try {
+      // Pause the container
+      exec.pauseContainer(container);
+
+      // PauseContainer is a blocking call. Reaching this point almost
+      // certainly means the container is paused, so send out the event.
+      dispatcher.getEventHandler().handle(new ContainerEvent(
+          containerId,
+          ContainerEventType.CONTAINER_PAUSED));
+    } catch (Exception e) {
+      String message =
+          "Exception when trying to pause container " + containerIdStr
+              + ": " + StringUtils.stringifyException(e);
+      LOG.info(message);
+      container.handle(new ContainerKillEvent(container.getContainerId(),
+          ContainerExitStatus.PREEMPTED, "Container preempted as there was"
+          + " an exception in pausing it."));
+    }
+  }
+
+  /**
+   * Resume the container.
+   * The resume event is only delivered while the container is paused, so
+   * this asks the executor to resume the container.
+   * @throws IOException in case of error.
+   */
+  @SuppressWarnings("unchecked") // dispatcher not typed
+  public void resumeContainer() throws IOException {
+    ContainerId containerId = container.getContainerId();
+    String containerIdStr = containerId.toString();
+    LOG.info("Resuming the container " + containerIdStr);
+
+    // The resume event is only handled if the container is in a paused state
+    // so we don't check for the launched flag here.
+
+    // paused flag will be set to true if process already paused
+    boolean alreadyPaused = !shouldPauseContainer.compareAndSet(false, true);
+    if (!alreadyPaused) {
+      LOG.info("Container " + containerIdStr + " not paused."
+          + " No resume necessary");
+      return;
+    }
+
+    // The container was paused, so ask the executor to resume it
+    try {
+      exec.resumeContainer(container);
+      // ResumeContainer is a blocking call. Reaching this point almost
+      // certainly means the container is resumed, so send out the event.
+      dispatcher.getEventHandler().handle(new ContainerEvent(
+          containerId,
+          ContainerEventType.CONTAINER_RESUMED));
+    } catch (Exception e) {
+      String message =
+          "Exception when trying to resume container " + containerIdStr
+              + ": " + StringUtils.stringifyException(e);
+      LOG.info(message);
+      container.handle(new ContainerKillEvent(container.getContainerId(),
+          ContainerExitStatus.PREEMPTED, "Container preempted as there was"
+          + " an exception in resuming it."));
+    }
+  }
+
+  /**
    * Loop through for a time-bounded interval waiting to
    * read the process id from a file generated by a running process.
    * @param pidFilePath File from which to read the process id
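
The AtomicBoolean used above acts as a compare-and-set gate so that at most one request wins the transition. A minimal sketch of the general gate pattern (PauseGate is an invented name; note the patch itself drives both pause and resume off the single shouldPauseContainer flag):

    import java.util.concurrent.atomic.AtomicBoolean;

    /** Illustrative idempotency gate; not a Hadoop class. */
    final class PauseGate {
      private final AtomicBoolean paused = new AtomicBoolean(false);

      /** @return true only for the single caller that flips running -> paused. */
      boolean tryPause() {
        return paused.compareAndSet(false, true);
      }

      /** @return true only if there was a recorded pause to undo. */
      boolean tryResume() {
        return paused.compareAndSet(true, false);
      }
    }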

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0f796e77/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainersLauncher.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainersLauncher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainersLauncher.java
index d4a7bfd..eb6eaf5 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainersLauncher.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainersLauncher.java
@@ -30,6 +30,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileContext;
 import org.apache.hadoop.fs.UnsupportedFileSystemException;
 import org.apache.hadoop.service.AbstractService;
+import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.concurrent.HadoopExecutors;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.event.Dispatcher;
@@ -41,6 +42,7 @@ import org.apache.hadoop.yarn.server.nodemanager.LocalDirsHandlerService;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.ContainerManagerImpl;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.Application;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.ResourceLocalizationService;
 
 import com.google.common.annotations.VisibleForTesting;
@@ -170,6 +172,36 @@ public class ContainersLauncher extends AbstractService
               + " with command " + signalEvent.getCommand());
         }
         break;
+      case PAUSE_CONTAINER:
+        ContainerLaunch launchedContainer = running.get(containerId);
+        if (launchedContainer == null) {
+          // Container not launched. So nothing needs to be done.
+          return;
+        }
+
+        // Pause the container
+        try {
+          launchedContainer.pauseContainer();
+        } catch (Exception e) {
+          LOG.info("Got exception while pausing container: " +
+            StringUtils.stringifyException(e));
+        }
+        break;
+      case RESUME_CONTAINER:
+        ContainerLaunch launchCont = running.get(containerId);
+        if (launchCont == null) {
+          // Container not launched. So nothing needs to be done.
+          return;
+        }
+
+        // Resume the container.
+        try {
+          launchCont.resumeContainer();
+        } catch (Exception e) {
+          LOG.info("Got exception while resuming container: " +
+            StringUtils.stringifyException(e));
+        }
+        break;
     }
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0f796e77/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainersLauncherEventType.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainersLauncherEventType.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainersLauncherEventType.java
index 380a032..1054e06 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainersLauncherEventType.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainersLauncherEventType.java
@@ -25,4 +25,6 @@ public enum ContainersLauncherEventType {
   CLEANUP_CONTAINER, // The process(grp) itself.
   CLEANUP_CONTAINER_FOR_REINIT, // The process(grp) itself.
   SIGNAL_CONTAINER,
+  PAUSE_CONTAINER,
+  RESUME_CONTAINER
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0f796e77/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/ContainerSchedulerEventType.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/ContainerSchedulerEventType.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/ContainerSchedulerEventType.java
index 086cb9b..9ff731f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/ContainerSchedulerEventType.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/ContainerSchedulerEventType.java
@@ -26,4 +26,5 @@ public enum ContainerSchedulerEventType {
   CONTAINER_COMPLETED,
   // Producer: Node HB response - RM has asked to shed the queue
   SHED_QUEUED_CONTAINERS,
+  CONTAINER_PAUSED
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0f796e77/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/TestContainer.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/TestContainer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/TestContainer.java
index 33f4609..8909088 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/TestContainer.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/TestContainer.java
@@ -103,6 +103,7 @@ import org.apache.hadoop.yarn.server.utils.BuilderUtils;
 import org.junit.Assert;
 import org.junit.Test;
 import org.mockito.ArgumentMatcher;
+import org.mockito.Mockito;
 
 public class TestContainer {
 
@@ -207,6 +208,42 @@ public class TestContainer {
 
   @Test
   @SuppressWarnings("unchecked") // mocked generic
+  public void testContainerPauseAndResume() throws Exception {
+    WrappedContainer wc = null;
+    try {
+      wc = new WrappedContainer(13, 314159265358979L, 4344, "yak");
+      wc.initContainer();
+      wc.localizeResources();
+      int running = metrics.getRunningContainers();
+      wc.launchContainer();
+      assertEquals(running + 1, metrics.getRunningContainers());
+      reset(wc.localizerBus);
+      wc.pauseContainer();
+      assertEquals(ContainerState.PAUSED,
+          wc.c.getContainerState());
+      wc.resumeContainer();
+      assertEquals(ContainerState.RUNNING,
+          wc.c.getContainerState());
+      wc.containerKilledOnRequest();
+      assertEquals(ContainerState.EXITED_WITH_FAILURE,
+          wc.c.getContainerState());
+      assertNull(wc.c.getLocalizedResources());
+      verifyCleanupCall(wc);
+      int failed = metrics.getFailedContainers();
+      wc.containerResourcesCleanup();
+      assertEquals(ContainerState.DONE, wc.c.getContainerState());
+      assertEquals(failed + 1, metrics.getFailedContainers());
+      assertEquals(running, metrics.getRunningContainers());
+    }
+    finally {
+      if (wc != null) {
+        wc.finished();
+      }
+    }
+  }
+
+  @Test
+  @SuppressWarnings("unchecked") // mocked generic
   public void testCleanupOnFailure() throws Exception {
     WrappedContainer wc = null;
     try {
@@ -955,6 +992,8 @@ public class TestContainer {
       NodeStatusUpdater nodeStatusUpdater = mock(NodeStatusUpdater.class);
       when(context.getNodeStatusUpdater()).thenReturn(nodeStatusUpdater);
       ContainerExecutor executor = mock(ContainerExecutor.class);
+      Mockito.doNothing().when(executor).pauseContainer(any(Container.class));
+      Mockito.doNothing().when(executor).resumeContainer(any(Container.class));
       launcher =
           new ContainersLauncher(context, dispatcher, executor, null, null);
       // create a mock ExecutorService, which will not really launch
@@ -1143,6 +1182,18 @@ public class TestContainer {
       drainDispatcherEvents();
     }
 
+    public void pauseContainer() {
+      c.handle(new ContainerPauseEvent(cId,
+          "PauseRequest"));
+      drainDispatcherEvents();
+    }
+
+    public void resumeContainer() {
+      c.handle(new ContainerResumeEvent(cId,
+          "ResumeRequest"));
+      drainDispatcherEvents();
+    }
+
     public void containerKilledOnRequest() {
       int exitCode = ContainerExitStatus.KILLED_BY_RESOURCEMANAGER;
       String diagnosticMsg = "Container completed with exit code " + exitCode;




[39/50] [abbrv] hadoop git commit: HADOOP-14634. Remove jline from main Hadoop pom.xml. Contributed by Ray Chiang.

Posted by as...@apache.org.
HADOOP-14634. Remove jline from main Hadoop pom.xml.
Contributed by Ray Chiang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/09653ea0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/09653ea0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/09653ea0

Branch: refs/heads/YARN-5972
Commit: 09653ea098a17fddcf111b0da289085915c351d1
Parents: 3de47ab
Author: Steve Loughran <st...@apache.org>
Authored: Mon Jul 10 11:53:13 2017 +0100
Committer: Steve Loughran <st...@apache.org>
Committed: Mon Jul 10 11:53:13 2017 +0100

----------------------------------------------------------------------
 hadoop-client-modules/hadoop-client/pom.xml                    | 4 ----
 hadoop-common-project/hadoop-common/pom.xml                    | 4 ----
 hadoop-project/pom.xml                                         | 4 ++++
 .../hadoop-yarn-server/hadoop-yarn-server-common/pom.xml       | 6 ------
 4 files changed, 4 insertions(+), 14 deletions(-)
----------------------------------------------------------------------
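
For context on the mechanics: declaring the jline exclusion once on the zookeeper dependency in hadoop-project/pom.xml (the POM holding Hadoop's shared dependency management) lets every module that inherits that dependency drop its own copy of the exclusion, which is why the per-module exclusions below can simply be deleted. A minimal sketch of the resulting pattern; the consuming-module snippet is illustrative, not part of the patch:

  <!-- hadoop-project/pom.xml: exclusion declared once -->
  <dependency>
    <groupId>org.apache.zookeeper</groupId>
    <artifactId>zookeeper</artifactId>
    <exclusions>
      <exclusion>
        <groupId>jline</groupId>
        <artifactId>jline</artifactId>
      </exclusion>
    </exclusions>
  </dependency>

  <!-- consuming module, e.g. hadoop-yarn-server-common/pom.xml: no exclusion needed -->
  <dependency>
    <groupId>org.apache.zookeeper</groupId>
    <artifactId>zookeeper</artifactId>
  </dependency>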


http://git-wip-us.apache.org/repos/asf/hadoop/blob/09653ea0/hadoop-client-modules/hadoop-client/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-client-modules/hadoop-client/pom.xml b/hadoop-client-modules/hadoop-client/pom.xml
index 629f9fa..a32a2a3 100644
--- a/hadoop-client-modules/hadoop-client/pom.xml
+++ b/hadoop-client-modules/hadoop-client/pom.xml
@@ -163,10 +163,6 @@
           <artifactId>avro</artifactId>
         </exclusion>
         <exclusion>
-          <groupId>jline</groupId>
-          <artifactId>jline</artifactId>
-        </exclusion>
-        <exclusion>
           <groupId>io.netty</groupId>
           <artifactId>netty</artifactId>
         </exclusion>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/09653ea0/hadoop-common-project/hadoop-common/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/pom.xml b/hadoop-common-project/hadoop-common/pom.xml
index f74290d..3e73bce 100644
--- a/hadoop-common-project/hadoop-common/pom.xml
+++ b/hadoop-common-project/hadoop-common/pom.xml
@@ -263,10 +263,6 @@
       <artifactId>zookeeper</artifactId>
       <exclusions>
         <exclusion>
-          <groupId>jline</groupId>
-          <artifactId>jline</artifactId>
-        </exclusion>
-        <exclusion>
           <groupId>org.jboss.netty</groupId>
           <artifactId>netty</artifactId>
         </exclusion>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/09653ea0/hadoop-project/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index a84070b..3969474 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -1024,6 +1024,10 @@
             <groupId>org.jboss.netty</groupId>
             <artifactId>netty</artifactId>
           </exclusion>
+          <exclusion>
+            <groupId>jline</groupId>
+            <artifactId>jline</artifactId>
+          </exclusion>
         </exclusions>
       </dependency>
       <dependency>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/09653ea0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/pom.xml
index ea0f32e..1ee7110 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/pom.xml
@@ -90,12 +90,6 @@
     <dependency>
       <groupId>org.apache.zookeeper</groupId>
       <artifactId>zookeeper</artifactId>
-      <exclusions>
-        <exclusion>
-          <groupId>jline</groupId>
-          <artifactId>jline</artifactId>
-        </exclusion>
-      </exclusions>
     </dependency>
     <dependency>
       <groupId>org.fusesource.leveldbjni</groupId>




[42/50] [abbrv] hadoop git commit: HADOOP-14638. Replace commons-logging APIs with slf4j in StreamPumper.

Posted by as...@apache.org.
HADOOP-14638. Replace commons-logging APIs with slf4j in StreamPumper.

This closes #247

Signed-off-by: Akira Ajisaka <aa...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fce79510
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fce79510
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fce79510

Branch: refs/heads/YARN-5972
Commit: fce795101461cbce37334b0799b2238825f5a5aa
Parents: f1efa14
Author: wenxinhe <we...@gmail.com>
Authored: Mon Jul 10 11:37:48 2017 +0800
Committer: Akira Ajisaka <aa...@apache.org>
Committed: Tue Jul 11 13:30:13 2017 +0900

----------------------------------------------------------------------
 .../org/apache/hadoop/ha/PowerShellFencer.java  |  7 +--
 .../apache/hadoop/ha/ShellCommandFencer.java    |  7 ++-
 .../org/apache/hadoop/ha/SshFenceByTcpPort.java |  7 +--
 .../java/org/apache/hadoop/ha/StreamPumper.java |  8 +--
 .../hadoop/ha/TestShellCommandFencer.java       | 55 ++++++++++++++++++--
 5 files changed, 65 insertions(+), 19 deletions(-)
----------------------------------------------------------------------
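
Each file below gets the same mechanical commons-logging to slf4j substitution; a minimal before/after sketch of the pattern (the Example class is hypothetical, not part of the patch):

  // Before: commons-logging
  //   import org.apache.commons.logging.Log;
  //   import org.apache.commons.logging.LogFactory;
  //   private static final Log LOG = LogFactory.getLog(Example.class);

  // After: slf4j
  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;

  public class Example {
    private static final Logger LOG = LoggerFactory.getLogger(Example.class);

    void run() {
      // slf4j takes {} placeholders, so the message string is only
      // assembled when the log level is actually enabled.
      LOG.warn("stream {} closed unexpectedly", "stderr");
    }
  }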


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fce79510/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/PowerShellFencer.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/PowerShellFencer.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/PowerShellFencer.java
index 761b40a..6de618c 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/PowerShellFencer.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/PowerShellFencer.java
@@ -25,10 +25,10 @@ import java.io.OutputStreamWriter;
 import java.net.InetSocketAddress;
 import java.nio.charset.StandardCharsets;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.util.StringUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Fencer method that uses PowerShell to remotely connect to a machine and kill
@@ -41,7 +41,8 @@ import org.apache.hadoop.util.StringUtils;
  */
 public class PowerShellFencer extends Configured implements FenceMethod {
 
-  private static final Log LOG = LogFactory.getLog(PowerShellFencer.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(PowerShellFencer.class);
 
 
   @Override

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fce79510/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ShellCommandFencer.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ShellCommandFencer.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ShellCommandFencer.java
index 15edee9..7e4a88f 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ShellCommandFencer.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ShellCommandFencer.java
@@ -21,12 +21,12 @@ import java.io.IOException;
 import java.lang.reflect.Field;
 import java.util.Map;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configured;
 
 import com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.util.Shell;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Fencing method that runs a shell command. It should be specified
@@ -61,8 +61,7 @@ public class ShellCommandFencer
   private static final String TARGET_PREFIX = "target_";
 
   @VisibleForTesting
-  static Log LOG = LogFactory.getLog(
-      ShellCommandFencer.class);
+  static Logger LOG = LoggerFactory.getLogger(ShellCommandFencer.class);
 
   @Override
   public void checkArgs(String args) throws BadFencingConfigurationException {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fce79510/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/SshFenceByTcpPort.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/SshFenceByTcpPort.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/SshFenceByTcpPort.java
index 5815564..64cd5a8 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/SshFenceByTcpPort.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/SshFenceByTcpPort.java
@@ -32,6 +32,8 @@ import com.jcraft.jsch.ChannelExec;
 import com.jcraft.jsch.JSch;
 import com.jcraft.jsch.JSchException;
 import com.jcraft.jsch.Session;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * This fencing implementation sshes to the target node and uses 
@@ -58,9 +60,8 @@ import com.jcraft.jsch.Session;
 public class SshFenceByTcpPort extends Configured
   implements FenceMethod {
 
-  static final Log LOG = LogFactory.getLog(
-      SshFenceByTcpPort.class);
-  
+  static final Logger LOG = LoggerFactory.getLogger(SshFenceByTcpPort.class);
+
   static final String CONF_CONNECT_TIMEOUT_KEY =
     "dfs.ha.fencing.ssh.connect-timeout";
   private static final int CONF_CONNECT_TIMEOUT_DEFAULT =

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fce79510/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/StreamPumper.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/StreamPumper.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/StreamPumper.java
index 8018f43..12a24fd 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/StreamPumper.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/StreamPumper.java
@@ -17,14 +17,14 @@
  */
 package org.apache.hadoop.ha;
 
+import org.slf4j.Logger;
+
 import java.io.BufferedReader;
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.InputStreamReader;
 import java.nio.charset.StandardCharsets;
 
-import org.apache.commons.logging.Log;
-
 /**
  * Class responsible for pumping the streams of the subprocess
  * out to log4j. stderr is pumped to WARN level and stdout is
@@ -35,7 +35,7 @@ class StreamPumper {
     STDOUT, STDERR;
   }
 
-  private final Log log;
+  private final Logger log;
   
   final Thread thread;
   final String logPrefix;
@@ -43,7 +43,7 @@ class StreamPumper {
   private final InputStream stream;
   private boolean started = false;
   
-  StreamPumper(final Log log, final String logPrefix,
+  StreamPumper(final Logger log, final String logPrefix,
       final InputStream stream, final StreamType type) {
     this.log = log;
     this.logPrefix = logPrefix;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fce79510/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestShellCommandFencer.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestShellCommandFencer.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestShellCommandFencer.java
index 6750b3b..3a2cf05 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestShellCommandFencer.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestShellCommandFencer.java
@@ -19,30 +19,43 @@ package org.apache.hadoop.ha;
 
 import static org.junit.Assert.*;
 
+import java.lang.reflect.Method;
 import java.net.InetSocketAddress;
+import java.util.List;
 
+import com.google.common.collect.Lists;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
 import org.apache.hadoop.util.Shell;
 import org.apache.hadoop.util.StringUtils;
+import org.junit.AfterClass;
 import org.junit.Before;
 import org.junit.BeforeClass;
 import org.junit.Test;
 import org.mockito.Mockito;
+import org.mockito.invocation.InvocationOnMock;
+import org.mockito.stubbing.Answer;
+import org.slf4j.Logger;
 
-import static org.mockito.Mockito.spy;
+import static org.mockito.Mockito.mock;
 
 public class TestShellCommandFencer {
   private ShellCommandFencer fencer = createFencer();
   private static final HAServiceTarget TEST_TARGET =
       new DummyHAService(HAServiceState.ACTIVE,
           new InetSocketAddress("dummyhost", 1234));
-  
+  private static final Logger LOG = ShellCommandFencer.LOG;
+
   @BeforeClass
-  public static void setupLogSpy() {
-    ShellCommandFencer.LOG = spy(ShellCommandFencer.LOG);
+  public static void setupLogMock() {
+    ShellCommandFencer.LOG = mock(Logger.class, new LogAnswer());
   }
-  
+
+  @AfterClass
+  public static void tearDownLogMock() throws Exception {
+    ShellCommandFencer.LOG = LOG;
+  }
+
   @Before
   public void resetLogSpy() {
     Mockito.reset(ShellCommandFencer.LOG);
@@ -173,4 +186,36 @@ public class TestShellCommandFencer {
     assertEquals("a...gh", ShellCommandFencer.abbreviate("abcdefgh", 6));
     assertEquals("ab...gh", ShellCommandFencer.abbreviate("abcdefgh", 7));
   }
+
+  /**
+   * An Answer that simply delegates some basic log methods to the real LOG.
+   */
+  private static class LogAnswer implements Answer {
+
+    private static final List<String> DELEGATE_METHODS = Lists.asList("error",
+        new String[]{"warn", "info", "debug", "trace"});
+
+    @Override
+    public Object answer(InvocationOnMock invocation) {
+
+      String methodName = invocation.getMethod().getName();
+
+      if (!DELEGATE_METHODS.contains(methodName)) {
+        return null;
+      }
+
+      try {
+        String msg = invocation.getArguments()[0].toString();
+        Method delegateMethod = LOG.getClass().getMethod(methodName,
+            msg.getClass());
+        delegateMethod.invoke(LOG, msg);
+      } catch (Throwable e) {
+        throw new IllegalStateException(
+            "Unsupported delegate method: " + methodName);
+      }
+
+      return null;
+    }
+  }
+
 }




[38/50] [abbrv] hadoop git commit: YARN-6764. Simplify the logic in FairScheduler#attemptScheduling. Contributed by Yufei Gu.

Posted by as...@apache.org.
YARN-6764. Simplify the logic in FairScheduler#attemptScheduling. Contributed by Yufei Gu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3de47ab5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3de47ab5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3de47ab5

Branch: refs/heads/YARN-5972
Commit: 3de47ab5ea5cb75805a03010dc72e11b0cf6c173
Parents: ba5b056
Author: Yufei Gu <yu...@apache.org>
Authored: Sun Jul 9 16:08:37 2017 -0700
Committer: Yufei Gu <yu...@apache.org>
Committed: Sun Jul 9 16:09:12 2017 -0700

----------------------------------------------------------------------
 .../resourcemanager/scheduler/fair/FairScheduler.java    | 11 ++++-------
 1 file changed, 4 insertions(+), 7 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3de47ab5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
index d779159..b41d3f7 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
@@ -1046,16 +1046,13 @@ public class FairScheduler extends
         Resource maxResourcesToAssign = Resources.multiply(
             node.getUnallocatedResource(), 0.5f);
         while (node.getReservedContainer() == null) {
-          boolean assignedContainer = false;
           Resource assignment = queueMgr.getRootQueue().assignContainer(node);
-          if (!assignment.equals(Resources.none())) {
-            assignedContainers++;
-            assignedContainer = true;
-            Resources.addTo(assignedResource, assignment);
-          }
-          if (!assignedContainer) {
+          if (assignment.equals(Resources.none())) {
             break;
           }
+
+          assignedContainers++;
+          Resources.addTo(assignedResource, assignment);
           if (!shouldContinueAssigning(assignedContainers, maxResourcesToAssign,
               assignedResource)) {
             break;




[50/50] [abbrv] hadoop git commit: YARN-5216. Expose configurable preemption policy for OPPORTUNISTIC containers running on the NM. (Hitesh Sharma via asuresh)

Posted by as...@apache.org.
YARN-5216. Expose configurable preemption policy for OPPORTUNISTIC containers running on the NM. (Hitesh Sharma via asuresh)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/66b01b34
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/66b01b34
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/66b01b34

Branch: refs/heads/YARN-5972
Commit: 66b01b3437e9c027c34c31e3039cc1958433fa5d
Parents: 0f796e77
Author: Arun Suresh <as...@apache.org>
Authored: Sat Dec 24 17:16:52 2016 -0800
Committer: Arun Suresh <as...@apache.org>
Committed: Tue Jul 11 21:51:32 2017 -0700

----------------------------------------------------------------------
 .../hadoop/yarn/conf/YarnConfiguration.java     |   9 ++
 .../src/main/resources/yarn-default.xml         |   9 ++
 .../containermanager/container/Container.java   |   2 +
 .../container/ContainerImpl.java                |  32 ++++--
 .../scheduler/ContainerScheduler.java           |  84 ++++++++++++---
 .../TestContainerSchedulerQueuing.java          | 103 +++++++++++++++++++
 .../nodemanager/webapp/MockContainer.java       |   5 +
 7 files changed, 219 insertions(+), 25 deletions(-)
----------------------------------------------------------------------
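
Operationally, opting into the pause-based policy is a single switch; a minimal yarn-site.xml sketch using the property introduced below (pausing also relies on the NodeManager's ContainerExecutor supporting pauseContainer/resumeContainer, as exercised in the tests):

  <property>
    <!-- Pause, rather than kill, running OPPORTUNISTIC containers when a
         GUARANTEED container needs their resources. Default: false. -->
    <name>yarn.nodemanager.opportunistic-containers-use-pause-for-preemption</name>
    <value>true</value>
  </property>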


http://git-wip-us.apache.org/repos/asf/hadoop/blob/66b01b34/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index a6d3360..e4282ee 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -1003,6 +1003,15 @@ public class YarnConfiguration extends Configuration {
       NM_PREFIX + "container-retry-minimum-interval-ms";
   public static final int DEFAULT_NM_CONTAINER_RETRY_MINIMUM_INTERVAL_MS = 1000;
 
+  /**
+   * Use container pause, rather than kill, as the preemption policy for
+   * opportunistic containers queued at a NodeManager.
+   **/
+  public static final String NM_CONTAINER_QUEUING_USE_PAUSE_FOR_PREEMPTION =
+      NM_PREFIX + "opportunistic-containers-use-pause-for-preemption";
+  public static final boolean
+      DEFAULT_NM_CONTAINER_QUEUING_USE_PAUSE_FOR_PREEMPTION = false;
+
   /** Interval at which the delayed token removal thread runs */
   public static final String RM_DELAYED_DELEGATION_TOKEN_REMOVAL_INTERVAL_MS =
       RM_PREFIX + "delayed.delegation-token.removal-interval-ms";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/66b01b34/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index 81c9cb2..b4ddab8 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -2922,6 +2922,15 @@
 
   <property>
     <description>
+    Use container pause, rather than kill, as the preemption policy for
+    opportunistic containers queued at a NodeManager.
+    </description>
+    <name>yarn.nodemanager.opportunistic-containers-use-pause-for-preemption</name>
+    <value>false</value>
+  </property>
+
+  <property>
+    <description>
     Error filename pattern, to identify the file in the container's
     Log directory which contain the container's error log. As error file
     redirection is done by client/AM and yarn will not be aware of the error

http://git-wip-us.apache.org/repos/asf/hadoop/blob/66b01b34/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/Container.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/Container.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/Container.java
index bd3f06d..3a59fd8 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/Container.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/Container.java
@@ -94,4 +94,6 @@ public interface Container extends EventHandler<ContainerEvent> {
   void sendKillEvent(int exitStatus, String description);
 
   boolean isRecovering();
+
+  void sendPauseEvent(String description);
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/66b01b34/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
index 44068be..1969704 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
@@ -800,15 +800,22 @@ public class ContainerImpl implements Container {
   @SuppressWarnings("unchecked") // dispatcher not typed
   @Override
   public void sendLaunchEvent() {
-    ContainersLauncherEventType launcherEvent =
-        ContainersLauncherEventType.LAUNCH_CONTAINER;
-    if (recoveredStatus == RecoveredContainerStatus.LAUNCHED) {
-      // try to recover a container that was previously launched
-      launcherEvent = ContainersLauncherEventType.RECOVER_CONTAINER;
+    if (ContainerState.PAUSED == getContainerState()) {
+      dispatcher.getEventHandler().handle(
+          new ContainerResumeEvent(containerId,
+              "Container Resumed as some resources freed up"));
+    } else {
+      ContainersLauncherEventType launcherEvent =
+          ContainersLauncherEventType.LAUNCH_CONTAINER;
+      if (recoveredStatus == RecoveredContainerStatus.LAUNCHED) {
+        // try to recover a container that was previously launched
+        launcherEvent = ContainersLauncherEventType.RECOVER_CONTAINER;
+      }
+      containerLaunchStartTime = clock.getTime();
+      dispatcher.getEventHandler().handle(
+          new ContainersLauncherEvent(this, launcherEvent));
     }
-    containerLaunchStartTime = clock.getTime();
-    dispatcher.getEventHandler().handle(
-        new ContainersLauncherEvent(this, launcherEvent));
+
   }
 
   @SuppressWarnings("unchecked") // dispatcher not typed
@@ -828,6 +835,13 @@ public class ContainerImpl implements Container {
   }
 
   @SuppressWarnings("unchecked") // dispatcher not typed
+  @Override
+  public void sendPauseEvent(String description) {
+    dispatcher.getEventHandler().handle(
+        new ContainerPauseEvent(containerId, description));
+  }
+
+  @SuppressWarnings("unchecked") // dispatcher not typed
   private void sendRelaunchEvent() {
     ContainersLauncherEventType launcherEvent =
         ContainersLauncherEventType.RELAUNCH_CONTAINER;
@@ -1777,7 +1791,7 @@ public class ContainerImpl implements Container {
 
   /**
    * Transitions upon receiving PAUSE_CONTAINER.
-   * - RUNNING -> PAUSED
+   * - RUNNING -> PAUSING
    */
   @SuppressWarnings("unchecked") // dispatcher not typed
   static class PauseContainerTransition implements

http://git-wip-us.apache.org/repos/asf/hadoop/blob/66b01b34/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/ContainerScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/ContainerScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/ContainerScheduler.java
index 24530b3..3c1eb6e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/ContainerScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/ContainerScheduler.java
@@ -19,6 +19,7 @@
 package org.apache.hadoop.yarn.server.nodemanager.containermanager.scheduler;
 
 import com.google.common.annotations.VisibleForTesting;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.service.AbstractService;
 import org.apache.hadoop.yarn.api.records.ContainerExitStatus;
 import org.apache.hadoop.yarn.api.records.ContainerId;
@@ -31,6 +32,7 @@ import org.apache.hadoop.yarn.server.api.records.ContainerQueuingLimit;
 import org.apache.hadoop.yarn.server.api.records.OpportunisticContainersStatus;
 import org.apache.hadoop.yarn.server.nodemanager.Context;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerState;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.monitor.ContainersMonitor;
 
 
@@ -71,7 +73,7 @@ public class ContainerScheduler extends AbstractService implements
       queuedOpportunisticContainers = new LinkedHashMap<>();
 
   // Used to keep track of containers that have been marked to be killed
-  // to make room for a guaranteed container.
+  // or paused to make room for a guaranteed container.
   private final Map<ContainerId, Container> oppContainersToKill =
       new HashMap<>();
 
@@ -95,6 +97,8 @@ public class ContainerScheduler extends AbstractService implements
   private final AsyncDispatcher dispatcher;
   private final NodeManagerMetrics metrics;
 
+  private boolean usePauseEventForPreemption = false;
+
   /**
    * Instantiate a Container Scheduler.
    * @param context NodeManager Context.
@@ -109,6 +113,17 @@ public class ContainerScheduler extends AbstractService implements
             DEFAULT_NM_OPPORTUNISTIC_CONTAINERS_MAX_QUEUE_LENGTH));
   }
 
+
+  @Override
+  public void serviceInit(Configuration conf) throws Exception {
+    super.serviceInit(conf);
+    this.usePauseEventForPreemption =
+        conf.getBoolean(
+            YarnConfiguration.NM_CONTAINER_QUEUING_USE_PAUSE_FOR_PREEMPTION,
+            YarnConfiguration.
+                DEFAULT_NM_CONTAINER_QUEUING_USE_PAUSE_FOR_PREEMPTION);
+  }
+
   @VisibleForTesting
   public ContainerScheduler(Context context, AsyncDispatcher dispatcher,
       NodeManagerMetrics metrics, int qLength) {
@@ -133,8 +148,9 @@ public class ContainerScheduler extends AbstractService implements
     case SCHEDULE_CONTAINER:
       scheduleContainer(event.getContainer());
       break;
+    case CONTAINER_PAUSED:
     case CONTAINER_COMPLETED:
-      onContainerCompleted(event.getContainer());
+      onResourcesReclaimed(event.getContainer());
       break;
     case SHED_QUEUED_CONTAINERS:
       shedQueuedOpportunisticContainers();
@@ -164,6 +180,12 @@ public class ContainerScheduler extends AbstractService implements
     return this.queuedOpportunisticContainers.size();
   }
 
+  @VisibleForTesting
+  public void setUsePauseEventForPreemption(
+      boolean usePauseEventForPreemption) {
+    this.usePauseEventForPreemption = usePauseEventForPreemption;
+  }
+
   public OpportunisticContainersStatus getOpportunisticContainersStatus() {
     this.opportunisticContainersStatus.setQueuedOpportContainers(
         getNumQueuedOpportunisticContainers());
@@ -178,7 +200,7 @@ public class ContainerScheduler extends AbstractService implements
     return this.opportunisticContainersStatus;
   }
 
-  private void onContainerCompleted(Container container) {
+  private void onResourcesReclaimed(Container container) {
     oppContainersToKill.remove(container.getContainerId());
 
     // This could be killed externally for eg. by the ContainerManager,
@@ -206,6 +228,24 @@ public class ContainerScheduler extends AbstractService implements
     // Start pending guaranteed containers, if resources available.
     boolean resourcesAvailable =
         startContainersFromQueue(queuedGuaranteedContainers.values());
+    // Resume opportunistic containers, if resource available.
+    if (resourcesAvailable) {
+      List<Container> pausedContainers = new ArrayList<Container>();
+      Map<ContainerId, Container> containers =
+          context.getContainers();
+      for (Map.Entry<ContainerId, Container> entry : containers.entrySet()) {
+        ContainerId contId = entry.getKey();
+        // Find containers that were not already started and are in paused state
+        if (!runningContainers.containsKey(contId)) {
+          if (containers.get(contId).getContainerState()
+              == ContainerState.PAUSED) {
+            pausedContainers.add(containers.get(contId));
+          }
+        }
+      }
+      resourcesAvailable =
+          startContainersFromQueue(pausedContainers);
+    }
     // Start opportunistic containers, if resources available.
     if (resourcesAvailable) {
       startContainersFromQueue(queuedOpportunisticContainers.values());
@@ -245,9 +285,9 @@ public class ContainerScheduler extends AbstractService implements
       if (container.getContainerTokenIdentifier().getExecutionType() ==
           ExecutionType.GUARANTEED) {
         queuedGuaranteedContainers.put(container.getContainerId(), container);
-        // Kill running opportunistic containers to make space for
+        // Kill / Pause running opportunistic containers to make space for
         // guaranteed container.
-        killOpportunisticContainers(container);
+        reclaimOpportunisticContainerResources(container);
       } else {
         if (queuedOpportunisticContainers.size() <= maxOppQueueLength) {
           LOG.info("Opportunistic container {} will be queued at the NM.",
@@ -276,22 +316,34 @@ public class ContainerScheduler extends AbstractService implements
     }
   }
 
-  private void killOpportunisticContainers(Container container) {
-    List<Container> extraOpportContainersToKill =
-        pickOpportunisticContainersToKill(container.getContainerId());
+  @SuppressWarnings("unchecked")
+  private void reclaimOpportunisticContainerResources(Container container) {
+    List<Container> extraOppContainersToReclaim =
+        pickOpportunisticContainersToReclaimResources(
+            container.getContainerId());
     // Kill the opportunistic containers that were chosen.
-    for (Container contToKill : extraOpportContainersToKill) {
-      contToKill.sendKillEvent(
-          ContainerExitStatus.KILLED_BY_CONTAINER_SCHEDULER,
-          "Container Killed to make room for Guaranteed Container.");
-      oppContainersToKill.put(contToKill.getContainerId(), contToKill);
+    for (Container contToReclaim : extraOppContainersToReclaim) {
+      String preemptionAction = usePauseEventForPreemption
+          ? "paused" : "killed";
       LOG.info(
-          "Opportunistic container {} will be killed in order to start the "
+          "Container {} will be {} to start the "
               + "execution of guaranteed container {}.",
-          contToKill.getContainerId(), container.getContainerId());
+          contToReclaim.getContainerId(), preemptionAction,
+          container.getContainerId());
+
+      if (usePauseEventForPreemption) {
+        contToReclaim.sendPauseEvent(
+            "Container Paused to make room for Guaranteed Container");
+      } else {
+        contToReclaim.sendKillEvent(
+            ContainerExitStatus.KILLED_BY_CONTAINER_SCHEDULER,
+            "Container Killed to make room for Guaranteed Container.");
+      }
+      oppContainersToKill.put(contToReclaim.getContainerId(), contToReclaim);
     }
   }
 
+  @SuppressWarnings("unchecked")
   private void startAllocatedContainer(Container container) {
     LOG.info("Starting container [" + container.getContainerId()+ "]");
     runningContainers.put(container.getContainerId(), container);
@@ -303,7 +355,7 @@ public class ContainerScheduler extends AbstractService implements
     container.sendLaunchEvent();
   }
 
-  private List<Container> pickOpportunisticContainersToKill(
+  private List<Container> pickOpportunisticContainersToReclaimResources(
       ContainerId containerToStartId) {
     // The opportunistic containers that need to be killed for the
     // given container to start.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/66b01b34/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/TestContainerSchedulerQueuing.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/TestContainerSchedulerQueuing.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/TestContainerSchedulerQueuing.java
index 8264f2e..5999488 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/TestContainerSchedulerQueuing.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/TestContainerSchedulerQueuing.java
@@ -23,6 +23,8 @@ import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.HashMap;
 import java.util.List;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
 
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.UnsupportedFileSystemException;
@@ -47,6 +49,7 @@ import org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor;
 import org.apache.hadoop.yarn.server.nodemanager.DeletionService;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.BaseContainerManagerTest;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.ContainerManagerImpl;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerState;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.monitor.ContainersMonitor;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.monitor.ContainersMonitorImpl;
@@ -121,18 +124,38 @@ public class TestContainerSchedulerQueuing extends BaseContainerManagerTest {
   @Override
   protected ContainerExecutor createContainerExecutor() {
     DefaultContainerExecutor exec = new DefaultContainerExecutor() {
+      ConcurrentMap<String, Boolean> oversleepMap =
+          new ConcurrentHashMap<String, Boolean>();
       @Override
       public int launchContainer(ContainerStartContext ctx)
           throws IOException, ConfigurationException {
+        oversleepMap.put(ctx.getContainer().getContainerId().toString(), false);
         if (delayContainers) {
           try {
             Thread.sleep(10000);
+            if (oversleepMap.get(
+                ctx.getContainer().getContainerId().toString())) {
+              Thread.sleep(10000);
+            }
           } catch (InterruptedException e) {
             // Nothing..
           }
         }
         return super.launchContainer(ctx);
       }
+
+      @Override
+      public void pauseContainer(Container container) {
+        // To mimic pausing we force the container to be in the PAUSED state
+        // a little longer by oversleeping.
+        oversleepMap.put(container.getContainerId().toString(), true);
+        LOG.info("Container was paused");
+      }
+
+      @Override
+      public void resumeContainer(Container container) {
+        LOG.info("Container was resumed");
+      }
     };
     exec.setConf(conf);
     return spy(exec);
@@ -418,6 +441,86 @@ public class TestContainerSchedulerQueuing extends BaseContainerManagerTest {
   }
 
   /**
+   * Submit two OPPORTUNISTIC containers and one GUARANTEED container. The
+   * resource requests are sized so that only one container can run at a
+   * time. Thus, the OPPORTUNISTIC container that started running will be
+   * paused so that the GUARANTEED container can start.
+   * Once the GUARANTEED container finishes its execution, the remaining
+   * OPPORTUNISTIC container will be executed.
+   * @throws Exception
+   */
+  @Test
+  public void testPauseOpportunisticForGuaranteedContainer() throws Exception {
+    containerManager.start();
+    containerManager.getContainerScheduler().
+        setUsePauseEventForPreemption(true);
+    ContainerLaunchContext containerLaunchContext =
+        recordFactory.newRecordInstance(ContainerLaunchContext.class);
+
+    List<StartContainerRequest> list = new ArrayList<>();
+    list.add(StartContainerRequest.newInstance(
+        containerLaunchContext,
+        createContainerToken(createContainerId(0), DUMMY_RM_IDENTIFIER,
+            context.getNodeId(),
+            user, BuilderUtils.newResource(2048, 1),
+            context.getContainerTokenSecretManager(), null,
+            ExecutionType.OPPORTUNISTIC)));
+
+    StartContainersRequest allRequests =
+        StartContainersRequest.newInstance(list);
+    containerManager.startContainers(allRequests);
+
+    BaseContainerManagerTest.waitForNMContainerState(containerManager,
+        createContainerId(0), ContainerState.RUNNING, 40);
+
+    list = new ArrayList<>();
+    list.add(StartContainerRequest.newInstance(
+        containerLaunchContext,
+        createContainerToken(createContainerId(1), DUMMY_RM_IDENTIFIER,
+            context.getNodeId(),
+            user, BuilderUtils.newResource(2048, 1),
+            context.getContainerTokenSecretManager(), null,
+            ExecutionType.GUARANTEED)));
+    allRequests =
+        StartContainersRequest.newInstance(list);
+
+    containerManager.startContainers(allRequests);
+
+    BaseContainerManagerTest.waitForNMContainerState(containerManager,
+        createContainerId(1), ContainerState.RUNNING, 40);
+
+    // Get container statuses. Container 0 should be paused, container 1
+    // should be running.
+    List<ContainerId> statList = new ArrayList<ContainerId>();
+    for (int i = 0; i < 2; i++) {
+      statList.add(createContainerId(i));
+    }
+    GetContainerStatusesRequest statRequest =
+        GetContainerStatusesRequest.newInstance(statList);
+    List<ContainerStatus> containerStatuses = containerManager
+        .getContainerStatuses(statRequest).getContainerStatuses();
+    for (ContainerStatus status : containerStatuses) {
+      if (status.getContainerId().equals(createContainerId(0))) {
+        Assert.assertTrue(status.getDiagnostics().contains(
+            "Container Paused to make room for Guaranteed Container"));
+      } else if (status.getContainerId().equals(createContainerId(1))) {
+        Assert.assertEquals(
+            org.apache.hadoop.yarn.api.records.ContainerState.RUNNING,
+            status.getState());
+      }
+      System.out.println("\nStatus : [" + status + "]\n");
+    }
+
+    // Make sure that the GUARANTEED container completes
+    BaseContainerManagerTest.waitForNMContainerState(containerManager,
+        createContainerId(1), ContainerState.DONE, 40);
+    // Make sure that the PAUSED opportunistic container resumes and
+    // starts running
+    BaseContainerManagerTest.waitForNMContainerState(containerManager,
+        createContainerId(0), ContainerState.DONE, 40);
+  }
+
+  /**
    * 1. Submit a long running GUARANTEED container to hog all NM resources.
    * 2. Submit 6 OPPORTUNISTIC containers, all of which will be queued.
    * 3. Update the Queue Limit to 2.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/66b01b34/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/MockContainer.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/MockContainer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/MockContainer.java
index 022baea..bcdde76 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/MockContainer.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/MockContainer.java
@@ -235,4 +235,9 @@ public class MockContainer implements Container {
   public boolean isRecovering() {
     return false;
   }
+
+  @Override
+  public void sendPauseEvent(String description) {
+
+  }
 }




[13/50] [abbrv] hadoop git commit: YARN-5067 Support specifying resources for AM containers in SLS. (Yufei Gu via Haibo Chen)

Posted by as...@apache.org.
YARN-5067 Support specifying resources for AM containers in SLS. (Yufei Gu via Haibo Chen)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/147df300
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/147df300
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/147df300

Branch: refs/heads/YARN-5972
Commit: 147df300bf00b5f4ed250426b6ccdd69085466da
Parents: 38996fd
Author: Haibo Chen <ha...@apache.org>
Authored: Fri Jun 30 16:50:06 2017 -0700
Committer: Haibo Chen <ha...@apache.org>
Committed: Fri Jun 30 17:03:44 2017 -0700

----------------------------------------------------------------------
 .../org/apache/hadoop/yarn/sls/SLSRunner.java   | 38 +++++++++++++++----
 .../hadoop/yarn/sls/appmaster/AMSimulator.java  | 39 +++++++-------------
 .../yarn/sls/appmaster/MRAMSimulator.java       | 11 +++---
 .../hadoop/yarn/sls/conf/SLSConfiguration.java  | 15 ++++++++
 .../yarn/sls/appmaster/TestAMSimulator.java     |  4 +-
 5 files changed, 68 insertions(+), 39 deletions(-)
----------------------------------------------------------------------
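
As the SLSRunner hunk below shows, the AM container size now comes from a cluster-wide SLSConfiguration default that individual jobs can override via optional am.memory and am.vcores keys in the job trace. A hypothetical sls-jobs.json fragment using the new keys (all other fields are illustrative):

  {
    "am.memory": 2048,
    "am.vcores": 2,
    "job.start.ms": 0,
    "job.end.ms": 95375,
    "job.queue.name": "sls_queue_1",
    "job.tasks": [ ... ]
  }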


http://git-wip-us.apache.org/repos/asf/hadoop/blob/147df300/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/SLSRunner.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/SLSRunner.java b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/SLSRunner.java
index 02da056..a534f03 100644
--- a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/SLSRunner.java
+++ b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/SLSRunner.java
@@ -406,7 +406,7 @@ public class SLSRunner extends Configured implements Tool {
     }
 
     runNewAM(amType, user, queue, oldAppId, jobStartTime, jobFinishTime,
-        getTaskContainers(jsonJob), null);
+        getTaskContainers(jsonJob), null, getAMContainerResource(jsonJob));
   }
 
   private List<ContainerSimulator> getTaskContainers(Map jsonJob)
@@ -558,7 +558,8 @@ public class SLSRunner extends Configured implements Tool {
 
     // Only supports the default job type currently
     runNewAM(SLSUtils.DEFAULT_JOB_TYPE, user, jobQueue, oldJobId,
-        jobStartTimeMS, jobFinishTimeMS, containerList, null);
+        jobStartTimeMS, jobFinishTimeMS, containerList, null,
+        getAMContainerResource(null));
   }
 
   private Resource getDefaultContainerResource() {
@@ -676,7 +677,8 @@ public class SLSRunner extends Configured implements Tool {
         }
 
         runNewAM(SLSUtils.DEFAULT_JOB_TYPE, user, jobQueue, oldJobId,
-            jobStartTimeMS, jobFinishTimeMS, containerList, rr);
+            jobStartTimeMS, jobFinishTimeMS, containerList, rr,
+            getAMContainerResource(null));
       }
     } finally {
       stjp.close();
@@ -684,6 +686,26 @@ public class SLSRunner extends Configured implements Tool {
 
   }
 
+  private Resource getAMContainerResource(Map jsonJob) {
+    Resource amContainerResource =
+        SLSConfiguration.getAMContainerResource(getConf());
+
+    if (jsonJob == null) {
+      return amContainerResource;
+    }
+
+    if (jsonJob.containsKey("am.memory")) {
+      amContainerResource.setMemorySize(
+          Long.parseLong(jsonJob.get("am.memory").toString()));
+    }
+
+    if (jsonJob.containsKey("am.vcores")) {
+      amContainerResource.setVirtualCores(
+          Integer.parseInt(jsonJob.get("am.vcores").toString()));
+    }
+    return amContainerResource;
+  }
+
   private void increaseQueueAppNum(String queue) throws YarnException {
     SchedulerWrapper wrapper = (SchedulerWrapper)rm.getResourceScheduler();
     String queueName = wrapper.getRealQueueName(queue);
@@ -700,7 +722,7 @@ public class SLSRunner extends Configured implements Tool {
   private void runNewAM(String jobType, String user,
       String jobQueue, String oldJobId, long jobStartTimeMS,
       long jobFinishTimeMS, List<ContainerSimulator> containerList,
-      ReservationSubmissionRequest rr) {
+      ReservationSubmissionRequest rr, Resource amContainerResource) {
 
     AMSimulator amSim = (AMSimulator) ReflectionUtils.newInstance(
         amClassMap.get(jobType), new Configuration());
@@ -710,9 +732,11 @@ public class SLSRunner extends Configured implements Tool {
           SLSConfiguration.AM_HEARTBEAT_INTERVAL_MS,
           SLSConfiguration.AM_HEARTBEAT_INTERVAL_MS_DEFAULT);
       boolean isTracked = trackedApps.contains(oldJobId);
-      amSim.init(AM_ID++, heartbeatInterval, containerList,
-          rm, this, jobStartTimeMS, jobFinishTimeMS, user, jobQueue,
-          isTracked, oldJobId, rr, runner.getStartTimeMS());
+      AM_ID++;
+
+      amSim.init(heartbeatInterval, containerList, rm, this, jobStartTimeMS,
+          jobFinishTimeMS, user, jobQueue, isTracked, oldJobId, rr,
+          runner.getStartTimeMS(), amContainerResource);
       runner.schedule(amSim);
       maxRuntime = Math.max(maxRuntime, jobFinishTimeMS);
       numTasks += containerList.size();
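
The hunk above also threads a per-job override through the trace parser: a job entry may carry optional "am.memory" and "am.vcores" fields that take precedence over the configured defaults. A minimal sketch of the Map view such an entry presents to getAMContainerResource(Map); the two field names come from the diff, everything else here is illustrative:

import java.util.HashMap;
import java.util.Map;

public class AmOverrideSketch {
  public static void main(String[] args) {
    // One SLS trace job entry, reduced to the two optional AM fields;
    // all other trace fields are omitted for brevity.
    Map<String, Object> jsonJob = new HashMap<>();
    jsonJob.put("am.memory", "4096"); // per-job value wins over the conf default
    jsonJob.put("am.vcores", "2");    // per-job value wins over the conf default
    // A job without these keys falls back to
    // SLSConfiguration.getAMContainerResource(conf).
    System.out.println(jsonJob);
  }
}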

http://git-wip-us.apache.org/repos/asf/hadoop/blob/147df300/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/appmaster/AMSimulator.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/appmaster/AMSimulator.java b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/appmaster/AMSimulator.java
index 7ce3ef0..c69805e 100644
--- a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/appmaster/AMSimulator.java
+++ b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/appmaster/AMSimulator.java
@@ -20,7 +20,6 @@ package org.apache.hadoop.yarn.sls.appmaster;
 
 import java.io.IOException;
 import java.lang.reflect.UndeclaredThrowableException;
-import java.nio.ByteBuffer;
 import java.security.PrivilegedExceptionAction;
 import java.util.ArrayList;
 import java.util.HashMap;
@@ -35,18 +34,13 @@ import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
-import org.apache.hadoop.yarn.api.protocolrecords
-        .FinishApplicationMasterRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.ReservationSubmissionRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.SubmitApplicationRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationResponse;
-
-import org.apache.hadoop.yarn.api.protocolrecords
-        .RegisterApplicationMasterRequest;
-import org.apache.hadoop.yarn.api.protocolrecords
-        .RegisterApplicationMasterResponse;
-import org.apache.hadoop.yarn.api.records.ApplicationAccessType;
+import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterResponse;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
@@ -54,7 +48,6 @@ import org.apache.hadoop.yarn.api.records.Container;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
 import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
-import org.apache.hadoop.yarn.api.records.LocalResource;
 import org.apache.hadoop.yarn.api.records.Priority;
 import org.apache.hadoop.yarn.api.records.ReservationId;
 import org.apache.hadoop.yarn.api.records.Resource;
@@ -66,7 +59,6 @@ import org.apache.hadoop.yarn.security.AMRMTokenIdentifier;
 import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
 import org.apache.hadoop.yarn.sls.scheduler.SchedulerMetrics;
 import org.apache.hadoop.yarn.util.Records;
-import org.apache.hadoop.yarn.util.resource.Resources;
 import org.apache.hadoop.yarn.sls.scheduler.ContainerSimulator;
 import org.apache.hadoop.yarn.sls.scheduler.SchedulerWrapper;
 import org.apache.hadoop.yarn.sls.SLSRunner;
@@ -116,9 +108,7 @@ public abstract class AMSimulator extends TaskRunner.Task {
   
   private static final Logger LOG = LoggerFactory.getLogger(AMSimulator.class);
 
-  // resource for AM container
-  private final static int MR_AM_CONTAINER_RESOURCE_MEMORY_MB = 1024;
-  private final static int MR_AM_CONTAINER_RESOURCE_VCORES = 1;
+  private Resource amContainerResource;
 
   private ReservationSubmissionRequest reservationRequest;
 
@@ -127,11 +117,12 @@ public abstract class AMSimulator extends TaskRunner.Task {
   }
 
   @SuppressWarnings("checkstyle:parameternumber")
-  public void init(int id, int heartbeatInterval,
+  public void init(int heartbeatInterval,
       List<ContainerSimulator> containerList, ResourceManager resourceManager,
       SLSRunner slsRunnner, long startTime, long finishTime, String simUser,
       String simQueue, boolean tracked, String oldApp,
-      ReservationSubmissionRequest rr, long baseTimeMS) {
+      ReservationSubmissionRequest rr, long baseTimeMS,
+      Resource amContainerResource) {
     super.init(startTime, startTime + 1000000L * heartbeatInterval,
         heartbeatInterval);
     this.user = simUser;
@@ -144,6 +135,7 @@ public abstract class AMSimulator extends TaskRunner.Task {
     this.traceStartTimeMS = startTime;
     this.traceFinishTimeMS = finishTime;
     this.reservationRequest = rr;
+    this.amContainerResource = amContainerResource;
   }
 
   /**
@@ -318,16 +310,13 @@ public abstract class AMSimulator extends TaskRunner.Task {
     appSubContext.setPriority(Priority.newInstance(0));
     ContainerLaunchContext conLauContext = 
         Records.newRecord(ContainerLaunchContext.class);
-    conLauContext.setApplicationACLs(
-        new HashMap<ApplicationAccessType, String>());
-    conLauContext.setCommands(new ArrayList<String>());
-    conLauContext.setEnvironment(new HashMap<String, String>());
-    conLauContext.setLocalResources(new HashMap<String, LocalResource>());
-    conLauContext.setServiceData(new HashMap<String, ByteBuffer>());
+    conLauContext.setApplicationACLs(new HashMap<>());
+    conLauContext.setCommands(new ArrayList<>());
+    conLauContext.setEnvironment(new HashMap<>());
+    conLauContext.setLocalResources(new HashMap<>());
+    conLauContext.setServiceData(new HashMap<>());
     appSubContext.setAMContainerSpec(conLauContext);
-    appSubContext.setResource(Resources
-        .createResource(MR_AM_CONTAINER_RESOURCE_MEMORY_MB,
-            MR_AM_CONTAINER_RESOURCE_VCORES));
+    appSubContext.setResource(amContainerResource);
 
     if(reservationId != null) {
       appSubContext.setReservationID(reservationId);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/147df300/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/appmaster/MRAMSimulator.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/appmaster/MRAMSimulator.java b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/appmaster/MRAMSimulator.java
index 7ac30ab..21bf054 100644
--- a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/appmaster/MRAMSimulator.java
+++ b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/appmaster/MRAMSimulator.java
@@ -34,6 +34,7 @@ import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
 import org.apache.hadoop.yarn.api.protocolrecords.ReservationSubmissionRequest;
 import org.apache.hadoop.yarn.api.records.Container;
+import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.ContainerExitStatus;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.ContainerStatus;
@@ -114,14 +115,14 @@ public class MRAMSimulator extends AMSimulator {
       LoggerFactory.getLogger(MRAMSimulator.class);
 
   @SuppressWarnings("checkstyle:parameternumber")
-  public void init(int id, int heartbeatInterval,
+  public void init(int heartbeatInterval,
       List<ContainerSimulator> containerList, ResourceManager rm, SLSRunner se,
       long traceStartTime, long traceFinishTime, String user, String queue, 
       boolean isTracked, String oldAppId, ReservationSubmissionRequest rr,
-      long baselineStartTimeMS) {
-    super.init(id, heartbeatInterval, containerList, rm, se, 
-              traceStartTime, traceFinishTime, user, queue,
-              isTracked, oldAppId, rr, baselineStartTimeMS);
+      long baselineStartTimeMS, Resource amContainerResource) {
+    super.init(heartbeatInterval, containerList, rm, se,
+        traceStartTime, traceFinishTime, user, queue, isTracked, oldAppId,
+        rr, baselineStartTimeMS, amContainerResource);
     amtype = "mapreduce";
     
     // get map/reduce tasks

http://git-wip-us.apache.org/repos/asf/hadoop/blob/147df300/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/conf/SLSConfiguration.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/conf/SLSConfiguration.java b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/conf/SLSConfiguration.java
index 8fd5b3f..038f202 100644
--- a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/conf/SLSConfiguration.java
+++ b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/conf/SLSConfiguration.java
@@ -20,6 +20,8 @@ package org.apache.hadoop.yarn.sls.conf;
 
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.api.records.Resource;
 
 @Private
 @Unstable
@@ -62,6 +64,14 @@ public class SLSConfiguration {
   public static final int AM_HEARTBEAT_INTERVAL_MS_DEFAULT = 1000;
   public static final String AM_TYPE = AM_PREFIX + "type.";
 
+  public static final String AM_CONTAINER_MEMORY = AM_PREFIX +
+      "container.memory";
+  public static final int AM_CONTAINER_MEMORY_DEFAULT = 1024;
+
+  public static final String AM_CONTAINER_VCORES = AM_PREFIX +
+      "container.vcores";
+  public static final int AM_CONTAINER_VCORES_DEFAULT = 1;
+
   // container
   public static final String CONTAINER_PREFIX = PREFIX + "container.";
   public static final String CONTAINER_MEMORY_MB = CONTAINER_PREFIX
@@ -70,4 +80,9 @@ public class SLSConfiguration {
   public static final String CONTAINER_VCORES = CONTAINER_PREFIX + "vcores";
   public static final int CONTAINER_VCORES_DEFAULT = 1;
 
+  public static Resource getAMContainerResource(Configuration conf) {
+    return Resource.newInstance(
+        conf.getLong(AM_CONTAINER_MEMORY, AM_CONTAINER_MEMORY_DEFAULT),
+        conf.getInt(AM_CONTAINER_VCORES, AM_CONTAINER_VCORES_DEFAULT));
+  }
 }
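
A minimal sketch of how the new keys and helper might be exercised; the constants are the ones added above, while the class name and the printed Resource format are assumptions:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.sls.conf.SLSConfiguration;

public class AmContainerResourceSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Override the 1024 MB / 1 vcore defaults added above.
    conf.setLong(SLSConfiguration.AM_CONTAINER_MEMORY, 2048L);
    conf.setInt(SLSConfiguration.AM_CONTAINER_VCORES, 2);
    Resource am = SLSConfiguration.getAMContainerResource(conf);
    System.out.println(am); // e.g. <memory:2048, vCores:2>
  }
}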

http://git-wip-us.apache.org/repos/asf/hadoop/blob/147df300/hadoop-tools/hadoop-sls/src/test/java/org/apache/hadoop/yarn/sls/appmaster/TestAMSimulator.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-sls/src/test/java/org/apache/hadoop/yarn/sls/appmaster/TestAMSimulator.java b/hadoop-tools/hadoop-sls/src/test/java/org/apache/hadoop/yarn/sls/appmaster/TestAMSimulator.java
index 56aa219..02dc26e 100644
--- a/hadoop-tools/hadoop-sls/src/test/java/org/apache/hadoop/yarn/sls/appmaster/TestAMSimulator.java
+++ b/hadoop-tools/hadoop-sls/src/test/java/org/apache/hadoop/yarn/sls/appmaster/TestAMSimulator.java
@@ -133,8 +133,8 @@ public class TestAMSimulator {
     String appId = "app1";
     String queue = "default";
     List<ContainerSimulator> containers = new ArrayList<>();
-    app.init(1, 1000, containers, rm, null, 0, 1000000L, "user1", queue,
-        true, appId, null, 0);
+    app.init(1000, containers, rm, null, 0, 1000000L, "user1", queue, true,
+        appId, null, 0, SLSConfiguration.getAMContainerResource(conf));
     app.firstStep();
 
     verifySchedulerMetrics(appId);




[26/50] [abbrv] hadoop git commit: HADOOP-14563. LoadBalancingKMSClientProvider#warmUpEncryptedKeys swallows IOException. Contributed by Rushabh S Shah.

Posted by as...@apache.org.
HADOOP-14563. LoadBalancingKMSClientProvider#warmUpEncryptedKeys swallows IOException. Contributed by Rushabh S Shah.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8153fe2b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8153fe2b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8153fe2b

Branch: refs/heads/YARN-5972
Commit: 8153fe2bd35fb4df0b64f93ac0046e34d4807ac3
Parents: 82cb2a6
Author: Wei-Chiu Chuang <we...@apache.org>
Authored: Fri Jul 7 06:13:10 2017 -0700
Committer: Wei-Chiu Chuang <we...@apache.org>
Committed: Fri Jul 7 06:13:10 2017 -0700

----------------------------------------------------------------------
 .../key/kms/LoadBalancingKMSClientProvider.java | 12 +++-
 .../kms/TestLoadBalancingKMSClientProvider.java | 63 ++++++++++++++++++++
 2 files changed, 74 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8153fe2b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/LoadBalancingKMSClientProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/LoadBalancingKMSClientProvider.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/LoadBalancingKMSClientProvider.java
index aa24993..de9c988 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/LoadBalancingKMSClientProvider.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/LoadBalancingKMSClientProvider.java
@@ -39,6 +39,7 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
 
 /**
  * A simple LoadBalancing KMSClientProvider that round-robins requests
@@ -159,15 +160,24 @@ public class LoadBalancingKMSClientProvider extends KeyProvider implements
   // This request is sent to all providers in the load-balancing group
   @Override
   public void warmUpEncryptedKeys(String... keyNames) throws IOException {
+    Preconditions.checkArgument(providers.length > 0,
+        "No providers are configured");
+    boolean success = false;
+    IOException e = null;
     for (KMSClientProvider provider : providers) {
       try {
         provider.warmUpEncryptedKeys(keyNames);
+        success = true;
       } catch (IOException ioe) {
+        e = ioe;
         LOG.error(
             "Error warming up keys for provider with url"
-            + "[" + provider.getKMSUrl() + "]");
+            + "[" + provider.getKMSUrl() + "]", ioe);
       }
     }
+    if (!success && e != null) {
+      throw e;
+    }
   }
 
   // This request is sent to all providers in the load-balancing group
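
The fix follows a common fan-out idiom: attempt every provider, keep the last IOException, and throw only if no provider succeeded. A generic, self-contained sketch of that idiom; the names are illustrative and not the Hadoop KMS API:

import java.io.IOException;
import java.util.List;
import java.util.concurrent.Callable;

public class FanOutSketch {
  static void warmUpAll(List<Callable<Void>> providers) throws IOException {
    IOException last = null;
    boolean anySucceeded = false;
    for (Callable<Void> provider : providers) {
      try {
        provider.call();
        anySucceeded = true;
      } catch (Exception e) {
        // Remember the failure but keep trying the remaining providers.
        last = e instanceof IOException ? (IOException) e : new IOException(e);
      }
    }
    if (!anySucceeded && last != null) {
      throw last; // previously this failure was only logged
    }
  }
}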

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8153fe2b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/kms/TestLoadBalancingKMSClientProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/kms/TestLoadBalancingKMSClientProvider.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/kms/TestLoadBalancingKMSClientProvider.java
index 499b991..d14dd59 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/kms/TestLoadBalancingKMSClientProvider.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/kms/TestLoadBalancingKMSClientProvider.java
@@ -34,6 +34,7 @@ import org.apache.hadoop.crypto.key.KeyProvider;
 import org.apache.hadoop.crypto.key.KeyProvider.Options;
 import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension;
 import org.apache.hadoop.security.authentication.client.AuthenticationException;
+import org.apache.hadoop.security.authorize.AuthorizationException;
 import org.junit.Test;
 import org.mockito.Mockito;
 
@@ -257,4 +258,66 @@ public class TestLoadBalancingKMSClientProvider {
           "AuthenticationException"));
     }
   }
+
+  /**
+   * Tests {@link LoadBalancingKMSClientProvider#warmUpEncryptedKeys(String...)}
+   * error handling when all the providers throw {@link IOException}.
+   * @throws Exception
+   */
+  @Test
+  public void testWarmUpEncryptedKeysWhenAllProvidersFail() throws Exception {
+    Configuration conf = new Configuration();
+    KMSClientProvider p1 = mock(KMSClientProvider.class);
+    String keyName = "key1";
+    Mockito.doThrow(new IOException(new AuthorizationException("p1"))).when(p1)
+        .warmUpEncryptedKeys(Mockito.anyString());
+    KMSClientProvider p2 = mock(KMSClientProvider.class);
+    Mockito.doThrow(new IOException(new AuthorizationException("p2"))).when(p2)
+        .warmUpEncryptedKeys(Mockito.anyString());
+
+    when(p1.getKMSUrl()).thenReturn("p1");
+    when(p2.getKMSUrl()).thenReturn("p2");
+
+    LoadBalancingKMSClientProvider kp = new LoadBalancingKMSClientProvider(
+        new KMSClientProvider[] {p1, p2}, 0, conf);
+    try {
+      kp.warmUpEncryptedKeys(keyName);
+      fail("Should fail since both providers threw IOException");
+    } catch (Exception e) {
+      assertTrue(e.getCause() instanceof IOException);
+    }
+    Mockito.verify(p1, Mockito.times(1)).warmUpEncryptedKeys(keyName);
+    Mockito.verify(p2, Mockito.times(1)).warmUpEncryptedKeys(keyName);
+  }
+
+  /**
+   * Tests {@link LoadBalancingKMSClientProvider#warmUpEncryptedKeys(String...)}
+   * error handling when at least one provider succeeds.
+   * @throws Exception
+   */
+  @Test
+  public void testWarmUpEncryptedKeysWhenOneProviderSucceeds()
+      throws Exception {
+    Configuration conf = new Configuration();
+    KMSClientProvider p1 = mock(KMSClientProvider.class);
+    String keyName = "key1";
+    Mockito.doThrow(new IOException(new AuthorizationException("p1"))).when(p1)
+        .warmUpEncryptedKeys(Mockito.anyString());
+    KMSClientProvider p2 = mock(KMSClientProvider.class);
+    Mockito.doNothing().when(p2)
+        .warmUpEncryptedKeys(Mockito.anyString());
+
+    when(p1.getKMSUrl()).thenReturn("p1");
+    when(p2.getKMSUrl()).thenReturn("p2");
+
+    LoadBalancingKMSClientProvider kp = new LoadBalancingKMSClientProvider(
+        new KMSClientProvider[] {p1, p2}, 0, conf);
+    try {
+      kp.warmUpEncryptedKeys(keyName);
+    } catch (Exception e) {
+      fail("Should not throw Exception since p2 doesn't throw Exception");
+    }
+    Mockito.verify(p1, Mockito.times(1)).warmUpEncryptedKeys(keyName);
+    Mockito.verify(p2, Mockito.times(1)).warmUpEncryptedKeys(keyName);
+  }
 }




[24/50] [abbrv] hadoop git commit: YARN-6708. Nodemanager container crash after ext3 folder limit. Contributed by Bibin A Chundatt

Posted by as...@apache.org.
YARN-6708. Nodemanager container crash after ext3 folder limit. Contributed by Bibin A Chundatt


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7576a688
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7576a688
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7576a688

Branch: refs/heads/YARN-5972
Commit: 7576a688ea84aed7206321b1f03594e43a5f216e
Parents: 946dd25
Author: Jason Lowe <jl...@yahoo-inc.com>
Authored: Thu Jul 6 09:40:09 2017 -0500
Committer: Jason Lowe <jl...@yahoo-inc.com>
Committed: Thu Jul 6 09:40:09 2017 -0500

----------------------------------------------------------------------
 .../localizer/ContainerLocalizer.java           | 37 +++++++++++++----
 .../localizer/TestContainerLocalizer.java       | 43 +++++++++++++++++++-
 2 files changed, 71 insertions(+), 9 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7576a688/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ContainerLocalizer.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ContainerLocalizer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ContainerLocalizer.java
index 6e79857..8a46491 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ContainerLocalizer.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ContainerLocalizer.java
@@ -31,6 +31,7 @@ import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
+import java.util.Stack;
 import java.util.concurrent.Callable;
 import java.util.concurrent.CancellationException;
 import java.util.concurrent.CompletionService;
@@ -60,6 +61,7 @@ import org.apache.hadoop.util.Shell;
 import org.apache.hadoop.util.concurrent.HadoopExecutors;
 import org.apache.hadoop.yarn.YarnUncaughtExceptionHandler;
 import org.apache.hadoop.yarn.api.records.LocalResource;
+import org.apache.hadoop.yarn.api.records.LocalResourceVisibility;
 import org.apache.hadoop.yarn.api.records.SerializedException;
 import org.apache.hadoop.yarn.api.records.URL;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
@@ -95,6 +97,8 @@ public class ContainerLocalizer {
   private static final String USERCACHE_CTXT_FMT = "%s.user.cache.dirs";
   private static final FsPermission FILECACHE_PERMS =
       new FsPermission((short)0710);
+  private static final FsPermission USERCACHE_FOLDER_PERMS =
+      new FsPermission((short) 0755);
 
   private final String user;
   private final String appId;
@@ -237,10 +241,29 @@ public class ContainerLocalizer {
 
   }
 
-  Callable<Path> download(Path path, LocalResource rsrc,
+  Callable<Path> download(Path destDirPath, LocalResource rsrc,
       UserGroupInformation ugi) throws IOException {
-    diskValidator.checkStatus(new File(path.toUri().getRawPath()));
-    return new FSDownloadWrapper(lfs, ugi, conf, path, rsrc);
+    // For private localization, FSDownload creates the folder in destDirPath.
+    // Parent directories up to the user filecache folder are created here.
+    if (rsrc.getVisibility() == LocalResourceVisibility.PRIVATE) {
+      createParentDirs(destDirPath);
+    }
+    diskValidator.checkStatus(new File(destDirPath.toUri().getRawPath()));
+    return new FSDownloadWrapper(lfs, ugi, conf, destDirPath, rsrc);
+  }
+
+  private void createParentDirs(Path destDirPath) throws IOException {
+    Path parent = destDirPath.getParent();
+    Path cacheRoot = LocalCacheDirectoryManager.getCacheDirectoryRoot(parent);
+    Stack<Path> dirs = new Stack<Path>();
+    while (!parent.equals(cacheRoot)) {
+      dirs.push(parent);
+      parent = parent.getParent();
+    }
+    // Create directories with user cache permission
+    while (!dirs.isEmpty()) {
+      createDir(lfs, dirs.pop(), USERCACHE_FOLDER_PERMS);
+    }
   }
 
   static long getEstimatedSize(LocalResource rsrc) {
@@ -455,21 +478,21 @@ public class ContainerLocalizer {
       // $x/usercache/$user/filecache
       Path userFileCacheDir = new Path(base, FILECACHE);
       usersFileCacheDirs[i] = userFileCacheDir.toString();
-      createDir(lfs, userFileCacheDir, FILECACHE_PERMS, false);
+      createDir(lfs, userFileCacheDir, FILECACHE_PERMS);
       // $x/usercache/$user/appcache/$appId
       Path appBase = new Path(base, new Path(APPCACHE, appId));
       // $x/usercache/$user/appcache/$appId/filecache
       Path appFileCacheDir = new Path(appBase, FILECACHE);
       appsFileCacheDirs[i] = appFileCacheDir.toString();
-      createDir(lfs, appFileCacheDir, FILECACHE_PERMS, false);
+      createDir(lfs, appFileCacheDir, FILECACHE_PERMS);
     }
     conf.setStrings(String.format(APPCACHE_CTXT_FMT, appId), appsFileCacheDirs);
     conf.setStrings(String.format(USERCACHE_CTXT_FMT, user), usersFileCacheDirs);
   }
 
   private static void createDir(FileContext lfs, Path dirPath,
-      FsPermission perms, boolean createParent) throws IOException {
-    lfs.mkdir(dirPath, perms, createParent);
+      FsPermission perms) throws IOException {
+    lfs.mkdir(dirPath, perms, false);
     if (!perms.equals(perms.applyUMask(lfs.getUMask()))) {
       lfs.setPermission(dirPath, perms);
     }
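
A stand-alone java.nio sketch of the createParentDirs() walk above, assuming a POSIX filesystem: collect the ancestors between the download directory and the cache root, then create (or re-chmod) them top-down with the 0755 usercache permission. This illustrates the pattern only; it is not the FileContext-based implementation:

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.attribute.PosixFilePermission;
import java.nio.file.attribute.PosixFilePermissions;
import java.util.ArrayDeque;
import java.util.Deque;
import java.util.Set;

public class ParentDirsSketch {
  static void createParents(Path leafParent, Path cacheRoot)
      throws IOException {
    // Push ancestors until the cache root so they pop parent-first.
    Deque<Path> dirs = new ArrayDeque<>();
    for (Path p = leafParent; !p.equals(cacheRoot); p = p.getParent()) {
      dirs.push(p);
    }
    Set<PosixFilePermission> perms =
        PosixFilePermissions.fromString("rwxr-xr-x"); // 0755
    while (!dirs.isEmpty()) {
      Path dir = dirs.pop();
      if (!Files.exists(dir)) {
        Files.createDirectory(dir);
      }
      // Set the mode explicitly, so neither the umask nor a pre-existing
      // directory with the wrong permissions leaves it too restrictive.
      Files.setPosixFilePermissions(dir, perms);
    }
  }

  public static void main(String[] args) throws IOException {
    Path root = Files.createTempDirectory("filecache");
    createParents(root.resolve("0/0"), root);
  }
}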

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7576a688/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestContainerLocalizer.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestContainerLocalizer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestContainerLocalizer.java
index 9db10e4..6f6482f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestContainerLocalizer.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestContainerLocalizer.java
@@ -38,6 +38,7 @@ import static org.mockito.Mockito.times;
 import static org.mockito.Mockito.verify;
 import static org.mockito.Mockito.when;
 
+import java.io.File;
 import java.io.IOException;
 import java.net.InetSocketAddress;
 import java.util.ArrayList;
@@ -50,11 +51,12 @@ import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Future;
 import java.util.concurrent.TimeUnit;
 
-import com.google.common.base.Supplier;
+import org.apache.commons.io.FileUtils;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.AbstractFileSystem;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FileContext;
 import org.apache.hadoop.fs.Path;
@@ -80,7 +82,7 @@ import org.apache.hadoop.yarn.server.nodemanager.api.ResourceLocalizationSpec;
 import org.apache.hadoop.yarn.server.nodemanager.api.protocolrecords.LocalResourceStatus;
 import org.apache.hadoop.yarn.server.nodemanager.api.protocolrecords.LocalizerAction;
 import org.apache.hadoop.yarn.server.nodemanager.api.protocolrecords.LocalizerStatus;
-import org.apache.hadoop.yarn.util.ConverterUtils;
+import org.junit.After;
 import org.junit.Assert;
 import org.junit.Test;
 import org.mockito.ArgumentMatcher;
@@ -88,12 +90,15 @@ import org.mockito.Mockito;
 import org.mockito.invocation.InvocationOnMock;
 import org.mockito.stubbing.Answer;
 
+import com.google.common.base.Supplier;
+
 public class TestContainerLocalizer {
 
   static final Log LOG = LogFactory.getLog(TestContainerLocalizer.class);
   static final Path basedir =
       new Path("target", TestContainerLocalizer.class.getName());
   static final FsPermission CACHE_DIR_PERM = new FsPermission((short)0710);
+  static final FsPermission USERCACHE_DIR_PERM = new FsPermission((short) 0755);
 
   static final String appUser = "yak";
   static final String appId = "app_RM_0";
@@ -101,6 +106,10 @@ public class TestContainerLocalizer {
   static final InetSocketAddress nmAddr =
       new InetSocketAddress("foobar", 8040);
 
+  @After
+  public void cleanUp() throws IOException {
+    FileUtils.deleteDirectory(new File(basedir.toUri().getRawPath()));
+  }
 
   @Test
   public void testMain() throws Exception {
@@ -635,4 +644,34 @@ static DataInputBuffer createFakeCredentials(Random r, int nTok)
     return ret;
   }
 
+  @Test(timeout = 10000)
+  public void testUserCacheDirPermission() throws Exception {
+    Configuration conf = new Configuration();
+    conf.set(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY, "077");
+    FileContext lfs = FileContext.getLocalFSFileContext(conf);
+    Path fileCacheDir = lfs.makeQualified(new Path(basedir, "filecache"));
+    lfs.mkdir(fileCacheDir, FsPermission.getDefault(), true);
+    RecordFactory recordFactory = mock(RecordFactory.class);
+    ContainerLocalizer localizer = new ContainerLocalizer(lfs,
+        UserGroupInformation.getCurrentUser().getUserName(), "application_01",
+        "container_01", new ArrayList<Path>(), recordFactory);
+    LocalResource rsrc = mock(LocalResource.class);
+    when(rsrc.getVisibility()).thenReturn(LocalResourceVisibility.PRIVATE);
+    Path destDirPath = new Path(fileCacheDir, "0/0/85");
+    // create one of the parent directories with the wrong permissions first
+    FsPermission wrongPerm = new FsPermission((short) 0700);
+    lfs.mkdir(destDirPath.getParent().getParent(), wrongPerm, false);
+    lfs.mkdir(destDirPath.getParent(), wrongPerm, false);
+    // Localize and check that the directory permissions are correct.
+    localizer
+        .download(destDirPath, rsrc, UserGroupInformation.getCurrentUser());
+    Assert
+        .assertEquals("Cache directory permissions filecache/0/0 is incorrect",
+            USERCACHE_DIR_PERM,
+            lfs.getFileStatus(destDirPath.getParent()).getPermission());
+    Assert.assertEquals("Cache directory permissions filecache/0 are incorrect",
+        USERCACHE_DIR_PERM,
+        lfs.getFileStatus(destDirPath.getParent().getParent()).getPermission());
+  }
+
 }




[47/50] [abbrv] hadoop git commit: HADOOP-14629. Improve exception checking in FileContext related JUnit tests. Contributed by Andras Bokor.

Posted by as...@apache.org.
HADOOP-14629. Improve exception checking in FileContext related JUnit tests. Contributed by Andras Bokor.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9144fd9e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9144fd9e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9144fd9e

Branch: refs/heads/YARN-5972
Commit: 9144fd9e9b5d84d71158451428341746a6567152
Parents: d670c3a
Author: Akira Ajisaka <aa...@apache.org>
Authored: Wed Jul 12 11:35:50 2017 +0900
Committer: Akira Ajisaka <aa...@apache.org>
Committed: Wed Jul 12 11:35:50 2017 +0900

----------------------------------------------------------------------
 .../fs/FileContextMainOperationsBaseTest.java   | 65 ++++++++++----------
 .../fs/TestHDFSFileContextMainOperations.java   | 46 +++++++-------
 2 files changed, 56 insertions(+), 55 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9144fd9e/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextMainOperationsBaseTest.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextMainOperationsBaseTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextMainOperationsBaseTest.java
index ece96f8..a536e57 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextMainOperationsBaseTest.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextMainOperationsBaseTest.java
@@ -24,7 +24,6 @@ import java.io.IOException;
 import java.util.EnumSet;
 import java.util.NoSuchElementException;
 
-import org.apache.commons.lang.RandomStringUtils;
 import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.fs.Options.CreateOpts;
 import org.apache.hadoop.fs.Options.Rename;
@@ -881,14 +880,14 @@ public abstract class FileContextMainOperationsBaseTest  {
     Path src = getTestRootPath(fc, "test/hadoop/nonExistent");
     Path dst = getTestRootPath(fc, "test/new/newpath");
     try {
-      rename(src, dst, false, false, false, Rename.NONE);
+      rename(src, dst, false, false, Rename.NONE);
       Assert.fail("Should throw FileNotFoundException");
     } catch (IOException e) {
       Assert.assertTrue(unwrapException(e) instanceof FileNotFoundException);
     }
 
     try {
-      rename(src, dst, false, false, false, Rename.OVERWRITE);
+      rename(src, dst, false, false, Rename.OVERWRITE);
       Assert.fail("Should throw FileNotFoundException");
     } catch (IOException e) {
       Assert.assertTrue(unwrapException(e) instanceof FileNotFoundException);
@@ -904,14 +903,14 @@ public abstract class FileContextMainOperationsBaseTest  {
     Path dst = getTestRootPath(fc, "test/nonExistent/newfile");
     
     try {
-      rename(src, dst, false, true, false, Rename.NONE);
+      rename(src, dst, true, false, Rename.NONE);
       Assert.fail("Expected exception was not thrown");
     } catch (IOException e) {
       Assert.assertTrue(unwrapException(e) instanceof FileNotFoundException);
     }
 
     try {
-      rename(src, dst, false, true, false, Rename.OVERWRITE);
+      rename(src, dst, true, false, Rename.OVERWRITE);
       Assert.fail("Expected exception was not thrown");
     } catch (IOException e) {
       Assert.assertTrue(unwrapException(e) instanceof FileNotFoundException);
@@ -928,13 +927,13 @@ public abstract class FileContextMainOperationsBaseTest  {
     createFile(dst.getParent());
     
     try {
-      rename(src, dst, false, true, false, Rename.NONE);
+      rename(src, dst, true, false, Rename.NONE);
       Assert.fail("Expected exception was not thrown");
     } catch (IOException e) {
     }
 
     try {
-      rename(src, dst, false, true, false, Rename.OVERWRITE);
+      rename(src, dst, true, false, Rename.OVERWRITE);
       Assert.fail("Expected exception was not thrown");
     } catch (IOException e) {
     }
@@ -948,7 +947,7 @@ public abstract class FileContextMainOperationsBaseTest  {
     createFile(src);
     Path dst = getTestRootPath(fc, "test/new/newfile");
     fc.mkdir(dst.getParent(), FileContext.DEFAULT_PERM, true);
-    rename(src, dst, true, false, true, Rename.OVERWRITE);
+    rename(src, dst, false, true, Rename.OVERWRITE);
   }
 
   @Test
@@ -957,14 +956,14 @@ public abstract class FileContextMainOperationsBaseTest  {
     Path src = getTestRootPath(fc, "test/hadoop/file");
     createFile(src);
     try {
-      rename(src, src, false, true, false, Rename.NONE);
+      rename(src, src, true, true, Rename.NONE);
       Assert.fail("Renamed file to itself");
     } catch (IOException e) {
       Assert.assertTrue(unwrapException(e) instanceof FileAlreadyExistsException);
     }
     // Also fails with overwrite
     try {
-      rename(src, src, false, true, false, Rename.OVERWRITE);
+      rename(src, src, true, true, Rename.OVERWRITE);
       Assert.fail("Renamed file to itself");
     } catch (IOException e) {
       Assert.assertTrue(unwrapException(e) instanceof FileAlreadyExistsException);
@@ -982,14 +981,14 @@ public abstract class FileContextMainOperationsBaseTest  {
     
     // Fails without overwrite option
     try {
-      rename(src, dst, false, true, false, Rename.NONE);
+      rename(src, dst, true, true, Rename.NONE);
       Assert.fail("Expected exception was not thrown");
     } catch (IOException e) {
       Assert.assertTrue(unwrapException(e) instanceof FileAlreadyExistsException);
     }
     
     // Succeeds with overwrite option
-    rename(src, dst, true, false, true, Rename.OVERWRITE);
+    rename(src, dst, false, true, Rename.OVERWRITE);
   }
 
   @Test
@@ -1003,14 +1002,14 @@ public abstract class FileContextMainOperationsBaseTest  {
     
     // Fails without overwrite option
     try {
-      rename(src, dst, false, false, true, Rename.NONE);
+      rename(src, dst, true, true, Rename.NONE);
       Assert.fail("Expected exception was not thrown");
     } catch (IOException e) {
     }
     
     // File cannot be renamed as directory
     try {
-      rename(src, dst, false, false, true, Rename.OVERWRITE);
+      rename(src, dst, true, true, Rename.OVERWRITE);
       Assert.fail("Expected exception was not thrown");
     } catch (IOException e) {
     }
@@ -1022,14 +1021,14 @@ public abstract class FileContextMainOperationsBaseTest  {
     Path src = getTestRootPath(fc, "test/hadoop/dir");
     fc.mkdir(src, FileContext.DEFAULT_PERM, true);
     try {
-      rename(src, src, false, true, false, Rename.NONE);
+      rename(src, src, true, true, Rename.NONE);
       Assert.fail("Renamed directory to itself");
     } catch (IOException e) {
       Assert.assertTrue(unwrapException(e) instanceof FileAlreadyExistsException);
     }
     // Also fails with overwrite
     try {
-      rename(src, src, false, true, false, Rename.OVERWRITE);
+      rename(src, src, true, true, Rename.OVERWRITE);
       Assert.fail("Renamed directory to itself");
     } catch (IOException e) {
       Assert.assertTrue(unwrapException(e) instanceof FileAlreadyExistsException);      
@@ -1045,14 +1044,14 @@ public abstract class FileContextMainOperationsBaseTest  {
     Path dst = getTestRootPath(fc, "test/nonExistent/newdir");
     
     try {
-      rename(src, dst, false, true, false, Rename.NONE);
+      rename(src, dst, true, false, Rename.NONE);
       Assert.fail("Expected exception was not thrown");
     } catch (IOException e) {
       Assert.assertTrue(unwrapException(e) instanceof FileNotFoundException);
     }
 
     try {
-      rename(src, dst, false, true, false, Rename.OVERWRITE);
+      rename(src, dst, true, false, Rename.OVERWRITE);
       Assert.fail("Expected exception was not thrown");
     } catch (IOException e) {
       Assert.assertTrue(unwrapException(e) instanceof FileNotFoundException);
@@ -1077,7 +1076,7 @@ public abstract class FileContextMainOperationsBaseTest  {
     Path dst = getTestRootPath(fc, "test/new/newdir");
     fc.mkdir(dst.getParent(), FileContext.DEFAULT_PERM, true);
     
-    rename(src, dst, true, false, true, options);
+    rename(src, dst, false, true, options);
     Assert.assertFalse("Nested file1 exists", 
         exists(fc, getTestRootPath(fc, "test/hadoop/dir/file1")));
     Assert.assertFalse("Nested file2 exists", 
@@ -1102,14 +1101,14 @@ public abstract class FileContextMainOperationsBaseTest  {
 
     // Fails without overwrite option
     try {
-      rename(src, dst, false, true, false, Rename.NONE);
+      rename(src, dst, true, true, Rename.NONE);
       Assert.fail("Expected exception was not thrown");
     } catch (IOException e) {
       // Expected (cannot over-write non-empty destination)
       Assert.assertTrue(unwrapException(e) instanceof FileAlreadyExistsException);
     }
     // Succeeds with the overwrite option
-    rename(src, dst, true, false, true, Rename.OVERWRITE);
+    rename(src, dst, false, true, Rename.OVERWRITE);
   }
 
   @Test
@@ -1126,7 +1125,7 @@ public abstract class FileContextMainOperationsBaseTest  {
     createFile(getTestRootPath(fc, "test/new/newdir/file1"));
     // Fails without overwrite option
     try {
-      rename(src, dst, false, true, false, Rename.NONE);
+      rename(src, dst, true, true, Rename.NONE);
       Assert.fail("Expected exception was not thrown");
     } catch (IOException e) {
       // Expected (cannot over-write non-empty destination)
@@ -1134,7 +1133,7 @@ public abstract class FileContextMainOperationsBaseTest  {
     }
     // Fails even with the overwrite option
     try {
-      rename(src, dst, false, true, false, Rename.OVERWRITE);
+      rename(src, dst, true, true, Rename.OVERWRITE);
       Assert.fail("Expected exception was not thrown");
     } catch (IOException ex) {
       // Expected (cannot over-write non-empty destination)
@@ -1151,13 +1150,13 @@ public abstract class FileContextMainOperationsBaseTest  {
     createFile(dst);
     // Fails without overwrite option
     try {
-      rename(src, dst, false, true, true, Rename.NONE);
+      rename(src, dst, true, true, Rename.NONE);
       Assert.fail("Expected exception was not thrown");
     } catch (IOException e) {
     }
     // Directory cannot be renamed as existing file
     try {
-      rename(src, dst, false, true, true, Rename.OVERWRITE);
+      rename(src, dst, true, true, Rename.OVERWRITE);
       Assert.fail("Expected exception was not thrown");
     } catch (IOException ex) {
     }
@@ -1219,14 +1218,14 @@ public abstract class FileContextMainOperationsBaseTest  {
     out.close();
   }
 
-  private void rename(Path src, Path dst, boolean renameShouldSucceed,
-      boolean srcExists, boolean dstExists, Rename... options)
-      throws IOException {
-    fc.rename(src, dst, options);
-    if (!renameShouldSucceed)
-      Assert.fail("rename should have thrown exception");
-    Assert.assertEquals("Source exists", srcExists, exists(fc, src));
-    Assert.assertEquals("Destination exists", dstExists, exists(fc, dst));
+  protected void rename(Path src, Path dst, boolean srcExists,
+      boolean dstExists, Rename... options) throws IOException {
+    try {
+      fc.rename(src, dst, options);
+    } finally {
+      Assert.assertEquals("Source exists", srcExists, exists(fc, src));
+      Assert.assertEquals("Destination exists", dstExists, exists(fc, dst));
+    }
   }
   
   private boolean containsPath(Path path, FileStatus[] filteredPaths)
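
Illustrative only: a test written against the reworked helper, in the style of the methods above and inside the same base class; the paths and the method name are hypothetical. Expected-exception handling now lives at the call site, while the helper always re-checks source and destination existence in its finally block:

@Test
public void testRenameOntoExistingFileWithoutOverwrite() throws Exception {
  Path src = getTestRootPath(fc, "test/hadoop/fileA");
  Path dst = getTestRootPath(fc, "test/hadoop/fileB");
  createFile(src);
  createFile(dst);
  try {
    // Both files must still exist afterwards, hence true, true.
    rename(src, dst, true, true, Rename.NONE);
    Assert.fail("Expected rename without OVERWRITE to fail");
  } catch (IOException e) {
    // Expected: destination exists and Rename.OVERWRITE was not given.
  }
}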

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9144fd9e/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestHDFSFileContextMainOperations.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestHDFSFileContextMainOperations.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestHDFSFileContextMainOperations.java
index 94fb0fb..8c37351 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestHDFSFileContextMainOperations.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestHDFSFileContextMainOperations.java
@@ -33,6 +33,7 @@ import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.protocol.NSQuotaExceededException;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.junit.After;
@@ -199,9 +200,9 @@ public class TestHDFSFileContextMainOperations extends
      * accommodates rename
      */
     // rename uses dstdir quota=1
-    rename(src1, dst1, false, true, false, Rename.NONE);
+    rename(src1, dst1, false, true, Rename.NONE);
     // rename reuses dstdir quota=1
-    rename(src2, dst1, true, true, false, Rename.OVERWRITE);
+    rename(src2, dst1, false, true, Rename.OVERWRITE);
 
     /*
      * Test2: src does not exceed quota and dst has *no* quota to accommodate 
@@ -209,7 +210,10 @@ public class TestHDFSFileContextMainOperations extends
      */
     // dstDir quota = 1 and dst1 already uses it
     createFile(src2);
-    rename(src2, dst2, false, false, true, Rename.NONE);
+    try {
+      rename(src2, dst2, true, false, Rename.NONE);
+      fail("NSQuotaExceededException excepted");
+    } catch (NSQuotaExceededException e) {}
 
     /*
      * Test3: src exceeds quota and dst has *no* quota to accommodate rename
@@ -217,7 +221,11 @@ public class TestHDFSFileContextMainOperations extends
      */
     // src1 has no quota to accommodate new rename node
     fs.setQuota(src1.getParent(), 1, HdfsConstants.QUOTA_DONT_SET);
-    rename(dst1, src1, false, false, true, Rename.NONE);
+
+    try {
+      rename(dst1, src1, true, false, Rename.NONE);
+      fail("NSQuotaExceededException excepted");
+    } catch (NSQuotaExceededException e) {}
     
     /*
      * Test4: src exceeds quota and dst has *no* quota to accommodate rename
@@ -228,16 +236,23 @@ public class TestHDFSFileContextMainOperations extends
     fs.setQuota(src1.getParent(), 100, HdfsConstants.QUOTA_DONT_SET);
     createFile(src1);
     fs.setQuota(src1.getParent(), 1, HdfsConstants.QUOTA_DONT_SET);
-    rename(dst1, src1, true, true, false, Rename.OVERWRITE);
+    rename(dst1, src1, false, true, Rename.OVERWRITE);
   }
   
-  @Test
+  @Test(expected = RemoteException.class)
   public void testRenameRoot() throws Exception {
     Path src = getTestRootPath(fc, "test/testRenameRoot/srcdir/src1");
     Path dst = new Path("/");
     createFile(src);
-    rename(src, dst, true, false, true, Rename.OVERWRITE);
-    rename(dst, src, true, false, true, Rename.OVERWRITE);
+    rename(dst, src, true, true, Rename.OVERWRITE);
+  }
+
+  @Test(expected = RemoteException.class)
+  public void testRenameToRoot() throws Exception {
+    Path src = getTestRootPath(fc, "test/testRenameRoot/srcdir/src1");
+    Path dst = new Path("/");
+    createFile(src);
+    rename(src, dst, true, true, Rename.OVERWRITE);
   }
   
   /**
@@ -286,7 +301,7 @@ public class TestHDFSFileContextMainOperations extends
     fs.setQuota(dst1.getParent(), 2, HdfsConstants.QUOTA_DONT_SET);
     // Free up quota for a subsequent rename
     fs.delete(dst1, true);
-    rename(src1, dst1, true, true, false, Rename.OVERWRITE);
+    rename(src1, dst1, false, true, Rename.OVERWRITE);
     
     // Restart the cluster and ensure the above operations can be
     // loaded from the edits log
@@ -325,19 +340,6 @@ public class TestHDFSFileContextMainOperations extends
     Assert.assertEquals(renameSucceeds, exists(fc, dst));
   }
   
-  private void rename(Path src, Path dst, boolean dstExists,
-      boolean renameSucceeds, boolean exception, Options.Rename... options)
-      throws Exception {
-    try {
-      fc.rename(src, dst, options);
-      Assert.assertTrue(renameSucceeds);
-    } catch (Exception ex) {
-      Assert.assertTrue(exception);
-    }
-    Assert.assertEquals(renameSucceeds, !exists(fc, src));
-    Assert.assertEquals((dstExists||renameSucceeds), exists(fc, dst));
-  }
-  
   @Override
   protected boolean listCorruptedBlocksSupported() {
     return true;




[05/50] [abbrv] hadoop git commit: HDFS-12043. Add counters for block re-replication. Contributed by Chen Liang.

Posted by as...@apache.org.
HDFS-12043. Add counters for block re-replication. Contributed by Chen Liang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/900221f9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/900221f9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/900221f9

Branch: refs/heads/YARN-5972
Commit: 900221f95ea9fe1936b4d5f277e6047ee8734eca
Parents: 72993b3
Author: Arpit Agarwal <ar...@apache.org>
Authored: Thu Jun 29 17:15:13 2017 -0700
Committer: Arpit Agarwal <ar...@apache.org>
Committed: Thu Jun 29 17:15:13 2017 -0700

----------------------------------------------------------------------
 .../server/blockmanagement/BlockManager.java    | 13 ++-
 .../PendingReconstructionBlocks.java            |  8 +-
 .../namenode/metrics/NameNodeMetrics.java       | 18 ++++
 .../TestPendingReconstruction.java              | 86 +++++++++++++++++++-
 4 files changed, 118 insertions(+), 7 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/900221f9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index a0c4698..a5ee30b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -1851,7 +1851,7 @@ public class BlockManager implements BlockStatsMXBean {
         (pendingReplicaNum > 0 || isPlacementPolicySatisfied(block));
   }
 
-  private BlockReconstructionWork scheduleReconstruction(BlockInfo block,
+  BlockReconstructionWork scheduleReconstruction(BlockInfo block,
       int priority) {
     // skip abandoned block or block reopened for append
     if (block.isDeleted() || !block.isCompleteOrCommitted()) {
@@ -1873,6 +1873,7 @@ public class BlockManager implements BlockStatsMXBean {
     if(srcNodes == null || srcNodes.length == 0) {
       // block can not be reconstructed from any node
       LOG.debug("Block {} cannot be reconstructed from any node", block);
+      NameNode.getNameNodeMetrics().incNumTimesReReplicationNotScheduled();
       return null;
     }
 
@@ -1885,6 +1886,7 @@ public class BlockManager implements BlockStatsMXBean {
       neededReconstruction.remove(block, priority);
       blockLog.debug("BLOCK* Removing {} from neededReconstruction as" +
           " it has enough replicas", block);
+      NameNode.getNameNodeMetrics().incNumTimesReReplicationNotScheduled();
       return null;
     }
 
@@ -1900,6 +1902,7 @@ public class BlockManager implements BlockStatsMXBean {
     if (block.isStriped()) {
       if (pendingNum > 0) {
         // Wait the previous reconstruction to finish.
+        NameNode.getNameNodeMetrics().incNumTimesReReplicationNotScheduled();
         return null;
       }
 
@@ -3727,8 +3730,8 @@ public class BlockManager implements BlockStatsMXBean {
    * The given node is reporting that it received a certain block.
    */
   @VisibleForTesting
-  void addBlock(DatanodeStorageInfo storageInfo, Block block, String delHint)
-      throws IOException {
+  public void addBlock(DatanodeStorageInfo storageInfo, Block block,
+      String delHint) throws IOException {
     DatanodeDescriptor node = storageInfo.getDatanodeDescriptor();
     // Decrement number of blocks scheduled to this datanode.
     // for a retry request (of DatanodeProtocol#blockReceivedAndDeleted with 
@@ -3751,7 +3754,9 @@ public class BlockManager implements BlockStatsMXBean {
     BlockInfo storedBlock = getStoredBlock(block);
     if (storedBlock != null &&
         block.getGenerationStamp() == storedBlock.getGenerationStamp()) {
-      pendingReconstruction.decrement(storedBlock, node);
+      if (pendingReconstruction.decrement(storedBlock, node)) {
+        NameNode.getNameNodeMetrics().incSuccessfulReReplications();
+      }
     }
     processAndHandleReportedBlock(storageInfo, block, ReplicaState.FINALIZED,
         delHintNode);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/900221f9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/PendingReconstructionBlocks.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/PendingReconstructionBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/PendingReconstructionBlocks.java
index 2221d1d..0f20daa 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/PendingReconstructionBlocks.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/PendingReconstructionBlocks.java
@@ -30,6 +30,7 @@ import java.util.List;
 import java.util.Map;
 
 import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.util.Daemon;
 import org.slf4j.Logger;
 
@@ -97,8 +98,10 @@ class PendingReconstructionBlocks {
    * for this block.
    *
    * @param dn The DataNode that finishes the reconstruction
+   * @return true if the block is decremented to 0 and got removed.
    */
-  void decrement(BlockInfo block, DatanodeDescriptor dn) {
+  boolean decrement(BlockInfo block, DatanodeDescriptor dn) {
+    boolean removed = false;
     synchronized (pendingReconstructions) {
       PendingBlockInfo found = pendingReconstructions.get(block);
       if (found != null) {
@@ -106,9 +109,11 @@ class PendingReconstructionBlocks {
         found.decrementReplicas(dn);
         if (found.getNumReplicas() <= 0) {
           pendingReconstructions.remove(block);
+          removed = true;
         }
       }
     }
+    return removed;
   }
 
   /**
@@ -263,6 +268,7 @@ class PendingReconstructionBlocks {
               timedOutItems.add(block);
             }
             LOG.warn("PendingReconstructionMonitor timed out " + block);
+            NameNode.getNameNodeMetrics().incTimeoutReReplications();
             iter.remove();
           }
         }
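
A stand-alone sketch of the contract change above: decrement() now reports whether the pending entry drained to zero, so the caller can count a successful re-replication exactly once per block. The map-based bookkeeping here is illustrative:

import java.util.HashMap;
import java.util.Map;

public class PendingSketch {
  private final Map<String, Integer> pending = new HashMap<>();

  synchronized void increment(String block, int expectedReplicas) {
    pending.merge(block, expectedReplicas, Integer::sum);
  }

  synchronized boolean decrement(String block) {
    Integer n = pending.get(block);
    if (n == null) {
      return false;            // nothing pending for this block
    }
    if (n <= 1) {
      pending.remove(block);   // last expected replica reported in
      return true;             // caller bumps successfulReReplications
    }
    pending.put(block, n - 1);
    return false;
  }

  public static void main(String[] args) {
    PendingSketch p = new PendingSketch();
    p.increment("blk_1", 2);
    System.out.println(p.decrement("blk_1")); // false: one replica left
    System.out.println(p.decrement("blk_1")); // true: drained, count it
  }
}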

http://git-wip-us.apache.org/repos/asf/hadoop/blob/900221f9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/NameNodeMetrics.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/NameNodeMetrics.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/NameNodeMetrics.java
index cb81f5a..f2534e4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/NameNodeMetrics.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/NameNodeMetrics.java
@@ -58,6 +58,12 @@ public class NameNodeMetrics {
   @Metric MutableCounterLong createSymlinkOps;
   @Metric MutableCounterLong getLinkTargetOps;
   @Metric MutableCounterLong filesInGetListingOps;
+  @Metric ("Number of successful re-replications")
+  MutableCounterLong successfulReReplications;
+  @Metric ("Number of times we failed to schedule a block re-replication.")
+  MutableCounterLong numTimesReReplicationNotScheduled;
+  @Metric("Number of timed out block re-replications")
+  MutableCounterLong timeoutReReplications;
   @Metric("Number of allowSnapshot operations")
   MutableCounterLong allowSnapshotOps;
   @Metric("Number of disallowSnapshot operations")
@@ -300,6 +306,18 @@ public class NameNodeMetrics {
     transactionsBatchedInSync.incr(count);
   }
 
+  public void incSuccessfulReReplications() {
+    successfulReReplications.incr();
+  }
+
+  public void incNumTimesReReplicationNotScheduled() {
+    numTimesReReplicationNotScheduled.incr();
+  }
+
+  public void incTimeoutReReplications() {
+    timeoutReReplications.incr();
+  }
+
   public void addSync(long elapsed) {
     syncs.add(elapsed);
     for (MutableQuantiles q : syncsQuantiles) {
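
A hedged sketch of asserting the new counters from a test, in the style of the MetricsAsserts helpers the test below imports; the metrics record name "NameNodeActivity" and the expected values are assumptions here:

import static org.apache.hadoop.test.MetricsAsserts.assertCounter;
import static org.apache.hadoop.test.MetricsAsserts.getMetrics;

import org.apache.hadoop.metrics2.MetricsRecordBuilder;

public class ReReplicationMetricsSketch {
  void verifyCounters() {
    MetricsRecordBuilder rb = getMetrics("NameNodeActivity");
    assertCounter("SuccessfulReReplications", 1L, rb);
    assertCounter("TimeoutReReplications", 1L, rb);
    assertCounter("NumTimesReReplicationNotScheduled", 1L, rb);
  }
}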

http://git-wip-us.apache.org/repos/asf/hadoop/blob/900221f9/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingReconstruction.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingReconstruction.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingReconstruction.java
index 7679f9d..042eae7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingReconstruction.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingReconstruction.java
@@ -17,6 +17,10 @@
  */
 package org.apache.hadoop.hdfs.server.blockmanagement;
 
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RECONSTRUCTION_PENDING_TIMEOUT_SEC_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY;
+import static org.apache.hadoop.test.MetricsAsserts.assertCounter;
+import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertNull;
@@ -44,6 +48,7 @@ import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo;
 import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo.BlockStatus;
 import org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks;
+import org.apache.hadoop.metrics2.MetricsRecordBuilder;
 import org.junit.Test;
 import org.mockito.Mockito;
 
@@ -178,7 +183,7 @@ public class TestPendingReconstruction {
   public void testProcessPendingReconstructions() throws Exception {
     final Configuration conf = new HdfsConfiguration();
     conf.setLong(
-        DFSConfigKeys.DFS_NAMENODE_RECONSTRUCTION_PENDING_TIMEOUT_SEC_KEY, TIMEOUT);
+        DFS_NAMENODE_RECONSTRUCTION_PENDING_TIMEOUT_SEC_KEY, TIMEOUT);
     MiniDFSCluster cluster = null;
     Block block;
     BlockInfo blockInfo;
@@ -418,7 +423,7 @@ public class TestPendingReconstruction {
     CONF.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 1024);
     CONF.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY,
         DFS_REPLICATION_INTERVAL);
-    CONF.setInt(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY,
+    CONF.setInt(DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY,
         DFS_REPLICATION_INTERVAL);
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(CONF).numDataNodes(
         DATANODE_COUNT).build();
@@ -471,4 +476,81 @@ public class TestPendingReconstruction {
       cluster.shutdown();
     }
   }
+
+  @Test
+  public void testReplicationCounter() throws Exception {
+    HdfsConfiguration conf = new HdfsConfiguration();
+    conf.setInt(DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY, 1);
+    conf.setInt(DFS_NAMENODE_RECONSTRUCTION_PENDING_TIMEOUT_SEC_KEY, 2);
+    MiniDFSCluster tmpCluster = new MiniDFSCluster.Builder(conf).numDataNodes(
+        DATANODE_COUNT).build();
+    tmpCluster.waitActive();
+    FSNamesystem fsn = tmpCluster.getNamesystem(0);
+    fsn.writeLock();
+
+    try {
+      BlockManager bm = fsn.getBlockManager();
+      BlocksMap blocksMap = bm.blocksMap;
+
+      // Create three BlockInfos below: blockInfo0 will succeed, blockInfo1
+      // will time out, and blockInfo2 will fail replication.
+      BlockCollection bc0 = Mockito.mock(BlockCollection.class);
+      BlockInfo blockInfo0 = new BlockInfoContiguous((short) 3);
+      blockInfo0.setBlockId(0);
+
+      BlockCollection bc1 = Mockito.mock(BlockCollection.class);
+      BlockInfo blockInfo1 = new BlockInfoContiguous((short) 3);
+      blockInfo1.setBlockId(1);
+
+      BlockCollection bc2 = Mockito.mock(BlockCollection.class);
+      Mockito.when(bc2.getId()).thenReturn((2L));
+      BlockInfo blockInfo2 = new BlockInfoContiguous((short) 3);
+      blockInfo2.setBlockId(2);
+
+      blocksMap.addBlockCollection(blockInfo0, bc0);
+      blocksMap.addBlockCollection(blockInfo1, bc1);
+      blocksMap.addBlockCollection(blockInfo2, bc2);
+
+      PendingReconstructionBlocks pending = bm.pendingReconstruction;
+
+      MetricsRecordBuilder rb = getMetrics("NameNodeActivity");
+      assertCounter("SuccessfulReReplications", 0L, rb);
+      assertCounter("NumTimesReReplicationNotScheduled", 0L, rb);
+      assertCounter("TimeoutReReplications", 0L, rb);
+
+      // add block0 and block1 to pending queue.
+      pending.increment(blockInfo0);
+      pending.increment(blockInfo1);
+
+      Thread.sleep(2000);
+
+      rb = getMetrics("NameNodeActivity");
+      assertCounter("SuccessfulReReplications", 0L, rb);
+      assertCounter("NumTimesReReplicationNotScheduled", 0L, rb);
+      assertCounter("TimeoutReReplications", 0L, rb);
+
+      // Calling addBlock on block0 makes it successfully replicated;
+      // not calling addBlock on block1 makes it time out later.
+      DatanodeStorageInfo[] storageInfos =
+          DFSTestUtil.createDatanodeStorageInfos(1);
+      bm.addBlock(storageInfos[0], blockInfo0, null);
+
+      // Calling scheduleReconstruction on blockInfo2 fails the re-replication
+      // because there is no source replica to reconstruct from.
+      bm.scheduleReconstruction(blockInfo2, 0);
+
+      Thread.sleep(2000);
+
+      rb = getMetrics("NameNodeActivity");
+      assertCounter("SuccessfulReReplications", 1L, rb);
+      assertCounter("NumTimesReReplicationNotScheduled", 1L, rb);
+      assertCounter("TimeoutReReplications", 1L, rb);
+
+    } finally {
+      tmpCluster.shutdown();
+      fsn.writeUnlock();
+    }
+  }
+
+
 }
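
The boolean now returned by decrement() is what ties the
PendingReconstructionBlocks bookkeeping to the new SuccessfulReReplications
counter. A minimal caller sketch, assuming the BlockManager wiring that this
excerpt does not show:

    // Hypothetical caller (the actual BlockManager change is not part of
    // this excerpt): bump the success counter only when the pending entry
    // for the block was fully satisfied and removed.
    void blockReceived(BlockInfo block, DatanodeDescriptor node) {
      if (pendingReconstruction.decrement(block, node)) {
        NameNode.getNameNodeMetrics().incSuccessfulReReplications();
      }
    }

The timeout path is symmetric: the PendingReconstructionMonitor hunk above
calls incTimeoutReReplications() as each expired entry is moved to
timedOutItems.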


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[03/50] [abbrv] hadoop git commit: YARN-6751. Display reserved resources in web UI per queue (Contributed by Abdullah Yousufi via Daniel Templeton)

Posted by as...@apache.org.
YARN-6751. Display reserved resources in web UI per queue
(Contributed by Abdullah Yousufi via Daniel Templeton)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ec975197
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ec975197
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ec975197

Branch: refs/heads/YARN-5972
Commit: ec975197799417a1d5727dedc395fe6c15c30eb2
Parents: 441378e
Author: Daniel Templeton <te...@apache.org>
Authored: Thu Jun 29 16:52:46 2017 -0700
Committer: Daniel Templeton <te...@apache.org>
Committed: Thu Jun 29 16:53:50 2017 -0700

----------------------------------------------------------------------
 .../yarn/server/resourcemanager/scheduler/fair/FSQueue.java   | 7 +++++++
 .../yarn/server/resourcemanager/webapp/FairSchedulerPage.java | 6 ++++--
 .../resourcemanager/webapp/dao/FairSchedulerQueueInfo.java    | 6 ++++++
 3 files changed, 17 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ec975197/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueue.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueue.java
index 12b1b83..1016823 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueue.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueue.java
@@ -57,6 +57,7 @@ public abstract class FSQueue implements Queue, Schedulable {
 
   private Resource fairShare = Resources.createResource(0, 0);
   private Resource steadyFairShare = Resources.createResource(0, 0);
+  private Resource reservedResource = Resources.createResource(0, 0);
   private final String name;
   protected final FairScheduler scheduler;
   private final YarnAuthorizationProvider authorizer;
@@ -161,6 +162,12 @@ public abstract class FSQueue implements Queue, Schedulable {
     this.maxShare = maxShare;
   }
 
+  public Resource getReservedResource() {
+    reservedResource.setMemorySize(metrics.getReservedMB());
+    reservedResource.setVirtualCores(metrics.getReservedVirtualCores());
+    return reservedResource;
+  }
+
   @Override
   public Resource getMaxShare() {
     return maxShare;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ec975197/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/FairSchedulerPage.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/FairSchedulerPage.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/FairSchedulerPage.java
index 544275e..5f46841 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/FairSchedulerPage.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/FairSchedulerPage.java
@@ -75,7 +75,8 @@ public class FairSchedulerPage extends RmView {
           _("Num Active Applications:", qinfo.getNumActiveApplications()).
           _("Num Pending Applications:", qinfo.getNumPendingApplications()).
           _("Min Resources:", qinfo.getMinResources().toString()).
-          _("Max Resources:", qinfo.getMaxResources().toString());
+          _("Max Resources:", qinfo.getMaxResources().toString()).
+          _("Reserved Resources:", qinfo.getReservedResources().toString());
       int maxApps = qinfo.getMaxApplications();
       if (maxApps < Integer.MAX_VALUE) {
           ri._("Max Running Applications:", qinfo.getMaxApplications());
@@ -103,7 +104,8 @@ public class FairSchedulerPage extends RmView {
       ResponseInfo ri = info("\'" + qinfo.getQueueName() + "\' Queue Status").
           _("Used Resources:", qinfo.getUsedResources().toString()).
           _("Min Resources:", qinfo.getMinResources().toString()).
-          _("Max Resources:", qinfo.getMaxResources().toString());
+          _("Max Resources:", qinfo.getMaxResources().toString()).
+          _("Reserved Resources:", qinfo.getReservedResources().toString());
       int maxApps = qinfo.getMaxApplications();
       if (maxApps < Integer.MAX_VALUE) {
           ri._("Max Running Applications:", qinfo.getMaxApplications());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ec975197/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/FairSchedulerQueueInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/FairSchedulerQueueInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/FairSchedulerQueueInfo.java
index 64e573b..fa14bae 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/FairSchedulerQueueInfo.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/FairSchedulerQueueInfo.java
@@ -58,6 +58,7 @@ public class FairSchedulerQueueInfo {
   private ResourceInfo steadyFairResources;
   private ResourceInfo fairResources;
   private ResourceInfo clusterResources;
+  private ResourceInfo reservedResources;
 
   private long pendingContainers;
   private long allocatedContainers;
@@ -93,6 +94,7 @@ public class FairSchedulerQueueInfo {
     maxResources = new ResourceInfo(
         Resources.componentwiseMin(queue.getMaxShare(),
             scheduler.getClusterResource()));
+    reservedResources = new ResourceInfo(queue.getReservedResource());
 
     fractionMemSteadyFairShare =
         (float)steadyFairResources.getMemorySize() / clusterResources.getMemorySize();
@@ -186,6 +188,10 @@ public class FairSchedulerQueueInfo {
     return maxResources;
   }
   
+  public ResourceInfo getReservedResources() {
+    return reservedResources;
+  }
+
   public int getMaxApplications() {
     return maxApps;
   }
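
getReservedResource() folds the queue's reserved memory and vcores from
QueueMetrics into a single Resource instance that is reused across calls, so
callers should treat the result as a transient, read-only snapshot. A minimal
read sketch, assuming a FairScheduler handle obtained elsewhere (the queue
name is a placeholder):

    FSQueue queue = scheduler.getQueueManager().getQueue("root.default");
    Resource reserved = queue.getReservedResource();
    System.out.println("Reserved: " + reserved.getMemorySize() + " MB, "
        + reserved.getVirtualCores() + " vcores");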


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[35/50] [abbrv] hadoop git commit: YARN-6410. FSContext.scheduler should be final (Contributed by Yeliang Cang via Daniel Templeton)

Posted by as...@apache.org.
YARN-6410. FSContext.scheduler should be final (Contributed by Yeliang Cang via Daniel Templeton)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9d278804
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9d278804
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9d278804

Branch: refs/heads/YARN-5972
Commit: 9d278804425f0118b590d302985408e0794b88bb
Parents: c5a0c38
Author: Daniel Templeton <te...@apache.org>
Authored: Sun Jul 9 19:34:35 2017 +0900
Committer: Daniel Templeton <te...@apache.org>
Committed: Sun Jul 9 19:34:35 2017 +0900

----------------------------------------------------------------------
 .../yarn/server/resourcemanager/scheduler/fair/FSContext.java      | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9d278804/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSContext.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSContext.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSContext.java
index a4aa8f4..eb76ca3 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSContext.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSContext.java
@@ -29,7 +29,7 @@ public class FSContext {
   private boolean preemptionEnabled = false;
   private float preemptionUtilizationThreshold;
   private FSStarvedApps starvedApps;
-  private FairScheduler scheduler;
+  private final FairScheduler scheduler;
 
   FSContext(FairScheduler scheduler) {
     this.scheduler = scheduler;
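
Beyond documenting that FSContext never swaps its scheduler, the final
qualifier also buys the Java memory model's final-field publication
guarantee. A minimal illustration, not Hadoop code:

    // A final reference assigned in the constructor is safely published:
    // threads that later obtain the object see the initialized field
    // without additional synchronization.
    public class Context {
      interface Scheduler { }
      private final Scheduler scheduler;
      Context(Scheduler scheduler) { this.scheduler = scheduler; }
      Scheduler getScheduler() { return scheduler; }
    }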


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[41/50] [abbrv] hadoop git commit: HADOOP-10829. Iteration on CredentialProviderFactory.serviceLoader is thread-unsafe. Contributed by Benoy Antony and Rakesh R.

Posted by as...@apache.org.
HADOOP-10829. Iteration on CredentialProviderFactory.serviceLoader is thread-unsafe. Contributed by Benoy Antony and Rakesh R.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f1efa14f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f1efa14f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f1efa14f

Branch: refs/heads/YARN-5972
Commit: f1efa14fc676641fa15c11d3147e3ad948b084e9
Parents: 5496a34
Author: Jitendra Pandey <ji...@apache.org>
Authored: Fri Jul 7 12:45:37 2017 -0700
Committer: Jitendra Pandey <ji...@apache.org>
Committed: Mon Jul 10 17:48:27 2017 -0700

----------------------------------------------------------------------
 .../hadoop/security/alias/CredentialProviderFactory.java  | 10 ++++++++++
 1 file changed, 10 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f1efa14f/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/CredentialProviderFactory.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/CredentialProviderFactory.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/CredentialProviderFactory.java
index d1e3eb5..1b2ac41 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/CredentialProviderFactory.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/CredentialProviderFactory.java
@@ -22,6 +22,7 @@ import java.io.IOException;
 import java.net.URI;
 import java.net.URISyntaxException;
 import java.util.ArrayList;
+import java.util.Iterator;
 import java.util.List;
 import java.util.ServiceLoader;
 
@@ -49,6 +50,15 @@ public abstract class CredentialProviderFactory {
       ServiceLoader.load(CredentialProviderFactory.class,
           CredentialProviderFactory.class.getClassLoader());
 
+  // Iterate through the serviceLoader to avoid lazy loading.
+  // Lazy loading would require synchronization in concurrent use cases.
+  static {
+    Iterator<CredentialProviderFactory> iterServices = serviceLoader.iterator();
+    while (iterServices.hasNext()) {
+      iterServices.next();
+    }
+  }
+
   public static List<CredentialProvider> getProviders(Configuration conf
                                                ) throws IOException {
     List<CredentialProvider> result = new ArrayList<CredentialProvider>();
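
ServiceLoader looks up and instantiates providers lazily on first iteration,
and that lazy path is not thread-safe; draining the iterator once in a static
initializer forces every provider into the loader's internal cache while the
class-initialization lock is still held. The same idiom in isolation, as a
sketch (the Runnable service type here is arbitrary):

    import java.util.ServiceLoader;

    public final class EagerlyLoaded {
      private static final ServiceLoader<Runnable> LOADER =
          ServiceLoader.load(Runnable.class);

      // Drain once during class init so later concurrent iterations only
      // walk the already-populated provider cache.
      static {
        for (Runnable provider : LOADER) {
          // touching each provider is enough; nothing else to do here
        }
      }
    }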


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[23/50] [abbrv] hadoop git commit: HADOOP-14608. KMS JMX servlet path not backwards compatible. Contributed by John Zhuge.

Posted by as...@apache.org.
HADOOP-14608. KMS JMX servlet path not backwards compatible. Contributed by John Zhuge.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/946dd256
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/946dd256
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/946dd256

Branch: refs/heads/YARN-5972
Commit: 946dd256755109ca57d9cfa0912eef8402450181
Parents: 6436768
Author: John Zhuge <jz...@apache.org>
Authored: Fri Jun 30 11:12:29 2017 -0700
Committer: John Zhuge <jz...@apache.org>
Committed: Wed Jul 5 11:16:56 2017 -0700

----------------------------------------------------------------------
 .../main/resources/webapps/kms/WEB-INF/web.xml  | 10 +++
 .../hadoop/crypto/key/kms/server/TestKMS.java   | 79 ++++++++++++++++++++
 2 files changed, 89 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/946dd256/hadoop-common-project/hadoop-kms/src/main/resources/webapps/kms/WEB-INF/web.xml
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-kms/src/main/resources/webapps/kms/WEB-INF/web.xml b/hadoop-common-project/hadoop-kms/src/main/resources/webapps/kms/WEB-INF/web.xml
index 1c14d28..737236c 100644
--- a/hadoop-common-project/hadoop-kms/src/main/resources/webapps/kms/WEB-INF/web.xml
+++ b/hadoop-common-project/hadoop-kms/src/main/resources/webapps/kms/WEB-INF/web.xml
@@ -40,11 +40,21 @@
     <load-on-startup>1</load-on-startup>
   </servlet>
 
+  <servlet>
+    <servlet-name>jmx-servlet</servlet-name>
+    <servlet-class>org.apache.hadoop.jmx.JMXJsonServlet</servlet-class>
+  </servlet>
+
   <servlet-mapping>
     <servlet-name>webservices-driver</servlet-name>
     <url-pattern>/kms/*</url-pattern>
   </servlet-mapping>
 
+  <servlet-mapping>
+    <servlet-name>jmx-servlet</servlet-name>
+    <url-pattern>/kms/jmx</url-pattern>
+  </servlet-mapping>
+
   <filter>
     <filter-name>authFilter</filter-name>
     <filter-class>org.apache.hadoop.crypto.key.kms.server.KMSAuthenticationFilter</filter-class>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/946dd256/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java b/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java
index dc5f83f..a45906a 100644
--- a/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java
+++ b/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java
@@ -40,10 +40,12 @@ import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.authorize.AuthorizationException;
 import org.apache.hadoop.security.ssl.KeyStoreTestUtil;
+import org.apache.hadoop.security.ssl.SSLFactory;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.delegation.web.DelegationTokenIdentifier;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.Time;
+import org.apache.http.client.utils.URIBuilder;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
@@ -55,6 +57,7 @@ import org.mockito.internal.util.reflection.Whitebox;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import javax.net.ssl.HttpsURLConnection;
 import javax.security.auth.login.AppConfigurationEntry;
 
 import java.io.ByteArrayInputStream;
@@ -62,6 +65,7 @@ import java.io.DataInputStream;
 import java.io.File;
 import java.io.FileWriter;
 import java.io.IOException;
+import java.io.InputStream;
 import java.io.Writer;
 import java.net.InetAddress;
 import java.net.InetSocketAddress;
@@ -69,6 +73,8 @@ import java.net.ServerSocket;
 import java.net.SocketTimeoutException;
 import java.net.URI;
 import java.net.URL;
+import java.net.URLConnection;
+import java.security.GeneralSecurityException;
 import java.security.PrivilegedExceptionAction;
 import java.util.ArrayList;
 import java.util.Arrays;
@@ -83,6 +89,8 @@ import java.util.Set;
 import java.util.UUID;
 import java.util.concurrent.Callable;
 import java.util.concurrent.LinkedBlockingQueue;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
 
 import static org.junit.Assert.assertArrayEquals;
 import static org.junit.Assert.assertEquals;
@@ -97,6 +105,8 @@ public class TestKMS {
   private static final String SSL_RELOADER_THREAD_NAME =
       "Truststore reloader thread";
 
+  private SSLFactory sslFactory;
+
   @Rule
   public final Timeout testTimeout = new Timeout(180000);
 
@@ -317,6 +327,57 @@ public class TestKMS {
     }
   }
 
+  /**
+   * Read in the content from an URL connection.
+   * @param conn URLConnection To read
+   * @return the text from the output
+   * @throws IOException if something went wrong
+   */
+  private static String readOutput(URLConnection conn) throws IOException {
+    StringBuilder out = new StringBuilder();
+    InputStream in = conn.getInputStream();
+    byte[] buffer = new byte[64 * 1024];
+    int len = in.read(buffer);
+    while (len > 0) {
+      out.append(new String(buffer, 0, len));
+      len = in.read(buffer);
+    }
+    return out.toString();
+  }
+
+  private static void assertReFind(String re, String value) {
+    Pattern p = Pattern.compile(re);
+    Matcher m = p.matcher(value);
+    Assert.assertTrue("'" + p + "' does not match " + value, m.find());
+  }
+
+  private URLConnection openJMXConnection(URL baseUrl, boolean kerberos)
+      throws Exception {
+    URIBuilder b = new URIBuilder(baseUrl + "/jmx");
+    if (!kerberos) {
+      b.addParameter("user.name", "dr.who");
+    }
+    URL url = b.build().toURL();
+    LOG.info("JMX URL " + url);
+    URLConnection conn = url.openConnection();
+    if (sslFactory != null) {
+      HttpsURLConnection httpsConn = (HttpsURLConnection) conn;
+      try {
+        httpsConn.setSSLSocketFactory(sslFactory.createSSLSocketFactory());
+      } catch (GeneralSecurityException ex) {
+        throw new IOException(ex);
+      }
+      httpsConn.setHostnameVerifier(sslFactory.getHostnameVerifier());
+    }
+    return conn;
+  }
+
+  private void testJMXQuery(URL baseUrl, boolean kerberos) throws Exception {
+    LOG.info("Testing JMX");
+    assertReFind("\"name\"\\s*:\\s*\"java.lang:type=Memory\"",
+        readOutput(openJMXConnection(baseUrl, kerberos)));
+  }
+
   public void testStartStop(final boolean ssl, final boolean kerberos)
       throws Exception {
     Configuration conf = new Configuration();
@@ -350,6 +411,15 @@ public class TestKMS {
 
     writeConf(testDir, conf);
 
+    if (ssl) {
+      sslFactory = new SSLFactory(SSLFactory.Mode.CLIENT, conf);
+      try {
+        sslFactory.init();
+      } catch (GeneralSecurityException ex) {
+        throw new IOException(ex);
+      }
+    }
+
     runServer(keystore, password, testDir, new KMSCallable<Void>() {
       @Override
       public Void call() throws Exception {
@@ -390,6 +460,8 @@ public class TestKMS {
             doAs(user, new PrivilegedExceptionAction<Void>() {
               @Override
               public Void run() throws Exception {
+                testJMXQuery(url, kerberos);
+
                 final KeyProvider kp = createProvider(uri, conf);
                 // getKeys() empty
                 Assert.assertTrue(kp.getKeys().isEmpty());
@@ -406,6 +478,8 @@ public class TestKMS {
             });
           }
         } else {
+          testJMXQuery(url, kerberos);
+
           KeyProvider kp = createProvider(uri, conf);
           // getKeys() empty
           Assert.assertTrue(kp.getKeys().isEmpty());
@@ -421,6 +495,11 @@ public class TestKMS {
         return null;
       }
     });
+
+    if (sslFactory != null) {
+      sslFactory.destroy();
+      sslFactory = null;
+    }
   }
 
   @Test
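
With the new mapping in web.xml, the standard Hadoop JMXJsonServlet answers
at the KMS base URL plus /jmx, which is what testJMXQuery() exercises above.
A stripped-down client sketch, assuming a local pseudo-auth KMS (the host,
port and user.name value are placeholders):

    import java.io.BufferedReader;
    import java.io.InputStreamReader;
    import java.net.URL;
    import java.nio.charset.StandardCharsets;
    import java.util.stream.Collectors;

    public class KmsJmxProbe {
      public static void main(String[] args) throws Exception {
        URL url = new URL("http://localhost:9600/kms/jmx?user.name=dr.who");
        try (BufferedReader in = new BufferedReader(
            new InputStreamReader(url.openStream(), StandardCharsets.UTF_8))) {
          String json = in.lines().collect(Collectors.joining("\n"));
          // The Memory MXBean should always appear in the JMX dump.
          System.out.println(json.contains("java.lang:type=Memory"));
        }
      }
    }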


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[29/50] [abbrv] hadoop git commit: Add release notes, changes, jdiff for 3.0.0-alpha4

Posted by as...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f10864a8/hadoop-common-project/hadoop-common/src/site/markdown/release/3.0.0-alpha4/RELEASENOTES.3.0.0-alpha4.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/3.0.0-alpha4/RELEASENOTES.3.0.0-alpha4.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/3.0.0-alpha4/RELEASENOTES.3.0.0-alpha4.md
new file mode 100644
index 0000000..3ad6cc6
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/3.0.0-alpha4/RELEASENOTES.3.0.0-alpha4.md
@@ -0,0 +1,492 @@
+
+<!---
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+-->
+# "Apache Hadoop"  3.0.0-alpha4 Release Notes
+
+These release notes cover new developer and user-facing incompatibilities, important issues, features, and major improvements.
+
+
+---
+
+* [HADOOP-13956](https://issues.apache.org/jira/browse/HADOOP-13956) | *Critical* | **Read ADLS credentials from Credential Provider**
+
+The hadoop-azure-datalake file system now supports configuration of the Azure Data Lake Store account credentials using the standard Hadoop Credential Provider API. For details, please refer to the documentation on hadoop-azure-datalake and the Credential Provider API.
+
+
+---
+
+* [MAPREDUCE-6404](https://issues.apache.org/jira/browse/MAPREDUCE-6404) | *Major* | **Allow AM to specify a port range for starting its webapp**
+
+Add a new configuration - "yarn.app.mapreduce.am.webapp.port-range" to specify port-range for webapp launched by AM.
+
+
+---
+
+* [HDFS-10860](https://issues.apache.org/jira/browse/HDFS-10860) | *Blocker* | **Switch HttpFS from Tomcat to Jetty**
+
+<!-- markdown -->
+
+The following environment variables are deprecated. Set the corresponding
+configuration properties instead.
+
+Environment Variable        | Configuration Property       | Configuration File
+----------------------------|------------------------------|--------------------
+HTTPFS_TEMP                 | hadoop.http.temp.dir         | httpfs-site.xml
+HTTPFS_HTTP_PORT            | hadoop.httpfs.http.port      | httpfs-site.xml
+HTTPFS_MAX_HTTP_HEADER_SIZE | hadoop.http.max.request.header.size and hadoop.http.max.response.header.size | httpfs-site.xml
+HTTPFS_MAX_THREADS          | hadoop.http.max.threads      | httpfs-site.xml
+HTTPFS_SSL_ENABLED          | hadoop.httpfs.ssl.enabled    | httpfs-site.xml
+HTTPFS_SSL_KEYSTORE_FILE    | ssl.server.keystore.location | ssl-server.xml
+HTTPFS_SSL_KEYSTORE_PASS    | ssl.server.keystore.password | ssl-server.xml
+
+These default HTTP Services have been added.
+
+Name               | Description
+-------------------|------------------------------------
+/conf              | Display configuration properties
+/jmx               | Java JMX management interface
+/logLevel          | Get or set log level per class
+/logs              | Display log files
+/stacks            | Display JVM stacks
+/static/index.html | The static home page
+
+Script httpfs.sh has been deprecated; use `hdfs httpfs` instead. The new scripts are based on the Hadoop shell scripting framework. `hadoop daemonlog` is supported. SSL configurations are read from ssl-server.xml.
+
+
+---
+
+* [HDFS-11210](https://issues.apache.org/jira/browse/HDFS-11210) | *Major* | **Enhance key rolling to guarantee new KeyVersion is returned from generateEncryptedKeys after a key is rolled**
+
+<!-- markdown --> 
+
+An `invalidateCache` command has been added to the KMS.
+The `rollNewVersion` semantics of the KMS have been improved so that after a key's version is rolled, `generateEncryptedKey` of that key is guaranteed to return an `EncryptedKeyVersion` based on the new key version.
+
+
+---
+
+* [HADOOP-13075](https://issues.apache.org/jira/browse/HADOOP-13075) | *Major* | **Add support for SSE-KMS and SSE-C in s3a filesystem**
+
+The new encryption options SSE-KMS and especially SSE-C must be considered experimental at present. If you are using SSE-C, problems may arise if the bucket mixes encrypted and unencrypted files. For SSE-KMS, there may be extra throttling of IO, especially with the fadvise=random option. You may wish to request an increase in your KMS IOPs limits.
+
+
+---
+
+* [HDFS-11026](https://issues.apache.org/jira/browse/HDFS-11026) | *Major* | **Convert BlockTokenIdentifier to use Protobuf**
+
+Changed the serialized format of BlockTokenIdentifier to protocol buffers. Includes logic to decode both the old Writable format and the new PB format to support existing clients. Client implementations in other languages will require similar functionality.
+
+
+---
+
+* [HADOOP-13929](https://issues.apache.org/jira/browse/HADOOP-13929) | *Major* | **ADLS connector should not check in contract-test-options.xml**
+
+To run live unit tests, create src/test/resources/auth-keys.xml with the same properties as in the deprecated contract-test-options.xml.
+
+
+---
+
+* [HDFS-11100](https://issues.apache.org/jira/browse/HDFS-11100) | *Critical* | **Recursively deleting file protected by sticky bit should fail**
+
+Changed the behavior of removing directories with sticky bits, so that it is closer to what most Unix/Linux users would expect.
+
+
+---
+
+* [YARN-6177](https://issues.apache.org/jira/browse/YARN-6177) | *Major* | **Yarn client should exit with an informative error message if an incompatible Jersey library is used at client**
+
+Let yarn client exit with an informative error message if an incompatible Jersey library is used from client side.
+
+
+---
+
+* [HADOOP-13805](https://issues.apache.org/jira/browse/HADOOP-13805) | *Major* | **UGI.getCurrentUser() fails if user does not have a keytab associated**
+
+Due to a remaining issue after HADOOP-13558, a UGI may still try to renew the TGT even though the UGI is created from an existing Subject. The renewal would fail because of a non-existent keytab.
+
+Fixing the issue changes behavior in an incompatible way; therefore the configuration property "hadoop.treat.subject.external" is introduced to enable the fix (disabled by default). When the fix is not enabled, the behavior is the same as before.
+
+
+---
+
+* [HDFS-11405](https://issues.apache.org/jira/browse/HDFS-11405) | *Blocker* | **Rename "erasurecode" CLI subcommand to "ec"**
+
+The "hdfs erasurecode" CLI command has been renamed to "hdfs ec" for ease-of-use.
+
+
+---
+
+* [HDFS-11426](https://issues.apache.org/jira/browse/HDFS-11426) | *Major* | **Refactor EC CLI to be similar to storage policies CLI**
+
+The \`hdfs ec\` CLI command has been substantially reworked to make the calling patterns more similar to the \`hdfs storagepolicies\` command. See \`hdfs ec -help\` and the HDFS erasure coding documentation for more information.
+
+
+---
+
+* [HADOOP-13817](https://issues.apache.org/jira/browse/HADOOP-13817) | *Minor* | **Add a finite shell command timeout to ShellBasedUnixGroupsMapping**
+
+A new introduced configuration key "hadoop.security.groups.shell.command.timeout" allows applying a finite wait timeout over the 'id' commands launched by the ShellBasedUnixGroupsMapping plugin. Values specified can be in any valid time duration units: https://hadoop.apache.org/docs/current/api/org/apache/hadoop/conf/Configuration.html#getTimeDuration-java.lang.String-long-java.util.concurrent.TimeUnit-
+
+Value defaults to 0, indicating infinite wait (preserving existing behaviour).
+
+
+---
+
+* [HDFS-11427](https://issues.apache.org/jira/browse/HDFS-11427) | *Major* | **Rename "rs-default" to "rs"**
+
+The "rs-default" codec has been renamed to simply "rs" for simplicity. Previous configuration keys like "io.erasurecode.codec.rs-default" have also been renamed to match.
+
+
+---
+
+* [HDFS-11382](https://issues.apache.org/jira/browse/HDFS-11382) | *Major* | **Persist Erasure Coding Policy ID in a new optional field in INodeFile in FSImage**
+
+The FSImage on-disk format for INodeFile is changed to additionally include a field for Erasure Coded files. This optional field 'erasureCodingPolicyID', which is of uint32 type, is available for all Erasure Coded files and represents the Erasure Coding Policy ID. Previously, the 'replication' field in the INodeFile disk format was overloaded to represent the same Erasure Coding Policy ID.
+
+
+---
+
+* [HDFS-11428](https://issues.apache.org/jira/browse/HDFS-11428) | *Major* | **Change setErasureCodingPolicy to take a required string EC policy name**
+
+{{HdfsAdmin#setErasureCodingPolicy}} now takes a String {{ecPolicyName}} rather than an ErasureCodingPolicy object. The corresponding RPC's wire format has also been modified.
+
+
+---
+
+* [HADOOP-14138](https://issues.apache.org/jira/browse/HADOOP-14138) | *Critical* | **Remove S3A ref from META-INF service discovery, rely on existing core-default entry**
+
+The classpath implementing the s3a filesystem is now defined in core-default.xml. Attempting to instantiate an S3A filesystem instance using a Configuration instance which has not included the default resources will fail. Applications should not be doing this anyway, as it loses other critical configuration options needed by the filesystem.
+
+
+---
+
+* [HADOOP-6801](https://issues.apache.org/jira/browse/HADOOP-6801) | *Minor* | **io.sort.mb and io.sort.factor were renamed and moved to mapreduce but are still in CommonConfigurationKeysPublic.java and used in SequenceFile.java**
+
+Two new configuration keys, seq.io.sort.mb and seq.io.sort.factor have been introduced for the SequenceFile's Sorter feature to replace older, deprecated property keys of io.sort.mb and io.sort.factor.
+
+This only affects direct users of the org.apache.hadoop.io.SequenceFile.Sorter Java class. For controlling MR2's internal sorting instead, use the existing config keys of mapreduce.task.io.sort.mb and mapreduce.task.io.sort.factor.
+
+
+---
+
+* [HDFS-8112](https://issues.apache.org/jira/browse/HDFS-8112) | *Blocker* | **Relax permission checking for EC related operations**
+
+The HdfsAdmin erasure coding APIs (set, unset, get) are now usable by non-superusers based on appropriate file and directory permissions.
+
+
+---
+
+* [HDFS-11498](https://issues.apache.org/jira/browse/HDFS-11498) | *Major* | **Make RestCsrfPreventionHandler and WebHdfsHandler compatible with Netty 4.0**
+
+This JIRA sets the Netty 4 dependency to 4.0.23. This is an incompatible change for the 3.0 release line, as 3.0.0-alpha1 and 3.0.0-alpha2 depended on Netty 4.1.0.Beta5.
+
+
+---
+
+* [HDFS-11152](https://issues.apache.org/jira/browse/HDFS-11152) | *Blocker* | **Start erasure coding policy ID number from 1 instead of 0 to void potential unexpected errors**
+
+The NameNode metadata for storing erasure coding policies has changed.
+
+
+---
+
+* [HDFS-11314](https://issues.apache.org/jira/browse/HDFS-11314) | *Blocker* | **Enforce set of enabled EC policies on the NameNode**
+
+HDFS will now restrict the set of erasure coding policies that can be set by users. The set of allowed policies can be configured via "dfs.namenode.ec.policies.enabled" on the NameNode. Please see the documentation for more details.
+
+
+---
+
+* [HDFS-11499](https://issues.apache.org/jira/browse/HDFS-11499) | *Major* | **Decommissioning stuck because of failing recovery**
+
+Allow a block to complete if the number of replicas on live nodes, decommissioning nodes and nodes in maintenance mode satisfies the minimum replication factor.
+The fix prevents block recovery from failing when a replica of the last block is being decommissioned; without the fix, decommissioning would be stuck waiting for the last block to be completed. In addition, the file close() operation will not fail due to the last block being decommissioned.
+
+
+---
+
+* [HDFS-11505](https://issues.apache.org/jira/browse/HDFS-11505) | *Major* | **Do not enable any erasure coding policies by default**
+
+By default, none of the built-in erasure coding policies are enabled. Users have to explicitly enable the erasure coding policy via the hdfs configuration 'dfs.namenode.ec.policies.enabled' before setting the policy on any directories.
+
+
+---
+
+* [HADOOP-14213](https://issues.apache.org/jira/browse/HADOOP-14213) | *Major* | **Move Configuration runtime check for hadoop-site.xml to initialization**
+
+Move the check for hadoop-site.xml to static initialization of the Configuration class.
+
+
+---
+
+* [HADOOP-10101](https://issues.apache.org/jira/browse/HADOOP-10101) | *Major* | **Update guava dependency to the latest version**
+
+Guava is updated to version 21.0. 
+
+In the background of merging this patch into trunk, there is related work on shaded Hadoop client artifacts and minicluster under HADOOP-11804. hadoop-client ships its own shaded Guava, so the dependency can be updated with minimal impact compared to before HADOOP-11804.
+
+See also HADOOP-14238 for a related problem.
+
+
+---
+
+* [HADOOP-14038](https://issues.apache.org/jira/browse/HADOOP-14038) | *Minor* | **Rename ADLS credential properties**
+
+<!-- markdown --> 
+
+* Properties {{dfs.adls.*}} are renamed {{fs.adl.*}}
+* Property {{adl.dfs.enable.client.latency.tracker}} is renamed {{adl.enable.client.latency.tracker}}
+* Old properties are still supported
+
+
+---
+
+* [HADOOP-14267](https://issues.apache.org/jira/browse/HADOOP-14267) | *Major* | **Make DistCpOptions class immutable**
+
+DistCpOptions has been changed to be constructed with a Builder pattern. This potentially affects applications that invoke DistCp with the Java API.
+
+
+---
+
+* [HDFS-11596](https://issues.apache.org/jira/browse/HDFS-11596) | *Critical* | **hadoop-hdfs-client jar is in the wrong directory in release tarball**
+
+The scope of hadoop-hdfs's dependency on hadoop-hdfs-client has changed from "compile" to "provided". This may affect users who directly consume hadoop-hdfs, which is a private API. These users need to add a new dependency on hadoop-hdfs-client, or better yet, switch from hadoop-hdfs to hadoop-hdfs-client.
+
+
+---
+
+* [HADOOP-14202](https://issues.apache.org/jira/browse/HADOOP-14202) | *Major* | **fix jsvc/secure user var inconsistencies**
+
+<!-- markdown -->
+
+The secure user variables have been changed to be consistent with the rest of the environment variable changes:
+
+| Old | New |
+|:---- |:---- | 
+| HADOOP\_SECURE\_DN\_USER  | HDFS\_DATANODE\_SECURE\_USER |
+| HADOOP\_PRIVILEGED\_NFS\_USER | HDFS\_NFS3\_SECURE\_USER |
+
+
+---
+
+* [HADOOP-14174](https://issues.apache.org/jira/browse/HADOOP-14174) | *Major* | **Set default ADLS access token provider type to ClientCredential**
+
+Switch the default ADLS access token provider type from Custom to ClientCredential.
+
+
+---
+
+* [YARN-6298](https://issues.apache.org/jira/browse/YARN-6298) | *Blocker* | **Metric preemptCall is not used in new preemption**
+
+Metric preemptCall in FSOpDurations is no longer supported.
+
+
+---
+
+* [HADOOP-14285](https://issues.apache.org/jira/browse/HADOOP-14285) | *Major* | **Update minimum version of Maven from 3.0 to 3.3**
+
+Minimum version of Apache Maven has been updated from 3.0 to 3.3.
+
+
+---
+
+* [HADOOP-14225](https://issues.apache.org/jira/browse/HADOOP-14225) | *Minor* | **Remove xmlenc dependency**
+
+xmlenc dependency has been removed. If you rely on the transitive dependency, you need to set the dependency explicitly in your code after this change.
+
+
+---
+
+* [HADOOP-13665](https://issues.apache.org/jira/browse/HADOOP-13665) | *Blocker* | **Erasure Coding codec should support fallback coder**
+
+Use configuration properties io.erasurecode.codec.{rs-legacy,rs,xor}.rawcoders to control erasure coding codec. These properties support codec fallback in case the previous codec is not loaded.
+
+
+---
+
+* [HADOOP-14248](https://issues.apache.org/jira/browse/HADOOP-14248) | *Major* | **Retire SharedInstanceProfileCredentialsProvider in trunk.**
+
+SharedInstanceProfileCredentialsProvider is removed after this change. Users should use InstanceProfileCredentialsProvider provided by AWS SDK instead, which itself enforces a singleton instance to reduce calls to AWS EC2 Instance Metadata Service.
+
+
+---
+
+* [HDFS-11565](https://issues.apache.org/jira/browse/HDFS-11565) | *Blocker* | **Use compact identifiers for built-in ECPolicies in HdfsFileStatus**
+
+Some of the existing fields in ErasureCodingPolicyProto have changed from required to optional. For system EC policies, these fields are populated from hardcoded values.
+
+
+---
+
+* [HADOOP-11794](https://issues.apache.org/jira/browse/HADOOP-11794) | *Major* | **Enable distcp to copy blocks in parallel**
+
+If a positive value is passed to the command line switch -blocksperchunk, files with more blocks than this value will be split into chunks of \`\<blocksperchunk\>\` blocks to be transferred in parallel, and reassembled on the destination. By default, \`\<blocksperchunk\>\` is 0 and the files will be transmitted in their entirety without splitting. This switch is only applicable when the source file system supports getBlockLocations and the target supports concat.
+
+
+---
+
+* [YARN-3427](https://issues.apache.org/jira/browse/YARN-3427) | *Blocker* | **Remove deprecated methods from ResourceCalculatorProcessTree**
+
+The deprecated ProcessTree methods getCumulativeVmem and getCumulativeRssmem have been removed.
+
+
+---
+
+* [HDFS-11402](https://issues.apache.org/jira/browse/HDFS-11402) | *Major* | **HDFS Snapshots should capture point-in-time copies of OPEN files**
+
+When the config param "dfs.namenode.snapshot.capture.openfiles" is enabled, HDFS snapshots taken will additionally capture point-in-time copies of the open files that have valid leases. Even when the current version open files grow or shrink in size, the snapshot will always retain the immutable versions of these open files, just as for all other closed files. Note: the file length captured for open files in the snapshot is the one recorded in the NameNode at the time of the snapshot, and it may be shorter than what the client has written by then. In order to capture the latest length, the client can call hflush/hsync with the flag SyncFlag.UPDATE\_LENGTH on the open file handles.
+
+
+---
+
+* [HDFS-6708](https://issues.apache.org/jira/browse/HDFS-6708) | *Major* | **StorageType should be encoded in the block token**
+
+StorageTypes are now encoded in the BlockTokenIdentifier to ensure that the intended StorageType for writes is not tampered with on its way through the Client to the Datanode.
+
+
+---
+
+* [HADOOP-10105](https://issues.apache.org/jira/browse/HADOOP-10105) | *Blocker* | **remove httpclient dependency**
+
+Apache Httpclient has been removed as a dependency. This library is End of Life: people using it should move to its {{httpcore}} successor. If you cannot do that, you must add an explicit dependency on {{httpclient}} in your classpath.
+
+
+---
+
+* [HADOOP-13200](https://issues.apache.org/jira/browse/HADOOP-13200) | *Blocker* | **Implement customizable and configurable erasure coders**
+
+CodecRegistry uses ServiceLoader to dynamically load all implementations of RawErasureCoderFactory. In Hadoop 3.0, there are several built-in implementations, and user can also provide self-defined implementations with the corresponding resource files. 
+For each codec, user can configure the order of the implementations with the configuration keys:
+\`io.erasurecode.codec.rs.rawcoders\` for the default RS codec,
+\`io.erasurecode.codec.rs-legacy.rawcoders\` for the legacy RS codec,
+\`io.erasurecode.codec.xor.rawcoders\` for the XOR codec.
+User can also configure self-defined codec with the configuration key like:
+\`io.erasurecode.codec.self-defined.rawcoders\`.
+For each codec, Hadoop will use the implementations in the configured order. If one implementation fails, it will fall back to the next one in the list. The order is defined by a list of coder names separated by commas. The names for the built-in implementations are:
+\`rs\_native\` and \`rs\_java\` for the default RS codec, of which the former, a native implementation leveraging the Intel ISA-L library, is the default and the latter is a pure-Java implementation,
+\`rs-legacy\_java\` for the legacy RS codec, which is the default implementation in pure Java,
+\`xor\_native\` and \`xor\_java\` for the XOR codec, of which the former, the Intel ISA-L implementation, is the default and the latter is in pure Java.
+
+
+---
+
+* [YARN-2962](https://issues.apache.org/jira/browse/YARN-2962) | *Critical* | **ZKRMStateStore: Limit the number of znodes under a znode**
+
+**WARNING: No release note provided for this change.**
+
+
+---
+
+* [HADOOP-14386](https://issues.apache.org/jira/browse/HADOOP-14386) | *Blocker* | **Rewind trunk from Guava 21.0 back to Guava 11.0.2**
+
+YARN application tags can no longer contain non-printable ASCII characters.
+
+
+---
+
+* [HADOOP-14401](https://issues.apache.org/jira/browse/HADOOP-14401) | *Major* | **maven-project-info-reports-plugin can be removed**
+
+hadoop-auth and hadoop-hdfs-httpfs modules no longer generate dependencies.html via maven-project-info-reports-plugin.
+
+
+---
+
+* [HADOOP-14375](https://issues.apache.org/jira/browse/HADOOP-14375) | *Minor* | **Remove tomcat support from hadoop-functions.sh**
+
+This change removes the support in the shell scripts for Tomcat that was added in 3.0.0-alpha1.
+
+
+---
+
+* [HADOOP-14419](https://issues.apache.org/jira/browse/HADOOP-14419) | *Minor* | **Remove findbugs report from docs profile**
+
+Findbugs report is no longer part of the documentation.
+
+
+---
+
+* [HDFS-11661](https://issues.apache.org/jira/browse/HDFS-11661) | *Blocker* | **GetContentSummary uses excessive amounts of memory**
+
+Reverted HDFS-10797 to fix a scalability regression brought by the commit.
+
+
+---
+
+* [HADOOP-14426](https://issues.apache.org/jira/browse/HADOOP-14426) | *Blocker* | **Upgrade Kerby version from 1.0.0-RC2 to 1.0.0**
+
+**WARNING: No release note provided for this change.**
+
+
+---
+
+* [HADOOP-14407](https://issues.apache.org/jira/browse/HADOOP-14407) | *Major* | **DistCp - Introduce a configurable copy buffer size**
+
+The copy buffer size can be configured via the new parameter \<copybuffersize\>. By default the \<copybuffersize\> is set to 8KB.
+
+
+---
+
+* [HADOOP-13921](https://issues.apache.org/jira/browse/HADOOP-13921) | *Critical* | **Remove Log4j classes from JobConf**
+
+Changes the type of JobConf.DEFAULT\_LOG\_LEVEL from a Log4J Level to a String. Clients that referenced this field will need to be recompiled and may need to alter their source to account for the type change. The level itself remains conceptually at "INFO".
+
+
+---
+
+* [HADOOP-8143](https://issues.apache.org/jira/browse/HADOOP-8143) | *Minor* | **Change distcp to have -pb on by default**
+
+If the -p option of the distcp command is unspecified, block size is preserved.
+
+
+---
+
+* [HADOOP-14502](https://issues.apache.org/jira/browse/HADOOP-14502) | *Minor* | **Confusion/name conflict between NameNodeActivity#BlockReportNumOps and RpcDetailedActivity#BlockReportNumOps**
+
+**WARNING: No release note provided for this change.**
+
+
+---
+
+* [HDFS-11067](https://issues.apache.org/jira/browse/HDFS-11067) | *Major* | **DFS#listStatusIterator(..) should throw FileNotFoundException if the directory deleted before fetching next batch of entries**
+
+DistributedFileSystem#listStatusIterator(..) throws FileNotFoundException if directory got deleted during iterating over large list beyond ls limit.
+
+
+---
+
+* [HDFS-11956](https://issues.apache.org/jira/browse/HDFS-11956) | *Blocker* | **Do not require a storage ID or target storage IDs when writing a block**
+
+Hadoop 2.x clients do not pass the storage ID or target storage IDs when writing a block. For backwards compatibility, the DataNode will not require the presence of these fields. This means older clients are unable to write to a particular storage as chosen by the NameNode (e.g. HDFS-9806).
+
+
+---
+
+* [HADOOP-14536](https://issues.apache.org/jira/browse/HADOOP-14536) | *Major* | **Update azure-storage sdk to version 5.3.0**
+
+The WASB FileSystem now uses version 5.3.0 of the Azure Storage SDK.
+
+
+---
+
+* [HADOOP-14546](https://issues.apache.org/jira/browse/HADOOP-14546) | *Major* | **Azure: Concurrent I/O does not work when secure.mode is enabled**
+
+Fix to wasb:// (Azure) file system that allows the concurrent I/O feature to be used with the secure mode feature.
+
+
+
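
Two of the notes above lend themselves to short illustrations. For the
HADOOP-13200 coder fallback order, a hedged configuration sketch (the
property key and coder names come straight from the note):

    import org.apache.hadoop.conf.Configuration;

    Configuration conf = new Configuration();
    // Prefer the ISA-L native RS coder; fall back to the pure-Java coder.
    conf.set("io.erasurecode.codec.rs.rawcoders", "rs_native,rs_java");

And for the HDFS-11402 note on snapshots of open files, the length-update
call it refers to looks like this (the stream 'out' is assumed to come from
a DistributedFileSystem create()):

    import java.util.EnumSet;
    import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
    import org.apache.hadoop.hdfs.client.HdfsDataOutputStream.SyncFlag;

    // Record the current length in the NameNode so a snapshot taken now
    // captures everything written so far.
    ((HdfsDataOutputStream) out).hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));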

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f10864a8/hadoop-hdfs-project/hadoop-hdfs/dev-support/jdiff/Apache_Hadoop_HDFS_3.0.0-alpha4.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/dev-support/jdiff/Apache_Hadoop_HDFS_3.0.0-alpha4.xml b/hadoop-hdfs-project/hadoop-hdfs/dev-support/jdiff/Apache_Hadoop_HDFS_3.0.0-alpha4.xml
new file mode 100644
index 0000000..286b5fe
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/dev-support/jdiff/Apache_Hadoop_HDFS_3.0.0-alpha4.xml
@@ -0,0 +1,322 @@
+<?xml version="1.0" encoding="iso-8859-1" standalone="no"?>
+<!-- Generated by the JDiff Javadoc doclet -->
+<!-- (http://www.jdiff.org) -->
+<!-- on Fri Jun 30 01:55:19 UTC 2017 -->
+
+<api
+  xmlns:xsi='http://www.w3.org/2001/XMLSchema-instance'
+  xsi:noNamespaceSchemaLocation='api.xsd'
+  name="Apache Hadoop HDFS 3.0.0-alpha4"
+  jdversion="1.0.9">
+
+<!--  Command line arguments =  -doclet org.apache.hadoop.classification.tools.IncludePublicAnnotationsJDiffDoclet -docletpath /build/source/hadoop-hdfs-project/hadoop-hdfs/target/hadoop-annotations.jar:/build/source/hadoop-hdfs-project/hadoop-hdfs/target/jdiff.jar -verbose -classpath /build/source/hadoop-hdfs-project/hadoop-hdfs/target/classes:/build/source/hadoop-common-project/hadoop-annotations/target/hadoop-annotations-3.0.0-alpha4.jar:/usr/lib/jvm/java-8-oracle/lib/tools.jar:/build/source/hadoop-common-project/hadoop-auth/target/hadoop-auth-3.0.0-alpha4.jar:/maven/org/slf4j/slf4j-api/1.7.25/slf4j-api-1.7.25.jar:/maven/org/apache/httpcomponents/httpclient/4.5.2/httpclient-4.5.2.jar:/maven/org/apache/httpcomponents/httpcore/4.4.4/httpcore-4.4.4.jar:/maven/com/nimbusds/nimbus-jose-jwt/3.9/nimbus-jose-jwt-3.9.jar:/maven/net/jcip/jcip-annotations/1.0/jcip-annotations-1.0.jar:/maven/net/minidev/json-smart/1.1.1/json-smart-1.1.1.jar:/maven/org/apache/zookeeper/zookeeper/3.4.9/zookeeper-3.4.9.jar:/maven/jline/jline/0.9.94/jline-0.9.94.jar:/maven/org/apache/curator/curator-framework/2.12.0/curator-framework-2.12.0.jar:/maven/org/apache/kerby/kerb-simplekdc/1.0.0/kerb-simplekdc-1.0.0.jar:/maven/org/apache/kerby/kerb-client/1.0.0/kerb-client-1.0.0.jar:/maven/org/apache/kerby/kerby-config/1.0.0/kerby-config-1.0.0.jar:/maven/org/apache/kerby/kerb-core/1.0.0/kerb-core-1.0.0.jar:/maven/org/apache/kerby/kerby-pkix/1.0.0/kerby-pkix-1.0.0.jar:/maven/org/apache/kerby/kerby-asn1/1.0.0/kerby-asn1-1.0.0.jar:/maven/org/apache/kerby/kerby-util/1.0.0/kerby-util-1.0.0.jar:/maven/org/apache/kerby/kerb-common/1.0.0/kerb-common-1.0.0.jar:/maven/org/apache/kerby/kerb-crypto/1.0.0/kerb-crypto-1.0.0.jar:/maven/org/apache/kerby/kerb-util/1.0.0/kerb-util-1.0.0.jar:/maven/org/apache/kerby/kerb-admin/1.0.0/kerb-admin-1.0.0.jar:/maven/org/apache/kerby/kerb-server/1.0.0/kerb-server-1.0.0.jar:/maven/org/apache/kerby/kerb-identity/1.0.0/kerb-identity-1.0.0.jar:/maven/org/apache/kerby/kerby-xdr/1.0.0/kerby-xdr-1.0.0.jar:/build/source/hadoop-common-project/hadoop-common/target/hadoop-common-3.0.0-alpha4.jar:/maven/org/apache/commons/commons-math3/3.1.1/commons-math3-3.1.1.jar:/maven/commons-net/commons-net/3.1/commons-net-3.1.jar:/maven/commons-collections/commons-collections/3.2.2/commons-collections-3.2.2.jar:/maven/org/eclipse/jetty/jetty-servlet/9.3.11.v20160721/jetty-servlet-9.3.11.v20160721.jar:/maven/org/eclipse/jetty/jetty-security/9.3.11.v20160721/jetty-security-9.3.11.v20160721.jar:/maven/org/eclipse/jetty/jetty-webapp/9.3.11.v20160721/jetty-webapp-9.3.11.v20160721.jar:/maven/org/eclipse/jetty/jetty-xml/9.3.11.v20160721/jetty-xml-9.3.11.v20160721.jar:/maven/javax/servlet/jsp/jsp-api/2.1/jsp-api-2.1.jar:/maven/com/sun/jersey/jersey-servlet/1.19/jersey-servlet-1.19.jar:/maven/com/sun/jersey/jersey-json/1.19/jersey-json-1.19.jar:/maven/org/codehaus/jettison/jettison/1.1/jettison-1.1.jar:/maven/com/sun/xml/bind/jaxb-impl/2.2.3-1/jaxb-impl-2.2.3-1.jar:/maven/javax/xml/bind/jaxb-api/2.2.11/jaxb-api-2.2.11.jar:/maven/org/codehaus/jackson/jackson-core-asl/1.9.13/jackson-core-asl-1.9.13.jar:/maven/org/codehaus/jackson/jackson-mapper-asl/1.9.13/jackson-mapper-asl-1.9.13.jar:/maven/org/codehaus/jackson/jackson-jaxrs/1.9.13/jackson-jaxrs-1.9.13.jar:/maven/org/codehaus/jackson/jackson-xc/1.9.13/jackson-xc-1.9.13.jar:/maven/commons-beanutils/commons-beanutils/1.9.3/commons-beanutils-1.9.3.jar:/maven/org/apache/commons/commons-configuration2/2.1/commons-configuration2-2.1.jar:/maven/org/apache/commons/commons-lang3/3.3.2/commons-lang3-3.3.2.jar:/maven/org/apache/avro/avro/1.7.4/avro-1.7.4.jar:/maven/com/thoughtworks/paranamer/paranamer/2.3/paranamer-2.3.jar:/maven/org/xerial/snappy/snappy-java/1.0.4.1/snappy-java-1.0.4.1.jar:/maven/com/google/re2j/re2j/1.0/re2j-1.0.jar:/maven/com/google/code/gson/gson/2.2.4/gson-2.2.4.jar:/maven/com/jcraft/jsch/0.1.54/jsch-0.1.54.jar:/maven/org/apache/curator/curator-client/2.12.0/curator-client-2.12.0.jar:/maven/org/apache/curator/curator-recipes/2.12.0/curator-recipes-2.12.0.jar:/maven/com/google/code/findbugs/jsr305/3.0.0/jsr305-3.0.0.jar:/maven/org/apache/commons/commons-compress/1.4.1/commons-compress-1.4.1.jar:/maven/org/tukaani/xz/1.0/xz-1.0.jar:/maven/org/codehaus/woodstox/stax2-api/3.1.4/stax2-api-3.1.4.jar:/maven/com/fasterxml/woodstox/woodstox-core/5.0.3/woodstox-core-5.0.3.jar:/build/source/hadoop-hdfs-project/hadoop-hdfs-client/target/hadoop-hdfs-client-3.0.0-alpha4.jar:/maven/com/squareup/okhttp/okhttp/2.4.0/okhttp-2.4.0.jar:/maven/com/squareup/okio/okio/1.4.0/okio-1.4.0.jar:/maven/com/fasterxml/jackson/core/jackson-annotations/2.7.8/jackson-annotations-2.7.8.jar:/maven/com/google/guava/guava/11.0.2/guava-11.0.2.jar:/maven/org/eclipse/jetty/jetty-server/9.3.11.v20160721/jetty-server-9.3.11.v20160721.jar:/maven/org/eclipse/jetty/jetty-http/9.3.11.v20160721/jetty-http-9.3.11.v20160721.jar:/maven/org/eclipse/jetty/jetty-io/9.3.11.v20160721/jetty-io-9.3.11.v20160721.jar:/maven/org/eclipse/jetty/jetty-util/9.3.11.v20160721/jetty-util-9.3.11.v20160721.jar:/maven/org/eclipse/jetty/jetty-util-ajax/9.3.11.v20160721/jetty-util-ajax-9.3.11.v20160721.jar:/maven/com/sun/jersey/jersey-core/1.19/jersey-core-1.19.jar:/maven/javax/ws/rs/jsr311-api/1.1.1/jsr311-api-1.1.1.jar:/maven/com/sun/jersey/jersey-server/1.19/jersey-server-1.19.jar:/maven/commons-cli/commons-cli/1.2/commons-cli-1.2.jar:/maven/commons-codec/commons-codec/1.4/commons-codec-1.4.jar:/maven/commons-io/commons-io/2.4/commons-io-2.4.jar:/maven/commons-lang/commons-lang/2.6/commons-lang-2.6.jar:/maven/commons-logging/commons-logging/1.1.3/commons-logging-1.1.3.jar:/maven/commons-daemon/commons-daemon/1.0.13/commons-daemon-1.0.13.jar:/maven/log4j/log4j/1.2.17/log4j-1.2.17.jar:/maven/com/google/protobuf/protobuf-java/2.5.0/protobuf-java-2.5.0.jar:/maven/javax/servlet/javax.servlet-api/3.1.0/javax.servlet-api-3.1.0.jar:/maven/org/slf4j/slf4j-log4j12/1.7.25/slf4j-log4j12-1.7.25.jar:/maven/io/netty/netty/3.10.5.Final/netty-3.10.5.Final.jar:/maven/io/netty/netty-all/4.0.23.Final/netty-all-4.0.23.Final.jar:/maven/xerces/xercesImpl/2.9.1/xercesImpl-2.9.1.jar:/maven/xml-apis/xml-apis/1.3.04/xml-apis-1.3.04.jar:/maven/org/apache/htrace/htrace-core4/4.1.0-incubating/htrace-core4-4.1.0-incubating.jar:/maven/org/fusesource/leveldbjni/leveldbjni-all/1.8/leveldbjni-all-1.8.jar:/maven/com/fasterxml/jackson/core/jackson-databind/2.7.8/jackson-databind-2.7.8.jar:/maven/com/fasterxml/jackson/core/jackson-core/2.7.8/jackson-core-2.7.8.jar -sourcepath /build/source/hadoop-hdfs-project/hadoop-hdfs/src/main/java -doclet org.apache.hadoop.classification.tools.IncludePublicAnnotationsJDiffDoclet -docletpath /build/source/hadoop-hdfs-project/hadoop-hdfs/target/hadoop-annotations.jar:/build/source/hadoop-hdfs-project/hadoop-hdfs/target/jdiff.jar -apidir /build/source/hadoop-hdfs-project/hadoop-hdfs/target/site/jdiff/xml -apiname Apache Hadoop HDFS 3.0.0-alpha4 -->
+<package name="org.apache.hadoop.hdfs">
+  <doc>
+  <![CDATA[<p>A distributed implementation of {@link
+org.apache.hadoop.fs.FileSystem}.  This is loosely modelled after
+Google's <a href="http://research.google.com/archive/gfs.html">GFS</a>.</p>
+
+<p>The most important difference is that unlike GFS, Hadoop DFS files 
+have strictly one writer at any one time.  Bytes are always appended 
+to the end of the writer's stream.  There is no notion of "record appends"
+or "mutations" that are then checked or reordered.  Writers simply emit 
+a byte stream.  That byte stream is guaranteed to be stored in the 
+order written.</p>]]>
+  </doc>
+</package>
+<package name="org.apache.hadoop.hdfs.net">
+</package>
+<package name="org.apache.hadoop.hdfs.protocol">
+</package>
+<package name="org.apache.hadoop.hdfs.protocol.datatransfer">
+</package>
+<package name="org.apache.hadoop.hdfs.protocol.datatransfer.sasl">
+</package>
+<package name="org.apache.hadoop.hdfs.protocolPB">
+</package>
+<package name="org.apache.hadoop.hdfs.qjournal.client">
+</package>
+<package name="org.apache.hadoop.hdfs.qjournal.protocol">
+</package>
+<package name="org.apache.hadoop.hdfs.qjournal.protocolPB">
+</package>
+<package name="org.apache.hadoop.hdfs.qjournal.server">
+  <!-- start interface org.apache.hadoop.hdfs.qjournal.server.JournalNodeMXBean -->
+  <interface name="JournalNodeMXBean"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <method name="getJournalsStatus" return="java.lang.String"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get status information (e.g., whether formatted) of JournalNode's journals.
+ 
+ @return A string presenting status for each journal]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[This is the JMX management interface for JournalNode information]]>
+    </doc>
+  </interface>
+  <!-- end interface org.apache.hadoop.hdfs.qjournal.server.JournalNodeMXBean -->
+</package>
+<package name="org.apache.hadoop.hdfs.security.token.block">
+</package>
+<package name="org.apache.hadoop.hdfs.security.token.delegation">
+</package>
+<package name="org.apache.hadoop.hdfs.server.balancer">
+</package>
+<package name="org.apache.hadoop.hdfs.server.blockmanagement">
+</package>
+<package name="org.apache.hadoop.hdfs.server.common">
+</package>
+<package name="org.apache.hadoop.hdfs.server.datanode">
+</package>
+<package name="org.apache.hadoop.hdfs.server.datanode.fsdataset">
+</package>
+<package name="org.apache.hadoop.hdfs.server.datanode.fsdataset.impl">
+</package>
+<package name="org.apache.hadoop.hdfs.server.datanode.metrics">
+</package>
+<package name="org.apache.hadoop.hdfs.server.datanode.web">
+</package>
+<package name="org.apache.hadoop.hdfs.server.datanode.web.webhdfs">
+</package>
+<package name="org.apache.hadoop.hdfs.server.diskbalancer">
+</package>
+<package name="org.apache.hadoop.hdfs.server.diskbalancer.command">
+</package>
+<package name="org.apache.hadoop.hdfs.server.diskbalancer.connectors">
+</package>
+<package name="org.apache.hadoop.hdfs.server.diskbalancer.datamodel">
+</package>
+<package name="org.apache.hadoop.hdfs.server.diskbalancer.planner">
+</package>
+<package name="org.apache.hadoop.hdfs.server.mover">
+</package>
+<package name="org.apache.hadoop.hdfs.server.namenode">
+  <!-- start interface org.apache.hadoop.hdfs.server.namenode.AuditLogger -->
+  <interface name="AuditLogger"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <method name="initialize"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <doc>
+      <![CDATA[Called during initialization of the logger.
+
+ @param conf The configuration object.]]>
+      </doc>
+    </method>
+    <method name="logAuditEvent"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="succeeded" type="boolean"/>
+      <param name="userName" type="java.lang.String"/>
+      <param name="addr" type="java.net.InetAddress"/>
+      <param name="cmd" type="java.lang.String"/>
+      <param name="src" type="java.lang.String"/>
+      <param name="dst" type="java.lang.String"/>
+      <param name="stat" type="org.apache.hadoop.fs.FileStatus"/>
+      <doc>
+      <![CDATA[Called to log an audit event.
+ <p>
+ This method must return as quickly as possible, since it's called
+ in a critical section of the NameNode's operation.
+
+ @param succeeded Whether authorization succeeded.
+ @param userName Name of the user executing the request.
+ @param addr Remote address of the request.
+ @param cmd The requested command.
+ @param src Path of affected source file.
+ @param dst Path of affected destination file (if any).
+ @param stat File information for operations that change the file's
+             metadata (permissions, owner, times, etc).]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[Interface defining an audit logger.]]>
+    </doc>
+  </interface>
+  <!-- end interface org.apache.hadoop.hdfs.server.namenode.AuditLogger -->
+  <!-- start class org.apache.hadoop.hdfs.server.namenode.HdfsAuditLogger -->
+  <class name="HdfsAuditLogger" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.hdfs.server.namenode.AuditLogger"/>
+    <constructor name="HdfsAuditLogger"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="logAuditEvent"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="succeeded" type="boolean"/>
+      <param name="userName" type="java.lang.String"/>
+      <param name="addr" type="java.net.InetAddress"/>
+      <param name="cmd" type="java.lang.String"/>
+      <param name="src" type="java.lang.String"/>
+      <param name="dst" type="java.lang.String"/>
+      <param name="status" type="org.apache.hadoop.fs.FileStatus"/>
+    </method>
+    <method name="logAuditEvent"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="succeeded" type="boolean"/>
+      <param name="userName" type="java.lang.String"/>
+      <param name="addr" type="java.net.InetAddress"/>
+      <param name="cmd" type="java.lang.String"/>
+      <param name="src" type="java.lang.String"/>
+      <param name="dst" type="java.lang.String"/>
+      <param name="stat" type="org.apache.hadoop.fs.FileStatus"/>
+      <param name="callerContext" type="org.apache.hadoop.ipc.CallerContext"/>
+      <param name="ugi" type="org.apache.hadoop.security.UserGroupInformation"/>
+      <param name="dtSecretManager" type="org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager"/>
+      <doc>
+      <![CDATA[Same as
+ {@link #logAuditEvent(boolean, String, InetAddress, String, String, String,
+ FileStatus)} with additional parameters related to logging delegation token
+ tracking IDs.
+ 
+ @param succeeded Whether authorization succeeded.
+ @param userName Name of the user executing the request.
+ @param addr Remote address of the request.
+ @param cmd The requested command.
+ @param src Path of affected source file.
+ @param dst Path of affected destination file (if any).
+ @param stat File information for operations that change the file's metadata
+          (permissions, owner, times, etc).
+ @param callerContext Context information of the caller
+ @param ugi UserGroupInformation of the current user, or null if not logging
+          token tracking information
+ @param dtSecretManager The token secret manager, or null if not logging
+          token tracking information]]>
+      </doc>
+    </method>
+    <method name="logAuditEvent"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="succeeded" type="boolean"/>
+      <param name="userName" type="java.lang.String"/>
+      <param name="addr" type="java.net.InetAddress"/>
+      <param name="cmd" type="java.lang.String"/>
+      <param name="src" type="java.lang.String"/>
+      <param name="dst" type="java.lang.String"/>
+      <param name="stat" type="org.apache.hadoop.fs.FileStatus"/>
+      <param name="ugi" type="org.apache.hadoop.security.UserGroupInformation"/>
+      <param name="dtSecretManager" type="org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager"/>
+      <doc>
+      <![CDATA[Same as
+ {@link #logAuditEvent(boolean, String, InetAddress, String, String,
+ String, FileStatus, CallerContext, UserGroupInformation,
+ DelegationTokenSecretManager)} without {@link CallerContext} information.]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[Extension of {@link AuditLogger}.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.hdfs.server.namenode.HdfsAuditLogger -->
+  <!-- start class org.apache.hadoop.hdfs.server.namenode.INodeAttributeProvider -->
+  <class name="INodeAttributeProvider" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="INodeAttributeProvider"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="start"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Initialize the provider. This method is called at NameNode startup
+ time.]]>
+      </doc>
+    </method>
+    <method name="stop"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Shutdown the provider. This method is called at NameNode shutdown time.]]>
+      </doc>
+    </method>
+    <method name="getAttributes" return="org.apache.hadoop.hdfs.server.namenode.INodeAttributes"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="fullPath" type="java.lang.String"/>
+      <param name="inode" type="org.apache.hadoop.hdfs.server.namenode.INodeAttributes"/>
+    </method>
+    <method name="getAttributes" return="org.apache.hadoop.hdfs.server.namenode.INodeAttributes"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="pathElements" type="java.lang.String[]"/>
+      <param name="inode" type="org.apache.hadoop.hdfs.server.namenode.INodeAttributes"/>
+    </method>
+    <method name="getAttributes" return="org.apache.hadoop.hdfs.server.namenode.INodeAttributes"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="components" type="byte[][]"/>
+      <param name="inode" type="org.apache.hadoop.hdfs.server.namenode.INodeAttributes"/>
+    </method>
+    <method name="getExternalAccessControlEnforcer" return="org.apache.hadoop.hdfs.server.namenode.INodeAttributeProvider.AccessControlEnforcer"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="defaultEnforcer" type="org.apache.hadoop.hdfs.server.namenode.INodeAttributeProvider.AccessControlEnforcer"/>
+      <doc>
+      <![CDATA[Can be over-ridden by implementations to provide a custom Access Control
+ Enforcer that can provide an alternate implementation of the
+ default permission checking logic.
+ @param defaultEnforcer The Default AccessControlEnforcer
+ @return The AccessControlEnforcer to use]]>
+      </doc>
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.hdfs.server.namenode.INodeAttributeProvider -->
+</package>
+<package name="org.apache.hadoop.hdfs.server.namenode.ha">
+</package>
+<package name="org.apache.hadoop.hdfs.server.namenode.metrics">
+</package>
+<package name="org.apache.hadoop.hdfs.server.namenode.snapshot">
+</package>
+<package name="org.apache.hadoop.hdfs.server.namenode.top">
+</package>
+<package name="org.apache.hadoop.hdfs.server.namenode.top.metrics">
+</package>
+<package name="org.apache.hadoop.hdfs.server.namenode.top.window">
+</package>
+<package name="org.apache.hadoop.hdfs.server.namenode.web.resources">
+</package>
+<package name="org.apache.hadoop.hdfs.server.protocol">
+</package>
+<package name="org.apache.hadoop.hdfs.tools">
+</package>
+<package name="org.apache.hadoop.hdfs.tools.offlineEditsViewer">
+</package>
+<package name="org.apache.hadoop.hdfs.tools.offlineImageViewer">
+</package>
+<package name="org.apache.hadoop.hdfs.tools.snapshot">
+</package>
+<package name="org.apache.hadoop.hdfs.util">
+</package>
+<package name="org.apache.hadoop.hdfs.web">
+</package>
+<package name="org.apache.hadoop.hdfs.web.resources">
+</package>
+
+</api>
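
The org.apache.hadoop.hdfs package description above pins down the single-writer, append-only contract. As a client-side illustration only — the path is invented and a reachable HDFS instance configured as the default filesystem is assumed — here is a minimal sketch:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class SingleWriterDemo {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    Path p = new Path("/tmp/single-writer-demo.txt");

    // The writer holding the lease emits a plain byte stream; bytes are
    // stored in exactly the order written.
    try (FSDataOutputStream out = fs.create(p, true)) {
      out.writeBytes("first line\n");
    }

    // Appends always go to the end of the stream. A second concurrent
    // writer would be rejected while the first lease is held; there is
    // no record-append or mutation API to interleave writes.
    try (FSDataOutputStream out = fs.append(p)) {
      out.writeBytes("second line\n");
    }
  }
}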




[20/50] [abbrv] hadoop git commit: HADOOP-14617. Add ReflectionUtils.logThreadInfo that accepts the slf4j logger API. Contributed by Wenxin He.

Posted by as...@apache.org.
HADOOP-14617. Add ReflectionUtils.logThreadInfo that accepts the slf4j logger API.
Contributed by Wenxin He.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b17e655b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b17e655b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b17e655b

Branch: refs/heads/YARN-5972
Commit: b17e655b7069a6e9dab28d9f5fc34bc95a27e5d5
Parents: f2aba1d
Author: Steve Loughran <st...@apache.org>
Authored: Tue Jul 4 10:52:59 2017 +0100
Committer: Steve Loughran <st...@apache.org>
Committed: Tue Jul 4 11:41:07 2017 +0100

----------------------------------------------------------------------
 .../org/apache/hadoop/util/ReflectionUtils.java | 30 ++++++++++++++++++++
 .../apache/hadoop/util/TestReflectionUtils.java | 19 ++++++++++++-
 2 files changed, 48 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b17e655b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ReflectionUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ReflectionUtils.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ReflectionUtils.java
index da14979..f1294e7 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ReflectionUtils.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ReflectionUtils.java
@@ -46,6 +46,7 @@ import org.apache.hadoop.io.Writable;
 import org.apache.hadoop.io.serializer.Deserializer;
 import org.apache.hadoop.io.serializer.SerializationFactory;
 import org.apache.hadoop.io.serializer.Serializer;
+import org.slf4j.Logger;
 
 /**
  * General reflection utils
@@ -229,6 +230,35 @@ public class ReflectionUtils {
   }
 
   /**
+   * Log the current thread stacks at INFO level.
+   * @param log the logger that logs the stack trace
+   * @param title a descriptive title for the call stacks
+   * @param minInterval the minimum interval, in seconds, since the last dump
+   */
+  public static void logThreadInfo(Logger log,
+                                   String title,
+                                   long minInterval) {
+    boolean dumpStack = false;
+    if (log.isInfoEnabled()) {
+      synchronized (ReflectionUtils.class) {
+        long now = Time.now();
+        if (now - previousLogTime >= minInterval * 1000) {
+          previousLogTime = now;
+          dumpStack = true;
+        }
+      }
+      if (dumpStack) {
+        try {
+          ByteArrayOutputStream buffer = new ByteArrayOutputStream();
+          printThreadInfo(new PrintStream(buffer, false, "UTF-8"), title);
+          log.info(buffer.toString(Charset.defaultCharset().name()));
+        } catch (UnsupportedEncodingException ignored) {
+        }
+      }
+    }
+  }
+
+  /**
    * Return the correctly-typed {@link Class} of the given object.
    *  
    * @param o object whose correctly-typed <code>Class</code> is to be obtained

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b17e655b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestReflectionUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestReflectionUtils.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestReflectionUtils.java
index 56e86ef..62cd625 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestReflectionUtils.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestReflectionUtils.java
@@ -25,9 +25,14 @@ import java.net.URLClassLoader;
 import java.util.HashMap;
 import java.util.List;
 
+import static org.hamcrest.CoreMatchers.containsString;
 import static org.junit.Assert.*;
+
+import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
 import org.junit.Before;
 import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 public class TestReflectionUtils {
 
@@ -150,7 +155,19 @@ public class TestReflectionUtils {
     assertTrue("Missing parent method", containsParentMethod);
     assertTrue("Missing child method", containsChildMethod);
   }
-  
+
+  @Test
+  public void testLogThreadInfo() throws Exception {
+    Logger logger = LoggerFactory.getLogger(TestReflectionUtils.class);
+    LogCapturer logCapturer = LogCapturer.captureLogs(logger);
+
+    final String title = "title";
+    ReflectionUtils.logThreadInfo(logger, title, 0L);
+
+    assertThat(logCapturer.getOutput(),
+        containsString("Process Thread Dump: " + title));
+  }
+
   // Used for testGetDeclaredFieldsIncludingInherited
   private class Parent {
     private int parentField;
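
For reference, a minimal caller of the new overload — the class name and title string are invented; only the signature added above is assumed:

import org.apache.hadoop.util.ReflectionUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class ThreadDumpExample {
  private static final Logger LOG =
      LoggerFactory.getLogger(ThreadDumpExample.class);

  public static void main(String[] args) {
    // Dumps all thread stacks to LOG at INFO level.
    ReflectionUtils.logThreadInfo(LOG, "startup diagnostics", 60L);
    // A repeat call inside the 60-second minInterval window is skipped.
    ReflectionUtils.logThreadInfo(LOG, "startup diagnostics", 60L);
  }
}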




[36/50] [abbrv] hadoop git commit: YARN-6428. Queue AM limit is not always honored in CS. Contributed by Bibin A Chundatt.

Posted by as...@apache.org.
YARN-6428. Queue AM limit is not always honored in CS. Contributed by Bibin A Chundatt.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/626d730b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/626d730b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/626d730b

Branch: refs/heads/YARN-5972
Commit: 626d730bfc4c1f6ea54e8a246a0a676cdb74937f
Parents: 9d27880
Author: Naganarasimha <na...@apache.org>
Authored: Sun Jul 9 17:26:09 2017 +0530
Committer: Naganarasimha <na...@apache.org>
Committed: Sun Jul 9 17:26:09 2017 +0530

----------------------------------------------------------------------
 .../resource/DominantResourceCalculator.java    | 22 ++++-------
 .../capacity/TestCapacityScheduler.java         | 40 ++++++++++++++++++++
 2 files changed, 47 insertions(+), 15 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/626d730b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
index ea9b927..7697e1d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
@@ -218,27 +218,19 @@ public class DominantResourceCalculator extends ResourceCalculator {
   public Resource multiplyAndNormalizeUp(Resource r, double by,
       Resource stepFactor) {
     return Resources.createResource(
-        roundUp(
-            (int)Math.ceil(r.getMemorySize() * by), stepFactor.getMemorySize()),
-        roundUp(
-            (int)Math.ceil(r.getVirtualCores() * by),
-            stepFactor.getVirtualCores())
-        );
+        roundUp((long) Math.ceil((float) (r.getMemorySize() * by)),
+            stepFactor.getMemorySize()),
+        roundUp((int) Math.ceil((float) (r.getVirtualCores() * by)),
+            stepFactor.getVirtualCores()));
   }
 
   @Override
   public Resource multiplyAndNormalizeDown(Resource r, double by,
       Resource stepFactor) {
     return Resources.createResource(
-        roundDown(
-            (int)(r.getMemorySize() * by),
-            stepFactor.getMemorySize()
-            ),
-        roundDown(
-            (int)(r.getVirtualCores() * by),
-            stepFactor.getVirtualCores()
-            )
-        );
+        roundDown((long) (r.getMemorySize() * by), stepFactor.getMemorySize()),
+        roundDown((int) (r.getVirtualCores() * by),
+            stepFactor.getVirtualCores()));
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hadoop/blob/626d730b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
index 3c6e6df..ba0f906 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
@@ -4417,4 +4417,44 @@ public class TestCapacityScheduler {
     Assert.assertEquals(b1.getState(), QueueState.RUNNING);
     Assert.assertTrue(!b1.getChildQueues().isEmpty());
   }
+
+  @Test(timeout = 30000)
+  public void testAMLimitDouble() throws Exception {
+    CapacitySchedulerConfiguration config =
+        new CapacitySchedulerConfiguration();
+    config.set(CapacitySchedulerConfiguration.RESOURCE_CALCULATOR_CLASS,
+        DominantResourceCalculator.class.getName());
+    CapacitySchedulerConfiguration conf =
+        new CapacitySchedulerConfiguration(config);
+    conf.setClass(YarnConfiguration.RM_SCHEDULER, CapacityScheduler.class,
+        ResourceScheduler.class);
+    conf.setInt("yarn.scheduler.minimum-allocation-mb", 512);
+    conf.setInt("yarn.scheduler.minimum-allocation-vcores", 1);
+    MockRM rm = new MockRM(conf);
+    rm.start();
+    rm.registerNode("127.0.0.1:1234", 10 * GB);
+    rm.registerNode("127.0.0.1:1235", 10 * GB);
+    rm.registerNode("127.0.0.1:1236", 10 * GB);
+    rm.registerNode("127.0.0.1:1237", 10 * GB);
+    ResourceScheduler scheduler = rm.getRMContext().getScheduler();
+    waitforNMRegistered(scheduler, 4, 5);
+    LeafQueue queueA =
+        (LeafQueue) ((CapacityScheduler) scheduler).getQueue("default");
+    Resource amResourceLimit = queueA.getAMResourceLimit();
+    Assert.assertEquals(4096, amResourceLimit.getMemorySize());
+    Assert.assertEquals(4, amResourceLimit.getVirtualCores());
+    rm.stop();
+  }
+
+  private void waitforNMRegistered(ResourceScheduler scheduler, int nodecount,
+      int timesec) throws InterruptedException {
+    long start = System.currentTimeMillis();
+    while (System.currentTimeMillis() - start < timesec * 1000) {
+      if (scheduler.getNumClusterNodes() < nodecount) {
+        Thread.sleep(100);
+      } else {
+        break;
+      }
+    }
+  }
 }
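
The substance of the fix is widening the memory intermediate from int to long: in Java, casting an out-of-range double to int saturates at Integer.MAX_VALUE, so normalized memory on very large clusters collapsed to the same ceiling. A standalone sketch of that hazard — the figures are invented:

public class NormalizeOverflowDemo {
  public static void main(String[] args) {
    long clusterMemoryMb = 5_000_000_000L; // roughly 4.7 PB, expressed in MB
    double by = 0.9;                       // e.g. a queue capacity fraction

    // Old shape of the code: the (int) cast clamps to Integer.MAX_VALUE.
    int narrowed = (int) Math.ceil(clusterMemoryMb * by);
    System.out.println(narrowed); // 2147483647, regardless of cluster size

    // Fixed shape: keep the intermediate in a long end to end.
    long widened = (long) Math.ceil(clusterMemoryMb * by);
    System.out.println(widened);  // 4500000000
  }
}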




[48/50] [abbrv] hadoop git commit: HDFS-12114. Consistent HttpFS property names. Contributed by John Zhuge.

Posted by as...@apache.org.
HDFS-12114. Consistent HttpFS property names. Contributed by John Zhuge.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ac0a04a6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ac0a04a6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ac0a04a6

Branch: refs/heads/YARN-5972
Commit: ac0a04a6e165920a6d43c2aa3dab06ca38f3135b
Parents: 9144fd9
Author: John Zhuge <jz...@apache.org>
Authored: Mon Jul 10 17:22:00 2017 -0700
Committer: John Zhuge <jz...@apache.org>
Committed: Tue Jul 11 21:06:41 2017 -0700

----------------------------------------------------------------------
 .../fs/http/server/HttpFSServerWebServer.java      | 13 +++++++------
 .../main/libexec/shellprofile.d/hadoop-httpfs.sh   | 17 -----------------
 .../src/main/resources/httpfs-default.xml          |  8 ++++----
 .../src/site/markdown/ServerSetup.md.vm            | 11 ++++++-----
 .../fs/http/server/TestHttpFSServerWebServer.java  |  2 +-
 5 files changed, 18 insertions(+), 33 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ac0a04a6/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServerWebServer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServerWebServer.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServerWebServer.java
index d8706c5..0949f86 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServerWebServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServerWebServer.java
@@ -46,17 +46,16 @@ public class HttpFSServerWebServer {
   private static final String HTTPFS_SITE_XML = "httpfs-site.xml";
 
   // HTTP properties
-  static final String HTTP_PORT_KEY = "hadoop.httpfs.http.port";
+  static final String HTTP_PORT_KEY = "httpfs.http.port";
   private static final int HTTP_PORT_DEFAULT = 14000;
-  static final String HTTP_HOST_KEY = "hadoop.httpfs.http.host";
-  private static final String HTTP_HOST_DEFAULT = "0.0.0.0";
+  static final String HTTP_HOSTNAME_KEY = "httpfs.http.hostname";
+  private static final String HTTP_HOSTNAME_DEFAULT = "0.0.0.0";
 
   // SSL properties
   static final String SSL_ENABLED_KEY = "httpfs.ssl.enabled";
   private static final boolean SSL_ENABLED_DEFAULT = false;
 
-  private static final String HTTP_ADMINS_KEY =
-      "hadoop.httpfs.http.administrators";
+  private static final String HTTP_ADMINS_KEY = "httpfs.http.administrators";
 
   private static final String NAME = "webhdfs";
   private static final String SERVLET_PATH = "/webhdfs";
@@ -74,6 +73,8 @@ public class HttpFSServerWebServer {
     // Override configuration with deprecated environment variables.
     deprecateEnv("HTTPFS_TEMP", conf, HttpServer2.HTTP_TEMP_DIR_KEY,
         HTTPFS_SITE_XML);
+    deprecateEnv("HTTPFS_HTTP_HOSTNAME", conf, HTTP_HOSTNAME_KEY,
+        HTTPFS_SITE_XML);
     deprecateEnv("HTTPFS_HTTP_PORT", conf, HTTP_PORT_KEY,
         HTTPFS_SITE_XML);
     deprecateEnv("HTTPFS_MAX_THREADS", conf,
@@ -95,7 +96,7 @@ public class HttpFSServerWebServer {
         SSL_ENABLED_DEFAULT);
     scheme = sslEnabled ? HttpServer2.HTTPS_SCHEME : HttpServer2.HTTP_SCHEME;
 
-    String host = conf.get(HTTP_HOST_KEY, HTTP_HOST_DEFAULT);
+    String host = conf.get(HTTP_HOSTNAME_KEY, HTTP_HOSTNAME_DEFAULT);
     int port = conf.getInt(HTTP_PORT_KEY, HTTP_PORT_DEFAULT);
     URI endpoint = new URI(scheme, null, host, port, null, null, null);
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ac0a04a6/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/libexec/shellprofile.d/hadoop-httpfs.sh
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/libexec/shellprofile.d/hadoop-httpfs.sh b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/libexec/shellprofile.d/hadoop-httpfs.sh
index a3bbe56..239409c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/libexec/shellprofile.d/hadoop-httpfs.sh
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/libexec/shellprofile.d/hadoop-httpfs.sh
@@ -30,17 +30,6 @@ function hdfs_subcommand_httpfs
     . "${HADOOP_CONF_DIR}/httpfs-env.sh"
   fi
 
-  hadoop_deprecate_envvar HTTPFS_CONFIG HADOOP_CONF_DIR
-  hadoop_deprecate_envvar HTTPFS_LOG HADOOP_LOG_DIR
-
-  hadoop_using_envvar HTTPFS_HTTP_HOSTNAME
-  hadoop_using_envvar HTTPFS_HTTP_PORT
-  hadoop_using_envvar HTTPFS_MAX_HTTP_HEADER_SIZE
-  hadoop_using_envvar HTTPFS_MAX_THREADS
-  hadoop_using_envvar HTTPFS_SSL_ENABLED
-  hadoop_using_envvar HTTPFS_SSL_KEYSTORE_FILE
-  hadoop_using_envvar HTTPFS_TEMP
-
   # shellcheck disable=SC2034
   HADOOP_SUBCMD_SUPPORTDAEMONIZATION=true
   # shellcheck disable=SC2034
@@ -53,12 +42,6 @@ function hdfs_subcommand_httpfs
     "-Dhttpfs.config.dir=${HTTPFS_CONFIG:-${HADOOP_CONF_DIR}}"
   hadoop_add_param HADOOP_OPTS "-Dhttpfs.log.dir" \
     "-Dhttpfs.log.dir=${HTTPFS_LOG:-${HADOOP_LOG_DIR}}"
-  hadoop_add_param HADOOP_OPTS "-Dhttpfs.http.hostname" \
-    "-Dhttpfs.http.hostname=${HTTPFS_HOST_NAME:-$(hostname -f)}"
-  if [[ -n "${HTTPFS_SSL_ENABLED}" ]]; then
-    hadoop_add_param HADOOP_OPTS "-Dhttpfs.ssl.enabled" \
-      "-Dhttpfs.ssl.enabled=${HTTPFS_SSL_ENABLED}"
-  fi
 
   if [[ "${HADOOP_DAEMON_MODE}" == "default" ]] ||
      [[ "${HADOOP_DAEMON_MODE}" == "start" ]]; then

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ac0a04a6/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/resources/httpfs-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/resources/httpfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/resources/httpfs-default.xml
index 490d778..e420456 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/resources/httpfs-default.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/resources/httpfs-default.xml
@@ -16,7 +16,7 @@
 <configuration>
 
   <property>
-    <name>hadoop.httpfs.http.port</name>
+    <name>httpfs.http.port</name>
     <value>14000</value>
     <description>
       The HTTP port for HttpFS REST API.
@@ -24,7 +24,7 @@
   </property>
 
   <property>
-    <name>hadoop.httpfs.http.host</name>
+    <name>httpfs.http.hostname</name>
     <value>0.0.0.0</value>
     <description>
       The bind host for HttpFS REST API.
@@ -32,7 +32,7 @@
   </property>
 
   <property>
-    <name>hadoop.httpfs.http.administrators</name>
+    <name>httpfs.http.administrators</name>
     <value></value>
     <description>ACL for the admins, this configuration is used to control
       who can access the default servlets for HttpFS server. The value
@@ -46,7 +46,7 @@
   </property>
 
   <property>
-    <name>hadoop.httpfs.ssl.enabled</name>
+    <name>httpfs.ssl.enabled</name>
     <value>false</value>
     <description>
       Whether SSL is enabled. Default is false, i.e. disabled.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ac0a04a6/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/site/markdown/ServerSetup.md.vm
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/site/markdown/ServerSetup.md.vm b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/site/markdown/ServerSetup.md.vm
index 4db5d96..91ef90e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/site/markdown/ServerSetup.md.vm
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/site/markdown/ServerSetup.md.vm
@@ -82,7 +82,7 @@ Enable SSL in `etc/hadoop/httpfs-site.xml`:
 
 ```xml
   <property>
-    <name>hadoop.httpfs.ssl.enabled</name>
+    <name>httpfs.ssl.enabled</name>
     <value>true</value>
     <description>
       Whether SSL is enabled. Default is false, i.e. disabled.
@@ -142,13 +142,14 @@ configuration properties instead.
 
 Environment Variable        | Configuration Property       | Configuration File
 ----------------------------|------------------------------|--------------------
-HTTPFS_TEMP                 | hadoop.http.temp.dir         | httpfs-site.xml
-HTTPFS_HTTP_PORT            | hadoop.httpfs.http.port      | httpfs-site.xml
+HTTPFS_HTTP_HOSTNAME        | httpfs.http.hostname         | httpfs-site.xml
+HTTPFS_HTTP_PORT            | httpfs.http.port             | httpfs-site.xml
 HTTPFS_MAX_HTTP_HEADER_SIZE | hadoop.http.max.request.header.size and hadoop.http.max.response.header.size | httpfs-site.xml
 HTTPFS_MAX_THREADS          | hadoop.http.max.threads      | httpfs-site.xml
-HTTPFS_SSL_ENABLED          | hadoop.httpfs.ssl.enabled    | httpfs-site.xml
+HTTPFS_SSL_ENABLED          | httpfs.ssl.enabled           | httpfs-site.xml
 HTTPFS_SSL_KEYSTORE_FILE    | ssl.server.keystore.location | ssl-server.xml
 HTTPFS_SSL_KEYSTORE_PASS    | ssl.server.keystore.password | ssl-server.xml
+HTTPFS_TEMP                 | hadoop.http.temp.dir         | httpfs-site.xml
 
 HTTP Default Services
 ---------------------
@@ -182,7 +183,7 @@ and `/stacks`, configure the following properties in `httpfs-site.xml`:
   </property>
 
   <property>
-    <name>hadoop.httpfs.http.administrators</name>
+    <name>httpfs.http.administrators</name>
     <value></value>
     <description>ACL for the admins, this configuration is used to control
       who can access the default servlets for HttpFS server. The value

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ac0a04a6/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServerWebServer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServerWebServer.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServerWebServer.java
index 841c4dc..ddaeedb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServerWebServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServerWebServer.java
@@ -63,7 +63,7 @@ public class TestHttpFSServerWebServer {
   @Before
   public void setUp() throws Exception {
     Configuration conf = new Configuration();
-    conf.set(HttpFSServerWebServer.HTTP_HOST_KEY, "localhost");
+    conf.set(HttpFSServerWebServer.HTTP_HOSTNAME_KEY, "localhost");
     conf.setInt(HttpFSServerWebServer.HTTP_PORT_KEY, 0);
     Configuration sslConf = new Configuration();
     webServer = new HttpFSServerWebServer(conf, sslConf);
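
Downstream code picks up the renamed keys through the usual Configuration path. A sketch of reading them with the defaults shown above, assuming both XML files are on the classpath (the class name is invented):

import org.apache.hadoop.conf.Configuration;

public class HttpFSConfigCheck {
  public static void main(String[] args) {
    Configuration conf = new Configuration(false);
    conf.addResource("httpfs-default.xml");
    conf.addResource("httpfs-site.xml");

    // The renamed keys, with the defaults from httpfs-default.xml.
    String host = conf.get("httpfs.http.hostname", "0.0.0.0");
    int port = conf.getInt("httpfs.http.port", 14000);
    boolean ssl = conf.getBoolean("httpfs.ssl.enabled", false);

    System.out.printf("HttpFS endpoint: %s://%s:%d%n",
        ssl ? "https" : "http", host, port);
  }
}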




[43/50] [abbrv] hadoop git commit: YARN-6714. IllegalStateException while handling APP_ATTEMPT_REMOVED event when async-scheduling enabled in CapacityScheduler. Contributed by Tao Yang.

Posted by as...@apache.org.
YARN-6714. IllegalStateException while handling APP_ATTEMPT_REMOVED event when async-scheduling enabled in CapacityScheduler. Contributed by Tao Yang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/34f113df
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/34f113df
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/34f113df

Branch: refs/heads/YARN-5972
Commit: 34f113df5cff2cc330fb671296932b8227b11975
Parents: fce7951
Author: Sunil G <su...@apache.org>
Authored: Tue Jul 11 14:52:44 2017 +0530
Committer: Sunil G <su...@apache.org>
Committed: Tue Jul 11 14:52:44 2017 +0530

----------------------------------------------------------------------
 .../scheduler/capacity/CapacityScheduler.java   |   5 +-
 .../TestCapacitySchedulerAsyncScheduling.java   | 149 +++++++++++++++++++
 2 files changed, 153 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/34f113df/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
index d3186da..0d72860 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
@@ -2392,7 +2392,10 @@ public class CapacityScheduler extends
 
     if (attemptId != null) {
       FiCaSchedulerApp app = getApplicationAttempt(attemptId);
-      if (app != null) {
+      // Required sanity check for attemptId - when async-scheduling is
+      // enabled, the proposal might be outdated if AM failover just finished
+      // and the proposal queue was not consumed in time
+      if (app != null && attemptId.equals(app.getApplicationAttemptId())) {
         if (app.accept(cluster, request)) {
           app.apply(cluster, request);
           LOG.info("Allocation proposal accepted");

http://git-wip-us.apache.org/repos/asf/hadoop/blob/34f113df/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerAsyncScheduling.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerAsyncScheduling.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerAsyncScheduling.java
index 9854a15..0eb89d7 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerAsyncScheduling.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerAsyncScheduling.java
@@ -18,6 +18,12 @@
 
 package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity;
 
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.api.records.Container;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.Priority;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.api.records.ResourceRequest;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.server.resourcemanager.MockAM;
 import org.apache.hadoop.yarn.server.resourcemanager.MockNM;
@@ -25,12 +31,29 @@ import org.apache.hadoop.yarn.server.resourcemanager.MockRM;
 import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.NullRMNodeLabelsManager;
 import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptState;
+import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer;
+import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerEvent;
+import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerEventType;
+import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerImpl;
+import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerState;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.AbstractYarnScheduler;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.NodeType;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNode;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.ContainerAllocationProposal;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.ResourceCommitRequest;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.SchedulerContainer;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppAttemptRemovedSchedulerEvent;
+import org.apache.hadoop.yarn.server.scheduler.SchedulerRequestKey;
+import org.apache.hadoop.yarn.util.resource.Resources;
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
 
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.List;
 
 public class TestCapacitySchedulerAsyncScheduling {
@@ -140,4 +163,130 @@ public class TestCapacitySchedulerAsyncScheduling {
 
     rm.close();
   }
+
+  // Testcase for YARN-6714
+  @Test (timeout = 30000)
+  public void testCommitProposalForFailedAppAttempt()
+      throws Exception {
+    // disable async-scheduling to simulate a complex scenario
+    Configuration disableAsyncConf = new Configuration(conf);
+    disableAsyncConf.setBoolean(
+        CapacitySchedulerConfiguration.SCHEDULE_ASYNCHRONOUSLY_ENABLE, false);
+
+    // init RM & NMs & Nodes
+    final MockRM rm = new MockRM(disableAsyncConf);
+    rm.start();
+    final MockNM nm1 = rm.registerNode("h1:1234", 9 * GB);
+    final MockNM nm2 = rm.registerNode("h2:2234", 9 * GB);
+    List<MockNM> nmLst = new ArrayList<>();
+    nmLst.add(nm1);
+    nmLst.add(nm2);
+
+    // init scheduler & nodes
+    while (
+        ((CapacityScheduler) rm.getRMContext().getScheduler()).getNodeTracker()
+            .nodeCount() < 2) {
+      Thread.sleep(10);
+    }
+    Assert.assertEquals(2,
+        ((AbstractYarnScheduler) rm.getRMContext().getScheduler())
+            .getNodeTracker().nodeCount());
+    CapacityScheduler scheduler =
+        (CapacityScheduler) rm.getRMContext().getScheduler();
+    SchedulerNode sn1 = scheduler.getSchedulerNode(nm1.getNodeId());
+    SchedulerNode sn2 = scheduler.getSchedulerNode(nm2.getNodeId());
+
+    // launch app
+    RMApp app = rm.submitApp(200, "app", "user", null, false, "default",
+        YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS, null, null, true, true);
+    MockAM am = MockRM.launchAndRegisterAM(app, rm, nm1);
+    FiCaSchedulerApp schedulerApp =
+        scheduler.getApplicationAttempt(am.getApplicationAttemptId());
+
+    // allocate and launch 1 containers and running on nm2
+    allocateAndLaunchContainers(am, nm2, rm, 1,
+        Resources.createResource(5 * GB), 0, 2);
+
+    // nm1 runs 1 container(app1-container_01/AM)
+    // nm2 runs 1 container(app1-container_02)
+    Assert.assertEquals(1, sn1.getNumContainers());
+    Assert.assertEquals(1, sn2.getNumContainers());
+
+    // kill app attempt1
+    scheduler.handle(
+        new AppAttemptRemovedSchedulerEvent(am.getApplicationAttemptId(),
+            RMAppAttemptState.KILLED, true));
+    // wait until app attempt1 removed on nm1
+    while (sn1.getCopiedListOfRunningContainers().size() == 1) {
+      Thread.sleep(100);
+    }
+    // wait until app attempt2 launched on nm1
+    while (sn1.getCopiedListOfRunningContainers().size() == 0) {
+      nm1.nodeHeartbeat(true);
+      Thread.sleep(100);
+    }
+
+    // generate reserved proposal of stopped app attempt
+    // and it could be committed for async-scheduling
+    // this kind of proposal should be skipped
+    Resource reservedResource = Resources.createResource(5 * GB);
+    Container container = Container.newInstance(
+        ContainerId.newContainerId(am.getApplicationAttemptId(), 3),
+        sn2.getNodeID(), sn2.getHttpAddress(), reservedResource,
+        Priority.newInstance(0), null);
+    RMContainer rmContainer = new RMContainerImpl(container, SchedulerRequestKey
+        .create(ResourceRequest
+            .newInstance(Priority.newInstance(0), "*", reservedResource, 1)),
+        am.getApplicationAttemptId(), sn2.getNodeID(), "user",
+        rm.getRMContext());
+    SchedulerContainer reservedContainer =
+        new SchedulerContainer(schedulerApp, scheduler.getNode(sn2.getNodeID()),
+            rmContainer, "", false);
+    ContainerAllocationProposal reservedForAttempt1Proposal =
+        new ContainerAllocationProposal(reservedContainer, null,
+            reservedContainer, NodeType.OFF_SWITCH, NodeType.OFF_SWITCH,
+            SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY, reservedResource);
+    List<ContainerAllocationProposal> reservedProposals = new ArrayList<>();
+    reservedProposals.add(reservedForAttempt1Proposal);
+    ResourceCommitRequest request =
+        new ResourceCommitRequest(null, reservedProposals, null);
+    scheduler.tryCommit(scheduler.getClusterResource(), request);
+    Assert.assertNull("Outdated proposal should not be accepted!",
+        sn2.getReservedContainer());
+
+    rm.stop();
+  }
+
+
+  private void allocateAndLaunchContainers(MockAM am, MockNM nm, MockRM rm,
+      int nContainer, Resource resource, int priority, int startContainerId)
+      throws Exception {
+    am.allocate(Arrays.asList(ResourceRequest
+        .newInstance(Priority.newInstance(priority), "*", resource,
+            nContainer)), null);
+    ContainerId lastContainerId = ContainerId
+        .newContainerId(am.getApplicationAttemptId(),
+            startContainerId + nContainer - 1);
+    Assert.assertTrue(
+        rm.waitForState(nm, lastContainerId, RMContainerState.ALLOCATED));
+    // Acquire them, and NM report RUNNING
+    am.allocate(null, null);
+
+    CapacityScheduler cs = (CapacityScheduler) rm.getResourceScheduler();
+    for (int cId = startContainerId;
+         cId < startContainerId + nContainer; cId++) {
+      ContainerId containerId =
+          ContainerId.newContainerId(am.getApplicationAttemptId(), cId);
+      RMContainer rmContainer = cs.getRMContainer(containerId);
+      if (rmContainer != null) {
+        rmContainer.handle(
+            new RMContainerEvent(containerId, RMContainerEventType.LAUNCHED));
+      } else {
+        Assert.fail("Cannot find RMContainer");
+      }
+      rm.waitForState(nm,
+          ContainerId.newContainerId(am.getApplicationAttemptId(), cId),
+          RMContainerState.RUNNING);
+    }
+  }
 }
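
The same guard generalizes to any asynchronous commit pipeline: tag each queued proposal with the attempt it was produced for, and drop it when the live attempt has moved on. A self-contained sketch of the pattern — all types and ids here are invented:

import java.util.Objects;
import java.util.Queue;
import java.util.concurrent.ConcurrentLinkedQueue;

public class StaleProposalGuard {
  static final class Proposal {
    final String attemptId;
    final String payload;
    Proposal(String attemptId, String payload) {
      this.attemptId = attemptId;
      this.payload = payload;
    }
  }

  private final Queue<Proposal> queue = new ConcurrentLinkedQueue<>();
  private volatile String liveAttemptId = "appattempt_01";

  void submit(Proposal p) {
    queue.add(p);
  }

  void failover(String newAttemptId) {
    liveAttemptId = newAttemptId;
  }

  void drain() {
    Proposal p;
    while ((p = queue.poll()) != null) {
      // Same sanity check as tryCommit(): a proposal generated before an
      // AM failover references the old attempt and must be skipped.
      if (!Objects.equals(p.attemptId, liveAttemptId)) {
        continue;
      }
      System.out.println("applied: " + p.payload);
    }
  }

  public static void main(String[] args) {
    StaleProposalGuard guard = new StaleProposalGuard();
    guard.submit(new Proposal("appattempt_01", "reserve 5GB on node2"));
    guard.failover("appattempt_02"); // AM restarts with a new attempt id
    guard.submit(new Proposal("appattempt_02", "allocate 1GB on node1"));
    guard.drain(); // only the appattempt_02 proposal is applied
  }
}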




[14/50] [abbrv] hadoop git commit: YARN-6681. Eliminate double-copy of child queues in canAssignToThisQueue. Contributed by Daryn Sharp.

Posted by as...@apache.org.
YARN-6681. Eliminate double-copy of child queues in canAssignToThisQueue. Contributed by Daryn Sharp.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fa1aaee8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fa1aaee8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fa1aaee8

Branch: refs/heads/YARN-5972
Commit: fa1aaee87b0141a0255b5f8e5fd8e8f49d7efe86
Parents: 147df30
Author: Naganarasimha <na...@apache.org>
Authored: Sat Jul 1 12:29:39 2017 +0530
Committer: Naganarasimha <na...@apache.org>
Committed: Sat Jul 1 12:29:39 2017 +0530

----------------------------------------------------------------------
 .../resourcemanager/scheduler/capacity/AbstractCSQueue.java  | 8 +++++++-
 .../resourcemanager/scheduler/capacity/ParentQueue.java      | 7 ++++++-
 2 files changed, 13 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fa1aaee8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java
index b69ec96..5fbdead 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java
@@ -22,6 +22,7 @@ import java.io.IOException;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Iterator;
+import java.util.List;
 import java.util.Map;
 import java.util.Set;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
@@ -615,6 +616,11 @@ public abstract class AbstractCSQueue implements CSQueue {
         minimumAllocation);
   }
 
+  public boolean hasChildQueues() {
+    List<CSQueue> childQueues = getChildQueues();
+    return childQueues != null && !childQueues.isEmpty();
+  }
+
   boolean canAssignToThisQueue(Resource clusterResource,
       String nodePartition, ResourceLimits currentResourceLimits,
       Resource resourceCouldBeUnreserved, SchedulingMode schedulingMode) {
@@ -640,7 +646,7 @@ public abstract class AbstractCSQueue implements CSQueue {
       // When queue is a parent queue: Headroom = limit - used + killable
       // When queue is a leaf queue: Headroom = limit - used (leaf queue cannot preempt itself)
       Resource usedExceptKillable = nowTotalUsed;
-      if (null != getChildQueues() && !getChildQueues().isEmpty()) {
+      if (hasChildQueues()) {
         usedExceptKillable = Resources.subtract(nowTotalUsed,
             getTotalKillableResource(nodePartition));
       }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fa1aaee8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java
index 91fedbc..f6ada4f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java
@@ -845,7 +845,12 @@ public class ParentQueue extends AbstractCSQueue {
       writeLock.unlock();
     }
   }
-  
+
+  @Override
+  public boolean hasChildQueues() {
+    return true;
+  }
+
   @Override
   public List<CSQueue> getChildQueues() {
     try {
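
A minimal standalone sketch of the refactoring above (illustrative only; the names mirror the CapacityScheduler classes, but this is not the committed source). The null-and-empty check on getChildQueues() moves behind a hasChildQueues() query, which ParentQueue answers with a constant true instead of going through the lock-guarded getChildQueues() shown above:

import java.util.Collections;
import java.util.List;

abstract class QueueSketch {
  abstract List<QueueSketch> getChildQueues();

  // Base behaviour: derive the answer from the child list, exactly as the
  // old inline check in canAssignToThisQueue() did.
  public boolean hasChildQueues() {
    List<QueueSketch> children = getChildQueues();
    return children != null && !children.isEmpty();
  }
}

class ParentQueueSketch extends QueueSketch {
  private final List<QueueSketch> children;

  ParentQueueSketch(List<QueueSketch> children) {
    this.children = children;
  }

  // A parent queue always has at least one child, so the override answers
  // without touching (or copying) the child list at all.
  @Override
  public boolean hasChildQueues() {
    return true;
  }

  @Override
  List<QueueSketch> getChildQueues() {
    return children;
  }

  public static void main(String[] args) {
    QueueSketch child = new ParentQueueSketch(Collections.emptyList());
    QueueSketch parent =
        new ParentQueueSketch(Collections.singletonList(child));
    System.out.println(parent.hasChildQueues()); // true, no list access
  }
}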



[34/50] [abbrv] hadoop git commit: YARN-6746. SchedulerUtils.checkResourceRequestMatchingNodePartition() is dead code (Contributed by Deepti Sawhney via Daniel Templeton)

Posted by as...@apache.org.
YARN-6746. SchedulerUtils.checkResourceRequestMatchingNodePartition() is dead code
(Contributed by Deepti Sawhney via Daniel Templeton)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c5a0c38d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c5a0c38d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c5a0c38d

Branch: refs/heads/YARN-5972
Commit: c5a0c38d8e1752031d1dd8bdd4a7a3894cf775cb
Parents: 0615985
Author: Daniel Templeton <te...@apache.org>
Authored: Sun Jul 9 19:27:32 2017 +0900
Committer: Daniel Templeton <te...@apache.org>
Committed: Sun Jul 9 19:27:32 2017 +0900

----------------------------------------------------------------------
 .../resourcemanager/scheduler/SchedulerUtils.java | 18 ------------------
 1 file changed, 18 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c5a0c38d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerUtils.java
index 148f023..c67f1ce 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerUtils.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerUtils.java
@@ -349,25 +349,7 @@ public class SchedulerUtils {
     }
     return null;
   }
-  
-  public static boolean checkResourceRequestMatchingNodePartition(
-      String requestedPartition, String nodePartition,
-      SchedulingMode schedulingMode) {
-    // We will only look at node label = nodeLabelToLookAt according to
-    // schedulingMode and partition of node.
-    String nodePartitionToLookAt = null;
-    if (schedulingMode == SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY) {
-      nodePartitionToLookAt = nodePartition;
-    } else {
-      nodePartitionToLookAt = RMNodeLabelsManager.NO_LABEL;
-    }
 
-    if (null == requestedPartition) {
-      requestedPartition = RMNodeLabelsManager.NO_LABEL;
-    }
-    return requestedPartition.equals(nodePartitionToLookAt);
-  }
-  
   private static boolean hasPendingResourceRequest(ResourceCalculator rc,
       ResourceUsage usage, String partitionToLookAt, Resource cluster) {
     if (Resources.greaterThan(rc, cluster,



[28/50] [abbrv] hadoop git commit: HADOOP-14587. Use GenericTestUtils.setLogLevel when available in hadoop-common. Contributed by Wenxin He.

Posted by as...@apache.org.
HADOOP-14587. Use GenericTestUtils.setLogLevel when available in hadoop-common. Contributed by Wenxin He.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7cd09527
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7cd09527
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7cd09527

Branch: refs/heads/YARN-5972
Commit: 7cd095272caa724d11802690544b38d0baaf247d
Parents: 8fc5dcc
Author: Akira Ajisaka <aa...@apache.org>
Authored: Sat Jul 8 02:54:24 2017 +0900
Committer: Akira Ajisaka <aa...@apache.org>
Committed: Sat Jul 8 02:54:24 2017 +0900

----------------------------------------------------------------------
 .../fs/FileContextCreateMkdirBaseTest.java      |  2 +-
 .../hadoop/fs/FileContextPermissionBase.java    |  5 ++--
 .../apache/hadoop/fs/FileContextUtilBase.java   |  5 ++--
 .../org/apache/hadoop/fs/TestListFiles.java     |  4 +--
 .../fs/TestLocalFileSystemPermission.java       |  2 +-
 .../ha/TestActiveStandbyElectorRealZK.java      |  7 ++---
 .../apache/hadoop/ha/TestSshFenceByTcpPort.java |  6 ++--
 .../hadoop/ha/TestZKFailoverController.java     |  5 ++--
 .../io/serializer/TestSerializationFactory.java |  6 ++--
 .../org/apache/hadoop/ipc/MiniRPCBenchmark.java | 13 ++++-----
 .../java/org/apache/hadoop/ipc/TestIPC.java     |  5 ++--
 .../apache/hadoop/ipc/TestMiniRPCBenchmark.java |  2 +-
 .../java/org/apache/hadoop/ipc/TestRPC.java     |  2 +-
 .../java/org/apache/hadoop/ipc/TestSaslRPC.java | 16 +++++------
 .../hadoop/security/TestGroupFallback.java      | 12 ++++----
 .../hadoop/security/TestUGIWithMiniKdc.java     |  2 +-
 .../security/TestUserGroupInformation.java      |  2 +-
 .../hadoop/security/ssl/TestSSLFactory.java     |  2 +-
 .../delegation/web/TestWebDelegationToken.java  |  2 +-
 .../apache/hadoop/test/GenericTestUtils.java    | 30 ++++++++++++++++++++
 .../hadoop/test/TestGenericTestUtils.java       | 10 +++++++
 .../hadoop/util/Crc32PerformanceTest.java       | 11 +++----
 hadoop-common-project/hadoop-nfs/pom.xml        |  6 ++++
 .../apache/hadoop/oncrpc/TestFrameDecoder.java  |  6 ++--
 24 files changed, 104 insertions(+), 59 deletions(-)
----------------------------------------------------------------------
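
All of the files listed above make the same substitution; a minimal before/after sketch (illustrative only, assuming the hadoop-common test-jar and an slf4j binding on the classpath):

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.test.GenericTestUtils;
import org.slf4j.event.Level;

public class LogLevelMigrationSketch {
  static {
    // Before: a cast to the log4j 1.x implementation class, which breaks
    // as soon as the logging backend changes:
    //   ((org.apache.commons.logging.impl.Log4JLogger) FileSystem.LOG)
    //       .getLogger().setLevel(org.apache.log4j.Level.ALL);

    // After: the backend-neutral helper with an slf4j level. slf4j defines
    // no ALL level, so TRACE (the most verbose) stands in for it.
    GenericTestUtils.setLogLevel(FileSystem.LOG, Level.TRACE);
  }

  public static void main(String[] args) {
    // Triggering class initialisation applies the static block above.
    System.out.println("FileSystem log level raised to TRACE");
  }
}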


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7cd09527/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextCreateMkdirBaseTest.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextCreateMkdirBaseTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextCreateMkdirBaseTest.java
index c1de27a..fbd598c 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextCreateMkdirBaseTest.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextCreateMkdirBaseTest.java
@@ -20,7 +20,6 @@ package org.apache.hadoop.fs;
 
 import java.io.IOException;
 
-import org.apache.log4j.Level;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
@@ -30,6 +29,7 @@ import static org.apache.hadoop.fs.contract.ContractTestUtils.assertIsDirectory;
 import static org.apache.hadoop.fs.contract.ContractTestUtils.assertIsFile;
 
 import org.apache.hadoop.test.GenericTestUtils;
+import org.slf4j.event.Level;
 
 /**
  * <p>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7cd09527/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextPermissionBase.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextPermissionBase.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextPermissionBase.java
index dff89f9..240989e 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextPermissionBase.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextPermissionBase.java
@@ -23,6 +23,7 @@ import java.util.ArrayList;
 import java.util.List;
 import java.util.StringTokenizer;
 
+import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.Assert;
 
 import org.apache.hadoop.fs.permission.FsPermission;
@@ -32,6 +33,7 @@ import org.apache.hadoop.util.StringUtils;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
+import org.slf4j.event.Level;
 
 import static org.apache.hadoop.fs.FileContextTestHelper.*;
 import static org.apache.hadoop.test.PlatformAssumptions.assumeNotWindows;
@@ -61,8 +63,7 @@ public abstract class FileContextPermissionBase {
   
   {
     try {
-      ((org.apache.commons.logging.impl.Log4JLogger)FileSystem.LOG).getLogger()
-      .setLevel(org.apache.log4j.Level.DEBUG);
+      GenericTestUtils.setLogLevel(FileSystem.LOG, Level.DEBUG);
     }
     catch(Exception e) {
       System.out.println("Cannot change log level\n"

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7cd09527/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextUtilBase.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextUtilBase.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextUtilBase.java
index bebf4bf..0a96d3e 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextUtilBase.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextUtilBase.java
@@ -23,10 +23,12 @@ import static org.junit.Assert.assertTrue;
 
 import java.util.Arrays;
 
+import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.StringUtils;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
+import org.slf4j.event.Level;
 
 /**
  * <p>
@@ -48,8 +50,7 @@ public abstract class FileContextUtilBase {
   
   {
     try {
-      ((org.apache.commons.logging.impl.Log4JLogger)FileSystem.LOG).getLogger()
-      .setLevel(org.apache.log4j.Level.DEBUG);
+      GenericTestUtils.setLogLevel(FileSystem.LOG, Level.DEBUG);
     } catch(Exception e) {
       System.out.println("Cannot change log level\n"
           + StringUtils.stringifyException(e));

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7cd09527/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestListFiles.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestListFiles.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestListFiles.java
index 010754f..44308ea 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestListFiles.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestListFiles.java
@@ -24,18 +24,18 @@ import java.util.Set;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.log4j.Level;
 
 import static org.junit.Assert.*;
 import org.junit.Test;
 import org.junit.BeforeClass;
+import org.slf4j.event.Level;
 
 /**
  * This class tests the FileStatus API.
  */
 public class TestListFiles {
   static {
-    GenericTestUtils.setLogLevel(FileSystem.LOG, Level.ALL);
+    GenericTestUtils.setLogLevel(FileSystem.LOG, Level.TRACE);
   }
 
   static final long seed = 0xDEADBEEFL;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7cd09527/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystemPermission.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystemPermission.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystemPermission.java
index 1478111..81756f9 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystemPermission.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystemPermission.java
@@ -21,10 +21,10 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.Shell;
-import org.apache.log4j.Level;
 import org.junit.Test;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
+import org.slf4j.event.Level;
 
 import java.io.IOException;
 import java.util.ArrayList;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7cd09527/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestActiveStandbyElectorRealZK.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestActiveStandbyElectorRealZK.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestActiveStandbyElectorRealZK.java
index 59c0886..bbef9ef 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestActiveStandbyElectorRealZK.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestActiveStandbyElectorRealZK.java
@@ -24,12 +24,11 @@ import static org.junit.Assert.assertTrue;
 import java.util.Collections;
 import java.util.UUID;
 
-import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.ha.ActiveStandbyElector.ActiveStandbyElectorCallback;
 import org.apache.hadoop.ha.ActiveStandbyElector.State;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.ZKUtil.ZKAuthInfo;
-import org.apache.log4j.Level;
 import org.apache.zookeeper.CreateMode;
 import org.apache.zookeeper.ZooDefs.Ids;
 import org.apache.zookeeper.ZooKeeper;
@@ -39,6 +38,7 @@ import org.mockito.AdditionalMatchers;
 import org.mockito.Mockito;
 
 import com.google.common.primitives.Ints;
+import org.slf4j.event.Level;
 
 /**
  * Test for {@link ActiveStandbyElector} using real zookeeper.
@@ -47,8 +47,7 @@ public class TestActiveStandbyElectorRealZK extends ClientBaseWithFixes {
   static final int NUM_ELECTORS = 2;
   
   static {
-    ((Log4JLogger)ActiveStandbyElector.LOG).getLogger().setLevel(
-        Level.ALL);
+    GenericTestUtils.setLogLevel(ActiveStandbyElector.LOG, Level.TRACE);
   }
   
   static final String PARENT_DIR = "/" + UUID.randomUUID();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7cd09527/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestSshFenceByTcpPort.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestSshFenceByTcpPort.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestSshFenceByTcpPort.java
index 51fb829..f0ebc1e 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestSshFenceByTcpPort.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestSshFenceByTcpPort.java
@@ -21,18 +21,18 @@ import static org.junit.Assert.*;
 
 import java.net.InetSocketAddress;
 
-import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
 import org.apache.hadoop.ha.SshFenceByTcpPort.Args;
-import org.apache.log4j.Level;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.Assume;
 import org.junit.Test;
+import org.slf4j.event.Level;
 
 public class TestSshFenceByTcpPort {
 
   static {
-    ((Log4JLogger)SshFenceByTcpPort.LOG).getLogger().setLevel(Level.ALL);
+    GenericTestUtils.setLogLevel(SshFenceByTcpPort.LOG, Level.TRACE);
   }
 
   private static String TEST_FENCING_HOST = System.getProperty(

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7cd09527/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestZKFailoverController.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestZKFailoverController.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestZKFailoverController.java
index 846c8ae..0fa8e86 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestZKFailoverController.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestZKFailoverController.java
@@ -22,7 +22,6 @@ import static org.junit.Assert.*;
 import java.security.NoSuchAlgorithmException;
 
 import com.google.common.base.Supplier;
-import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
 import org.apache.hadoop.ha.HAServiceProtocol.StateChangeRequestInfo;
@@ -30,7 +29,6 @@ import org.apache.hadoop.ha.HealthMonitor.State;
 import org.apache.hadoop.ha.MiniZKFCCluster.DummyZKFC;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.Time;
-import org.apache.log4j.Level;
 import org.apache.zookeeper.KeeperException;
 import org.apache.zookeeper.ZooKeeper;
 import org.apache.zookeeper.data.Stat;
@@ -41,6 +39,7 @@ import org.junit.Rule;
 import org.junit.Test;
 import org.junit.rules.Timeout;
 import org.mockito.Mockito;
+import org.slf4j.event.Level;
 
 public class TestZKFailoverController extends ClientBaseWithFixes {
   private Configuration conf;
@@ -71,7 +70,7 @@ public class TestZKFailoverController extends ClientBaseWithFixes {
     "digest:" + DIGEST_USER_HASH + ":rwcda";
   
   static {
-    ((Log4JLogger)ActiveStandbyElector.LOG).getLogger().setLevel(Level.ALL);
+    GenericTestUtils.setLogLevel(ActiveStandbyElector.LOG, Level.TRACE);
   }
   
   @Before

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7cd09527/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/serializer/TestSerializationFactory.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/serializer/TestSerializationFactory.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/serializer/TestSerializationFactory.java
index 6774155..2cde3e3 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/serializer/TestSerializationFactory.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/serializer/TestSerializationFactory.java
@@ -18,21 +18,21 @@
 package org.apache.hadoop.io.serializer;
 
 import org.apache.hadoop.io.LongWritable;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.BeforeClass;
 import org.junit.Test;
 import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertNotNull;
 
-import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.io.Writable;
-import org.apache.log4j.Level;
+import org.slf4j.event.Level;
 
 public class TestSerializationFactory {
 
   static {
-    ((Log4JLogger) SerializationFactory.LOG).getLogger().setLevel(Level.ALL);
+    GenericTestUtils.setLogLevel(SerializationFactory.LOG, Level.TRACE);
   }
 
   static Configuration conf;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7cd09527/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/MiniRPCBenchmark.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/MiniRPCBenchmark.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/MiniRPCBenchmark.java
index cdbd557..2290270 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/MiniRPCBenchmark.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/MiniRPCBenchmark.java
@@ -26,9 +26,9 @@ import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Enumeration;
 
+import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.Assert;
 
-import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.net.NetUtils;
@@ -43,8 +43,7 @@ import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSelect
 import org.apache.hadoop.security.token.delegation.TestDelegationToken.TestDelegationTokenIdentifier;
 import org.apache.hadoop.security.token.delegation.TestDelegationToken.TestDelegationTokenSecretManager;
 import org.apache.hadoop.util.Time;
-import org.apache.log4j.Level;
-import org.apache.log4j.LogManager;
+import org.slf4j.event.Level;
 
 /**
  * MiniRPCBenchmark measures time to establish an RPC connection 
@@ -255,9 +254,9 @@ public class MiniRPCBenchmark {
   }
 
   static void setLoggingLevel(Level level) {
-    LogManager.getLogger(Server.class.getName()).setLevel(level);
-    ((Log4JLogger)Server.AUDITLOG).getLogger().setLevel(level);
-    LogManager.getLogger(Client.class.getName()).setLevel(level);
+    GenericTestUtils.setLogLevel(Server.LOG, level);
+    GenericTestUtils.setLogLevel(Server.AUDITLOG, level);
+    GenericTestUtils.setLogLevel(Client.LOG, level);
   }
 
   /**
@@ -370,7 +369,7 @@ public class MiniRPCBenchmark {
       useDelegationToken = args[3].equalsIgnoreCase("useToken");
     Level l = Level.ERROR;
     if(args.length > 4)
-      l = Level.toLevel(args[4]);
+      l = GenericTestUtils.toLevel(args[4]);
 
     MiniRPCBenchmark mb = new MiniRPCBenchmark(l);
     long elapsedTime = 0;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7cd09527/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java
index 1d47473..4bda637 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java
@@ -60,7 +60,6 @@ import javax.net.SocketFactory;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
@@ -85,7 +84,6 @@ import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
 import org.apache.hadoop.security.token.SecretManager.InvalidToken;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.StringUtils;
-import org.apache.log4j.Level;
 import org.junit.Assert;
 import org.junit.Assume;
 import org.junit.Before;
@@ -98,6 +96,7 @@ import org.mockito.stubbing.Answer;
 import com.google.common.base.Supplier;
 import com.google.common.primitives.Bytes;
 import com.google.common.primitives.Ints;
+import org.slf4j.event.Level;
 
 /** Unit tests for IPC. */
 public class TestIPC {
@@ -864,7 +863,7 @@ public class TestIPC {
 
   @Test(timeout=30000)
   public void testConnectionIdleTimeouts() throws Exception {
-    ((Log4JLogger)Server.LOG).getLogger().setLevel(Level.DEBUG);
+    GenericTestUtils.setLogLevel(Server.LOG, Level.DEBUG);
     final int maxIdle = 1000;
     final int cleanupInterval = maxIdle*3/4; // stagger cleanups
     final int killMax = 3;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7cd09527/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestMiniRPCBenchmark.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestMiniRPCBenchmark.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestMiniRPCBenchmark.java
index 0f34be8..a130fa9 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestMiniRPCBenchmark.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestMiniRPCBenchmark.java
@@ -18,8 +18,8 @@
 package org.apache.hadoop.ipc;
 
 import org.apache.hadoop.conf.Configuration;
-import org.apache.log4j.Level;
 import org.junit.Test;
+import org.slf4j.event.Level;
 
 /**
  * Test {@link MiniRPCBenchmark}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7cd09527/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java
index 166b205..8725cf4 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java
@@ -50,11 +50,11 @@ import org.apache.hadoop.security.token.TokenIdentifier;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.MetricsAsserts;
 import org.apache.hadoop.test.MockitoUtil;
-import org.apache.log4j.Level;
 import org.junit.Before;
 import org.junit.Test;
 import org.mockito.Mockito;
 import org.mockito.internal.util.reflection.Whitebox;
+import org.slf4j.event.Level;
 
 import javax.net.SocketFactory;
 import java.io.Closeable;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7cd09527/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestSaslRPC.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestSaslRPC.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestSaslRPC.java
index c48ff2e..7608cb4 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestSaslRPC.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestSaslRPC.java
@@ -22,7 +22,6 @@ import com.google.protobuf.ServiceException;
 import org.apache.commons.lang.StringUtils;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
@@ -36,7 +35,7 @@ import org.apache.hadoop.security.SaslRpcServer.QualityOfProtection;
 import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
 import org.apache.hadoop.security.token.*;
 import org.apache.hadoop.security.token.SecretManager.InvalidToken;
-import org.apache.log4j.Level;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.BeforeClass;
@@ -44,6 +43,7 @@ import org.junit.Test;
 import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
 import org.junit.runners.Parameterized.Parameters;
+import org.slf4j.event.Level;
 
 import javax.security.auth.callback.Callback;
 import javax.security.auth.callback.CallbackHandler;
@@ -186,12 +186,12 @@ public class TestSaslRPC extends TestRpcBase {
   }
 
   static {
-    ((Log4JLogger) Client.LOG).getLogger().setLevel(Level.ALL);
-    ((Log4JLogger) Server.LOG).getLogger().setLevel(Level.ALL);
-    ((Log4JLogger) SaslRpcClient.LOG).getLogger().setLevel(Level.ALL);
-    ((Log4JLogger) SaslRpcServer.LOG).getLogger().setLevel(Level.ALL);
-    ((Log4JLogger) SaslInputStream.LOG).getLogger().setLevel(Level.ALL);
-    ((Log4JLogger) SecurityUtil.LOG).getLogger().setLevel(Level.ALL);
+    GenericTestUtils.setLogLevel(Client.LOG, Level.TRACE);
+    GenericTestUtils.setLogLevel(Server.LOG, Level.TRACE);
+    GenericTestUtils.setLogLevel(SaslRpcClient.LOG, Level.TRACE);
+    GenericTestUtils.setLogLevel(SaslRpcServer.LOG, Level.TRACE);
+    GenericTestUtils.setLogLevel(SaslInputStream.LOG, Level.TRACE);
+    GenericTestUtils.setLogLevel(SecurityUtil.LOG, Level.TRACE);
   }
 
   public static class BadTokenSecretManager extends TestTokenSecretManager {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7cd09527/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestGroupFallback.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestGroupFallback.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestGroupFallback.java
index a61eee6..85f17b1 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestGroupFallback.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestGroupFallback.java
@@ -25,16 +25,16 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
-import org.apache.log4j.Level;
-import org.apache.log4j.Logger;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.Test;
+import org.slf4j.event.Level;
 
 public class TestGroupFallback {
   public static final Log LOG = LogFactory.getLog(TestGroupFallback.class);
 
   @Test
   public void testGroupShell() throws Exception {
-    Logger.getRootLogger().setLevel(Level.DEBUG);
+    GenericTestUtils.setRootLogLevel(Level.DEBUG);
     Configuration conf = new Configuration();
     conf.set(CommonConfigurationKeys.HADOOP_SECURITY_GROUP_MAPPING,
         "org.apache.hadoop.security.ShellBasedUnixGroupsMapping");
@@ -50,7 +50,7 @@ public class TestGroupFallback {
 
   @Test
   public void testNetgroupShell() throws Exception {
-    Logger.getRootLogger().setLevel(Level.DEBUG);
+    GenericTestUtils.setRootLogLevel(Level.DEBUG);
     Configuration conf = new Configuration();
     conf.set(CommonConfigurationKeys.HADOOP_SECURITY_GROUP_MAPPING,
         "org.apache.hadoop.security.ShellBasedUnixGroupsNetgroupMapping");
@@ -69,7 +69,7 @@ public class TestGroupFallback {
     LOG.info("running 'mvn -Pnative -DTestGroupFallback clear test' will " +
         "test the normal path and 'mvn -DTestGroupFallback clear test' will" +
         " test the fall back functionality");
-    Logger.getRootLogger().setLevel(Level.DEBUG);
+    GenericTestUtils.setRootLogLevel(Level.DEBUG);
     Configuration conf = new Configuration();
     conf.set(CommonConfigurationKeys.HADOOP_SECURITY_GROUP_MAPPING,
         "org.apache.hadoop.security.JniBasedUnixGroupsMappingWithFallback");
@@ -88,7 +88,7 @@ public class TestGroupFallback {
     LOG.info("running 'mvn -Pnative -DTestGroupFallback clear test' will " +
         "test the normal path and 'mvn -DTestGroupFallback clear test' will" +
         " test the fall back functionality");
-    Logger.getRootLogger().setLevel(Level.DEBUG);
+    GenericTestUtils.setRootLogLevel(Level.DEBUG);
     Configuration conf = new Configuration();
     conf.set(CommonConfigurationKeys.HADOOP_SECURITY_GROUP_MAPPING,
         "org.apache.hadoop.security.JniBasedUnixGroupsNetgroupMappingWithFallback");

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7cd09527/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUGIWithMiniKdc.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUGIWithMiniKdc.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUGIWithMiniKdc.java
index 2c6c7e4..6c94b1d 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUGIWithMiniKdc.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUGIWithMiniKdc.java
@@ -23,9 +23,9 @@ import org.apache.hadoop.security.authentication.util.KerberosUtil;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.LambdaTestUtils;
 import org.apache.hadoop.util.PlatformName;
-import org.apache.log4j.Level;
 import org.junit.After;
 import org.junit.Test;
+import org.slf4j.event.Level;
 
 import javax.security.auth.Subject;
 import javax.security.auth.kerberos.KerberosPrincipal;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7cd09527/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUserGroupInformation.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUserGroupInformation.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUserGroupInformation.java
index 00062c0..bcb2126 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUserGroupInformation.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUserGroupInformation.java
@@ -33,7 +33,6 @@ import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.Shell;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Time;
-import org.apache.log4j.Level;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
@@ -41,6 +40,7 @@ import org.junit.BeforeClass;
 import org.junit.Test;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
+import org.slf4j.event.Level;
 
 import javax.security.auth.Subject;
 import javax.security.auth.kerberos.KerberosPrincipal;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7cd09527/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestSSLFactory.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestSSLFactory.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestSSLFactory.java
index 5369c9d..4f1aca0 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestSSLFactory.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestSSLFactory.java
@@ -26,7 +26,6 @@ import org.apache.hadoop.security.alias.CredentialProviderFactory;
 import org.apache.hadoop.security.alias.JavaKeyStoreProvider;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.StringUtils;
-import org.apache.log4j.Level;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
@@ -34,6 +33,7 @@ import org.junit.BeforeClass;
 import org.junit.Test;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
+import org.slf4j.event.Level;
 
 import javax.net.ssl.HttpsURLConnection;
 import javax.net.ssl.SSLEngine;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7cd09527/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/web/TestWebDelegationToken.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/web/TestWebDelegationToken.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/web/TestWebDelegationToken.java
index 7319e4c..c564b97 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/web/TestWebDelegationToken.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/web/TestWebDelegationToken.java
@@ -32,7 +32,6 @@ import org.apache.hadoop.security.authentication.server.PseudoAuthenticationHand
 import org.apache.hadoop.security.authentication.util.KerberosUtil;
 import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSecretManager;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.log4j.Level;
 import org.eclipse.jetty.server.Server;
 import org.eclipse.jetty.server.ServerConnector;
 import org.eclipse.jetty.servlet.ServletContextHandler;
@@ -42,6 +41,7 @@ import org.junit.Before;
 import org.junit.Test;
 import org.eclipse.jetty.servlet.FilterHolder;
 import org.eclipse.jetty.servlet.ServletHolder;
+import org.slf4j.event.Level;
 
 import javax.security.auth.Subject;
 import javax.security.auth.kerberos.KerberosPrincipal;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7cd09527/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
index 77a79ff..82a5e08 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
@@ -141,6 +141,20 @@ public abstract class GenericTestUtils {
   }
 
   /**
+   * A helper used in log4j2 migration to accept legacy
+   * org.apache.commons.logging apis.
+   * <p>
+   * And will be removed after migration.
+   *
+   * @param log   a log
+   * @param level level to be set
+   */
+  @Deprecated
+  public static void setLogLevel(Log log, org.slf4j.event.Level level) {
+    setLogLevel(log, Level.toLevel(level.toString()));
+  }
+
+  /**
    * @deprecated
    * use {@link #setLogLevel(org.slf4j.Logger, org.slf4j.event.Level)} instead
    */
@@ -172,6 +186,22 @@ public abstract class GenericTestUtils {
     setLogLevel(toLog4j(logger), Level.toLevel(level.toString()));
   }
 
+  public static void setRootLogLevel(org.slf4j.event.Level level) {
+    setLogLevel(LogManager.getRootLogger(), Level.toLevel(level.toString()));
+  }
+
+  public static org.slf4j.event.Level toLevel(String level) {
+    return toLevel(level, org.slf4j.event.Level.DEBUG);
+  }
+
+  public static org.slf4j.event.Level toLevel(
+      String level, org.slf4j.event.Level defaultLevel) {
+    try {
+      return org.slf4j.event.Level.valueOf(level);
+    } catch (IllegalArgumentException e) {
+      return defaultLevel;
+    }
+  }
   /**
    * Extracts the name of the method where the invocation has happened
    * @return String name of the invoking method

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7cd09527/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/TestGenericTestUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/TestGenericTestUtils.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/TestGenericTestUtils.java
index b3fc836..c1d45cc 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/TestGenericTestUtils.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/TestGenericTestUtils.java
@@ -27,7 +27,9 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import com.google.common.base.Supplier;
+import org.slf4j.event.Level;
 
+import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
@@ -151,4 +153,12 @@ public class TestGenericTestUtils extends GenericTestUtils {
       assertExceptionContains(GenericTestUtils.ERROR_INVALID_ARGUMENT, e);
     }
   }
+
+  @Test
+  public void testToLevel() throws Throwable {
+    assertEquals(Level.INFO, toLevel("INFO"));
+    assertEquals(Level.DEBUG, toLevel("NonExistLevel"));
+    assertEquals(Level.INFO, toLevel("INFO", Level.TRACE));
+    assertEquals(Level.TRACE, toLevel("NonExistLevel", Level.TRACE));
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7cd09527/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/Crc32PerformanceTest.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/Crc32PerformanceTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/Crc32PerformanceTest.java
index 34dfc3a..ce28f50 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/Crc32PerformanceTest.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/Crc32PerformanceTest.java
@@ -27,10 +27,11 @@ import java.util.Random;
 import java.util.zip.CRC32;
 import java.util.zip.Checksum;
 
-import org.apache.commons.logging.LogFactory;
-import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.fs.ChecksumException;
-import org.apache.log4j.Level;
+import org.apache.hadoop.test.GenericTestUtils;
+import org.slf4j.event.Level;
+
+import static org.slf4j.LoggerFactory.getLogger;
 
 /**
  * Performance tests to compare performance of Crc32|Crc32C implementations
@@ -176,8 +177,8 @@ public class Crc32PerformanceTest {
         crcs.add(Crc32.Native.class);
       }
       crcs.add(Crc32.NativeC.class);
-      ((Log4JLogger)LogFactory.getLog(NativeCodeLoader.class))
-          .getLogger().setLevel(Level.ALL);
+      GenericTestUtils.setLogLevel(getLogger(NativeCodeLoader.class),
+          Level.TRACE);
     }
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7cd09527/hadoop-common-project/hadoop-nfs/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-nfs/pom.xml b/hadoop-common-project/hadoop-nfs/pom.xml
index 5fdaf44..bd5ab92 100644
--- a/hadoop-common-project/hadoop-nfs/pom.xml
+++ b/hadoop-common-project/hadoop-nfs/pom.xml
@@ -48,6 +48,12 @@
       <scope>provided</scope>
     </dependency>
     <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-common</artifactId>
+      <scope>test</scope>
+      <type>test-jar</type>
+    </dependency>
+    <dependency>
       <groupId>junit</groupId>
       <artifactId>junit</artifactId>
     </dependency>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7cd09527/hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/oncrpc/TestFrameDecoder.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/oncrpc/TestFrameDecoder.java b/hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/oncrpc/TestFrameDecoder.java
index 9d0fe0f..0e416b3 100644
--- a/hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/oncrpc/TestFrameDecoder.java
+++ b/hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/oncrpc/TestFrameDecoder.java
@@ -28,8 +28,7 @@ import java.util.Random;
 import org.apache.hadoop.oncrpc.RpcUtil.RpcFrameDecoder;
 import org.apache.hadoop.oncrpc.security.CredentialsNone;
 import org.apache.hadoop.oncrpc.security.VerifierNone;
-import org.apache.log4j.Level;
-import org.apache.commons.logging.impl.Log4JLogger;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.jboss.netty.buffer.ByteBufferBackedChannelBuffer;
 import org.jboss.netty.buffer.ChannelBuffer;
 import org.jboss.netty.buffer.ChannelBuffers;
@@ -38,11 +37,12 @@ import org.jboss.netty.channel.ChannelException;
 import org.jboss.netty.channel.ChannelHandlerContext;
 import org.junit.Test;
 import org.mockito.Mockito;
+import org.slf4j.event.Level;
 
 public class TestFrameDecoder {
   
   static {
-    ((Log4JLogger) RpcProgram.LOG).getLogger().setLevel(Level.ALL);
+    GenericTestUtils.setLogLevel(RpcProgram.LOG, Level.TRACE);
   }
 
   private static int resultSize;



[44/50] [abbrv] hadoop git commit: HDFS-12085. Reconfigure namenode heartbeat interval fails if the interval was set with a time unit. Contributed by Weiwei Yang.

Posted by as...@apache.org.
HDFS-12085. Reconfigure namenode heartbeat interval fails if the interval was set with a time unit. Contributed by Weiwei Yang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3a7f02b8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3a7f02b8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3a7f02b8

Branch: refs/heads/YARN-5972
Commit: 3a7f02b81520ad4d3eebf92e9dbca662beec0302
Parents: 34f113d
Author: Yiqun Lin <yq...@apache.org>
Authored: Tue Jul 11 20:40:11 2017 +0800
Committer: Yiqun Lin <yq...@apache.org>
Committed: Tue Jul 11 20:40:11 2017 +0800

----------------------------------------------------------------------
 .../main/java/org/apache/hadoop/conf/Configuration.java  | 10 +++++++++-
 .../org/apache/hadoop/hdfs/server/namenode/NameNode.java |  5 ++++-
 .../hdfs/server/namenode/TestNameNodeReconfigure.java    | 11 +++++++++++
 3 files changed, 24 insertions(+), 2 deletions(-)
----------------------------------------------------------------------
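
The fix routes the reconfigured value through Configuration's duration parser instead of Long.parseLong(). A hedged sketch of the newly public helper (per its javadoc, valid suffixes are ns, us, ms, s, m, h and d; the key string mirrors DFS_HEARTBEAT_INTERVAL_KEY):

import java.util.concurrent.TimeUnit;
import org.apache.hadoop.conf.Configuration;

public class HeartbeatIntervalSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // "1m" carries its own unit, so the value no longer has to be a bare
    // number of seconds; the helper converts it to the requested TimeUnit.
    long seconds = conf.getTimeDurationHelper(
        "dfs.heartbeat.interval", "1m", TimeUnit.SECONDS);
    System.out.println(seconds); // prints 60, matching the new test case
  }
}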


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3a7f02b8/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
index d3dd822..550aee7 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
@@ -1667,7 +1667,15 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
     }
   }
 
-  private long getTimeDurationHelper(String name, String vStr, TimeUnit unit) {
+  /**
+   * Return time duration in the given time unit. Valid units are encoded in
+   * properties as suffixes: nanoseconds (ns), microseconds (us), milliseconds
+   * (ms), seconds (s), minutes (m), hours (h), and days (d).
+   * @param name Property name
+   * @param vStr The string value with time unit suffix to be converted.
+   * @param unit Unit to convert the stored property, if it exists.
+   */
+  public long getTimeDurationHelper(String name, String vStr, TimeUnit unit) {
     vStr = vStr.trim();
     vStr = StringUtils.toLowerCase(vStr);
     ParsedTimeDuration vUnit = ParsedTimeDuration.unitFor(vStr);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3a7f02b8/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
index 5d0e8cb..79bbbc5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
@@ -2050,7 +2050,10 @@ public class NameNode extends ReconfigurableBase implements
         datanodeManager.setHeartbeatInterval(DFS_HEARTBEAT_INTERVAL_DEFAULT);
         return String.valueOf(DFS_HEARTBEAT_INTERVAL_DEFAULT);
       } else {
-        datanodeManager.setHeartbeatInterval(Long.parseLong(newVal));
+        long newInterval = getConf()
+            .getTimeDurationHelper(DFS_HEARTBEAT_INTERVAL_KEY,
+                newVal, TimeUnit.SECONDS);
+        datanodeManager.setHeartbeatInterval(newInterval);
         return String.valueOf(datanodeManager.getHeartbeatInterval());
       }
     } catch (NumberFormatException nfe) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3a7f02b8/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeReconfigure.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeReconfigure.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeReconfigure.java
index 7b4061a..6b553df 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeReconfigure.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeReconfigure.java
@@ -182,6 +182,17 @@ public class TestNameNodeReconfigure {
         + " has wrong value", 10 * 60 * 1000,
         datanodeManager.getHeartbeatRecheckInterval());
 
+    // change to a value with time unit
+    nameNode.reconfigureProperty(DFS_HEARTBEAT_INTERVAL_KEY, "1m");
+
+    assertEquals(
+        DFS_HEARTBEAT_INTERVAL_KEY + " has wrong value",
+        60,
+        nameNode.getConf().getLong(DFS_HEARTBEAT_INTERVAL_KEY,
+            DFS_HEARTBEAT_INTERVAL_DEFAULT));
+    assertEquals(DFS_HEARTBEAT_INTERVAL_KEY + " has wrong value", 60,
+        datanodeManager.getHeartbeatInterval());
+
     // revert to defaults
     nameNode.reconfigureProperty(DFS_HEARTBEAT_INTERVAL_KEY, null);
     nameNode.reconfigureProperty(DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY,



[02/50] [abbrv] hadoop git commit: Revert "HDFS-11696. Fix warnings from Spotbugs in hadoop-hdfs. Contributed by Yiqun Lin."

Posted by as...@apache.org.
Revert "HDFS-11696. Fix warnings from Spotbugs in hadoop-hdfs. Contributed by Yiqun Lin."

This reverts commit 89a8edc0149e3f31a5ade9a0927c4b6332cf6b1a.

 Conflicts:
	hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/441378e7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/441378e7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/441378e7

Branch: refs/heads/YARN-5972
Commit: 441378e7e4609d89b7181dacc8ba92b253a962df
Parents: 5a75f73
Author: Andrew Wang <wa...@apache.org>
Authored: Thu Jun 29 13:54:16 2017 -0700
Committer: Andrew Wang <wa...@apache.org>
Committed: Thu Jun 29 13:54:16 2017 -0700

----------------------------------------------------------------------
 .../java/org/apache/hadoop/hdfs/DFSClient.java  |  7 ++---
 .../hdfs/server/protocol/SlowDiskReports.java   |  5 ++--
 .../dev-support/findbugsExcludeFile.xml         |  5 ----
 .../hdfs/qjournal/server/JournalNode.java       | 16 +++++-------
 .../hdfs/server/common/HdfsServerConstants.java |  7 +----
 .../hdfs/server/datanode/DataStorage.java       | 12 +++------
 .../namenode/NNStorageRetentionManager.java     | 27 +++++++++-----------
 .../org/apache/hadoop/hdfs/tools/DFSAdmin.java  |  6 ++---
 .../offlineImageViewer/ImageLoaderCurrent.java  | 10 +++-----
 9 files changed, 33 insertions(+), 62 deletions(-)
----------------------------------------------------------------------
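
For context on the SlowDiskReports hunk below: the reverted change had replaced keySet() iteration with an entrySet() walk, the usual fix for the Spotbugs inefficient-map-iterator warning (WMI_WRONG_MAP_ITERATOR). A minimal illustration of the two forms (not the Hadoop code itself):

import java.util.Map;

class MapIterationSketch {
  // entrySet(): one traversal, no extra lookup per key.
  static boolean sameValues(Map<String, Double> a, Map<String, Double> b) {
    for (Map.Entry<String, Double> e : a.entrySet()) {
      if (!e.getValue().equals(b.get(e.getKey()))) {
        return false;
      }
    }
    return true;
  }

  // keySet(): the form the revert restores; each pass pays for a second
  // map lookup via a.get(k).
  static boolean sameValuesViaKeys(Map<String, Double> a,
      Map<String, Double> b) {
    for (String k : a.keySet()) {
      if (!a.get(k).equals(b.get(k))) {
        return false;
      }
    }
    return true;
  }
}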


http://git-wip-us.apache.org/repos/asf/hadoop/blob/441378e7/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index 1f6022c..8acda61 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -2883,12 +2883,9 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
     }
     synchronized (DFSClient.class) {
       if (STRIPED_READ_THREAD_POOL == null) {
-        // Only after thread pool is fully constructed then save it to
-        // volatile field.
-        ThreadPoolExecutor threadPool = DFSUtilClient.getThreadPoolExecutor(1,
+        STRIPED_READ_THREAD_POOL = DFSUtilClient.getThreadPoolExecutor(1,
             numThreads, 60, "StripedRead-", true);
-        threadPool.allowCoreThreadTimeOut(true);
-        STRIPED_READ_THREAD_POOL = threadPool;
+        STRIPED_READ_THREAD_POOL.allowCoreThreadTimeOut(true);
       }
     }
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/441378e7/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/protocol/SlowDiskReports.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/protocol/SlowDiskReports.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/protocol/SlowDiskReports.java
index 496389a..8095c2a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/protocol/SlowDiskReports.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/protocol/SlowDiskReports.java
@@ -101,9 +101,8 @@ public final class SlowDiskReports {
     }
 
     boolean areEqual;
-    for (Map.Entry<String, Map<DiskOp, Double>> entry : this.slowDisks
-        .entrySet()) {
-      if (!entry.getValue().equals(that.slowDisks.get(entry.getKey()))) {
+    for (String disk : this.slowDisks.keySet()) {
+      if (!this.slowDisks.get(disk).equals(that.slowDisks.get(disk))) {
         return false;
       }
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/441378e7/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml b/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml
index 9270990..be54efb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml
@@ -252,9 +252,4 @@
         <Class name="org.apache.hadoop.hdfs.server.datanode.checker.AbstractFuture" />
         <Bug pattern="NS_DANGEROUS_NON_SHORT_CIRCUIT" />
     </Match>
-    <Match>
-        <Class name="org.apache.hadoop.hdfs.server.namenode.NNUpgradeUtil$1" />
-        <Method name="visitFile" />
-        <Bug pattern="NP_NULL_ON_SOME_PATH_FROM_RETURN_VALUE" />
-    </Match>
  </FindBugsFilter>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/441378e7/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNode.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNode.java
index 6056e34..af7a84f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNode.java
@@ -299,18 +299,14 @@ public class JournalNode implements Tool, Configurable, JournalNodeMXBean {
         return file.isDirectory();
       }
     });
-
-    if (journalDirs != null) {
-      for (File journalDir : journalDirs) {
-        String jid = journalDir.getName();
-        if (!status.containsKey(jid)) {
-          Map<String, String> jMap = new HashMap<String, String>();
-          jMap.put("Formatted", "true");
-          status.put(jid, jMap);
-        }
+    for (File journalDir : journalDirs) {
+      String jid = journalDir.getName();
+      if (!status.containsKey(jid)) {
+        Map<String, String> jMap = new HashMap<String, String>();
+        jMap.put("Formatted", "true");
+        status.put(jid, jMap);
       }
     }
-
     return JSON.toString(status);
   }
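
The null check being removed above exists because java.io.File#listFiles returns null, not an empty array, when the path is not a directory or an I/O error occurs; the DataStorage and NNStorageRetentionManager hunks below drop the same guard. A minimal sketch of the guarded form, with illustrative names:

import java.io.File;
import java.io.FileFilter;

// Sketch: guard against the null that listFiles() returns on I/O error
// or when the path is not a directory.
class ListFilesSketch {
  static int countSubDirs(File root) {
    File[] dirs = root.listFiles(new FileFilter() {
      @Override
      public boolean accept(File f) {
        return f.isDirectory();
      }
    });
    return (dirs == null) ? 0 : dirs.length;
  }
}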
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/441378e7/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java
index ab50eb4..c3098f3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java
@@ -188,10 +188,8 @@ public interface HdfsServerConstants {
         return NamenodeRole.NAMENODE;
       }
     }
-
+    
     public void setClusterId(String cid) {
-      Preconditions.checkState(this == UPGRADE || this == UPGRADEONLY
-          || this == FORMAT);
       clusterId = cid;
     }
 
@@ -216,7 +214,6 @@ public interface HdfsServerConstants {
     }
 
     public void setForce(int force) {
-      Preconditions.checkState(this == RECOVER);
       this.force = force;
     }
     
@@ -229,7 +226,6 @@ public interface HdfsServerConstants {
     }
     
     public void setForceFormat(boolean force) {
-      Preconditions.checkState(this == FORMAT);
       isForceFormat = force;
     }
     
@@ -238,7 +234,6 @@ public interface HdfsServerConstants {
     }
     
     public void setInteractiveFormat(boolean interactive) {
-      Preconditions.checkState(this == FORMAT);
       isInteractiveFormat = interactive;
     }
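
The Preconditions.checkState calls removed in this file documented which StartupOption values each setter applies to and failed fast on misuse. A minimal sketch of that guard idiom (the enum and field names are illustrative, not the real StartupOption):

import com.google.common.base.Preconditions;

// Sketch: restrict a mutable enum setter to the constants it applies to.
enum StartupMode {
  FORMAT, RECOVER, REGULAR;

  private int force;

  public void setForce(int force) {
    Preconditions.checkState(this == RECOVER,
        "setForce only applies to RECOVER");
    this.force = force;
  }
}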
     

http://git-wip-us.apache.org/repos/asf/hadoop/blob/441378e7/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
index 6d6e96a..9a71081 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
@@ -1336,14 +1336,10 @@ public class DataStorage extends Storage {
           return name.startsWith(BLOCK_SUBDIR_PREFIX);
         }
       });
-
-    if (otherNames != null) {
-      for (int i = 0; i < otherNames.length; i++) {
-        linkBlocksHelper(new File(from, otherNames[i]),
-            new File(to, otherNames[i]), oldLV, hl, upgradeToIdBasedLayout,
-            blockRoot, idBasedLayoutSingleLinks);
-      }
-    }
+    for(int i = 0; i < otherNames.length; i++)
+      linkBlocksHelper(new File(from, otherNames[i]),
+          new File(to, otherNames[i]), oldLV, hl, upgradeToIdBasedLayout,
+          blockRoot, idBasedLayoutSingleLinks);
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/441378e7/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorageRetentionManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorageRetentionManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorageRetentionManager.java
index 2a83541..98b7e9a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorageRetentionManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorageRetentionManager.java
@@ -255,27 +255,24 @@ public class NNStorageRetentionManager {
     });
 
     // Check whether there is any work to do.
-    if (filesInStorage != null
-        && filesInStorage.length <= numCheckpointsToRetain) {
+    if (filesInStorage.length <= numCheckpointsToRetain) {
       return;
     }
 
     // Create a sorted list of txids from the file names.
     TreeSet<Long> sortedTxIds = new TreeSet<Long>();
-    if (filesInStorage != null) {
-      for (String fName : filesInStorage) {
-        // Extract the transaction id from the file name.
-        long fTxId;
-        try {
-          fTxId = Long.parseLong(fName.substring(oivImagePrefix.length() + 1));
-        } catch (NumberFormatException nfe) {
-          // This should not happen since we have already filtered it.
-          // Log and continue.
-          LOG.warn("Invalid file name. Skipping " + fName);
-          continue;
-        }
-        sortedTxIds.add(Long.valueOf(fTxId));
+    for (String fName : filesInStorage) {
+      // Extract the transaction id from the file name.
+      long fTxId;
+      try {
+        fTxId = Long.parseLong(fName.substring(oivImagePrefix.length() + 1));
+      } catch (NumberFormatException nfe) {
+        // This should not happen since we have already filtered it.
+        // Log and continue.
+        LOG.warn("Invalid file name. Skipping " + fName);
+        continue;
       }
+      sortedTxIds.add(Long.valueOf(fTxId));
     }
 
     int numFilesToDelete = sortedTxIds.size() - numCheckpointsToRetain;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/441378e7/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
index b771ff0..06f408d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
@@ -1977,7 +1977,7 @@ public class DFSAdmin extends FsShell {
         return exitCode;
       }
     } else if ("-report".equals(cmd)) {
-      if (argv.length > 6) {
+      if (argv.length < 1) {
         printUsage(cmd);
         return exitCode;
       }
@@ -2007,7 +2007,7 @@ public class DFSAdmin extends FsShell {
         return exitCode;
       }
     } else if (RollingUpgradeCommand.matches(cmd)) {
-      if (argv.length > 2) {
+      if (argv.length < 1 || argv.length > 2) {
         printUsage(cmd);
         return exitCode;
       }
@@ -2082,7 +2082,7 @@ public class DFSAdmin extends FsShell {
         return exitCode;
       }
     } else if ("-triggerBlockReport".equals(cmd)) {
-      if ((argv.length != 2) && (argv.length != 3)) {
+      if (argv.length < 1) {
         printUsage(cmd);
         return exitCode;
       }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/441378e7/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageLoaderCurrent.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageLoaderCurrent.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageLoaderCurrent.java
index 2e2eaf4..f2c7427 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageLoaderCurrent.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageLoaderCurrent.java
@@ -722,13 +722,9 @@ class ImageLoaderCurrent implements ImageLoader {
       if (supportSnapshot && supportInodeId) {
         dirNodeMap.put(inodeId, pathName);
       }
-
-      v.visit(ImageElement.NS_QUOTA, in.readLong());
-      if (NameNodeLayoutVersion.supports(Feature.DISKSPACE_QUOTA,
-          imageVersion)) {
-        v.visit(ImageElement.DS_QUOTA, in.readLong());
-      }
-
+      v.visit(ImageElement.NS_QUOTA, numBlocks == -1 ? in.readLong() : -1);
+      if (NameNodeLayoutVersion.supports(Feature.DISKSPACE_QUOTA, imageVersion))
+        v.visit(ImageElement.DS_QUOTA, numBlocks == -1 ? in.readLong() : -1);
       if (supportSnapshot) {
         boolean snapshottable = in.readBoolean();
         if (!snapshottable) {




[27/50] [abbrv] hadoop git commit: Revert "HADOOP-14587. Use GenericTestUtils.setLogLevel when available in hadoop-common. Contributed by Wenxin He."

Posted by as...@apache.org.
Revert "HADOOP-14587. Use GenericTestUtils.setLogLevel when available in hadoop-common. Contributed by Wenxin He."

This reverts commit 82cb2a6497caa7c5e693aa41ad18e92f1c7eb16a.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8fc5dcc2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8fc5dcc2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8fc5dcc2

Branch: refs/heads/YARN-5972
Commit: 8fc5dcc2a199c6b202e55c4cfdf5ae4eb09ef003
Parents: 8153fe2
Author: Akira Ajisaka <aa...@apache.org>
Authored: Sat Jul 8 02:53:18 2017 +0900
Committer: Akira Ajisaka <aa...@apache.org>
Committed: Sat Jul 8 02:53:18 2017 +0900

----------------------------------------------------------------------
 .../fs/FileContextCreateMkdirBaseTest.java      |  2 +-
 .../hadoop/fs/FileContextPermissionBase.java    |  5 ++--
 .../apache/hadoop/fs/FileContextUtilBase.java   |  5 ++--
 .../org/apache/hadoop/fs/TestListFiles.java     |  4 +--
 .../fs/TestLocalFileSystemPermission.java       |  2 +-
 .../ha/TestActiveStandbyElectorRealZK.java      |  7 +++--
 .../apache/hadoop/ha/TestSshFenceByTcpPort.java |  6 ++--
 .../hadoop/ha/TestZKFailoverController.java     |  5 ++--
 .../io/serializer/TestSerializationFactory.java |  6 ++--
 .../org/apache/hadoop/ipc/MiniRPCBenchmark.java | 13 +++++----
 .../java/org/apache/hadoop/ipc/TestIPC.java     |  5 ++--
 .../apache/hadoop/ipc/TestMiniRPCBenchmark.java |  2 +-
 .../java/org/apache/hadoop/ipc/TestRPC.java     |  2 +-
 .../java/org/apache/hadoop/ipc/TestSaslRPC.java | 16 +++++------
 .../hadoop/security/TestGroupFallback.java      | 12 ++++----
 .../hadoop/security/TestUGIWithMiniKdc.java     |  2 +-
 .../security/TestUserGroupInformation.java      |  2 +-
 .../hadoop/security/ssl/TestSSLFactory.java     |  2 +-
 .../delegation/web/TestWebDelegationToken.java  |  2 +-
 .../apache/hadoop/test/GenericTestUtils.java    | 30 --------------------
 .../hadoop/test/TestGenericTestUtils.java       | 10 -------
 .../hadoop/util/Crc32PerformanceTest.java       | 11 ++++---
 hadoop-common-project/hadoop-nfs/pom.xml        |  6 ----
 .../apache/hadoop/oncrpc/TestFrameDecoder.java  |  6 ++--
 .../TestDFSStripedOutputStreamWithFailure.java  | 28 ------------------
 25 files changed, 59 insertions(+), 132 deletions(-)
----------------------------------------------------------------------
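
For context, this revert trades the slf4j-based GenericTestUtils.setLogLevel helpers back for direct log4j manipulation throughout these tests. A minimal side-by-side sketch (FileSystem.LOG is just an illustrative logger; the slf4j overload shown compiles only against the pre-revert tree, since this patch deletes it):

import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.test.GenericTestUtils;

// Sketch: the two log-level idioms this revert trades between.
class LogLevelIdioms {
  static void log4jDirect() {
    // Restored by this revert: cast the commons-logging wrapper and set
    // the log4j level directly.
    ((Log4JLogger) FileSystem.LOG).getLogger()
        .setLevel(org.apache.log4j.Level.ALL);
  }

  static void viaGenericTestUtils() {
    // HADOOP-14587 style (removed by this revert): slf4j Level routed
    // through the test helper.
    GenericTestUtils.setLogLevel(FileSystem.LOG, org.slf4j.event.Level.TRACE);
  }
}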


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8fc5dcc2/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextCreateMkdirBaseTest.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextCreateMkdirBaseTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextCreateMkdirBaseTest.java
index fbd598c..c1de27a 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextCreateMkdirBaseTest.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextCreateMkdirBaseTest.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.fs;
 
 import java.io.IOException;
 
+import org.apache.log4j.Level;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
@@ -29,7 +30,6 @@ import static org.apache.hadoop.fs.contract.ContractTestUtils.assertIsDirectory;
 import static org.apache.hadoop.fs.contract.ContractTestUtils.assertIsFile;
 
 import org.apache.hadoop.test.GenericTestUtils;
-import org.slf4j.event.Level;
 
 /**
  * <p>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8fc5dcc2/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextPermissionBase.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextPermissionBase.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextPermissionBase.java
index 240989e..dff89f9 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextPermissionBase.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextPermissionBase.java
@@ -23,7 +23,6 @@ import java.util.ArrayList;
 import java.util.List;
 import java.util.StringTokenizer;
 
-import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.Assert;
 
 import org.apache.hadoop.fs.permission.FsPermission;
@@ -33,7 +32,6 @@ import org.apache.hadoop.util.StringUtils;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
-import org.slf4j.event.Level;
 
 import static org.apache.hadoop.fs.FileContextTestHelper.*;
 import static org.apache.hadoop.test.PlatformAssumptions.assumeNotWindows;
@@ -63,7 +61,8 @@ public abstract class FileContextPermissionBase {
   
   {
     try {
-      GenericTestUtils.setLogLevel(FileSystem.LOG, Level.DEBUG);
+      ((org.apache.commons.logging.impl.Log4JLogger)FileSystem.LOG).getLogger()
+      .setLevel(org.apache.log4j.Level.DEBUG);
     }
     catch(Exception e) {
       System.out.println("Cannot change log level\n"

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8fc5dcc2/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextUtilBase.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextUtilBase.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextUtilBase.java
index 0a96d3e..bebf4bf 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextUtilBase.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextUtilBase.java
@@ -23,12 +23,10 @@ import static org.junit.Assert.assertTrue;
 
 import java.util.Arrays;
 
-import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.StringUtils;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
-import org.slf4j.event.Level;
 
 /**
  * <p>
@@ -50,7 +48,8 @@ public abstract class FileContextUtilBase {
   
   {
     try {
-      GenericTestUtils.setLogLevel(FileSystem.LOG, Level.DEBUG);
+      ((org.apache.commons.logging.impl.Log4JLogger)FileSystem.LOG).getLogger()
+      .setLevel(org.apache.log4j.Level.DEBUG);
     } catch(Exception e) {
       System.out.println("Cannot change log level\n"
           + StringUtils.stringifyException(e));

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8fc5dcc2/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestListFiles.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestListFiles.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestListFiles.java
index 44308ea..010754f 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestListFiles.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestListFiles.java
@@ -24,18 +24,18 @@ import java.util.Set;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.log4j.Level;
 
 import static org.junit.Assert.*;
 import org.junit.Test;
 import org.junit.BeforeClass;
-import org.slf4j.event.Level;
 
 /**
  * This class tests the FileStatus API.
  */
 public class TestListFiles {
   static {
-    GenericTestUtils.setLogLevel(FileSystem.LOG, Level.TRACE);
+    GenericTestUtils.setLogLevel(FileSystem.LOG, Level.ALL);
   }
 
   static final long seed = 0xDEADBEEFL;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8fc5dcc2/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystemPermission.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystemPermission.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystemPermission.java
index 81756f9..1478111 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystemPermission.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystemPermission.java
@@ -21,10 +21,10 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.Shell;
+import org.apache.log4j.Level;
 import org.junit.Test;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
-import org.slf4j.event.Level;
 
 import java.io.IOException;
 import java.util.ArrayList;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8fc5dcc2/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestActiveStandbyElectorRealZK.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestActiveStandbyElectorRealZK.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestActiveStandbyElectorRealZK.java
index bbef9ef..59c0886 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestActiveStandbyElectorRealZK.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestActiveStandbyElectorRealZK.java
@@ -24,11 +24,12 @@ import static org.junit.Assert.assertTrue;
 import java.util.Collections;
 import java.util.UUID;
 
+import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.ha.ActiveStandbyElector.ActiveStandbyElectorCallback;
 import org.apache.hadoop.ha.ActiveStandbyElector.State;
-import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.ZKUtil.ZKAuthInfo;
+import org.apache.log4j.Level;
 import org.apache.zookeeper.CreateMode;
 import org.apache.zookeeper.ZooDefs.Ids;
 import org.apache.zookeeper.ZooKeeper;
@@ -38,7 +39,6 @@ import org.mockito.AdditionalMatchers;
 import org.mockito.Mockito;
 
 import com.google.common.primitives.Ints;
-import org.slf4j.event.Level;
 
 /**
  * Test for {@link ActiveStandbyElector} using real zookeeper.
@@ -47,7 +47,8 @@ public class TestActiveStandbyElectorRealZK extends ClientBaseWithFixes {
   static final int NUM_ELECTORS = 2;
   
   static {
-    GenericTestUtils.setLogLevel(ActiveStandbyElector.LOG, Level.TRACE);
+    ((Log4JLogger)ActiveStandbyElector.LOG).getLogger().setLevel(
+        Level.ALL);
   }
   
   static final String PARENT_DIR = "/" + UUID.randomUUID();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8fc5dcc2/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestSshFenceByTcpPort.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestSshFenceByTcpPort.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestSshFenceByTcpPort.java
index f0ebc1e..51fb829 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestSshFenceByTcpPort.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestSshFenceByTcpPort.java
@@ -21,18 +21,18 @@ import static org.junit.Assert.*;
 
 import java.net.InetSocketAddress;
 
+import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
 import org.apache.hadoop.ha.SshFenceByTcpPort.Args;
-import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.log4j.Level;
 import org.junit.Assume;
 import org.junit.Test;
-import org.slf4j.event.Level;
 
 public class TestSshFenceByTcpPort {
 
   static {
-    GenericTestUtils.setLogLevel(SshFenceByTcpPort.LOG, Level.TRACE);
+    ((Log4JLogger)SshFenceByTcpPort.LOG).getLogger().setLevel(Level.ALL);
   }
 
   private static String TEST_FENCING_HOST = System.getProperty(

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8fc5dcc2/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestZKFailoverController.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestZKFailoverController.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestZKFailoverController.java
index 0fa8e86..846c8ae 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestZKFailoverController.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestZKFailoverController.java
@@ -22,6 +22,7 @@ import static org.junit.Assert.*;
 import java.security.NoSuchAlgorithmException;
 
 import com.google.common.base.Supplier;
+import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
 import org.apache.hadoop.ha.HAServiceProtocol.StateChangeRequestInfo;
@@ -29,6 +30,7 @@ import org.apache.hadoop.ha.HealthMonitor.State;
 import org.apache.hadoop.ha.MiniZKFCCluster.DummyZKFC;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.Time;
+import org.apache.log4j.Level;
 import org.apache.zookeeper.KeeperException;
 import org.apache.zookeeper.ZooKeeper;
 import org.apache.zookeeper.data.Stat;
@@ -39,7 +41,6 @@ import org.junit.Rule;
 import org.junit.Test;
 import org.junit.rules.Timeout;
 import org.mockito.Mockito;
-import org.slf4j.event.Level;
 
 public class TestZKFailoverController extends ClientBaseWithFixes {
   private Configuration conf;
@@ -70,7 +71,7 @@ public class TestZKFailoverController extends ClientBaseWithFixes {
     "digest:" + DIGEST_USER_HASH + ":rwcda";
   
   static {
-    GenericTestUtils.setLogLevel(ActiveStandbyElector.LOG, Level.TRACE);
+    ((Log4JLogger)ActiveStandbyElector.LOG).getLogger().setLevel(Level.ALL);
   }
   
   @Before

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8fc5dcc2/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/serializer/TestSerializationFactory.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/serializer/TestSerializationFactory.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/serializer/TestSerializationFactory.java
index 2cde3e3..6774155 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/serializer/TestSerializationFactory.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/serializer/TestSerializationFactory.java
@@ -18,21 +18,21 @@
 package org.apache.hadoop.io.serializer;
 
 import org.apache.hadoop.io.LongWritable;
-import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.BeforeClass;
 import org.junit.Test;
 import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertNotNull;
 
+import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.io.Writable;
-import org.slf4j.event.Level;
+import org.apache.log4j.Level;
 
 public class TestSerializationFactory {
 
   static {
-    GenericTestUtils.setLogLevel(SerializationFactory.LOG, Level.TRACE);
+    ((Log4JLogger) SerializationFactory.LOG).getLogger().setLevel(Level.ALL);
   }
 
   static Configuration conf;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8fc5dcc2/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/MiniRPCBenchmark.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/MiniRPCBenchmark.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/MiniRPCBenchmark.java
index 2290270..cdbd557 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/MiniRPCBenchmark.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/MiniRPCBenchmark.java
@@ -26,9 +26,9 @@ import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Enumeration;
 
-import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.Assert;
 
+import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.net.NetUtils;
@@ -43,7 +43,8 @@ import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSelect
 import org.apache.hadoop.security.token.delegation.TestDelegationToken.TestDelegationTokenIdentifier;
 import org.apache.hadoop.security.token.delegation.TestDelegationToken.TestDelegationTokenSecretManager;
 import org.apache.hadoop.util.Time;
-import org.slf4j.event.Level;
+import org.apache.log4j.Level;
+import org.apache.log4j.LogManager;
 
 /**
  * MiniRPCBenchmark measures time to establish an RPC connection 
@@ -254,9 +255,9 @@ public class MiniRPCBenchmark {
   }
 
   static void setLoggingLevel(Level level) {
-    GenericTestUtils.setLogLevel(Server.LOG, level);
-    GenericTestUtils.setLogLevel(Server.AUDITLOG, level);
-    GenericTestUtils.setLogLevel(Client.LOG, level);
+    LogManager.getLogger(Server.class.getName()).setLevel(level);
+    ((Log4JLogger)Server.AUDITLOG).getLogger().setLevel(level);
+    LogManager.getLogger(Client.class.getName()).setLevel(level);
   }
 
   /**
@@ -369,7 +370,7 @@ public class MiniRPCBenchmark {
       useDelegationToken = args[3].equalsIgnoreCase("useToken");
     Level l = Level.ERROR;
     if(args.length > 4)
-      l = GenericTestUtils.toLevel(args[4]);
+      l = Level.toLevel(args[4]);
 
     MiniRPCBenchmark mb = new MiniRPCBenchmark(l);
     long elapsedTime = 0;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8fc5dcc2/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java
index 4bda637..1d47473 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java
@@ -60,6 +60,7 @@ import javax.net.SocketFactory;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
@@ -84,6 +85,7 @@ import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
 import org.apache.hadoop.security.token.SecretManager.InvalidToken;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.StringUtils;
+import org.apache.log4j.Level;
 import org.junit.Assert;
 import org.junit.Assume;
 import org.junit.Before;
@@ -96,7 +98,6 @@ import org.mockito.stubbing.Answer;
 import com.google.common.base.Supplier;
 import com.google.common.primitives.Bytes;
 import com.google.common.primitives.Ints;
-import org.slf4j.event.Level;
 
 /** Unit tests for IPC. */
 public class TestIPC {
@@ -863,7 +864,7 @@ public class TestIPC {
 
   @Test(timeout=30000)
   public void testConnectionIdleTimeouts() throws Exception {
-    GenericTestUtils.setLogLevel(Server.LOG, Level.DEBUG);
+    ((Log4JLogger)Server.LOG).getLogger().setLevel(Level.DEBUG);
     final int maxIdle = 1000;
     final int cleanupInterval = maxIdle*3/4; // stagger cleanups
     final int killMax = 3;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8fc5dcc2/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestMiniRPCBenchmark.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestMiniRPCBenchmark.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestMiniRPCBenchmark.java
index a130fa9..0f34be8 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestMiniRPCBenchmark.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestMiniRPCBenchmark.java
@@ -18,8 +18,8 @@
 package org.apache.hadoop.ipc;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.log4j.Level;
 import org.junit.Test;
-import org.slf4j.event.Level;
 
 /**
  * Test {@link MiniRPCBenchmark}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8fc5dcc2/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java
index 8725cf4..166b205 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java
@@ -50,11 +50,11 @@ import org.apache.hadoop.security.token.TokenIdentifier;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.MetricsAsserts;
 import org.apache.hadoop.test.MockitoUtil;
+import org.apache.log4j.Level;
 import org.junit.Before;
 import org.junit.Test;
 import org.mockito.Mockito;
 import org.mockito.internal.util.reflection.Whitebox;
-import org.slf4j.event.Level;
 
 import javax.net.SocketFactory;
 import java.io.Closeable;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8fc5dcc2/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestSaslRPC.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestSaslRPC.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestSaslRPC.java
index 7608cb4..c48ff2e 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestSaslRPC.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestSaslRPC.java
@@ -22,6 +22,7 @@ import com.google.protobuf.ServiceException;
 import org.apache.commons.lang.StringUtils;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
@@ -35,7 +36,7 @@ import org.apache.hadoop.security.SaslRpcServer.QualityOfProtection;
 import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
 import org.apache.hadoop.security.token.*;
 import org.apache.hadoop.security.token.SecretManager.InvalidToken;
-import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.log4j.Level;
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.BeforeClass;
@@ -43,7 +44,6 @@ import org.junit.Test;
 import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
 import org.junit.runners.Parameterized.Parameters;
-import org.slf4j.event.Level;
 
 import javax.security.auth.callback.Callback;
 import javax.security.auth.callback.CallbackHandler;
@@ -186,12 +186,12 @@ public class TestSaslRPC extends TestRpcBase {
   }
 
   static {
-    GenericTestUtils.setLogLevel(Client.LOG, Level.TRACE);
-    GenericTestUtils.setLogLevel(Server.LOG, Level.TRACE);
-    GenericTestUtils.setLogLevel(SaslRpcClient.LOG, Level.TRACE);
-    GenericTestUtils.setLogLevel(SaslRpcServer.LOG, Level.TRACE);
-    GenericTestUtils.setLogLevel(SaslInputStream.LOG, Level.TRACE);
-    GenericTestUtils.setLogLevel(SecurityUtil.LOG, Level.TRACE);
+    ((Log4JLogger) Client.LOG).getLogger().setLevel(Level.ALL);
+    ((Log4JLogger) Server.LOG).getLogger().setLevel(Level.ALL);
+    ((Log4JLogger) SaslRpcClient.LOG).getLogger().setLevel(Level.ALL);
+    ((Log4JLogger) SaslRpcServer.LOG).getLogger().setLevel(Level.ALL);
+    ((Log4JLogger) SaslInputStream.LOG).getLogger().setLevel(Level.ALL);
+    ((Log4JLogger) SecurityUtil.LOG).getLogger().setLevel(Level.ALL);
   }
 
   public static class BadTokenSecretManager extends TestTokenSecretManager {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8fc5dcc2/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestGroupFallback.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestGroupFallback.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestGroupFallback.java
index 85f17b1..a61eee6 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestGroupFallback.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestGroupFallback.java
@@ -25,16 +25,16 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
-import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.log4j.Level;
+import org.apache.log4j.Logger;
 import org.junit.Test;
-import org.slf4j.event.Level;
 
 public class TestGroupFallback {
   public static final Log LOG = LogFactory.getLog(TestGroupFallback.class);
 
   @Test
   public void testGroupShell() throws Exception {
-    GenericTestUtils.setRootLogLevel(Level.DEBUG);
+    Logger.getRootLogger().setLevel(Level.DEBUG);
     Configuration conf = new Configuration();
     conf.set(CommonConfigurationKeys.HADOOP_SECURITY_GROUP_MAPPING,
         "org.apache.hadoop.security.ShellBasedUnixGroupsMapping");
@@ -50,7 +50,7 @@ public class TestGroupFallback {
 
   @Test
   public void testNetgroupShell() throws Exception {
-    GenericTestUtils.setRootLogLevel(Level.DEBUG);
+    Logger.getRootLogger().setLevel(Level.DEBUG);
     Configuration conf = new Configuration();
     conf.set(CommonConfigurationKeys.HADOOP_SECURITY_GROUP_MAPPING,
         "org.apache.hadoop.security.ShellBasedUnixGroupsNetgroupMapping");
@@ -69,7 +69,7 @@ public class TestGroupFallback {
     LOG.info("running 'mvn -Pnative -DTestGroupFallback clear test' will " +
         "test the normal path and 'mvn -DTestGroupFallback clear test' will" +
         " test the fall back functionality");
-    GenericTestUtils.setRootLogLevel(Level.DEBUG);
+    Logger.getRootLogger().setLevel(Level.DEBUG);
     Configuration conf = new Configuration();
     conf.set(CommonConfigurationKeys.HADOOP_SECURITY_GROUP_MAPPING,
         "org.apache.hadoop.security.JniBasedUnixGroupsMappingWithFallback");
@@ -88,7 +88,7 @@ public class TestGroupFallback {
     LOG.info("running 'mvn -Pnative -DTestGroupFallback clear test' will " +
         "test the normal path and 'mvn -DTestGroupFallback clear test' will" +
         " test the fall back functionality");
-    GenericTestUtils.setRootLogLevel(Level.DEBUG);
+    Logger.getRootLogger().setLevel(Level.DEBUG);
     Configuration conf = new Configuration();
     conf.set(CommonConfigurationKeys.HADOOP_SECURITY_GROUP_MAPPING,
         "org.apache.hadoop.security.JniBasedUnixGroupsNetgroupMappingWithFallback");

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8fc5dcc2/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUGIWithMiniKdc.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUGIWithMiniKdc.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUGIWithMiniKdc.java
index 6c94b1d..2c6c7e4 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUGIWithMiniKdc.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUGIWithMiniKdc.java
@@ -23,9 +23,9 @@ import org.apache.hadoop.security.authentication.util.KerberosUtil;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.LambdaTestUtils;
 import org.apache.hadoop.util.PlatformName;
+import org.apache.log4j.Level;
 import org.junit.After;
 import org.junit.Test;
-import org.slf4j.event.Level;
 
 import javax.security.auth.Subject;
 import javax.security.auth.kerberos.KerberosPrincipal;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8fc5dcc2/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUserGroupInformation.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUserGroupInformation.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUserGroupInformation.java
index bcb2126..00062c0 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUserGroupInformation.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUserGroupInformation.java
@@ -33,6 +33,7 @@ import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.Shell;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Time;
+import org.apache.log4j.Level;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
@@ -40,7 +41,6 @@ import org.junit.BeforeClass;
 import org.junit.Test;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
-import org.slf4j.event.Level;
 
 import javax.security.auth.Subject;
 import javax.security.auth.kerberos.KerberosPrincipal;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8fc5dcc2/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestSSLFactory.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestSSLFactory.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestSSLFactory.java
index 4f1aca0..5369c9d 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestSSLFactory.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestSSLFactory.java
@@ -26,6 +26,7 @@ import org.apache.hadoop.security.alias.CredentialProviderFactory;
 import org.apache.hadoop.security.alias.JavaKeyStoreProvider;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.StringUtils;
+import org.apache.log4j.Level;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
@@ -33,7 +34,6 @@ import org.junit.BeforeClass;
 import org.junit.Test;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
-import org.slf4j.event.Level;
 
 import javax.net.ssl.HttpsURLConnection;
 import javax.net.ssl.SSLEngine;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8fc5dcc2/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/web/TestWebDelegationToken.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/web/TestWebDelegationToken.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/web/TestWebDelegationToken.java
index c564b97..7319e4c 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/web/TestWebDelegationToken.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/web/TestWebDelegationToken.java
@@ -32,6 +32,7 @@ import org.apache.hadoop.security.authentication.server.PseudoAuthenticationHand
 import org.apache.hadoop.security.authentication.util.KerberosUtil;
 import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSecretManager;
 import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.log4j.Level;
 import org.eclipse.jetty.server.Server;
 import org.eclipse.jetty.server.ServerConnector;
 import org.eclipse.jetty.servlet.ServletContextHandler;
@@ -41,7 +42,6 @@ import org.junit.Before;
 import org.junit.Test;
 import org.eclipse.jetty.servlet.FilterHolder;
 import org.eclipse.jetty.servlet.ServletHolder;
-import org.slf4j.event.Level;
 
 import javax.security.auth.Subject;
 import javax.security.auth.kerberos.KerberosPrincipal;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8fc5dcc2/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
index 82a5e08..77a79ff 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
@@ -141,20 +141,6 @@ public abstract class GenericTestUtils {
   }
 
   /**
-   * A helper used in log4j2 migration to accept legacy
-   * org.apache.commons.logging apis.
-   * <p>
-   * And will be removed after migration.
-   *
-   * @param log   a log
-   * @param level level to be set
-   */
-  @Deprecated
-  public static void setLogLevel(Log log, org.slf4j.event.Level level) {
-    setLogLevel(log, Level.toLevel(level.toString()));
-  }
-
-  /**
    * @deprecated
    * use {@link #setLogLevel(org.slf4j.Logger, org.slf4j.event.Level)} instead
    */
@@ -186,22 +172,6 @@ public abstract class GenericTestUtils {
     setLogLevel(toLog4j(logger), Level.toLevel(level.toString()));
   }
 
-  public static void setRootLogLevel(org.slf4j.event.Level level) {
-    setLogLevel(LogManager.getRootLogger(), Level.toLevel(level.toString()));
-  }
-
-  public static org.slf4j.event.Level toLevel(String level) {
-    return toLevel(level, org.slf4j.event.Level.DEBUG);
-  }
-
-  public static org.slf4j.event.Level toLevel(
-      String level, org.slf4j.event.Level defaultLevel) {
-    try {
-      return org.slf4j.event.Level.valueOf(level);
-    } catch (IllegalArgumentException e) {
-      return defaultLevel;
-    }
-  }
   /**
    * Extracts the name of the method where the invocation has happened
    * @return String name of the invoking method

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8fc5dcc2/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/TestGenericTestUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/TestGenericTestUtils.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/TestGenericTestUtils.java
index c1d45cc..b3fc836 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/TestGenericTestUtils.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/TestGenericTestUtils.java
@@ -27,9 +27,7 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import com.google.common.base.Supplier;
-import org.slf4j.event.Level;
 
-import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
@@ -153,12 +151,4 @@ public class TestGenericTestUtils extends GenericTestUtils {
       assertExceptionContains(GenericTestUtils.ERROR_INVALID_ARGUMENT, e);
     }
   }
-
-  @Test
-  public void testToLevel() throws Throwable {
-    assertEquals(Level.INFO, toLevel("INFO"));
-    assertEquals(Level.DEBUG, toLevel("NonExistLevel"));
-    assertEquals(Level.INFO, toLevel("INFO", Level.TRACE));
-    assertEquals(Level.TRACE, toLevel("NonExistLevel", Level.TRACE));
-  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8fc5dcc2/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/Crc32PerformanceTest.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/Crc32PerformanceTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/Crc32PerformanceTest.java
index ce28f50..34dfc3a 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/Crc32PerformanceTest.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/Crc32PerformanceTest.java
@@ -27,11 +27,10 @@ import java.util.Random;
 import java.util.zip.CRC32;
 import java.util.zip.Checksum;
 
+import org.apache.commons.logging.LogFactory;
+import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.fs.ChecksumException;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.slf4j.event.Level;
-
-import static org.slf4j.LoggerFactory.getLogger;
+import org.apache.log4j.Level;
 
 /**
  * Performance tests to compare performance of Crc32|Crc32C implementations
@@ -177,8 +176,8 @@ public class Crc32PerformanceTest {
         crcs.add(Crc32.Native.class);
       }
       crcs.add(Crc32.NativeC.class);
-      GenericTestUtils.setLogLevel(getLogger(NativeCodeLoader.class),
-          Level.TRACE);
+      ((Log4JLogger)LogFactory.getLog(NativeCodeLoader.class))
+          .getLogger().setLevel(Level.ALL);
     }
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8fc5dcc2/hadoop-common-project/hadoop-nfs/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-nfs/pom.xml b/hadoop-common-project/hadoop-nfs/pom.xml
index bd5ab92..5fdaf44 100644
--- a/hadoop-common-project/hadoop-nfs/pom.xml
+++ b/hadoop-common-project/hadoop-nfs/pom.xml
@@ -48,12 +48,6 @@
       <scope>provided</scope>
     </dependency>
     <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-common</artifactId>
-      <scope>test</scope>
-      <type>test-jar</type>
-    </dependency>
-    <dependency>
       <groupId>junit</groupId>
       <artifactId>junit</artifactId>
     </dependency>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8fc5dcc2/hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/oncrpc/TestFrameDecoder.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/oncrpc/TestFrameDecoder.java b/hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/oncrpc/TestFrameDecoder.java
index 0e416b3..9d0fe0f 100644
--- a/hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/oncrpc/TestFrameDecoder.java
+++ b/hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/oncrpc/TestFrameDecoder.java
@@ -28,7 +28,8 @@ import java.util.Random;
 import org.apache.hadoop.oncrpc.RpcUtil.RpcFrameDecoder;
 import org.apache.hadoop.oncrpc.security.CredentialsNone;
 import org.apache.hadoop.oncrpc.security.VerifierNone;
-import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.log4j.Level;
+import org.apache.commons.logging.impl.Log4JLogger;
 import org.jboss.netty.buffer.ByteBufferBackedChannelBuffer;
 import org.jboss.netty.buffer.ChannelBuffer;
 import org.jboss.netty.buffer.ChannelBuffers;
@@ -37,12 +38,11 @@ import org.jboss.netty.channel.ChannelException;
 import org.jboss.netty.channel.ChannelHandlerContext;
 import org.junit.Test;
 import org.mockito.Mockito;
-import org.slf4j.event.Level;
 
 public class TestFrameDecoder {
   
   static {
-    GenericTestUtils.setLogLevel(RpcProgram.LOG, Level.TRACE);
+    ((Log4JLogger) RpcProgram.LOG).getLogger().setLevel(Level.ALL);
   }
 
   private static int resultSize;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8fc5dcc2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure.java
index 5b99d45..9915a2f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure.java
@@ -391,34 +391,6 @@ public class TestDFSStripedOutputStreamWithFailure {
   }
 
   /**
-   * When all the two DataNodes with partial data block fail
-   */
-  @Test
-  public void runTestWithMultipleFailure2() throws Exception {
-    final HdfsConfiguration conf = newHdfsConfiguration();
-    // two DNs have cellSize and the other DNs have cellSize*2
-    final int length = cellSize * (dataBlocks * 2 - 2);
-    // select the two DNs with partial block to kill
-    final int[] dnIndex = {dataBlocks - 2, dataBlocks - 1};
-    final int[] killPos = getKillPositions(length, dnIndex.length);
-
-    try {
-      LOG.info("runTestWithMultipleFailure2: length==" + length + ", killPos="
-          + Arrays.toString(killPos) + ", dnIndex="
-          + Arrays.toString(dnIndex));
-      setup(conf);
-      runTest(length, killPos, dnIndex, false);
-    } catch (Throwable e) {
-      final String err = "failed, killPos=" + Arrays.toString(killPos)
-          + ", dnIndex=" + Arrays.toString(dnIndex) + ", length=" + length;
-      LOG.error(err);
-      throw e;
-    } finally {
-      tearDown();
-    }
-  }
-
-  /**
    * runTest implementation.
    * @param length file length
    * @param killPos killing positions in ascending order

