Posted to common-commits@hadoop.apache.org by vi...@apache.org on 2017/05/17 20:15:24 UTC

[01/29] hadoop git commit: HADOOP-14401. maven-project-info-reports-plugin can be removed. Contributed by Andras Bokor. [Forced Update!]

Repository: hadoop
Updated Branches:
  refs/heads/HDFS-9806 d0fc899a8 -> 5d021f38e (forced update)


HADOOP-14401. maven-project-info-reports-plugin can be removed. Contributed by Andras Bokor.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0d5c8ed8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0d5c8ed8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0d5c8ed8

Branch: refs/heads/HDFS-9806
Commit: 0d5c8ed8e0176909ee4d68b2969a882c21c0df55
Parents: 83dd14a
Author: Akira Ajisaka <aa...@apache.org>
Authored: Thu May 11 16:37:32 2017 -0500
Committer: Akira Ajisaka <aa...@apache.org>
Committed: Thu May 11 16:37:32 2017 -0500

----------------------------------------------------------------------
 hadoop-common-project/hadoop-auth/pom.xml      | 15 ---------------
 hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml | 15 ---------------
 hadoop-project/pom.xml                         |  6 ------
 hadoop-tools/hadoop-aliyun/pom.xml             |  8 --------
 hadoop-tools/hadoop-aws/pom.xml                |  8 --------
 hadoop-tools/hadoop-azure-datalake/pom.xml     | 10 ----------
 hadoop-tools/hadoop-azure/pom.xml              | 10 ----------
 hadoop-tools/hadoop-kafka/pom.xml              |  8 --------
 hadoop-tools/hadoop-openstack/pom.xml          |  9 ---------
 9 files changed, 89 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0d5c8ed8/hadoop-common-project/hadoop-auth/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-auth/pom.xml b/hadoop-common-project/hadoop-auth/pom.xml
index 8900f58..b4beb82 100644
--- a/hadoop-common-project/hadoop-auth/pom.xml
+++ b/hadoop-common-project/hadoop-auth/pom.xml
@@ -252,21 +252,6 @@
           </plugin>
           <plugin>
             <groupId>org.apache.maven.plugins</groupId>
-            <artifactId>maven-project-info-reports-plugin</artifactId>
-            <executions>
-              <execution>
-                <configuration>
-                  <dependencyLocationsEnabled>false</dependencyLocationsEnabled>
-                </configuration>
-                <phase>package</phase>
-                <goals>
-                  <goal>dependencies</goal>
-                </goals>
-              </execution>
-            </executions>
-          </plugin>
-          <plugin>
-            <groupId>org.apache.maven.plugins</groupId>
             <artifactId>maven-javadoc-plugin</artifactId>
             <executions>
               <execution>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0d5c8ed8/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml b/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml
index bce8bc2..d0a01cb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml
@@ -299,21 +299,6 @@
         </executions>
       </plugin>
       <plugin>
-        <groupId>org.apache.maven.plugins</groupId>
-        <artifactId>maven-project-info-reports-plugin</artifactId>
-        <executions>
-          <execution>
-            <configuration>
-              <dependencyLocationsEnabled>false</dependencyLocationsEnabled>
-            </configuration>
-            <goals>
-              <goal>dependencies</goal>
-            </goals>
-            <phase>package</phase>
-          </execution>
-        </executions>
-      </plugin>
-      <plugin>
         <groupId>org.apache.rat</groupId>
         <artifactId>apache-rat-plugin</artifactId>
         <configuration>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0d5c8ed8/hadoop-project/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index 4097a0a..1d97cfc 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -123,7 +123,6 @@
     <maven-jar-plugin.version>2.5</maven-jar-plugin.version>
     <maven-war-plugin.version>2.4</maven-war-plugin.version>
     <maven-source-plugin.version>2.3</maven-source-plugin.version>
-    <maven-project-info-reports-plugin.version>2.9</maven-project-info-reports-plugin.version>
     <maven-pdf-plugin.version>1.2</maven-pdf-plugin.version>
     <maven-remote-resources-plugin.version>1.5</maven-remote-resources-plugin.version>
     <build-helper-maven-plugin.version>1.9</build-helper-maven-plugin.version>
@@ -1388,11 +1387,6 @@
         </plugin>
         <plugin>
           <groupId>org.apache.maven.plugins</groupId>
-          <artifactId>maven-project-info-reports-plugin</artifactId>
-          <version>${maven-project-info-reports-plugin.version}</version>
-        </plugin>
-        <plugin>
-          <groupId>org.apache.maven.plugins</groupId>
           <artifactId>maven-resources-plugin</artifactId>
           <version>${maven-resources-plugin.version}</version>
         </plugin>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0d5c8ed8/hadoop-tools/hadoop-aliyun/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aliyun/pom.xml b/hadoop-tools/hadoop-aliyun/pom.xml
index 4282ea3..74461fe 100644
--- a/hadoop-tools/hadoop-aliyun/pom.xml
+++ b/hadoop-tools/hadoop-aliyun/pom.xml
@@ -70,14 +70,6 @@
       </plugin>
       <plugin>
         <groupId>org.apache.maven.plugins</groupId>
-        <artifactId>maven-project-info-reports-plugin</artifactId>
-        <configuration>
-          <dependencyDetailsEnabled>false</dependencyDetailsEnabled>
-          <dependencyLocationsEnabled>false</dependencyLocationsEnabled>
-        </configuration>
-      </plugin>
-      <plugin>
-        <groupId>org.apache.maven.plugins</groupId>
         <artifactId>maven-surefire-plugin</artifactId>
         <configuration>
           <forkedProcessTimeoutInSeconds>3600</forkedProcessTimeoutInSeconds>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0d5c8ed8/hadoop-tools/hadoop-aws/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/pom.xml b/hadoop-tools/hadoop-aws/pom.xml
index 1084881..e00ae10 100644
--- a/hadoop-tools/hadoop-aws/pom.xml
+++ b/hadoop-tools/hadoop-aws/pom.xml
@@ -286,14 +286,6 @@
       </plugin>
       <plugin>
         <groupId>org.apache.maven.plugins</groupId>
-        <artifactId>maven-project-info-reports-plugin</artifactId>
-        <configuration>
-          <dependencyDetailsEnabled>false</dependencyDetailsEnabled>
-          <dependencyLocationsEnabled>false</dependencyLocationsEnabled>
-        </configuration>
-      </plugin>
-      <plugin>
-        <groupId>org.apache.maven.plugins</groupId>
         <artifactId>maven-surefire-plugin</artifactId>
         <configuration>
           <forkedProcessTimeoutInSeconds>3600</forkedProcessTimeoutInSeconds>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0d5c8ed8/hadoop-tools/hadoop-azure-datalake/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure-datalake/pom.xml b/hadoop-tools/hadoop-azure-datalake/pom.xml
index 25197cb..6e1306b 100644
--- a/hadoop-tools/hadoop-azure-datalake/pom.xml
+++ b/hadoop-tools/hadoop-azure-datalake/pom.xml
@@ -38,16 +38,6 @@
     <plugins>
       <plugin>
         <groupId>org.apache.maven.plugins</groupId>
-        <artifactId>maven-project-info-reports-plugin</artifactId>
-
-        <configuration>
-          <dependencyDetailsEnabled>false</dependencyDetailsEnabled>
-          <dependencyLocationsEnabled>false
-          </dependencyLocationsEnabled>
-        </configuration>
-      </plugin>
-      <plugin>
-        <groupId>org.apache.maven.plugins</groupId>
         <artifactId>maven-jar-plugin</artifactId>
         <executions>
           <execution>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0d5c8ed8/hadoop-tools/hadoop-azure/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/pom.xml b/hadoop-tools/hadoop-azure/pom.xml
index de31a49..0f59c46 100644
--- a/hadoop-tools/hadoop-azure/pom.xml
+++ b/hadoop-tools/hadoop-azure/pom.xml
@@ -50,16 +50,6 @@
         </configuration>
       </plugin>
       <plugin>
-        <groupId>org.apache.maven.plugins</groupId>
-        <artifactId>maven-project-info-reports-plugin</artifactId>
-
-        <configuration>
-          <dependencyDetailsEnabled>false</dependencyDetailsEnabled>
-          <dependencyLocationsEnabled>false</dependencyLocationsEnabled>
-        </configuration>
-      </plugin>
-      
-      <plugin>
           <groupId>org.apache.maven.plugins</groupId>
           <artifactId>maven-checkstyle-plugin</artifactId>
           <configuration>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0d5c8ed8/hadoop-tools/hadoop-kafka/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-kafka/pom.xml b/hadoop-tools/hadoop-kafka/pom.xml
index 13e0ac0..b9273fe 100644
--- a/hadoop-tools/hadoop-kafka/pom.xml
+++ b/hadoop-tools/hadoop-kafka/pom.xml
@@ -49,14 +49,6 @@
       </plugin>
       <plugin>
         <groupId>org.apache.maven.plugins</groupId>
-        <artifactId>maven-project-info-reports-plugin</artifactId>
-        <configuration>
-          <dependencyDetailsEnabled>false</dependencyDetailsEnabled>
-          <dependencyLocationsEnabled>false</dependencyLocationsEnabled>
-        </configuration>
-      </plugin>
-      <plugin>
-        <groupId>org.apache.maven.plugins</groupId>
         <artifactId>maven-surefire-plugin</artifactId>
         <configuration>
           <forkedProcessTimeoutInSeconds>3600</forkedProcessTimeoutInSeconds>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0d5c8ed8/hadoop-tools/hadoop-openstack/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-openstack/pom.xml b/hadoop-tools/hadoop-openstack/pom.xml
index 0e0defb..f80fcdf 100644
--- a/hadoop-tools/hadoop-openstack/pom.xml
+++ b/hadoop-tools/hadoop-openstack/pom.xml
@@ -78,15 +78,6 @@
       </plugin>
       <plugin>
         <groupId>org.apache.maven.plugins</groupId>
-        <artifactId>maven-project-info-reports-plugin</artifactId>
-
-        <configuration>
-          <dependencyDetailsEnabled>false</dependencyDetailsEnabled>
-          <dependencyLocationsEnabled>false</dependencyLocationsEnabled>
-        </configuration>
-      </plugin>
-      <plugin>
-        <groupId>org.apache.maven.plugins</groupId>
         <artifactId>maven-dependency-plugin</artifactId>
         <executions>
           <execution>




[08/29] hadoop git commit: YARN-6598. History server getApplicationReport NPE when fetching report for pre-2.8 job (Jason Lowe via jeagles)

Posted by vi...@apache.org.
YARN-6598. History server getApplicationReport NPE when fetching report for pre-2.8 job (Jason Lowe via jeagles)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c48f2976
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c48f2976
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c48f2976

Branch: refs/heads/HDFS-9806
Commit: c48f2976a3de60b95c4a5ada4f0131c4cdde177a
Parents: 6600abb
Author: Jonathan Eagles <je...@yahoo-inc.com>
Authored: Mon May 15 10:32:01 2017 -0500
Committer: Jonathan Eagles <je...@yahoo-inc.com>
Committed: Mon May 15 10:32:01 2017 -0500

----------------------------------------------------------------------
 ...pplicationHistoryManagerOnTimelineStore.java | 35 ++++++++++++--------
 ...pplicationHistoryManagerOnTimelineStore.java | 32 ++++++++++++++----
 2 files changed, 48 insertions(+), 19 deletions(-)
----------------------------------------------------------------------
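
The NPE comes from the old lookup pattern visible in the hunk below: application timeline data written before 2.8 lacks the preempted-resource metric keys (the new test case models this with missingPreemptMetrics), so entityInfo.get(...).toString() dereferences a null map value. The fix routes every metric lookup through a null-safe parseLong helper that treats a missing key as 0. A minimal standalone sketch of that pattern, for illustration only (class and key names here are placeholders; only the helper mirrors the actual patch):

import java.util.HashMap;
import java.util.Map;

public class MissingMetricSketch {
  // Mirrors the parseLong helper added in this commit: an absent key yields 0
  // instead of an NPE from calling toString() on a null map value.
  static long parseLong(Map<String, Object> entityInfo, String infoKey) {
    Object infoValue = entityInfo.get(infoKey);
    return infoValue != null ? Long.parseLong(infoValue.toString()) : 0L;
  }

  public static void main(String[] args) {
    Map<String, Object> entityInfo = new HashMap<>(); // pre-2.8 entity: no preempt metrics stored
    System.out.println(parseLong(entityInfo, "APP_MEM_PREEMPT_METRICS")); // prints 0
  }
}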


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c48f2976/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerOnTimelineStore.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerOnTimelineStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerOnTimelineStore.java
index 54893ef..d18f3dc 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerOnTimelineStore.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerOnTimelineStore.java
@@ -330,20 +330,19 @@ public class ApplicationHistoryManagerOnTimelineStore extends AbstractService
       }
 
       if (entityInfo.containsKey(ApplicationMetricsConstants.APP_CPU_METRICS)) {
-        long vcoreSeconds=Long.parseLong(entityInfo.get(
-                ApplicationMetricsConstants.APP_CPU_METRICS).toString());
-        long memorySeconds=Long.parseLong(entityInfo.get(
-                ApplicationMetricsConstants.APP_MEM_METRICS).toString());
-        long preemptedMemorySeconds = Long.parseLong(entityInfo.get(
-            ApplicationMetricsConstants
-                .APP_MEM_PREEMPT_METRICS).toString());
-        long preemptedVcoreSeconds = Long.parseLong(entityInfo.get(
-            ApplicationMetricsConstants
-                .APP_CPU_PREEMPT_METRICS).toString());
-        appResources = ApplicationResourceUsageReport
-            .newInstance(0, 0, null, null, null, memorySeconds, vcoreSeconds, 0,
-                0, preemptedMemorySeconds, preemptedVcoreSeconds);
+        long vcoreSeconds = parseLong(entityInfo,
+            ApplicationMetricsConstants.APP_CPU_METRICS);
+        long memorySeconds = parseLong(entityInfo,
+            ApplicationMetricsConstants.APP_MEM_METRICS);
+        long preemptedMemorySeconds = parseLong(entityInfo,
+            ApplicationMetricsConstants.APP_MEM_PREEMPT_METRICS);
+        long preemptedVcoreSeconds = parseLong(entityInfo,
+            ApplicationMetricsConstants.APP_CPU_PREEMPT_METRICS);
+        appResources = ApplicationResourceUsageReport.newInstance(0, 0, null,
+            null, null, memorySeconds, vcoreSeconds, 0, 0,
+            preemptedMemorySeconds, preemptedVcoreSeconds);
       }
+
       if (entityInfo.containsKey(ApplicationMetricsConstants.APP_TAGS_INFO)) {
         appTags = new HashSet<String>();
         Object obj = entityInfo.get(ApplicationMetricsConstants.APP_TAGS_INFO);
@@ -445,6 +444,16 @@ public class ApplicationHistoryManagerOnTimelineStore extends AbstractService
         amNodeLabelExpression), appViewACLs);
   }
 
+  private static long parseLong(Map<String, Object> entityInfo,
+      String infoKey) {
+    long result = 0;
+    Object infoValue = entityInfo.get(infoKey);
+    if (infoValue != null) {
+      result = Long.parseLong(infoValue.toString());
+    }
+    return result;
+  }
+
   private static boolean isFinalState(YarnApplicationState state) {
     return state == YarnApplicationState.FINISHED
         || state == YarnApplicationState.FAILED

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c48f2976/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestApplicationHistoryManagerOnTimelineStore.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestApplicationHistoryManagerOnTimelineStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestApplicationHistoryManagerOnTimelineStore.java
index 3d4e91b..9600251 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestApplicationHistoryManagerOnTimelineStore.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestApplicationHistoryManagerOnTimelineStore.java
@@ -143,6 +143,10 @@ public class TestApplicationHistoryManagerOnTimelineStore {
       if (i == 2) {
         entities.addEntity(createApplicationTimelineEntity(
             appId, true, false, false, true, YarnApplicationState.FINISHED));
+      } else if (i == 3) {
+        entities.addEntity(createApplicationTimelineEntity(
+            appId, false, false, false, false, YarnApplicationState.FINISHED,
+            true));
       } else {
         entities.addEntity(createApplicationTimelineEntity(
             appId, false, false, false, false, YarnApplicationState.FINISHED));
@@ -176,7 +180,7 @@ public class TestApplicationHistoryManagerOnTimelineStore {
 
   @Test
   public void testGetApplicationReport() throws Exception {
-    for (int i = 1; i <= 2; ++i) {
+    for (int i = 1; i <= 3; ++i) {
       final ApplicationId appId = ApplicationId.newInstance(0, i);
       ApplicationReport app;
       if (callerUGI == null) {
@@ -214,7 +218,7 @@ public class TestApplicationHistoryManagerOnTimelineStore {
       Assert.assertTrue(app.getApplicationTags().contains("Test_APP_TAGS_2"));
       // App 2 doesn't have the ACLs, such that the default ACLs " " will be used.
       // Nobody except admin and owner has access to the details of the app.
-      if ((i ==  1 && callerUGI != null &&
+      if ((i != 2 && callerUGI != null &&
           callerUGI.getShortUserName().equals("user3")) ||
           (i ==  2 && callerUGI != null &&
           (callerUGI.getShortUserName().equals("user2") ||
@@ -245,10 +249,16 @@ public class TestApplicationHistoryManagerOnTimelineStore {
           applicationResourceUsageReport.getMemorySeconds());
       Assert
           .assertEquals(345, applicationResourceUsageReport.getVcoreSeconds());
-      Assert.assertEquals(456,
+      long expectedPreemptMemSecs = 456;
+      long expectedPreemptVcoreSecs = 789;
+      if (i == 3) {
+        expectedPreemptMemSecs = 0;
+        expectedPreemptVcoreSecs = 0;
+      }
+      Assert.assertEquals(expectedPreemptMemSecs,
           applicationResourceUsageReport.getPreemptedMemorySeconds());
       Assert
-          .assertEquals(789, applicationResourceUsageReport
+          .assertEquals(expectedPreemptVcoreSecs, applicationResourceUsageReport
               .getPreemptedVcoreSeconds());
       Assert.assertEquals(FinalApplicationStatus.UNDEFINED,
           app.getFinalApplicationStatus());
@@ -486,6 +496,14 @@ public class TestApplicationHistoryManagerOnTimelineStore {
       ApplicationId appId, boolean emptyACLs, boolean noAttemptId,
       boolean wrongAppId, boolean enableUpdateEvent,
       YarnApplicationState state) {
+    return createApplicationTimelineEntity(appId, emptyACLs, noAttemptId,
+        wrongAppId, enableUpdateEvent, state, false);
+  }
+
+  private static TimelineEntity createApplicationTimelineEntity(
+      ApplicationId appId, boolean emptyACLs, boolean noAttemptId,
+      boolean wrongAppId, boolean enableUpdateEvent,
+      YarnApplicationState state, boolean missingPreemptMetrics) {
     TimelineEntity entity = new TimelineEntity();
     entity.setEntityType(ApplicationMetricsConstants.ENTITY_TYPE);
     if (wrongAppId) {
@@ -510,8 +528,10 @@ public class TestApplicationHistoryManagerOnTimelineStore {
         Integer.MAX_VALUE + 1L);
     entityInfo.put(ApplicationMetricsConstants.APP_MEM_METRICS, 123);
     entityInfo.put(ApplicationMetricsConstants.APP_CPU_METRICS, 345);
-    entityInfo.put(ApplicationMetricsConstants.APP_MEM_PREEMPT_METRICS,456);
-    entityInfo.put(ApplicationMetricsConstants.APP_CPU_PREEMPT_METRICS,789);
+    if (!missingPreemptMetrics) {
+      entityInfo.put(ApplicationMetricsConstants.APP_MEM_PREEMPT_METRICS, 456);
+      entityInfo.put(ApplicationMetricsConstants.APP_CPU_PREEMPT_METRICS, 789);
+    }
     if (emptyACLs) {
       entityInfo.put(ApplicationMetricsConstants.APP_VIEW_ACLS_ENTITY_INFO, "");
     } else {




[09/29] hadoop git commit: YARN-6603. NPE in RMAppsBlock. Contributed by Jason Lowe

Posted by vi...@apache.org.
YARN-6603. NPE in RMAppsBlock. Contributed by Jason Lowe


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/489f8593
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/489f8593
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/489f8593

Branch: refs/heads/HDFS-9806
Commit: 489f85933c508bc26de607b921e56e23b979fce8
Parents: c48f297
Author: Jason Lowe <jl...@yahoo-inc.com>
Authored: Tue May 16 09:26:44 2017 -0500
Committer: Jason Lowe <jl...@yahoo-inc.com>
Committed: Tue May 16 09:26:44 2017 -0500

----------------------------------------------------------------------
 .../server/resourcemanager/webapp/RMAppsBlock.java | 17 ++++++++++-------
 1 file changed, 10 insertions(+), 7 deletions(-)
----------------------------------------------------------------------
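
The NPE in RMAppsBlock happens when rm.getRMContext().getRMApps().get(appId) returns null (the application is no longer in the RM's app map) and the old code immediately chained .getAppAttempts() on that result. The patch looks up the RMApp first and only resolves the attempt when the app is still present, leaving the blacklisted-node count as "N/A" otherwise. A simplified sketch of that guard, with placeholder types rather than the real RM classes:

import java.util.Map;
import java.util.Set;

class BlacklistCountSketch {
  // Dereference the attempt only after confirming the app is still tracked;
  // a purged app simply reports "N/A" instead of throwing.
  static String blacklistedNodesCount(Map<String, Map<String, Set<String>>> apps,
                                      String appId, String attemptId) {
    String count = "N/A";
    Map<String, Set<String>> attempts = apps.get(appId);
    if (attempts != null) {
      Set<String> nodes = attempts.get(attemptId);
      if (nodes != null) {
        count = String.valueOf(nodes.size());
      }
    }
    return count;
  }
}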


http://git-wip-us.apache.org/repos/asf/hadoop/blob/489f8593/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMAppsBlock.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMAppsBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMAppsBlock.java
index 305f1d5..6a18296 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMAppsBlock.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMAppsBlock.java
@@ -31,6 +31,7 @@ import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.ApplicationReport;
 import org.apache.hadoop.yarn.api.records.YarnApplicationState;
 import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt;
 import org.apache.hadoop.yarn.server.webapp.AppsBlock;
 import org.apache.hadoop.yarn.server.webapp.dao.AppInfo;
@@ -96,13 +97,15 @@ public class RMAppsBlock extends AppsBlock {
       }
 
       String blacklistedNodesCount = "N/A";
-      RMAppAttempt appAttempt =
-          rm.getRMContext().getRMApps().get(appAttemptId.getApplicationId())
-              .getAppAttempts().get(appAttemptId);
-      Set<String> nodes =
-          null == appAttempt ? null : appAttempt.getBlacklistedNodes();
-      if (nodes != null) {
-        blacklistedNodesCount = String.valueOf(nodes.size());
+      RMApp rmApp = rm.getRMContext().getRMApps()
+          .get(appAttemptId.getApplicationId());
+      if (rmApp != null) {
+        RMAppAttempt appAttempt = rmApp.getRMAppAttempt(appAttemptId);
+        Set<String> nodes =
+            null == appAttempt ? null : appAttempt.getBlacklistedNodes();
+        if (nodes != null) {
+          blacklistedNodesCount = String.valueOf(nodes.size());
+        }
       }
       String percent = StringUtils.format("%.1f", app.getProgress());
       appsTableData




[29/29] hadoop git commit: HDFS-11190. [READ] Namenode support for data stored in external stores.

Posted by vi...@apache.org.
HDFS-11190. [READ] Namenode support for data stored in external stores.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/37114eb0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/37114eb0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/37114eb0

Branch: refs/heads/HDFS-9806
Commit: 37114eb0d3e489d649d9d91721957d84e1b9cc3d
Parents: 616765e
Author: Virajith Jalaparti <vi...@apache.org>
Authored: Fri Apr 21 11:12:36 2017 -0700
Committer: Virajith Jalaparti <vi...@apache.org>
Committed: Wed May 17 12:41:52 2017 -0700

----------------------------------------------------------------------
 .../hadoop/hdfs/protocol/LocatedBlock.java      |  96 ++++-
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |   5 +
 .../blockmanagement/BlockFormatProvider.java    |  91 ++++
 .../server/blockmanagement/BlockManager.java    |  95 +++--
 .../server/blockmanagement/BlockProvider.java   |  65 +++
 .../BlockStoragePolicySuite.java                |   6 +
 .../blockmanagement/DatanodeDescriptor.java     |  34 +-
 .../server/blockmanagement/DatanodeManager.java |   2 +
 .../blockmanagement/DatanodeStorageInfo.java    |   4 +
 .../blockmanagement/LocatedBlockBuilder.java    | 109 +++++
 .../blockmanagement/ProvidedStorageMap.java     | 427 +++++++++++++++++++
 .../src/main/resources/hdfs-default.xml         |  30 +-
 .../hadoop/hdfs/TestBlockStoragePolicy.java     |   4 +
 .../blockmanagement/TestDatanodeManager.java    |  66 ++-
 .../TestNameNodeProvidedImplementation.java     | 345 +++++++++++++++
 15 files changed, 1293 insertions(+), 86 deletions(-)
----------------------------------------------------------------------
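
Alongside the new block-management classes, the commit adds two namenode-side keys to DFSConfigKeys (dfs.namenode.provided.enabled and dfs.namenode.block.provider.class) and a BlockFormatProvider that loads provided blocks from a BlockFormat. A hedged usage sketch based only on those keys; defaults and any additional dfs.provided.* settings required in practice are assumptions beyond what this diff shows:

import org.apache.hadoop.conf.Configuration;

class ProvidedStorageConfigSketch {
  // Enable provided-storage support on the namenode and point it at the
  // BlockFormatProvider introduced by this commit. Other provided-storage keys
  // listed in DFSConfigKeys (e.g. dfs.provided.storage.id) may also be needed.
  static Configuration withProvidedStorage() {
    Configuration conf = new Configuration();
    conf.setBoolean("dfs.namenode.provided.enabled", true);
    conf.set("dfs.namenode.block.provider.class",
        "org.apache.hadoop.hdfs.server.blockmanagement.BlockFormatProvider");
    return conf;
  }
}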


http://git-wip-us.apache.org/repos/asf/hadoop/blob/37114eb0/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlock.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlock.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlock.java
index 85bec92..5ad0bca 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlock.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlock.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hdfs.protocol;
 
 import java.util.Arrays;
+import java.util.Comparator;
 import java.util.List;
 
 import com.google.common.base.Preconditions;
@@ -62,40 +63,50 @@ public class LocatedBlock {
 
   public LocatedBlock(ExtendedBlock b, DatanodeInfo[] locs) {
     // By default, startOffset is unknown(-1) and corrupt is false.
-    this(b, locs, null, null, -1, false, EMPTY_LOCS);
+    this(b, convert(locs, null, null), null, null, -1, false, EMPTY_LOCS);
   }
 
   public LocatedBlock(ExtendedBlock b, DatanodeInfo[] locs,
       String[] storageIDs, StorageType[] storageTypes) {
-    this(b, locs, storageIDs, storageTypes, -1, false, EMPTY_LOCS);
+    this(b, convert(locs, storageIDs, storageTypes),
+         storageIDs, storageTypes, -1, false, EMPTY_LOCS);
   }
 
-  public LocatedBlock(ExtendedBlock b, DatanodeInfo[] locs, String[] storageIDs,
-      StorageType[] storageTypes, long startOffset,
+  public LocatedBlock(ExtendedBlock b, DatanodeInfo[] locs,
+      String[] storageIDs, StorageType[] storageTypes, long startOffset,
+      boolean corrupt, DatanodeInfo[] cachedLocs) {
+    this(b, convert(locs, storageIDs, storageTypes),
+        storageIDs, storageTypes, startOffset, corrupt,
+        null == cachedLocs || 0 == cachedLocs.length ? EMPTY_LOCS : cachedLocs);
+  }
+
+  public LocatedBlock(ExtendedBlock b, DatanodeInfoWithStorage[] locs,
+      String[] storageIDs, StorageType[] storageTypes, long startOffset,
       boolean corrupt, DatanodeInfo[] cachedLocs) {
     this.b = b;
     this.offset = startOffset;
     this.corrupt = corrupt;
-    if (locs==null) {
-      this.locs = EMPTY_LOCS;
-    } else {
-      this.locs = new DatanodeInfoWithStorage[locs.length];
-      for(int i = 0; i < locs.length; i++) {
-        DatanodeInfo di = locs[i];
-        DatanodeInfoWithStorage storage = new DatanodeInfoWithStorage(di,
-            storageIDs != null ? storageIDs[i] : null,
-            storageTypes != null ? storageTypes[i] : null);
-        this.locs[i] = storage;
-      }
-    }
+    this.locs = null == locs ? EMPTY_LOCS : locs;
     this.storageIDs = storageIDs;
     this.storageTypes = storageTypes;
+    this.cachedLocs = null == cachedLocs || 0 == cachedLocs.length
+      ? EMPTY_LOCS
+      : cachedLocs;
+  }
+
+  private static DatanodeInfoWithStorage[] convert(
+      DatanodeInfo[] infos, String[] storageIDs, StorageType[] storageTypes) {
+    if (null == infos) {
+      return EMPTY_LOCS;
+    }
 
-    if (cachedLocs == null || cachedLocs.length == 0) {
-      this.cachedLocs = EMPTY_LOCS;
-    } else {
-      this.cachedLocs = cachedLocs;
+    DatanodeInfoWithStorage[] ret = new DatanodeInfoWithStorage[infos.length];
+    for(int i = 0; i < infos.length; i++) {
+      ret[i] = new DatanodeInfoWithStorage(infos[i],
+          storageIDs   != null ? storageIDs[i]   : null,
+          storageTypes != null ? storageTypes[i] : null);
     }
+    return ret;
   }
 
   public Token<BlockTokenIdentifier> getBlockToken() {
@@ -145,6 +156,51 @@ public class LocatedBlock {
     }
   }
 
+  /**
+   * Comparator that ensures that a PROVIDED storage type is greater than
+   * any other storage type. Any other storage types are considered equal.
+   */
+  private class ProvidedLastComparator
+    implements Comparator<DatanodeInfoWithStorage> {
+    @Override
+    public int compare(DatanodeInfoWithStorage dns1,
+        DatanodeInfoWithStorage dns2) {
+      if (StorageType.PROVIDED.equals(dns1.getStorageType())
+          && !StorageType.PROVIDED.equals(dns2.getStorageType())) {
+        return 1;
+      }
+      if (!StorageType.PROVIDED.equals(dns1.getStorageType())
+          && StorageType.PROVIDED.equals(dns2.getStorageType())) {
+        return -1;
+      }
+      // Storage types of dns1 and dns2 are now both provided or not provided;
+      // thus, are essentially equal for the purpose of this comparator.
+      return 0;
+    }
+  }
+
+  /**
+   * Moves all locations that have {@link StorageType}
+   * {@code PROVIDED} to the end of the locations array without
+   * changing the relative ordering of the remaining locations
+   * Only the first {@code activeLen} locations are considered.
+   * The caller must immediately invoke {@link
+   * org.apache.hadoop.hdfs.protocol.LocatedBlock#updateCachedStorageInfo}
+   * to update the cached Storage ID/Type arrays.
+   * @param activeLen
+   */
+  public void moveProvidedToEnd(int activeLen) {
+
+    if (activeLen <= 0) {
+      return;
+    }
+    // as this is a stable sort, for elements that are equal,
+    // the current order of the elements is maintained
+    Arrays.sort(locs, 0,
+        (activeLen < locs.length) ? activeLen : locs.length,
+        new ProvidedLastComparator());
+  }
+
   public long getStartOffset() {
     return offset;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/37114eb0/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 6406d35..e252b6b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -314,6 +314,11 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
       "dfs.namenode.edits.asynclogging";
   public static final boolean DFS_NAMENODE_EDITS_ASYNC_LOGGING_DEFAULT = false;
 
+  public static final String DFS_NAMENODE_PROVIDED_ENABLED = "dfs.namenode.provided.enabled";
+  public static final boolean DFS_NAMENODE_PROVIDED_ENABLED_DEFAULT = false;
+
+  public static final String DFS_NAMENODE_BLOCK_PROVIDER_CLASS = "dfs.namenode.block.provider.class";
+
   public static final String DFS_PROVIDER_CLASS = "dfs.provider.class";
   public static final String DFS_PROVIDER_DF_CLASS = "dfs.provided.df.class";
   public static final String DFS_PROVIDER_STORAGEUUID = "dfs.provided.storage.id";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/37114eb0/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockFormatProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockFormatProvider.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockFormatProvider.java
new file mode 100644
index 0000000..930263d
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockFormatProvider.java
@@ -0,0 +1,91 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.blockmanagement;
+
+import java.io.IOException;
+import java.util.Iterator;
+
+import org.apache.hadoop.conf.Configurable;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.server.common.BlockAlias;
+import org.apache.hadoop.hdfs.server.common.BlockFormat;
+import org.apache.hadoop.hdfs.server.common.TextFileRegionFormat;
+import org.apache.hadoop.util.ReflectionUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Loads provided blocks from a {@link BlockFormat}.
+ */
+public class BlockFormatProvider extends BlockProvider
+    implements Configurable {
+
+  private Configuration conf;
+  private BlockFormat<? extends BlockAlias> blockFormat;
+  public static final Logger LOG =
+      LoggerFactory.getLogger(BlockFormatProvider.class);
+
+  @Override
+  @SuppressWarnings({ "rawtypes", "unchecked" })
+  public void setConf(Configuration conf) {
+    Class<? extends BlockFormat> c = conf.getClass(
+        DFSConfigKeys.DFS_PROVIDER_BLK_FORMAT_CLASS,
+        TextFileRegionFormat.class, BlockFormat.class);
+    blockFormat = ReflectionUtils.newInstance(c, conf);
+    LOG.info("Loaded BlockFormat class : " + c.getClass().getName());
+    this.conf = conf;
+  }
+
+  @Override
+  public Configuration getConf() {
+    return conf;
+  }
+
+  @Override
+  public Iterator<Block> iterator() {
+    try {
+      final BlockFormat.Reader<? extends BlockAlias> reader =
+          blockFormat.getReader(null);
+
+      return new Iterator<Block>() {
+
+        private final Iterator<? extends BlockAlias> inner = reader.iterator();
+
+        @Override
+        public boolean hasNext() {
+          return inner.hasNext();
+        }
+
+        @Override
+        public Block next() {
+          return inner.next().getBlock();
+        }
+
+        @Override
+        public void remove() {
+          throw new UnsupportedOperationException();
+        }
+      };
+    } catch (IOException e) {
+      throw new RuntimeException("Failed to read provided blocks", e);
+    }
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/37114eb0/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index a9592bf..c5b9c8b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -375,6 +375,9 @@ public class BlockManager implements BlockStatsMXBean {
    */
   private final short minReplicationToBeInMaintenance;
 
+  /** Storages accessible from multiple DNs. */
+  private final ProvidedStorageMap providedStorageMap;
+
   public BlockManager(final Namesystem namesystem, boolean haEnabled,
       final Configuration conf) throws IOException {
     this.namesystem = namesystem;
@@ -407,6 +410,8 @@ public class BlockManager implements BlockStatsMXBean {
 
     blockTokenSecretManager = createBlockTokenSecretManager(conf);
 
+    providedStorageMap = new ProvidedStorageMap(namesystem, this, conf);
+
     this.maxCorruptFilesReturned = conf.getInt(
       DFSConfigKeys.DFS_DEFAULT_MAX_CORRUPT_FILES_RETURNED_KEY,
       DFSConfigKeys.DFS_DEFAULT_MAX_CORRUPT_FILES_RETURNED);
@@ -1053,7 +1058,7 @@ public class BlockManager implements BlockStatsMXBean {
     final long fileLength = bc.computeContentSummary(
         getStoragePolicySuite()).getLength();
     final long pos = fileLength - lastBlock.getNumBytes();
-    return createLocatedBlock(lastBlock, pos,
+    return createLocatedBlock(null, lastBlock, pos,
         BlockTokenIdentifier.AccessMode.WRITE);
   }
 
@@ -1074,8 +1079,10 @@ public class BlockManager implements BlockStatsMXBean {
     return locations;
   }
 
-  private List<LocatedBlock> createLocatedBlockList(final BlockInfo[] blocks,
-      final long offset, final long length, final int nrBlocksToReturn,
+  private void createLocatedBlockList(
+      LocatedBlockBuilder locatedBlocks,
+      final BlockInfo[] blocks,
+      final long offset, final long length,
       final AccessMode mode) throws IOException {
     int curBlk;
     long curPos = 0, blkSize = 0;
@@ -1090,21 +1097,22 @@ public class BlockManager implements BlockStatsMXBean {
     }
 
     if (nrBlocks > 0 && curBlk == nrBlocks)   // offset >= end of file
-      return Collections.emptyList();
+      return;
 
     long endOff = offset + length;
-    List<LocatedBlock> results = new ArrayList<>(blocks.length);
     do {
-      results.add(createLocatedBlock(blocks[curBlk], curPos, mode));
+      locatedBlocks.addBlock(
+          createLocatedBlock(locatedBlocks, blocks[curBlk], curPos, mode));
       curPos += blocks[curBlk].getNumBytes();
       curBlk++;
     } while (curPos < endOff 
           && curBlk < blocks.length
-          && results.size() < nrBlocksToReturn);
-    return results;
+          && !locatedBlocks.isBlockMax());
+    return;
   }
 
-  private LocatedBlock createLocatedBlock(final BlockInfo[] blocks,
+  private LocatedBlock createLocatedBlock(LocatedBlockBuilder locatedBlocks,
+      final BlockInfo[] blocks,
       final long endPos, final AccessMode mode) throws IOException {
     int curBlk;
     long curPos = 0;
@@ -1117,12 +1125,13 @@ public class BlockManager implements BlockStatsMXBean {
       curPos += blkSize;
     }
     
-    return createLocatedBlock(blocks[curBlk], curPos, mode);
+    return createLocatedBlock(locatedBlocks, blocks[curBlk], curPos, mode);
   }
 
-  private LocatedBlock createLocatedBlock(final BlockInfo blk, final long pos,
-    final AccessMode mode) throws IOException {
-    final LocatedBlock lb = createLocatedBlock(blk, pos);
+  private LocatedBlock createLocatedBlock(LocatedBlockBuilder locatedBlocks,
+      final BlockInfo blk, final long pos, final AccessMode mode)
+          throws IOException {
+    final LocatedBlock lb = createLocatedBlock(locatedBlocks, blk, pos);
     if (mode != null) {
       setBlockToken(lb, mode);
     }
@@ -1130,21 +1139,24 @@ public class BlockManager implements BlockStatsMXBean {
   }
 
   /** @return a LocatedBlock for the given block */
-  private LocatedBlock createLocatedBlock(final BlockInfo blk, final long pos)
-      throws IOException {
+  private LocatedBlock createLocatedBlock(LocatedBlockBuilder locatedBlocks,
+      final BlockInfo blk, final long pos) throws IOException {
     if (!blk.isComplete()) {
       final BlockUnderConstructionFeature uc = blk.getUnderConstructionFeature();
       if (blk.isStriped()) {
         final DatanodeStorageInfo[] storages = uc.getExpectedStorageLocations();
         final ExtendedBlock eb = new ExtendedBlock(getBlockPoolId(),
             blk);
+        //TODO use locatedBlocks builder??
         return newLocatedStripedBlock(eb, storages, uc.getBlockIndices(), pos,
             false);
       } else {
         final DatanodeStorageInfo[] storages = uc.getExpectedStorageLocations();
         final ExtendedBlock eb = new ExtendedBlock(getBlockPoolId(),
             blk);
-        return newLocatedBlock(eb, storages, pos, false);
+        return null == locatedBlocks
+            ? newLocatedBlock(eb, storages, pos, false)
+                : locatedBlocks.newLocatedBlock(eb, storages, pos, false);
       }
     }
 
@@ -1208,9 +1220,10 @@ public class BlockManager implements BlockStatsMXBean {
       " numCorrupt: " + numCorruptNodes +
       " numCorruptRepls: " + numCorruptReplicas;
     final ExtendedBlock eb = new ExtendedBlock(getBlockPoolId(), blk);
-    return blockIndices == null ?
-        newLocatedBlock(eb, machines, pos, isCorrupt) :
-        newLocatedStripedBlock(eb, machines, blockIndices, pos, isCorrupt);
+    return blockIndices == null
+        ? null == locatedBlocks ? newLocatedBlock(eb, machines, pos, isCorrupt)
+            : locatedBlocks.newLocatedBlock(eb, machines, pos, isCorrupt)
+        : newLocatedStripedBlock(eb, machines, blockIndices, pos, isCorrupt);
   }
 
   /** Create a LocatedBlocks. */
@@ -1232,27 +1245,31 @@ public class BlockManager implements BlockStatsMXBean {
         LOG.debug("blocks = " + java.util.Arrays.asList(blocks));
       }
       final AccessMode mode = needBlockToken? BlockTokenIdentifier.AccessMode.READ: null;
-      final List<LocatedBlock> locatedblocks = createLocatedBlockList(
-          blocks, offset, length, Integer.MAX_VALUE, mode);
 
-      final LocatedBlock lastlb;
-      final boolean isComplete;
+      LocatedBlockBuilder locatedBlocks = providedStorageMap
+          .newLocatedBlocks(Integer.MAX_VALUE)
+          .fileLength(fileSizeExcludeBlocksUnderConstruction)
+          .lastUC(isFileUnderConstruction)
+          .encryption(feInfo)
+          .erasureCoding(ecPolicy);
+
+      createLocatedBlockList(locatedBlocks, blocks, offset, length, mode);
       if (!inSnapshot) {
         final BlockInfo last = blocks[blocks.length - 1];
         final long lastPos = last.isComplete()?
             fileSizeExcludeBlocksUnderConstruction - last.getNumBytes()
             : fileSizeExcludeBlocksUnderConstruction;
-        lastlb = createLocatedBlock(last, lastPos, mode);
-        isComplete = last.isComplete();
+
+        locatedBlocks
+          .lastBlock(createLocatedBlock(locatedBlocks, last, lastPos, mode))
+          .lastComplete(last.isComplete());
       } else {
-        lastlb = createLocatedBlock(blocks,
-            fileSizeExcludeBlocksUnderConstruction, mode);
-        isComplete = true;
+        locatedBlocks
+          .lastBlock(createLocatedBlock(locatedBlocks, blocks,
+              fileSizeExcludeBlocksUnderConstruction, mode))
+          .lastComplete(true);
       }
-      LocatedBlocks locations = new LocatedBlocks(
-          fileSizeExcludeBlocksUnderConstruction,
-          isFileUnderConstruction, locatedblocks, lastlb, isComplete, feInfo,
-          ecPolicy);
+      LocatedBlocks locations = locatedBlocks.build();
       // Set caching information for the located blocks.
       CacheManager cm = namesystem.getCacheManager();
       if (cm != null) {
@@ -2336,7 +2353,10 @@ public class BlockManager implements BlockStatsMXBean {
 
       // To minimize startup time, we discard any second (or later) block reports
       // that we receive while still in startup phase.
-      DatanodeStorageInfo storageInfo = node.getStorageInfo(storage.getStorageID());
+      // !#! Register DN with provided storage, not with storage owned by DN
+      // !#! DN should still have a ref to the DNStorageInfo
+      DatanodeStorageInfo storageInfo =
+          providedStorageMap.getStorage(node, storage);
 
       if (storageInfo == null) {
         // We handle this for backwards compatibility.
@@ -2368,9 +2388,12 @@ public class BlockManager implements BlockStatsMXBean {
             nodeID.getDatanodeUuid());
         processFirstBlockReport(storageInfo, newReport);
       } else {
-        invalidatedBlocks = processReport(storageInfo, newReport, context);
+        // Block reports for provided storage are not
+        // maintained by DN heartbeats
+        if (!StorageType.PROVIDED.equals(storageInfo.getStorageType())) {
+          invalidatedBlocks = processReport(storageInfo, newReport, context);
+        }
       }
-      
       storageInfo.receivedBlockReport();
     } finally {
       endTime = Time.monotonicNow();
@@ -2589,7 +2612,7 @@ public class BlockManager implements BlockStatsMXBean {
    * @param report - the initial block report, to be processed
    * @throws IOException 
    */
-  private void processFirstBlockReport(
+  void processFirstBlockReport(
       final DatanodeStorageInfo storageInfo,
       final BlockListAsLongs report) throws IOException {
     if (report == null) return;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/37114eb0/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockProvider.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockProvider.java
new file mode 100644
index 0000000..d8bed16
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockProvider.java
@@ -0,0 +1,65 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.blockmanagement;
+
+import java.io.IOException;
+import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.server.blockmanagement.ProvidedStorageMap.ProvidedBlockList;
+import org.apache.hadoop.hdfs.util.RwLock;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Used to load provided blocks in the {@link BlockManager}.
+ */
+public abstract class BlockProvider implements Iterable<Block> {
+
+  private static final Logger LOG =
+      LoggerFactory.getLogger(ProvidedStorageMap.class);
+
+  private RwLock lock;
+  private BlockManager bm;
+  private DatanodeStorageInfo storage;
+  private boolean hasDNs = false;
+
+  /**
+   * @param lock the namesystem lock
+   * @param bm block manager
+   * @param storage storage for provided blocks
+   */
+  void init(RwLock lock, BlockManager bm, DatanodeStorageInfo storage) {
+    this.bm = bm;
+    this.lock = lock;
+    this.storage = storage;
+  }
+
+  /**
+   * start the processing of block report for provided blocks.
+   * @throws IOException
+   */
+  void start() throws IOException {
+    assert lock.hasWriteLock() : "Not holding write lock";
+    if (hasDNs) {
+      return;
+    }
+    LOG.info("Calling process first blk report from storage: " + storage);
+    // first pass; periodic refresh should call bm.processReport
+    bm.processFirstBlockReport(storage, new ProvidedBlockList(iterator()));
+    hasDNs = true;
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/37114eb0/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockStoragePolicySuite.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockStoragePolicySuite.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockStoragePolicySuite.java
index c8923da..6ea5198 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockStoragePolicySuite.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockStoragePolicySuite.java
@@ -82,6 +82,12 @@ public class BlockStoragePolicySuite {
         HdfsConstants.COLD_STORAGE_POLICY_NAME,
         new StorageType[]{StorageType.ARCHIVE}, StorageType.EMPTY_ARRAY,
         StorageType.EMPTY_ARRAY);
+    final byte providedId = HdfsConstants.PROVIDED_STORAGE_POLICY_ID;
+    policies[providedId] = new BlockStoragePolicy(providedId,
+      HdfsConstants.PROVIDED_STORAGE_POLICY_NAME,
+      new StorageType[]{StorageType.PROVIDED, StorageType.DISK},
+      new StorageType[]{StorageType.PROVIDED, StorageType.DISK},
+      new StorageType[]{StorageType.PROVIDED, StorageType.DISK});
     return new BlockStoragePolicySuite(hotId, policies);
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/37114eb0/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
index 4b87fd4..ab16f1c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
@@ -151,7 +151,7 @@ public class DatanodeDescriptor extends DatanodeInfo {
   private final LeavingServiceStatus leavingServiceStatus =
       new LeavingServiceStatus();
 
-  private final Map<String, DatanodeStorageInfo> storageMap =
+  protected final Map<String, DatanodeStorageInfo> storageMap =
       new HashMap<>();
 
   /**
@@ -322,6 +322,12 @@ public class DatanodeDescriptor extends DatanodeInfo {
   boolean hasStaleStorages() {
     synchronized (storageMap) {
       for (DatanodeStorageInfo storage : storageMap.values()) {
+        if (StorageType.PROVIDED.equals(storage.getStorageType())) {
+          // to verify provided storage participated in this hb, requires
+          // check to pass DNDesc.
+          // e.g., storageInfo.verifyBlockReportId(this, curBlockReportId)
+          continue;
+        }
         if (storage.areBlockContentsStale()) {
           return true;
         }
@@ -439,17 +445,22 @@ public class DatanodeDescriptor extends DatanodeInfo {
     this.volumeFailures = volFailures;
     this.volumeFailureSummary = volumeFailureSummary;
     for (StorageReport report : reports) {
+      totalCapacity += report.getCapacity();
+      totalRemaining += report.getRemaining();
+      totalBlockPoolUsed += report.getBlockPoolUsed();
+      totalDfsUsed += report.getDfsUsed();
+      totalNonDfsUsed += report.getNonDfsUsed();
+
+      if (StorageType.PROVIDED.equals(
+          report.getStorage().getStorageType())) {
+        continue;
+      }
       DatanodeStorageInfo storage = updateStorage(report.getStorage());
       if (checkFailedStorages) {
         failedStorageInfos.remove(storage);
       }
 
       storage.receivedHeartbeat(report);
-      totalCapacity += report.getCapacity();
-      totalRemaining += report.getRemaining();
-      totalBlockPoolUsed += report.getBlockPoolUsed();
-      totalDfsUsed += report.getDfsUsed();
-      totalNonDfsUsed += report.getNonDfsUsed();
     }
     rollBlocksScheduled(getLastUpdateMonotonic());
 
@@ -471,6 +482,17 @@ public class DatanodeDescriptor extends DatanodeInfo {
     }
   }
 
+  void injectStorage(DatanodeStorageInfo s) {
+    synchronized (storageMap) {
+      DatanodeStorageInfo storage = storageMap.get(s.getStorageID());
+      if (null == storage) {
+        storageMap.put(s.getStorageID(), s);
+      } else {
+        assert storage == s : "found " + storage + " expected " + s;
+      }
+    }
+  }
+
   /**
    * Remove stale storages from storageMap. We must not remove any storages
    * as long as they have associated block replicas.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/37114eb0/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
index 7dcc9fd..038aaf7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
@@ -525,6 +525,8 @@ public class DatanodeManager {
     } else {
       networktopology.sortByDistance(client, lb.getLocations(), activeLen);
     }
+    //move PROVIDED storage to the end to prefer local replicas.
+    lb.moveProvidedToEnd(activeLen);
     // must update cache since we modified locations array
     lb.updateCachedStorageInfo();
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/37114eb0/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java
index 8af86d3..ed3905f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java
@@ -172,6 +172,10 @@ public class DatanodeStorageInfo {
     this.state = state;
   }
 
+  void setHeartbeatedSinceFailover(boolean value) {
+    heartbeatedSinceFailover = value;
+  }
+
   boolean areBlocksOnFailedStorage() {
     return getState() == State.FAILED && !blocks.isEmpty();
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/37114eb0/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/LocatedBlockBuilder.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/LocatedBlockBuilder.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/LocatedBlockBuilder.java
new file mode 100644
index 0000000..0056887
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/LocatedBlockBuilder.java
@@ -0,0 +1,109 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.blockmanagement;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+
+import org.apache.hadoop.fs.FileEncryptionInfo;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+class LocatedBlockBuilder {
+
+  protected long flen;
+  protected List<LocatedBlock> blocks = Collections.<LocatedBlock>emptyList();
+  protected boolean isUC;
+  protected LocatedBlock last;
+  protected boolean lastComplete;
+  protected FileEncryptionInfo feInfo;
+  private final int maxBlocks;
+  protected ErasureCodingPolicy ecPolicy;
+
+  LocatedBlockBuilder(int maxBlocks) {
+    this.maxBlocks = maxBlocks;
+  }
+
+  boolean isBlockMax() {
+    return blocks.size() >= maxBlocks;
+  }
+
+  LocatedBlockBuilder fileLength(long fileLength) {
+    flen = fileLength;
+    return this;
+  }
+
+  LocatedBlockBuilder addBlock(LocatedBlock block) {
+    if (blocks.isEmpty()) {
+      blocks = new ArrayList<>();
+    }
+    blocks.add(block);
+    return this;
+  }
+
+  // return new block so tokens can be set
+  LocatedBlock newLocatedBlock(ExtendedBlock eb,
+      DatanodeStorageInfo[] storage,
+      long pos, boolean isCorrupt) {
+    LocatedBlock blk =
+        BlockManager.newLocatedBlock(eb, storage, pos, isCorrupt);
+    return blk;
+  }
+
+  LocatedBlockBuilder lastUC(boolean underConstruction) {
+    isUC = underConstruction;
+    return this;
+  }
+
+  LocatedBlockBuilder lastBlock(LocatedBlock block) {
+    last = block;
+    return this;
+  }
+
+  LocatedBlockBuilder lastComplete(boolean complete) {
+    lastComplete = complete;
+    return this;
+  }
+
+  LocatedBlockBuilder encryption(FileEncryptionInfo fileEncryptionInfo) {
+    feInfo = fileEncryptionInfo;
+    return this;
+  }
+
+  LocatedBlockBuilder erasureCoding(ErasureCodingPolicy codingPolicy) {
+    ecPolicy = codingPolicy;
+    return this;
+  }
+
+  LocatedBlocks build(DatanodeDescriptor client) {
+    return build();
+  }
+
+  LocatedBlocks build() {
+    return new LocatedBlocks(flen, isUC, blocks, last,
+        lastComplete, feInfo, ecPolicy);
+  }
+
+}
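
LocatedBlockBuilder is a package-private fluent builder, so any caller sits
inside org.apache.hadoop.hdfs.server.blockmanagement. A minimal usage sketch
follows; maxBlocksPerCall, fileLen, blockList and lastBlk are hypothetical
inputs, not names from this patch:

    package org.apache.hadoop.hdfs.server.blockmanagement;

    import org.apache.hadoop.hdfs.protocol.LocatedBlock;
    import org.apache.hadoop.hdfs.protocol.LocatedBlocks;

    /** Illustrative usage sketch for the fluent builder above. */
    class LocatedBlockBuilderUsageSketch {
      static LocatedBlocks buildLocations(int maxBlocksPerCall, long fileLen,
          Iterable<LocatedBlock> blockList, LocatedBlock lastBlk) {
        LocatedBlockBuilder lbb = new LocatedBlockBuilder(maxBlocksPerCall)
            .fileLength(fileLen)
            .lastUC(false)
            .lastBlock(lastBlk)
            .lastComplete(true);
        for (LocatedBlock blk : blockList) {
          if (lbb.isBlockMax()) {
            break;   // respect the per-call limit on returned blocks
          }
          lbb.addBlock(blk);
        }
        return lbb.build();
      }
    }

Note that ProvidedStorageMap#newLocatedBlocks (added later in this patch)
returns a ProvidedBlocksBuilder subclass when provided storage is enabled,
so callers can stay agnostic of which builder they received.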

http://git-wip-us.apache.org/repos/asf/hadoop/blob/37114eb0/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ProvidedStorageMap.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ProvidedStorageMap.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ProvidedStorageMap.java
new file mode 100644
index 0000000..d222344
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ProvidedStorageMap.java
@@ -0,0 +1,427 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.blockmanagement;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.NavigableMap;
+import java.util.Set;
+import java.util.UUID;
+import java.util.concurrent.ConcurrentSkipListMap;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
+import org.apache.hadoop.hdfs.protocol.DatanodeID;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfoWithStorage;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage.State;
+import org.apache.hadoop.hdfs.util.RwLock;
+import org.apache.hadoop.util.ReflectionUtils;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.protobuf.ByteString;
+
+/**
+ * This class allows us to manage and multiplex between storages local to
+ * datanodes, and provided storage.
+ */
+public class ProvidedStorageMap {
+
+  private static final Logger LOG =
+      LoggerFactory.getLogger(ProvidedStorageMap.class);
+
+  // limit to a single provider for now
+  private final BlockProvider blockProvider;
+  private final String storageId;
+  private final ProvidedDescriptor providedDescriptor;
+  private final DatanodeStorageInfo providedStorageInfo;
+  private boolean providedEnabled;
+
+  ProvidedStorageMap(RwLock lock, BlockManager bm, Configuration conf)
+      throws IOException {
+
+    storageId = conf.get(DFSConfigKeys.DFS_PROVIDER_STORAGEUUID,
+        DFSConfigKeys.DFS_PROVIDER_STORAGEUUID_DEFAULT);
+
+    providedEnabled = conf.getBoolean(
+        DFSConfigKeys.DFS_NAMENODE_PROVIDED_ENABLED,
+        DFSConfigKeys.DFS_NAMENODE_PROVIDED_ENABLED_DEFAULT);
+
+    if (!providedEnabled) {
+      // disable mapping
+      blockProvider = null;
+      providedDescriptor = null;
+      providedStorageInfo = null;
+      return;
+    }
+
+    DatanodeStorage ds = new DatanodeStorage(
+        storageId, State.NORMAL, StorageType.PROVIDED);
+    providedDescriptor = new ProvidedDescriptor();
+    providedStorageInfo = providedDescriptor.createProvidedStorage(ds);
+
+    // load block reader into storage
+    Class<? extends BlockProvider> fmt = conf.getClass(
+        DFSConfigKeys.DFS_NAMENODE_BLOCK_PROVIDER_CLASS,
+        BlockFormatProvider.class, BlockProvider.class);
+
+    blockProvider = ReflectionUtils.newInstance(fmt, conf);
+    blockProvider.init(lock, bm, providedStorageInfo);
+    LOG.info("Loaded block provider class: " +
+        blockProvider.getClass() + " storage: " + providedStorageInfo);
+  }
+
+  /**
+   * @param dn datanode descriptor
+   * @param s data node storage
+   * @return the {@link DatanodeStorageInfo} for the specified datanode.
+   * If {@code s} corresponds to a provided storage, the storage info
+   * representing provided storage is returned.
+   * @throws IOException
+   */
+  DatanodeStorageInfo getStorage(DatanodeDescriptor dn, DatanodeStorage s)
+      throws IOException {
+    if (providedEnabled && storageId.equals(s.getStorageID())) {
+      if (StorageType.PROVIDED.equals(s.getStorageType())) {
+        // poll service, initiate
+        blockProvider.start();
+        dn.injectStorage(providedStorageInfo);
+        return providedDescriptor.getProvidedStorage(dn, s);
+      }
+      LOG.warn("Reserved storage {} reported as non-provided from {}", s, dn);
+    }
+    return dn.getStorageInfo(s.getStorageID());
+  }
+
+  public LocatedBlockBuilder newLocatedBlocks(int maxValue) {
+    if (!providedEnabled) {
+      return new LocatedBlockBuilder(maxValue);
+    }
+    return new ProvidedBlocksBuilder(maxValue);
+  }
+
+  /**
+   * Builder used for creating {@link LocatedBlocks} when a block is provided.
+   */
+  class ProvidedBlocksBuilder extends LocatedBlockBuilder {
+
+    private ShadowDatanodeInfoWithStorage pending;
+
+    ProvidedBlocksBuilder(int maxBlocks) {
+      super(maxBlocks);
+      pending = new ShadowDatanodeInfoWithStorage(
+          providedDescriptor, storageId);
+    }
+
+    @Override
+    LocatedBlock newLocatedBlock(ExtendedBlock eb,
+        DatanodeStorageInfo[] storages, long pos, boolean isCorrupt) {
+
+      DatanodeInfoWithStorage[] locs =
+        new DatanodeInfoWithStorage[storages.length];
+      String[] sids = new String[storages.length];
+      StorageType[] types = new StorageType[storages.length];
+      for (int i = 0; i < storages.length; ++i) {
+        sids[i] = storages[i].getStorageID();
+        types[i] = storages[i].getStorageType();
+        if (StorageType.PROVIDED.equals(storages[i].getStorageType())) {
+          locs[i] = pending;
+        } else {
+          locs[i] = new DatanodeInfoWithStorage(
+              storages[i].getDatanodeDescriptor(), sids[i], types[i]);
+        }
+      }
+      return new LocatedBlock(eb, locs, sids, types, pos, isCorrupt, null);
+    }
+
+    @Override
+    LocatedBlocks build(DatanodeDescriptor client) {
+      // TODO: to support multiple provided storages, need to pass/maintain map
+      // set all fields of pending DatanodeInfo
+      List<String> excludedUUids = new ArrayList<String>();
+      for (LocatedBlock b: blocks) {
+        DatanodeInfo[] infos = b.getLocations();
+        StorageType[] types = b.getStorageTypes();
+
+        for (int i = 0; i < types.length; i++) {
+          if (!StorageType.PROVIDED.equals(types[i])) {
+            excludedUUids.add(infos[i].getDatanodeUuid());
+          }
+        }
+      }
+
+      DatanodeDescriptor dn = providedDescriptor.choose(client, excludedUUids);
+      if (dn == null) {
+        dn = providedDescriptor.choose(client);
+      }
+
+      pending.replaceInternal(dn);
+      return new LocatedBlocks(
+          flen, isUC, blocks, last, lastComplete, feInfo, ecPolicy);
+    }
+
+    @Override
+    LocatedBlocks build() {
+      return build(providedDescriptor.chooseRandom());
+    }
+  }
+
+  /**
+   * A {@link DatanodeInfoWithStorage} stand-in used to represent provided
+   * storage; the backing datanode is filled in when the locations are built.
+   */
+  static class ShadowDatanodeInfoWithStorage extends DatanodeInfoWithStorage {
+    private String shadowUuid;
+
+    ShadowDatanodeInfoWithStorage(DatanodeDescriptor d, String storageId) {
+      super(d, storageId, StorageType.PROVIDED);
+    }
+
+    @Override
+    public String getDatanodeUuid() {
+      return shadowUuid;
+    }
+
+    public void setDatanodeUuid(String uuid) {
+      shadowUuid = uuid;
+    }
+
+    void replaceInternal(DatanodeDescriptor dn) {
+      updateRegInfo(dn); // overwrite DatanodeID (except UUID)
+      setDatanodeUuid(dn.getDatanodeUuid());
+      setCapacity(dn.getCapacity());
+      setDfsUsed(dn.getDfsUsed());
+      setRemaining(dn.getRemaining());
+      setBlockPoolUsed(dn.getBlockPoolUsed());
+      setCacheCapacity(dn.getCacheCapacity());
+      setCacheUsed(dn.getCacheUsed());
+      setLastUpdate(dn.getLastUpdate());
+      setLastUpdateMonotonic(dn.getLastUpdateMonotonic());
+      setXceiverCount(dn.getXceiverCount());
+      setNetworkLocation(dn.getNetworkLocation());
+      adminState = dn.getAdminState();
+      setUpgradeDomain(dn.getUpgradeDomain());
+    }
+
+    @Override
+    public boolean equals(Object obj) {
+      return super.equals(obj);
+    }
+
+    @Override
+    public int hashCode() {
+      return super.hashCode();
+    }
+  }
+
+  /**
+   * A DatanodeDescriptor stand-in that tracks the datanodes with provided
+   * storages.
+   * NOTE: never resolved through registerDatanode, so not in the topology.
+   */
+  static class ProvidedDescriptor extends DatanodeDescriptor {
+
+    private final NavigableMap<String, DatanodeDescriptor> dns =
+        new ConcurrentSkipListMap<>();
+
+    ProvidedDescriptor() {
+      super(new DatanodeID(
+            null,                         // String ipAddr,
+            null,                         // String hostName,
+            UUID.randomUUID().toString(), // String datanodeUuid,
+            0,                            // int xferPort,
+            0,                            // int infoPort,
+            0,                            // int infoSecurePort,
+            0));                          // int ipcPort
+    }
+
+    DatanodeStorageInfo getProvidedStorage(
+        DatanodeDescriptor dn, DatanodeStorage s) {
+      dns.put(dn.getDatanodeUuid(), dn);
+      // TODO: maintain separate RPC ident per dn
+      return storageMap.get(s.getStorageID());
+    }
+
+    DatanodeStorageInfo createProvidedStorage(DatanodeStorage ds) {
+      assert null == storageMap.get(ds.getStorageID());
+      DatanodeStorageInfo storage = new DatanodeStorageInfo(this, ds);
+      storage.setHeartbeatedSinceFailover(true);
+      storageMap.put(storage.getStorageID(), storage);
+      return storage;
+    }
+
+    DatanodeDescriptor choose(DatanodeDescriptor client) {
+      // exact match for now
+      DatanodeDescriptor dn = dns.get(client.getDatanodeUuid());
+      if (null == dn) {
+        dn = chooseRandom();
+      }
+      return dn;
+    }
+
+    DatanodeDescriptor choose(DatanodeDescriptor client,
+        List<String> excludedUUids) {
+      // exact match for now
+      DatanodeDescriptor dn = dns.get(client.getDatanodeUuid());
+
+      if (null == dn || excludedUUids.contains(client.getDatanodeUuid())) {
+        dn = null;
+        Set<String> exploredUUids = new HashSet<String>();
+
+        while(exploredUUids.size() < dns.size()) {
+          Map.Entry<String, DatanodeDescriptor> d =
+                  dns.ceilingEntry(UUID.randomUUID().toString());
+          if (null == d) {
+            d = dns.firstEntry();
+          }
+          String uuid = d.getValue().getDatanodeUuid();
+          //this node has already been explored, and was not selected earlier
+          if (exploredUUids.contains(uuid)) {
+            continue;
+          }
+          exploredUUids.add(uuid);
+          //this node has been excluded
+          if (excludedUUids.contains(uuid)) {
+            continue;
+          }
+          return dns.get(uuid);
+        }
+      }
+
+      return dn;
+    }
+
+    DatanodeDescriptor chooseRandom(DatanodeStorageInfo[] excludedStorages) {
+      // TODO: Currently this is not uniformly random;
+      // skewed toward sparse sections of the ids
+      Set<DatanodeDescriptor> excludedNodes =
+          new HashSet<DatanodeDescriptor>();
+      if (excludedStorages != null) {
+        for (int i= 0; i < excludedStorages.length; i++) {
+          LOG.info("Excluded: " + excludedStorages[i].getDatanodeDescriptor());
+          excludedNodes.add(excludedStorages[i].getDatanodeDescriptor());
+        }
+      }
+      Set<DatanodeDescriptor> exploredNodes = new HashSet<DatanodeDescriptor>();
+
+      while(exploredNodes.size() < dns.size()) {
+        Map.Entry<String, DatanodeDescriptor> d =
+            dns.ceilingEntry(UUID.randomUUID().toString());
+        if (null == d) {
+          d = dns.firstEntry();
+        }
+        DatanodeDescriptor node = d.getValue();
+        //this node has already been explored, and was not selected earlier
+        if (exploredNodes.contains(node)) {
+          continue;
+        }
+        exploredNodes.add(node);
+        //this node has been excluded
+        if (excludedNodes.contains(node)) {
+          continue;
+        }
+        return node;
+      }
+      return null;
+    }
+
+    DatanodeDescriptor chooseRandom() {
+      return chooseRandom(null);
+    }
+
+    @Override
+    void addBlockToBeReplicated(Block block, DatanodeStorageInfo[] targets) {
+      // pick a random datanode, delegate to it
+      DatanodeDescriptor node = chooseRandom(targets);
+      if (node != null) {
+        node.addBlockToBeReplicated(block, targets);
+      } else {
+        LOG.error("Cannot find a source node to replicate block: "
+            + block + " from");
+      }
+    }
+
+    @Override
+    public boolean equals(Object obj) {
+      return (this == obj) || super.equals(obj);
+    }
+
+    @Override
+    public int hashCode() {
+      return super.hashCode();
+    }
+  }
+
+  /**
+   * Used to emulate block reports for provided blocks.
+   */
+  static class ProvidedBlockList extends BlockListAsLongs {
+
+    private final Iterator<Block> inner;
+
+    ProvidedBlockList(Iterator<Block> inner) {
+      this.inner = inner;
+    }
+
+    @Override
+    public Iterator<BlockReportReplica> iterator() {
+      return new Iterator<BlockReportReplica>() {
+        @Override
+        public BlockReportReplica next() {
+          return new BlockReportReplica(inner.next());
+        }
+        @Override
+        public boolean hasNext() {
+          return inner.hasNext();
+        }
+        @Override
+        public void remove() {
+          throw new UnsupportedOperationException();
+        }
+      };
+    }
+
+    @Override
+    public int getNumberOfBlocks() {
+      // VERIFY: only printed for debugging
+      return -1;
+    }
+
+    @Override
+    public ByteString getBlocksBuffer() {
+      throw new UnsupportedOperationException();
+    }
+
+    @Override
+    public long[] getBlockListAsLongs() {
+      // should only be used for backwards compat, DN.ver > NN.ver
+      throw new UnsupportedOperationException();
+    }
+  }
+}
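
ProvidedDescriptor picks a datanode by probing its ConcurrentSkipListMap
with ceilingEntry on a random UUID and wrapping around to firstEntry, which
is why the TODO notes the choice is skewed toward sparse regions of the key
space. The same technique in isolation, as a self-contained sketch with
hypothetical names:

    import java.util.Map;
    import java.util.NavigableMap;
    import java.util.UUID;
    import java.util.concurrent.ConcurrentSkipListMap;

    /** Standalone sketch of the random-probe selection used above. */
    class RandomProbeSketch {
      private final NavigableMap<String, String> members =
          new ConcurrentSkipListMap<>();

      void add(String uuid, String value) {
        members.put(uuid, value);
      }

      /** Probe at a random UUID key and wrap around past the largest key. */
      String pick() {
        if (members.isEmpty()) {
          return null;
        }
        Map.Entry<String, String> e =
            members.ceilingEntry(UUID.randomUUID().toString());
        if (e == null) {
          e = members.firstEntry();
        }
        // As the TODO above observes, keys that follow large gaps in the
        // key space are selected more often, so this is not uniformly random.
        return e.getValue();
      }
    }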

http://git-wip-us.apache.org/repos/asf/hadoop/blob/37114eb0/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
index 6df243f..0b8063c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
@@ -4461,14 +4461,30 @@
   </property>
 
   <property>
+    <name>dfs.namenode.provided.enabled</name>
+    <value>false</value>
+    <description>
+      Enables the Namenode to handle provided storages.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.namenode.block.provider.class</name>
+    <value>org.apache.hadoop.hdfs.server.blockmanagement.BlockFormatProvider</value>
+    <description>
+      The class that is used to load provided blocks in the Namenode.
+    </description>
+  </property>
+
+  <property>
     <name>dfs.provider.class</name>
     <value>org.apache.hadoop.hdfs.server.common.TextFileRegionProvider</value>
     <description>
-        The class that is used to load information about blocks stored in
-        provided storages.
-        org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.TextFileRegionProvider
-        is used as the default, which expects the blocks to be specified
-        using a delimited text file.
+      The class that is used to load information about blocks stored in
+      provided storages.
+      org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.TextFileRegionProvider
+      is used as the default, which expects the blocks to be specified
+      using a delimited text file.
     </description>
   </property>
 
@@ -4476,7 +4492,7 @@
     <name>dfs.provided.df.class</name>
     <value>org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.DefaultProvidedVolumeDF</value>
     <description>
-        The class that is used to measure usage statistics of provided stores.
+      The class that is used to measure usage statistics of provided stores.
     </description>
   </property>
 
@@ -4484,7 +4500,7 @@
     <name>dfs.provided.storage.id</name>
     <value>DS-PROVIDED</value>
     <description>
-        The storage ID used for provided stores.
+      The storage ID used for provided stores.
     </description>
   </property>
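
The properties documented above can also be set programmatically. A minimal
sketch that mirrors the documented defaults (the class names and storage id
are taken from the descriptions above; the rest of the cluster setup is
omitted):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.HdfsConfiguration;

    /** Sketch: enable provided storage using the properties shown above. */
    class ProvidedConfigSketch {
      static Configuration providedConf() {
        Configuration conf = new HdfsConfiguration();
        // Off by default; lets the Namenode handle PROVIDED storages.
        conf.setBoolean("dfs.namenode.provided.enabled", true);
        // These match the defaults in hdfs-default.xml; set explicitly
        // here only for clarity.
        conf.set("dfs.namenode.block.provider.class",
            "org.apache.hadoop.hdfs.server.blockmanagement.BlockFormatProvider");
        conf.set("dfs.provider.class",
            "org.apache.hadoop.hdfs.server.common.TextFileRegionProvider");
        conf.set("dfs.provided.storage.id", "DS-PROVIDED");
        return conf;
      }
    }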
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/37114eb0/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java
index 3a8fb59..12045a9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java
@@ -84,6 +84,7 @@ public class TestBlockStoragePolicy {
   static final byte ONESSD  = HdfsConstants.ONESSD_STORAGE_POLICY_ID;
   static final byte ALLSSD  = HdfsConstants.ALLSSD_STORAGE_POLICY_ID;
   static final byte LAZY_PERSIST  = HdfsConstants.MEMORY_STORAGE_POLICY_ID;
+  static final byte PROVIDED  = HdfsConstants.PROVIDED_STORAGE_POLICY_ID;
 
   @Test (timeout=300000)
   public void testConfigKeyEnabled() throws IOException {
@@ -143,6 +144,9 @@ public class TestBlockStoragePolicy {
     expectedPolicyStrings.put(ALLSSD, "BlockStoragePolicy{ALL_SSD:" + ALLSSD +
         ", storageTypes=[SSD], creationFallbacks=[DISK], " +
         "replicationFallbacks=[DISK]}");
+    expectedPolicyStrings.put(PROVIDED, "BlockStoragePolicy{PROVIDED:" + PROVIDED +
+        ", storageTypes=[PROVIDED, DISK], creationFallbacks=[PROVIDED, DISK], " +
+        "replicationFallbacks=[PROVIDED, DISK]}");
 
     for(byte i = 1; i < 16; i++) {
       final BlockStoragePolicy policy = POLICY_SUITE.getPolicy(i); 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/37114eb0/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestDatanodeManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestDatanodeManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestDatanodeManager.java
index 30e2aaf..dd24311 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestDatanodeManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestDatanodeManager.java
@@ -25,6 +25,7 @@ import java.net.URL;
 import java.nio.file.Path;
 import java.nio.file.Paths;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.Iterator;
@@ -291,7 +292,7 @@ public class TestDatanodeManager {
    */
   @Test
   public void testSortLocatedBlocks() throws IOException, URISyntaxException {
-    HelperFunction(null);
+    HelperFunction(null, 0);
   }
 
   /**
@@ -303,7 +304,7 @@ public class TestDatanodeManager {
    */
   @Test
   public void testgoodScript() throws IOException, URISyntaxException {
-    HelperFunction("/" + Shell.appendScriptExtension("topology-script"));
+    HelperFunction("/" + Shell.appendScriptExtension("topology-script"), 0);
   }
 
 
@@ -316,7 +317,21 @@ public class TestDatanodeManager {
    */
   @Test
   public void testBadScript() throws IOException, URISyntaxException {
-    HelperFunction("/"+ Shell.appendScriptExtension("topology-broken-script"));
+    HelperFunction("/"+ Shell.appendScriptExtension("topology-broken-script"), 0);
+  }
+
+  /**
+   * Test with different sorting functions, but include datanodes
+   * with provided storage.
+   * @throws IOException
+   * @throws URISyntaxException
+   */
+  @Test
+  public void testWithProvidedTypes() throws IOException, URISyntaxException {
+    HelperFunction(null, 1);
+    HelperFunction(null, 3);
+    HelperFunction("/" + Shell.appendScriptExtension("topology-script"), 1);
+    HelperFunction("/" + Shell.appendScriptExtension("topology-script"), 2);
   }
 
   /**
@@ -324,11 +339,12 @@ public class TestDatanodeManager {
    * we invoke this function with and without topology scripts
    *
    * @param scriptFileName - Script Name or null
+   * @param providedStorages - number of provided storages to add
    *
    * @throws URISyntaxException
    * @throws IOException
    */
-  public void HelperFunction(String scriptFileName)
+  public void HelperFunction(String scriptFileName, int providedStorages)
     throws URISyntaxException, IOException {
     // create the DatanodeManager which will be tested
     Configuration conf = new Configuration();
@@ -343,17 +359,25 @@ public class TestDatanodeManager {
     }
     DatanodeManager dm = mockDatanodeManager(fsn, conf);
 
+    int totalDNs = 5 + providedStorages;
+
     // register 5 datanodes, each with different storage ID and type
-    DatanodeInfo[] locs = new DatanodeInfo[5];
-    String[] storageIDs = new String[5];
-    StorageType[] storageTypes = new StorageType[]{
-      StorageType.ARCHIVE,
-      StorageType.DEFAULT,
-      StorageType.DISK,
-      StorageType.RAM_DISK,
-      StorageType.SSD
-    };
-    for (int i = 0; i < 5; i++) {
+    DatanodeInfo[] locs = new DatanodeInfo[totalDNs];
+    String[] storageIDs = new String[totalDNs];
+    List<StorageType> storageTypesList = new ArrayList<>(
+        Arrays.asList(StorageType.ARCHIVE,
+            StorageType.DEFAULT,
+            StorageType.DISK,
+            StorageType.RAM_DISK,
+            StorageType.SSD));
+
+    for (int i = 0; i < providedStorages; i++) {
+      storageTypesList.add(StorageType.PROVIDED);
+    }
+
+    StorageType[] storageTypes= storageTypesList.toArray(new StorageType[0]);
+
+    for (int i = 0; i < totalDNs; i++) {
       // register new datanode
       String uuid = "UUID-" + i;
       String ip = "IP-" + i;
@@ -389,9 +413,9 @@ public class TestDatanodeManager {
     DatanodeInfo[] sortedLocs = block.getLocations();
     storageIDs = block.getStorageIDs();
     storageTypes = block.getStorageTypes();
-    assertThat(sortedLocs.length, is(5));
-    assertThat(storageIDs.length, is(5));
-    assertThat(storageTypes.length, is(5));
+    assertThat(sortedLocs.length, is(totalDNs));
+    assertThat(storageIDs.length, is(totalDNs));
+    assertThat(storageTypes.length, is(totalDNs));
     for (int i = 0; i < sortedLocs.length; i++) {
       assertThat(((DatanodeInfoWithStorage) sortedLocs[i]).getStorageID(),
         is(storageIDs[i]));
@@ -405,6 +429,14 @@ public class TestDatanodeManager {
       is(DatanodeInfo.AdminStates.DECOMMISSIONED));
     assertThat(sortedLocs[sortedLocs.length - 2].getAdminState(),
       is(DatanodeInfo.AdminStates.DECOMMISSIONED));
+    // check that the StorageType of the datanodes immediately
+    // preceding the decommissioned datanodes is PROVIDED
+    for (int i = 0; i < providedStorages; i++) {
+      assertThat(
+          ((DatanodeInfoWithStorage)
+              sortedLocs[sortedLocs.length - 3 - i]).getStorageType(),
+          is(StorageType.PROVIDED));
+    }
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/37114eb0/hadoop-tools/hadoop-fs2img/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeProvidedImplementation.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-fs2img/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeProvidedImplementation.java b/hadoop-tools/hadoop-fs2img/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeProvidedImplementation.java
new file mode 100644
index 0000000..3b75806
--- /dev/null
+++ b/hadoop-tools/hadoop-fs2img/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeProvidedImplementation.java
@@ -0,0 +1,345 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode;
+
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.io.OutputStreamWriter;
+import java.io.Writer;
+import java.nio.ByteBuffer;
+import java.nio.channels.Channels;
+import java.nio.channels.ReadableByteChannel;
+import java.util.Random;
+import org.apache.hadoop.fs.BlockLocation;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockFormatProvider;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockProvider;
+import org.apache.hadoop.hdfs.server.common.BlockFormat;
+import org.apache.hadoop.hdfs.server.common.FileRegionProvider;
+import org.apache.hadoop.hdfs.server.common.TextFileRegionFormat;
+import org.apache.hadoop.hdfs.server.common.TextFileRegionProvider;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY;
+
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TestName;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import static org.junit.Assert.*;
+
+public class TestNameNodeProvidedImplementation {
+
+  @Rule public TestName name = new TestName();
+  public static final Logger LOG =
+      LoggerFactory.getLogger(TestNameNodeProvidedImplementation.class);
+
+  final Random r = new Random();
+  final File fBASE = new File(MiniDFSCluster.getBaseDirectory());
+  final Path BASE = new Path(fBASE.toURI().toString());
+  final Path NAMEPATH = new Path(BASE, "providedDir");
+  final Path NNDIRPATH = new Path(BASE, "nnDir");
+  final Path BLOCKFILE = new Path(NNDIRPATH, "blocks.csv");
+  final String SINGLEUSER = "usr1";
+  final String SINGLEGROUP = "grp1";
+
+  Configuration conf;
+  MiniDFSCluster cluster;
+
+  @Before
+  public void setSeed() throws Exception {
+    if (fBASE.exists() && !FileUtil.fullyDelete(fBASE)) {
+      throw new IOException("Could not fully delete " + fBASE);
+    }
+    long seed = r.nextLong();
+    r.setSeed(seed);
+    System.out.println(name.getMethodName() + " seed: " + seed);
+    conf = new HdfsConfiguration();
+    conf.set(SingleUGIResolver.USER, SINGLEUSER);
+    conf.set(SingleUGIResolver.GROUP, SINGLEGROUP);
+
+    conf.set(DFSConfigKeys.DFS_PROVIDER_STORAGEUUID,
+        DFSConfigKeys.DFS_PROVIDER_STORAGEUUID_DEFAULT);
+    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_PROVIDED_ENABLED, true);
+
+    conf.setClass(DFSConfigKeys.DFS_NAMENODE_BLOCK_PROVIDER_CLASS,
+        BlockFormatProvider.class, BlockProvider.class);
+    conf.setClass(DFSConfigKeys.DFS_PROVIDER_CLASS,
+        TextFileRegionProvider.class, FileRegionProvider.class);
+    conf.setClass(DFSConfigKeys.DFS_PROVIDER_BLK_FORMAT_CLASS,
+        TextFileRegionFormat.class, BlockFormat.class);
+
+    conf.set(DFSConfigKeys.DFS_PROVIDED_BLOCK_MAP_WRITE_PATH,
+        BLOCKFILE.toString());
+    conf.set(DFSConfigKeys.DFS_PROVIDED_BLOCK_MAP_READ_PATH,
+        BLOCKFILE.toString());
+    conf.set(DFSConfigKeys.DFS_PROVIDED_BLOCK_MAP_DELIMITER, ",");
+
+    File imageDir = new File(NAMEPATH.toUri());
+    if (!imageDir.exists()) {
+      LOG.info("Creating directory: " + imageDir);
+      imageDir.mkdirs();
+    }
+
+    File nnDir = new File(NNDIRPATH.toUri());
+    if (!nnDir.exists()) {
+      nnDir.mkdirs();
+    }
+
+    // create 10 random files under BASE
+    for (int i=0; i < 10; i++) {
+      File newFile = new File(new Path(NAMEPATH, "file" + i).toUri());
+      if(!newFile.exists()) {
+        try {
+          LOG.info("Creating " + newFile.toString());
+          newFile.createNewFile();
+          Writer writer = new OutputStreamWriter(
+              new FileOutputStream(newFile.getAbsolutePath()), "utf-8");
+          for(int j=0; j < 10*i; j++) {
+            writer.write("0");
+          }
+          writer.flush();
+          writer.close();
+        } catch (IOException e) {
+          e.printStackTrace();
+        }
+      }
+    }
+  }
+
+  @After
+  public void shutdown() throws Exception {
+    try {
+      if (cluster != null) {
+        cluster.shutdown(true, true);
+      }
+    } finally {
+      cluster = null;
+    }
+  }
+
+  void createImage(TreeWalk t, Path out,
+      Class<? extends BlockResolver> blockIdsClass) throws Exception {
+    ImageWriter.Options opts = ImageWriter.defaults();
+    opts.setConf(conf);
+    opts.output(out.toString())
+        .blocks(TextFileRegionFormat.class)
+        .blockIds(blockIdsClass);
+    try (ImageWriter w = new ImageWriter(opts)) {
+      for (TreePath e : t) {
+        w.accept(e);
+      }
+    }
+  }
+
+  void startCluster(Path nspath, int numDatanodes,
+      StorageType[] storageTypes,
+      StorageType[][] storageTypesPerDatanode)
+      throws IOException {
+    conf.set(DFS_NAMENODE_NAME_DIR_KEY, nspath.toString());
+
+    if (storageTypesPerDatanode != null) {
+      cluster = new MiniDFSCluster.Builder(conf)
+          .format(false)
+          .manageNameDfsDirs(false)
+          .numDataNodes(numDatanodes)
+          .storageTypes(storageTypesPerDatanode)
+          .build();
+    } else if (storageTypes != null) {
+      cluster = new MiniDFSCluster.Builder(conf)
+          .format(false)
+          .manageNameDfsDirs(false)
+          .numDataNodes(numDatanodes)
+          .storagesPerDatanode(storageTypes.length)
+          .storageTypes(storageTypes)
+          .build();
+    } else {
+      cluster = new MiniDFSCluster.Builder(conf)
+          .format(false)
+          .manageNameDfsDirs(false)
+          .numDataNodes(numDatanodes)
+          .build();
+    }
+    cluster.waitActive();
+  }
+
+  @Test(timeout = 20000)
+  public void testLoadImage() throws Exception {
+    final long seed = r.nextLong();
+    LOG.info("NAMEPATH: " + NAMEPATH);
+    createImage(new RandomTreeWalk(seed), NNDIRPATH, FixedBlockResolver.class);
+    startCluster(NNDIRPATH, 0, new StorageType[] {StorageType.PROVIDED}, null);
+
+    FileSystem fs = cluster.getFileSystem();
+    for (TreePath e : new RandomTreeWalk(seed)) {
+      FileStatus rs = e.getFileStatus();
+      Path hp = new Path(rs.getPath().toUri().getPath());
+      assertTrue(fs.exists(hp));
+      FileStatus hs = fs.getFileStatus(hp);
+      assertEquals(rs.getPath().toUri().getPath(),
+                   hs.getPath().toUri().getPath());
+      assertEquals(rs.getPermission(), hs.getPermission());
+      assertEquals(rs.getLen(), hs.getLen());
+      assertEquals(SINGLEUSER, hs.getOwner());
+      assertEquals(SINGLEGROUP, hs.getGroup());
+      assertEquals(rs.getAccessTime(), hs.getAccessTime());
+      assertEquals(rs.getModificationTime(), hs.getModificationTime());
+    }
+  }
+
+  @Test(timeout=20000)
+  public void testBlockLoad() throws Exception {
+    conf.setClass(ImageWriter.Options.UGI_CLASS,
+        SingleUGIResolver.class, UGIResolver.class);
+    createImage(new FSTreeWalk(NAMEPATH, conf), NNDIRPATH,
+        FixedBlockResolver.class);
+    startCluster(NNDIRPATH, 1, new StorageType[] {StorageType.PROVIDED}, null);
+  }
+
+  @Test(timeout=500000)
+  public void testDefaultReplication() throws Exception {
+    int targetReplication = 2;
+    conf.setInt(FixedBlockMultiReplicaResolver.REPLICATION, targetReplication);
+    createImage(new FSTreeWalk(NAMEPATH, conf), NNDIRPATH,
+        FixedBlockMultiReplicaResolver.class);
+    // configure the last Datanode with only DISK storage
+    startCluster(NNDIRPATH, 3, null,
+        new StorageType[][] {
+          {StorageType.PROVIDED},
+          {StorageType.PROVIDED},
+          {StorageType.DISK}}
+        );
+    // wait for the replication to finish
+    Thread.sleep(50000);
+
+    FileSystem fs = cluster.getFileSystem();
+    int count = 0;
+    for (TreePath e : new FSTreeWalk(NAMEPATH, conf)) {
+      FileStatus rs = e.getFileStatus();
+      Path hp = removePrefix(NAMEPATH, rs.getPath());
+      LOG.info("hp " + hp.toUri().getPath());
+      //skip HDFS specific files, which may have been created later on.
+      if (hp.toString().contains("in_use.lock")
+          || hp.toString().contains("current")) {
+        continue;
+      }
+      e.accept(count++);
+      assertTrue(fs.exists(hp));
+      FileStatus hs = fs.getFileStatus(hp);
+
+      if (rs.isFile()) {
+        BlockLocation[] bl = fs.getFileBlockLocations(
+            hs.getPath(), 0, hs.getLen());
+        int i = 0;
+        for(; i < bl.length; i++) {
+          int currentRep = bl[i].getHosts().length;
+          assertEquals(targetReplication , currentRep);
+        }
+      }
+    }
+  }
+
+
+  static Path removePrefix(Path base, Path walk) {
+    Path wpath = new Path(walk.toUri().getPath());
+    Path bpath = new Path(base.toUri().getPath());
+    Path ret = new Path("/");
+    while (!(bpath.equals(wpath) || "".equals(wpath.getName()))) {
+      ret = "".equals(ret.getName())
+        ? new Path("/", wpath.getName())
+        : new Path(new Path("/", wpath.getName()),
+                   new Path(ret.toString().substring(1)));
+      wpath = wpath.getParent();
+    }
+    if (!bpath.equals(wpath)) {
+      throw new IllegalArgumentException(base + " not a prefix of " + walk);
+    }
+    return ret;
+  }
+
+  @Test(timeout=30000)
+  public void testBlockRead() throws Exception {
+    conf.setClass(ImageWriter.Options.UGI_CLASS,
+        FsUGIResolver.class, UGIResolver.class);
+    createImage(new FSTreeWalk(NAMEPATH, conf), NNDIRPATH,
+        FixedBlockResolver.class);
+    startCluster(NNDIRPATH, 3, new StorageType[] {StorageType.PROVIDED}, null);
+    FileSystem fs = cluster.getFileSystem();
+    Thread.sleep(2000);
+    int count = 0;
+    // read NN metadata, verify contents match
+    for (TreePath e : new FSTreeWalk(NAMEPATH, conf)) {
+      FileStatus rs = e.getFileStatus();
+      Path hp = removePrefix(NAMEPATH, rs.getPath());
+      LOG.info("hp " + hp.toUri().getPath());
+      //skip HDFS specific files, which may have been created later on.
+      if(hp.toString().contains("in_use.lock")
+          || hp.toString().contains("current")) {
+        continue;
+      }
+      e.accept(count++);
+      assertTrue(fs.exists(hp));
+      FileStatus hs = fs.getFileStatus(hp);
+      assertEquals(hp.toUri().getPath(), hs.getPath().toUri().getPath());
+      assertEquals(rs.getPermission(), hs.getPermission());
+      assertEquals(rs.getOwner(), hs.getOwner());
+      assertEquals(rs.getGroup(), hs.getGroup());
+
+      if (rs.isFile()) {
+        assertEquals(rs.getLen(), hs.getLen());
+        try (ReadableByteChannel i = Channels.newChannel(
+              new FileInputStream(new File(rs.getPath().toUri())))) {
+          try (ReadableByteChannel j = Channels.newChannel(
+                fs.open(hs.getPath()))) {
+            ByteBuffer ib = ByteBuffer.allocate(4096);
+            ByteBuffer jb = ByteBuffer.allocate(4096);
+            while (true) {
+              int il = i.read(ib);
+              int jl = j.read(jb);
+              if (il < 0 || jl < 0) {
+                assertEquals(il, jl);
+                break;
+              }
+              ib.flip();
+              jb.flip();
+              int cmp = Math.min(ib.remaining(), jb.remaining());
+              for (int k = 0; k < cmp; ++k) {
+                assertEquals(ib.get(), jb.get());
+              }
+              ib.compact();
+              jb.compact();
+            }
+
+          }
+        }
+      }
+    }
+  }
+}




[20/29] hadoop git commit: MAPREDUCE-6459. Native task crashes when merging spilled file on ppc64. Contributed by Tao Jie and Ayappan.

Posted by vi...@apache.org.
MAPREDUCE-6459. Native task crashes when merging spilled file on ppc64. Contributed by Tao Jie and Ayappan.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/035d4683
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/035d4683
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/035d4683

Branch: refs/heads/HDFS-9806
Commit: 035d46836be9463a1e5d8237274150e63dc5190a
Parents: e6f6682
Author: Akira Ajisaka <aa...@apache.org>
Authored: Wed May 17 07:50:29 2017 -0400
Committer: Akira Ajisaka <aa...@apache.org>
Committed: Wed May 17 07:50:29 2017 -0400

----------------------------------------------------------------------
 .../hadoop-mapreduce-client-nativetask/src/CMakeLists.txt          | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/035d4683/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/CMakeLists.txt
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/CMakeLists.txt b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/CMakeLists.txt
index d9cec1a..bbeece9 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/CMakeLists.txt
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/CMakeLists.txt
@@ -26,7 +26,7 @@ set(GTEST_SRC_DIR ${CMAKE_SOURCE_DIR}/../../../../hadoop-common-project/hadoop-c
 
 # Add extra compiler and linker flags.
 # -Wno-sign-compare
-hadoop_add_compiler_flags("-DNDEBUG -DSIMPLE_MEMCPY -fno-strict-aliasing")
+hadoop_add_compiler_flags("-DNDEBUG -DSIMPLE_MEMCPY -fno-strict-aliasing -fsigned-char")
 
 # Source location.
 set(SRC main/native)




[16/29] hadoop git commit: YARN-6447. Provide container sandbox policies for groups (gphillips via rkanter)

Posted by vi...@apache.org.
YARN-6447. Provide container sandbox policies for groups (gphillips via rkanter)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/18c494a0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/18c494a0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/18c494a0

Branch: refs/heads/HDFS-9806
Commit: 18c494a00c8ead768f3a868b450dceea485559df
Parents: 101852c
Author: Robert Kanter <rk...@apache.org>
Authored: Tue May 16 18:02:39 2017 -0700
Committer: Robert Kanter <rk...@apache.org>
Committed: Tue May 16 18:02:39 2017 -0700

----------------------------------------------------------------------
 .../hadoop/yarn/conf/YarnConfiguration.java     |   4 +
 .../JavaSandboxLinuxContainerRuntime.java       |  64 ++++++++---
 .../TestJavaSandboxLinuxContainerRuntime.java   | 106 ++++++++++++++++---
 3 files changed, 147 insertions(+), 27 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/18c494a0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 82274fe..5e4c826 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -1500,6 +1500,10 @@ public class YarnConfiguration extends Configuration {
   public static final String YARN_CONTAINER_SANDBOX_POLICY =
       YARN_CONTAINER_SANDBOX + ".policy";
 
+  /** Prefix for group to policy file mapping.*/
+  public static final String YARN_CONTAINER_SANDBOX_POLICY_GROUP_PREFIX =
+      YARN_CONTAINER_SANDBOX_POLICY + ".group.";
+
   /** The group which will run by default without the java security manager.*/
   public static final String YARN_CONTAINER_SANDBOX_WHITELIST_GROUP =
       YARN_CONTAINER_SANDBOX + ".whitelist-group";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/18c494a0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/JavaSandboxLinuxContainerRuntime.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/JavaSandboxLinuxContainerRuntime.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/JavaSandboxLinuxContainerRuntime.java
index 1e5bf57..0b858bc 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/JavaSandboxLinuxContainerRuntime.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/JavaSandboxLinuxContainerRuntime.java
@@ -50,10 +50,13 @@ import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
+import java.util.stream.Collectors;
 
 import static org.apache.hadoop.fs.Path.SEPARATOR;
 import static org.apache.hadoop.util.Shell.SYSPROP_HADOOP_HOME_DIR;
 import static org.apache.hadoop.yarn.api.ApplicationConstants.Environment.JAVA_HOME;
+import static org.apache.hadoop.yarn.conf.YarnConfiguration.YARN_CONTAINER_SANDBOX;
+import static org.apache.hadoop.yarn.conf.YarnConfiguration.YARN_CONTAINER_SANDBOX_POLICY_GROUP_PREFIX;
 import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.LinuxContainerRuntimeConstants.CONTAINER_ID_STR;
 import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.LinuxContainerRuntimeConstants.CONTAINER_LOCAL_DIRS;
 import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.LinuxContainerRuntimeConstants.CONTAINER_RUN_CMDS;
@@ -93,13 +96,21 @@ import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.r
  *     Accepts canonical path to a java policy file on the local filesystem.
  *     This file will be loaded as the base policy, any additional container
  *     grants will be appended to this base file.  If not specified, the default
- *     java.policy   file provided with hadoop resources will be used.
+ *     java.policy file provided with hadoop resources will be used.
  *   </li>
  *   <li>
  *     {@value YarnConfiguration#YARN_CONTAINER_SANDBOX_WHITELIST_GROUP} :
  *     Optional setting to specify a YARN queue which will be exempt from the
  *     sand-boxing process.
  *   </li>
+ *   <li>
+ *     {@value
+ *     YarnConfiguration#YARN_CONTAINER_SANDBOX_POLICY_GROUP_PREFIX}$groupName :
+ *     Optional setting to map groups to java policy files.  The value is a path
+ *     to the java policy file for $groupName.  A user who is a member of
+ *     multiple groups with different policies will receive the superset of all
+ *     the permissions across their groups.
+ *   </li>
  * </ul>
  */
 @InterfaceAudience.Private
@@ -138,7 +149,7 @@ public class JavaSandboxLinuxContainerRuntime
     this.configuration = conf;
     this.sandboxMode =
         SandboxMode.get(
-            this.configuration.get(YarnConfiguration.YARN_CONTAINER_SANDBOX,
+            this.configuration.get(YARN_CONTAINER_SANDBOX,
                 YarnConfiguration.DEFAULT_YARN_CONTAINER_SANDBOX));
 
     super.initialize(conf);
@@ -211,8 +222,10 @@ public class JavaSandboxLinuxContainerRuntime
         ctx.getExecutionAttribute(CONTAINER_RUN_CMDS);
     Map<String, String> env =
         ctx.getContainer().getLaunchContext().getEnvironment();
+    String username =
+        ctx.getExecutionAttribute(USER);
 
-    if(!isSandboxContainerWhitelisted(ctx, commands)) {
+    if(!isSandboxContainerWhitelisted(username, commands)) {
       String tmpDirBase = configuration.get("hadoop.tmp.dir");
       if (tmpDirBase == null) {
         throw new ContainerExecutionException("hadoop.tmp.dir not set!");
@@ -223,6 +236,8 @@ public class JavaSandboxLinuxContainerRuntime
         String containerID = ctx.getExecutionAttribute(CONTAINER_ID_STR);
         initializePolicyDir();
 
+        List<String> groupPolicyFiles =
+            getGroupPolicyFiles(configuration, ctx.getExecutionAttribute(USER));
         Path policyFilePath = Files.createFile(
             Paths.get(policyFileDir.toString(),
             containerID + "-" + NMContainerPolicyUtils.POLICY_FILE),
@@ -231,12 +246,12 @@ public class JavaSandboxLinuxContainerRuntime
 
         containerPolicies.put(containerID, policyFilePath);
 
-        NMContainerPolicyUtils.generatePolicyFile(
-            policyOutputStream, localDirs, resources, configuration);
+        NMContainerPolicyUtils.generatePolicyFile(policyOutputStream,
+            localDirs, groupPolicyFiles, resources, configuration);
         NMContainerPolicyUtils.appendSecurityFlags(
             commands, env, policyFilePath, sandboxMode);
 
-      } catch (Exception e) {
+      } catch (IOException e) {
         throw new ContainerExecutionException(e);
       } finally {
         IOUtils.cleanup(LOG, policyOutputStream);
@@ -264,15 +279,32 @@ public class JavaSandboxLinuxContainerRuntime
     return sandboxMode != SandboxMode.disabled;
   }
 
+  private static List<String> getGroupPolicyFiles(Configuration conf,
+      String user) throws ContainerExecutionException {
+    Groups groups = Groups.getUserToGroupsMappingService(conf);
+    List<String> userGroups;
+    try {
+      userGroups = groups.getGroups(user);
+    } catch (IOException e) {
+      throw new ContainerExecutionException("Container user does not exist");
+    }
+
+    return userGroups.stream()
+        .map(group -> conf.get(YARN_CONTAINER_SANDBOX_POLICY_GROUP_PREFIX
+            + group))
+        .filter(groupPolicy -> groupPolicy != null)
+        .collect(Collectors.toList());
+  }
+
   /**
    * Determine if the container should be whitelisted (i.e. exempt from the
    * Java Security Manager).
-   * @param ctx The container runtime context for the requested container
+   * @param username The name of the user running the container
    * @param commands The list of run commands for the container
    * @return boolean value denoting whether the container should be whitelisted.
    * @throws ContainerExecutionException If container user can not be resolved
    */
-  private boolean isSandboxContainerWhitelisted(ContainerRuntimeContext ctx,
+  private boolean isSandboxContainerWhitelisted(String username,
       List<String> commands) throws ContainerExecutionException {
     String whitelistGroup = configuration.get(
         YarnConfiguration.YARN_CONTAINER_SANDBOX_WHITELIST_GROUP);
@@ -281,7 +313,7 @@ public class JavaSandboxLinuxContainerRuntime
     boolean isWhitelisted = false;
 
     try {
-      userGroups = groups.getGroups(ctx.getExecutionAttribute(USER));
+      userGroups = groups.getGroups(username);
     } catch (IOException e) {
       throw new ContainerExecutionException("Container user does not exist");
     }
@@ -399,8 +431,9 @@ public class JavaSandboxLinuxContainerRuntime
      * base policy file or if it is unable to create a new policy file.
      */
     static void generatePolicyFile(OutputStream policyOutStream,
-        List<String> localDirs, Map<org.apache.hadoop.fs.Path,
-        List<String>> resources, Configuration conf)
+        List<String> localDirs, List<String> groupPolicyPaths,
+        Map<org.apache.hadoop.fs.Path, List<String>> resources,
+        Configuration conf)
         throws IOException {
 
       String policyFilePath =
@@ -414,13 +447,16 @@ public class JavaSandboxLinuxContainerRuntime
         cacheDirs.add(path.getParent().toString());
       }
 
-      if(policyFilePath == null) {
+      if (groupPolicyPaths != null) {
+        for(String policyPath : groupPolicyPaths) {
+          Files.copy(Paths.get(policyPath), policyOutStream);
+        }
+      } else if (policyFilePath == null) {
         IOUtils.copyBytes(
             NMContainerPolicyUtils.class.getResourceAsStream("/" + POLICY_FILE),
             policyOutStream, conf, false);
       } else {
         Files.copy(Paths.get(policyFilePath), policyOutStream);
-        policyOutStream.flush();
       }
 
       Formatter filePermissionFormat = new Formatter(policyOutStream,
@@ -484,7 +520,7 @@ public class JavaSandboxLinuxContainerRuntime
 
     private static boolean validateJavaHome(String containerJavaHome)
         throws ContainerExecutionException{
-      if (System.getenv(JAVA_HOME.name()) == null){
+      if (System.getenv(JAVA_HOME.name()) == null) {
         throw new ContainerExecutionException(
             "JAVA_HOME is not set for NodeManager");
       }
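
The patch above extends generatePolicyFile so that, when per-group policy files are configured under YarnConfiguration.YARN_CONTAINER_SANDBOX_POLICY_GROUP_PREFIX, each file is copied verbatim into the container's generated policy. A minimal, dependency-free sketch of that merge step (the class name, temp files, and sample grant strings are illustrative, not part of the patch):

import java.io.IOException;
import java.io.OutputStream;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.Arrays;
import java.util.List;

public class GroupPolicyMergeSketch {

  // Same JDK call the runtime uses: append each group policy file verbatim to
  // the stream that becomes the container's generated policy file.
  static void mergePolicies(List<String> groupPolicyPaths, OutputStream out)
      throws IOException {
    for (String policyPath : groupPolicyPaths) {
      Files.copy(Paths.get(policyPath), out);
    }
  }

  public static void main(String[] args) throws IOException {
    // Two throwaway policy fragments so the sketch runs standalone.
    Path socketPolicy = Files.createTempFile("captains", ".policy");
    Path loaderPolicy = Files.createTempFile("crew", ".policy");
    Files.write(socketPolicy, ("grant {\n  permission java.net.SocketPermission"
        + " \"localhost:0\", \"listen\";\n};\n").getBytes(StandardCharsets.UTF_8));
    Files.write(loaderPolicy, ("grant {\n  permission java.lang.RuntimePermission"
        + " \"createClassLoader\";\n};\n").getBytes(StandardCharsets.UTF_8));
    mergePolicies(
        Arrays.asList(socketPolicy.toString(), loaderPolicy.toString()),
        System.out);
  }
}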

http://git-wip-us.apache.org/repos/asf/hadoop/blob/18c494a0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestJavaSandboxLinuxContainerRuntime.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestJavaSandboxLinuxContainerRuntime.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestJavaSandboxLinuxContainerRuntime.java
index e10d0dd..bdd435e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestJavaSandboxLinuxContainerRuntime.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestJavaSandboxLinuxContainerRuntime.java
@@ -37,20 +37,29 @@ import org.junit.rules.ExpectedException;
 import java.io.File;
 import java.io.FileOutputStream;
 import java.io.FilePermission;
+import java.io.FileWriter;
+import java.io.IOException;
 import java.io.OutputStream;
+import java.net.SocketPermission;
 import java.nio.file.Files;
 import java.nio.file.Paths;
+import java.security.Permission;
 import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.Formatter;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
+import java.util.Properties;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
 
 import static org.apache.hadoop.yarn.api.ApplicationConstants.Environment.JAVA_HOME;
 import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.JavaSandboxLinuxContainerRuntime.NMContainerPolicyUtils.LOG;
 import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.JavaSandboxLinuxContainerRuntime.NMContainerPolicyUtils.MULTI_COMMAND_REGEX;
 import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.JavaSandboxLinuxContainerRuntime.NMContainerPolicyUtils.CLEAN_CMD_REGEX;
 import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.JavaSandboxLinuxContainerRuntime.NMContainerPolicyUtils.CONTAINS_JAVA_CMD;
+import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.JavaSandboxLinuxContainerRuntime.NMContainerPolicyUtils.POLICY_APPEND_FLAG;
 import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.JavaSandboxLinuxContainerRuntime.NMContainerPolicyUtils.POLICY_FILE;
 import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.JavaSandboxLinuxContainerRuntime.NMContainerPolicyUtils.POLICY_FLAG;
 import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.JavaSandboxLinuxContainerRuntime.NMContainerPolicyUtils.SECURITY_FLAG;
@@ -76,8 +85,9 @@ import static org.mockito.Mockito.when;
  */
 public class TestJavaSandboxLinuxContainerRuntime {
 
-  private static final String HADOOP_HOME = "hadoop.home.dir";
-  private static String hadoopHomeDir = System.getProperty(HADOOP_HOME);
+  private final static String HADOOP_HOME = "hadoop.home.dir";
+  private final static String HADOOP_HOME_DIR = System.getProperty(HADOOP_HOME);
+  private final Properties baseProps = new Properties(System.getProperties());
 
   @Rule
   public ExpectedException exception = ExpectedException.none();
@@ -101,11 +111,12 @@ public class TestJavaSandboxLinuxContainerRuntime {
   private final static String WHITELIST_GROUP = "captains";
   private final static String CONTAINER_ID = "container_1234567890";
   private final static String APPLICATION_ID = "application_1234567890";
+  private File baseTestDirectory;
 
   @Before
   public void setup() throws Exception {
 
-    File baseTestDirectory = new File(System.getProperty("test.build.data",
+    baseTestDirectory = new File(System.getProperty("test.build.data",
         System.getProperty("java.io.tmpdir", "target")),
         TestJavaSandboxLinuxContainerRuntime.class.getName());
 
@@ -114,10 +125,8 @@ public class TestJavaSandboxLinuxContainerRuntime {
 
     conf = new Configuration();
     conf.set(CommonConfigurationKeys.HADOOP_USER_GROUP_STATIC_OVERRIDES,
-        WHITELIST_USER + "=" + WHITELIST_GROUP + ";"
+        WHITELIST_USER + "=" + WHITELIST_GROUP + "," + NORMAL_GROUP + ";"
             + NORMAL_USER + "=" + NORMAL_GROUP + ";");
-    conf.set(YarnConfiguration.YARN_CONTAINER_SANDBOX_WHITELIST_GROUP,
-        WHITELIST_GROUP);
     conf.set("hadoop.tmp.dir", baseTestDirectory.getAbsolutePath());
 
     Files.deleteIfExists(Paths.get(baseTestDirectory.getAbsolutePath(),
@@ -151,21 +160,20 @@ public class TestJavaSandboxLinuxContainerRuntime {
 
     runtimeContextBuilder = createRuntimeContext();
 
-    if (hadoopHomeDir == null) {
+    if (HADOOP_HOME_DIR == null) {
       System.setProperty(HADOOP_HOME, policyFile.getParent());
     }
 
     OutputStream outStream = new FileOutputStream(policyFile);
     JavaSandboxLinuxContainerRuntime.NMContainerPolicyUtils
-        .generatePolicyFile(outStream, symLinks, resources, conf);
+        .generatePolicyFile(outStream, symLinks, null, resources, conf);
     outStream.close();
 
     System.setProperty("java.security.policy", policyFile.getCanonicalPath());
     securityManager = new SecurityManager();
-
   }
 
-  public  ContainerRuntimeContext.Builder createRuntimeContext(){
+  public ContainerRuntimeContext.Builder createRuntimeContext(){
 
     Container container = mock(Container.class);
     ContainerLaunchContext  ctx = mock(ContainerLaunchContext.class);
@@ -194,6 +202,71 @@ public class TestJavaSandboxLinuxContainerRuntime {
     return builder;
   }
 
+  public static final String SOCKET_PERMISSION_FORMAT =
+      "grant { \n" +
+      "   permission %1s \"%2s\", \"%3s\";\n" +
+      "};\n";
+  public static final String RUNTIME_PERMISSION_FORMAT =
+      "grant { \n" +
+          "   permission %1s \"%2s\";\n" +
+          "};\n";
+
+  @Test
+  public void testGroupPolicies()
+      throws IOException, ContainerExecutionException {
+    // Generate new policy files each containing one grant
+    File openSocketPolicyFile =
+        File.createTempFile("openSocket", "policy", baseTestDirectory);
+    File classLoaderPolicyFile =
+        File.createTempFile("createClassLoader", "policy", baseTestDirectory);
+    Permission socketPerm = new SocketPermission("localhost:0", "listen");
+    Permission runtimePerm = new RuntimePermission("createClassLoader");
+
+    StringBuilder socketPermString = new StringBuilder();
+    Formatter openSocketPolicyFormatter = new Formatter(socketPermString);
+    openSocketPolicyFormatter.format(SOCKET_PERMISSION_FORMAT,
+        socketPerm.getClass().getName(), socketPerm.getName(),
+        socketPerm.getActions());
+    FileWriter socketPermWriter = new FileWriter(openSocketPolicyFile);
+    socketPermWriter.write(socketPermString.toString());
+    socketPermWriter.close();
+
+    StringBuilder classLoaderPermString = new StringBuilder();
+    Formatter classLoaderPolicyFormatter = new Formatter(classLoaderPermString);
+    classLoaderPolicyFormatter.format(RUNTIME_PERMISSION_FORMAT,
+        runtimePerm.getClass().getName(), runtimePerm.getName());
+    FileWriter classLoaderPermWriter = new FileWriter(classLoaderPolicyFile);
+    classLoaderPermWriter.write(classLoaderPermString.toString());
+    classLoaderPermWriter.close();
+
+    conf.set(YarnConfiguration.YARN_CONTAINER_SANDBOX_POLICY_GROUP_PREFIX +
+        WHITELIST_GROUP, openSocketPolicyFile.toString());
+    conf.set(YarnConfiguration.YARN_CONTAINER_SANDBOX_POLICY_GROUP_PREFIX +
+        NORMAL_GROUP, classLoaderPolicyFile.toString());
+
+    String[] inputCommand = {"$JAVA_HOME/bin/java jar MyJob.jar"};
+    List<String> commands = Arrays.asList(inputCommand);
+
+    runtimeContextBuilder.setExecutionAttribute(USER, WHITELIST_USER);
+    runtimeContextBuilder.setExecutionAttribute(CONTAINER_RUN_CMDS, commands);
+
+    runtime.prepareContainer(runtimeContextBuilder.build());
+
+    //pull generated policy from cmd
+    Matcher policyMatches = Pattern.compile(POLICY_APPEND_FLAG + "=?([^ ]+)")
+        .matcher(commands.get(0));
+    policyMatches.find();
+    String generatedPolicy = policyMatches.group(1);
+
+    //Test that generated policy file has included both policies
+    Assert.assertTrue(
+        Files.readAllLines(Paths.get(generatedPolicy)).contains(
+            classLoaderPermString.toString().split("\n")[1]));
+    Assert.assertTrue(
+        Files.readAllLines(Paths.get(generatedPolicy)).contains(
+            socketPermString.toString().split("\n")[1]));
+  }
+
   @Test
   public void testGrant() throws Exception {
     FilePermission grantPermission =
@@ -235,7 +308,6 @@ public class TestJavaSandboxLinuxContainerRuntime {
     JavaSandboxLinuxContainerRuntime.NMContainerPolicyUtils
         .appendSecurityFlags(commands, env, policyFilePath,
               JavaSandboxLinuxContainerRuntime.SandboxMode.permissive);
-
   }
 
   @Test
@@ -247,6 +319,9 @@ public class TestJavaSandboxLinuxContainerRuntime {
     };
     List<String> commands = Arrays.asList(inputCommand);
 
+    conf.set(YarnConfiguration.YARN_CONTAINER_SANDBOX_WHITELIST_GROUP,
+        WHITELIST_GROUP);
+
     runtimeContextBuilder.setExecutionAttribute(USER, WHITELIST_USER);
     runtimeContextBuilder.setExecutionAttribute(CONTAINER_RUN_CMDS, commands);
     runtime.prepareContainer(runtimeContextBuilder.build());
@@ -254,7 +329,6 @@ public class TestJavaSandboxLinuxContainerRuntime {
     Assert.assertTrue("Command should not be modified when user is " +
             "member of whitelisted group",
         inputCommand[0].equals(commands.get(0)));
-
   }
 
   @Test
@@ -265,6 +339,9 @@ public class TestJavaSandboxLinuxContainerRuntime {
     };
     List<String> commands = Arrays.asList(inputCommand);
 
+    conf.set(YarnConfiguration.YARN_CONTAINER_SANDBOX_WHITELIST_GROUP,
+        WHITELIST_GROUP);
+
     runtimeContextBuilder.setExecutionAttribute(USER, WHITELIST_USER);
     runtimeContextBuilder.setExecutionAttribute(CONTAINER_RUN_CMDS, commands);
     runtime.prepareContainer(runtimeContextBuilder.build());
@@ -283,6 +360,9 @@ public class TestJavaSandboxLinuxContainerRuntime {
     };
     List<String> commands = Arrays.asList(inputCommand);
 
+    conf.set(YarnConfiguration.YARN_CONTAINER_SANDBOX_WHITELIST_GROUP,
+        WHITELIST_GROUP);
+
     runtimeContextBuilder.setExecutionAttribute(USER, NORMAL_USER);
     runtimeContextBuilder.setExecutionAttribute(CONTAINER_RUN_CMDS, commands);
     runtime.prepareContainer(runtimeContextBuilder.build());
@@ -373,6 +453,6 @@ public class TestJavaSandboxLinuxContainerRuntime {
 
   @After
   public void cleanup(){
-    System.setSecurityManager(null);
+    System.setProperties(baseProps);
   }
 }
\ No newline at end of file




[07/29] hadoop git commit: YARN-6587. Refactor of ResourceManager#startWebApp in a Util class. (Giovanni Matteo Fumarola via curino).

Posted by vi...@apache.org.
YARN-6587. Refactor of ResourceManager#startWebApp in a Util class. (Giovanni Matteo Fumarola via curino).


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6600abbb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6600abbb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6600abbb

Branch: refs/heads/HDFS-9806
Commit: 6600abbb5c23a83e3a9ef48a945bc8fe19c8178a
Parents: 2397a26
Author: Carlo Curino <cu...@apache.org>
Authored: Fri May 12 17:48:55 2017 -0700
Committer: Carlo Curino <cu...@apache.org>
Committed: Fri May 12 17:48:55 2017 -0700

----------------------------------------------------------------------
 .../server/resourcemanager/ResourceManager.java |  93 +-----------
 .../resourcemanager/webapp/RMWebAppUtil.java    | 149 +++++++++++++++++++
 2 files changed, 152 insertions(+), 90 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6600abbb/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
index 75d6df2..1f5e8cd 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
@@ -29,17 +29,13 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.ha.HAServiceProtocol;
 import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
 import org.apache.hadoop.http.HttpServer2;
-import org.apache.hadoop.http.lib.StaticUserWebFilter;
 import org.apache.hadoop.metrics2.MetricsSystem;
 import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
 import org.apache.hadoop.metrics2.source.JvmMetrics;
 import org.apache.hadoop.net.NetUtils;
-import org.apache.hadoop.security.AuthenticationFilterInitializer;
 import org.apache.hadoop.security.Groups;
-import org.apache.hadoop.security.HttpCrossOriginFilterInitializer;
 import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.security.authentication.server.KerberosAuthenticationHandler;
 import org.apache.hadoop.security.authorize.ProxyUsers;
 import org.apache.hadoop.service.CompositeService;
 import org.apache.hadoop.service.Service;
@@ -103,9 +99,8 @@ import org.apache.hadoop.yarn.server.resourcemanager.security.DelegationTokenRen
 import org.apache.hadoop.yarn.server.resourcemanager.security.QueueACLsManager;
 import org.apache.hadoop.yarn.server.resourcemanager.timelineservice.RMTimelineCollectorManager;
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.RMWebApp;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.RMWebAppUtil;
 import org.apache.hadoop.yarn.server.security.ApplicationACLsManager;
-import org.apache.hadoop.yarn.server.security.http.RMAuthenticationFilter;
-import org.apache.hadoop.yarn.server.security.http.RMAuthenticationFilterInitializer;
 import org.apache.hadoop.yarn.server.webproxy.AppReportFetcher;
 import org.apache.hadoop.yarn.server.webproxy.ProxyUriUtils;
 import org.apache.hadoop.yarn.server.webproxy.WebAppProxy;
@@ -1038,92 +1033,10 @@ public class ResourceManager extends CompositeService implements Recoverable {
 
   protected void startWepApp() {
 
-    // Use the customized yarn filter instead of the standard kerberos filter to
-    // allow users to authenticate using delegation tokens
-    // 4 conditions need to be satisfied -
-    // 1. security is enabled
-    // 2. http auth type is set to kerberos
-    // 3. "yarn.resourcemanager.webapp.use-yarn-filter" override is set to true
-    // 4. hadoop.http.filter.initializers container AuthenticationFilterInitializer
-
     Configuration conf = getConfig();
-    boolean enableCorsFilter =
-        conf.getBoolean(YarnConfiguration.RM_WEBAPP_ENABLE_CORS_FILTER,
-            YarnConfiguration.DEFAULT_RM_WEBAPP_ENABLE_CORS_FILTER);
-    boolean useYarnAuthenticationFilter =
-        conf.getBoolean(
-          YarnConfiguration.RM_WEBAPP_DELEGATION_TOKEN_AUTH_FILTER,
-          YarnConfiguration.DEFAULT_RM_WEBAPP_DELEGATION_TOKEN_AUTH_FILTER);
-    String authPrefix = "hadoop.http.authentication.";
-    String authTypeKey = authPrefix + "type";
-    String filterInitializerConfKey = "hadoop.http.filter.initializers";
-    String actualInitializers = "";
-    Class<?>[] initializersClasses =
-        conf.getClasses(filterInitializerConfKey);
-
-    // setup CORS
-    if (enableCorsFilter) {
-      conf.setBoolean(HttpCrossOriginFilterInitializer.PREFIX
-          + HttpCrossOriginFilterInitializer.ENABLED_SUFFIX, true);
-    }
-
-    boolean hasHadoopAuthFilterInitializer = false;
-    boolean hasRMAuthFilterInitializer = false;
-    if (initializersClasses != null) {
-      for (Class<?> initializer : initializersClasses) {
-        if (initializer.getName().equals(
-          AuthenticationFilterInitializer.class.getName())) {
-          hasHadoopAuthFilterInitializer = true;
-        }
-        if (initializer.getName().equals(
-          RMAuthenticationFilterInitializer.class.getName())) {
-          hasRMAuthFilterInitializer = true;
-        }
-      }
-      if (UserGroupInformation.isSecurityEnabled()
-          && useYarnAuthenticationFilter
-          && hasHadoopAuthFilterInitializer
-          && conf.get(authTypeKey, "").equals(
-            KerberosAuthenticationHandler.TYPE)) {
-        ArrayList<String> target = new ArrayList<String>();
-        for (Class<?> filterInitializer : initializersClasses) {
-          if (filterInitializer.getName().equals(
-            AuthenticationFilterInitializer.class.getName())) {
-            if (hasRMAuthFilterInitializer == false) {
-              target.add(RMAuthenticationFilterInitializer.class.getName());
-            }
-            continue;
-          }
-          target.add(filterInitializer.getName());
-        }
-        actualInitializers = StringUtils.join(",", target);
 
-        LOG.info("Using RM authentication filter(kerberos/delegation-token)"
-            + " for RM webapp authentication");
-        RMAuthenticationFilter
-          .setDelegationTokenSecretManager(getClientRMService().rmDTSecretManager);
-        conf.set(filterInitializerConfKey, actualInitializers);
-      }
-    }
-
-    // if security is not enabled and the default filter initializer has not 
-    // been set, set the initializer to include the
-    // RMAuthenticationFilterInitializer which in turn will set up the simple
-    // auth filter.
-
-    String initializers = conf.get(filterInitializerConfKey);
-    if (!UserGroupInformation.isSecurityEnabled()) {
-      if (initializersClasses == null || initializersClasses.length == 0) {
-        conf.set(filterInitializerConfKey,
-          RMAuthenticationFilterInitializer.class.getName());
-        conf.set(authTypeKey, "simple");
-      } else if (initializers.equals(StaticUserWebFilter.class.getName())) {
-        conf.set(filterInitializerConfKey,
-          RMAuthenticationFilterInitializer.class.getName() + ","
-              + initializers);
-        conf.set(authTypeKey, "simple");
-      }
-    }
+    RMWebAppUtil.setupSecurityAndFilters(conf,
+        getClientRMService().rmDTSecretManager);
 
     Builder<ApplicationMasterService> builder = 
         WebApps

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6600abbb/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebAppUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebAppUtil.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebAppUtil.java
new file mode 100644
index 0000000..263828b
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebAppUtil.java
@@ -0,0 +1,149 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements.  See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership.  The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License.  You may obtain a copy of the License at
+*
+*     http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.resourcemanager.webapp;
+
+import java.util.ArrayList;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.http.lib.StaticUserWebFilter;
+import org.apache.hadoop.security.AuthenticationFilterInitializer;
+import org.apache.hadoop.security.HttpCrossOriginFilterInitializer;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.authentication.server.KerberosAuthenticationHandler;
+import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.server.resourcemanager.security.RMDelegationTokenSecretManager;
+import org.apache.hadoop.yarn.server.security.http.RMAuthenticationFilter;
+import org.apache.hadoop.yarn.server.security.http.RMAuthenticationFilterInitializer;
+
+/**
+ * Util class for ResourceManager WebApp.
+ */
+public final class RMWebAppUtil {
+
+  private static final Log LOG = LogFactory.getLog(RMWebAppUtil.class);
+
+  /**
+   * Private constructor.
+   */
+  private RMWebAppUtil() {
+    // not called
+  }
+
+  /**
+   * Helper method to setup filters and authentication for ResourceManager
+   * WebServices.
+   *
+   * Use the customized yarn filter instead of the standard kerberos filter to
+   * allow users to authenticate using delegation tokens. Four conditions need
+   * to be satisfied:
+   *
+   * 1. security is enabled.
+   *
+   * 2. http auth type is set to kerberos.
+   *
+   * 3. "yarn.resourcemanager.webapp.use-yarn-filter" override is set to true.
+   *
+   * 4. hadoop.http.filter.initializers contains
+   * AuthenticationFilterInitializer.
+   *
+   * @param conf RM configuration.
+   * @param rmDTSecretManager RM specific delegation token secret manager.
+   **/
+  public static void setupSecurityAndFilters(Configuration conf,
+      RMDelegationTokenSecretManager rmDTSecretManager) {
+
+    boolean enableCorsFilter =
+        conf.getBoolean(YarnConfiguration.RM_WEBAPP_ENABLE_CORS_FILTER,
+            YarnConfiguration.DEFAULT_RM_WEBAPP_ENABLE_CORS_FILTER);
+    boolean useYarnAuthenticationFilter = conf.getBoolean(
+        YarnConfiguration.RM_WEBAPP_DELEGATION_TOKEN_AUTH_FILTER,
+        YarnConfiguration.DEFAULT_RM_WEBAPP_DELEGATION_TOKEN_AUTH_FILTER);
+    String authPrefix = "hadoop.http.authentication.";
+    String authTypeKey = authPrefix + "type";
+    String filterInitializerConfKey = "hadoop.http.filter.initializers";
+    String actualInitializers = "";
+    Class<?>[] initializersClasses = conf.getClasses(filterInitializerConfKey);
+
+    // setup CORS
+    if (enableCorsFilter) {
+      conf.setBoolean(HttpCrossOriginFilterInitializer.PREFIX
+          + HttpCrossOriginFilterInitializer.ENABLED_SUFFIX, true);
+    }
+
+    boolean hasHadoopAuthFilterInitializer = false;
+    boolean hasRMAuthFilterInitializer = false;
+    if (initializersClasses != null) {
+      for (Class<?> initializer : initializersClasses) {
+        if (initializer.getName()
+            .equals(AuthenticationFilterInitializer.class.getName())) {
+          hasHadoopAuthFilterInitializer = true;
+        }
+        if (initializer.getName()
+            .equals(RMAuthenticationFilterInitializer.class.getName())) {
+          hasRMAuthFilterInitializer = true;
+        }
+      }
+      if (UserGroupInformation.isSecurityEnabled()
+          && useYarnAuthenticationFilter && hasHadoopAuthFilterInitializer
+          && conf.get(authTypeKey, "")
+              .equals(KerberosAuthenticationHandler.TYPE)) {
+        ArrayList<String> target = new ArrayList<String>();
+        for (Class<?> filterInitializer : initializersClasses) {
+          if (filterInitializer.getName()
+              .equals(AuthenticationFilterInitializer.class.getName())) {
+            if (!hasRMAuthFilterInitializer) {
+              target.add(RMAuthenticationFilterInitializer.class.getName());
+            }
+            continue;
+          }
+          target.add(filterInitializer.getName());
+        }
+        actualInitializers = StringUtils.join(",", target);
+
+        LOG.info("Using RM authentication filter(kerberos/delegation-token)"
+            + " for RM webapp authentication");
+        RMAuthenticationFilter
+            .setDelegationTokenSecretManager(rmDTSecretManager);
+        conf.set(filterInitializerConfKey, actualInitializers);
+      }
+    }
+
+    // if security is not enabled and the default filter initializer has not
+    // been set, set the initializer to include the
+    // RMAuthenticationFilterInitializer which in turn will set up the simple
+    // auth filter.
+
+    String initializers = conf.get(filterInitializerConfKey);
+    if (!UserGroupInformation.isSecurityEnabled()) {
+      if (initializersClasses == null || initializersClasses.length == 0) {
+        conf.set(filterInitializerConfKey,
+            RMAuthenticationFilterInitializer.class.getName());
+        conf.set(authTypeKey, "simple");
+      } else if (initializers.equals(StaticUserWebFilter.class.getName())) {
+        conf.set(filterInitializerConfKey,
+            RMAuthenticationFilterInitializer.class.getName() + ","
+                + initializers);
+        conf.set(authTypeKey, "simple");
+      }
+    }
+  }
+}
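
The kerberos branch of the extracted setupSecurityAndFilters rewrites hadoop.http.filter.initializers so the RM-specific authentication filter replaces the generic Hadoop one while every other configured initializer is preserved. A standalone sketch of just that list rewrite, with the class names hard-coded as strings so it needs nothing on the classpath (this is not the RM API itself):

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

public class FilterInitializerRewriteSketch {

  static final String HADOOP_AUTH =
      "org.apache.hadoop.security.AuthenticationFilterInitializer";
  static final String RM_AUTH =
      "org.apache.hadoop.yarn.server.security.http.RMAuthenticationFilterInitializer";

  // Mirrors the loop in setupSecurityAndFilters: drop the generic filter,
  // add the RM filter once, keep everything else in its original order.
  static String rewrite(List<String> configured) {
    boolean hasRmAuth = configured.contains(RM_AUTH);
    List<String> target = new ArrayList<>();
    for (String name : configured) {
      if (name.equals(HADOOP_AUTH)) {
        if (!hasRmAuth) {
          target.add(RM_AUTH);
        }
        continue;
      }
      target.add(name);
    }
    return String.join(",", target);
  }

  public static void main(String[] args) {
    System.out.println(rewrite(Arrays.asList(
        "org.apache.hadoop.http.lib.StaticUserWebFilter", HADOOP_AUTH)));
  }
}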




[24/29] hadoop git commit: HDFS-11653. [READ] ProvidedReplica should return an InputStream that is bounded by its length

Posted by vi...@apache.org.
HDFS-11653. [READ] ProvidedReplica should return an InputStream that is bounded by its length


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/20f2d7f8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/20f2d7f8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/20f2d7f8

Branch: refs/heads/HDFS-9806
Commit: 20f2d7f82c072bc9343da853ec7b339f3a3ebb8b
Parents: 37114eb
Author: Virajith Jalaparti <vi...@apache.org>
Authored: Thu May 4 12:43:48 2017 -0700
Committer: Virajith Jalaparti <vi...@apache.org>
Committed: Wed May 17 12:41:52 2017 -0700

----------------------------------------------------------------------
 .../hdfs/server/datanode/ProvidedReplica.java   |   5 +-
 .../datanode/TestProvidedReplicaImpl.java       | 163 +++++++++++++++++++
 2 files changed, 167 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/20f2d7f8/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ProvidedReplica.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ProvidedReplica.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ProvidedReplica.java
index b021ea2..946ab5a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ProvidedReplica.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ProvidedReplica.java
@@ -22,6 +22,8 @@ import java.io.IOException;
 import java.io.InputStream;
 import java.io.OutputStream;
 import java.net.URI;
+
+import org.apache.commons.io.input.BoundedInputStream;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FileSystem;
@@ -98,7 +100,8 @@ public abstract class ProvidedReplica extends ReplicaInfo {
     if (remoteFS != null) {
       FSDataInputStream ins = remoteFS.open(new Path(fileURI));
       ins.seek(fileOffset + seekOffset);
-      return new FSDataInputStream(ins);
+      return new BoundedInputStream(
+          new FSDataInputStream(ins), getBlockDataLength());
     } else {
       throw new IOException("Remote filesystem for provided replica " + this +
           " does not exist");

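Wrapping the remote stream in a BoundedInputStream means a caller can never read past getBlockDataLength(), even though the backing file holds many blocks. A small illustration of that bounding behaviour using synthetic in-memory data (commons-io, which the patch already uses, must be on the classpath):

import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;
import org.apache.commons.io.input.BoundedInputStream;

public class BoundedReplicaReadSketch {
  public static void main(String[] args) throws IOException {
    byte[] fileBytes = new byte[1024];   // stands in for the remote provided file
    long blockLength = 128;              // stands in for getBlockDataLength()
    InputStream raw = new ByteArrayInputStream(fileBytes);
    long skipped = raw.skip(256);        // stands in for seeking to the block offset
    try (InputStream bounded = new BoundedInputStream(raw, blockLength)) {
      long read = 0;
      while (bounded.read() != -1) {
        read++;
      }
      // Prints 128: the bound stops the reader at the replica's length.
      System.out.println("skipped " + skipped + ", read " + read + " bytes");
    }
  }
}
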
http://git-wip-us.apache.org/repos/asf/hadoop/blob/20f2d7f8/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestProvidedReplicaImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestProvidedReplicaImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestProvidedReplicaImpl.java
new file mode 100644
index 0000000..8258c21
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestProvidedReplicaImpl.java
@@ -0,0 +1,163 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.datanode;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.nio.ByteBuffer;
+import java.nio.channels.Channels;
+import java.nio.channels.ReadableByteChannel;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.commons.io.input.BoundedInputStream;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystemTestHelper;
+import org.junit.Before;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Tests the implementation of {@link ProvidedReplica}.
+ */
+public class TestProvidedReplicaImpl {
+
+  private static final Logger LOG =
+      LoggerFactory.getLogger(TestProvidedReplicaImpl.class);
+  private static final String BASE_DIR =
+      new FileSystemTestHelper().getTestRootDir();
+  private static final String FILE_NAME = "provided-test";
+  //length of the file that is associated with the provided blocks.
+  private static final long FILE_LEN = 128 * 1024 * 10L + 64 * 1024;
+  //length of each provided block.
+  private static final long BLK_LEN = 128 * 1024L;
+
+  private static List<ProvidedReplica> replicas;
+
+  private static void createFileIfNotExists(String baseDir) throws IOException {
+    File newFile = new File(baseDir, FILE_NAME);
+    newFile.getParentFile().mkdirs();
+    if(!newFile.exists()) {
+      newFile.createNewFile();
+      OutputStream writer = new FileOutputStream(newFile.getAbsolutePath());
+      //FILE_LEN is length in bytes.
+      byte[] bytes = new byte[1];
+      bytes[0] = (byte) 0;
+      for(int i=0; i< FILE_LEN; i++) {
+        writer.write(bytes);
+      }
+      writer.flush();
+      writer.close();
+      LOG.info("Created provided file " + newFile +
+          " of length " + newFile.length());
+    }
+  }
+
+  private static void createProvidedReplicas(Configuration conf) {
+    long numReplicas = (long) Math.ceil((double) FILE_LEN/BLK_LEN);
+    File providedFile = new File(BASE_DIR, FILE_NAME);
+    replicas = new ArrayList<ProvidedReplica>();
+
+    LOG.info("Creating " + numReplicas + " provided replicas");
+    for (int i=0; i<numReplicas; i++) {
+      long currentReplicaLength =
+          FILE_LEN >= (i+1)*BLK_LEN ? BLK_LEN : FILE_LEN - i*BLK_LEN;
+      replicas.add(
+          new FinalizedProvidedReplica(i, providedFile.toURI(), i*BLK_LEN,
+          currentReplicaLength, 0, null, conf));
+    }
+  }
+
+  @Before
+  public void setUp() throws IOException {
+    createFileIfNotExists(new File(BASE_DIR).getAbsolutePath());
+    createProvidedReplicas(new Configuration());
+  }
+
+  /**
+   * Checks if {@code ins} matches the provided file from offset
+   * {@code fileOffset} for length {@code dataLength}.
+   * @param file the local file
+   * @param ins input stream to compare against
+   * @param fileOffset offset
+   * @param dataLength length
+   * @throws IOException
+   */
+  private void verifyReplicaContents(File file,
+      InputStream ins, long fileOffset, long dataLength)
+          throws IOException {
+
+    InputStream fileIns = new FileInputStream(file);
+    fileIns.skip(fileOffset);
+
+    try (ReadableByteChannel i =
+        Channels.newChannel(new BoundedInputStream(fileIns, dataLength))) {
+      try (ReadableByteChannel j = Channels.newChannel(ins)) {
+        ByteBuffer ib = ByteBuffer.allocate(4096);
+        ByteBuffer jb = ByteBuffer.allocate(4096);
+        while (true) {
+          int il = i.read(ib);
+          int jl = j.read(jb);
+          if (il < 0 || jl < 0) {
+            assertEquals(il, jl);
+            break;
+          }
+          ib.flip();
+          jb.flip();
+          int cmp = Math.min(ib.remaining(), jb.remaining());
+          for (int k = 0; k < cmp; ++k) {
+            assertEquals(ib.get(), jb.get());
+          }
+          ib.compact();
+          jb.compact();
+        }
+      }
+    }
+  }
+
+  @Test
+  public void testProvidedReplicaRead() throws IOException {
+
+    File providedFile = new File(BASE_DIR, FILE_NAME);
+    for(int i=0; i < replicas.size(); i++) {
+      ProvidedReplica replica = replicas.get(i);
+      //block data should exist!
+      assertTrue(replica.blockDataExists());
+      assertEquals(providedFile.toURI(), replica.getBlockURI());
+      verifyReplicaContents(providedFile, replica.getDataInputStream(0),
+          BLK_LEN*i, replica.getBlockDataLength());
+    }
+    LOG.info("All replica contents verified");
+
+    providedFile.delete();
+    //the block data should no longer be found!
+    for(int i=0; i < replicas.size(); i++) {
+      ProvidedReplica replica = replicas.get(i);
+      assertTrue(!replica.blockDataExists());
+    }
+  }
+
+}




[27/29] hadoop git commit: HDFS-10706. [READ] Add tool generating FSImage from external store

Posted by vi...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/616765e9/hadoop-tools/hadoop-fs2img/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSingleUGIResolver.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-fs2img/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSingleUGIResolver.java b/hadoop-tools/hadoop-fs2img/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSingleUGIResolver.java
new file mode 100644
index 0000000..9aef106
--- /dev/null
+++ b/hadoop-tools/hadoop-fs2img/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSingleUGIResolver.java
@@ -0,0 +1,148 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode;
+
+import java.io.IOException;
+import java.util.Map;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.security.UserGroupInformation;
+
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TestName;
+import static org.junit.Assert.*;
+
+/**
+ * Validate resolver assigning all paths to a single owner/group.
+ */
+public class TestSingleUGIResolver {
+
+  @Rule public TestName name = new TestName();
+
+  private static final int TESTUID = 10101;
+  private static final int TESTGID = 10102;
+  private static final String TESTUSER = "tenaqvyybdhragqvatbf";
+  private static final String TESTGROUP = "tnyybcvatlnxf";
+
+  private SingleUGIResolver ugi = new SingleUGIResolver();
+
+  @Before
+  public void setup() {
+    Configuration conf = new Configuration(false);
+    conf.setInt(SingleUGIResolver.UID, TESTUID);
+    conf.setInt(SingleUGIResolver.GID, TESTGID);
+    conf.set(SingleUGIResolver.USER, TESTUSER);
+    conf.set(SingleUGIResolver.GROUP, TESTGROUP);
+    ugi.setConf(conf);
+    System.out.println(name.getMethodName());
+  }
+
+  @Test
+  public void testRewrite() {
+    FsPermission p1 = new FsPermission((short)0755);
+    match(ugi.resolve(file("dingo", "dingo", p1)), p1);
+    match(ugi.resolve(file(TESTUSER, "dingo", p1)), p1);
+    match(ugi.resolve(file("dingo", TESTGROUP, p1)), p1);
+    match(ugi.resolve(file(TESTUSER, TESTGROUP, p1)), p1);
+
+    FsPermission p2 = new FsPermission((short)0x8000);
+    match(ugi.resolve(file("dingo", "dingo", p2)), p2);
+    match(ugi.resolve(file(TESTUSER, "dingo", p2)), p2);
+    match(ugi.resolve(file("dingo", TESTGROUP, p2)), p2);
+    match(ugi.resolve(file(TESTUSER, TESTGROUP, p2)), p2);
+
+    Map<Integer, String> ids = ugi.ugiMap();
+    assertEquals(2, ids.size());
+    assertEquals(TESTUSER, ids.get(10101));
+    assertEquals(TESTGROUP, ids.get(10102));
+  }
+
+  @Test
+  public void testDefault() {
+    String user;
+    try {
+      user = UserGroupInformation.getCurrentUser().getShortUserName();
+    } catch (IOException e) {
+      user = "hadoop";
+    }
+    Configuration conf = new Configuration(false);
+    ugi.setConf(conf);
+    Map<Integer, String> ids = ugi.ugiMap();
+    assertEquals(2, ids.size());
+    assertEquals(user, ids.get(0));
+    assertEquals(user, ids.get(1));
+  }
+
+  @Test(expected=IllegalArgumentException.class)
+  public void testInvalidUid() {
+    Configuration conf = ugi.getConf();
+    conf.setInt(SingleUGIResolver.UID, (1 << 24) + 1);
+    ugi.setConf(conf);
+    ugi.resolve(file(TESTUSER, TESTGROUP, new FsPermission((short)0777)));
+  }
+
+  @Test(expected=IllegalArgumentException.class)
+  public void testInvalidGid() {
+    Configuration conf = ugi.getConf();
+    conf.setInt(SingleUGIResolver.GID, (1 << 24) + 1);
+    ugi.setConf(conf);
+    ugi.resolve(file(TESTUSER, TESTGROUP, new FsPermission((short)0777)));
+  }
+
+  @Test(expected=IllegalStateException.class)
+  public void testDuplicateIds() {
+    Configuration conf = new Configuration(false);
+    conf.setInt(SingleUGIResolver.UID, 4344);
+    conf.setInt(SingleUGIResolver.GID, 4344);
+    conf.set(SingleUGIResolver.USER, TESTUSER);
+    conf.set(SingleUGIResolver.GROUP, TESTGROUP);
+    ugi.setConf(conf);
+    ugi.ugiMap();
+  }
+
+  static void match(long encoded, FsPermission p) {
+    assertEquals(p, new FsPermission((short)(encoded & 0xFFFF)));
+    long uid = (encoded >>> UGIResolver.USER_STRID_OFFSET);
+    uid &= UGIResolver.USER_GROUP_STRID_MASK;
+    assertEquals(TESTUID, uid);
+    long gid = (encoded >>> UGIResolver.GROUP_STRID_OFFSET);
+    gid &= UGIResolver.USER_GROUP_STRID_MASK;
+    assertEquals(TESTGID, gid);
+  }
+
+  static FileStatus file(String user, String group, FsPermission perm) {
+    Path p = new Path("foo://bar:4344/baz/dingo");
+    return new FileStatus(
+          4344 * (1 << 20),        /* long length,             */
+          false,                   /* boolean isdir,           */
+          1,                       /* int block_replication,   */
+          256 * (1 << 20),         /* long blocksize,          */
+          0L,                      /* long modification_time,  */
+          0L,                      /* long access_time,        */
+          perm,                    /* FsPermission permission, */
+          user,                    /* String owner,            */
+          group,                   /* String group,            */
+          p);                      /* Path path                */
+  }
+
+}
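
The match() helper above decodes a FsPermission plus user and group string ids that UGIResolver packs into a single long. A generic sketch of that kind of bit packing; the 16/40-bit offsets and 24-bit id width used here are illustrative assumptions, not the actual UGIResolver constants:

public class PermissionPackingSketch {

  // Illustrative layout: permission in the low 16 bits, group id above it,
  // user id above that, each id 24 bits wide.
  static final int GROUP_OFFSET = 16;
  static final int USER_OFFSET = 40;
  static final long ID_MASK = (1 << 24) - 1;

  static long pack(short permission, long uid, long gid) {
    return (uid << USER_OFFSET) | (gid << GROUP_OFFSET) | (permission & 0xFFFF);
  }

  public static void main(String[] args) {
    long encoded = pack((short) 0755, 10101, 10102);
    System.out.println("uid  = " + ((encoded >>> USER_OFFSET) & ID_MASK));
    System.out.println("gid  = " + ((encoded >>> GROUP_OFFSET) & ID_MASK));
    System.out.println("perm = " + Long.toOctalString(encoded & 0xFFFF));
  }
}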

http://git-wip-us.apache.org/repos/asf/hadoop/blob/616765e9/hadoop-tools/hadoop-fs2img/src/test/resources/log4j.properties
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-fs2img/src/test/resources/log4j.properties b/hadoop-tools/hadoop-fs2img/src/test/resources/log4j.properties
new file mode 100644
index 0000000..2ebf29e
--- /dev/null
+++ b/hadoop-tools/hadoop-fs2img/src/test/resources/log4j.properties
@@ -0,0 +1,24 @@
+#
+#   Licensed to the Apache Software Foundation (ASF) under one or more
+#   contributor license agreements.  See the NOTICE file distributed with
+#   this work for additional information regarding copyright ownership.
+#   The ASF licenses this file to You under the Apache License, Version 2.0
+#   (the "License"); you may not use this file except in compliance with
+#   the License.  You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# log4j configuration used during build and unit tests
+
+log4j.rootLogger=INFO,stdout
+log4j.threshold=ALL
+log4j.appender.stdout=org.apache.log4j.ConsoleAppender
+log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
+log4j.appender.stdout.layout.ConversionPattern=%d{ISO8601} %-5p [%t]: %c{2} (%F:%M(%L)) - %m%n
+

http://git-wip-us.apache.org/repos/asf/hadoop/blob/616765e9/hadoop-tools/hadoop-tools-dist/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-tools-dist/pom.xml b/hadoop-tools/hadoop-tools-dist/pom.xml
index dd28404..13117a7 100644
--- a/hadoop-tools/hadoop-tools-dist/pom.xml
+++ b/hadoop-tools/hadoop-tools-dist/pom.xml
@@ -122,6 +122,12 @@
       <scope>compile</scope>
       <version>${project.version}</version>
     </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-fs2img</artifactId>
+      <scope>compile</scope>
+      <version>${project.version}</version>
+    </dependency>
   </dependencies>
 
   <build>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/616765e9/hadoop-tools/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-tools/pom.xml b/hadoop-tools/pom.xml
index 056205c..6a53b50 100644
--- a/hadoop-tools/pom.xml
+++ b/hadoop-tools/pom.xml
@@ -47,6 +47,7 @@
     <module>hadoop-kafka</module>
     <module>hadoop-azure-datalake</module>
     <module>hadoop-aliyun</module>
+    <module>hadoop-fs2img</module>
   </modules>
 
   <build>




[02/29] hadoop git commit: HDFS-11674. reserveSpaceForReplicas is not released if append request failed due to mirror down and replica recovered (Contributed by Vinayakumar B)

Posted by vi...@apache.org.
HDFS-11674. reserveSpaceForReplicas is not released if append request failed due to mirror down and replica recovered (Contributed by Vinayakumar B)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1411612a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1411612a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1411612a

Branch: refs/heads/HDFS-9806
Commit: 1411612aa4e70c704b941723217ed4efd8a0125b
Parents: 0d5c8ed
Author: Vinayakumar B <vi...@apache.org>
Authored: Fri May 12 07:38:18 2017 +0530
Committer: Vinayakumar B <vi...@apache.org>
Committed: Fri May 12 07:38:18 2017 +0530

----------------------------------------------------------------------
 .../datanode/fsdataset/impl/FsDatasetImpl.java  |  4 ++
 .../org/apache/hadoop/hdfs/DFSTestUtil.java     |  5 ++
 .../fsdataset/impl/TestSpaceReservation.java    | 67 +++++++++++++++++++-
 3 files changed, 75 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1411612a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
index 9a5002a..e7d4d25 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
@@ -2455,6 +2455,10 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
       LOG.info("initReplicaRecovery: changing replica state for "
           + block + " from " + replica.getState()
           + " to " + rur.getState());
+      if (replica.getState() == ReplicaState.TEMPORARY || replica
+          .getState() == ReplicaState.RBW) {
+        ((ReplicaInPipeline) replica).releaseAllBytesReserved();
+      }
     }
     return rur.createInfo();
   }
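
The fix releases a replica's reserved disk space as soon as recovery converts a still-being-written replica (TEMPORARY or RBW) into a replica under recovery, since no more data will arrive on it. A toy sketch of that guard with stand-in types rather than the real HDFS classes:

public class ReleaseReservedSpaceSketch {

  enum ReplicaState { TEMPORARY, RBW, FINALIZED, RUR }

  static long reservedBytes = 64L * 1024 * 1024;  // pretend reservation for an open replica

  // Mirrors the new guard: only replicas still receiving data hold a reservation,
  // so hand it back (ReplicaInPipeline#releaseAllBytesReserved in the real code).
  static void onRecovery(ReplicaState state) {
    if (state == ReplicaState.TEMPORARY || state == ReplicaState.RBW) {
      reservedBytes = 0;
    }
  }

  public static void main(String[] args) {
    onRecovery(ReplicaState.RBW);
    System.out.println("reserved after recovery: " + reservedBytes);  // 0
  }
}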

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1411612a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
index c98a336..2cfcc2b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
@@ -1462,6 +1462,11 @@ public class DFSTestUtil {
     out.abort();
   }
 
+  public static void setPipeline(DFSOutputStream out, LocatedBlock lastBlock)
+      throws IOException {
+    out.getStreamer().setPipelineInConstruction(lastBlock);
+  }
+
   public static byte[] asArray(ByteBuffer buf) {
     byte arr[] = new byte[buf.remaining()];
     buf.duplicate().get(arr);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1411612a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestSpaceReservation.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestSpaceReservation.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestSpaceReservation.java
index fad5216..2daca86 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestSpaceReservation.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestSpaceReservation.java
@@ -31,10 +31,13 @@ import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.fail;
 
 import org.apache.hadoop.fs.BlockLocation;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.HdfsBlockLocation;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.*;
 import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeFaultInjector;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
@@ -44,6 +47,7 @@ import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.Daemon;
 import org.apache.log4j.Level;
 import org.junit.After;
+import org.junit.Before;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.rules.ExpectedException;
@@ -82,9 +86,12 @@ public class TestSpaceReservation {
 
   private static Random rand = new Random();
 
-  private void initConfig(int blockSize) {
+  @Before
+  public void before() {
     conf = new HdfsConfiguration();
+  }
 
+  private void initConfig(int blockSize) {
     // Refresh disk usage information frequently.
     conf.setInt(FS_DU_INTERVAL_KEY, DU_REFRESH_INTERVAL_MSEC);
     conf.setLong(DFS_BLOCK_SIZE_KEY, blockSize);
@@ -680,4 +687,62 @@ public class TestSpaceReservation {
       }
     }
   }
+
+  @Test(timeout = 60000)
+  public void testReservedSpaceForLeaseRecovery() throws Exception {
+    final short replication = 3;
+    conf.setInt(
+        CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY, 2);
+    conf.setInt(
+        CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_RETRY_INTERVAL_KEY,
+        1000);
+    startCluster(BLOCK_SIZE, replication, -1);
+
+    final String methodName = GenericTestUtils.getMethodName();
+    final Path file = new Path("/" + methodName + ".01.dat");
+    // Write to the file and kill the writer.
+    FSDataOutputStream os = fs.create(file, replication);
+    os.write(new byte[8192]);
+    os.hflush();
+    os.close();
+    /*
+     * Reset the pipeline for the append in such a way that, datanode which is
+     * down is one of the mirror, not the first datanode.
+     */
+    HdfsBlockLocation blockLocation = (HdfsBlockLocation) fs.getClient()
+        .getBlockLocations(file.toString(), 0, BLOCK_SIZE)[0];
+    LocatedBlock lastBlock = blockLocation.getLocatedBlock();
+    // stop 3rd node.
+    cluster.stopDataNode(lastBlock.getLocations()[2].getName());
+    try {
+      os = fs.append(file);
+      DFSTestUtil.setPipeline((DFSOutputStream) os.getWrappedStream(),
+          lastBlock);
+      os.writeBytes("hi");
+      os.hsync();
+    } catch (IOException e) {
+      // Append will fail due to not able to replace datanodes in 3 nodes
+      // cluster.
+      LOG.info("", e);
+    }
+    DFSTestUtil.abortStream((DFSOutputStream) os.getWrappedStream());
+    /*
+     * There is a chance that stopped DN could be chosen as primary for
+     * recovery. If so, then recovery will not happen in time. So mark stopped
+     * node as dead to exclude that node.
+     */
+    cluster.setDataNodeDead(lastBlock.getLocations()[2]);
+    fs.recoverLease(file);
+    GenericTestUtils.waitFor(new Supplier<Boolean>() {
+      @Override
+      public Boolean get() {
+        try {
+          return fs.isFileClosed(file);
+        } catch (IOException e) {
+          return false;
+        }
+      }
+    }, 500, 30000);
+    checkReservedSpace(0);
+  }
 }
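
The new test waits for lease recovery to close the file by polling fs.isFileClosed through GenericTestUtils.waitFor. A dependency-free sketch of that poll-until-true pattern (the helper below is illustrative, not the GenericTestUtils API):

import java.util.concurrent.TimeoutException;
import java.util.function.Supplier;

public class WaitForSketch {

  // Re-check a condition every intervalMs until it holds or timeoutMs elapses.
  static void waitFor(Supplier<Boolean> check, long intervalMs, long timeoutMs)
      throws InterruptedException, TimeoutException {
    long deadline = System.currentTimeMillis() + timeoutMs;
    while (!check.get()) {
      if (System.currentTimeMillis() > deadline) {
        throw new TimeoutException("condition not met within " + timeoutMs + " ms");
      }
      Thread.sleep(intervalMs);
    }
  }

  public static void main(String[] args) throws Exception {
    long start = System.currentTimeMillis();
    // Toy condition that becomes true after roughly one second.
    waitFor(() -> System.currentTimeMillis() - start > 1000, 100, 5000);
    System.out.println("condition met");
  }
}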




[12/29] hadoop git commit: HDFS-11696. Fix warnings from Spotbugs in hadoop-hdfs. Contributed by Yiqun Lin.

Posted by vi...@apache.org.
HDFS-11696. Fix warnings from Spotbugs in hadoop-hdfs. Contributed by Yiqun Lin.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/89a8edc0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/89a8edc0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/89a8edc0

Branch: refs/heads/HDFS-9806
Commit: 89a8edc0149e3f31a5ade9a0927c4b6332cf6b1a
Parents: 9b90e52
Author: Akira Ajisaka <aa...@apache.org>
Authored: Tue May 16 12:41:59 2017 -0400
Committer: Akira Ajisaka <aa...@apache.org>
Committed: Tue May 16 12:41:59 2017 -0400

----------------------------------------------------------------------
 .../java/org/apache/hadoop/hdfs/DFSClient.java  |  7 +++--
 .../hdfs/server/protocol/SlowDiskReports.java   |  5 ++--
 .../dev-support/findbugsExcludeFile.xml         |  5 ++++
 .../hdfs/qjournal/server/JournalNode.java       | 16 +++++++-----
 .../hdfs/server/common/HdfsServerConstants.java |  7 ++++-
 .../hdfs/server/datanode/DataStorage.java       | 12 ++++++---
 .../namenode/NNStorageRetentionManager.java     | 27 +++++++++++---------
 .../org/apache/hadoop/hdfs/tools/DFSAdmin.java  |  6 ++---
 .../offlineImageViewer/ImageLoaderCurrent.java  | 10 +++++---
 9 files changed, 62 insertions(+), 33 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/89a8edc0/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index d21b9b4..7de8b71 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -2857,9 +2857,12 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
     }
     synchronized (DFSClient.class) {
       if (STRIPED_READ_THREAD_POOL == null) {
-        STRIPED_READ_THREAD_POOL = DFSUtilClient.getThreadPoolExecutor(1,
+        // Only after thread pool is fully constructed then save it to
+        // volatile field.
+        ThreadPoolExecutor threadPool = DFSUtilClient.getThreadPoolExecutor(1,
             numThreads, 60, "StripedRead-", true);
-        STRIPED_READ_THREAD_POOL.allowCoreThreadTimeOut(true);
+        threadPool.allowCoreThreadTimeOut(true);
+        STRIPED_READ_THREAD_POOL = threadPool;
       }
     }
   }
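
The DFSClient change follows the safe-publication rule Spotbugs enforces: configure the pool completely in a local variable and only then assign it to the volatile field, so no thread can observe a half-initialized pool. A self-contained sketch of the same pattern (the pool parameters here are arbitrary, not DFSUtilClient's):

import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class SafePublicationSketch {

  private static volatile ThreadPoolExecutor stripedReadPool;

  static void initThreadPool(int numThreads) {
    synchronized (SafePublicationSketch.class) {
      if (stripedReadPool == null) {
        // Configure the pool completely in a local variable first...
        ThreadPoolExecutor pool = new ThreadPoolExecutor(1, numThreads,
            60, TimeUnit.SECONDS, new LinkedBlockingQueue<>());
        pool.allowCoreThreadTimeOut(true);
        // ...then publish it with a single assignment to the volatile field,
        // so readers never see a pool that is still being configured.
        stripedReadPool = pool;
      }
    }
  }

  public static void main(String[] args) {
    initThreadPool(4);
    stripedReadPool.shutdown();
  }
}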

http://git-wip-us.apache.org/repos/asf/hadoop/blob/89a8edc0/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/protocol/SlowDiskReports.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/protocol/SlowDiskReports.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/protocol/SlowDiskReports.java
index 8095c2a..496389a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/protocol/SlowDiskReports.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/protocol/SlowDiskReports.java
@@ -101,8 +101,9 @@ public final class SlowDiskReports {
     }
 
     boolean areEqual;
-    for (String disk : this.slowDisks.keySet()) {
-      if (!this.slowDisks.get(disk).equals(that.slowDisks.get(disk))) {
+    for (Map.Entry<String, Map<DiskOp, Double>> entry : this.slowDisks
+        .entrySet()) {
+      if (!entry.getValue().equals(that.slowDisks.get(entry.getKey()))) {
         return false;
       }
     }

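The warning fixed here (iterating a map's keySet and then calling get() for every key) goes away by walking the entrySet, which yields key and value in one pass. A small self-contained sketch of the same comparison:

    import java.util.HashMap;
    import java.util.Map;

    public class MapIterDemo {
      public static void main(String[] args) {
        Map<String, Double> a = new HashMap<>();
        Map<String, Double> b = new HashMap<>();
        a.put("disk1", 1.5);
        b.put("disk1", 1.5);

        // entrySet() avoids a second get() lookup per key on map 'a'.
        boolean equal = true;
        for (Map.Entry<String, Double> e : a.entrySet()) {
          if (!e.getValue().equals(b.get(e.getKey()))) {
            equal = false;
            break;
          }
        }
        System.out.println("maps agree: " + equal);
      }
    }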
http://git-wip-us.apache.org/repos/asf/hadoop/blob/89a8edc0/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml b/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml
index be54efb..9270990 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml
@@ -252,4 +252,9 @@
         <Class name="org.apache.hadoop.hdfs.server.datanode.checker.AbstractFuture" />
         <Bug pattern="NS_DANGEROUS_NON_SHORT_CIRCUIT" />
     </Match>
+    <Match>
+        <Class name="org.apache.hadoop.hdfs.server.namenode.NNUpgradeUtil$1" />
+        <Method name="visitFile" />
+        <Bug pattern="NP_NULL_ON_SOME_PATH_FROM_RETURN_VALUE" />
+    </Match>
  </FindBugsFilter>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/89a8edc0/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNode.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNode.java
index 42e9be7..ab724d5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNode.java
@@ -297,14 +297,18 @@ public class JournalNode implements Tool, Configurable, JournalNodeMXBean {
         return file.isDirectory();
       }
     });
-    for (File journalDir : journalDirs) {
-      String jid = journalDir.getName();
-      if (!status.containsKey(jid)) {
-        Map<String, String> jMap = new HashMap<String, String>();
-        jMap.put("Formatted", "true");
-        status.put(jid, jMap);
+
+    if (journalDirs != null) {
+      for (File journalDir : journalDirs) {
+        String jid = journalDir.getName();
+        if (!status.containsKey(jid)) {
+          Map<String, String> jMap = new HashMap<String, String>();
+          jMap.put("Formatted", "true");
+          status.put(jid, jMap);
+        }
       }
     }
+
     return JSON.toString(status);
   }
   

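File.listFiles() returns null rather than an empty array when the path is missing, is not a directory, or an I/O error occurs, which is what Spotbugs flags. A minimal sketch of the guarded iteration, independent of JournalNode:

    import java.io.File;

    public class ListFilesDemo {
      public static void main(String[] args) {
        // listFiles() can return null, so the result must be checked
        // before iterating.
        File[] children = new File(args.length > 0 ? args[0] : ".")
            .listFiles(File::isDirectory);
        if (children != null) {
          for (File dir : children) {
            System.out.println(dir.getName());
          }
        }
      }
    }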
http://git-wip-us.apache.org/repos/asf/hadoop/blob/89a8edc0/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java
index c3098f3..ab50eb4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java
@@ -188,8 +188,10 @@ public interface HdfsServerConstants {
         return NamenodeRole.NAMENODE;
       }
     }
-    
+
     public void setClusterId(String cid) {
+      Preconditions.checkState(this == UPGRADE || this == UPGRADEONLY
+          || this == FORMAT);
       clusterId = cid;
     }
 
@@ -214,6 +216,7 @@ public interface HdfsServerConstants {
     }
 
     public void setForce(int force) {
+      Preconditions.checkState(this == RECOVER);
       this.force = force;
     }
     
@@ -226,6 +229,7 @@ public interface HdfsServerConstants {
     }
     
     public void setForceFormat(boolean force) {
+      Preconditions.checkState(this == FORMAT);
       isForceFormat = force;
     }
     
@@ -234,6 +238,7 @@ public interface HdfsServerConstants {
     }
     
     public void setInteractiveFormat(boolean interactive) {
+      Preconditions.checkState(this == FORMAT);
       isInteractiveFormat = interactive;
     }
     

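The added Preconditions.checkState calls make the option-specific setters fail fast when invoked on the wrong StartupOption. A sketch of the idea with a hypothetical StartupMode enum and plain IllegalStateException in place of Guava's Preconditions:

    public enum StartupMode {
      FORMAT, RECOVER, UPGRADE;

      private String clusterId;
      private int force;

      // Only UPGRADE and FORMAT may carry a cluster id.
      public void setClusterId(String cid) {
        if (this != UPGRADE && this != FORMAT) {
          throw new IllegalStateException("clusterId not applicable to " + this);
        }
        this.clusterId = cid;
      }

      // Only RECOVER may carry a force level.
      public void setForce(int f) {
        if (this != RECOVER) {
          throw new IllegalStateException("force not applicable to " + this);
        }
        this.force = f;
      }

      public String getClusterId() { return clusterId; }
      public int getForce() { return force; }
    }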
http://git-wip-us.apache.org/repos/asf/hadoop/blob/89a8edc0/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
index 9a71081..6d6e96a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
@@ -1336,10 +1336,14 @@ public class DataStorage extends Storage {
           return name.startsWith(BLOCK_SUBDIR_PREFIX);
         }
       });
-    for(int i = 0; i < otherNames.length; i++)
-      linkBlocksHelper(new File(from, otherNames[i]),
-          new File(to, otherNames[i]), oldLV, hl, upgradeToIdBasedLayout,
-          blockRoot, idBasedLayoutSingleLinks);
+
+    if (otherNames != null) {
+      for (int i = 0; i < otherNames.length; i++) {
+        linkBlocksHelper(new File(from, otherNames[i]),
+            new File(to, otherNames[i]), oldLV, hl, upgradeToIdBasedLayout,
+            blockRoot, idBasedLayoutSingleLinks);
+      }
+    }
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/89a8edc0/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorageRetentionManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorageRetentionManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorageRetentionManager.java
index 98b7e9a..2a83541 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorageRetentionManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorageRetentionManager.java
@@ -255,24 +255,27 @@ public class NNStorageRetentionManager {
     });
 
     // Check whether there is any work to do.
-    if (filesInStorage.length <= numCheckpointsToRetain) {
+    if (filesInStorage != null
+        && filesInStorage.length <= numCheckpointsToRetain) {
       return;
     }
 
     // Create a sorted list of txids from the file names.
     TreeSet<Long> sortedTxIds = new TreeSet<Long>();
-    for (String fName : filesInStorage) {
-      // Extract the transaction id from the file name.
-      long fTxId;
-      try {
-        fTxId = Long.parseLong(fName.substring(oivImagePrefix.length() + 1));
-      } catch (NumberFormatException nfe) {
-        // This should not happen since we have already filtered it.
-        // Log and continue.
-        LOG.warn("Invalid file name. Skipping " + fName);
-        continue;
+    if (filesInStorage != null) {
+      for (String fName : filesInStorage) {
+        // Extract the transaction id from the file name.
+        long fTxId;
+        try {
+          fTxId = Long.parseLong(fName.substring(oivImagePrefix.length() + 1));
+        } catch (NumberFormatException nfe) {
+          // This should not happen since we have already filtered it.
+          // Log and continue.
+          LOG.warn("Invalid file name. Skipping " + fName);
+          continue;
+        }
+        sortedTxIds.add(Long.valueOf(fTxId));
       }
-      sortedTxIds.add(Long.valueOf(fTxId));
     }
 
     int numFilesToDelete = sortedTxIds.size() - numCheckpointsToRetain;

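The retention logic extracts the transaction id that follows the image prefix and an underscore in each file name, skipping anything that does not parse. A standalone sketch with hypothetical file names and prefix:

    import java.util.TreeSet;

    public class TxIdParseDemo {
      public static void main(String[] args) {
        String prefix = "fsimage";  // hypothetical OIV image prefix
        String[] names = {"fsimage_0000000000000000042",
            "fsimage_0000000000000000007", "fsimage_bad"};
        TreeSet<Long> sortedTxIds = new TreeSet<>();
        for (String name : names) {
          try {
            // The transaction id follows the prefix and an underscore.
            sortedTxIds.add(Long.parseLong(name.substring(prefix.length() + 1)));
          } catch (NumberFormatException nfe) {
            System.out.println("Invalid file name. Skipping " + name);
          }
        }
        System.out.println(sortedTxIds);  // prints [7, 42]
      }
    }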
http://git-wip-us.apache.org/repos/asf/hadoop/blob/89a8edc0/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
index 6d67089..5375cd9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
@@ -1917,7 +1917,7 @@ public class DFSAdmin extends FsShell {
         return exitCode;
       }
     } else if ("-report".equals(cmd)) {
-      if (argv.length < 1) {
+      if (argv.length > 4) {
         printUsage(cmd);
         return exitCode;
       }
@@ -1947,7 +1947,7 @@ public class DFSAdmin extends FsShell {
         return exitCode;
       }
     } else if (RollingUpgradeCommand.matches(cmd)) {
-      if (argv.length < 1 || argv.length > 2) {
+      if (argv.length > 2) {
         printUsage(cmd);
         return exitCode;
       }
@@ -2022,7 +2022,7 @@ public class DFSAdmin extends FsShell {
         return exitCode;
       }
     } else if ("-triggerBlockReport".equals(cmd)) {
-      if (argv.length < 1) {
+      if ((argv.length != 2) && (argv.length != 3)) {
         printUsage(cmd);
         return exitCode;
       }

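The tightened checks validate the operand counts each subcommand actually accepts. A tiny illustration for a -triggerBlockReport-style command, where the command plus one or two operands is considered valid:

    public class ArgCheckDemo {
      // Accept the command itself plus one or two operands.
      static boolean validTriggerBlockReportArgs(String[] argv) {
        return argv.length == 2 || argv.length == 3;
      }

      public static void main(String[] args) {
        String[] ok = {"-triggerBlockReport", "datanode:9867"};
        String[] bad = {"-triggerBlockReport"};
        System.out.println(validTriggerBlockReportArgs(ok));   // true
        System.out.println(validTriggerBlockReportArgs(bad));  // false
      }
    }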
http://git-wip-us.apache.org/repos/asf/hadoop/blob/89a8edc0/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageLoaderCurrent.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageLoaderCurrent.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageLoaderCurrent.java
index f2c7427..2e2eaf4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageLoaderCurrent.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageLoaderCurrent.java
@@ -722,9 +722,13 @@ class ImageLoaderCurrent implements ImageLoader {
       if (supportSnapshot && supportInodeId) {
         dirNodeMap.put(inodeId, pathName);
       }
-      v.visit(ImageElement.NS_QUOTA, numBlocks == -1 ? in.readLong() : -1);
-      if (NameNodeLayoutVersion.supports(Feature.DISKSPACE_QUOTA, imageVersion))
-        v.visit(ImageElement.DS_QUOTA, numBlocks == -1 ? in.readLong() : -1);
+
+      v.visit(ImageElement.NS_QUOTA, in.readLong());
+      if (NameNodeLayoutVersion.supports(Feature.DISKSPACE_QUOTA,
+          imageVersion)) {
+        v.visit(ImageElement.DS_QUOTA, in.readLong());
+      }
+
       if (supportSnapshot) {
         boolean snapshottable = in.readBoolean();
         if (!snapshottable) {




[15/29] hadoop git commit: YARN-6535. Program needs to exit when SLS finishes. (yufeigu via rkanter)

Posted by vi...@apache.org.
YARN-6535. Program needs to exit when SLS finishes. (yufeigu via rkanter)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/101852ca
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/101852ca
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/101852ca

Branch: refs/heads/HDFS-9806
Commit: 101852ca11ed4a9c4d4664c6c797fa7173dc59ae
Parents: b415c6f
Author: Robert Kanter <rk...@apache.org>
Authored: Tue May 16 17:52:17 2017 -0700
Committer: Robert Kanter <rk...@apache.org>
Committed: Tue May 16 17:52:17 2017 -0700

----------------------------------------------------------------------
 .../src/main/java/org/apache/hadoop/yarn/sls/SLSRunner.java    | 6 ++++++
 1 file changed, 6 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/101852ca/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/SLSRunner.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/SLSRunner.java b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/SLSRunner.java
index 03281a5..f66d2d0 100644
--- a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/SLSRunner.java
+++ b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/SLSRunner.java
@@ -124,6 +124,8 @@ public class SLSRunner extends Configured implements Tool {
   private final static int DEFAULT_MAPPER_PRIORITY = 20;
   private final static int DEFAULT_REDUCER_PRIORITY = 10;
 
+  private static boolean exitAtTheFinish = false;
+
   /**
    * The type of trace in input.
    */
@@ -761,6 +763,9 @@ public class SLSRunner extends Configured implements Tool {
 
     if (remainingApps == 0) {
       LOG.info("SLSRunner tears down.");
+      if (exitAtTheFinish) {
+        System.exit(0);
+      }
     }
   }
 
@@ -857,6 +862,7 @@ public class SLSRunner extends Configured implements Tool {
   }
 
   public static void main(String[] argv) throws Exception {
+    exitAtTheFinish = true;
     ToolRunner.run(new Configuration(), new SLSRunner(), argv);
   }
 


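The static exitAtTheFinish flag is set only in main(), so System.exit(0) fires when the simulator is launched from the command line but not when it is driven programmatically, for example from tests. A minimal sketch of that pattern outside of SLSRunner:

    public class ExitOnFinishDemo {
      // Set only by main(), so callers that invoke the object directly
      // (e.g. tests) are not terminated by System.exit().
      private static boolean exitAtTheFinish = false;

      void onAllAppsDone(int remainingApps) {
        if (remainingApps == 0) {
          System.out.println("simulation finished");
          if (exitAtTheFinish) {
            System.exit(0);
          }
        }
      }

      public static void main(String[] args) {
        exitAtTheFinish = true;
        new ExitOnFinishDemo().onAllAppsDone(0);
      }
    }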


[19/29] hadoop git commit: HADOOP-14419. Remove findbugs report from docs profile. Contributed by Andras Bokor.

Posted by vi...@apache.org.
HADOOP-14419. Remove findbugs report from docs profile. Contributed by Andras Bokor.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e6f6682a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e6f6682a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e6f6682a

Branch: refs/heads/HDFS-9806
Commit: e6f6682aa864635b9f70b387872701c064455ad5
Parents: d87a63a
Author: Akira Ajisaka <aa...@apache.org>
Authored: Wed May 17 07:35:41 2017 -0400
Committer: Akira Ajisaka <aa...@apache.org>
Committed: Wed May 17 07:35:41 2017 -0400

----------------------------------------------------------------------
 BUILDING.txt                |  2 --
 hadoop-project-dist/pom.xml | 20 --------------------
 2 files changed, 22 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e6f6682a/BUILDING.txt
----------------------------------------------------------------------
diff --git a/BUILDING.txt b/BUILDING.txt
index 57dad32..f9cc842 100644
--- a/BUILDING.txt
+++ b/BUILDING.txt
@@ -6,7 +6,6 @@ Requirements:
 * Unix System
 * JDK 1.8+
 * Maven 3.3 or later
-* Findbugs 1.3.9 (if running findbugs)
 * ProtocolBuffer 2.5.0
 * CMake 2.6 or newer (if compiling native code), must be 3.0 or newer on Mac
 * Zlib devel (if compiling native code)
@@ -345,7 +344,6 @@ Requirements:
 * Windows System
 * JDK 1.8+
 * Maven 3.0 or later
-* Findbugs 1.3.9 (if running findbugs)
 * ProtocolBuffer 2.5.0
 * CMake 2.6 or newer
 * Windows SDK 7.1 or Visual Studio 2010 Professional

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e6f6682a/hadoop-project-dist/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-project-dist/pom.xml b/hadoop-project-dist/pom.xml
index 988d369..8f386e7 100644
--- a/hadoop-project-dist/pom.xml
+++ b/hadoop-project-dist/pom.xml
@@ -167,21 +167,6 @@
             </executions>
           </plugin>
           <plugin>
-            <groupId>org.codehaus.mojo</groupId>
-            <artifactId>findbugs-maven-plugin</artifactId>
-            <executions>
-              <execution>
-                <goals>
-                  <goal>findbugs</goal>
-                </goals>
-                <phase>prepare-package</phase>
-              </execution>
-            </executions>
-            <configuration>
-              <excludeFilterFile>${basedir}/dev-support/findbugsExcludeFile.xml</excludeFilterFile>
-            </configuration>
-          </plugin>
-          <plugin>
             <groupId>org.apache.maven.plugins</groupId>
             <artifactId>maven-dependency-plugin</artifactId>
             <executions>
@@ -298,11 +283,6 @@
                         <path refid="maven.compile.classpath"/>
                       </classpath>
                     </javadoc>
-
-                    <xslt style="${env.FINDBUGS_HOME}/src/xsl/default.xsl"
-                          in="${project.build.directory}/findbugsXml.xml"
-                          out="${project.build.directory}/site/findbugs.html"/>
-
                   </target>
                 </configuration>
               </execution>




[03/29] hadoop git commit: HADOOP-14375. Remove tomcat support from hadoop-functions.sh. Contributed by John Zhuge.

Posted by vi...@apache.org.
HADOOP-14375. Remove tomcat support from hadoop-functions.sh. Contributed by John Zhuge.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a9e24a13
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a9e24a13
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a9e24a13

Branch: refs/heads/HDFS-9806
Commit: a9e24a13b7e3cdd92898d39ec5bf03382336e597
Parents: 1411612
Author: Akira Ajisaka <aa...@apache.org>
Authored: Fri May 12 08:48:51 2017 -0500
Committer: Akira Ajisaka <aa...@apache.org>
Committed: Fri May 12 08:48:51 2017 -0500

----------------------------------------------------------------------
 .../src/main/bin/hadoop-functions.sh            | 24 ---------
 .../scripts/hadoop_finalize_catalina_opts.bats  | 56 --------------------
 2 files changed, 80 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a9e24a13/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh b/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
index fb8e961..8ac1b0c 100755
--- a/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
+++ b/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
@@ -1457,30 +1457,6 @@ function hadoop_finalize_classpath
   hadoop_translate_cygwin_path CLASSPATH true
 }
 
-## @description  Finish Catalina configuration prior to execution
-## @audience     private
-## @stability    evolving
-## @replaceable  yes
-function hadoop_finalize_catalina_opts
-{
-
-  local prefix=${HADOOP_CATALINA_PREFIX}
-
-  hadoop_add_param CATALINA_OPTS hadoop.home.dir "-Dhadoop.home.dir=${HADOOP_HOME}"
-  if [[ -n "${JAVA_LIBRARY_PATH}" ]]; then
-    hadoop_add_param CATALINA_OPTS java.library.path "-Djava.library.path=${JAVA_LIBRARY_PATH}"
-  fi
-  hadoop_add_param CATALINA_OPTS "${prefix}.home.dir" "-D${prefix}.home.dir=${HADOOP_HOME}"
-  hadoop_add_param CATALINA_OPTS "${prefix}.config.dir" "-D${prefix}.config.dir=${HADOOP_CATALINA_CONFIG}"
-  hadoop_add_param CATALINA_OPTS "${prefix}.log.dir" "-D${prefix}.log.dir=${HADOOP_CATALINA_LOG}"
-  hadoop_add_param CATALINA_OPTS "${prefix}.temp.dir" "-D${prefix}.temp.dir=${HADOOP_CATALINA_TEMP}"
-  hadoop_add_param CATALINA_OPTS "${prefix}.admin.port" "-D${prefix}.admin.port=${HADOOP_CATALINA_ADMIN_PORT}"
-  hadoop_add_param CATALINA_OPTS "${prefix}.http.port" "-D${prefix}.http.port=${HADOOP_CATALINA_HTTP_PORT}"
-  hadoop_add_param CATALINA_OPTS "${prefix}.max.threads" "-D${prefix}.max.threads=${HADOOP_CATALINA_MAX_THREADS}"
-  hadoop_add_param CATALINA_OPTS "${prefix}.max.http.header.size" "-D${prefix}.max.http.header.size=${HADOOP_CATALINA_MAX_HTTP_HEADER_SIZE}"
-  hadoop_add_param CATALINA_OPTS "${prefix}.ssl.keystore.file" "-D${prefix}.ssl.keystore.file=${HADOOP_CATALINA_SSL_KEYSTORE_FILE}"
-}
-
 ## @description  Finish all the remaining environment settings prior
 ## @description  to executing Java.  This is a wrapper that calls
 ## @description  the other `finalize` routines.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a9e24a13/hadoop-common-project/hadoop-common/src/test/scripts/hadoop_finalize_catalina_opts.bats
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/scripts/hadoop_finalize_catalina_opts.bats b/hadoop-common-project/hadoop-common/src/test/scripts/hadoop_finalize_catalina_opts.bats
deleted file mode 100644
index d91223e..0000000
--- a/hadoop-common-project/hadoop-common/src/test/scripts/hadoop_finalize_catalina_opts.bats
+++ /dev/null
@@ -1,56 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-load hadoop-functions_test_helper
-
-@test "hadoop_finalize_catalina_opts (raw)" {
-  local j
-
-  HADOOP_IS_CYGWIN=false
-  HADOOP_CATALINA_PREFIX=test
-  CATALINA_OPTS=""
-  hadoop_finalize_catalina_opts
-  for j in test.home.dir \
-        test.config.dir \
-        test.log.dir \
-        test.admin.port \
-        test.http.port \
-        test.max.threads \
-        test.ssl.keystore.file; do
-    [ "${CATALINA_OPTS#*${j}}" != "${CATALINA_OPTS}" ]
-  done
-}
-
-@test "hadoop_finalize_catalina_opts (cygwin)" {
-  local j
-
-  skip "catalina commands not supported under cygwin yet"
-
-  HADOOP_IS_CYGWIN=true
-  HADOOP_CATALINA_PREFIX=test
-  CATALINA_OPTS=""
-
-  catalina_translate_cygwin_path () {
-    eval ${1}="foobarbaz"
-  }
-
-  hadoop_finalize_catalina_opts
-  for j in test.home.dir \
-        test.config.dir \
-        test.log.dir \
-        test.ssl.keystore.file; do
-    [ "${CATALINA_OPTS#*${j}=foobarbaz}" != "${CATALINA_OPTS}" ]
-  done
-}
\ No newline at end of file




[23/29] hadoop git commit: HDFS-10675. Datanode support to read from external stores.

Posted by vi...@apache.org.
HDFS-10675. Datanode support to read from external stores.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2630e4fd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2630e4fd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2630e4fd

Branch: refs/heads/HDFS-9806
Commit: 2630e4fdb4bde1a88baf449ea30b94fa41c2b990
Parents: d4aa9e3
Author: Virajith Jalaparti <vi...@apache.org>
Authored: Wed Mar 29 14:29:28 2017 -0700
Committer: Virajith Jalaparti <vi...@apache.org>
Committed: Wed May 17 12:41:51 2017 -0700

----------------------------------------------------------------------
 .../java/org/apache/hadoop/fs/StorageType.java  |   3 +-
 .../org/apache/hadoop/fs/shell/TestCount.java   |   3 +-
 .../hadoop/hdfs/protocol/HdfsConstants.java     |   4 +
 .../hadoop/hdfs/protocolPB/PBHelperClient.java  |   4 +
 .../src/main/proto/hdfs.proto                   |   1 +
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |  15 +
 .../hadoop/hdfs/server/common/BlockAlias.java   |  29 +
 .../hadoop/hdfs/server/common/BlockFormat.java  |  82 +++
 .../hadoop/hdfs/server/common/FileRegion.java   | 121 +++++
 .../hdfs/server/common/FileRegionProvider.java  |  37 ++
 .../hadoop/hdfs/server/common/Storage.java      |  71 ++-
 .../hadoop/hdfs/server/common/StorageInfo.java  |   6 +
 .../server/common/TextFileRegionFormat.java     | 442 ++++++++++++++++
 .../server/common/TextFileRegionProvider.java   |  88 ++++
 .../server/datanode/BlockPoolSliceStorage.java  |  21 +-
 .../hdfs/server/datanode/DataStorage.java       |  44 +-
 .../hdfs/server/datanode/DirectoryScanner.java  |  20 +-
 .../datanode/FinalizedProvidedReplica.java      |  91 ++++
 .../hdfs/server/datanode/ProvidedReplica.java   | 248 +++++++++
 .../hdfs/server/datanode/ReplicaBuilder.java    | 100 +++-
 .../hdfs/server/datanode/ReplicaInfo.java       |  20 +-
 .../hdfs/server/datanode/StorageLocation.java   |  26 +-
 .../server/datanode/fsdataset/FsDatasetSpi.java |   4 +-
 .../server/datanode/fsdataset/FsVolumeSpi.java  |  32 +-
 .../fsdataset/impl/DefaultProvidedVolumeDF.java |  58 ++
 .../datanode/fsdataset/impl/FsDatasetImpl.java  |  40 +-
 .../datanode/fsdataset/impl/FsDatasetUtil.java  |  25 +-
 .../datanode/fsdataset/impl/FsVolumeImpl.java   |  19 +-
 .../fsdataset/impl/FsVolumeImplBuilder.java     |   6 +
 .../fsdataset/impl/ProvidedVolumeDF.java        |  34 ++
 .../fsdataset/impl/ProvidedVolumeImpl.java      | 526 +++++++++++++++++++
 .../apache/hadoop/hdfs/server/mover/Mover.java  |   2 +-
 .../server/namenode/FSImageCompression.java     |   2 +-
 .../hadoop/hdfs/server/namenode/NNStorage.java  |  10 +-
 .../src/main/resources/hdfs-default.xml         |  78 +++
 .../org/apache/hadoop/hdfs/TestDFSRollback.java |   6 +-
 .../hadoop/hdfs/TestDFSStartupVersions.java     |   2 +-
 .../org/apache/hadoop/hdfs/TestDFSUpgrade.java  |   4 +-
 .../apache/hadoop/hdfs/UpgradeUtilities.java    |  16 +-
 .../hdfs/server/common/TestTextBlockFormat.java | 160 ++++++
 .../server/datanode/SimulatedFSDataset.java     |   6 +-
 .../extdataset/ExternalDatasetImpl.java         |   5 +-
 .../fsdataset/impl/TestFsDatasetImpl.java       |  17 +-
 .../fsdataset/impl/TestProvidedImpl.java        | 426 +++++++++++++++
 .../hdfs/server/namenode/TestClusterId.java     |   5 +-
 45 files changed, 2872 insertions(+), 87 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2630e4fd/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/StorageType.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/StorageType.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/StorageType.java
index 0948801..2ecd206 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/StorageType.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/StorageType.java
@@ -37,7 +37,8 @@ public enum StorageType {
   RAM_DISK(true),
   SSD(false),
   DISK(false),
-  ARCHIVE(false);
+  ARCHIVE(false),
+  PROVIDED(false);
 
   private final boolean isTransient;
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2630e4fd/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestCount.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestCount.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestCount.java
index 2a1c38c..1666a3c 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestCount.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestCount.java
@@ -285,7 +285,7 @@ public class TestCount {
         // <----13---> <-------17------> <----13-----> <------17------->
         "    SSD_QUOTA     REM_SSD_QUOTA    DISK_QUOTA    REM_DISK_QUOTA " +
         // <----13---> <-------17------>
-        "ARCHIVE_QUOTA REM_ARCHIVE_QUOTA " +
+        "ARCHIVE_QUOTA REM_ARCHIVE_QUOTA PROVIDED_QUOTA REM_PROVIDED_QUOTA " +
         "PATHNAME";
     verify(out).println(withStorageTypeHeader);
     verifyNoMoreInteractions(out);
@@ -340,6 +340,7 @@ public class TestCount {
         "    SSD_QUOTA     REM_SSD_QUOTA " +
         "   DISK_QUOTA    REM_DISK_QUOTA " +
         "ARCHIVE_QUOTA REM_ARCHIVE_QUOTA " +
+        "PROVIDED_QUOTA REM_PROVIDED_QUOTA " +
         "PATHNAME";
     verify(out).println(withStorageTypeHeader);
     verifyNoMoreInteractions(out);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2630e4fd/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
index 0d31bc4..7e92dad 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
@@ -47,6 +47,10 @@ public final class HdfsConstants {
   public static final String WARM_STORAGE_POLICY_NAME = "WARM";
   public static final byte COLD_STORAGE_POLICY_ID = 2;
   public static final String COLD_STORAGE_POLICY_NAME = "COLD";
+  // branch HDFS-9806 XXX temporary until HDFS-7076
+  public static final byte PROVIDED_STORAGE_POLICY_ID = 1;
+  public static final String PROVIDED_STORAGE_POLICY_NAME = "PROVIDED";
+
 
   // TODO should be conf injected?
   public static final int DEFAULT_DATA_SOCKET_SIZE = 128 * 1024;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2630e4fd/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
index 614f653..18f0267 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
@@ -381,6 +381,8 @@ public class PBHelperClient {
       return StorageTypeProto.ARCHIVE;
     case RAM_DISK:
       return StorageTypeProto.RAM_DISK;
+    case PROVIDED:
+      return StorageTypeProto.PROVIDED;
     default:
       throw new IllegalStateException(
           "BUG: StorageType not found, type=" + type);
@@ -397,6 +399,8 @@ public class PBHelperClient {
       return StorageType.ARCHIVE;
     case RAM_DISK:
       return StorageType.RAM_DISK;
+    case PROVIDED:
+      return StorageType.PROVIDED;
     default:
       throw new IllegalStateException(
           "BUG: StorageTypeProto not found, type=" + type);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2630e4fd/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/hdfs.proto
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/hdfs.proto b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/hdfs.proto
index 08ed3c8..470304a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/hdfs.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/hdfs.proto
@@ -210,6 +210,7 @@ enum StorageTypeProto {
   SSD = 2;
   ARCHIVE = 3;
   RAM_DISK = 4;
+  PROVIDED = 5;
 }
 
 /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2630e4fd/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index b95c7e6..6406d35 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -314,6 +314,21 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
       "dfs.namenode.edits.asynclogging";
   public static final boolean DFS_NAMENODE_EDITS_ASYNC_LOGGING_DEFAULT = false;
 
+  public static final String DFS_PROVIDER_CLASS = "dfs.provider.class";
+  public static final String DFS_PROVIDER_DF_CLASS = "dfs.provided.df.class";
+  public static final String DFS_PROVIDER_STORAGEUUID = "dfs.provided.storage.id";
+  public static final String DFS_PROVIDER_STORAGEUUID_DEFAULT =  "DS-PROVIDED";
+  public static final String DFS_PROVIDER_BLK_FORMAT_CLASS = "dfs.provided.blockformat.class";
+
+  public static final String DFS_PROVIDED_BLOCK_MAP_DELIMITER = "dfs.provided.textprovider.delimiter";
+  public static final String DFS_PROVIDED_BLOCK_MAP_DELIMITER_DEFAULT = ",";
+
+  public static final String DFS_PROVIDED_BLOCK_MAP_READ_PATH = "dfs.provided.textprovider.read.path";
+  public static final String DFS_PROVIDED_BLOCK_MAP_PATH_DEFAULT = "file:///tmp/blocks.csv";
+
+  public static final String DFS_PROVIDED_BLOCK_MAP_CODEC = "dfs.provided.textprovider.read.codec";
+  public static final String DFS_PROVIDED_BLOCK_MAP_WRITE_PATH  = "dfs.provided.textprovider.write.path";
+
   public static final String  DFS_LIST_LIMIT = "dfs.ls.limit";
   public static final int     DFS_LIST_LIMIT_DEFAULT = 1000;
   public static final String  DFS_CONTENT_SUMMARY_LIMIT_KEY = "dfs.content-summary.limit";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2630e4fd/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/BlockAlias.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/BlockAlias.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/BlockAlias.java
new file mode 100644
index 0000000..b2fac97
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/BlockAlias.java
@@ -0,0 +1,29 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.common;
+
+import org.apache.hadoop.hdfs.protocol.Block;
+
+/**
+ * Interface used to load provided blocks.
+ */
+public interface BlockAlias {
+
+  Block getBlock();
+
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2630e4fd/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/BlockFormat.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/BlockFormat.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/BlockFormat.java
new file mode 100644
index 0000000..66e7fdf
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/BlockFormat.java
@@ -0,0 +1,82 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.common;
+
+import java.io.Closeable;
+import java.io.IOException;
+
+import org.apache.hadoop.hdfs.protocol.Block;
+
+/**
+ * An abstract class used to read and write block maps for provided blocks.
+ */
+public abstract class BlockFormat<T extends BlockAlias>  {
+
+  /**
+   * An abstract class that is used to read {@link BlockAlias}es
+   * for provided blocks.
+   */
+  public static abstract class Reader<U extends BlockAlias>
+      implements Iterable<U>, Closeable {
+
+    /**
+     * reader options.
+     */
+    public interface Options { }
+
+    public abstract U resolve(Block ident) throws IOException;
+
+  }
+
+  /**
+   * Returns the reader for the provided block map.
+   * @param opts reader options
+   * @return {@link Reader} to the block map.
+   * @throws IOException
+   */
+  public abstract Reader<T> getReader(Reader.Options opts) throws IOException;
+
+  /**
+   * An abstract class used as a writer for the provided block map.
+   */
+  public static abstract class Writer<U extends BlockAlias>
+      implements Closeable {
+    /**
+     * writer options.
+     */
+    public interface Options { }
+
+    public abstract void store(U token) throws IOException;
+
+  }
+
+  /**
+   * Returns the writer for the provided block map.
+   * @param opts writer options.
+   * @return {@link Writer} to the block map.
+   * @throws IOException
+   */
+  public abstract Writer<T> getWriter(Writer.Options opts) throws IOException;
+
+  /**
+   * Refresh based on the underlying block map.
+   * @throws IOException
+   */
+  public abstract void refresh() throws IOException;
+
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2630e4fd/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/FileRegion.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/FileRegion.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/FileRegion.java
new file mode 100644
index 0000000..c568b90
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/FileRegion.java
@@ -0,0 +1,121 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.common;
+
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+
+/**
+ * This class is used to represent provided blocks that are file regions,
+ * i.e., can be described using (path, offset, length).
+ */
+public class FileRegion implements BlockAlias {
+
+  private final Path path;
+  private final long offset;
+  private final long length;
+  private final long blockId;
+  private final String bpid;
+  private final long genStamp;
+
+  public FileRegion(long blockId, Path path, long offset,
+      long length, String bpid, long genStamp) {
+    this.path = path;
+    this.offset = offset;
+    this.length = length;
+    this.blockId = blockId;
+    this.bpid = bpid;
+    this.genStamp = genStamp;
+  }
+
+  public FileRegion(long blockId, Path path, long offset,
+      long length, String bpid) {
+    this(blockId, path, offset, length, bpid,
+        HdfsConstants.GRANDFATHER_GENERATION_STAMP);
+
+  }
+
+  public FileRegion(long blockId, Path path, long offset,
+      long length, long genStamp) {
+    this(blockId, path, offset, length, null, genStamp);
+
+  }
+
+  public FileRegion(long blockId, Path path, long offset, long length) {
+    this(blockId, path, offset, length, null);
+  }
+
+  @Override
+  public Block getBlock() {
+    return new Block(blockId, length, genStamp);
+  }
+
+  @Override
+  public boolean equals(Object other) {
+    if (!(other instanceof FileRegion)) {
+      return false;
+    }
+    FileRegion o = (FileRegion) other;
+    return blockId == o.blockId
+      && offset == o.offset
+      && length == o.length
+      && genStamp == o.genStamp
+      && path.equals(o.path);
+  }
+
+  @Override
+  public int hashCode() {
+    return (int)(blockId & Integer.MIN_VALUE);
+  }
+
+  public Path getPath() {
+    return path;
+  }
+
+  public long getOffset() {
+    return offset;
+  }
+
+  public long getLength() {
+    return length;
+  }
+
+  public long getGenerationStamp() {
+    return genStamp;
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder();
+    sb.append("{ block=\"").append(getBlock()).append("\"");
+    sb.append(", path=\"").append(getPath()).append("\"");
+    sb.append(", off=\"").append(getOffset()).append("\"");
+    sb.append(", len=\"").append(getBlock().getNumBytes()).append("\"");
+    sb.append(", genStamp=\"").append(getBlock()
+        .getGenerationStamp()).append("\"");
+    sb.append(", bpid=\"").append(bpid).append("\"");
+    sb.append(" }");
+    return sb.toString();
+  }
+
+  public String getBlockPoolId() {
+    return this.bpid;
+  }
+
+}

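Given the constructors above, a FileRegion ties an HDFS block id to a (path, offset, length) triple in external storage and resolves back to a Block via getBlock(). A short usage sketch, with made-up path and block values, assuming the classes introduced in this patch are on the classpath:

    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.protocol.Block;
    import org.apache.hadoop.hdfs.server.common.FileRegion;

    public class FileRegionDemo {
      public static void main(String[] args) {
        // A block whose bytes live in an external file at a given offset/length.
        FileRegion region = new FileRegion(1001L,
            new Path("file:///data/external/part-00000"), 0L, 134217728L);

        // The alias resolves back to an HDFS Block (id, length, genstamp).
        Block block = region.getBlock();
        System.out.println(block.getBlockId() + " -> " + region);
      }
    }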
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2630e4fd/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/FileRegionProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/FileRegionProvider.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/FileRegionProvider.java
new file mode 100644
index 0000000..2e94239
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/FileRegionProvider.java
@@ -0,0 +1,37 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs.server.common;
+
+import java.io.IOException;
+import java.util.Collections;
+import java.util.Iterator;
+
+/**
+ * This class is a stub for reading file regions from the block map.
+ */
+public class FileRegionProvider implements Iterable<FileRegion> {
+  @Override
+  public Iterator<FileRegion> iterator() {
+    return Collections.emptyListIterator();
+  }
+
+  public void refresh() throws IOException {
+    return;
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2630e4fd/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
index 414d3a7..9ad61d7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
@@ -40,6 +40,7 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
@@ -196,7 +197,10 @@ public abstract class Storage extends StorageInfo {
     Iterator<StorageDirectory> it =
       (dirType == null) ? dirIterator() : dirIterator(dirType);
     for ( ;it.hasNext(); ) {
-      list.add(new File(it.next().getCurrentDir(), fileName));
+      File currentDir = it.next().getCurrentDir();
+      if (currentDir != null) {
+        list.add(new File(currentDir, fileName));
+      }
     }
     return list;
   }
@@ -328,10 +332,20 @@ public abstract class Storage extends StorageInfo {
      */
     public StorageDirectory(String bpid, StorageDirType dirType,
         boolean isShared, StorageLocation location) {
-      this(new File(location.getBpURI(bpid, STORAGE_DIR_CURRENT)), dirType,
+      this(getBlockPoolCurrentDir(bpid, location), dirType,
           isShared, location);
     }
 
+    private static File getBlockPoolCurrentDir(String bpid,
+        StorageLocation location) {
+      if (location == null ||
+          location.getStorageType() == StorageType.PROVIDED) {
+        return null;
+      } else {
+        return new File(location.getBpURI(bpid, STORAGE_DIR_CURRENT));
+      }
+    }
+
     private StorageDirectory(File dir, StorageDirType dirType,
         boolean isShared, StorageLocation location) {
       this.root = dir;
@@ -347,7 +361,8 @@ public abstract class Storage extends StorageInfo {
     }
 
     private static File getStorageLocationFile(StorageLocation location) {
-      if (location == null) {
+      if (location == null ||
+          location.getStorageType() == StorageType.PROVIDED) {
         return null;
       }
       try {
@@ -406,6 +421,10 @@ public abstract class Storage extends StorageInfo {
      */
     public void clearDirectory() throws IOException {
       File curDir = this.getCurrentDir();
+      if (curDir == null) {
+        //if the directory is null, there is nothing to do.
+        return;
+      }
       if (curDir.exists()) {
         File[] files = FileUtil.listFiles(curDir);
         LOG.info("Will remove files: " + Arrays.toString(files));
@@ -423,6 +442,9 @@ public abstract class Storage extends StorageInfo {
      * @return the directory path
      */
     public File getCurrentDir() {
+      if (root == null) {
+        return null;
+      }
       return new File(root, STORAGE_DIR_CURRENT);
     }
 
@@ -443,6 +465,9 @@ public abstract class Storage extends StorageInfo {
      * @return the version file path
      */
     public File getVersionFile() {
+      if (root == null) {
+        return null;
+      }
       return new File(new File(root, STORAGE_DIR_CURRENT), STORAGE_FILE_VERSION);
     }
 
@@ -452,6 +477,9 @@ public abstract class Storage extends StorageInfo {
      * @return the previous version file path
      */
     public File getPreviousVersionFile() {
+      if (root == null) {
+        return null;
+      }
       return new File(new File(root, STORAGE_DIR_PREVIOUS), STORAGE_FILE_VERSION);
     }
 
@@ -462,6 +490,9 @@ public abstract class Storage extends StorageInfo {
      * @return the directory path
      */
     public File getPreviousDir() {
+      if (root == null) {
+        return null;
+      }
       return new File(root, STORAGE_DIR_PREVIOUS);
     }
 
@@ -476,6 +507,9 @@ public abstract class Storage extends StorageInfo {
      * @return the directory path
      */
     public File getPreviousTmp() {
+      if (root == null) {
+        return null;
+      }
       return new File(root, STORAGE_TMP_PREVIOUS);
     }
 
@@ -490,6 +524,9 @@ public abstract class Storage extends StorageInfo {
      * @return the directory path
      */
     public File getRemovedTmp() {
+      if (root == null) {
+        return null;
+      }
       return new File(root, STORAGE_TMP_REMOVED);
     }
 
@@ -503,6 +540,9 @@ public abstract class Storage extends StorageInfo {
      * @return the directory path
      */
     public File getFinalizedTmp() {
+      if (root == null) {
+        return null;
+      }
       return new File(root, STORAGE_TMP_FINALIZED);
     }
 
@@ -517,6 +557,9 @@ public abstract class Storage extends StorageInfo {
      * @return the directory path
      */
     public File getLastCheckpointTmp() {
+      if (root == null) {
+        return null;
+      }
       return new File(root, STORAGE_TMP_LAST_CKPT);
     }
 
@@ -530,6 +573,9 @@ public abstract class Storage extends StorageInfo {
      * @return the directory path
      */
     public File getPreviousCheckpoint() {
+      if (root == null) {
+        return null;
+      }
       return new File(root, STORAGE_PREVIOUS_CKPT);
     }
 
@@ -543,7 +589,7 @@ public abstract class Storage extends StorageInfo {
     private void checkEmptyCurrent() throws InconsistentFSStateException,
         IOException {
       File currentDir = getCurrentDir();
-      if(!currentDir.exists()) {
+      if(currentDir == null || !currentDir.exists()) {
         // if current/ does not exist, it's safe to format it.
         return;
       }
@@ -589,6 +635,13 @@ public abstract class Storage extends StorageInfo {
     public StorageState analyzeStorage(StartupOption startOpt, Storage storage,
         boolean checkCurrentIsEmpty)
         throws IOException {
+
+      if (location != null &&
+          location.getStorageType() == StorageType.PROVIDED) {
+        //currently we assume that PROVIDED storages are always NORMAL
+        return StorageState.NORMAL;
+      }
+
       assert root != null : "root is null";
       boolean hadMkdirs = false;
       String rootPath = root.getCanonicalPath();
@@ -710,6 +763,10 @@ public abstract class Storage extends StorageInfo {
      */
     public void doRecover(StorageState curState) throws IOException {
       File curDir = getCurrentDir();
+      if (curDir == null || root == null) {
+        //at this point, we do not support recovery on PROVIDED storages
+        return;
+      }
       String rootPath = root.getCanonicalPath();
       switch(curState) {
       case COMPLETE_UPGRADE:  // mv previous.tmp -> previous
@@ -883,7 +940,8 @@ public abstract class Storage extends StorageInfo {
     
     @Override
     public String toString() {
-      return "Storage Directory " + this.root;
+      return "Storage Directory root= " + this.root +
+          "; location= " + this.location;
     }
 
     /**
@@ -1153,6 +1211,9 @@ public abstract class Storage extends StorageInfo {
   }
   
   public void writeProperties(File to, StorageDirectory sd) throws IOException {
+    if (to == null) {
+      return;
+    }
     Properties props = new Properties();
     setPropertiesFromFields(props, sd);
     writeProperties(to, props);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2630e4fd/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/StorageInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/StorageInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/StorageInfo.java
index 50363c9..28871e5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/StorageInfo.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/StorageInfo.java
@@ -152,6 +152,9 @@ public class StorageInfo {
    */
   protected void setFieldsFromProperties(
       Properties props, StorageDirectory sd) throws IOException {
+    if (props == null) {
+      return;
+    }
     setLayoutVersion(props, sd);
     setNamespaceID(props, sd);
     setcTime(props, sd);
@@ -241,6 +244,9 @@ public class StorageInfo {
   }
 
   public static Properties readPropertiesFile(File from) throws IOException {
+    if (from == null) {
+      return null;
+    }
     RandomAccessFile file = new RandomAccessFile(from, "rws");
     FileInputStream in = null;
     Properties props = new Properties();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2630e4fd/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/TextFileRegionFormat.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/TextFileRegionFormat.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/TextFileRegionFormat.java
new file mode 100644
index 0000000..eacd08f
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/TextFileRegionFormat.java
@@ -0,0 +1,442 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs.server.common;
+
+import java.io.File;
+import java.io.IOException;
+import java.io.BufferedReader;
+import java.io.BufferedWriter;
+import java.io.InputStream;
+import java.io.InputStreamReader;
+import java.io.OutputStream;
+import java.io.OutputStreamWriter;
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.Collections;
+import java.util.IdentityHashMap;
+import java.util.NoSuchElementException;
+
+import org.apache.hadoop.conf.Configurable;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.LocalFileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.io.MultipleIOException;
+import org.apache.hadoop.io.compress.CompressionCodec;
+import org.apache.hadoop.io.compress.CompressionCodecFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.annotations.VisibleForTesting;
+
+/**
+ * This class is used for block maps stored as text files,
+ * with a specified delimiter.
+ */
+public class TextFileRegionFormat
+    extends BlockFormat<FileRegion> implements Configurable {
+
+  private Configuration conf;
+  private ReaderOptions readerOpts = TextReader.defaults();
+  private WriterOptions writerOpts = TextWriter.defaults();
+
+  public static final Logger LOG =
+      LoggerFactory.getLogger(TextFileRegionFormat.class);
+  @Override
+  public void setConf(Configuration conf) {
+    readerOpts.setConf(conf);
+    writerOpts.setConf(conf);
+    this.conf = conf;
+  }
+
+  @Override
+  public Configuration getConf() {
+    return conf;
+  }
+
+  @Override
+  public Reader<FileRegion> getReader(Reader.Options opts)
+      throws IOException {
+    if (null == opts) {
+      opts = readerOpts;
+    }
+    if (!(opts instanceof ReaderOptions)) {
+      throw new IllegalArgumentException("Invalid options " + opts.getClass());
+    }
+    ReaderOptions o = (ReaderOptions) opts;
+    Configuration readerConf = (null == o.getConf())
+        ? new Configuration()
+            : o.getConf();
+    return createReader(o.file, o.delim, readerConf);
+  }
+
+  @VisibleForTesting
+  TextReader createReader(Path file, String delim, Configuration cfg)
+      throws IOException {
+    FileSystem fs = file.getFileSystem(cfg);
+    if (fs instanceof LocalFileSystem) {
+      fs = ((LocalFileSystem)fs).getRaw();
+    }
+    CompressionCodecFactory factory = new CompressionCodecFactory(cfg);
+    CompressionCodec codec = factory.getCodec(file);
+    return new TextReader(fs, file, codec, delim);
+  }
+
+  @Override
+  public Writer<FileRegion> getWriter(Writer.Options opts) throws IOException {
+    if (null == opts) {
+      opts = writerOpts;
+    }
+    if (!(opts instanceof WriterOptions)) {
+      throw new IllegalArgumentException("Invalid options " + opts.getClass());
+    }
+    WriterOptions o = (WriterOptions) opts;
+    Configuration cfg = (null == o.getConf())
+        ? new Configuration()
+            : o.getConf();
+    if (o.codec != null) {
+      CompressionCodecFactory factory = new CompressionCodecFactory(cfg);
+      CompressionCodec codec = factory.getCodecByName(o.codec);
+      String name = o.file.getName() + codec.getDefaultExtension();
+      o.filename(new Path(o.file.getParent(), name));
+      return createWriter(o.file, codec, o.delim, cfg);
+    }
+    return createWriter(o.file, null, o.delim, conf);
+  }
+
+  @VisibleForTesting
+  TextWriter createWriter(Path file, CompressionCodec codec, String delim,
+      Configuration cfg) throws IOException {
+    FileSystem fs = file.getFileSystem(cfg);
+    if (fs instanceof LocalFileSystem) {
+      fs = ((LocalFileSystem)fs).getRaw();
+    }
+    OutputStream tmp = fs.create(file);
+    java.io.Writer out = new BufferedWriter(new OutputStreamWriter(
+          (null == codec) ? tmp : codec.createOutputStream(tmp), "UTF-8"));
+    return new TextWriter(out, delim);
+  }
+
+  /**
+   * Class specifying reader options for the {@link TextFileRegionFormat}.
+   */
+  public static class ReaderOptions
+      implements TextReader.Options, Configurable {
+
+    private Configuration conf;
+    private String delim =
+        DFSConfigKeys.DFS_PROVIDED_BLOCK_MAP_DELIMITER_DEFAULT;
+    private Path file = new Path(
+        new File(DFSConfigKeys.DFS_PROVIDED_BLOCK_MAP_PATH_DEFAULT)
+        .toURI().toString());
+
+    @Override
+    public void setConf(Configuration conf) {
+      this.conf = conf;
+      String tmpfile = conf.get(DFSConfigKeys.DFS_PROVIDED_BLOCK_MAP_READ_PATH,
+          DFSConfigKeys.DFS_PROVIDED_BLOCK_MAP_PATH_DEFAULT);
+      file = new Path(tmpfile);
+      delim = conf.get(DFSConfigKeys.DFS_PROVIDED_BLOCK_MAP_DELIMITER,
+          DFSConfigKeys.DFS_PROVIDED_BLOCK_MAP_DELIMITER_DEFAULT);
+      LOG.info("TextFileRegionFormat: read path " + tmpfile.toString());
+    }
+
+    @Override
+    public Configuration getConf() {
+      return conf;
+    }
+
+    @Override
+    public ReaderOptions filename(Path file) {
+      this.file = file;
+      return this;
+    }
+
+    @Override
+    public ReaderOptions delimiter(String delim) {
+      this.delim = delim;
+      return this;
+    }
+  }
+
+  /**
+   * Class specifying writer options for the {@link TextFileRegionFormat}.
+   */
+  public static class WriterOptions
+      implements TextWriter.Options, Configurable {
+
+    private Configuration conf;
+    private String codec = null;
+    private Path file =
+        new Path(DFSConfigKeys.DFS_PROVIDED_BLOCK_MAP_PATH_DEFAULT);
+    private String delim =
+        DFSConfigKeys.DFS_PROVIDED_BLOCK_MAP_DELIMITER_DEFAULT;
+
+    @Override
+    public void setConf(Configuration conf) {
+      this.conf = conf;
+      String tmpfile = conf.get(
+          DFSConfigKeys.DFS_PROVIDED_BLOCK_MAP_WRITE_PATH, file.toString());
+      file = new Path(tmpfile);
+      codec = conf.get(DFSConfigKeys.DFS_PROVIDED_BLOCK_MAP_CODEC);
+      delim = conf.get(DFSConfigKeys.DFS_PROVIDED_BLOCK_MAP_DELIMITER,
+          DFSConfigKeys.DFS_PROVIDED_BLOCK_MAP_DELIMITER_DEFAULT);
+    }
+
+    @Override
+    public Configuration getConf() {
+      return conf;
+    }
+
+    @Override
+    public WriterOptions filename(Path file) {
+      this.file = file;
+      return this;
+    }
+
+    public String getCodec() {
+      return codec;
+    }
+
+    public Path getFile() {
+      return file;
+    }
+
+    @Override
+    public WriterOptions codec(String codec) {
+      this.codec = codec;
+      return this;
+    }
+
+    @Override
+    public WriterOptions delimiter(String delim) {
+      this.delim = delim;
+      return this;
+    }
+
+  }
+
+  /**
+   * This class is used as a reader for block maps which
+   * are stored as delimited text files.
+   */
+  public static class TextReader extends Reader<FileRegion> {
+
+    /**
+     * Options for {@link TextReader}.
+     */
+    public interface Options extends Reader.Options {
+      Options filename(Path file);
+      Options delimiter(String delim);
+    }
+
+    static ReaderOptions defaults() {
+      return new ReaderOptions();
+    }
+
+    private final Path file;
+    private final String delim;
+    private final FileSystem fs;
+    private final CompressionCodec codec;
+    private final Map<FRIterator, BufferedReader> iterators;
+
+    protected TextReader(FileSystem fs, Path file, CompressionCodec codec,
+        String delim) {
+      this(fs, file, codec, delim,
+          new IdentityHashMap<FRIterator, BufferedReader>());
+    }
+
+    TextReader(FileSystem fs, Path file, CompressionCodec codec, String delim,
+        Map<FRIterator, BufferedReader> iterators) {
+      this.fs = fs;
+      this.file = file;
+      this.codec = codec;
+      this.delim = delim;
+      this.iterators = Collections.synchronizedMap(iterators);
+    }
+
+    @Override
+    public FileRegion resolve(Block ident) throws IOException {
+      // consider layering index w/ composable format
+      Iterator<FileRegion> i = iterator();
+      try {
+        while (i.hasNext()) {
+          FileRegion f = i.next();
+          if (f.getBlock().equals(ident)) {
+            return f;
+          }
+        }
+      } finally {
+        BufferedReader r = iterators.remove(i);
+        if (r != null) {
+          // null on last element
+          r.close();
+        }
+      }
+      return null;
+    }
+
+    class FRIterator implements Iterator<FileRegion> {
+
+      private FileRegion pending;
+
+      @Override
+      public boolean hasNext() {
+        return pending != null;
+      }
+
+      @Override
+      public FileRegion next() {
+        if (null == pending) {
+          throw new NoSuchElementException();
+        }
+        FileRegion ret = pending;
+        try {
+          pending = nextInternal(this);
+        } catch (IOException e) {
+          throw new RuntimeException(e);
+        }
+        return ret;
+      }
+
+      @Override
+      public void remove() {
+        throw new UnsupportedOperationException();
+      }
+    }
+
+    private FileRegion nextInternal(Iterator<FileRegion> i) throws IOException {
+      BufferedReader r = iterators.get(i);
+      if (null == r) {
+        throw new IllegalStateException();
+      }
+      String line = r.readLine();
+      if (null == line) {
+        iterators.remove(i);
+        return null;
+      }
+      String[] f = line.split(delim);
+      if (f.length != 6) {
+        throw new IOException("Invalid line: " + line);
+      }
+      return new FileRegion(Long.parseLong(f[0]), new Path(f[1]),
+          Long.parseLong(f[2]), Long.parseLong(f[3]), f[5],
+          Long.parseLong(f[4]));
+    }
+
+    public InputStream createStream() throws IOException {
+      InputStream i = fs.open(file);
+      if (codec != null) {
+        i = codec.createInputStream(i);
+      }
+      return i;
+    }
+
+    @Override
+    public Iterator<FileRegion> iterator() {
+      FRIterator i = new FRIterator();
+      try {
+        BufferedReader r =
+            new BufferedReader(new InputStreamReader(createStream(), "UTF-8"));
+        iterators.put(i, r);
+        i.pending = nextInternal(i);
+      } catch (IOException e) {
+        iterators.remove(i);
+        throw new RuntimeException(e);
+      }
+      return i;
+    }
+
+    @Override
+    public void close() throws IOException {
+      ArrayList<IOException> ex = new ArrayList<>();
+      synchronized (iterators) {
+        for (Iterator<BufferedReader> i = iterators.values().iterator();
+             i.hasNext();) {
+          try {
+            BufferedReader r = i.next();
+            r.close();
+          } catch (IOException e) {
+            ex.add(e);
+          } finally {
+            i.remove();
+          }
+        }
+        iterators.clear();
+      }
+      if (!ex.isEmpty()) {
+        throw MultipleIOException.createIOException(ex);
+      }
+    }
+
+  }
+
+  /**
+   * This class is used as a writer for block maps which
+   * are stored as delimited text files.
+   */
+  public static class TextWriter extends Writer<FileRegion> {
+
+    /**
+     * Interface for Writer options.
+     */
+    public interface Options extends Writer.Options {
+      Options codec(String codec);
+      Options filename(Path file);
+      Options delimiter(String delim);
+    }
+
+    public static WriterOptions defaults() {
+      return new WriterOptions();
+    }
+
+    private final String delim;
+    private final java.io.Writer out;
+
+    public TextWriter(java.io.Writer out, String delim) {
+      this.out = out;
+      this.delim = delim;
+    }
+
+    @Override
+    public void store(FileRegion token) throws IOException {
+      out.append(String.valueOf(token.getBlock().getBlockId())).append(delim);
+      out.append(token.getPath().toString()).append(delim);
+      out.append(Long.toString(token.getOffset())).append(delim);
+      out.append(Long.toString(token.getLength())).append(delim);
+      out.append(Long.toString(token.getGenerationStamp())).append(delim);
+      out.append(token.getBlockPoolId()).append("\n");
+    }
+
+    @Override
+    public void close() throws IOException {
+      out.close();
+    }
+
+  }
+
+  @Override
+  public void refresh() throws IOException {
+    //nothing to do;
+  }
+
+}
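For readers skimming the patch, a minimal sketch (not part of the commit) of driving the format above to dump a delimited block map. The configuration keys, TextFileRegionFormat, BlockFormat.Reader and FileRegion are the classes added on this branch; the file path and delimiter values are placeholders, and each line of the map is expected to carry the six delimited fields written by TextWriter.store().

    import java.util.Iterator;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSConfigKeys;
    import org.apache.hadoop.hdfs.server.common.BlockFormat;
    import org.apache.hadoop.hdfs.server.common.FileRegion;
    import org.apache.hadoop.hdfs.server.common.TextFileRegionFormat;

    public class BlockMapDump {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Placeholder block map: one line per block with the six delimited fields
        // written by TextWriter.store() (id, path, offset, length, genstamp, blockpool).
        conf.set(DFSConfigKeys.DFS_PROVIDED_BLOCK_MAP_READ_PATH, "file:///tmp/blocks.csv");
        conf.set(DFSConfigKeys.DFS_PROVIDED_BLOCK_MAP_DELIMITER, ",");

        TextFileRegionFormat fmt = new TextFileRegionFormat();
        fmt.setConf(conf);

        // Passing null picks up the ReaderOptions configured above.
        BlockFormat.Reader<FileRegion> reader = fmt.getReader(null);
        Iterator<FileRegion> it = reader.iterator();
        while (it.hasNext()) {
          FileRegion region = it.next();
          System.out.println(region.getBlock() + " @ " + region.getPath());
        }
        reader.close();
      }
    }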

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2630e4fd/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/TextFileRegionProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/TextFileRegionProvider.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/TextFileRegionProvider.java
new file mode 100644
index 0000000..0fa667e
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/TextFileRegionProvider.java
@@ -0,0 +1,88 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs.server.common;
+
+import java.io.IOException;
+import java.util.Iterator;
+
+import org.apache.hadoop.conf.Configurable;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.util.ReflectionUtils;
+
+/**
+ * This class is used to read file regions from block maps
+ * specified using delimited text.
+ */
+public class TextFileRegionProvider
+    extends FileRegionProvider implements Configurable {
+
+  private Configuration conf;
+  private BlockFormat<FileRegion> fmt;
+
+  @SuppressWarnings("unchecked")
+  @Override
+  public void setConf(Configuration conf) {
+    fmt = ReflectionUtils.newInstance(
+        conf.getClass(DFSConfigKeys.DFS_PROVIDER_BLK_FORMAT_CLASS,
+            TextFileRegionFormat.class,
+            BlockFormat.class),
+        conf);
+    ((Configurable)fmt).setConf(conf); // redundant: newInstance already sets conf for Configurables
+    this.conf = conf;
+  }
+
+  @Override
+  public Configuration getConf() {
+    return conf;
+  }
+
+  @Override
+  public Iterator<FileRegion> iterator() {
+    try {
+      final BlockFormat.Reader<FileRegion> r = fmt.getReader(null);
+      return new Iterator<FileRegion>() {
+
+        private final Iterator<FileRegion> inner = r.iterator();
+
+        @Override
+        public boolean hasNext() {
+          return inner.hasNext();
+        }
+
+        @Override
+        public FileRegion next() {
+          return inner.next();
+        }
+
+        @Override
+        public void remove() {
+          throw new UnsupportedOperationException();
+        }
+      };
+    } catch (IOException e) {
+      throw new RuntimeException("Failed to read provided blocks", e);
+    }
+  }
+
+  @Override
+  public void refresh() throws IOException {
+    fmt.refresh();
+  }
+}
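A companion sketch of wiring the provider, again illustrative only and written as a fragment rather than a full class: DFS_PROVIDER_BLK_FORMAT_CLASS, TextFileRegionProvider, BlockFormat and FileRegion come from this branch, while the surrounding setup is assumed.

    Configuration conf = new Configuration();
    conf.setClass(DFSConfigKeys.DFS_PROVIDER_BLK_FORMAT_CLASS,
        TextFileRegionFormat.class, BlockFormat.class);

    TextFileRegionProvider provider = new TextFileRegionProvider();
    provider.setConf(conf);                        // instantiates the configured BlockFormat
    Iterator<FileRegion> it = provider.iterator(); // streams FileRegions from the block map
    while (it.hasNext()) {
      FileRegion region = it.next();
      // each region maps a block id onto an (offset, length) range of a remote file
    }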

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2630e4fd/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java
index bc41715..012d1f5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java
@@ -36,6 +36,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.HardLink;
+import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.protocol.LayoutVersion;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType;
@@ -360,6 +361,9 @@ public class BlockPoolSliceStorage extends Storage {
   private boolean doTransition(StorageDirectory sd, NamespaceInfo nsInfo,
       StartupOption startOpt, List<Callable<StorageDirectory>> callables,
       Configuration conf) throws IOException {
+    if (sd.getStorageLocation().getStorageType() == StorageType.PROVIDED) {
+      return false; // regular startup for PROVIDED storage directories
+    }
     if (startOpt == StartupOption.ROLLBACK && sd.getPreviousDir().exists()) {
       Preconditions.checkState(!getTrashRootDir(sd).exists(),
           sd.getPreviousDir() + " and " + getTrashRootDir(sd) + " should not " +
@@ -439,6 +443,10 @@ public class BlockPoolSliceStorage extends Storage {
         LayoutVersion.Feature.FEDERATION, layoutVersion)) {
       return;
     }
+    //no upgrades for storage directories that are PROVIDED
+    if (bpSd.getRoot() == null) {
+      return;
+    }
     final int oldLV = getLayoutVersion();
     LOG.info("Upgrading block pool storage directory " + bpSd.getRoot()
         + ".\n   old LV = " + oldLV
@@ -589,8 +597,9 @@ public class BlockPoolSliceStorage extends Storage {
       throws IOException {
     File prevDir = bpSd.getPreviousDir();
     // regular startup if previous dir does not exist
-    if (!prevDir.exists())
+    if (prevDir == null || !prevDir.exists()) {
       return;
+    }
     // read attributes out of the VERSION file of previous directory
     BlockPoolSliceStorage prevInfo = new BlockPoolSliceStorage();
     prevInfo.readPreviousVersionProperties(bpSd);
@@ -631,6 +640,10 @@ public class BlockPoolSliceStorage extends Storage {
    * that holds the snapshot.
    */
   void doFinalize(File dnCurDir) throws IOException {
+    LOG.info("doFinalize: " + dnCurDir);
+    if (dnCurDir == null) {
+      return; //we do nothing if the directory is null
+    }
     File bpRoot = getBpRoot(blockpoolID, dnCurDir);
     StorageDirectory bpSd = new StorageDirectory(bpRoot);
     // block pool level previous directory
@@ -841,6 +854,9 @@ public class BlockPoolSliceStorage extends Storage {
   public void setRollingUpgradeMarkers(List<StorageDirectory> dnStorageDirs)
       throws IOException {
     for (StorageDirectory sd : dnStorageDirs) {
+      if (sd.getCurrentDir() == null) {
+        continue;
+      }
       File bpRoot = getBpRoot(blockpoolID, sd.getCurrentDir());
       File markerFile = new File(bpRoot, ROLLING_UPGRADE_MARKER_FILE);
       if (!storagesWithRollingUpgradeMarker.contains(bpRoot.toString())) {
@@ -863,6 +879,9 @@ public class BlockPoolSliceStorage extends Storage {
   public void clearRollingUpgradeMarkers(List<StorageDirectory> dnStorageDirs)
       throws IOException {
     for (StorageDirectory sd : dnStorageDirs) {
+      if (sd.getCurrentDir() == null) {
+        continue;
+      }
       File bpRoot = getBpRoot(blockpoolID, sd.getCurrentDir());
       File markerFile = new File(bpRoot, ROLLING_UPGRADE_MARKER_FILE);
       if (!storagesWithoutRollingUpgradeMarker.contains(bpRoot.toString())) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2630e4fd/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
index 6d6e96a..a1bde31 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
@@ -48,6 +48,7 @@ import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.HardLink;
+import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdfs.protocol.Block;
@@ -129,22 +130,31 @@ public class DataStorage extends Storage {
     this.datanodeUuid = newDatanodeUuid;
   }
 
-  private static boolean createStorageID(StorageDirectory sd, int lv) {
+  private static boolean createStorageID(StorageDirectory sd, int lv,
+      Configuration conf) {
     // Clusters previously upgraded from layout versions earlier than
     // ADD_DATANODE_AND_STORAGE_UUIDS failed to correctly generate a
     // new storage ID. We check for that and fix it now.
     final boolean haveValidStorageId = DataNodeLayoutVersion.supports(
         LayoutVersion.Feature.ADD_DATANODE_AND_STORAGE_UUIDS, lv)
         && DatanodeStorage.isValidStorageId(sd.getStorageUuid());
-    return createStorageID(sd, !haveValidStorageId);
+    return createStorageID(sd, !haveValidStorageId, conf);
   }
 
   /** Create an ID for this storage.
    * @return true if a new storage ID was generated.
    * */
   public static boolean createStorageID(
-      StorageDirectory sd, boolean regenerateStorageIds) {
+      StorageDirectory sd, boolean regenerateStorageIds, Configuration conf) {
     final String oldStorageID = sd.getStorageUuid();
+    if (sd.getStorageLocation() != null &&
+        sd.getStorageLocation().getStorageType() == StorageType.PROVIDED) {
+      // We only support one provided storage per datanode for now.
+      // TODO support multiple provided storage ids per datanode.
+      sd.setStorageUuid(conf.get(DFSConfigKeys.DFS_PROVIDER_STORAGEUUID,
+          DFSConfigKeys.DFS_PROVIDER_STORAGEUUID_DEFAULT));
+      return false;
+    }
     if (oldStorageID == null || regenerateStorageIds) {
       sd.setStorageUuid(DatanodeStorage.generateUuid());
       LOG.info("Generated new storageID " + sd.getStorageUuid() +
@@ -273,7 +283,7 @@ public class DataStorage extends Storage {
         LOG.info("Storage directory with location " + location
             + " is not formatted for namespace " + nsInfo.getNamespaceID()
             + ". Formatting...");
-        format(sd, nsInfo, datanode.getDatanodeUuid());
+        format(sd, nsInfo, datanode.getDatanodeUuid(), datanode.getConf());
         break;
       default:  // recovery part is common
         sd.doRecover(curState);
@@ -547,15 +557,15 @@ public class DataStorage extends Storage {
   }
 
   void format(StorageDirectory sd, NamespaceInfo nsInfo,
-              String datanodeUuid) throws IOException {
+              String newDatanodeUuid, Configuration conf) throws IOException {
     sd.clearDirectory(); // create directory
     this.layoutVersion = HdfsServerConstants.DATANODE_LAYOUT_VERSION;
     this.clusterID = nsInfo.getClusterID();
     this.namespaceID = nsInfo.getNamespaceID();
     this.cTime = 0;
-    setDatanodeUuid(datanodeUuid);
+    setDatanodeUuid(newDatanodeUuid);
 
-    createStorageID(sd, false);
+    createStorageID(sd, false, conf);
     writeProperties(sd);
   }
 
@@ -600,6 +610,9 @@ public class DataStorage extends Storage {
 
   private void setFieldsFromProperties(Properties props, StorageDirectory sd,
       boolean overrideLayoutVersion, int toLayoutVersion) throws IOException {
+    if (props == null) {
+      return;
+    }
     if (overrideLayoutVersion) {
       this.layoutVersion = toLayoutVersion;
     } else {
@@ -694,6 +707,10 @@ public class DataStorage extends Storage {
   private boolean doTransition(StorageDirectory sd, NamespaceInfo nsInfo,
       StartupOption startOpt, List<Callable<StorageDirectory>> callables,
       Configuration conf) throws IOException {
+    if (sd.getStorageLocation().getStorageType() == StorageType.PROVIDED) {
+      createStorageID(sd, layoutVersion, conf);
+      return false; // regular start up for PROVIDED storage directories
+    }
     if (startOpt == StartupOption.ROLLBACK) {
       doRollback(sd, nsInfo); // rollback if applicable
     }
@@ -724,7 +741,7 @@ public class DataStorage extends Storage {
 
     // regular start up.
     if (this.layoutVersion == HdfsServerConstants.DATANODE_LAYOUT_VERSION) {
-      createStorageID(sd, layoutVersion);
+      createStorageID(sd, layoutVersion, conf);
       return false; // need to write properties
     }
 
@@ -733,7 +750,7 @@ public class DataStorage extends Storage {
       if (federationSupported) {
         // If the existing on-disk layout version supports federation,
         // simply update the properties.
-        upgradeProperties(sd);
+        upgradeProperties(sd, conf);
       } else {
         doUpgradePreFederation(sd, nsInfo, callables, conf);
       }
@@ -829,15 +846,16 @@ public class DataStorage extends Storage {
 
     // 4. Write version file under <SD>/current
     clusterID = nsInfo.getClusterID();
-    upgradeProperties(sd);
+    upgradeProperties(sd, conf);
     
     // 5. Rename <SD>/previous.tmp to <SD>/previous
     rename(tmpDir, prevDir);
     LOG.info("Upgrade of " + sd.getRoot()+ " is complete");
   }
 
-  void upgradeProperties(StorageDirectory sd) throws IOException {
-    createStorageID(sd, layoutVersion);
+  void upgradeProperties(StorageDirectory sd, Configuration conf)
+      throws IOException {
+    createStorageID(sd, layoutVersion, conf);
     LOG.info("Updating layout version from " + layoutVersion
         + " to " + HdfsServerConstants.DATANODE_LAYOUT_VERSION
         + " for storage " + sd.getRoot());
@@ -989,7 +1007,7 @@ public class DataStorage extends Storage {
     // then finalize it. Else finalize the corresponding BP.
     for (StorageDirectory sd : getStorageDirs()) {
       File prevDir = sd.getPreviousDir();
-      if (prevDir.exists()) {
+      if (prevDir != null && prevDir.exists()) {
         // data node level storage finalize
         doFinalize(sd);
       } else {

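As a small illustration (not from the patch) of the configuration hook that createStorageID() now consults for PROVIDED directories: the key DFS_PROVIDER_STORAGEUUID is added on this branch, and the UUID string below is a placeholder.

    Configuration conf = new HdfsConfiguration();
    // Placeholder id; for a PROVIDED StorageDirectory, createStorageID(sd, false, conf)
    // applies this value via sd.setStorageUuid(...) and returns false (no id is generated).
    conf.set(DFSConfigKeys.DFS_PROVIDER_STORAGEUUID, "DS-PROVIDED-0");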
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2630e4fd/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java
index 18188dd..655b5e7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java
@@ -44,6 +44,7 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.util.AutoCloseableLock;
+import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
@@ -105,7 +106,7 @@ public class DirectoryScanner implements Runnable {
    * @param b whether to retain diffs
    */
   @VisibleForTesting
-  void setRetainDiffs(boolean b) {
+  public void setRetainDiffs(boolean b) {
     retainDiffs = b;
   }
 
@@ -215,7 +216,8 @@ public class DirectoryScanner implements Runnable {
    * @param dataset the dataset to scan
    * @param conf the Configuration object
    */
-  DirectoryScanner(DataNode datanode, FsDatasetSpi<?> dataset, Configuration conf) {
+  public DirectoryScanner(DataNode datanode, FsDatasetSpi<?> dataset,
+      Configuration conf) {
     this.datanode = datanode;
     this.dataset = dataset;
     int interval = (int) conf.getTimeDuration(
@@ -369,15 +371,14 @@ public class DirectoryScanner implements Runnable {
    * Reconcile differences between disk and in-memory blocks
    */
   @VisibleForTesting
-  void reconcile() throws IOException {
+  public void reconcile() throws IOException {
     scan();
     for (Entry<String, LinkedList<ScanInfo>> entry : diffs.entrySet()) {
       String bpid = entry.getKey();
       LinkedList<ScanInfo> diff = entry.getValue();
       
       for (ScanInfo info : diff) {
-        dataset.checkAndUpdate(bpid, info.getBlockId(), info.getBlockFile(),
-            info.getMetaFile(), info.getVolume());
+        dataset.checkAndUpdate(bpid, info);
       }
     }
     if (!retainDiffs) clear();
@@ -429,11 +430,12 @@ public class DirectoryScanner implements Runnable {
           }
           // Block file and/or metadata file exists on the disk
           // Block exists in memory
-          if (info.getBlockFile() == null) {
+          if (info.getVolume().getStorageType() != StorageType.PROVIDED &&
+              info.getBlockFile() == null) {
             // Block metadata file exits and block file is missing
             addDifference(diffRecord, statsRecord, info);
           } else if (info.getGenStamp() != memBlock.getGenerationStamp()
-              || info.getBlockFileLength() != memBlock.getNumBytes()) {
+              || info.getBlockLength() != memBlock.getNumBytes()) {
             // Block metadata file is missing or has wrong generation stamp,
             // or block file length is different than expected
             statsRecord.mismatchBlocks++;
@@ -609,8 +611,8 @@ public class DirectoryScanner implements Runnable {
       for (String bpid : bpList) {
         LinkedList<ScanInfo> report = new LinkedList<>();
 
-        perfTimer.start();
-        throttleTimer.start();
+        perfTimer.reset().start();
+        throttleTimer.reset().start();
 
         try {
           result.put(bpid, volume.compileReport(bpid, report, this));

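Because the constructor, setRetainDiffs() and reconcile() are now public, a caller outside the package (for example a test in another module) could drive a scan directly. A sketch under the assumption that datanode, dataset and conf instances already exist:

    DirectoryScanner scanner = new DirectoryScanner(datanode, dataset, conf);
    scanner.setRetainDiffs(true);  // keep per-blockpool diffs around for inspection
    scanner.reconcile();           // scans volumes, then calls dataset.checkAndUpdate(bpid, scanInfo)
    scanner.shutdown();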
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2630e4fd/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FinalizedProvidedReplica.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FinalizedProvidedReplica.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FinalizedProvidedReplica.java
new file mode 100644
index 0000000..722d573
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FinalizedProvidedReplica.java
@@ -0,0 +1,91 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.datanode;
+
+import java.net.URI;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
+import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
+import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo;
+
+/**
+ * This class is used for provided replicas that are finalized.
+ */
+public class FinalizedProvidedReplica extends ProvidedReplica {
+
+  public FinalizedProvidedReplica(long blockId, URI fileURI,
+      long fileOffset, long blockLen, long genStamp,
+      FsVolumeSpi volume, Configuration conf) {
+    super(blockId, fileURI, fileOffset, blockLen, genStamp, volume, conf);
+  }
+
+  @Override
+  public ReplicaState getState() {
+    return ReplicaState.FINALIZED;
+  }
+
+  @Override
+  public long getBytesOnDisk() {
+    return getNumBytes();
+  }
+
+  @Override
+  public long getVisibleLength() {
+    return getNumBytes(); //all bytes are visible
+  }
+
+  @Override  // Object
+  public boolean equals(Object o) {
+    return super.equals(o);
+  }
+
+  @Override  // Object
+  public int hashCode() {
+    return super.hashCode();
+  }
+
+  @Override
+  public String toString() {
+    return super.toString();
+  }
+
+  @Override
+  public ReplicaInfo getOriginalReplica() {
+    throw new UnsupportedOperationException("Replica of type " + getState() +
+        " does not support getOriginalReplica");
+  }
+
+  @Override
+  public long getRecoveryID() {
+    throw new UnsupportedOperationException("Replica of type " + getState() +
+        " does not support getRecoveryID");
+  }
+
+  @Override
+  public void setRecoveryID(long recoveryId) {
+    throw new UnsupportedOperationException("Replica of type " + getState() +
+        " does not support setRecoveryID");
+  }
+
+  @Override
+  public ReplicaRecoveryInfo createInfo() {
+    throw new UnsupportedOperationException("Replica of type " + getState() +
+        " does not support createInfo");
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2630e4fd/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ProvidedReplica.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ProvidedReplica.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ProvidedReplica.java
new file mode 100644
index 0000000..b021ea2
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ProvidedReplica.java
@@ -0,0 +1,248 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.datanode;
+
+import java.io.ByteArrayInputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.net.URI;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.LocalFileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.server.common.FileRegion;
+import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
+import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi.ScanInfo;
+import org.apache.hadoop.hdfs.server.datanode.fsdataset.LengthInputStream;
+import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetUtil;
+import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * This abstract class is used as a base class for provided replicas.
+ */
+public abstract class ProvidedReplica extends ReplicaInfo {
+
+  public static final Logger LOG =
+      LoggerFactory.getLogger(ProvidedReplica.class);
+
+  // Null checksum information for provided replicas.
+  // Shared across all replicas.
+  static final byte[] NULL_CHECKSUM_ARRAY =
+      FsDatasetUtil.createNullChecksumByteArray();
+  private URI fileURI;
+  private long fileOffset;
+  private Configuration conf;
+  private FileSystem remoteFS;
+
+  /**
+   * Constructor.
+   * @param blockId block id
+   * @param fileURI remote URI this block is to be read from
+   * @param fileOffset the offset in the remote URI
+   * @param blockLen the length of the block
+   * @param genStamp the generation stamp of the block
+   * @param volume the volume this block belongs to
+   */
+  public ProvidedReplica(long blockId, URI fileURI, long fileOffset,
+      long blockLen, long genStamp, FsVolumeSpi volume, Configuration conf) {
+    super(volume, blockId, blockLen, genStamp);
+    this.fileURI = fileURI;
+    this.fileOffset = fileOffset;
+    this.conf = conf;
+    try {
+      this.remoteFS = FileSystem.get(fileURI, this.conf);
+    } catch (IOException e) {
+      LOG.warn("Failed to obtain filesystem for " + fileURI);
+      this.remoteFS = null;
+    }
+  }
+
+  public ProvidedReplica(ProvidedReplica r) {
+    super(r);
+    this.fileURI = r.fileURI;
+    this.fileOffset = r.fileOffset;
+    this.conf = r.conf;
+    try {
+      this.remoteFS = FileSystem.newInstance(fileURI, this.conf);
+    } catch (IOException e) {
+      this.remoteFS = null;
+    }
+  }
+
+  @Override
+  public URI getBlockURI() {
+    return this.fileURI;
+  }
+
+  @Override
+  public InputStream getDataInputStream(long seekOffset) throws IOException {
+    if (remoteFS != null) {
+      FSDataInputStream ins = remoteFS.open(new Path(fileURI));
+      ins.seek(fileOffset + seekOffset);
+      return new FSDataInputStream(ins);
+    } else {
+      throw new IOException("Remote filesystem for provided replica " + this +
+          " does not exist");
+    }
+  }
+
+  @Override
+  public OutputStream getDataOutputStream(boolean append) throws IOException {
+    throw new UnsupportedOperationException(
+        "OutputDataStream is not implemented for ProvidedReplica");
+  }
+
+  @Override
+  public URI getMetadataURI() {
+    return null;
+  }
+
+  @Override
+  public OutputStream getMetadataOutputStream(boolean append)
+      throws IOException {
+    return null;
+  }
+
+  @Override
+  public boolean blockDataExists() {
+    if(remoteFS != null) {
+      try {
+        return remoteFS.exists(new Path(fileURI));
+      } catch (IOException e) {
+        return false;
+      }
+    } else {
+      return false;
+    }
+  }
+
+  @Override
+  public boolean deleteBlockData() {
+    throw new UnsupportedOperationException(
+        "ProvidedReplica does not support deleting block data");
+  }
+
+  @Override
+  public long getBlockDataLength() {
+    return this.getNumBytes();
+  }
+
+  @Override
+  public LengthInputStream getMetadataInputStream(long offset)
+      throws IOException {
+    return new LengthInputStream(new ByteArrayInputStream(NULL_CHECKSUM_ARRAY),
+        NULL_CHECKSUM_ARRAY.length);
+  }
+
+  @Override
+  public boolean metadataExists() {
+    return NULL_CHECKSUM_ARRAY != null;
+  }
+
+  @Override
+  public boolean deleteMetadata() {
+    throw new UnsupportedOperationException(
+        "ProvidedReplica does not support deleting metadata");
+  }
+
+  @Override
+  public long getMetadataLength() {
+    return NULL_CHECKSUM_ARRAY == null ? 0 : NULL_CHECKSUM_ARRAY.length;
+  }
+
+  @Override
+  public boolean renameMeta(URI destURI) throws IOException {
+    throw new UnsupportedOperationException(
+        "ProvidedReplica does not support renaming metadata");
+  }
+
+  @Override
+  public boolean renameData(URI destURI) throws IOException {
+    throw new UnsupportedOperationException(
+        "ProvidedReplica does not support renaming data");
+  }
+
+  @Override
+  public boolean getPinning(LocalFileSystem localFS) throws IOException {
+    return false;
+  }
+
+  @Override
+  public void setPinning(LocalFileSystem localFS) throws IOException {
+    throw new UnsupportedOperationException(
+        "ProvidedReplica does not support pinning");
+  }
+
+  @Override
+  public void bumpReplicaGS(long newGS) throws IOException {
+    throw new UnsupportedOperationException(
+        "ProvidedReplica does not yet support writes");
+  }
+
+  @Override
+  public boolean breakHardLinksIfNeeded() throws IOException {
+    return false;
+  }
+
+  @Override
+  public ReplicaRecoveryInfo createInfo()
+      throws UnsupportedOperationException {
+    throw new UnsupportedOperationException(
+        "ProvidedReplica does not yet support writes");
+  }
+
+  @Override
+  public int compareWith(ScanInfo info) {
+    //local scanning cannot find any provided blocks.
+    if (info.getFileRegion().equals(
+        new FileRegion(this.getBlockId(), new Path(fileURI),
+            fileOffset, this.getNumBytes(), this.getGenerationStamp()))) {
+      return 0;
+    } else {
+      return (int) (info.getBlockLength() - getNumBytes());
+    }
+  }
+
+  @Override
+  public void truncateBlock(long newLength) throws IOException {
+    throw new UnsupportedOperationException(
+        "ProvidedReplica does not yet support truncate");
+  }
+
+  @Override
+  public void updateWithReplica(StorageLocation replicaLocation) {
+    throw new UnsupportedOperationException(
+        "ProvidedReplica does not yet support update");
+  }
+
+  @Override
+  public void copyMetadata(URI destination) throws IOException {
+    throw new UnsupportedOperationException(
+        "ProvidedReplica does not yet support copy metadata");
+  }
+
+  @Override
+  public void copyBlockdata(URI destination) throws IOException {
+    throw new UnsupportedOperationException(
+        "ProvidedReplica does not yet support copy data");
+  }
+}
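To make the provided read path concrete, a purely illustrative fragment follows; the URI and block numbers are placeholders, and the PROVIDED-backed FsVolumeSpi named volume and the Configuration named conf are assumed context.

    URI remote = URI.create("hdfs://backing-namespace/data/part-00000");  // placeholder
    FinalizedProvidedReplica replica = new FinalizedProvidedReplica(
        1L,       // block id
        remote,   // file that holds the block bytes
        0L,       // offset of the block within that file
        4096L,    // block length
        1001L,    // generation stamp
        volume,   // assumed PROVIDED-backed FsVolumeSpi
        conf);
    InputStream in = replica.getDataInputStream(0);  // opens the remote FS and seeks to the offset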

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2630e4fd/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaBuilder.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaBuilder.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaBuilder.java
index 280aaa0..639467f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaBuilder.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaBuilder.java
@@ -18,9 +18,13 @@
 package org.apache.hadoop.hdfs.server.datanode;
 
 import java.io.File;
+import java.net.URI;
 
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.server.common.FileRegion;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
 
 /**
@@ -42,11 +46,20 @@ public class ReplicaBuilder {
 
   private ReplicaInfo fromReplica;
 
+  private URI uri;
+  private long offset;
+  private Configuration conf;
+  private FileRegion fileRegion;
+
   public ReplicaBuilder(ReplicaState state) {
     volume = null;
     writer = null;
     block = null;
     length = -1;
+    fileRegion = null;
+    conf = null;
+    fromReplica = null;
+    uri = null;
     this.state = state;
   }
 
@@ -105,6 +118,26 @@ public class ReplicaBuilder {
     return this;
   }
 
+  public ReplicaBuilder setURI(URI uri) {
+    this.uri = uri;
+    return this;
+  }
+
+  public ReplicaBuilder setConf(Configuration conf) {
+    this.conf = conf;
+    return this;
+  }
+
+  public ReplicaBuilder setOffset(long offset) {
+    this.offset = offset;
+    return this;
+  }
+
+  public ReplicaBuilder setFileRegion(FileRegion fileRegion) {
+    this.fileRegion = fileRegion;
+    return this;
+  }
+
   public LocalReplicaInPipeline buildLocalReplicaInPipeline()
       throws IllegalArgumentException {
     LocalReplicaInPipeline info = null;
@@ -176,7 +209,7 @@ public class ReplicaBuilder {
     }
   }
 
-  private ReplicaInfo buildFinalizedReplica() throws IllegalArgumentException {
+  private LocalReplica buildFinalizedReplica() throws IllegalArgumentException {
     if (null != fromReplica &&
         fromReplica.getState() == ReplicaState.FINALIZED) {
       return new FinalizedReplica((FinalizedReplica)fromReplica);
@@ -193,7 +226,7 @@ public class ReplicaBuilder {
     }
   }
 
-  private ReplicaInfo buildRWR() throws IllegalArgumentException {
+  private LocalReplica buildRWR() throws IllegalArgumentException {
 
     if (null != fromReplica && fromReplica.getState() == ReplicaState.RWR) {
       return new ReplicaWaitingToBeRecovered(
@@ -211,7 +244,7 @@ public class ReplicaBuilder {
     }
   }
 
-  private ReplicaInfo buildRUR() throws IllegalArgumentException {
+  private LocalReplica buildRUR() throws IllegalArgumentException {
     if (null == fromReplica) {
       throw new IllegalArgumentException(
           "Missing a valid replica to recover from");
@@ -228,8 +261,53 @@ public class ReplicaBuilder {
     }
   }
 
-  public ReplicaInfo build() throws IllegalArgumentException {
-    ReplicaInfo info = null;
+  private ProvidedReplica buildProvidedFinalizedReplica()
+      throws IllegalArgumentException {
+    ProvidedReplica info = null;
+    if (fromReplica != null) {
+      throw new IllegalArgumentException("Finalized PROVIDED replica " +
+          "cannot be constructed from another replica");
+    }
+    if (fileRegion == null && uri == null) {
+      throw new IllegalArgumentException(
+          "Trying to construct a provided replica on " + volume +
+          " without enough information");
+    }
+    if (fileRegion == null) {
+      info = new FinalizedProvidedReplica(blockId, uri, offset,
+          length, genStamp, volume, conf);
+    } else {
+      info = new FinalizedProvidedReplica(fileRegion.getBlock().getBlockId(),
+          fileRegion.getPath().toUri(),
+          fileRegion.getOffset(),
+          fileRegion.getBlock().getNumBytes(),
+          fileRegion.getBlock().getGenerationStamp(),
+          volume, conf);
+    }
+    return info;
+  }
+
+  private ProvidedReplica buildProvidedReplica()
+      throws IllegalArgumentException {
+    ProvidedReplica info = null;
+    switch(this.state) {
+    case FINALIZED:
+      info = buildProvidedFinalizedReplica();
+      break;
+    case RWR:
+    case RUR:
+    case RBW:
+    case TEMPORARY:
+    default:
+      throw new IllegalArgumentException("Unknown replica state " +
+          state + " for PROVIDED replica");
+    }
+    return info;
+  }
+
+  private LocalReplica buildLocalReplica()
+      throws IllegalArgumentException {
+    LocalReplica info = null;
     switch(this.state) {
     case FINALIZED:
       info = buildFinalizedReplica();
@@ -249,4 +327,16 @@ public class ReplicaBuilder {
     }
     return info;
   }
+
+  public ReplicaInfo build() throws IllegalArgumentException {
+
+    ReplicaInfo info = null;
+    if(volume != null && volume.getStorageType() == StorageType.PROVIDED) {
+      info = buildProvidedReplica();
+    } else {
+      info = buildLocalReplica();
+    }
+
+    return info;
+  }
 }
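A sketch of the new build path, illustrative only: region, providedVolume and conf are assumed to exist, and the volume setter name (setFsVolume) is taken from the builder's existing fluent API rather than from this diff.

    ReplicaInfo replica = new ReplicaBuilder(ReplicaState.FINALIZED)
        .setFsVolume(providedVolume)  // a volume whose getStorageType() is PROVIDED
        .setFileRegion(region)        // FileRegion read from the block map
        .setConf(conf)
        .build();                     // build() now dispatches to buildProvidedReplica()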

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2630e4fd/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInfo.java
index 65e9ba7..3718799 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInfo.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInfo.java
@@ -50,6 +50,17 @@ abstract public class ReplicaInfo extends Block
       new FileIoProvider(null, null);
 
   /**
+   * Constructor.
+   * @param block a block
+   * @param vol volume where replica is located
+   * @param dir directory path where block and meta files are located
+   */
+  ReplicaInfo(Block block, FsVolumeSpi vol) {
+    this(vol, block.getBlockId(), block.getNumBytes(),
+        block.getGenerationStamp());
+  }
+
+  /**
   * Constructor
   * @param vol volume where replica is located
   * @param blockId block id
@@ -62,7 +73,14 @@ abstract public class ReplicaInfo extends Block
   }
   
   /**
-   * Get the volume where this replica is located on disk.
+   * Copy constructor.
+   * @param from where to copy from
+   */
+  ReplicaInfo(ReplicaInfo from) {
+    this(from, from.getVolume());
+  }
+
+  /**
    * @return the volume where this replica is located on disk
    */
   public FsVolumeSpi getVolume() {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2630e4fd/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StorageLocation.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StorageLocation.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StorageLocation.java
index b4d5794..fb7acfd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StorageLocation.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StorageLocation.java
@@ -98,6 +98,16 @@ public class StorageLocation
 
   public boolean matchesStorageDirectory(StorageDirectory sd,
       String bpid) throws IOException {
+    if (sd.getStorageLocation().getStorageType() == StorageType.PROVIDED &&
+        storageType == StorageType.PROVIDED) {
+      return matchesStorageDirectory(sd);
+    }
+    if (sd.getStorageLocation().getStorageType() == StorageType.PROVIDED ||
+        storageType == StorageType.PROVIDED) {
+      //only one of these is PROVIDED; so it cannot be a match!
+      return false;
+    }
+    //both storage directories are local
     return this.getBpURI(bpid, Storage.STORAGE_DIR_CURRENT).normalize()
         .equals(sd.getRoot().toURI().normalize());
   }
@@ -197,6 +207,10 @@ public class StorageLocation
     if (conf == null) {
       conf = new HdfsConfiguration();
     }
+    if (storageType == StorageType.PROVIDED) {
+      //skip creation if the storage type is PROVIDED
+      return;
+    }
 
     LocalFileSystem localFS = FileSystem.getLocal(conf);
     FsPermission permission = new FsPermission(conf.get(
@@ -213,10 +227,14 @@ public class StorageLocation
 
   @Override  // Checkable
   public VolumeCheckResult check(CheckContext context) throws IOException {
-    DiskChecker.checkDir(
-        context.localFileSystem,
-        new Path(baseURI),
-        context.expectedPermission);
+    //we assume provided storage locations are always healthy,
+    //and check only for local storages.
+    if (storageType != StorageType.PROVIDED) {
+      DiskChecker.checkDir(
+          context.localFileSystem,
+          new Path(baseURI),
+          context.expectedPermission);
+    }
     return VolumeCheckResult.HEALTHY;
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2630e4fd/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsDatasetSpi.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsDatasetSpi.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsDatasetSpi.java
index d7e29cf..5a40847 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsDatasetSpi.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsDatasetSpi.java
@@ -51,6 +51,7 @@ import org.apache.hadoop.hdfs.server.datanode.ReplicaInfo;
 import org.apache.hadoop.hdfs.server.datanode.ReplicaNotFoundException;
 import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
 import org.apache.hadoop.hdfs.server.datanode.UnexpectedReplicaStateException;
+import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi.ScanInfo;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetFactory;
 import org.apache.hadoop.hdfs.server.datanode.metrics.FSDatasetMBean;
 import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock;
@@ -252,8 +253,7 @@ public interface FsDatasetSpi<V extends FsVolumeSpi> extends FSDatasetMBean {
    * and, in case that they are not matched, update the record or mark it
    * as corrupted.
    */
-  void checkAndUpdate(String bpid, long blockId, File diskFile,
-      File diskMetaFile, FsVolumeSpi vol) throws IOException;
+  void checkAndUpdate(String bpid, ScanInfo info) throws IOException;
 
   /**
    * @param b - the block




[14/29] hadoop git commit: HADOOP-14416. Path starting with 'wasb:///' not resolved correctly while authorizing with WASB-Ranger. Contributed by Sivaguru Sankaridurg

Posted by vi...@apache.org.
HADOOP-14416. Path starting with 'wasb:///' not resolved correctly while authorizing with WASB-Ranger. Contributed by Sivaguru Sankaridurg


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b415c6fe
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b415c6fe
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b415c6fe

Branch: refs/heads/HDFS-9806
Commit: b415c6fe743242acf1d1d3eb7ea7091d90d2c0d4
Parents: 8236130
Author: Mingliang Liu <li...@apache.org>
Authored: Tue May 16 11:22:11 2017 -0700
Committer: Mingliang Liu <li...@apache.org>
Committed: Tue May 16 11:22:32 2017 -0700

----------------------------------------------------------------------
 .../hadoop/fs/azure/NativeAzureFileSystem.java  |  47 ++---
 .../hadoop/fs/azure/AbstractWasbTestBase.java   |   7 +-
 .../hadoop/fs/azure/MockWasbAuthorizerImpl.java |  11 ++
 .../TestNativeAzureFileSystemAuthorization.java | 170 ++++++++-----------
 .../fs/azure/TestWasbRemoteCallHelper.java      |  16 +-
 5 files changed, 116 insertions(+), 135 deletions(-)
----------------------------------------------------------------------
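The essence of the fix, restated outside the patch for clarity: paths are qualified before they reach the authorizer, so a scheme-only path such as wasb:/// resolves against the file system URI and working directory. The fs instance and the authority shown in the comment are placeholders.

    Path requested = new Path("wasb:///user/alice/data");
    Path qualified = requested.makeQualified(fs.getUri(), fs.getWorkingDirectory());
    // e.g. wasb://container@account.blob.core.windows.net/user/alice/data
    authorizer.authorize(qualified.toString(), WasbAuthorizationOperations.READ.toString());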


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b415c6fe/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
index 03a2782..0ba47ef 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
@@ -1433,13 +1433,19 @@ public class NativeAzureFileSystem extends FileSystem {
    * @param operation - A string describing the operation being performed ("delete", "create" etc.).
    * @param originalPath - The originalPath that was being accessed
    */
-  private void performAuthCheck(String requestingAccessForPath, WasbAuthorizationOperations accessType,
-      String operation, String originalPath) throws WasbAuthorizationException, IOException {
+  private void performAuthCheck(Path requestingAccessForPath, WasbAuthorizationOperations accessType,
+      String operation, Path originalPath) throws WasbAuthorizationException, IOException {
+
+    if (azureAuthorization && this.authorizer != null) {
+
+      requestingAccessForPath = requestingAccessForPath.makeQualified(getUri(), getWorkingDirectory());
+      originalPath = originalPath.makeQualified(getUri(), getWorkingDirectory());
+
+      if (!this.authorizer.authorize(requestingAccessForPath.toString(), accessType.toString())) {
+        throw new WasbAuthorizationException(operation
+            + " operation for Path : " + originalPath.toString() + " not allowed");
+      }
 
-    if (azureAuthorization && this.authorizer != null &&
-        !this.authorizer.authorize(requestingAccessForPath, accessType.toString())) {
-      throw new WasbAuthorizationException(operation
-          + " operation for Path : " + originalPath + " not allowed");
     }
   }
 
@@ -1466,7 +1472,7 @@ public class NativeAzureFileSystem extends FileSystem {
 
     Path absolutePath = makeAbsolute(f);
 
-    performAuthCheck(absolutePath.toString(), WasbAuthorizationOperations.WRITE, "append", absolutePath.toString());
+    performAuthCheck(absolutePath, WasbAuthorizationOperations.WRITE, "append", absolutePath);
 
     String key = pathToKey(absolutePath);
     FileMetadata meta = null;
@@ -1671,7 +1677,7 @@ public class NativeAzureFileSystem extends FileSystem {
     Path absolutePath = makeAbsolute(f);
     Path ancestor = getAncestor(absolutePath);
 
-    performAuthCheck(ancestor.toString(), WasbAuthorizationOperations.WRITE, "create", absolutePath.toString());
+    performAuthCheck(ancestor, WasbAuthorizationOperations.WRITE, "create", absolutePath);
 
     String key = pathToKey(absolutePath);
 
@@ -1685,7 +1691,7 @@ public class NativeAzureFileSystem extends FileSystem {
         throw new FileAlreadyExistsException("File already exists:" + f);
       }
       else {
-        performAuthCheck(absolutePath.toString(), WasbAuthorizationOperations.WRITE, "create", absolutePath.toString());
+        performAuthCheck(absolutePath, WasbAuthorizationOperations.WRITE, "create", absolutePath);
       }
     }
 
@@ -1800,7 +1806,7 @@ public class NativeAzureFileSystem extends FileSystem {
     Path absolutePath = makeAbsolute(f);
     Path parentPath = absolutePath.getParent();
 
-    performAuthCheck(parentPath.toString(), WasbAuthorizationOperations.WRITE, "delete", absolutePath.toString());
+    performAuthCheck(parentPath, WasbAuthorizationOperations.WRITE, "delete", absolutePath);
 
     String key = pathToKey(absolutePath);
 
@@ -2002,14 +2008,12 @@ public class NativeAzureFileSystem extends FileSystem {
           // NOTE: Ideally the subtree needs read-write-execute access check.
           // But we will simplify it to write-access check.
           if (metaFile.isDir()) { // the absolute-path
-            performAuthCheck(absolutePath.toString(), WasbAuthorizationOperations.WRITE, "delete",
-                absolutePath.toString());
+            performAuthCheck(absolutePath, WasbAuthorizationOperations.WRITE, "delete", absolutePath);
           }
           for (FileMetadata meta : contents) {
             if (meta.isDir()) {
               Path subTreeDir = keyToPath(meta.getKey());
-              performAuthCheck(subTreeDir.toString(), WasbAuthorizationOperations.WRITE, "delete",
-                  absolutePath.toString());
+              performAuthCheck(subTreeDir, WasbAuthorizationOperations.WRITE, "delete", absolutePath);
             }
           }
         }
@@ -2090,8 +2094,7 @@ public class NativeAzureFileSystem extends FileSystem {
     // Capture the absolute path and the path to key.
     Path absolutePath = makeAbsolute(f);
 
-    performAuthCheck(absolutePath.toString(), WasbAuthorizationOperations.READ, "getFileStatus",
-        absolutePath.toString());
+    performAuthCheck(absolutePath, WasbAuthorizationOperations.READ, "getFileStatus", absolutePath);
 
     String key = pathToKey(absolutePath);
     if (key.length() == 0) { // root always exists
@@ -2192,7 +2195,7 @@ public class NativeAzureFileSystem extends FileSystem {
 
     Path absolutePath = makeAbsolute(f);
 
-    performAuthCheck(absolutePath.toString(), WasbAuthorizationOperations.READ, "liststatus", absolutePath.toString());
+    performAuthCheck(absolutePath, WasbAuthorizationOperations.READ, "liststatus", absolutePath);
 
     String key = pathToKey(absolutePath);
     Set<FileStatus> status = new TreeSet<FileStatus>();
@@ -2436,7 +2439,7 @@ public class NativeAzureFileSystem extends FileSystem {
     Path absolutePath = makeAbsolute(f);
     Path ancestor = getAncestor(absolutePath);
 
-    performAuthCheck(ancestor.toString(), WasbAuthorizationOperations.WRITE, "mkdirs", absolutePath.toString());
+    performAuthCheck(ancestor, WasbAuthorizationOperations.WRITE, "mkdirs", absolutePath);
 
     PermissionStatus permissionStatus = null;
     if(noUmask) {
@@ -2482,7 +2485,7 @@ public class NativeAzureFileSystem extends FileSystem {
 
     Path absolutePath = makeAbsolute(f);
 
-    performAuthCheck(absolutePath.toString(), WasbAuthorizationOperations.READ, "read", absolutePath.toString());
+    performAuthCheck(absolutePath, WasbAuthorizationOperations.READ, "read", absolutePath);
 
     String key = pathToKey(absolutePath);
     FileMetadata meta = null;
@@ -2548,8 +2551,7 @@ public class NativeAzureFileSystem extends FileSystem {
       return false;
     }
 
-    performAuthCheck(srcParentFolder.toString(), WasbAuthorizationOperations.WRITE, "rename",
-        absoluteSrcPath.toString());
+    performAuthCheck(srcParentFolder, WasbAuthorizationOperations.WRITE, "rename", absoluteSrcPath);
 
     String srcKey = pathToKey(absoluteSrcPath);
 
@@ -2562,8 +2564,7 @@ public class NativeAzureFileSystem extends FileSystem {
     Path absoluteDstPath = makeAbsolute(dst);
     Path dstParentFolder = absoluteDstPath.getParent();
 
-    performAuthCheck(dstParentFolder.toString(), WasbAuthorizationOperations.WRITE, "rename",
-        absoluteDstPath.toString());
+    performAuthCheck(dstParentFolder, WasbAuthorizationOperations.WRITE, "rename", absoluteDstPath);
 
     String dstKey = pathToKey(absoluteDstPath);
     FileMetadata dstMetadata = null;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b415c6fe/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/AbstractWasbTestBase.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/AbstractWasbTestBase.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/AbstractWasbTestBase.java
index 58d278a..6ae18fe 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/AbstractWasbTestBase.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/AbstractWasbTestBase.java
@@ -20,11 +20,9 @@ package org.apache.hadoop.fs.azure;
 
 import static org.junit.Assume.assumeNotNull;
 
-import org.apache.hadoop.fs.FileSystem;
-
+import com.google.common.annotations.VisibleForTesting;
 import org.junit.After;
 import org.junit.Before;
-
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -40,7 +38,8 @@ public abstract class AbstractWasbTestBase {
   protected static final Logger LOG =
       LoggerFactory.getLogger(AbstractWasbTestBase.class);
 
-  protected FileSystem fs;
+  @VisibleForTesting
+  protected NativeAzureFileSystem fs;
   private AzureBlobStorageTestAccount testAccount;
 
   @Before

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b415c6fe/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/MockWasbAuthorizerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/MockWasbAuthorizerImpl.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/MockWasbAuthorizerImpl.java
index 445bfd8..0b3422c 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/MockWasbAuthorizerImpl.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/MockWasbAuthorizerImpl.java
@@ -23,6 +23,7 @@ import java.util.Map;
 import java.util.regex.Pattern;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
 
 /**
  * A mock wasb authorizer implementation.
@@ -32,6 +33,14 @@ public class MockWasbAuthorizerImpl implements WasbAuthorizerInterface {
 
   private Map<AuthorizationComponent, Boolean> authRules;
 
+  // The full qualified URL to the root directory
+  private String qualifiedPrefixUrl;
+
+  public MockWasbAuthorizerImpl(NativeAzureFileSystem fs) {
+    qualifiedPrefixUrl = new Path("/").makeQualified(fs.getUri(), fs.getWorkingDirectory())
+        .toString().replaceAll("/$", "");
+  }
+
   @Override
   public void init(Configuration conf) {
     authRules = new HashMap<AuthorizationComponent, Boolean>();
@@ -40,6 +49,8 @@ public class MockWasbAuthorizerImpl implements WasbAuthorizerInterface {
   public void addAuthRule(String wasbAbsolutePath,
       String accessType, boolean access) {
 
+    wasbAbsolutePath = qualifiedPrefixUrl + wasbAbsolutePath;
+
     AuthorizationComponent component = wasbAbsolutePath.endsWith("*")
         ? new AuthorizationComponent("^" + wasbAbsolutePath.replace("*", ".*"), accessType)
         : new AuthorizationComponent(wasbAbsolutePath, accessType);
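
With the prefix above in place, the tests keep registering container-relative rules while the mock matches the fully qualified strings the filesystem now passes in. A sketch of that wiring, assuming it lives in the same org.apache.hadoop.fs.azure test package and that fs is an initialized NativeAzureFileSystem from AzureBlobStorageTestAccount:

package org.apache.hadoop.fs.azure;

class QualifiedRuleWiring {
  // fs is assumed to be set up elsewhere, as in the tests below.
  static void wire(NativeAzureFileSystem fs) throws Exception {
    MockWasbAuthorizerImpl authorizer = new MockWasbAuthorizerImpl(fs);
    authorizer.init(null);
    // Rules stay container-relative; the mock prepends the qualified prefix
    // so they match the fully qualified paths the filesystem now hands in.
    authorizer.addAuthRule("/", WasbAuthorizationOperations.WRITE.toString(), true);
    // A trailing "*" rule becomes a regex over the qualified form.
    authorizer.addAuthRule("/data/*", WasbAuthorizationOperations.READ.toString(), true);
    fs.updateWasbAuthorizer(authorizer);
  }
}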

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b415c6fe/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemAuthorization.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemAuthorization.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemAuthorization.java
index 4e49622..f7a2eb7 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemAuthorization.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemAuthorization.java
@@ -78,6 +78,15 @@ public class TestNativeAzureFileSystemAuthorization
   }
 
   /**
+   * Sets up the expected exception class and exception message that the test is supposed to fail with.
+   */
+  private void setExpectedFailureMessage(String operation, Path path) {
+    expectedEx.expect(WasbAuthorizationException.class);
+    expectedEx.expectMessage(String.format("%s operation for Path : %s not allowed",
+        operation, path.makeQualified(fs.getUri(), fs.getWorkingDirectory())));
+  }
+
+  /**
    * Positive test to verify Create access check
    * The file is created directly under an existing folder.
    * No intermediate folders need to be created.
@@ -86,13 +95,10 @@ public class TestNativeAzureFileSystemAuthorization
   @Test
   public void testCreateAccessWithoutCreateIntermediateFoldersCheckPositive() throws Throwable {
 
-    AzureBlobStorageTestAccount testAccount = createTestAccount();
-    NativeAzureFileSystem fs = testAccount.getFileSystem();
-
     Path parentDir = new Path("/");
     Path testPath = new Path(parentDir, "test.dat");
 
-    MockWasbAuthorizerImpl authorizer = new MockWasbAuthorizerImpl();
+    MockWasbAuthorizerImpl authorizer = new MockWasbAuthorizerImpl(fs);
     authorizer.init(null);
     authorizer.addAuthRule("/", WasbAuthorizationOperations.WRITE.toString(), true);
     authorizer.addAuthRule(testPath.toString(), WasbAuthorizationOperations.READ.toString(), true);
@@ -117,13 +123,10 @@ public class TestNativeAzureFileSystemAuthorization
   @Test
   public void testCreateAccessWithCreateIntermediateFoldersCheckPositive() throws Throwable {
 
-    AzureBlobStorageTestAccount testAccount = createTestAccount();
-    NativeAzureFileSystem fs = testAccount.getFileSystem();
-
     Path parentDir = new Path("/testCreateAccessCheckPositive/1/2/3");
     Path testPath = new Path(parentDir, "test.dat");
 
-    MockWasbAuthorizerImpl authorizer = new MockWasbAuthorizerImpl();
+    MockWasbAuthorizerImpl authorizer = new MockWasbAuthorizerImpl(fs);
     authorizer.init(null);
     authorizer.addAuthRule("/", WasbAuthorizationOperations.WRITE.toString(), true);
     authorizer.addAuthRule(testPath.toString(), WasbAuthorizationOperations.READ.toString(), true);
@@ -148,16 +151,12 @@ public class TestNativeAzureFileSystemAuthorization
   @Test // (expected=WasbAuthorizationException.class)
   public void testCreateAccessWithOverwriteCheckNegative() throws Throwable {
 
-    expectedEx.expect(WasbAuthorizationException.class);
-    expectedEx.expectMessage("create operation for Path : /test.dat not allowed");
-
-    AzureBlobStorageTestAccount testAccount = createTestAccount();
-    NativeAzureFileSystem fs = testAccount.getFileSystem();
-
     Path parentDir = new Path("/");
     Path testPath = new Path(parentDir, "test.dat");
 
-    MockWasbAuthorizerImpl authorizer = new MockWasbAuthorizerImpl();
+    setExpectedFailureMessage("create", testPath);
+
+    MockWasbAuthorizerImpl authorizer = new MockWasbAuthorizerImpl(fs);
     authorizer.init(null);
     authorizer.addAuthRule("/", WasbAuthorizationOperations.WRITE.toString(), true);
     authorizer.addAuthRule(testPath.toString(), WasbAuthorizationOperations.READ.toString(), true);
@@ -184,13 +183,10 @@ public class TestNativeAzureFileSystemAuthorization
   @Test
   public void testCreateAccessWithOverwriteCheckPositive() throws Throwable {
 
-    AzureBlobStorageTestAccount testAccount = createTestAccount();
-    NativeAzureFileSystem fs = testAccount.getFileSystem();
-
     Path parentDir = new Path("/");
     Path testPath = new Path(parentDir, "test.dat");
 
-    MockWasbAuthorizerImpl authorizer = new MockWasbAuthorizerImpl();
+    MockWasbAuthorizerImpl authorizer = new MockWasbAuthorizerImpl(fs);
     authorizer.init(null);
     authorizer.addAuthRule("/", WasbAuthorizationOperations.WRITE.toString(), true);
     authorizer.addAuthRule(testPath.toString(), WasbAuthorizationOperations.READ.toString(), true);
@@ -218,16 +214,12 @@ public class TestNativeAzureFileSystemAuthorization
   @Test // (expected=WasbAuthorizationException.class)
   public void testCreateAccessCheckNegative() throws Throwable {
 
-    expectedEx.expect(WasbAuthorizationException.class);
-    expectedEx.expectMessage("create operation for Path : /testCreateAccessCheckNegative/test.dat not allowed");
-
-    AzureBlobStorageTestAccount testAccount = createTestAccount();
-    NativeAzureFileSystem fs = testAccount.getFileSystem();
-
     Path parentDir = new Path("/testCreateAccessCheckNegative");
     Path testPath = new Path(parentDir, "test.dat");
 
-    MockWasbAuthorizerImpl authorizer = new MockWasbAuthorizerImpl();
+    setExpectedFailureMessage("create", testPath);
+
+    MockWasbAuthorizerImpl authorizer = new MockWasbAuthorizerImpl(fs);
     authorizer.init(null);
     authorizer.addAuthRule("/", WasbAuthorizationOperations.WRITE.toString(), false);
     fs.updateWasbAuthorizer(authorizer);
@@ -249,14 +241,11 @@ public class TestNativeAzureFileSystemAuthorization
   @Test
   public void testListAccessCheckPositive() throws Throwable {
 
-    AzureBlobStorageTestAccount testAccount = createTestAccount();
-    NativeAzureFileSystem fs = testAccount.getFileSystem();
-
     Path parentDir = new Path("/testListAccessCheckPositive");
     Path intermediateFolders = new Path(parentDir, "1/2/3/");
     Path testPath = new Path(intermediateFolders, "test.dat");
 
-    MockWasbAuthorizerImpl authorizer = new MockWasbAuthorizerImpl();
+    MockWasbAuthorizerImpl authorizer = new MockWasbAuthorizerImpl(fs);
     authorizer.init(null);
     authorizer.addAuthRule("/", WasbAuthorizationOperations.WRITE.toString(), true);
     authorizer.addAuthRule(testPath.toString(), WasbAuthorizationOperations.READ.toString(), true);
@@ -280,16 +269,12 @@ public class TestNativeAzureFileSystemAuthorization
   @Test //(expected=WasbAuthorizationException.class)
   public void testListAccessCheckNegative() throws Throwable {
 
-    expectedEx.expect(WasbAuthorizationException.class);
-    expectedEx.expectMessage("liststatus operation for Path : /testListAccessCheckNegative/test.dat not allowed");
-
-    AzureBlobStorageTestAccount testAccount = createTestAccount();
-    NativeAzureFileSystem fs = testAccount.getFileSystem();
-
     Path parentDir = new Path("/testListAccessCheckNegative");
     Path testPath = new Path(parentDir, "test.dat");
 
-    MockWasbAuthorizerImpl authorizer = new MockWasbAuthorizerImpl();
+    setExpectedFailureMessage("liststatus", testPath);
+
+    MockWasbAuthorizerImpl authorizer = new MockWasbAuthorizerImpl(fs);
     authorizer.init(null);
     authorizer.addAuthRule("/", WasbAuthorizationOperations.WRITE.toString(), true);
     authorizer.addAuthRule(testPath.toString(), WasbAuthorizationOperations.READ.toString(), false);
@@ -312,14 +297,11 @@ public class TestNativeAzureFileSystemAuthorization
   @Test
   public void testRenameAccessCheckPositive() throws Throwable {
 
-    AzureBlobStorageTestAccount testAccount = createTestAccount();
-    NativeAzureFileSystem fs = testAccount.getFileSystem();
-
     Path parentDir = new Path("/testRenameAccessCheckPositive");
     Path srcPath = new Path(parentDir, "test1.dat");
     Path dstPath = new Path(parentDir, "test2.dat");
 
-    MockWasbAuthorizerImpl authorizer = new MockWasbAuthorizerImpl();
+    MockWasbAuthorizerImpl authorizer = new MockWasbAuthorizerImpl(fs);
     authorizer.init(null);
     authorizer.addAuthRule("/", WasbAuthorizationOperations.WRITE.toString(), true); /* to create parentDir */
     authorizer.addAuthRule(parentDir.toString(), WasbAuthorizationOperations.WRITE.toString(), true); /* for rename */
@@ -347,16 +329,13 @@ public class TestNativeAzureFileSystemAuthorization
   @Test //(expected=WasbAuthorizationException.class)
   public void testRenameAccessCheckNegative() throws Throwable {
 
-    expectedEx.expect(WasbAuthorizationException.class);
-    expectedEx.expectMessage("rename operation for Path : /testRenameAccessCheckNegative/test1.dat not allowed");
-
-    AzureBlobStorageTestAccount testAccount = createTestAccount();
-    NativeAzureFileSystem fs = testAccount.getFileSystem();
     Path parentDir = new Path("/testRenameAccessCheckNegative");
     Path srcPath = new Path(parentDir, "test1.dat");
     Path dstPath = new Path(parentDir, "test2.dat");
 
-    MockWasbAuthorizerImpl authorizer = new MockWasbAuthorizerImpl();
+    setExpectedFailureMessage("rename", srcPath);
+
+    MockWasbAuthorizerImpl authorizer = new MockWasbAuthorizerImpl(fs);
     authorizer.init(null);
     authorizer.addAuthRule("/", WasbAuthorizationOperations.WRITE.toString(), true); /* to create parent dir */
     authorizer.addAuthRule(parentDir.toString(), WasbAuthorizationOperations.WRITE.toString(), false);
@@ -384,17 +363,14 @@ public class TestNativeAzureFileSystemAuthorization
   @Test //(expected=WasbAuthorizationException.class)
   public void testRenameAccessCheckNegativeOnDstFolder() throws Throwable {
 
-    expectedEx.expect(WasbAuthorizationException.class);
-    expectedEx.expectMessage("rename operation for Path : /testRenameAccessCheckNegativeDst/test2.dat not allowed");
-
-    AzureBlobStorageTestAccount testAccount = createTestAccount();
-    NativeAzureFileSystem fs = testAccount.getFileSystem();
     Path parentSrcDir = new Path("/testRenameAccessCheckNegativeSrc");
     Path srcPath = new Path(parentSrcDir, "test1.dat");
     Path parentDstDir = new Path("/testRenameAccessCheckNegativeDst");
     Path dstPath = new Path(parentDstDir, "test2.dat");
 
-    MockWasbAuthorizerImpl authorizer = new MockWasbAuthorizerImpl();
+    setExpectedFailureMessage("rename", dstPath);
+
+    MockWasbAuthorizerImpl authorizer = new MockWasbAuthorizerImpl(fs);
     authorizer.init(null);
     authorizer.addAuthRule("/", WasbAuthorizationOperations.WRITE.toString(), true); /* to create parent dir */
     authorizer.addAuthRule(parentSrcDir.toString(), WasbAuthorizationOperations.WRITE.toString(), true);
@@ -419,17 +395,15 @@ public class TestNativeAzureFileSystemAuthorization
    * Positive test to verify rename access check - the dstFolder allows rename
    * @throws Throwable
    */
-  @Test //(expected=WasbAuthorizationException.class)
+  @Test
   public void testRenameAccessCheckPositiveOnDstFolder() throws Throwable {
 
-    AzureBlobStorageTestAccount testAccount = createTestAccount();
-    NativeAzureFileSystem fs = testAccount.getFileSystem();
     Path parentSrcDir = new Path("/testRenameAccessCheckPositiveSrc");
     Path srcPath = new Path(parentSrcDir, "test1.dat");
     Path parentDstDir = new Path("/testRenameAccessCheckPositiveDst");
     Path dstPath = new Path(parentDstDir, "test2.dat");
 
-    MockWasbAuthorizerImpl authorizer = new MockWasbAuthorizerImpl();
+    MockWasbAuthorizerImpl authorizer = new MockWasbAuthorizerImpl(fs);
     authorizer.init(null);
     authorizer.addAuthRule("/", WasbAuthorizationOperations.WRITE.toString(), true); /* to create parent dirs */
     authorizer.addAuthRule(parentSrcDir.toString(), WasbAuthorizationOperations.WRITE.toString(), true);
@@ -461,12 +435,10 @@ public class TestNativeAzureFileSystemAuthorization
   @Test
   public void testReadAccessCheckPositive() throws Throwable {
 
-    AzureBlobStorageTestAccount testAccount = createTestAccount();
-    NativeAzureFileSystem fs = testAccount.getFileSystem();
     Path parentDir = new Path("/testReadAccessCheckPositive");
     Path testPath = new Path(parentDir, "test.dat");
 
-    MockWasbAuthorizerImpl authorizer = new MockWasbAuthorizerImpl();
+    MockWasbAuthorizerImpl authorizer = new MockWasbAuthorizerImpl(fs);
     authorizer.init(null);
     authorizer.addAuthRule("/", WasbAuthorizationOperations.WRITE.toString(), true);
     authorizer.addAuthRule(testPath.toString(), WasbAuthorizationOperations.READ.toString(), true);
@@ -504,15 +476,12 @@ public class TestNativeAzureFileSystemAuthorization
   @Test //(expected=WasbAuthorizationException.class)
   public void testReadAccessCheckNegative() throws Throwable {
 
-    expectedEx.expect(WasbAuthorizationException.class);
-    expectedEx.expectMessage("read operation for Path : /testReadAccessCheckNegative/test.dat not allowed");
-
-    AzureBlobStorageTestAccount testAccount = createTestAccount();
-    NativeAzureFileSystem fs = testAccount.getFileSystem();
     Path parentDir = new Path("/testReadAccessCheckNegative");
     Path testPath = new Path(parentDir, "test.dat");
 
-    MockWasbAuthorizerImpl authorizer = new MockWasbAuthorizerImpl();
+    setExpectedFailureMessage("read", testPath);
+
+    MockWasbAuthorizerImpl authorizer = new MockWasbAuthorizerImpl(fs);
     authorizer.init(null);
     authorizer.addAuthRule("/", WasbAuthorizationOperations.WRITE.toString(), true);
     authorizer.addAuthRule(testPath.toString(), WasbAuthorizationOperations.READ.toString(), false);
@@ -548,13 +517,10 @@ public class TestNativeAzureFileSystemAuthorization
   @Test
   public void testFileDeleteAccessCheckPositive() throws Throwable {
 
-    AzureBlobStorageTestAccount testAccount = createTestAccount();
-    NativeAzureFileSystem fs = testAccount.getFileSystem();
-
     Path parentDir = new Path("/");
     Path testPath = new Path(parentDir, "test.dat");
 
-    MockWasbAuthorizerImpl authorizer = new MockWasbAuthorizerImpl();
+    MockWasbAuthorizerImpl authorizer = new MockWasbAuthorizerImpl(fs);
     authorizer.init(null);
     authorizer.addAuthRule("/", WasbAuthorizationOperations.WRITE.toString(), true);
     authorizer.addAuthRule(testPath.toString(), WasbAuthorizationOperations.READ.toString(), true);
@@ -576,16 +542,12 @@ public class TestNativeAzureFileSystemAuthorization
   @Test //(expected=WasbAuthorizationException.class)
   public void testFileDeleteAccessCheckNegative() throws Throwable {
 
-    expectedEx.expect(WasbAuthorizationException.class);
-    expectedEx.expectMessage("delete operation for Path : /test.dat not allowed");
-
-    AzureBlobStorageTestAccount testAccount = createTestAccount();
-    NativeAzureFileSystem fs = testAccount.getFileSystem();
-
     Path parentDir = new Path("/");
     Path testPath = new Path(parentDir, "test.dat");
 
-    MockWasbAuthorizerImpl authorizer = new MockWasbAuthorizerImpl();
+    setExpectedFailureMessage("delete", testPath);
+
+    MockWasbAuthorizerImpl authorizer = new MockWasbAuthorizerImpl(fs);
     authorizer.init(null);
     authorizer.addAuthRule("/", WasbAuthorizationOperations.WRITE.toString(), true);
     authorizer.addAuthRule(testPath.toString(), WasbAuthorizationOperations.READ.toString(), true);
@@ -622,13 +584,10 @@ public class TestNativeAzureFileSystemAuthorization
   @Test
   public void testFileDeleteAccessWithIntermediateFoldersCheckPositive() throws Throwable {
 
-    AzureBlobStorageTestAccount testAccount = createTestAccount();
-    NativeAzureFileSystem fs = testAccount.getFileSystem();
-
     Path parentDir = new Path("/testDeleteIntermediateFolder");
     Path testPath = new Path(parentDir, "1/2/test.dat");
 
-    MockWasbAuthorizerImpl authorizer = new MockWasbAuthorizerImpl();
+    MockWasbAuthorizerImpl authorizer = new MockWasbAuthorizerImpl(fs);
     authorizer.init(null);
     authorizer.addAuthRule("/", WasbAuthorizationOperations.WRITE.toString(), true); // for create and delete
     authorizer.addAuthRule("/testDeleteIntermediateFolder*",
@@ -655,12 +614,9 @@ public class TestNativeAzureFileSystemAuthorization
   @Test
   public void testGetFileStatusPositive() throws Throwable {
 
-    AzureBlobStorageTestAccount testAccount = createTestAccount();
-    NativeAzureFileSystem fs = testAccount.getFileSystem();
-
     Path testPath = new Path("/");
 
-    MockWasbAuthorizerImpl authorizer = new MockWasbAuthorizerImpl();
+    MockWasbAuthorizerImpl authorizer = new MockWasbAuthorizerImpl(fs);
     authorizer.init(null);
     authorizer.addAuthRule("/", WasbAuthorizationOperations.READ.toString(), true);
     fs.updateWasbAuthorizer(authorizer);
@@ -675,15 +631,11 @@ public class TestNativeAzureFileSystemAuthorization
   @Test //(expected=WasbAuthorizationException.class)
   public void testGetFileStatusNegative() throws Throwable {
 
-    expectedEx.expect(WasbAuthorizationException.class);
-    expectedEx.expectMessage("getFileStatus operation for Path : / not allowed");
-
-    AzureBlobStorageTestAccount testAccount = createTestAccount();
-    NativeAzureFileSystem fs = testAccount.getFileSystem();
-
     Path testPath = new Path("/");
 
-    MockWasbAuthorizerImpl authorizer = new MockWasbAuthorizerImpl();
+    setExpectedFailureMessage("getFileStatus", testPath);
+
+    MockWasbAuthorizerImpl authorizer = new MockWasbAuthorizerImpl(fs);
     authorizer.init(null);
     authorizer.addAuthRule("/", WasbAuthorizationOperations.READ.toString(), false);
     fs.updateWasbAuthorizer(authorizer);
@@ -698,12 +650,9 @@ public class TestNativeAzureFileSystemAuthorization
   @Test
   public void testMkdirsCheckPositive() throws Throwable {
 
-    AzureBlobStorageTestAccount testAccount = createTestAccount();
-    NativeAzureFileSystem fs = testAccount.getFileSystem();
-
     Path testPath = new Path("/testMkdirsAccessCheckPositive/1/2/3");
 
-    MockWasbAuthorizerImpl authorizer = new MockWasbAuthorizerImpl();
+    MockWasbAuthorizerImpl authorizer = new MockWasbAuthorizerImpl(fs);
     authorizer.init(null);
     authorizer.addAuthRule("/", WasbAuthorizationOperations.WRITE.toString(), true);
     authorizer.addAuthRule(testPath.toString(), WasbAuthorizationOperations.READ.toString(), true);
@@ -726,15 +675,11 @@ public class TestNativeAzureFileSystemAuthorization
   @Test //(expected=WasbAuthorizationException.class)
   public void testMkdirsCheckNegative() throws Throwable {
 
-    expectedEx.expect(WasbAuthorizationException.class);
-    expectedEx.expectMessage("mkdirs operation for Path : /testMkdirsAccessCheckNegative/1/2/3 not allowed");
-
-    AzureBlobStorageTestAccount testAccount = createTestAccount();
-    NativeAzureFileSystem fs = testAccount.getFileSystem();
-
     Path testPath = new Path("/testMkdirsAccessCheckNegative/1/2/3");
 
-    MockWasbAuthorizerImpl authorizer = new MockWasbAuthorizerImpl();
+    setExpectedFailureMessage("mkdirs", testPath);
+
+    MockWasbAuthorizerImpl authorizer = new MockWasbAuthorizerImpl(fs);
     authorizer.init(null);
     authorizer.addAuthRule("/", WasbAuthorizationOperations.WRITE.toString(), false);
     authorizer.addAuthRule(testPath.toString(), WasbAuthorizationOperations.READ.toString(), true);
@@ -749,4 +694,23 @@ public class TestNativeAzureFileSystemAuthorization
       fs.delete(new Path("/testMkdirsAccessCheckNegative"), true);
     }
   }
+
+
+  /**
+   * Positive test for the triple-slash format (wasb:///) access check.
+   * @throws Throwable
+   */
+  @Test
+  public void testListStatusWithTripleSlashCheckPositive() throws Throwable {
+
+    Path testPath = new Path("/");
+
+    MockWasbAuthorizerImpl authorizer = new MockWasbAuthorizerImpl(fs);
+    authorizer.init(null);
+    authorizer.addAuthRule(testPath.toString(), WasbAuthorizationOperations.READ.toString(), true);
+    fs.updateWasbAuthorizer(authorizer);
+
+    Path testPathWithTripleSlash = new Path("wasb:///" + testPath);
+    fs.listStatus(testPathWithTripleSlash);
+  }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b415c6fe/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestWasbRemoteCallHelper.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestWasbRemoteCallHelper.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestWasbRemoteCallHelper.java
index d7c40b9..77be1b8 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestWasbRemoteCallHelper.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestWasbRemoteCallHelper.java
@@ -32,6 +32,8 @@ import org.junit.rules.ExpectedException;
 import org.mockito.Mockito;
 
 import java.io.ByteArrayInputStream;
+import java.io.UnsupportedEncodingException;
+import java.net.URLEncoder;
 import java.nio.charset.StandardCharsets;
 
 import static org.apache.hadoop.fs.azure.AzureNativeFileSystemStore.KEY_USE_SECURE_MODE;
@@ -261,16 +263,20 @@ public class TestWasbRemoteCallHelper
     performop(mockHttpClient);
   }
 
-  private void setupExpectations() {
+  private void setupExpectations() throws UnsupportedEncodingException {
+
+    String path = new Path("/").makeQualified(fs.getUri(), fs.getWorkingDirectory()).toString();
+    String pathEncoded = URLEncoder.encode(path, "UTF-8");
+
+    String requestURI = String.format("http://localhost/CHECK_AUTHORIZATION?wasb_absolute_path=%s&operation_type=write", pathEncoded);
     expectedEx.expect(WasbAuthorizationException.class);
     expectedEx.expectMessage("org.apache.hadoop.fs.azure.WasbRemoteCallException: "
-        + "http://localhost/CHECK_AUTHORIZATION?wasb_absolute_path=%2F&"
-        + "operation_type=write:Encountered IOException while making remote call");
+        + requestURI
+        + ":Encountered IOException while making remote call"
+    );
   }
 
   private void performop(HttpClient mockHttpClient) throws Throwable {
-    AzureBlobStorageTestAccount testAccount = createTestAccount();
-    NativeAzureFileSystem fs = testAccount.getFileSystem();
 
     Path testPath = new Path("/", "test.dat");
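
The expectation built in setupExpectations above URL-encodes the fully qualified root path into the remote call's query string. A standalone illustration of that encoding step, with a made-up account and container authority:

// Illustration only; the authority string is hypothetical.
import java.io.UnsupportedEncodingException;
import java.net.URLEncoder;

public class EncodeQualifiedPath {
  public static void main(String[] args) throws UnsupportedEncodingException {
    String qualifiedRoot = "wasb://container@account.blob.core.windows.net/";
    String encoded = URLEncoder.encode(qualifiedRoot, "UTF-8");
    // Before this patch the expected query carried only the bare "%2F";
    // it now carries the encoded fully qualified path.
    System.out.println("http://localhost/CHECK_AUTHORIZATION?wasb_absolute_path="
        + encoded + "&operation_type=write");
  }
}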
 




[13/29] hadoop git commit: YARN-6306. NMClient API change for container upgrade. Contributed by Arun Suresh

Posted by vi...@apache.org.
YARN-6306. NMClient API change for container upgrade. Contributed by Arun Suresh


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8236130b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8236130b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8236130b

Branch: refs/heads/HDFS-9806
Commit: 8236130b2c61ab0ee9b8ed747ce8cf96af7f17aa
Parents: 89a8edc
Author: Jian He <ji...@apache.org>
Authored: Tue May 16 10:48:46 2017 -0700
Committer: Jian He <ji...@apache.org>
Committed: Tue May 16 10:48:46 2017 -0700

----------------------------------------------------------------------
 .../apache/hadoop/yarn/client/api/NMClient.java |  86 ++++-
 .../yarn/client/api/async/NMClientAsync.java    |  98 ++++-
 .../api/async/impl/NMClientAsyncImpl.java       | 260 +++++++++++++-
 .../yarn/client/api/impl/NMClientImpl.java      |  91 +++++
 .../api/async/impl/TestNMClientAsync.java       | 359 +++++++++++++++----
 .../yarn/client/api/impl/TestNMClient.java      | 141 ++++++++
 .../container/ContainerImpl.java                |   9 +-
 7 files changed, 966 insertions(+), 78 deletions(-)
----------------------------------------------------------------------
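
Ahead of the per-file diffs, a sketch of how an application master might drive the new upgrade cycle through the blocking NMClient API added here. The container id and relaunch context are placeholders, and error handling is reduced to the essentials.

// Sketch under assumed inputs; not taken verbatim from the patch.
import java.io.IOException;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
import org.apache.hadoop.yarn.client.api.NMClient;
import org.apache.hadoop.yarn.exceptions.YarnException;

class ContainerUpgrader {
  void upgrade(NMClient nmClient, ContainerId containerId,
      ContainerLaunchContext newLaunchContext)
      throws YarnException, IOException {
    // Re-initialize with the new launch context; autoCommit=false keeps the
    // previous version available for an explicit commit or rollback.
    nmClient.reInitializeContainer(containerId, newLaunchContext, false);
    try {
      // ... application-level health check of the upgraded container ...
      nmClient.commitLastReInitialization(containerId);
    } catch (YarnException | IOException e) {
      // The check or the commit failed; fall back to the previous version.
      nmClient.rollbackLastReInitialization(containerId);
    }
  }
}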


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8236130b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/NMClient.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/NMClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/NMClient.java
index 47270f5..c1447ba 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/NMClient.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/NMClient.java
@@ -58,6 +58,10 @@ public abstract class NMClient extends AbstractService {
     return client;
   }
 
+  protected enum UpgradeOp {
+    REINIT, RESTART, COMMIT, ROLLBACK
+  }
+
   private NMTokenCache nmTokenCache = NMTokenCache.getSingleton();
 
   @Private
@@ -79,8 +83,8 @@ public abstract class NMClient extends AbstractService {
    *                               <code>NodeManager</code> to launch the
    *                               container
    * @return a map between the auxiliary service names and their outputs
-   * @throws YarnException
-   * @throws IOException
+   * @throws YarnException YarnException.
+   * @throws IOException IOException.
    */
   public abstract Map<String, ByteBuffer> startContainer(Container container,
       ContainerLaunchContext containerLaunchContext)
@@ -95,9 +99,10 @@ public abstract class NMClient extends AbstractService {
    * {@link Container}.
    * </p>
    *
-   * @param container the container with updated token
-   * @throws YarnException
-   * @throws IOException
+   * @param container the container with updated token.
+   *
+   * @throws YarnException YarnException.
+   * @throws IOException IOException.
    */
   public abstract void increaseContainerResource(Container container)
       throws YarnException, IOException;
@@ -107,9 +112,9 @@ public abstract class NMClient extends AbstractService {
    *
    * @param containerId the Id of the started container
    * @param nodeId the Id of the <code>NodeManager</code>
-   * 
-   * @throws YarnException
-   * @throws IOException
+   *
+   * @throws YarnException YarnException.
+   * @throws IOException IOException.
    */
   public abstract void stopContainer(ContainerId containerId, NodeId nodeId)
       throws YarnException, IOException;
@@ -120,14 +125,62 @@ public abstract class NMClient extends AbstractService {
    * @param containerId the Id of the started container
    * @param nodeId the Id of the <code>NodeManager</code>
    * 
-   * @return the status of a container
-   * @throws YarnException
-   * @throws IOException
+   * @return the status of a container.
+   *
+   * @throws YarnException YarnException.
+   * @throws IOException IOException.
    */
   public abstract ContainerStatus getContainerStatus(ContainerId containerId,
       NodeId nodeId) throws YarnException, IOException;
 
   /**
+   * <p>Re-Initialize the Container.</p>
+   *
+   * @param containerId the Id of the container to Re-Initialize.
+   * @param containerLaunchContex the updated ContainerLaunchContext.
+   * @param autoCommit commit re-initialization automatically ?
+   *
+   * @throws YarnException YarnException.
+   * @throws IOException IOException.
+   */
+  public abstract void reInitializeContainer(ContainerId containerId,
+      ContainerLaunchContext containerLaunchContex, boolean autoCommit)
+      throws YarnException, IOException;
+
+  /**
+   * <p>Restart the specified container.</p>
+   *
+   * @param containerId the Id of the container to restart.
+   *
+   * @throws YarnException YarnException.
+   * @throws IOException IOException.
+   */
+  public abstract void restartContainer(ContainerId containerId)
+      throws YarnException, IOException;
+
+  /**
+   * <p>Rollback last reInitialization of the specified container.</p>
+   *
+   * @param containerId the Id of the container to restart.
+   *
+   * @throws YarnException YarnException.
+   * @throws IOException IOException.
+   */
+  public abstract void rollbackLastReInitialization(ContainerId containerId)
+      throws YarnException, IOException;
+
+  /**
+   * <p>Commit last reInitialization of the specified container.</p>
+   *
+   * @param containerId the Id of the container to commit reInitialize.
+   *
+   * @throws YarnException YarnException.
+   * @throws IOException IOException.
+   */
+  public abstract void commitLastReInitialization(ContainerId containerId)
+      throws YarnException, IOException;
+
+  /**
    * <p>Set whether the containers that are started by this client, and are
    * still running should be stopped when the client stops. By default, the
    * feature should be enabled.</p> However, containers will be stopped only  
@@ -165,4 +218,15 @@ public abstract class NMClient extends AbstractService {
     return nmTokenCache;
   }
 
+  /**
+   * Get the NodeId of the node on which container is running. It returns
+   * null if the container is not found or if it is not running.
+   *
+   * @param containerId Container Id of the container.
+   * @return NodeId of the container on which it is running.
+   */
+  public NodeId getNodeIdOfStartedContainer(ContainerId containerId) {
+    return null;
+  }
+
 }
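
The getNodeIdOfStartedContainer hook above returns null in the base class; the async implementation further down depends on the concrete client supplying a real NodeId for containers it started. A small null-guarded lookup, as a caller might write it:

// Hedged sketch: guard against containers this client did not start.
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.client.api.NMClient;

class NodeLookup {
  NodeId requireNode(NMClient nmClient, ContainerId containerId) {
    NodeId nodeId = nmClient.getNodeIdOfStartedContainer(containerId);
    if (nodeId == null) {
      // Per the javadoc above: unknown container, or no longer running.
      throw new IllegalStateException("Container " + containerId
          + " was not started by this client or is not running");
    }
    return nodeId;
  }
}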

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8236130b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/async/NMClientAsync.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/async/NMClientAsync.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/async/NMClientAsync.java
index 8e90564..c94942a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/async/NMClientAsync.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/async/NMClientAsync.java
@@ -20,7 +20,6 @@ package org.apache.hadoop.yarn.client.api.async;
 
 import java.nio.ByteBuffer;
 import java.util.Map;
-import java.util.concurrent.ConcurrentMap;
 
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.classification.InterfaceAudience.Public;
@@ -32,7 +31,6 @@ import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
 import org.apache.hadoop.yarn.api.records.ContainerStatus;
 import org.apache.hadoop.yarn.api.records.NodeId;
 import org.apache.hadoop.yarn.api.records.Resource;
-import org.apache.hadoop.yarn.api.records.Token;
 import org.apache.hadoop.yarn.client.api.NMClient;
 import org.apache.hadoop.yarn.client.api.async.impl.NMClientAsyncImpl;
 import org.apache.hadoop.yarn.client.api.impl.NMClientImpl;
@@ -181,6 +179,38 @@ public abstract class NMClientAsync extends AbstractService {
 
   public abstract void increaseContainerResourceAsync(Container container);
 
+  /**
+   * <p>Re-Initialize the Container.</p>
+   *
+   * @param containerId the Id of the container to Re-Initialize.
+   * @param containerLaunchContex the updated ContainerLaunchContext.
+   * @param autoCommit commit re-initialization automatically ?
+   */
+  public abstract void reInitializeContainerAsync(ContainerId containerId,
+      ContainerLaunchContext containerLaunchContex, boolean autoCommit);
+
+  /**
+   * <p>Restart the specified container.</p>
+   *
+   * @param containerId the Id of the container to restart.
+   */
+  public abstract void restartContainerAsync(ContainerId containerId);
+
+  /**
+   * <p>Rollback last reInitialization of the specified container.</p>
+   *
+   * @param containerId the Id of the container to restart.
+   */
+  public abstract void rollbackLastReInitializationAsync(
+      ContainerId containerId);
+
+  /**
+   * <p>Commit last reInitialization of the specified container.</p>
+   *
+   * @param containerId the Id of the container to commit reInitialize.
+   */
+  public abstract void commitLastReInitializationAsync(ContainerId containerId);
+
   public abstract void stopContainerAsync(
       ContainerId containerId, NodeId nodeId);
 
@@ -303,6 +333,70 @@ public abstract class NMClientAsync extends AbstractService {
      */
     public abstract void onStopContainerError(
         ContainerId containerId, Throwable t);
+
+    /**
+     * Callback for container re-initialization request.
+     *
+     * @param containerId the Id of the container to be Re-Initialized.
+     */
+    public void onContainerReInitialize(ContainerId containerId) {}
+
+    /**
+     * Callback for container restart.
+     *
+     * @param containerId the Id of the container to restart.
+     */
+    public void onContainerRestart(ContainerId containerId) {}
+
+    /**
+     * Callback for rollback of last re-initialization.
+     *
+     * @param containerId the Id of the container to restart.
+     */
+    public void onRollbackLastReInitialization(ContainerId containerId) {}
+
+    /**
+     * Callback for commit of last re-initialization.
+     *
+     * @param containerId the Id of the container to commit reInitialize.
+     */
+    public void onCommitLastReInitialization(ContainerId containerId) {}
+
+    /**
+     * Error Callback for container re-initialization request.
+     *
+     * @param containerId the Id of the container to be Re-Initialized.
+     * @param t a Throwable.
+     */
+    public void onContainerReInitializeError(ContainerId containerId,
+        Throwable t) {}
+
+    /**
+     * Error Callback for container restart.
+     *
+     * @param containerId the Id of the container to restart.
+     * @param t a Throwable.
+     *
+     */
+    public void onContainerRestartError(ContainerId containerId, Throwable t) {}
+
+    /**
+     * Error Callback for rollback of last re-initialization.
+     *
+     * @param containerId the Id of the container to restart.
+     * @param t a Throwable.
+     */
+    public void onRollbackLastReInitializationError(ContainerId containerId,
+        Throwable t) {}
+
+    /**
+     * Error Callback for commit of last re-initialization.
+     *
+     * @param containerId the Id of the container to commit reInitialize.
+     * @param t a Throwable.
+     */
+    public void onCommitLastReInitializationError(ContainerId containerId,
+        Throwable t) {}
   }
 
   /**
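
The non-blocking client exposes the same upgrade operations as *Async calls whose outcomes are reported through the new AbstractCallbackHandler callbacks above. A caller-side sketch, assuming the NMClientAsync was created with a handler that extends AbstractCallbackHandler:

// Sketch only; success and failure arrive via onContainerReInitialize and
// onContainerReInitializeError on the registered callback handler.
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
import org.apache.hadoop.yarn.client.api.async.NMClientAsync;

class AsyncContainerUpgrader {
  void upgrade(NMClientAsync nmClientAsync, ContainerId containerId,
      ContainerLaunchContext newLaunchContext) {
    // Queue the re-initialization; there is no return value or exception to
    // inspect here, only the callbacks.
    nmClientAsync.reInitializeContainerAsync(containerId, newLaunchContext, false);
    // Later, from the handler or the AM's own logic:
    //   nmClientAsync.commitLastReInitializationAsync(containerId);
    // or, to revert to the previous version:
    //   nmClientAsync.rollbackLastReInitializationAsync(containerId);
  }
}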

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8236130b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/async/impl/NMClientAsyncImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/async/impl/NMClientAsyncImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/async/impl/NMClientAsyncImpl.java
index 575ce13..515a8e8 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/async/impl/NMClientAsyncImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/async/impl/NMClientAsyncImpl.java
@@ -282,6 +282,103 @@ public class NMClientAsyncImpl extends NMClientAsync {
     }
   }
 
+  @Override
+  public void reInitializeContainerAsync(ContainerId containerId,
+      ContainerLaunchContext containerLaunchContex, boolean autoCommit){
+    if (!(callbackHandler instanceof AbstractCallbackHandler)) {
+      LOG.error("Callback handler does not implement container re-initialize "
+          + "callback methods");
+      return;
+    }
+    AbstractCallbackHandler handler = (AbstractCallbackHandler) callbackHandler;
+    if (containers.get(containerId) == null) {
+      handler.onContainerReInitializeError(
+          containerId, RPCUtil.getRemoteException(
+              "Container " + containerId + " is not started"));
+    }
+    try {
+      events.put(new ReInitializeContainerEvevnt(containerId,
+          client.getNodeIdOfStartedContainer(containerId),
+          containerLaunchContex, autoCommit));
+    } catch (InterruptedException e) {
+      LOG.warn("Exception when scheduling the event of re-initializing of "
+          + "Container " + containerId);
+      handler.onContainerReInitializeError(containerId, e);
+    }
+  }
+
+  @Override
+  public void restartContainerAsync(ContainerId containerId){
+    if (!(callbackHandler instanceof AbstractCallbackHandler)) {
+      LOG.error("Callback handler does not implement container restart "
+          + "callback methods");
+      return;
+    }
+    AbstractCallbackHandler handler = (AbstractCallbackHandler) callbackHandler;
+    if (containers.get(containerId) == null) {
+      handler.onContainerRestartError(
+          containerId, RPCUtil.getRemoteException(
+              "Container " + containerId + " is not started"));
+    }
+    try {
+      events.put(new ContainerEvent(containerId,
+          client.getNodeIdOfStartedContainer(containerId),
+          null, ContainerEventType.RESTART_CONTAINER));
+    } catch (InterruptedException e) {
+      LOG.warn("Exception when scheduling the event of restart of "
+          + "Container " + containerId);
+      handler.onContainerRestartError(containerId, e);
+    }
+  }
+
+  @Override
+  public void rollbackLastReInitializationAsync(ContainerId containerId){
+    if (!(callbackHandler instanceof AbstractCallbackHandler)) {
+      LOG.error("Callback handler does not implement container rollback "
+          + "callback methods");
+      return;
+    }
+    AbstractCallbackHandler handler = (AbstractCallbackHandler) callbackHandler;
+    if (containers.get(containerId) == null) {
+      handler.onRollbackLastReInitializationError(
+          containerId, RPCUtil.getRemoteException(
+              "Container " + containerId + " is not started"));
+    }
+    try {
+      events.put(new ContainerEvent(containerId,
+          client.getNodeIdOfStartedContainer(containerId),
+          null, ContainerEventType.ROLLBACK_LAST_REINIT));
+    } catch (InterruptedException e) {
+      LOG.warn("Exception when scheduling the event Rollback re-initialization"
+          + " of Container " + containerId);
+      handler.onRollbackLastReInitializationError(containerId, e);
+    }
+  }
+
+  @Override
+  public void commitLastReInitializationAsync(ContainerId containerId){
+    if (!(callbackHandler instanceof AbstractCallbackHandler)) {
+      LOG.error("Callback handler does not implement container commit last " +
+          "re-initialization callback methods");
+      return;
+    }
+    AbstractCallbackHandler handler = (AbstractCallbackHandler) callbackHandler;
+    if (containers.get(containerId) == null) {
+      handler.onCommitLastReInitializationError(
+          containerId, RPCUtil.getRemoteException(
+              "Container " + containerId + " is not started"));
+    }
+    try {
+      events.put(new ContainerEvent(containerId,
+          client.getNodeIdOfStartedContainer(containerId),
+          null, ContainerEventType.COMMIT_LAST_REINT));
+    } catch (InterruptedException e) {
+      LOG.warn("Exception when scheduling the event Commit re-initialization"
+          + " of Container " + containerId);
+      handler.onCommitLastReInitializationError(containerId, e);
+    }
+  }
+
   public void stopContainerAsync(ContainerId containerId, NodeId nodeId) {
     if (containers.get(containerId) == null) {
       callbackHandler.onStopContainerError(containerId,
@@ -330,7 +427,11 @@ public class NMClientAsyncImpl extends NMClientAsync {
     START_CONTAINER,
     STOP_CONTAINER,
     QUERY_CONTAINER,
-    INCREASE_CONTAINER_RESOURCE
+    INCREASE_CONTAINER_RESOURCE,
+    REINITIALIZE_CONTAINER,
+    RESTART_CONTAINER,
+    ROLLBACK_LAST_REINIT,
+    COMMIT_LAST_REINT
   }
 
   protected static class ContainerEvent
@@ -381,6 +482,27 @@ public class NMClientAsyncImpl extends NMClientAsync {
     }
   }
 
+  protected static class ReInitializeContainerEvevnt extends ContainerEvent {
+    private ContainerLaunchContext containerLaunchContext;
+    private boolean autoCommit;
+
+    public ReInitializeContainerEvevnt(ContainerId containerId, NodeId nodeId,
+        ContainerLaunchContext containerLaunchContext, boolean autoCommit) {
+      super(containerId, nodeId, null,
+          ContainerEventType.REINITIALIZE_CONTAINER);
+      this.containerLaunchContext = containerLaunchContext;
+      this.autoCommit = autoCommit;
+    }
+
+    public ContainerLaunchContext getContainerLaunchContext() {
+      return containerLaunchContext;
+    }
+
+    public boolean isAutoCommit() {
+      return autoCommit;
+    }
+  }
+
   protected static class IncreaseContainerResourceEvent extends ContainerEvent {
     private Container container;
 
@@ -416,6 +538,25 @@ public class NMClientAsyncImpl extends NMClientAsync {
             .addTransition(ContainerState.RUNNING, ContainerState.RUNNING,
                 ContainerEventType.INCREASE_CONTAINER_RESOURCE,
                 new IncreaseContainerResourceTransition())
+
+            // Transitions for Container Upgrade
+            .addTransition(ContainerState.RUNNING,
+                EnumSet.of(ContainerState.RUNNING, ContainerState.FAILED),
+                ContainerEventType.REINITIALIZE_CONTAINER,
+                new ReInitializeContainerTransition())
+            .addTransition(ContainerState.RUNNING,
+                EnumSet.of(ContainerState.RUNNING, ContainerState.FAILED),
+                ContainerEventType.RESTART_CONTAINER,
+                new ReInitializeContainerTransition())
+            .addTransition(ContainerState.RUNNING,
+                EnumSet.of(ContainerState.RUNNING, ContainerState.FAILED),
+                ContainerEventType.ROLLBACK_LAST_REINIT,
+                new ReInitializeContainerTransition())
+            .addTransition(ContainerState.RUNNING,
+                EnumSet.of(ContainerState.RUNNING, ContainerState.FAILED),
+                ContainerEventType.COMMIT_LAST_REINT,
+                new ReInitializeContainerTransition())
+
             .addTransition(ContainerState.RUNNING,
                 EnumSet.of(ContainerState.DONE, ContainerState.FAILED),
                 ContainerEventType.STOP_CONTAINER,
@@ -431,6 +572,10 @@ public class NMClientAsyncImpl extends NMClientAsync {
             .addTransition(ContainerState.FAILED, ContainerState.FAILED,
                 EnumSet.of(ContainerEventType.START_CONTAINER,
                     ContainerEventType.STOP_CONTAINER,
+                    ContainerEventType.REINITIALIZE_CONTAINER,
+                    ContainerEventType.RESTART_CONTAINER,
+                    ContainerEventType.COMMIT_LAST_REINT,
+                    ContainerEventType.ROLLBACK_LAST_REINIT,
                     ContainerEventType.INCREASE_CONTAINER_RESOURCE));
 
     protected static class StartContainerTransition implements
@@ -529,6 +674,119 @@ public class NMClientAsyncImpl extends NMClientAsync {
       }
     }
 
+    protected static class ReInitializeContainerTransition implements
+        MultipleArcTransition<StatefulContainer, ContainerEvent,
+            ContainerState> {
+
+      @Override
+      public ContainerState transition(StatefulContainer container,
+          ContainerEvent containerEvent) {
+        ContainerId containerId = containerEvent.getContainerId();
+        AbstractCallbackHandler handler = (AbstractCallbackHandler) container
+                .nmClientAsync.getCallbackHandler();
+        Throwable handlerError = null;
+        try {
+          switch(containerEvent.getType()) {
+          case REINITIALIZE_CONTAINER:
+            if (!(containerEvent instanceof ReInitializeContainerEvevnt)) {
+              LOG.error("Unexpected Event.. [" +containerEvent.getType() + "]");
+              return ContainerState.FAILED;
+            }
+            ReInitializeContainerEvevnt rEvent =
+                (ReInitializeContainerEvevnt)containerEvent;
+            container.nmClientAsync.getClient().reInitializeContainer(
+                containerId, rEvent.getContainerLaunchContext(),
+                rEvent.isAutoCommit());
+            try {
+              handler.onContainerReInitialize(containerId);
+            } catch (Throwable tr) {
+              handlerError = tr;
+            }
+            break;
+          case RESTART_CONTAINER:
+            container.nmClientAsync.getClient().restartContainer(containerId);
+            try {
+              handler.onContainerRestart(containerId);
+            } catch (Throwable tr) {
+              handlerError = tr;
+            }
+            break;
+          case ROLLBACK_LAST_REINIT:
+            container.nmClientAsync.getClient()
+                .rollbackLastReInitialization(containerId);
+            try {
+              handler.onRollbackLastReInitialization(containerId);
+            } catch (Throwable tr) {
+              handlerError = tr;
+            }
+            break;
+          case COMMIT_LAST_REINT:
+            container.nmClientAsync.getClient()
+                .commitLastReInitialization(containerId);
+            try {
+              handler.onCommitLastReInitialization(containerId);
+            } catch (Throwable tr) {
+              handlerError = tr;
+            }
+            break;
+          default:
+            LOG.warn("Event of type [" + containerEvent.getType() + "] not" +
+                " expected here..");
+            break;
+          }
+          if (handlerError != null) {
+            LOG.info("Unchecked exception is thrown in handler for event ["
+                + containerEvent.getType() + "] for Container "
+                + containerId, handlerError);
+          }
+
+          return ContainerState.RUNNING;
+        } catch (Throwable t) {
+          switch(containerEvent.getType()) {
+          case REINITIALIZE_CONTAINER:
+            try {
+              handler.onContainerReInitializeError(containerId, t);
+            } catch (Throwable tr) {
+              handlerError = tr;
+            }
+            break;
+          case RESTART_CONTAINER:
+            try {
+              handler.onContainerRestartError(containerId, t);
+            } catch (Throwable tr) {
+              handlerError = tr;
+            }
+            break;
+          case ROLLBACK_LAST_REINIT:
+            try {
+              handler.onRollbackLastReInitializationError(containerId, t);
+            } catch (Throwable tr) {
+              handlerError = tr;
+            }
+            break;
+          case COMMIT_LAST_REINT:
+            try {
+              handler.onCommitLastReInitializationError(containerId, t);
+            } catch (Throwable tr) {
+              handlerError = tr;
+            }
+            break;
+          default:
+            LOG.warn("Event of type [" + containerEvent.getType() + "] not" +
+                " expected here..");
+            break;
+          }
+          if (handlerError != null) {
+            LOG.info("Unchecked exception is thrown in handler for event ["
+                + containerEvent.getType() + "] for Container "
+                + containerId, handlerError);
+          }
+        }
+
+        return ContainerState.FAILED;
+      }
+    }
+
     protected static class StopContainerTransition implements
         MultipleArcTransition<StatefulContainer, ContainerEvent,
         ContainerState> {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8236130b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/NMClientImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/NMClientImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/NMClientImpl.java
index dc92cda..c81d448 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/NMClientImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/NMClientImpl.java
@@ -33,10 +33,13 @@ import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.security.token.SecretManager.InvalidToken;
+import org.apache.hadoop.yarn.api.ContainerManagementProtocol;
 import org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusesRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusesResponse;
 import org.apache.hadoop.yarn.api.protocolrecords.IncreaseContainersResourceRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.IncreaseContainersResourceResponse;
+
+import org.apache.hadoop.yarn.api.protocolrecords.ReInitializeContainerRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.StartContainerRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.StartContainersRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.StartContainersResponse;
@@ -306,6 +309,84 @@ public class NMClientImpl extends NMClient {
     }
   }
 
+  @Override
+  public void reInitializeContainer(ContainerId containerId,
+      ContainerLaunchContext containerLaunchContext, boolean autoCommit)
+      throws YarnException, IOException {
+    ContainerManagementProtocolProxyData proxy = null;
+    StartedContainer container = startedContainers.get(containerId);
+    if (container != null) {
+      synchronized (container) {
+        proxy = cmProxy.getProxy(container.getNodeId().toString(), containerId);
+        try {
+          proxy.getContainerManagementProtocol().reInitializeContainer(
+              ReInitializeContainerRequest.newInstance(
+                  containerId, containerLaunchContext, autoCommit));
+        } finally {
+          if (proxy != null) {
+            cmProxy.mayBeCloseProxy(proxy);
+          }
+        }
+      }
+    } else {
+      throw new YarnException("Unknown container [" + containerId + "]");
+    }
+  }
+
+  @Override
+  public void restartContainer(ContainerId containerId)
+      throws YarnException, IOException {
+    restartCommitOrRollbackContainer(containerId, UpgradeOp.RESTART);
+  }
+
+  @Override
+  public void rollbackLastReInitialization(ContainerId containerId)
+      throws YarnException, IOException {
+    restartCommitOrRollbackContainer(containerId, UpgradeOp.ROLLBACK);
+  }
+
+  @Override
+  public void commitLastReInitialization(ContainerId containerId)
+      throws YarnException, IOException {
+    restartCommitOrRollbackContainer(containerId, UpgradeOp.COMMIT);
+  }
+
+
+  private void restartCommitOrRollbackContainer(ContainerId containerId,
+      UpgradeOp upgradeOp) throws YarnException, IOException {
+    ContainerManagementProtocolProxyData proxy = null;
+    StartedContainer container = startedContainers.get(containerId);
+    if (container != null) {
+      synchronized (container) {
+        proxy = cmProxy.getProxy(container.getNodeId().toString(), containerId);
+        ContainerManagementProtocol cmp =
+            proxy.getContainerManagementProtocol();
+        try {
+          switch (upgradeOp) {
+          case RESTART:
+            cmp.restartContainer(containerId);
+            break;
+          case COMMIT:
+            cmp.commitLastReInitialization(containerId);
+            break;
+          case ROLLBACK:
+            cmp.rollbackLastReInitialization(containerId);
+            break;
+          default:
+            // Should not happen..
+            break;
+          }
+        } finally {
+          if (proxy != null) {
+            cmProxy.mayBeCloseProxy(proxy);
+          }
+        }
+      }
+    } else {
+      throw new YarnException("Unknown container [" + containerId + "]");
+    }
+  }
+
   private void stopContainerInternal(ContainerId containerId, NodeId nodeId)
       throws IOException, YarnException {
     ContainerManagementProtocolProxyData proxy = null;
@@ -343,4 +424,14 @@ public class NMClientImpl extends NMClient {
       throw (IOException) t;
     }
   }
+
+  @Override
+  public NodeId getNodeIdOfStartedContainer(ContainerId containerId) {
+    StartedContainer container = startedContainers.get(containerId);
+    if (container != null) {
+      return container.getNodeId();
+    }
+    return null;
+  }
+
 }
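
For context on how these new synchronous NMClient calls fit together, here is a minimal, hypothetical sketch of an application-master-side helper that re-initializes a running container and then either commits or rolls the change back. Only the method names (reInitializeContainer, commitLastReInitialization, rollbackLastReInitialization) come from the diff above; the helper class, the health-check placeholder, and the error handling are illustrative assumptions, not part of this commit.

import java.io.IOException;

import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
import org.apache.hadoop.yarn.client.api.NMClient;
import org.apache.hadoop.yarn.exceptions.YarnException;

/**
 * Hypothetical helper (not part of this commit): upgrade a running container
 * in place, keeping the old launch context around until the caller decides
 * to commit or roll back.
 */
public final class ContainerUpgrader {

  private ContainerUpgrader() {
  }

  public static void upgrade(NMClient nmClient, ContainerId containerId,
      ContainerLaunchContext newLaunchContext)
      throws YarnException, IOException {
    // autoCommit=false keeps the previous launch context so an explicit
    // rollback is still possible after the re-initialization.
    nmClient.reInitializeContainer(containerId, newLaunchContext, false);
    try {
      // An application-specific health check would normally go here.
      nmClient.commitLastReInitialization(containerId);
    } catch (YarnException e) {
      // The upgrade did not take; return to the previous launch context.
      nmClient.rollbackLastReInitialization(containerId);
    }
  }
}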

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8236130b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/async/impl/TestNMClientAsync.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/async/impl/TestNMClientAsync.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/async/impl/TestNMClientAsync.java
index 48f3431..dda3eec 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/async/impl/TestNMClientAsync.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/async/impl/TestNMClientAsync.java
@@ -19,6 +19,7 @@
 package org.apache.hadoop.yarn.client.api.async.impl;
 
 import static org.mockito.Matchers.any;
+import static org.mockito.Matchers.anyBoolean;
 import static org.mockito.Mockito.doNothing;
 import static org.mockito.Mockito.doThrow;
 import static org.mockito.Mockito.mock;
@@ -27,6 +28,8 @@ import static org.mockito.Mockito.when;
 import java.io.IOException;
 import java.nio.ByteBuffer;
 import java.util.Collections;
+import java.util.EnumSet;
+import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Map;
 import java.util.Set;
@@ -37,6 +40,7 @@ import java.util.concurrent.atomic.AtomicInteger;
 import java.util.concurrent.atomic.AtomicIntegerArray;
 
 import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.util.Records;
 import org.junit.Assert;
 
 import org.apache.hadoop.conf.Configuration;
@@ -69,6 +73,22 @@ public class TestNMClientAsync {
   private NodeId nodeId;
   private Token containerToken;
 
+  enum OpsToTest {
+    START, QUERY, STOP, INCR, REINIT, RESTART, ROLLBACK, COMMIT
+  }
+
+  final static class TestData {
+    AtomicInteger success = new AtomicInteger(0);
+    AtomicInteger failure = new AtomicInteger(0);
+    final AtomicIntegerArray successArray;
+    final AtomicIntegerArray failureArray;
+
+    private TestData(int expectedSuccess, int expectedFailure) {
+      this.successArray = new AtomicIntegerArray(expectedSuccess);
+      this.failureArray = new AtomicIntegerArray(expectedFailure);
+    }
+  }
+
   @After
   public void teardown() {
     ServiceOperations.stop(asyncClient);
@@ -194,25 +214,7 @@ public class TestNMClientAsync {
     private int expectedSuccess;
     private int expectedFailure;
 
-    private AtomicInteger actualStartSuccess = new AtomicInteger(0);
-    private AtomicInteger actualStartFailure = new AtomicInteger(0);
-    private AtomicInteger actualQuerySuccess = new AtomicInteger(0);
-    private AtomicInteger actualQueryFailure = new AtomicInteger(0);
-    private AtomicInteger actualStopSuccess = new AtomicInteger(0);
-    private AtomicInteger actualStopFailure = new AtomicInteger(0);
-    private AtomicInteger actualIncreaseResourceSuccess =
-        new AtomicInteger(0);
-    private AtomicInteger actualIncreaseResourceFailure =
-        new AtomicInteger(0);
-
-    private AtomicIntegerArray actualStartSuccessArray;
-    private AtomicIntegerArray actualStartFailureArray;
-    private AtomicIntegerArray actualQuerySuccessArray;
-    private AtomicIntegerArray actualQueryFailureArray;
-    private AtomicIntegerArray actualStopSuccessArray;
-    private AtomicIntegerArray actualStopFailureArray;
-    private AtomicIntegerArray actualIncreaseResourceSuccessArray;
-    private AtomicIntegerArray actualIncreaseResourceFailureArray;
+    private final Map<OpsToTest, TestData> testMap = new HashMap<>();
 
     private Set<String> errorMsgs =
         Collections.synchronizedSet(new HashSet<String>());
@@ -221,16 +223,9 @@ public class TestNMClientAsync {
       this.expectedSuccess = expectedSuccess;
       this.expectedFailure = expectedFailure;
 
-      actualStartSuccessArray = new AtomicIntegerArray(expectedSuccess);
-      actualStartFailureArray = new AtomicIntegerArray(expectedFailure);
-      actualQuerySuccessArray = new AtomicIntegerArray(expectedSuccess);
-      actualQueryFailureArray = new AtomicIntegerArray(expectedFailure);
-      actualStopSuccessArray = new AtomicIntegerArray(expectedSuccess);
-      actualStopFailureArray = new AtomicIntegerArray(expectedFailure);
-      actualIncreaseResourceSuccessArray =
-          new AtomicIntegerArray(expectedSuccess);
-      actualIncreaseResourceFailureArray =
-          new AtomicIntegerArray(expectedFailure);
+      for (OpsToTest op : OpsToTest.values()) {
+        testMap.put(op, new TestData(expectedSuccess, expectedFailure));
+      }
     }
 
     @SuppressWarnings("deprecation")
@@ -243,8 +238,9 @@ public class TestNMClientAsync {
               " should throw the exception onContainerStarted");
           return;
         }
-        actualStartSuccess.addAndGet(1);
-        actualStartSuccessArray.set(containerId.getId(), 1);
+        TestData td = testMap.get(OpsToTest.START);
+        td.success.addAndGet(1);
+        td.successArray.set(containerId.getId(), 1);
 
         // move on to the following success tests
         asyncClient.getContainerStatusAsync(containerId, nodeId);
@@ -254,7 +250,28 @@ public class TestNMClientAsync {
         // containerId
         Container container = Container.newInstance(
             containerId, nodeId, null, null, null, containerToken);
-        asyncClient.increaseContainerResourceAsync(container);
+        int t = containerId.getId() % 5;
+        switch (t) {
+        case 0:
+          asyncClient.increaseContainerResourceAsync(container);
+          break;
+        case 1:
+          asyncClient.reInitializeContainerAsync(containerId,
+              recordFactory.newRecordInstance(ContainerLaunchContext.class),
+              true);
+          break;
+        case 2:
+          asyncClient.restartContainerAsync(containerId);
+          break;
+        case 3:
+          asyncClient.rollbackLastReInitializationAsync(containerId);
+          break;
+        case 4:
+          asyncClient.commitLastReInitializationAsync(containerId);
+          break;
+        default:
+          break;
+        }
       }
 
       // Shouldn't crash the test thread
@@ -270,8 +287,9 @@ public class TestNMClientAsync {
             " should throw the exception onContainerStatusReceived");
         return;
       }
-      actualQuerySuccess.addAndGet(1);
-      actualQuerySuccessArray.set(containerId.getId(), 1);
+      TestData td = testMap.get(OpsToTest.QUERY);
+      td.success.addAndGet(1);
+      td.successArray.set(containerId.getId(), 1);
       // move on to the following success tests
       // make sure we pass in the container with the same
       // containerId
@@ -292,8 +310,78 @@ public class TestNMClientAsync {
             " should throw the exception onContainerResourceIncreased");
         return;
       }
-      actualIncreaseResourceSuccess.addAndGet(1);
-      actualIncreaseResourceSuccessArray.set(containerId.getId(), 1);
+      TestData td = testMap.get(OpsToTest.INCR);
+      td.success.addAndGet(1);
+      td.successArray.set(containerId.getId(), 1);
+      // move on to the following success tests
+      asyncClient.reInitializeContainerAsync(containerId,
+          Records.newRecord(ContainerLaunchContext.class), true);
+      // throw a fake user exception, and shouldn't crash the test
+      throw new RuntimeException("Ignorable Exception");
+    }
+
+    @SuppressWarnings("deprecation")
+    @Override
+    public void onContainerReInitialize(ContainerId containerId) {
+      if (containerId.getId() >= expectedSuccess) {
+        errorMsgs.add("Container " + containerId +
+            " should throw the exception onContainerReInitialize");
+        return;
+      }
+      TestData td = testMap.get(OpsToTest.REINIT);
+      td.success.addAndGet(1);
+      td.successArray.set(containerId.getId(), 1);
+      // move on to the following success tests
+      asyncClient.restartContainerAsync(containerId);
+      // throw a fake user exception, and shouldn't crash the test
+      throw new RuntimeException("Ignorable Exception");
+    }
+
+    @SuppressWarnings("deprecation")
+    @Override
+    public void onContainerRestart(ContainerId containerId) {
+      if (containerId.getId() >= expectedSuccess) {
+        errorMsgs.add("Container " + containerId +
+            " should throw the exception onContainerReInitialize");
+        return;
+      }
+      TestData td = testMap.get(OpsToTest.RESTART);
+      td.success.addAndGet(1);
+      td.successArray.set(containerId.getId(), 1);
+      // move on to the following success tests
+      asyncClient.rollbackLastReInitializationAsync(containerId);
+      // throw a fake user exception, and shouldn't crash the test
+      throw new RuntimeException("Ignorable Exception");
+    }
+
+    @SuppressWarnings("deprecation")
+    @Override
+    public void onRollbackLastReInitialization(ContainerId containerId) {
+      if (containerId.getId() >= expectedSuccess) {
+        errorMsgs.add("Container " + containerId +
+            " should throw the exception onContainerReInitialize");
+        return;
+      }
+      TestData td = testMap.get(OpsToTest.ROLLBACK);
+      td.success.addAndGet(1);
+      td.successArray.set(containerId.getId(), 1);
+      // move on to the following success tests
+      asyncClient.commitLastReInitializationAsync(containerId);
+      // throw a fake user exception, and shouldn't crash the test
+      throw new RuntimeException("Ignorable Exception");
+    }
+
+    @SuppressWarnings("deprecation")
+    @Override
+    public void onCommitLastReInitialization(ContainerId containerId) {
+      if (containerId.getId() >= expectedSuccess) {
+        errorMsgs.add("Container " + containerId +
+            " should throw the exception onContainerReInitialize");
+        return;
+      }
+      TestData td = testMap.get(OpsToTest.COMMIT);
+      td.success.addAndGet(1);
+      td.successArray.set(containerId.getId(), 1);
       // move on to the following success tests
       asyncClient.stopContainerAsync(containerId, nodeId);
       // throw a fake user exception, and shouldn't crash the test
@@ -308,8 +396,9 @@ public class TestNMClientAsync {
             " should throw the exception onContainerStopped");
         return;
       }
-      actualStopSuccess.addAndGet(1);
-      actualStopSuccessArray.set(containerId.getId(), 1);
+      TestData td = testMap.get(OpsToTest.STOP);
+      td.success.addAndGet(1);
+      td.successArray.set(containerId.getId(), 1);
 
       // Shouldn't crash the test thread
       throw new RuntimeException("Ignorable Exception");
@@ -330,8 +419,9 @@ public class TestNMClientAsync {
             " shouldn't throw the exception onStartContainerError");
         return;
       }
-      actualStartFailure.addAndGet(1);
-      actualStartFailureArray.set(containerId.getId() - expectedSuccess, 1);
+      TestData td = testMap.get(OpsToTest.START);
+      td.failure.addAndGet(1);
+      td.failureArray.set(containerId.getId() - expectedSuccess, 1);
       // move on to the following failure tests
       asyncClient.getContainerStatusAsync(containerId, nodeId);
 
@@ -348,8 +438,9 @@ public class TestNMClientAsync {
             " shouldn't throw the exception onIncreaseContainerResourceError");
         return;
       }
-      actualIncreaseResourceFailure.addAndGet(1);
-      actualIncreaseResourceFailureArray.set(
+      TestData td = testMap.get(OpsToTest.INCR);
+      td.failure.addAndGet(1);
+      td.failureArray.set(
           containerId.getId() - expectedSuccess - expectedFailure, 1);
       // increase container resource error should NOT change the
       // the container status to FAILED
@@ -361,6 +452,102 @@ public class TestNMClientAsync {
 
     @SuppressWarnings("deprecation")
     @Override
+    public void onContainerReInitializeError(ContainerId containerId,
+        Throwable t) {
+      if (containerId.getId() < expectedSuccess + expectedFailure) {
+        errorMsgs.add("Container " + containerId +
+            " shouldn't throw the exception onContainerReInitializeError");
+        return;
+      }
+      TestData td = testMap.get(OpsToTest.REINIT);
+      td.failure.addAndGet(1);
+      td.failureArray.set(
+          containerId.getId() - expectedSuccess - expectedFailure, 1);
+
+      // increment the stop counters here.. since the container will fail
+      td = testMap.get(OpsToTest.STOP);
+      td.failure.addAndGet(1);
+      td.failureArray.set(
+          containerId.getId() - expectedSuccess - expectedFailure, 1);
+      // reInit container changes the container status to FAILED
+      // Shouldn't crash the test thread
+      throw new RuntimeException("Ignorable Exception");
+    }
+
+    @SuppressWarnings("deprecation")
+    @Override
+    public void onContainerRestartError(ContainerId containerId, Throwable t) {
+      if (containerId.getId() < expectedSuccess + expectedFailure) {
+        errorMsgs.add("Container " + containerId +
+            " shouldn't throw the exception onContainerRestartError");
+        return;
+      }
+      TestData td = testMap.get(OpsToTest.RESTART);
+      td.failure.addAndGet(1);
+      td.failureArray.set(
+          containerId.getId() - expectedSuccess - expectedFailure, 1);
+
+      // increment the stop counters here.. since the container will fail
+      td = testMap.get(OpsToTest.STOP);
+      td.failure.addAndGet(1);
+      td.failureArray.set(
+          containerId.getId() - expectedSuccess - expectedFailure, 1);
+      // restart container changes the container status to FAILED
+      // Shouldn't crash the test thread
+      throw new RuntimeException("Ignorable Exception");
+    }
+
+    @SuppressWarnings("deprecation")
+    @Override
+    public void onRollbackLastReInitializationError(ContainerId containerId,
+        Throwable t) {
+      if (containerId.getId() < expectedSuccess + expectedFailure) {
+        errorMsgs.add("Container " + containerId +
+            " shouldn't throw the exception" +
+            " onRollbackLastReInitializationError");
+        return;
+      }
+      TestData td = testMap.get(OpsToTest.ROLLBACK);
+      td.failure.addAndGet(1);
+      td.failureArray.set(
+          containerId.getId() - expectedSuccess - expectedFailure, 1);
+
+      // increment the stop counters here.. since the container will fail
+      td = testMap.get(OpsToTest.STOP);
+      td.failure.addAndGet(1);
+      td.failureArray.set(
+          containerId.getId() - expectedSuccess - expectedFailure, 1);
+      // rollback container changes the container status to FAILED
+      // Shouldn't crash the test thread
+      throw new RuntimeException("Ignorable Exception");
+    }
+
+    @SuppressWarnings("deprecation")
+    @Override
+    public void onCommitLastReInitializationError(ContainerId containerId,
+        Throwable t) {
+      if (containerId.getId() < expectedSuccess + expectedFailure) {
+        errorMsgs.add("Container " + containerId +
+            " shouldn't throw the exception onCommitLastReInitializationError");
+        return;
+      }
+      TestData td = testMap.get(OpsToTest.COMMIT);
+      td.failure.addAndGet(1);
+      td.failureArray.set(
+          containerId.getId() - expectedSuccess - expectedFailure, 1);
+
+      // increment the stop counters here.. since the container will fail
+      td = testMap.get(OpsToTest.STOP);
+      td.failure.addAndGet(1);
+      td.failureArray.set(
+          containerId.getId() - expectedSuccess - expectedFailure, 1);
+      // commit container changes the container status to FAILED
+      // Shouldn't crash the test thread
+      throw new RuntimeException("Ignorable Exception");
+    }
+
+    @SuppressWarnings("deprecation")
+    @Override
     public void onStopContainerError(ContainerId containerId, Throwable t) {
       if (t instanceof RuntimeException) {
         errorMsgs.add("Unexpected throwable from callback functions should be" +
@@ -371,9 +558,9 @@ public class TestNMClientAsync {
             " shouldn't throw the exception onStopContainerError");
         return;
       }
-
-      actualStopFailure.addAndGet(1);
-      actualStopFailureArray.set(
+      TestData td = testMap.get(OpsToTest.STOP);
+      td.failure.addAndGet(1);
+      td.failureArray.set(
           containerId.getId() - expectedSuccess - expectedFailure, 1);
 
       // Shouldn't crash the test thread
@@ -393,8 +580,9 @@ public class TestNMClientAsync {
             " shouldn't throw the exception onGetContainerStatusError");
         return;
       }
-      actualQueryFailure.addAndGet(1);
-      actualQueryFailureArray.set(containerId.getId() - expectedSuccess, 1);
+      TestData td = testMap.get(OpsToTest.QUERY);
+      td.failure.addAndGet(1);
+      td.failureArray.set(containerId.getId() - expectedSuccess, 1);
 
       // Shouldn't crash the test thread
       throw new RuntimeException("Ignorable Exception");
@@ -402,44 +590,67 @@ public class TestNMClientAsync {
 
     public boolean isAllSuccessCallsExecuted() {
       boolean isAllSuccessCallsExecuted =
-          actualStartSuccess.get() == expectedSuccess &&
-          actualQuerySuccess.get() == expectedSuccess &&
-          actualIncreaseResourceSuccess.get() == expectedSuccess &&
-          actualStopSuccess.get() == expectedSuccess;
+          testMap.get(OpsToTest.START).success.get() == expectedSuccess &&
+              testMap.get(OpsToTest.QUERY).success.get() == expectedSuccess &&
+              testMap.get(OpsToTest.INCR).success.get() == expectedSuccess &&
+              testMap.get(OpsToTest.REINIT).success.get() == expectedSuccess &&
+              testMap.get(OpsToTest.RESTART).success.get() == expectedSuccess &&
+              testMap.get(OpsToTest.ROLLBACK).success.get() ==
+                  expectedSuccess &&
+              testMap.get(OpsToTest.COMMIT).success.get() == expectedSuccess &&
+              testMap.get(OpsToTest.STOP).success.get() == expectedSuccess;
       if (isAllSuccessCallsExecuted) {
-        assertAtomicIntegerArray(actualStartSuccessArray);
-        assertAtomicIntegerArray(actualQuerySuccessArray);
-        assertAtomicIntegerArray(actualIncreaseResourceSuccessArray);
-        assertAtomicIntegerArray(actualStopSuccessArray);
+        assertAtomicIntegerArray(testMap.get(OpsToTest.START).successArray);
+        assertAtomicIntegerArray(testMap.get(OpsToTest.QUERY).successArray);
+        assertAtomicIntegerArray(testMap.get(OpsToTest.INCR).successArray);
+        assertAtomicIntegerArray(testMap.get(OpsToTest.REINIT).successArray);
+        assertAtomicIntegerArray(testMap.get(OpsToTest.RESTART).successArray);
+        assertAtomicIntegerArray(testMap.get(OpsToTest.ROLLBACK).successArray);
+        assertAtomicIntegerArray(testMap.get(OpsToTest.COMMIT).successArray);
+        assertAtomicIntegerArray(testMap.get(OpsToTest.STOP).successArray);
       }
       return isAllSuccessCallsExecuted;
     }
 
     public boolean isStartAndQueryFailureCallsExecuted() {
       boolean isStartAndQueryFailureCallsExecuted =
-          actualStartFailure.get() == expectedFailure &&
-          actualQueryFailure.get() == expectedFailure;
+          testMap.get(OpsToTest.START).failure.get() == expectedFailure &&
+              testMap.get(OpsToTest.QUERY).failure.get() == expectedFailure;
       if (isStartAndQueryFailureCallsExecuted) {
-        assertAtomicIntegerArray(actualStartFailureArray);
-        assertAtomicIntegerArray(actualQueryFailureArray);
+        assertAtomicIntegerArray(testMap.get(OpsToTest.START).failureArray);
+        assertAtomicIntegerArray(testMap.get(OpsToTest.QUERY).failureArray);
       }
       return isStartAndQueryFailureCallsExecuted;
     }
 
     public boolean isIncreaseResourceFailureCallsExecuted() {
       boolean isIncreaseResourceFailureCallsExecuted =
-          actualIncreaseResourceFailure.get() == expectedFailure;
+          testMap.get(OpsToTest.INCR).failure.get()
+              + testMap.get(OpsToTest.REINIT).failure.get()
+              + testMap.get(OpsToTest.RESTART).failure.get()
+              + testMap.get(OpsToTest.ROLLBACK).failure.get()
+              + testMap.get(OpsToTest.COMMIT).failure.get()
+              == expectedFailure;
       if (isIncreaseResourceFailureCallsExecuted) {
-        assertAtomicIntegerArray(actualIncreaseResourceFailureArray);
+        AtomicIntegerArray testArray =
+            new AtomicIntegerArray(
+                testMap.get(OpsToTest.INCR).failureArray.length());
+        for (int i = 0; i < testArray.length(); i++) {
+          for (OpsToTest op : EnumSet.of(OpsToTest.REINIT, OpsToTest.RESTART,
+              OpsToTest.ROLLBACK, OpsToTest.COMMIT, OpsToTest.INCR)) {
+            testArray.addAndGet(i, testMap.get(op).failureArray.get(i));
+          }
+        }
+        assertAtomicIntegerArray(testArray);
       }
       return isIncreaseResourceFailureCallsExecuted;
     }
 
     public boolean isStopFailureCallsExecuted() {
       boolean isStopFailureCallsExecuted =
-          actualStopFailure.get() == expectedFailure;
+          testMap.get(OpsToTest.STOP).failure.get() == expectedFailure;
       if (isStopFailureCallsExecuted) {
-        assertAtomicIntegerArray(actualStopFailureArray);
+        assertAtomicIntegerArray(testMap.get(OpsToTest.STOP).failureArray);
       }
       return isStopFailureCallsExecuted;
     }
@@ -464,6 +675,14 @@ public class TestNMClientAsync {
                 recordFactory.newRecordInstance(ContainerStatus.class));
         doNothing().when(client).increaseContainerResource(
             any(Container.class));
+        doNothing().when(client).reInitializeContainer(
+            any(ContainerId.class), any(ContainerLaunchContext.class),
+            anyBoolean());
+        doNothing().when(client).restartContainer(any(ContainerId.class));
+        doNothing().when(client).rollbackLastReInitialization(
+            any(ContainerId.class));
+        doNothing().when(client).commitLastReInitialization(
+            any(ContainerId.class));
         doNothing().when(client).stopContainer(any(ContainerId.class),
             any(NodeId.class));
         break;
@@ -485,9 +704,23 @@ public class TestNMClientAsync {
                 recordFactory.newRecordInstance(ContainerStatus.class));
         doThrow(RPCUtil.getRemoteException("Increase Resource Exception"))
             .when(client).increaseContainerResource(any(Container.class));
+        doThrow(RPCUtil.getRemoteException("ReInitialize Exception"))
+            .when(client).reInitializeContainer(
+            any(ContainerId.class), any(ContainerLaunchContext.class),
+            anyBoolean());
+        doThrow(RPCUtil.getRemoteException("Restart Exception"))
+            .when(client).restartContainer(any(ContainerId.class));
+        doThrow(RPCUtil.getRemoteException("Rollback upgrade Exception"))
+            .when(client).rollbackLastReInitialization(
+            any(ContainerId.class));
+        doThrow(RPCUtil.getRemoteException("Commit upgrade Exception"))
+            .when(client).commitLastReInitialization(
+            any(ContainerId.class));
         doThrow(RPCUtil.getRemoteException("Stop Exception")).when(client)
             .stopContainer(any(ContainerId.class), any(NodeId.class));
     }
+    when(client.getNodeIdOfStartedContainer(any(ContainerId.class)))
+        .thenReturn(NodeId.newInstance("localhost", 0));
     return client;
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8236130b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestNMClient.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestNMClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestNMClient.java
index d211d6d..1034f7e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestNMClient.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestNMClient.java
@@ -45,6 +45,7 @@ import org.apache.hadoop.yarn.api.records.ApplicationReport;
 import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
 import org.apache.hadoop.yarn.api.records.Container;
 import org.apache.hadoop.yarn.api.records.ContainerExitStatus;
+import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
 import org.apache.hadoop.yarn.api.records.ContainerState;
 import org.apache.hadoop.yarn.api.records.ContainerStatus;
@@ -69,6 +70,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptState;
 import org.apache.hadoop.yarn.util.Records;
 import org.junit.After;
+import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
 
@@ -310,6 +312,36 @@ public class TestNMClient {
             e.getMessage().contains("is not handled by this NodeManager"));
       }
 
+      // restart shouldn't be called before startContainer,
+      // otherwise, NodeManager cannot find the container
+      try {
+        nmClient.restartContainer(container.getId());
+        fail("Exception is expected");
+      } catch (YarnException e) {
+        assertTrue("The thrown exception is not expected",
+            e.getMessage().contains("Unknown container"));
+      }
+
+      // rollback shouldn't be called before startContainer,
+      // otherwise, NodeManager cannot find the container
+      try {
+        nmClient.rollbackLastReInitialization(container.getId());
+        fail("Exception is expected");
+      } catch (YarnException e) {
+        assertTrue("The thrown exception is not expected",
+            e.getMessage().contains("Unknown container"));
+      }
+
+      // commit shouldn't be called before startContainer,
+      // otherwise, NodeManager cannot find the container
+      try {
+        nmClient.commitLastReInitialization(container.getId());
+        fail("Exception is expected");
+      } catch (YarnException e) {
+        assertTrue("The thrown exception is not expected",
+            e.getMessage().contains("Unknown container"));
+      }
+
       // stopContainer shouldn't be called before startContainer,
       // otherwise, an exception will be thrown
       try {
@@ -353,6 +385,28 @@ public class TestNMClient {
         // Test increase container API and make sure requests can reach NM
         testIncreaseContainerResource(container);
 
+        testRestartContainer(container.getId());
+        testGetContainerStatus(container, i, ContainerState.RUNNING,
+            "will be Restarted", Arrays.asList(new Integer[] {-1000}));
+
+        if (i % 2 == 0) {
+          testReInitializeContainer(container.getId(), clc, false);
+          testGetContainerStatus(container, i, ContainerState.RUNNING,
+              "will be Re-initialized", Arrays.asList(new Integer[] {-1000}));
+          testRollbackContainer(container.getId(), false);
+          testGetContainerStatus(container, i, ContainerState.RUNNING,
+              "will be Rolled-back", Arrays.asList(new Integer[] {-1000}));
+          testCommitContainer(container.getId(), true);
+          testReInitializeContainer(container.getId(), clc, false);
+          testCommitContainer(container.getId(), false);
+        } else {
+          testReInitializeContainer(container.getId(), clc, true);
+          testGetContainerStatus(container, i, ContainerState.RUNNING,
+              "will be Re-initialized", Arrays.asList(new Integer[] {-1000}));
+          testRollbackContainer(container.getId(), true);
+          testCommitContainer(container.getId(), true);
+        }
+
         try {
           nmClient.stopContainer(container.getId(), container.getNodeId());
         } catch (YarnException e) {
@@ -432,4 +486,91 @@ public class TestNMClient {
       }
     }
   }
+
+  private void testRestartContainer(ContainerId containerId)
+      throws YarnException, IOException {
+    try {
+      sleep(250);
+      nmClient.restartContainer(containerId);
+      sleep(250);
+    } catch (YarnException e) {
+      // NM container will only be in SCHEDULED state, so expect the restart
+      // action to fail.
+      if (!e.getMessage().contains(
+          "can only be changed when a container is in RUNNING state")) {
+        throw (AssertionError)
+            (new AssertionError("Exception is not expected: " + e)
+                .initCause(e));
+      }
+    }
+  }
+
+  private void testRollbackContainer(ContainerId containerId,
+      boolean notRollbackable) throws YarnException, IOException {
+    try {
+      sleep(250);
+      nmClient.rollbackLastReInitialization(containerId);
+      if (notRollbackable) {
+        fail("Should not be able to rollback..");
+      }
+      sleep(250);
+    } catch (YarnException e) {
+      // NM container will only be in SCHEDULED state, so expect the rollback
+      // action to fail.
+      if (notRollbackable) {
+        Assert.assertTrue(e.getMessage().contains(
+            "Nothing to rollback to"));
+      } else {
+        if (!e.getMessage().contains(
+            "can only be changed when a container is in RUNNING state")) {
+          throw (AssertionError)
+              (new AssertionError("Exception is not expected: " + e)
+                  .initCause(e));
+        }
+      }
+    }
+  }
+
+  private void testCommitContainer(ContainerId containerId,
+      boolean notCommittable) throws YarnException, IOException {
+    try {
+      nmClient.commitLastReInitialization(containerId);
+      if (notCommittable) {
+        fail("Should not be able to commit..");
+      }
+    } catch (YarnException e) {
+      // NM container will only be in SCHEDULED state, so expect the commit
+      // action to fail.
+      if (notCommittable) {
+        Assert.assertTrue(e.getMessage().contains(
+            "Nothing to Commit"));
+      } else {
+        if (!e.getMessage().contains(
+            "can only be changed when a container is in RUNNING state")) {
+          throw (AssertionError)
+              (new AssertionError("Exception is not expected: " + e)
+                  .initCause(e));
+        }
+      }
+    }
+  }
+
+  private void testReInitializeContainer(ContainerId containerId,
+      ContainerLaunchContext clc, boolean autoCommit)
+      throws YarnException, IOException {
+    try {
+      sleep(250);
+      nmClient.reInitializeContainer(containerId, clc, autoCommit);
+      sleep(250);
+    } catch (YarnException e) {
+      // NM container will only be in SCHEDULED state, so expect the re-initialize
+      // action to fail.
+      if (!e.getMessage().contains(
+          "can only be changed when a container is in RUNNING state")) {
+        throw (AssertionError)
+            (new AssertionError("Exception is not expected: " + e)
+                .initCause(e));
+      }
+    }
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8236130b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
index 055e12c..46f8fa0 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
@@ -22,8 +22,10 @@ import java.io.File;
 import java.io.IOException;
 import java.net.URISyntaxException;
 import java.nio.ByteBuffer;
+import java.text.SimpleDateFormat;
 import java.util.Arrays;
 import java.util.Collection;
+import java.util.Date;
 import java.util.EnumSet;
 import java.util.List;
 import java.util.Map;
@@ -137,6 +139,8 @@ public class ContainerImpl implements Container {
     }
   }
 
+  private final SimpleDateFormat dateFormat =
+      new SimpleDateFormat("yyyy-MM-dd HH:mm:ss.SSS");
   private final Lock readLock;
   private final Lock writeLock;
   private final Dispatcher dispatcher;
@@ -767,7 +771,7 @@ public class ContainerImpl implements Container {
 
   private void addDiagnostics(String... diags) {
     for (String s : diags) {
-      this.diagnostics.append(s);
+      this.diagnostics.append("[" + dateFormat.format(new Date()) + "]" + s);
     }
     if (diagnostics.length() > diagnosticsMaxSize) {
       diagnostics.delete(0, diagnostics.length() - diagnosticsMaxSize);
@@ -991,6 +995,7 @@ public class ContainerImpl implements Container {
         // We also need to make sure that if Rollback is possible, the
         // rollback state should be retained in the
         // oldLaunchContext and oldResourceSet
+        container.addDiagnostics("Container will be Restarted.\n");
         return new ReInitializationContext(
             container.launchContext, container.resourceSet,
             container.canRollback() ?
@@ -998,6 +1003,7 @@ public class ContainerImpl implements Container {
             container.canRollback() ?
                 container.reInitContext.oldResourceSet : null);
       } else {
+        container.addDiagnostics("Container will be Re-initialized.\n");
         return new ReInitializationContext(
             reInitEvent.getReInitLaunchContext(),
             reInitEvent.getResourceSet(),
@@ -1018,6 +1024,7 @@ public class ContainerImpl implements Container {
     @Override
     protected ReInitializationContext createReInitContext(ContainerImpl
         container, ContainerEvent event) {
+      container.addDiagnostics("Container upgrade will be Rolled-back.\n");
       LOG.warn("Container [" + container.getContainerId() + "]" +
           " about to be explicitly Rolledback !!");
       return container.reInitContext.createContextForRollback();
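
The addDiagnostics() change above stamps every diagnostics entry with the current wall-clock time while keeping the existing cap on the buffer size. Below is a standalone sketch of that pattern; the class and field names are illustrative, and the sketch uses synchronized methods because SimpleDateFormat is not thread-safe (ContainerImpl itself serializes these updates through its own write lock).

import java.text.SimpleDateFormat;
import java.util.Date;

/** Illustrative, size-capped diagnostics buffer with timestamped entries. */
public class DiagnosticsBuffer {
  private final SimpleDateFormat dateFormat =
      new SimpleDateFormat("yyyy-MM-dd HH:mm:ss.SSS");
  private final StringBuilder diagnostics = new StringBuilder();
  private final int maxSize;

  public DiagnosticsBuffer(int maxSize) {
    this.maxSize = maxSize;
  }

  /** Append entries, each prefixed with the current time, then trim to maxSize. */
  public synchronized void add(String... diags) {
    for (String s : diags) {
      diagnostics.append("[" + dateFormat.format(new Date()) + "]" + s);
    }
    if (diagnostics.length() > maxSize) {
      // Keep only the most recent maxSize characters, as ContainerImpl does.
      diagnostics.delete(0, diagnostics.length() - maxSize);
    }
  }

  public synchronized String get() {
    return diagnostics.toString();
  }
}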




[26/29] hadoop git commit: HDFS-11663. [READ] Fix NullPointerException in ProvidedBlocksBuilder

Posted by vi...@apache.org.
HDFS-11663. [READ] Fix NullPointerException in ProvidedBlocksBuilder


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/eee69dd3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/eee69dd3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/eee69dd3

Branch: refs/heads/HDFS-9806
Commit: eee69dd3ac2243255fb084ec162ae8d2c2c8b30b
Parents: 20f2d7f
Author: Virajith Jalaparti <vi...@apache.org>
Authored: Thu May 4 13:06:53 2017 -0700
Committer: Virajith Jalaparti <vi...@apache.org>
Committed: Wed May 17 12:41:52 2017 -0700

----------------------------------------------------------------------
 .../blockmanagement/ProvidedStorageMap.java     | 40 ++++++-----
 .../TestNameNodeProvidedImplementation.java     | 70 +++++++++++++++-----
 2 files changed, 77 insertions(+), 33 deletions(-)
----------------------------------------------------------------------
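
The NullPointerException had two triggers: build() always tried to bind the shadow PROVIDED location even when no PROVIDED replica was present, and choose() dereferenced a possibly-null client descriptor. The diff below adds a hasProvidedLocations flag and null checks for both. As a rough, simplified illustration of the guarded selection logic (types are reduced to strings here; the real code works with DatanodeDescriptor objects inside ProvidedStorageMap):

import java.util.List;
import java.util.Map;

/**
 * Simplified, illustrative version of the null-tolerant node selection in
 * this fix: prefer an exact match for the client, never dereference a null
 * client, and fall back to any non-excluded node.
 */
public final class ProvidedNodeChooser {

  private final Map<String, String> nodesByUuid;

  public ProvidedNodeChooser(Map<String, String> nodesByUuid) {
    this.nodesByUuid = nodesByUuid;
  }

  public String choose(String clientUuid, List<String> excludedUuids) {
    // The client may be null, e.g. a request without a locality hint.
    String dn = clientUuid != null ? nodesByUuid.get(clientUuid) : null;
    if (dn == null || excludedUuids.contains(clientUuid)) {
      dn = chooseAnyExcept(excludedUuids);
    }
    return dn;
  }

  private String chooseAnyExcept(List<String> excludedUuids) {
    for (Map.Entry<String, String> e : nodesByUuid.entrySet()) {
      if (!excludedUuids.contains(e.getKey())) {
        return e.getValue();
      }
    }
    return null;
  }
}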


http://git-wip-us.apache.org/repos/asf/hadoop/blob/eee69dd3/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ProvidedStorageMap.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ProvidedStorageMap.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ProvidedStorageMap.java
index d222344..518b7e9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ProvidedStorageMap.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ProvidedStorageMap.java
@@ -134,11 +134,13 @@ public class ProvidedStorageMap {
   class ProvidedBlocksBuilder extends LocatedBlockBuilder {
 
     private ShadowDatanodeInfoWithStorage pending;
+    private boolean hasProvidedLocations;
 
     ProvidedBlocksBuilder(int maxBlocks) {
       super(maxBlocks);
       pending = new ShadowDatanodeInfoWithStorage(
           providedDescriptor, storageId);
+      hasProvidedLocations = false;
     }
 
     @Override
@@ -154,6 +156,7 @@ public class ProvidedStorageMap {
         types[i] = storages[i].getStorageType();
         if (StorageType.PROVIDED.equals(storages[i].getStorageType())) {
           locs[i] = pending;
+          hasProvidedLocations = true;
         } else {
           locs[i] = new DatanodeInfoWithStorage(
               storages[i].getDatanodeDescriptor(), sids[i], types[i]);
@@ -165,25 +168,28 @@ public class ProvidedStorageMap {
     @Override
     LocatedBlocks build(DatanodeDescriptor client) {
       // TODO: to support multiple provided storages, need to pass/maintain map
-      // set all fields of pending DatanodeInfo
-      List<String> excludedUUids = new ArrayList<String>();
-      for (LocatedBlock b: blocks) {
-        DatanodeInfo[] infos = b.getLocations();
-        StorageType[] types = b.getStorageTypes();
-
-        for (int i = 0; i < types.length; i++) {
-          if (!StorageType.PROVIDED.equals(types[i])) {
-            excludedUUids.add(infos[i].getDatanodeUuid());
+      if (hasProvidedLocations) {
+        // set all fields of pending DatanodeInfo
+        List<String> excludedUUids = new ArrayList<String>();
+        for (LocatedBlock b : blocks) {
+          DatanodeInfo[] infos = b.getLocations();
+          StorageType[] types = b.getStorageTypes();
+
+          for (int i = 0; i < types.length; i++) {
+            if (!StorageType.PROVIDED.equals(types[i])) {
+              excludedUUids.add(infos[i].getDatanodeUuid());
+            }
           }
         }
-      }
 
-      DatanodeDescriptor dn = providedDescriptor.choose(client, excludedUUids);
-      if (dn == null) {
-        dn = providedDescriptor.choose(client);
+        DatanodeDescriptor dn =
+                providedDescriptor.choose(client, excludedUUids);
+        if (dn == null) {
+          dn = providedDescriptor.choose(client);
+        }
+        pending.replaceInternal(dn);
       }
 
-      pending.replaceInternal(dn);
       return new LocatedBlocks(
           flen, isUC, blocks, last, lastComplete, feInfo, ecPolicy);
     }
@@ -278,7 +284,8 @@ public class ProvidedStorageMap {
 
     DatanodeDescriptor choose(DatanodeDescriptor client) {
       // exact match for now
-      DatanodeDescriptor dn = dns.get(client.getDatanodeUuid());
+      DatanodeDescriptor dn = client != null ?
+              dns.get(client.getDatanodeUuid()) : null;
       if (null == dn) {
         dn = chooseRandom();
       }
@@ -288,7 +295,8 @@ public class ProvidedStorageMap {
     DatanodeDescriptor choose(DatanodeDescriptor client,
         List<String> excludedUUids) {
       // exact match for now
-      DatanodeDescriptor dn = dns.get(client.getDatanodeUuid());
+      DatanodeDescriptor dn = client != null ?
+              dns.get(client.getDatanodeUuid()) : null;
 
       if (null == dn || excludedUUids.contains(client.getDatanodeUuid())) {
         dn = null;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/eee69dd3/hadoop-tools/hadoop-fs2img/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeProvidedImplementation.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-fs2img/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeProvidedImplementation.java b/hadoop-tools/hadoop-fs2img/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeProvidedImplementation.java
index 3b75806..5062439 100644
--- a/hadoop-tools/hadoop-fs2img/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeProvidedImplementation.java
+++ b/hadoop-tools/hadoop-fs2img/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeProvidedImplementation.java
@@ -35,6 +35,7 @@ import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockFormatProvider;
@@ -69,6 +70,10 @@ public class TestNameNodeProvidedImplementation {
   final Path BLOCKFILE = new Path(NNDIRPATH, "blocks.csv");
   final String SINGLEUSER = "usr1";
   final String SINGLEGROUP = "grp1";
+  private final int numFiles = 10;
+  private final String filePrefix = "file";
+  private final String fileSuffix = ".dat";
+  private final int baseFileLen = 1024;
 
   Configuration conf;
   MiniDFSCluster cluster;
@@ -114,15 +119,16 @@ public class TestNameNodeProvidedImplementation {
     }
 
     // create 10 random files under BASE
-    for (int i=0; i < 10; i++) {
-      File newFile = new File(new Path(NAMEPATH, "file" + i).toUri());
+    for (int i=0; i < numFiles; i++) {
+      File newFile = new File(
+          new Path(NAMEPATH, filePrefix + i + fileSuffix).toUri());
       if(!newFile.exists()) {
         try {
           LOG.info("Creating " + newFile.toString());
           newFile.createNewFile();
           Writer writer = new OutputStreamWriter(
               new FileOutputStream(newFile.getAbsolutePath()), "utf-8");
-          for(int j=0; j < 10*i; j++) {
+          for(int j=0; j < baseFileLen*i; j++) {
             writer.write("0");
           }
           writer.flush();
@@ -161,29 +167,30 @@ public class TestNameNodeProvidedImplementation {
 
   void startCluster(Path nspath, int numDatanodes,
       StorageType[] storageTypes,
-      StorageType[][] storageTypesPerDatanode)
+      StorageType[][] storageTypesPerDatanode,
+      boolean doFormat)
       throws IOException {
     conf.set(DFS_NAMENODE_NAME_DIR_KEY, nspath.toString());
 
     if (storageTypesPerDatanode != null) {
       cluster = new MiniDFSCluster.Builder(conf)
-          .format(false)
-          .manageNameDfsDirs(false)
+          .format(doFormat)
+          .manageNameDfsDirs(doFormat)
           .numDataNodes(numDatanodes)
           .storageTypes(storageTypesPerDatanode)
           .build();
     } else if (storageTypes != null) {
       cluster = new MiniDFSCluster.Builder(conf)
-          .format(false)
-          .manageNameDfsDirs(false)
+          .format(doFormat)
+          .manageNameDfsDirs(doFormat)
           .numDataNodes(numDatanodes)
           .storagesPerDatanode(storageTypes.length)
           .storageTypes(storageTypes)
           .build();
     } else {
       cluster = new MiniDFSCluster.Builder(conf)
-          .format(false)
-          .manageNameDfsDirs(false)
+          .format(doFormat)
+          .manageNameDfsDirs(doFormat)
           .numDataNodes(numDatanodes)
           .build();
     }
@@ -195,7 +202,8 @@ public class TestNameNodeProvidedImplementation {
     final long seed = r.nextLong();
     LOG.info("NAMEPATH: " + NAMEPATH);
     createImage(new RandomTreeWalk(seed), NNDIRPATH, FixedBlockResolver.class);
-    startCluster(NNDIRPATH, 0, new StorageType[] {StorageType.PROVIDED}, null);
+    startCluster(NNDIRPATH, 0, new StorageType[] {StorageType.PROVIDED},
+        null, false);
 
     FileSystem fs = cluster.getFileSystem();
     for (TreePath e : new RandomTreeWalk(seed)) {
@@ -220,7 +228,8 @@ public class TestNameNodeProvidedImplementation {
         SingleUGIResolver.class, UGIResolver.class);
     createImage(new FSTreeWalk(NAMEPATH, conf), NNDIRPATH,
         FixedBlockResolver.class);
-    startCluster(NNDIRPATH, 1, new StorageType[] {StorageType.PROVIDED}, null);
+    startCluster(NNDIRPATH, 1, new StorageType[] {StorageType.PROVIDED},
+        null, false);
   }
 
   @Test(timeout=500000)
@@ -232,10 +241,10 @@ public class TestNameNodeProvidedImplementation {
     // make the last Datanode with only DISK
     startCluster(NNDIRPATH, 3, null,
         new StorageType[][] {
-          {StorageType.PROVIDED},
-          {StorageType.PROVIDED},
-          {StorageType.DISK}}
-        );
+            {StorageType.PROVIDED},
+            {StorageType.PROVIDED},
+            {StorageType.DISK}},
+        false);
     // wait for the replication to finish
     Thread.sleep(50000);
 
@@ -290,7 +299,8 @@ public class TestNameNodeProvidedImplementation {
         FsUGIResolver.class, UGIResolver.class);
     createImage(new FSTreeWalk(NAMEPATH, conf), NNDIRPATH,
         FixedBlockResolver.class);
-    startCluster(NNDIRPATH, 3, new StorageType[] {StorageType.PROVIDED}, null);
+    startCluster(NNDIRPATH, 3, new StorageType[] {StorageType.PROVIDED},
+        null, false);
     FileSystem fs = cluster.getFileSystem();
     Thread.sleep(2000);
     int count = 0;
@@ -342,4 +352,30 @@ public class TestNameNodeProvidedImplementation {
       }
     }
   }
+
+  private BlockLocation[] createFile(Path path, short replication,
+      long fileLen, long blockLen) throws IOException {
+    FileSystem fs = cluster.getFileSystem();
+    //create a sample file that is not provided
+    DFSTestUtil.createFile(fs, path, false, (int) blockLen,
+        fileLen, blockLen, replication, 0, true);
+    return fs.getFileBlockLocations(path, 0, fileLen);
+  }
+
+  @Test
+  public void testClusterWithEmptyImage() throws IOException {
+    // start a cluster with 2 datanodes without any provided storage
+    startCluster(NNDIRPATH, 2, null,
+        new StorageType[][] {
+            {StorageType.DISK},
+            {StorageType.DISK}},
+        true);
+    assertTrue(cluster.isClusterUp());
+    assertTrue(cluster.isDataNodeUp());
+
+    BlockLocation[] locations = createFile(new Path("/testFile1.dat"),
+        (short) 2, 1024*1024, 1024*1024);
+    assertEquals(1, locations.length);
+    assertEquals(2, locations[0].getHosts().length);
+  }
 }




[18/29] hadoop git commit: HADOOP-14412. HostsFileReader#getHostDetails is very expensive on large clusters. Contributed by Jason Lowe.

Posted by vi...@apache.org.
HADOOP-14412. HostsFileReader#getHostDetails is very expensive on large clusters. Contributed by Jason Lowe.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d87a63a9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d87a63a9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d87a63a9

Branch: refs/heads/HDFS-9806
Commit: d87a63a9019d74a1c338c724e050952843a153e5
Parents: ec21ce4
Author: Rohith Sharma K S <ro...@apache.org>
Authored: Wed May 17 08:26:29 2017 +0530
Committer: Rohith Sharma K S <ro...@apache.org>
Committed: Wed May 17 08:27:45 2017 +0530

----------------------------------------------------------------------
 .../org/apache/hadoop/util/HostsFileReader.java | 263 ++++++++++---------
 .../apache/hadoop/util/TestHostsFileReader.java |  19 +-
 .../resourcemanager/NodesListManager.java       |  30 +--
 3 files changed, 161 insertions(+), 151 deletions(-)
----------------------------------------------------------------------
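
The rewrite below drops the ReentrantReadWriteLock in favor of a single AtomicReference to an immutable HostDetails snapshot: readers grab the current snapshot without locking or copying, and refresh() builds a new snapshot off to the side and publishes it with one atomic set. A minimal, generic sketch of that copy-on-write pattern follows; the class and method names are illustrative, and the real HostDetails also carries the include/exclude file names plus the exclude-timeout map.

import java.util.Collections;
import java.util.HashSet;
import java.util.Set;
import java.util.concurrent.atomic.AtomicReference;

/**
 * Minimal sketch of the snapshot pattern used by the HostsFileReader change:
 * readers get an immutable snapshot without locking; refresh builds a new
 * snapshot and publishes it atomically.
 */
public class HostSnapshotHolder {

  /** Immutable snapshot; the set is defensively copied and unmodifiable. */
  public static final class Snapshot {
    private final Set<String> includes;

    Snapshot(Set<String> includes) {
      this.includes = Collections.unmodifiableSet(new HashSet<>(includes));
    }

    public Set<String> getIncludes() {
      return includes;
    }
  }

  private final AtomicReference<Snapshot> current =
      new AtomicReference<>(new Snapshot(Collections.<String>emptySet()));

  /** Lock-free read: callers can iterate the returned snapshot safely. */
  public Snapshot getDetails() {
    return current.get();
  }

  /** Build a fresh snapshot off to the side, then publish it in one step. */
  public void refresh(Set<String> newIncludes) {
    current.set(new Snapshot(newIncludes));
  }
}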


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d87a63a9/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/HostsFileReader.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/HostsFileReader.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/HostsFileReader.java
index 2ef1ead..2913b87 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/HostsFileReader.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/HostsFileReader.java
@@ -20,13 +20,12 @@ package org.apache.hadoop.util;
 
 import java.io.*;
 import java.nio.charset.StandardCharsets;
+import java.util.Collections;
 import java.util.Set;
 import java.util.HashMap;
 import java.util.HashSet;
-import java.util.concurrent.locks.ReentrantReadWriteLock;
-import java.util.concurrent.locks.ReentrantReadWriteLock.ReadLock;
-import java.util.concurrent.locks.ReentrantReadWriteLock.WriteLock;
 import java.util.Map;
+import java.util.concurrent.atomic.AtomicReference;
 
 import javax.xml.parsers.DocumentBuilder;
 import javax.xml.parsers.DocumentBuilderFactory;
@@ -48,39 +47,26 @@ import org.xml.sax.SAXException;
 @InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
 @InterfaceStability.Unstable
 public class HostsFileReader {
-  private Set<String> includes;
-  // exclude host list with optional timeout.
-  // If the value is null, it indicates default timeout.
-  private Map<String, Integer> excludes;
-  private String includesFile;
-  private String excludesFile;
-  private WriteLock writeLock;
-  private ReadLock readLock;
-  
   private static final Log LOG = LogFactory.getLog(HostsFileReader.class);
 
-  public HostsFileReader(String inFile, 
+  private final AtomicReference<HostDetails> current;
+
+  public HostsFileReader(String inFile,
                          String exFile) throws IOException {
-    includes = new HashSet<String>();
-    excludes = new HashMap<String, Integer>();
-    includesFile = inFile;
-    excludesFile = exFile;
-    ReentrantReadWriteLock rwLock = new ReentrantReadWriteLock();
-    this.writeLock = rwLock.writeLock();
-    this.readLock = rwLock.readLock();
-    refresh();
+    HostDetails hostDetails = new HostDetails(
+        inFile, Collections.emptySet(),
+        exFile, Collections.emptyMap());
+    current = new AtomicReference<>(hostDetails);
+    refresh(inFile, exFile);
   }
 
   @Private
   public HostsFileReader(String includesFile, InputStream inFileInputStream,
       String excludesFile, InputStream exFileInputStream) throws IOException {
-    includes = new HashSet<String>();
-    excludes = new HashMap<String, Integer>();
-    this.includesFile = includesFile;
-    this.excludesFile = excludesFile;
-    ReentrantReadWriteLock rwLock = new ReentrantReadWriteLock();
-    this.writeLock = rwLock.writeLock();
-    this.readLock = rwLock.readLock();
+    HostDetails hostDetails = new HostDetails(
+        includesFile, Collections.emptySet(),
+        excludesFile, Collections.emptyMap());
+    current = new AtomicReference<>(hostDetails);
     refresh(inFileInputStream, exFileInputStream);
   }
 
@@ -126,12 +112,8 @@ public class HostsFileReader {
   }
 
   public void refresh() throws IOException {
-    this.writeLock.lock();
-    try {
-      refresh(includesFile, excludesFile);
-    } finally {
-      this.writeLock.unlock();
-    }
+    HostDetails hostDetails = current.get();
+    refresh(hostDetails.includesFile, hostDetails.excludesFile);
   }
 
   public static void readFileToMap(String type,
@@ -201,128 +183,163 @@ public class HostsFileReader {
     return (nodes.getLength() == 0)? null : nodes.item(0).getTextContent();
   }
 
-  public void refresh(String includeFiles, String excludeFiles)
+  public void refresh(String includesFile, String excludesFile)
       throws IOException {
     LOG.info("Refreshing hosts (include/exclude) list");
-    this.writeLock.lock();
-    try {
-      // update instance variables
-      updateFileNames(includeFiles, excludeFiles);
-      Set<String> newIncludes = new HashSet<String>();
-      Map<String, Integer> newExcludes = new HashMap<String, Integer>();
-      boolean switchIncludes = false;
-      boolean switchExcludes = false;
-      if (includeFiles != null && !includeFiles.isEmpty()) {
-        readFileToSet("included", includeFiles, newIncludes);
-        switchIncludes = true;
-      }
-      if (excludeFiles != null && !excludeFiles.isEmpty()) {
-        readFileToMap("excluded", excludeFiles, newExcludes);
-        switchExcludes = true;
-      }
-
-      if (switchIncludes) {
-        // switch the new hosts that are to be included
-        includes = newIncludes;
-      }
-      if (switchExcludes) {
-        // switch the excluded hosts
-        excludes = newExcludes;
-      }
-    } finally {
-      this.writeLock.unlock();
+    HostDetails oldDetails = current.get();
+    Set<String> newIncludes = oldDetails.includes;
+    Map<String, Integer> newExcludes = oldDetails.excludes;
+    if (includesFile != null && !includesFile.isEmpty()) {
+      newIncludes = new HashSet<>();
+      readFileToSet("included", includesFile, newIncludes);
+      newIncludes = Collections.unmodifiableSet(newIncludes);
+    }
+    if (excludesFile != null && !excludesFile.isEmpty()) {
+      newExcludes = new HashMap<>();
+      readFileToMap("excluded", excludesFile, newExcludes);
+      newExcludes = Collections.unmodifiableMap(newExcludes);
     }
+    HostDetails newDetails = new HostDetails(includesFile, newIncludes,
+        excludesFile, newExcludes);
+    current.set(newDetails);
   }
 
   @Private
   public void refresh(InputStream inFileInputStream,
       InputStream exFileInputStream) throws IOException {
     LOG.info("Refreshing hosts (include/exclude) list");
-    this.writeLock.lock();
-    try {
-      Set<String> newIncludes = new HashSet<String>();
-      Map<String, Integer> newExcludes = new HashMap<String, Integer>();
-      boolean switchIncludes = false;
-      boolean switchExcludes = false;
-      if (inFileInputStream != null) {
-        readFileToSetWithFileInputStream("included", includesFile,
-            inFileInputStream, newIncludes);
-        switchIncludes = true;
-      }
-      if (exFileInputStream != null) {
-        readFileToMapWithFileInputStream("excluded", excludesFile,
-            exFileInputStream, newExcludes);
-        switchExcludes = true;
-      }
-      if (switchIncludes) {
-        // switch the new hosts that are to be included
-        includes = newIncludes;
-      }
-      if (switchExcludes) {
-        // switch the excluded hosts
-        excludes = newExcludes;
-      }
-    } finally {
-      this.writeLock.unlock();
+    HostDetails oldDetails = current.get();
+    Set<String> newIncludes = oldDetails.includes;
+    Map<String, Integer> newExcludes = oldDetails.excludes;
+    if (inFileInputStream != null) {
+      newIncludes = new HashSet<>();
+      readFileToSetWithFileInputStream("included", oldDetails.includesFile,
+          inFileInputStream, newIncludes);
+      newIncludes = Collections.unmodifiableSet(newIncludes);
+    }
+    if (exFileInputStream != null) {
+      newExcludes = new HashMap<>();
+      readFileToMapWithFileInputStream("excluded", oldDetails.excludesFile,
+          exFileInputStream, newExcludes);
+      newExcludes = Collections.unmodifiableMap(newExcludes);
     }
+    HostDetails newDetails = new HostDetails(
+        oldDetails.includesFile, newIncludes,
+        oldDetails.excludesFile, newExcludes);
+    current.set(newDetails);
   }
 
   public Set<String> getHosts() {
-    this.readLock.lock();
-    try {
-      return includes;
-    } finally {
-      this.readLock.unlock();
-    }
+    HostDetails hostDetails = current.get();
+    return hostDetails.getIncludedHosts();
   }
 
   public Set<String> getExcludedHosts() {
-    this.readLock.lock();
-    try {
-      return excludes.keySet();
-    } finally {
-      this.readLock.unlock();
-    }
+    HostDetails hostDetails = current.get();
+    return hostDetails.getExcludedHosts();
   }
 
+  /**
+   * Retrieve an atomic view of the included and excluded hosts.
+   *
+   * @param includes set to populate with included hosts
+   * @param excludes set to populate with excluded hosts
+   * @deprecated use {@link #getHostDetails()} instead
+   */
+  @Deprecated
   public void getHostDetails(Set<String> includes, Set<String> excludes) {
-    this.readLock.lock();
-    try {
-      includes.addAll(this.includes);
-      excludes.addAll(this.excludes.keySet());
-    } finally {
-      this.readLock.unlock();
-    }
+    HostDetails hostDetails = current.get();
+    includes.addAll(hostDetails.getIncludedHosts());
+    excludes.addAll(hostDetails.getExcludedHosts());
   }
 
+  /**
+   * Retrieve an atomic view of the included and excluded hosts.
+   *
+   * @param includeHosts set to populate with included hosts
+   * @param excludeHosts map to populate with excluded hosts
+   * @deprecated use {@link #getHostDetails()} instead
+   */
+  @Deprecated
   public void getHostDetails(Set<String> includeHosts,
                              Map<String, Integer> excludeHosts) {
-    this.readLock.lock();
-    try {
-      includeHosts.addAll(this.includes);
-      excludeHosts.putAll(this.excludes);
-    } finally {
-      this.readLock.unlock();
-    }
+    HostDetails hostDetails = current.get();
+    includeHosts.addAll(hostDetails.getIncludedHosts());
+    excludeHosts.putAll(hostDetails.getExcludedMap());
+  }
+
+  /**
+   * Retrieve an atomic view of the included and excluded hosts.
+   *
+   * @return the included and excluded hosts
+   */
+  public HostDetails getHostDetails() {
+    return current.get();
   }
 
   public void setIncludesFile(String includesFile) {
     LOG.info("Setting the includes file to " + includesFile);
-    this.includesFile = includesFile;
+    HostDetails oldDetails = current.get();
+    HostDetails newDetails = new HostDetails(includesFile, oldDetails.includes,
+        oldDetails.excludesFile, oldDetails.excludes);
+    current.set(newDetails);
   }
   
   public void setExcludesFile(String excludesFile) {
     LOG.info("Setting the excludes file to " + excludesFile);
-    this.excludesFile = excludesFile;
+    HostDetails oldDetails = current.get();
+    HostDetails newDetails = new HostDetails(
+        oldDetails.includesFile, oldDetails.includes,
+        excludesFile, oldDetails.excludes);
+    current.set(newDetails);
   }
 
-  public void updateFileNames(String includeFiles, String excludeFiles) {
-    this.writeLock.lock();
-    try {
-      setIncludesFile(includeFiles);
-      setExcludesFile(excludeFiles);
-    } finally {
-      this.writeLock.unlock();
+  public void updateFileNames(String includesFile, String excludesFile) {
+    LOG.info("Setting the includes file to " + includesFile);
+    LOG.info("Setting the excludes file to " + excludesFile);
+    HostDetails oldDetails = current.get();
+    HostDetails newDetails = new HostDetails(includesFile, oldDetails.includes,
+        excludesFile, oldDetails.excludes);
+    current.set(newDetails);
+  }
+
+  /**
+   * An atomic view of the included and excluded hosts
+   */
+  public static class HostDetails {
+    private final String includesFile;
+    private final Set<String> includes;
+    private final String excludesFile;
+    // exclude host list with optional timeout.
+    // If the value is null, it indicates default timeout.
+    private final Map<String, Integer> excludes;
+
+    HostDetails(String includesFile, Set<String> includes,
+        String excludesFile, Map<String, Integer> excludes) {
+      this.includesFile = includesFile;
+      this.includes = includes;
+      this.excludesFile = excludesFile;
+      this.excludes = excludes;
+    }
+
+    public String getIncludesFile() {
+      return includesFile;
+    }
+
+    public Set<String> getIncludedHosts() {
+      return includes;
+    }
+
+    public String getExcludesFile() {
+      return excludesFile;
+    }
+
+    public Set<String> getExcludedHosts() {
+      return excludes.keySet();
+    }
+
+    public Map<String, Integer> getExcludedMap() {
+      return excludes;
     }
   }
 }
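
The rewrite above replaces the ReentrantReadWriteLock with a single AtomicReference holding an immutable HostDetails snapshot, so callers always see an include list and an exclude map taken from the same refresh. A minimal usage sketch against the new API (the file paths below are made up for illustration):

    import java.util.Map;
    import java.util.Set;
    import org.apache.hadoop.util.HostsFileReader;
    import org.apache.hadoop.util.HostsFileReader.HostDetails;

    public class HostsSnapshotExample {
      public static void main(String[] args) throws Exception {
        HostsFileReader reader = new HostsFileReader(
            "/etc/hadoop/dfs.include", "/etc/hadoop/dfs.exclude");
        reader.refresh();                 // atomically swaps in a fresh snapshot

        HostDetails details = reader.getHostDetails();
        Set<String> included = details.getIncludedHosts();
        Map<String, Integer> excluded = details.getExcludedMap(); // null value = default timeout
        // Both collections come from one immutable snapshot, so they cannot mix
        // state from two different refreshes, unlike separate calls to
        // getHosts() and getExcludedHosts().
        System.out.println("included=" + included + " excluded=" + excluded.keySet());
      }
    }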

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d87a63a9/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestHostsFileReader.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestHostsFileReader.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestHostsFileReader.java
index 5766591..2462114 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestHostsFileReader.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestHostsFileReader.java
@@ -20,12 +20,10 @@ package org.apache.hadoop.util;
 import java.io.File;
 import java.io.FileNotFoundException;
 import java.io.FileWriter;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Set;
 import java.util.Map;
 
 import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.util.HostsFileReader.HostDetails;
 import org.junit.*;
 
 import static org.junit.Assert.*;
@@ -121,11 +119,11 @@ public class TestHostsFileReader {
     assertTrue(hfp.getExcludedHosts().contains("node1"));
     assertTrue(hfp.getHosts().contains("node2"));
 
-    Set<String> hostsList = new HashSet<String>();
-    Set<String> excludeList = new HashSet<String>();
-    hfp.getHostDetails(hostsList, excludeList);
-    assertTrue(excludeList.contains("node1"));
-    assertTrue(hostsList.contains("node2"));
+    HostDetails hostDetails = hfp.getHostDetails();
+    assertTrue(hostDetails.getExcludedHosts().contains("node1"));
+    assertTrue(hostDetails.getIncludedHosts().contains("node2"));
+    assertEquals(newIncludesFile, hostDetails.getIncludesFile());
+    assertEquals(newExcludesFile, hostDetails.getExcludesFile());
   }
 
   /*
@@ -328,9 +326,8 @@ public class TestHostsFileReader {
     assertEquals(4, includesLen);
     assertEquals(9, excludesLen);
 
-    Set<String> includes = new HashSet<String>();
-    Map<String, Integer> excludes = new HashMap<String, Integer>();
-    hfp.getHostDetails(includes, excludes);
+    HostDetails hostDetails = hfp.getHostDetails();
+    Map<String, Integer> excludes = hostDetails.getExcludedMap();
     assertTrue(excludes.containsKey("host1"));
     assertTrue(excludes.containsKey("host2"));
     assertTrue(excludes.containsKey("host3"));

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d87a63a9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/NodesListManager.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/NodesListManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/NodesListManager.java
index 7d69f93..edd173b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/NodesListManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/NodesListManager.java
@@ -20,7 +20,6 @@ package org.apache.hadoop.yarn.server.resourcemanager;
 
 import java.io.IOException;
 import java.util.ArrayList;
-import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Iterator;
 import java.util.List;
@@ -40,6 +39,7 @@ import org.apache.hadoop.net.Node;
 import org.apache.hadoop.service.AbstractService;
 import org.apache.hadoop.service.CompositeService;
 import org.apache.hadoop.util.HostsFileReader;
+import org.apache.hadoop.util.HostsFileReader.HostDetails;
 import org.apache.hadoop.util.Time;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.yarn.api.records.NodeId;
@@ -192,14 +192,11 @@ public class NodesListManager extends CompositeService implements
         conf.get(YarnConfiguration.RM_NODES_EXCLUDE_FILE_PATH,
             YarnConfiguration.DEFAULT_RM_NODES_EXCLUDE_FILE_PATH));
 
-    Set<String> hostsList = new HashSet<String>();
-    Set<String> excludeList = new HashSet<String>();
-    hostsReader.getHostDetails(hostsList, excludeList);
-
-    for (String include : hostsList) {
+    HostDetails hostDetails = hostsReader.getHostDetails();
+    for (String include : hostDetails.getIncludedHosts()) {
       LOG.debug("include: " + include);
     }
-    for (String exclude : excludeList) {
+    for (String exclude : hostDetails.getExcludedHosts()) {
       LOG.debug("exclude: " + exclude);
     }
   }
@@ -262,9 +259,9 @@ public class NodesListManager extends CompositeService implements
     // Nodes need to be decommissioned (graceful or forceful);
     List<RMNode> nodesToDecom = new ArrayList<RMNode>();
 
-    Set<String> includes = new HashSet<String>();
-    Map<String, Integer> excludes = new HashMap<String, Integer>();
-    hostsReader.getHostDetails(includes, excludes);
+    HostDetails hostDetails = hostsReader.getHostDetails();
+    Set<String> includes = hostDetails.getIncludedHosts();
+    Map<String, Integer> excludes = hostDetails.getExcludedMap();
 
     for (RMNode n : this.rmContext.getRMNodes().values()) {
       NodeState s = n.getState();
@@ -453,10 +450,9 @@ public class NodesListManager extends CompositeService implements
   }
 
   public boolean isValidNode(String hostName) {
-    Set<String> hostsList = new HashSet<String>();
-    Set<String> excludeList = new HashSet<String>();
-    hostsReader.getHostDetails(hostsList, excludeList);
-    return isValidNode(hostName, hostsList, excludeList);
+    HostDetails hostDetails = hostsReader.getHostDetails();
+    return isValidNode(hostName, hostDetails.getIncludedHosts(),
+        hostDetails.getExcludedHosts());
   }
 
   private boolean isValidNode(
@@ -563,9 +559,9 @@ public class NodesListManager extends CompositeService implements
   public boolean isUntrackedNode(String hostName) {
     String ip = resolver.resolve(hostName);
 
-    Set<String> hostsList = new HashSet<String>();
-    Set<String> excludeList = new HashSet<String>();
-    hostsReader.getHostDetails(hostsList, excludeList);
+    HostDetails hostDetails = hostsReader.getHostDetails();
+    Set<String> hostsList = hostDetails.getIncludedHosts();
+    Set<String> excludeList = hostDetails.getExcludedHosts();
 
     return !hostsList.isEmpty() && !hostsList.contains(hostName)
         && !hostsList.contains(ip) && !excludeList.contains(hostName)




[04/29] hadoop git commit: YARN-6580. Incorrect logger for FairSharePolicy. (Vrushali C via Haibo Chen)

Posted by vi...@apache.org.
YARN-6580. Incorrect logger for FairSharePolicy. (Vrushali C via Haibo Chen)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6c35001b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6c35001b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6c35001b

Branch: refs/heads/HDFS-9806
Commit: 6c35001b9f93fd85a02c3465e87bfd1612f4cce9
Parents: a9e24a1
Author: Haibo Chen <ha...@cloudera.com>
Authored: Fri May 12 13:05:40 2017 -0700
Committer: Haibo Chen <ha...@cloudera.com>
Committed: Fri May 12 13:05:40 2017 -0700

----------------------------------------------------------------------
 .../resourcemanager/scheduler/fair/policies/FairSharePolicy.java   | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6c35001b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/FairSharePolicy.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/FairSharePolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/FairSharePolicy.java
index f8cdb45..c3ec47a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/FairSharePolicy.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/FairSharePolicy.java
@@ -42,7 +42,7 @@ import com.google.common.annotations.VisibleForTesting;
 @Private
 @Unstable
 public class FairSharePolicy extends SchedulingPolicy {
-  private static final Log LOG = LogFactory.getLog(FifoPolicy.class);
+  private static final Log LOG = LogFactory.getLog(FairSharePolicy.class);
   @VisibleForTesting
   public static final String NAME = "fair";
   private static final DefaultResourceCalculator RESOURCE_CALCULATOR =




[05/29] hadoop git commit: HADOOP-14376. Memory leak when reading a compressed file using the native library. Contributed by Eli Acherkan

Posted by vi...@apache.org.
HADOOP-14376. Memory leak when reading a compressed file using the native library. Contributed by Eli Acherkan


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7bc21722
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7bc21722
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7bc21722

Branch: refs/heads/HDFS-9806
Commit: 7bc217224891b7f7f0a2e35e37e46b36d8c5309d
Parents: 6c35001
Author: Jason Lowe <jl...@yahoo-inc.com>
Authored: Fri May 12 16:54:08 2017 -0500
Committer: Jason Lowe <jl...@yahoo-inc.com>
Committed: Fri May 12 16:54:08 2017 -0500

----------------------------------------------------------------------
 .../apache/hadoop/io/compress/BZip2Codec.java   |  20 ++--
 .../apache/hadoop/io/compress/CodecPool.java    |  10 +-
 .../io/compress/CompressionInputStream.java     |  11 +-
 .../io/compress/CompressionOutputStream.java    |  16 ++-
 .../hadoop/io/compress/CompressorStream.java    |   3 +-
 .../hadoop/io/compress/DecompressorStream.java  |   7 +-
 .../apache/hadoop/io/compress/TestCodec.java    | 103 +++++++++++--------
 7 files changed, 102 insertions(+), 68 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7bc21722/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/BZip2Codec.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/BZip2Codec.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/BZip2Codec.java
index 08b4d4d..331606e 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/BZip2Codec.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/BZip2Codec.java
@@ -336,15 +336,11 @@ public class BZip2Codec implements Configurable, SplittableCompressionCodec {
     }
 
     public void close() throws IOException {
-      if (needsReset) {
-        // In the case that nothing is written to this stream, we still need to
-        // write out the header before closing, otherwise the stream won't be
-        // recognized by BZip2CompressionInputStream.
-        internalReset();
+      try {
+        super.close();
+      } finally {
+        output.close();
       }
-      this.output.flush();
-      this.output.close();
-      needsReset = true;
     }
 
   }// end of class BZip2CompressionOutputStream
@@ -454,8 +450,12 @@ public class BZip2Codec implements Configurable, SplittableCompressionCodec {
 
     public void close() throws IOException {
       if (!needsReset) {
-        input.close();
-        needsReset = true;
+        try {
+          input.close();
+          needsReset = true;
+        } finally {
+          super.close();
+        }
       }
     }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7bc21722/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/CodecPool.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/CodecPool.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/CodecPool.java
index bb566de..01bffa7 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/CodecPool.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/CodecPool.java
@@ -157,7 +157,10 @@ public class CodecPool {
         LOG.debug("Got recycled compressor");
       }
     }
-    updateLeaseCount(compressorCounts, compressor, 1);
+    if (compressor != null &&
+        !compressor.getClass().isAnnotationPresent(DoNotPool.class)) {
+      updateLeaseCount(compressorCounts, compressor, 1);
+    }
     return compressor;
   }
   
@@ -184,7 +187,10 @@ public class CodecPool {
         LOG.debug("Got recycled decompressor");
       }
     }
-    updateLeaseCount(decompressorCounts, decompressor, 1);
+    if (decompressor != null &&
+        !decompressor.getClass().isAnnotationPresent(DoNotPool.class)) {
+      updateLeaseCount(decompressorCounts, decompressor, 1);
+    }
     return decompressor;
   }
   

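With this change CodecPool only counts leases for compressors and decompressors that are actually poolable; instances annotated with @DoNotPool are handed out but never pooled or counted. A small sketch of the borrow/return discipline the pool expects (the codec name is chosen arbitrarily and the class name is hypothetical):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.io.compress.CodecPool;
    import org.apache.hadoop.io.compress.CompressionCodec;
    import org.apache.hadoop.io.compress.CompressionCodecFactory;
    import org.apache.hadoop.io.compress.Decompressor;

    public class PooledDecompressorExample {
      public static void main(String[] args) {
        CompressionCodec codec = new CompressionCodecFactory(new Configuration())
            .getCodecByName("gzip");
        Decompressor decompressor = CodecPool.getDecompressor(codec);
        try {
          // ... pass it to codec.createInputStream(in, decompressor) here ...
        } finally {
          // Returns the decompressor and decrements its lease count; a
          // @DoNotPool decompressor is simply discarded and never counted.
          CodecPool.returnDecompressor(decompressor);
        }
      }
    }
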
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7bc21722/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/CompressionInputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/CompressionInputStream.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/CompressionInputStream.java
index cf3ac40..2dfa30b 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/CompressionInputStream.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/CompressionInputStream.java
@@ -59,10 +59,13 @@ public abstract class CompressionInputStream extends InputStream implements Seek
 
   @Override
   public void close() throws IOException {
-    in.close();
-    if (trackedDecompressor != null) {
-      CodecPool.returnDecompressor(trackedDecompressor);
-      trackedDecompressor = null;
+    try {
+      in.close();
+    } finally {
+      if (trackedDecompressor != null) {
+        CodecPool.returnDecompressor(trackedDecompressor);
+        trackedDecompressor = null;
+      }
     }
   }
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7bc21722/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/CompressionOutputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/CompressionOutputStream.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/CompressionOutputStream.java
index 00e272a..71c7f32 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/CompressionOutputStream.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/CompressionOutputStream.java
@@ -56,11 +56,17 @@ public abstract class CompressionOutputStream extends OutputStream {
 
   @Override
   public void close() throws IOException {
-    finish();
-    out.close();
-    if (trackedCompressor != null) {
-      CodecPool.returnCompressor(trackedCompressor);
-      trackedCompressor = null;
+    try {
+      finish();
+    } finally {
+      try {
+        out.close();
+      } finally {
+        if (trackedCompressor != null) {
+          CodecPool.returnCompressor(trackedCompressor);
+          trackedCompressor = null;
+        }
+      }
     }
   }
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7bc21722/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/CompressorStream.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/CompressorStream.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/CompressorStream.java
index 34426f8..be5eee0 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/CompressorStream.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/CompressorStream.java
@@ -103,10 +103,9 @@ public class CompressorStream extends CompressionOutputStream {
   public void close() throws IOException {
     if (!closed) {
       try {
-        finish();
+        super.close();
       }
       finally {
-        out.close();
         closed = true;
       }
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7bc21722/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/DecompressorStream.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/DecompressorStream.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/DecompressorStream.java
index dab366a..756ccf3 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/DecompressorStream.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/DecompressorStream.java
@@ -221,8 +221,11 @@ public class DecompressorStream extends CompressionInputStream {
   @Override
   public void close() throws IOException {
     if (!closed) {
-      in.close();
-      closed = true;
+      try {
+        super.close();
+      } finally {
+        closed = true;
+      }
     }
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7bc21722/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/TestCodec.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/TestCodec.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/TestCodec.java
index 3955aa2..1ea9dc8 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/TestCodec.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/TestCodec.java
@@ -205,66 +205,83 @@ public class TestCodec {
     
     // Compress data
     DataOutputBuffer compressedDataBuffer = new DataOutputBuffer();
-    CompressionOutputStream deflateFilter = 
+    int leasedCompressorsBefore = codec.getCompressorType() == null ? -1
+        : CodecPool.getLeasedCompressorsCount(codec);
+    try (CompressionOutputStream deflateFilter =
       codec.createOutputStream(compressedDataBuffer);
-    DataOutputStream deflateOut = 
-      new DataOutputStream(new BufferedOutputStream(deflateFilter));
-    deflateOut.write(data.getData(), 0, data.getLength());
-    deflateOut.flush();
-    deflateFilter.finish();
+      DataOutputStream deflateOut =
+        new DataOutputStream(new BufferedOutputStream(deflateFilter))) {
+      deflateOut.write(data.getData(), 0, data.getLength());
+      deflateOut.flush();
+      deflateFilter.finish();
+    }
+    if (leasedCompressorsBefore > -1) {
+      assertEquals("leased compressor not returned to the codec pool",
+          leasedCompressorsBefore, CodecPool.getLeasedCompressorsCount(codec));
+    }
     LOG.info("Finished compressing data");
     
     // De-compress data
     DataInputBuffer deCompressedDataBuffer = new DataInputBuffer();
     deCompressedDataBuffer.reset(compressedDataBuffer.getData(), 0, 
                                  compressedDataBuffer.getLength());
-    CompressionInputStream inflateFilter = 
-      codec.createInputStream(deCompressedDataBuffer);
-    DataInputStream inflateIn = 
-      new DataInputStream(new BufferedInputStream(inflateFilter));
-
-    // Check
     DataInputBuffer originalData = new DataInputBuffer();
-    originalData.reset(data.getData(), 0, data.getLength());
-    DataInputStream originalIn = new DataInputStream(new BufferedInputStream(originalData));
-    for(int i=0; i < count; ++i) {
-      RandomDatum k1 = new RandomDatum();
-      RandomDatum v1 = new RandomDatum();
-      k1.readFields(originalIn);
-      v1.readFields(originalIn);
+    int leasedDecompressorsBefore =
+        CodecPool.getLeasedDecompressorsCount(codec);
+    try (CompressionInputStream inflateFilter =
+      codec.createInputStream(deCompressedDataBuffer);
+      DataInputStream inflateIn =
+        new DataInputStream(new BufferedInputStream(inflateFilter))) {
+
+      // Check
+      originalData.reset(data.getData(), 0, data.getLength());
+      DataInputStream originalIn =
+          new DataInputStream(new BufferedInputStream(originalData));
+      for(int i=0; i < count; ++i) {
+        RandomDatum k1 = new RandomDatum();
+        RandomDatum v1 = new RandomDatum();
+        k1.readFields(originalIn);
+        v1.readFields(originalIn);
       
-      RandomDatum k2 = new RandomDatum();
-      RandomDatum v2 = new RandomDatum();
-      k2.readFields(inflateIn);
-      v2.readFields(inflateIn);
-      assertTrue("original and compressed-then-decompressed-output not equal",
-                 k1.equals(k2) && v1.equals(v2));
+        RandomDatum k2 = new RandomDatum();
+        RandomDatum v2 = new RandomDatum();
+        k2.readFields(inflateIn);
+        v2.readFields(inflateIn);
+        assertTrue("original and compressed-then-decompressed-output not equal",
+                   k1.equals(k2) && v1.equals(v2));
       
-      // original and compressed-then-decompressed-output have the same hashCode
-      Map<RandomDatum, String> m = new HashMap<RandomDatum, String>();
-      m.put(k1, k1.toString());
-      m.put(v1, v1.toString());
-      String result = m.get(k2);
-      assertEquals("k1 and k2 hashcode not equal", result, k1.toString());
-      result = m.get(v2);
-      assertEquals("v1 and v2 hashcode not equal", result, v1.toString());
+        // original and compressed-then-decompressed-output have the same
+        // hashCode
+        Map<RandomDatum, String> m = new HashMap<RandomDatum, String>();
+        m.put(k1, k1.toString());
+        m.put(v1, v1.toString());
+        String result = m.get(k2);
+        assertEquals("k1 and k2 hashcode not equal", result, k1.toString());
+        result = m.get(v2);
+        assertEquals("v1 and v2 hashcode not equal", result, v1.toString());
+      }
     }
+    assertEquals("leased decompressor not returned to the codec pool",
+        leasedDecompressorsBefore,
+        CodecPool.getLeasedDecompressorsCount(codec));
 
     // De-compress data byte-at-a-time
     originalData.reset(data.getData(), 0, data.getLength());
     deCompressedDataBuffer.reset(compressedDataBuffer.getData(), 0, 
                                  compressedDataBuffer.getLength());
-    inflateFilter = 
+    try (CompressionInputStream inflateFilter =
       codec.createInputStream(deCompressedDataBuffer);
-
-    // Check
-    originalIn = new DataInputStream(new BufferedInputStream(originalData));
-    int expected;
-    do {
-      expected = originalIn.read();
-      assertEquals("Inflated stream read by byte does not match",
-        expected, inflateFilter.read());
-    } while (expected != -1);
+      DataInputStream originalIn =
+        new DataInputStream(new BufferedInputStream(originalData))) {
+
+      // Check
+      int expected;
+      do {
+        expected = originalIn.read();
+        assertEquals("Inflated stream read by byte does not match",
+            expected, inflateFilter.read());
+      } while (expected != -1);
+    }
 
     LOG.info("SUCCESS! Completed checking " + count + " records");
   }
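
The reworked test wraps every codec stream in try-with-resources so the leased compressors and decompressors are verifiably returned to the pool even when an assertion fails. The same pattern applies to application code; a small sketch under assumed paths and codec availability:

    import java.io.InputStream;
    import java.io.OutputStream;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.io.IOUtils;
    import org.apache.hadoop.io.compress.CompressionCodec;
    import org.apache.hadoop.io.compress.CompressionCodecFactory;

    public class CopyCompressedExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);
        Path in = new Path("/tmp/example.gz");    // illustrative paths
        Path out = new Path("/tmp/example.txt");
        CompressionCodec codec = new CompressionCodecFactory(conf).getCodec(in);
        // try-with-resources closes every stream even if the copy fails, which
        // with this patch also returns the pooled decompressor to CodecPool.
        try (InputStream raw = fs.open(in);
             InputStream is = codec.createInputStream(raw);
             OutputStream os = fs.create(out)) {
          IOUtils.copyBytes(is, os, conf);
        }
      }
    }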




[25/29] hadoop git commit: HDFS-11703. [READ] Tests for ProvidedStorageMap

Posted by vi...@apache.org.
HDFS-11703. [READ] Tests for ProvidedStorageMap


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5d021f38
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5d021f38
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5d021f38

Branch: refs/heads/HDFS-9806
Commit: 5d021f38e393c941b84e6fa151f545725aa35e3a
Parents: eee69dd
Author: Virajith Jalaparti <vi...@apache.org>
Authored: Thu May 4 13:14:41 2017 -0700
Committer: Virajith Jalaparti <vi...@apache.org>
Committed: Wed May 17 12:41:52 2017 -0700

----------------------------------------------------------------------
 .../blockmanagement/ProvidedStorageMap.java     |   6 +
 .../blockmanagement/TestProvidedStorageMap.java | 153 +++++++++++++++++++
 2 files changed, 159 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5d021f38/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ProvidedStorageMap.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ProvidedStorageMap.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ProvidedStorageMap.java
index 518b7e9..0faf16d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ProvidedStorageMap.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ProvidedStorageMap.java
@@ -28,6 +28,7 @@ import java.util.Set;
 import java.util.UUID;
 import java.util.concurrent.ConcurrentSkipListMap;
 
+import com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
@@ -121,6 +122,11 @@ public class ProvidedStorageMap {
     return dn.getStorageInfo(s.getStorageID());
   }
 
+  @VisibleForTesting
+  public DatanodeStorageInfo getProvidedStorageInfo() {
+    return providedStorageInfo;
+  }
+
   public LocatedBlockBuilder newLocatedBlocks(int maxValue) {
     if (!providedEnabled) {
       return new LocatedBlockBuilder(maxValue);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5d021f38/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestProvidedStorageMap.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestProvidedStorageMap.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestProvidedStorageMap.java
new file mode 100644
index 0000000..50e2fed
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestProvidedStorageMap.java
@@ -0,0 +1,153 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.blockmanagement;
+
+import org.apache.hadoop.conf.Configurable;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
+import org.apache.hadoop.hdfs.util.RwLock;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.Iterator;
+
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+/**
+ * This class tests the {@link ProvidedStorageMap}.
+ */
+public class TestProvidedStorageMap {
+
+  private Configuration conf;
+  private BlockManager bm;
+  private RwLock nameSystemLock;
+  private String providedStorageID;
+
+  static class TestBlockProvider extends BlockProvider
+          implements Configurable {
+
+    @Override
+    public void setConf(Configuration conf) {
+    }
+
+    @Override
+    public Configuration getConf() {
+      return null;
+    }
+
+    @Override
+    public Iterator<Block> iterator() {
+      return new Iterator<Block>() {
+        @Override
+        public boolean hasNext() {
+          return false;
+        }
+        @Override
+        public Block next() {
+          return null;
+        }
+        @Override
+        public void remove() {
+          throw new UnsupportedOperationException();
+        }
+      };
+    }
+  }
+
+  @Before
+  public void setup() {
+    providedStorageID = DFSConfigKeys.DFS_PROVIDER_STORAGEUUID_DEFAULT;
+    conf = new HdfsConfiguration();
+    conf.set(DFSConfigKeys.DFS_PROVIDER_STORAGEUUID,
+            providedStorageID);
+    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_PROVIDED_ENABLED, true);
+    conf.setClass(DFSConfigKeys.DFS_NAMENODE_BLOCK_PROVIDER_CLASS,
+            TestBlockProvider.class, BlockProvider.class);
+
+    bm = mock(BlockManager.class);
+    nameSystemLock = mock(RwLock.class);
+  }
+
+  private DatanodeDescriptor createDatanodeDescriptor(int port) {
+    return DFSTestUtil.getDatanodeDescriptor("127.0.0.1", port, "defaultRack",
+            "localhost");
+  }
+
+  @Test
+  public void testProvidedStorageMap() throws IOException {
+    ProvidedStorageMap providedMap = new ProvidedStorageMap(
+            nameSystemLock, bm, conf);
+    DatanodeStorageInfo providedMapStorage =
+            providedMap.getProvidedStorageInfo();
+    //the provided storage cannot be null
+    assertNotNull(providedMapStorage);
+
+    //create a datanode
+    DatanodeDescriptor dn1 = createDatanodeDescriptor(5000);
+
+    //associate two storages to the datanode
+    DatanodeStorage dn1ProvidedStorage = new DatanodeStorage(
+            providedStorageID,
+            DatanodeStorage.State.NORMAL,
+            StorageType.PROVIDED);
+    DatanodeStorage dn1DiskStorage = new DatanodeStorage(
+            "sid-1", DatanodeStorage.State.NORMAL, StorageType.DISK);
+
+    when(nameSystemLock.hasWriteLock()).thenReturn(true);
+    DatanodeStorageInfo dns1Provided = providedMap.getStorage(dn1,
+            dn1ProvidedStorage);
+    DatanodeStorageInfo dns1Disk = providedMap.getStorage(dn1,
+            dn1DiskStorage);
+
+    assertTrue("The provided storages should be equal",
+            dns1Provided == providedMapStorage);
+    assertTrue("Disk storage has not yet been registered with block manager",
+            dns1Disk == null);
+    //add the disk storage to the datanode.
+    DatanodeStorageInfo dnsDisk = new DatanodeStorageInfo(dn1, dn1DiskStorage);
+    dn1.injectStorage(dnsDisk);
+    assertTrue("Disk storage must match the injected storage info",
+            dnsDisk == providedMap.getStorage(dn1, dn1DiskStorage));
+
+    //create a 2nd datanode
+    DatanodeDescriptor dn2 = createDatanodeDescriptor(5010);
+    //associate a provided storage with the datanode
+    DatanodeStorage dn2ProvidedStorage = new DatanodeStorage(
+            providedStorageID,
+            DatanodeStorage.State.NORMAL,
+            StorageType.PROVIDED);
+
+    DatanodeStorageInfo dns2Provided = providedMap.getStorage(
+            dn2, dn2ProvidedStorage);
+    assertTrue("The provided storages should be equal",
+            dns2Provided == providedMapStorage);
+    assertTrue("The DatanodeDescriptor should contain the provided storage",
+            dn2.getStorageInfo(providedStorageID) == providedMapStorage);
+
+
+  }
+}




[10/29] hadoop git commit: HDFS-11833. HDFS architecture documentation describes outdated placement policy. Contributed by Chen Liang.

Posted by vi...@apache.org.
HDFS-11833. HDFS architecture documentation describes outdated placement policy. Contributed by Chen Liang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1d1c52b4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1d1c52b4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1d1c52b4

Branch: refs/heads/HDFS-9806
Commit: 1d1c52b42feae5a4271ef4b771d0d8de43e83c15
Parents: 489f859
Author: Akira Ajisaka <aa...@apache.org>
Authored: Tue May 16 11:52:33 2017 -0400
Committer: Akira Ajisaka <aa...@apache.org>
Committed: Tue May 16 11:52:55 2017 -0400

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsDesign.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1d1c52b4/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsDesign.md
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsDesign.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsDesign.md
index bda53a9..4bf1897 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsDesign.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsDesign.md
@@ -102,7 +102,7 @@ Large HDFS instances run on a cluster of computers that commonly spread across m
 The NameNode determines the rack id each DataNode belongs to via the process outlined in [Hadoop Rack Awareness](../hadoop-common/RackAwareness.html).
 A simple but non-optimal policy is to place replicas on unique racks. This prevents losing data when an entire rack fails and allows use of bandwidth from multiple racks when reading data. This policy evenly distributes replicas in the cluster which makes it easy to balance load on component failure. However, this policy increases the cost of writes because a write needs to transfer blocks to multiple racks.
 
-For the common case, when the replication factor is three, HDFS’s placement policy is to put one replica on one node in the local rack, another on a different node in the local rack, and the last on a different node in a different rack. This policy cuts the inter-rack write traffic which generally improves write performance. The chance of rack failure is far less than that of node failure; this policy does not impact data reliability and availability guarantees. However, it does reduce the aggregate network bandwidth used when reading data since a block is placed in only two unique racks rather than three. With this policy, the replicas of a file do not evenly distribute across the racks. One third of replicas are on one node, two thirds of replicas are on one rack, and the other third are evenly distributed across the remaining racks. This policy improves write performance without compromising data reliability or read performance.
+For the common case, when the replication factor is three, HDFS’s placement policy is to put one replica on the local machine if the writer is on a datanode, otherwise on a random datanode, another replica on a node in a different (remote) rack, and the last on a different node in the same remote rack. This policy cuts the inter-rack write traffic which generally improves write performance. The chance of rack failure is far less than that of node failure; this policy does not impact data reliability and availability guarantees. However, it does reduce the aggregate network bandwidth used when reading data since a block is placed in only two unique racks rather than three. With this policy, the replicas of a file do not evenly distribute across the racks. One third of replicas are on one node, two thirds of replicas are on one rack, and the other third are evenly distributed across the remaining racks. This policy improves write performance without compromising data reliability or read performance.
 
 If the replication factor is greater than 3,
 the placement of the 4th and following replicas are determined randomly
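
A toy sketch of the three-replica rule as now described (hostnames and racks are made up; the real BlockPlacementPolicyDefault additionally handles fallbacks, storage types, excluded nodes and load, and chooses nodes randomly within each rule):

    import java.util.Arrays;
    import java.util.List;

    /** Toy sketch of the default 3-replica rule; not the real placement code. */
    public class PlacementSketch {
      public static void main(String[] args) {
        List<String> localRack  = Arrays.asList("dn1", "dn2", "dn3"); // writer's rack
        List<String> remoteRack = Arrays.asList("dn4", "dn5", "dn6"); // some other rack

        String first  = localRack.get(0);  // replica 1: the writer's own datanode
        String second = remoteRack.get(0); // replica 2: a node in a remote rack
        String third  = remoteRack.get(1); // replica 3: a different node, same remote rack
        System.out.println(first + ", " + second + ", " + third);
      }
    }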




[17/29] hadoop git commit: HDFS-11827. NPE is thrown when log level changed in BlockPlacementPolicyDefault#chooseRandom() method. Contributed by xupeng.

Posted by vi...@apache.org.
HDFS-11827. NPE is thrown when log level changed in BlockPlacementPolicyDefault#chooseRandom() method. Contributed by xupeng.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ec21ce42
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ec21ce42
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ec21ce42

Branch: refs/heads/HDFS-9806
Commit: ec21ce425f4e5637be716406f9d0e038921550d7
Parents: 18c494a
Author: Arpit Agarwal <ar...@apache.org>
Authored: Tue May 16 19:51:04 2017 -0700
Committer: Arpit Agarwal <ar...@apache.org>
Committed: Tue May 16 19:51:04 2017 -0700

----------------------------------------------------------------------
 .../hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ec21ce42/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
index a245f0c..a479397 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
@@ -735,7 +735,7 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
       }
       Preconditions.checkState(excludedNodes.add(chosenNode), "chosenNode "
           + chosenNode + " is already in excludedNodes " + excludedNodes);
-      if (LOG.isDebugEnabled()) {
+      if (LOG.isDebugEnabled() && builder != null) {
         builder.append("\nNode ").append(NodeBase.getPath(chosenNode))
             .append(" [");
       }
@@ -771,7 +771,7 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
           }
         }
 
-        if (LOG.isDebugEnabled()) {
+        if (LOG.isDebugEnabled() && builder != null) {
           builder.append("\n]");
         }
 

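The extra null checks matter because the StringBuilder is only allocated when debug logging is enabled on entry to chooseRandom(); if the log level is switched to DEBUG while the method is still running, the later debug blocks would otherwise dereference a null builder. A minimal, self-contained sketch of the pattern (not the actual chooseRandom() code):

    import org.apache.commons.logging.Log;
    import org.apache.commons.logging.LogFactory;

    /** Sketch of the failure mode this patch guards against: the builder is
     *  created only if DEBUG was enabled at entry, but the log level can be
     *  raised to DEBUG while the method runs, so every later debug block must
     *  also null-check the builder. */
    public class GuardedDebugBuilder {
      private static final Log LOG = LogFactory.getLog(GuardedDebugBuilder.class);

      static String chooseSomething() {
        StringBuilder builder = LOG.isDebugEnabled() ? new StringBuilder() : null;
        // ... long-running selection loop; the log level may change here ...
        if (LOG.isDebugEnabled() && builder != null) { // both checks are required
          builder.append("details about the chosen node");
          LOG.debug(builder);
        }
        return "chosenNode";
      }

      public static void main(String[] args) {
        System.out.println(chooseSomething());
      }
    }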



[28/29] hadoop git commit: HDFS-10706. [READ] Add tool generating FSImage from external store

Posted by vi...@apache.org.
HDFS-10706. [READ] Add tool generating FSImage from external store


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/616765e9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/616765e9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/616765e9

Branch: refs/heads/HDFS-9806
Commit: 616765e9683f122b3eb7c37e73239c7b8014ff1a
Parents: 2630e4f
Author: Virajith Jalaparti <vi...@apache.org>
Authored: Sat Apr 15 12:15:08 2017 -0700
Committer: Virajith Jalaparti <vi...@apache.org>
Committed: Wed May 17 12:41:52 2017 -0700

----------------------------------------------------------------------
 hadoop-tools/hadoop-fs2img/pom.xml              |  87 +++
 .../hdfs/server/namenode/BlockResolver.java     |  95 +++
 .../hadoop/hdfs/server/namenode/FSTreeWalk.java | 105 ++++
 .../hdfs/server/namenode/FileSystemImage.java   | 139 +++++
 .../FixedBlockMultiReplicaResolver.java         |  44 ++
 .../server/namenode/FixedBlockResolver.java     |  93 +++
 .../hdfs/server/namenode/FsUGIResolver.java     |  58 ++
 .../hdfs/server/namenode/ImageWriter.java       | 600 +++++++++++++++++++
 .../hdfs/server/namenode/NullBlockFormat.java   |  87 +++
 .../hdfs/server/namenode/SingleUGIResolver.java |  90 +++
 .../hadoop/hdfs/server/namenode/TreePath.java   | 167 ++++++
 .../hadoop/hdfs/server/namenode/TreeWalk.java   | 103 ++++
 .../hdfs/server/namenode/UGIResolver.java       | 131 ++++
 .../hdfs/server/namenode/package-info.java      |  23 +
 .../hdfs/server/namenode/RandomTreeWalk.java    | 186 ++++++
 .../server/namenode/TestFixedBlockResolver.java | 121 ++++
 .../server/namenode/TestRandomTreeWalk.java     | 130 ++++
 .../server/namenode/TestSingleUGIResolver.java  | 148 +++++
 .../src/test/resources/log4j.properties         |  24 +
 hadoop-tools/hadoop-tools-dist/pom.xml          |   6 +
 hadoop-tools/pom.xml                            |   1 +
 21 files changed, 2438 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/616765e9/hadoop-tools/hadoop-fs2img/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-fs2img/pom.xml b/hadoop-tools/hadoop-fs2img/pom.xml
new file mode 100644
index 0000000..36096b7
--- /dev/null
+++ b/hadoop-tools/hadoop-fs2img/pom.xml
@@ -0,0 +1,87 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License. See accompanying LICENSE file.
+-->
+<project>
+  <modelVersion>4.0.0</modelVersion>
+  <parent>
+    <groupId>org.apache.hadoop</groupId>
+    <artifactId>hadoop-project</artifactId>
+    <version>3.0.0-alpha3-SNAPSHOT</version>
+    <relativePath>../../hadoop-project</relativePath>
+  </parent>
+  <groupId>org.apache.hadoop</groupId>
+  <artifactId>hadoop-fs2img</artifactId>
+  <version>3.0.0-alpha3-SNAPSHOT</version>
+  <description>fs2img</description>
+  <name>fs2img</name>
+  <packaging>jar</packaging>
+
+  <properties>
+    <hadoop.log.dir>${project.build.directory}/log</hadoop.log.dir>
+  </properties>
+
+  <dependencies>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-common</artifactId>
+      <scope>provided</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-hdfs</artifactId>
+      <scope>provided</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-minicluster</artifactId>
+      <scope>provided</scope>
+    </dependency>
+    <dependency>
+      <groupId>com.google.protobuf</groupId>
+      <artifactId>protobuf-java</artifactId>
+      <scope>provided</scope>
+    </dependency>
+    <dependency>
+      <groupId>commons-cli</groupId>
+      <artifactId>commons-cli</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>junit</groupId>
+      <artifactId>junit</artifactId>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.mockito</groupId>
+      <artifactId>mockito-all</artifactId>
+      <scope>test</scope>
+    </dependency>
+  </dependencies>
+
+  <build>
+    <plugins>
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-jar-plugin</artifactId>
+         <configuration>
+          <archive>
+           <manifest>
+            <mainClass>org.apache.hadoop.hdfs.server.namenode.FileSystemImage</mainClass>
+           </manifest>
+         </archive>
+        </configuration>
+       </plugin>
+    </plugins>
+  </build>
+
+</project>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/616765e9/hadoop-tools/hadoop-fs2img/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockResolver.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-fs2img/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockResolver.java b/hadoop-tools/hadoop-fs2img/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockResolver.java
new file mode 100644
index 0000000..94b92b8
--- /dev/null
+++ b/hadoop-tools/hadoop-fs2img/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockResolver.java
@@ -0,0 +1,95 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto;
+
+/**
+ * Given an external reference, create a sequence of blocks and associated
+ * metadata.
+ */
+public abstract class BlockResolver {
+
+  protected BlockProto buildBlock(long blockId, long bytes) {
+    return buildBlock(blockId, bytes, 1001); // default genstamp; matches GenstampV2 written by ImageWriter
+  }
+
+  protected BlockProto buildBlock(long blockId, long bytes, long genstamp) {
+    BlockProto.Builder b = BlockProto.newBuilder()
+        .setBlockId(blockId)
+        .setNumBytes(bytes)
+        .setGenStamp(genstamp);
+    return b.build();
+  }
+
+  /**
+   * @param s the external reference.
+   * @return sequence of blocks that make up the reference.
+   */
+  public Iterable<BlockProto> resolve(FileStatus s) {
+    List<Long> lengths = blockLengths(s);
+    ArrayList<BlockProto> ret = new ArrayList<>(lengths.size());
+    long tot = 0;
+    for (long l : lengths) {
+      tot += l;
+      ret.add(buildBlock(nextId(), l));
+    }
+    if (tot != s.getLen()) {
+      // log a warning?
+      throw new IllegalStateException(
+          "Expected " + s.getLen() + " found " + tot);
+    }
+    return ret;
+  }
+
+  /**
+   * @return the next block id.
+   */
+  public abstract long nextId();
+
+  /**
+   * @return the maximum sequentially allocated block ID for this filesystem.
+   */
+  protected abstract long lastId();
+
+  /**
+   * @param status the external reference.
+   * @return the lengths of the resultant blocks.
+   */
+  protected abstract List<Long> blockLengths(FileStatus status);
+
+
+  /**
+   * @param status the external reference.
+   * @return the block size to assign to this external reference.
+   */
+  public long preferredBlockSize(FileStatus status) {
+    return status.getBlockSize();
+  }
+
+  /**
+   * @param status the external reference.
+   * @return the replication to assign to this external reference.
+   */
+  public abstract int getReplication(FileStatus status);
+
+}
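
A minimal sketch of the contract above (illustrative only, not part of this patch): a resolver that carves every file into fixed 64 MB blocks with a single replica. The class name and starting block id are arbitrary.

// Illustrative sketch, not part of this patch: fixed 64 MB blocks, one replica.
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.atomic.AtomicLong;

import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.hdfs.server.namenode.BlockResolver;

public class SixtyFourMbBlockResolver extends BlockResolver {

  private static final long BLOCK = 64L << 20;              // 64 MB
  private final AtomicLong ids = new AtomicLong(1L << 30);  // arbitrary start id

  @Override
  public long nextId() {
    return ids.incrementAndGet();
  }

  @Override
  protected long lastId() {
    return ids.get();
  }

  @Override
  protected List<Long> blockLengths(FileStatus s) {
    List<Long> ret = new ArrayList<>();
    if (!s.isFile()) {
      return ret;                    // only files carry blocks
    }
    long len = s.getLen();
    do {                             // an empty file still gets one zero-length block
      ret.add(Math.min(BLOCK, len));
      len -= BLOCK;
    } while (len > 0);
    return ret;
  }

  @Override
  public int getReplication(FileStatus s) {
    return 1;
  }
}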

http://git-wip-us.apache.org/repos/asf/hadoop/blob/616765e9/hadoop-tools/hadoop-fs2img/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSTreeWalk.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-fs2img/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSTreeWalk.java b/hadoop-tools/hadoop-fs2img/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSTreeWalk.java
new file mode 100644
index 0000000..f736112
--- /dev/null
+++ b/hadoop-tools/hadoop-fs2img/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSTreeWalk.java
@@ -0,0 +1,105 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode;
+
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.ConcurrentModificationException;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+
+/**
+ * Traversal of an external FileSystem.
+ */
+public class FSTreeWalk extends TreeWalk {
+
+  private final Path root;
+  private final FileSystem fs;
+
+  public FSTreeWalk(Path root, Configuration conf) throws IOException {
+    this.root = root;
+    fs = root.getFileSystem(conf);
+  }
+
+  @Override
+  protected Iterable<TreePath> getChildren(TreePath path, long id,
+      TreeIterator i) {
+    // TODO symlinks
+    if (!path.getFileStatus().isDirectory()) {
+      return Collections.emptyList();
+    }
+    try {
+      ArrayList<TreePath> ret = new ArrayList<>();
+      for (FileStatus s : fs.listStatus(path.getFileStatus().getPath())) {
+        ret.add(new TreePath(s, id, i));
+      }
+      return ret;
+    } catch (FileNotFoundException e) {
+      throw new ConcurrentModificationException("FS modified");
+    } catch (IOException e) {
+      throw new RuntimeException(e);
+    }
+  }
+
+  class FSTreeIterator extends TreeIterator {
+
+    private FSTreeIterator() {
+    }
+
+    FSTreeIterator(TreePath p) {
+      getPendingQueue().addFirst(
+          new TreePath(p.getFileStatus(), p.getParentId(), this));
+    }
+
+    FSTreeIterator(Path p) throws IOException {
+      try {
+        FileStatus s = fs.getFileStatus(p);
+        getPendingQueue().addFirst(new TreePath(s, -1L, this));
+      } catch (FileNotFoundException e) {
+        if (p.equals(root)) {
+          throw e;
+        }
+        throw new ConcurrentModificationException("FS modified");
+      }
+    }
+
+    @Override
+    public TreeIterator fork() {
+      if (getPendingQueue().isEmpty()) {
+        return new FSTreeIterator();
+      }
+      return new FSTreeIterator(getPendingQueue().removeFirst());
+    }
+
+  }
+
+  @Override
+  public TreeIterator iterator() {
+    try {
+      return new FSTreeIterator(root);
+    } catch (IOException e) {
+      throw new RuntimeException(e);
+    }
+  }
+
+}
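
A usage sketch (illustrative only, not part of this patch). Children are enqueued only when a TreePath is accepted, so a bare iteration stops at the root; the snippet assumes it lives in org.apache.hadoop.hdfs.server.namenode because TreePath#accept(long) is package-private, and the input path is a placeholder.

// Illustrative sketch, not part of this patch. TreePath#accept(long) is
// package-private, so this example assumes the same package; the input
// path is a placeholder.
package org.apache.hadoop.hdfs.server.namenode;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;

public class WalkExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FSTreeWalk walk = new FSTreeWalk(new Path("file:///tmp/data"), conf);
    long id = (1L << 14) + 1;       // mirrors the default starting inode id
    for (TreePath p : walk) {
      p.accept(id++);               // enqueues the children; without this the
                                    // traversal never descends past the root
      System.out.println(p.getFileStatus().getPath());
    }
  }
}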

http://git-wip-us.apache.org/repos/asf/hadoop/blob/616765e9/hadoop-tools/hadoop-fs2img/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileSystemImage.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-fs2img/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileSystemImage.java b/hadoop-tools/hadoop-fs2img/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileSystemImage.java
new file mode 100644
index 0000000..e1e85c1
--- /dev/null
+++ b/hadoop-tools/hadoop-fs2img/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileSystemImage.java
@@ -0,0 +1,139 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode;
+
+import java.io.File;
+
+import org.apache.commons.cli.CommandLine;
+import org.apache.commons.cli.CommandLineParser;
+import org.apache.commons.cli.HelpFormatter;
+import org.apache.commons.cli.Option;
+import org.apache.commons.cli.Options;
+import org.apache.commons.cli.ParseException;
+import org.apache.commons.cli.PosixParser;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.server.common.BlockFormat;
+import org.apache.hadoop.util.ReflectionUtils;
+import org.apache.hadoop.util.Tool;
+import org.apache.hadoop.util.ToolRunner;
+
+/**
+ * Create FSImage from an external namespace.
+ */
+public class FileSystemImage implements Tool {
+
+  private Configuration conf;
+
+  @Override
+  public Configuration getConf() {
+    return conf;
+  }
+
+  @Override
+  public void setConf(Configuration conf) {
+    this.conf = conf;
+    // require absolute URI to write anywhere but local
+    FileSystem.setDefaultUri(conf, new File(".").toURI().toString());
+  }
+
+  protected void printUsage() {
+    HelpFormatter formatter = new HelpFormatter();
+    formatter.printHelp("fs2img [OPTIONS] URI", new Options());
+    formatter.setSyntaxPrefix("");
+    formatter.printHelp("Options", options());
+    ToolRunner.printGenericCommandUsage(System.out);
+  }
+
+  static Options options() {
+    Options options = new Options();
+    options.addOption("o", "outdir", true, "Output directory");
+    options.addOption("u", "ugiclass", true, "UGI resolver class");
+    options.addOption("b", "blockclass", true, "Block output class");
+    options.addOption("i", "blockidclass", true, "Block resolver class");
+    options.addOption("c", "cachedirs", true, "Max active dirents");
+    options.addOption("h", "help", false, "Print usage");
+    return options;
+  }
+
+  @Override
+  public int run(String[] argv) throws Exception {
+    Options options = options();
+    CommandLineParser parser = new PosixParser();
+    CommandLine cmd;
+    try {
+      cmd = parser.parse(options, argv);
+    } catch (ParseException e) {
+      System.out.println(
+          "Error parsing command-line options: " + e.getMessage());
+      printUsage();
+      return -1;
+    }
+
+    if (cmd.hasOption("h")) {
+      printUsage();
+      return -1;
+    }
+
+    ImageWriter.Options opts =
+        ReflectionUtils.newInstance(ImageWriter.Options.class, getConf());
+    for (Option o : cmd.getOptions()) {
+      switch (o.getOpt()) {
+      case "o":
+        opts.output(o.getValue());
+        break;
+      case "u":
+        opts.ugi(Class.forName(o.getValue()).asSubclass(UGIResolver.class));
+        break;
+      case "b":
+        opts.blocks(
+            Class.forName(o.getValue()).asSubclass(BlockFormat.class));
+        break;
+      case "i":
+        opts.blockIds(
+            Class.forName(o.getValue()).asSubclass(BlockResolver.class));
+        break;
+      case "c":
+        opts.cache(Integer.parseInt(o.getValue()));
+        break;
+      default:
+        throw new UnsupportedOperationException("Internal error");
+      }
+    }
+
+    String[] rem = cmd.getArgs();
+    if (rem.length != 1) {
+      printUsage();
+      return -1;
+    }
+
+    try (ImageWriter w = new ImageWriter(opts)) {
+      for (TreePath e : new FSTreeWalk(new Path(rem[0]), getConf())) {
+        w.accept(e); // add and continue
+      }
+    }
+    return 0;
+  }
+
+  public static void main(String[] argv) throws Exception {
+    int ret = ToolRunner.run(new FileSystemImage(), argv);
+    System.exit(ret);
+  }
+
+}
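
An invocation sketch (illustrative only, not part of this patch): driving the tool through ToolRunner exactly as main() does. The output directory, source URI, and the explicit -u value (SingleUGIResolver is already the default) are placeholders.

// Illustrative sketch, not part of this patch; paths are placeholders and
// SingleUGIResolver is already the default UGI resolver.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.server.namenode.FileSystemImage;
import org.apache.hadoop.util.ToolRunner;

public class Fs2ImgDriver {
  public static void main(String[] args) throws Exception {
    int rc = ToolRunner.run(new Configuration(), new FileSystemImage(),
        new String[] {
            "-o", "file:///tmp/name",   // where the NN storage dir is written
            "-u", "org.apache.hadoop.hdfs.server.namenode.SingleUGIResolver",
            "file:///data/to/mirror"    // the external namespace to image
        });
    System.exit(rc);
  }
}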

http://git-wip-us.apache.org/repos/asf/hadoop/blob/616765e9/hadoop-tools/hadoop-fs2img/src/main/java/org/apache/hadoop/hdfs/server/namenode/FixedBlockMultiReplicaResolver.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-fs2img/src/main/java/org/apache/hadoop/hdfs/server/namenode/FixedBlockMultiReplicaResolver.java b/hadoop-tools/hadoop-fs2img/src/main/java/org/apache/hadoop/hdfs/server/namenode/FixedBlockMultiReplicaResolver.java
new file mode 100644
index 0000000..0c8ce6e
--- /dev/null
+++ b/hadoop-tools/hadoop-fs2img/src/main/java/org/apache/hadoop/hdfs/server/namenode/FixedBlockMultiReplicaResolver.java
@@ -0,0 +1,44 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
+
+/**
+ * Resolver mapping all files to a configurable, uniform blocksize
+ * and replication.
+ */
+public class FixedBlockMultiReplicaResolver extends FixedBlockResolver {
+
+  public static final String REPLICATION =
+      "hdfs.image.writer.resolver.fixed.block.replication";
+
+  private int replication;
+
+  @Override
+  public void setConf(Configuration conf) {
+    super.setConf(conf);
+    replication = conf.getInt(REPLICATION, 1);
+  }
+
+  public int getReplication(FileStatus s) {
+    return replication;
+  }
+
+}
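
A configuration sketch (illustrative only, not part of this patch): the keys come from this patch, the replication value is made up.

// Illustrative sketch, not part of this patch; the replication value is made up.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.server.namenode.BlockResolver;
import org.apache.hadoop.hdfs.server.namenode.FixedBlockMultiReplicaResolver;
import org.apache.hadoop.hdfs.server.namenode.ImageWriter;

public class ReplicaConfig {
  public static Configuration threeReplicas() {
    Configuration conf = new Configuration();
    conf.setClass(ImageWriter.Options.BLOCK_RESOLVER_CLASS,
        FixedBlockMultiReplicaResolver.class, BlockResolver.class);
    conf.setInt(FixedBlockMultiReplicaResolver.REPLICATION, 3);
    return conf;
  }
}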

http://git-wip-us.apache.org/repos/asf/hadoop/blob/616765e9/hadoop-tools/hadoop-fs2img/src/main/java/org/apache/hadoop/hdfs/server/namenode/FixedBlockResolver.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-fs2img/src/main/java/org/apache/hadoop/hdfs/server/namenode/FixedBlockResolver.java b/hadoop-tools/hadoop-fs2img/src/main/java/org/apache/hadoop/hdfs/server/namenode/FixedBlockResolver.java
new file mode 100644
index 0000000..8ff9695
--- /dev/null
+++ b/hadoop-tools/hadoop-fs2img/src/main/java/org/apache/hadoop/hdfs/server/namenode/FixedBlockResolver.java
@@ -0,0 +1,93 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.atomic.AtomicLong;
+
+import org.apache.hadoop.conf.Configurable;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
+
+/**
+ * Resolver mapping all files to a configurable, uniform blocksize.
+ */
+public class FixedBlockResolver extends BlockResolver implements Configurable {
+
+  public static final String BLOCKSIZE =
+      "hdfs.image.writer.resolver.fixed.block.size";
+  public static final String START_BLOCK =
+      "hdfs.image.writer.resolver.fixed.block.start";
+
+  private Configuration conf;
+  private long blocksize = 256 * (1L << 20);
+  private final AtomicLong blockIds = new AtomicLong(0);
+
+  @Override
+  public void setConf(Configuration conf) {
+    this.conf = conf;
+    blocksize = conf.getLong(BLOCKSIZE, 256 * (1L << 20));
+    blockIds.set(conf.getLong(START_BLOCK, (1L << 30)));
+  }
+
+  @Override
+  public Configuration getConf() {
+    return conf;
+  }
+
+  @Override
+  protected List<Long> blockLengths(FileStatus s) {
+    ArrayList<Long> ret = new ArrayList<>();
+    if (!s.isFile()) {
+      return ret;
+    }
+    if (0 == s.getLen()) {
+      // the file has length 0; so we will have one block of size 0
+      ret.add(0L);
+      return ret;
+    }
+    int nblocks = (int)((s.getLen() - 1) / blocksize) + 1;
+    for (int i = 0; i < nblocks - 1; ++i) {
+      ret.add(blocksize);
+    }
+    long rem = s.getLen() % blocksize;
+    ret.add(0 == rem ? blocksize : rem); // an exact multiple gets a full final block
+    return ret;
+  }
+
+  @Override
+  public long nextId() {
+    return blockIds.incrementAndGet();
+  }
+
+  @Override
+  public long lastId() {
+    return blockIds.get();
+  }
+
+  @Override
+  public long preferredBlockSize(FileStatus s) {
+    return blocksize;
+  }
+
+  @Override
+  public int getReplication(FileStatus s) {
+    return 1;
+  }
+}
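
For example, with the 256 MB default a 600 MB file resolves to block lengths of 256 MB, 256 MB and 88 MB. A small sketch (illustrative only, not part of this patch) that overrides the block size; ReflectionUtils.newInstance is used so setConf() runs and picks the key up.

// Illustrative sketch, not part of this patch.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.server.namenode.FixedBlockResolver;
import org.apache.hadoop.util.ReflectionUtils;

public class BlockSizeExample {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    conf.setLong(FixedBlockResolver.BLOCKSIZE, 128L << 20);  // 128 MB blocks
    // newInstance() calls setConf(), which reads BLOCKSIZE and START_BLOCK
    FixedBlockResolver r =
        ReflectionUtils.newInstance(FixedBlockResolver.class, conf);
    System.out.println(r.preferredBlockSize(null));  // 134217728; arg is ignored
  }
}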

http://git-wip-us.apache.org/repos/asf/hadoop/blob/616765e9/hadoop-tools/hadoop-fs2img/src/main/java/org/apache/hadoop/hdfs/server/namenode/FsUGIResolver.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-fs2img/src/main/java/org/apache/hadoop/hdfs/server/namenode/FsUGIResolver.java b/hadoop-tools/hadoop-fs2img/src/main/java/org/apache/hadoop/hdfs/server/namenode/FsUGIResolver.java
new file mode 100644
index 0000000..ca16d96
--- /dev/null
+++ b/hadoop-tools/hadoop-fs2img/src/main/java/org/apache/hadoop/hdfs/server/namenode/FsUGIResolver.java
@@ -0,0 +1,58 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode;
+
+import java.util.HashSet;
+import java.util.Set;
+
+/**
+ * Dynamically assign ids to users/groups as they appear in the external
+ * filesystem.
+ */
+public class FsUGIResolver extends UGIResolver {
+
+  private int id;
+  private final Set<String> usernames;
+  private final Set<String> groupnames;
+
+  FsUGIResolver() {
+    super();
+    id = 0;
+    usernames = new HashSet<String>();
+    groupnames = new HashSet<String>();
+  }
+
+  @Override
+  public synchronized void addUser(String name) {
+    if (!usernames.contains(name)) {
+      addUser(name, id);
+      id++;
+      usernames.add(name);
+    }
+  }
+
+  @Override
+  public synchronized void addGroup(String name) {
+    if (!groupnames.contains(name)) {
+      addGroup(name, id);
+      id++;
+      groupnames.add(name);
+    }
+  }
+
+}
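
Because users and groups share a single counter, every name receives a distinct string-table id. A small sketch (illustrative only, not part of this patch; the names are made up):

// Illustrative sketch, not part of this patch; the names are made up.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.server.namenode.FsUGIResolver;
import org.apache.hadoop.hdfs.server.namenode.UGIResolver;
import org.apache.hadoop.util.ReflectionUtils;

public class UgiMapExample {
  public static void main(String[] args) {
    UGIResolver ugis =
        ReflectionUtils.newInstance(FsUGIResolver.class, new Configuration());
    ugis.addUser("alice");
    ugis.addUser("alice");              // second call is a no-op: already mapped
    ugis.addGroup("staff");
    System.out.println(ugis.ugiMap());  // e.g. {0=alice, 1=staff}
  }
}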

http://git-wip-us.apache.org/repos/asf/hadoop/blob/616765e9/hadoop-tools/hadoop-fs2img/src/main/java/org/apache/hadoop/hdfs/server/namenode/ImageWriter.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-fs2img/src/main/java/org/apache/hadoop/hdfs/server/namenode/ImageWriter.java b/hadoop-tools/hadoop-fs2img/src/main/java/org/apache/hadoop/hdfs/server/namenode/ImageWriter.java
new file mode 100644
index 0000000..a3603a1
--- /dev/null
+++ b/hadoop-tools/hadoop-fs2img/src/main/java/org/apache/hadoop/hdfs/server/namenode/ImageWriter.java
@@ -0,0 +1,600 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode;
+
+import java.io.BufferedOutputStream;
+import java.io.Closeable;
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.FileOutputStream;
+import java.io.FilterOutputStream;
+import java.io.OutputStream;
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.security.DigestOutputStream;
+import java.security.MessageDigest;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.LinkedHashMap;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.concurrent.atomic.AtomicLong;
+
+import com.google.common.base.Charsets;
+import com.google.protobuf.CodedOutputStream;
+
+import org.apache.hadoop.conf.Configurable;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.LocalFileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.server.common.BlockFormat;
+import org.apache.hadoop.hdfs.server.common.FileRegion;
+import org.apache.hadoop.hdfs.server.namenode.FSImageFormatProtobuf.SectionName;
+import org.apache.hadoop.hdfs.server.namenode.FsImageProto.CacheManagerSection;
+import org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary;
+import org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection;
+import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection.DirEntry;
+import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode;
+import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection;
+import org.apache.hadoop.hdfs.server.namenode.FsImageProto.NameSystemSection;
+import org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection;
+import org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection;
+import org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection;
+import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
+import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.io.MD5Hash;
+import org.apache.hadoop.io.compress.CompressionCodec;
+import org.apache.hadoop.io.compress.CompressorStream;
+import org.apache.hadoop.util.ReflectionUtils;
+import org.apache.hadoop.util.StringUtils;
+
+import static org.apache.hadoop.hdfs.server.namenode.FSImageUtil.MAGIC_HEADER;
+
+/**
+ * Utility crawling an existing hierarchical FileSystem and emitting
+ * a valid FSImage/NN storage.
+ */
+// TODO: generalize to types beyond FileRegion
+public class ImageWriter implements Closeable {
+
+  private static final int ONDISK_VERSION = 1;
+  private static final int LAYOUT_VERSION = -64; // see NameNodeLayoutVersion
+
+  private final Path outdir;
+  private final FileSystem outfs;
+  private final File dirsTmp;
+  private final OutputStream dirs;
+  private final File inodesTmp;
+  private final OutputStream inodes;
+  private final MessageDigest digest;
+  private final FSImageCompression compress;
+  private final long startBlock;
+  private final long startInode;
+  private final UGIResolver ugis;
+  private final BlockFormat.Writer<FileRegion> blocks;
+  private final BlockResolver blockIds;
+  private final Map<Long, DirEntry.Builder> dircache;
+  private final TrackedOutputStream<DigestOutputStream> raw;
+
+  private boolean closed = false;
+  private long curSec;
+  private long curBlock;
+  private final AtomicLong curInode;
+  private final FileSummary.Builder summary = FileSummary.newBuilder()
+      .setOndiskVersion(ONDISK_VERSION)
+      .setLayoutVersion(LAYOUT_VERSION);
+
+  private final String blockPoolID;
+
+  public static Options defaults() {
+    return new Options();
+  }
+
+  @SuppressWarnings("unchecked")
+  public ImageWriter(Options opts) throws IOException {
+    final OutputStream out;
+    if (null == opts.outStream) {
+      FileSystem fs = opts.outdir.getFileSystem(opts.getConf());
+      outfs = (fs instanceof LocalFileSystem)
+          ? ((LocalFileSystem)fs).getRaw()
+          : fs;
+      Path tmp = opts.outdir;
+      if (!outfs.mkdirs(tmp)) {
+        throw new IOException("Failed to create output dir: " + tmp);
+      }
+      try (NNStorage stor = new NNStorage(opts.getConf(),
+          Arrays.asList(tmp.toUri()), Arrays.asList(tmp.toUri()))) {
+        NamespaceInfo info = NNStorage.newNamespaceInfo();
+        if (info.getLayoutVersion() != LAYOUT_VERSION) {
+          throw new IllegalStateException("Incompatible layout " +
+              info.getLayoutVersion() + " (expected " + LAYOUT_VERSION + ")");
+        }
+        stor.format(info);
+        blockPoolID = info.getBlockPoolID();
+      }
+      outdir = new Path(tmp, "current");
+      out = outfs.create(new Path(outdir, "fsimage_0000000000000000000"));
+    } else {
+      // XXX necessary? writing a NNStorage now...
+      outdir = null;
+      outfs = null;
+      out = opts.outStream;
+      blockPoolID = "";
+    }
+    digest = MD5Hash.getDigester();
+    raw = new TrackedOutputStream<>(new DigestOutputStream(
+            new BufferedOutputStream(out), digest));
+    compress = opts.compress;
+    CompressionCodec codec = compress.getImageCodec();
+    if (codec != null) {
+      summary.setCodec(codec.getClass().getCanonicalName());
+    }
+    startBlock = opts.startBlock;
+    curBlock = startBlock;
+    startInode = opts.startInode;
+    curInode = new AtomicLong(startInode);
+    dircache = Collections.synchronizedMap(new DirEntryCache(opts.maxdircache));
+
+    ugis = null == opts.ugis
+        ? ReflectionUtils.newInstance(opts.ugisClass, opts.getConf())
+        : opts.ugis;
+    BlockFormat<FileRegion> fmt = null == opts.blocks
+        ? ReflectionUtils.newInstance(opts.blockFormatClass, opts.getConf())
+        : opts.blocks;
+    blocks = fmt.getWriter(null);
+    blockIds = null == opts.blockIds
+        ? ReflectionUtils.newInstance(opts.blockIdsClass, opts.getConf())
+        : opts.blockIds;
+
+    // create directory and inode sections as side-files.
+    // The details are written to files to avoid keeping them in memory.
+    dirsTmp = File.createTempFile("fsimg_dir", null);
+    dirsTmp.deleteOnExit();
+    dirs = beginSection(new FileOutputStream(dirsTmp));
+    try {
+      inodesTmp = File.createTempFile("fsimg_inode", null);
+      inodesTmp.deleteOnExit();
+      inodes = new FileOutputStream(inodesTmp);
+    } catch (IOException e) {
+      // appropriate to close raw?
+      IOUtils.cleanup(null, raw, dirs);
+      throw e;
+    }
+
+    raw.write(MAGIC_HEADER);
+    curSec = raw.pos;
+    assert raw.pos == MAGIC_HEADER.length;
+  }
+
+  public void accept(TreePath e) throws IOException {
+    assert e.getParentId() < curInode.get();
+    // allocate ID
+    long id = curInode.getAndIncrement();
+    e.accept(id);
+    assert e.getId() < curInode.get();
+    INode n = e.toINode(ugis, blockIds, blocks, blockPoolID);
+    writeInode(n);
+
+    if (e.getParentId() > 0) {
+      // add DirEntry to map, which may page out entries
+      DirEntry.Builder de = DirEntry.newBuilder()
+          .setParent(e.getParentId())
+          .addChildren(e.getId());
+      dircache.put(e.getParentId(), de);
+    }
+  }
+
+  @SuppressWarnings("serial")
+  class DirEntryCache extends LinkedHashMap<Long, DirEntry.Builder> {
+
+    // should cache the path to the root rather than evicting in LRU order
+    private final int nEntries;
+
+    DirEntryCache(int nEntries) {
+      this.nEntries = nEntries;
+    }
+
+    @Override
+    public DirEntry.Builder put(Long p, DirEntry.Builder b) {
+      DirEntry.Builder e = get(p);
+      if (null == e) {
+        return super.put(p, b);
+      }
+      //merge
+      e.addAllChildren(b.getChildrenList());
+      // not strictly conforming
+      return e;
+    }
+
+    @Override
+    protected boolean removeEldestEntry(Entry<Long, DirEntry.Builder> be) {
+      if (size() > nEntries) {
+        DirEntry d = be.getValue().build();
+        try {
+          writeDirEntry(d);
+        } catch (IOException e) {
+          throw new RuntimeException(e);
+        }
+        return true;
+      }
+      return false;
+    }
+  }
+
+  synchronized void writeInode(INode n) throws IOException {
+    n.writeDelimitedTo(inodes);
+  }
+
+  synchronized void writeDirEntry(DirEntry e) throws IOException {
+    e.writeDelimitedTo(dirs);
+  }
+
+  // from FSImageFormatProtobuf... why not just read position from the stream?
+  private static int getOndiskSize(com.google.protobuf.GeneratedMessage s) {
+    return CodedOutputStream.computeRawVarint32Size(s.getSerializedSize())
+        + s.getSerializedSize();
+  }
+
+  @Override
+  public synchronized void close() throws IOException {
+    if (closed) {
+      return;
+    }
+    for (DirEntry.Builder b : dircache.values()) {
+      DirEntry e = b.build();
+      writeDirEntry(e);
+    }
+    dircache.clear();
+
+    // close side files
+    IOUtils.cleanup(null, dirs, inodes, blocks);
+    if (null == dirs || null == inodes) {
+      // init failed
+      if (raw != null) {
+        raw.close();
+      }
+      return;
+    }
+    try {
+      writeNameSystemSection();
+      writeINodeSection();
+      writeDirSection();
+      writeStringTableSection();
+
+      // write summary directly to raw
+      FileSummary s = summary.build();
+      s.writeDelimitedTo(raw);
+      int length = getOndiskSize(s);
+      byte[] lengthBytes = new byte[4];
+      ByteBuffer.wrap(lengthBytes).asIntBuffer().put(length);
+      raw.write(lengthBytes);
+    } finally {
+      raw.close();
+    }
+    writeMD5("fsimage_0000000000000000000");
+    closed = true;
+  }
+
+  /**
+   * Write checksum for image file. Pulled from MD5Utils/internals. Awkward to
+   * reuse existing tools/utils.
+   */
+  void writeMD5(String imagename) throws IOException {
+    if (null == outdir) {
+      //LOG.warn("Not writing MD5");
+      return;
+    }
+    MD5Hash md5 = new MD5Hash(digest.digest());
+    String digestString = StringUtils.byteToHexString(md5.getDigest());
+    Path chk = new Path(outdir, imagename + ".md5");
+    try (OutputStream out = outfs.create(chk)) {
+      String md5Line = digestString + " *" + imagename + "\n";
+      out.write(md5Line.getBytes(Charsets.UTF_8));
+    }
+  }
+
+  OutputStream beginSection(OutputStream out) throws IOException {
+    CompressionCodec codec = compress.getImageCodec();
+    if (null == codec) {
+      return out;
+    }
+    return codec.createOutputStream(out);
+  }
+
+  void endSection(OutputStream out, SectionName name) throws IOException {
+    CompressionCodec codec = compress.getImageCodec();
+    if (codec != null) {
+      ((CompressorStream)out).finish();
+    }
+    out.flush();
+    long length = raw.pos - curSec;
+    summary.addSections(FileSummary.Section.newBuilder()
+        .setName(name.toString()) // not strictly correct, but name not visible
+        .setOffset(curSec).setLength(length));
+    curSec += length;
+  }
+
+  void writeNameSystemSection() throws IOException {
+    NameSystemSection.Builder b = NameSystemSection.newBuilder()
+        .setGenstampV1(1000)
+        .setGenstampV1Limit(0)
+        .setGenstampV2(1001)
+        .setLastAllocatedBlockId(blockIds.lastId())
+        .setTransactionId(0);
+    NameSystemSection s = b.build();
+
+    OutputStream sec = beginSection(raw);
+    s.writeDelimitedTo(sec);
+    endSection(sec, SectionName.NS_INFO);
+  }
+
+  void writeINodeSection() throws IOException {
+    // could reset dict to avoid compression cost in close
+    INodeSection.Builder b = INodeSection.newBuilder()
+        .setNumInodes(curInode.get() - startInode)
+        .setLastInodeId(curInode.get());
+    INodeSection s = b.build();
+
+    OutputStream sec = beginSection(raw);
+    s.writeDelimitedTo(sec);
+    // copy inodes
+    try (FileInputStream in = new FileInputStream(inodesTmp)) {
+      IOUtils.copyBytes(in, sec, 4096, false);
+    }
+    endSection(sec, SectionName.INODE);
+  }
+
+  void writeDirSection() throws IOException {
+    // No header, so dirs can be written/compressed independently
+    //INodeDirectorySection.Builder b = INodeDirectorySection.newBuilder();
+    OutputStream sec = raw;
+    // copy dirs
+    try (FileInputStream in = new FileInputStream(dirsTmp)) {
+      IOUtils.copyBytes(in, sec, 4096, false);
+    }
+    endSection(sec, SectionName.INODE_DIR);
+  }
+
+  void writeFilesUCSection() throws IOException {
+    FilesUnderConstructionSection.Builder b =
+        FilesUnderConstructionSection.newBuilder();
+    FilesUnderConstructionSection s = b.build();
+
+    OutputStream sec = beginSection(raw);
+    s.writeDelimitedTo(sec);
+    endSection(sec, SectionName.FILES_UNDERCONSTRUCTION);
+  }
+
+  void writeSnapshotDiffSection() throws IOException {
+    SnapshotDiffSection.Builder b = SnapshotDiffSection.newBuilder();
+    SnapshotDiffSection s = b.build();
+
+    OutputStream sec = beginSection(raw);
+    s.writeDelimitedTo(sec);
+    endSection(sec, SectionName.SNAPSHOT_DIFF);
+  }
+
+  void writeSecretManagerSection() throws IOException {
+    SecretManagerSection.Builder b = SecretManagerSection.newBuilder()
+        .setCurrentId(0)
+        .setTokenSequenceNumber(0);
+    SecretManagerSection s = b.build();
+
+    OutputStream sec = beginSection(raw);
+    s.writeDelimitedTo(sec);
+    endSection(sec, SectionName.SECRET_MANAGER);
+  }
+
+  void writeCacheManagerSection() throws IOException {
+    CacheManagerSection.Builder b = CacheManagerSection.newBuilder()
+        .setNumPools(0)
+        .setNumDirectives(0)
+        .setNextDirectiveId(1);
+    CacheManagerSection s = b.build();
+
+    OutputStream sec = beginSection(raw);
+    s.writeDelimitedTo(sec);
+    endSection(sec, SectionName.CACHE_MANAGER);
+  }
+
+  void writeStringTableSection() throws IOException {
+    StringTableSection.Builder b = StringTableSection.newBuilder();
+    Map<Integer, String> u = ugis.ugiMap();
+    b.setNumEntry(u.size());
+    StringTableSection s = b.build();
+
+    OutputStream sec = beginSection(raw);
+    s.writeDelimitedTo(sec);
+    for (Map.Entry<Integer, String> e : u.entrySet()) {
+      StringTableSection.Entry.Builder x =
+          StringTableSection.Entry.newBuilder()
+              .setId(e.getKey())
+              .setStr(e.getValue());
+      x.build().writeDelimitedTo(sec);
+    }
+    endSection(sec, SectionName.STRING_TABLE);
+  }
+
+  @Override
+  public synchronized String toString() {
+    StringBuilder sb = new StringBuilder();
+    sb.append("{ codec=\"").append(compress.getImageCodec());
+    sb.append("\", startBlock=").append(startBlock);
+    sb.append(", curBlock=").append(curBlock);
+    sb.append(", startInode=").append(startInode);
+    sb.append(", curInode=").append(curInode);
+    sb.append(", ugi=").append(ugis);
+    sb.append(", blockIds=").append(blockIds);
+    sb.append(", offset=").append(raw.pos);
+    sb.append(" }");
+    return sb.toString();
+  }
+
+  static class TrackedOutputStream<T extends OutputStream>
+      extends FilterOutputStream {
+
+    private long pos = 0L;
+
+    TrackedOutputStream(T out) {
+      super(out);
+    }
+
+    @SuppressWarnings("unchecked")
+    public T getInner() {
+      return (T) out;
+    }
+
+    @Override
+    public void write(int b) throws IOException {
+      out.write(b);
+      ++pos;
+    }
+
+    @Override
+    public void write(byte[] b) throws IOException {
+      write(b, 0, b.length);
+    }
+
+    @Override
+    public void write(byte[] b, int off, int len) throws IOException {
+      out.write(b, off, len);
+      pos += len;
+    }
+
+    @Override
+    public void flush() throws IOException {
+      super.flush();
+    }
+
+    @Override
+    public void close() throws IOException {
+      super.close();
+    }
+
+  }
+
+  /**
+   * Configurable options for image generation mapping pluggable components.
+   */
+  public static class Options implements Configurable {
+
+    public static final String START_INODE = "hdfs.image.writer.start.inode";
+    public static final String CACHE_ENTRY = "hdfs.image.writer.cache.entries";
+    public static final String UGI_CLASS   = "hdfs.image.writer.ugi.class";
+    public static final String BLOCK_RESOLVER_CLASS =
+        "hdfs.image.writer.blockresolver.class";
+
+    private Path outdir;
+    private Configuration conf;
+    private OutputStream outStream;
+    private int maxdircache;
+    private long startBlock;
+    private long startInode;
+    private UGIResolver ugis;
+    private Class<? extends UGIResolver> ugisClass;
+    private BlockFormat<FileRegion> blocks;
+
+    @SuppressWarnings("rawtypes")
+    private Class<? extends BlockFormat> blockFormatClass;
+    private BlockResolver blockIds;
+    private Class<? extends BlockResolver> blockIdsClass;
+    private FSImageCompression compress =
+        FSImageCompression.createNoopCompression();
+
+    protected Options() {
+    }
+
+    @Override
+    public void setConf(Configuration conf) {
+      this.conf = conf;
+      //long lastTxn = conf.getLong(LAST_TXN, 0L);
+      String def = new File("hdfs/name").toURI().toString();
+      outdir = new Path(conf.get(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, def));
+      startBlock = conf.getLong(FixedBlockResolver.START_BLOCK, (1L << 30) + 1);
+      startInode = conf.getLong(START_INODE, (1L << 14) + 1);
+      maxdircache = conf.getInt(CACHE_ENTRY, 100);
+      ugisClass = conf.getClass(UGI_CLASS,
+          SingleUGIResolver.class, UGIResolver.class);
+      blockFormatClass = conf.getClass(
+          DFSConfigKeys.DFS_PROVIDER_BLK_FORMAT_CLASS,
+          NullBlockFormat.class, BlockFormat.class);
+      blockIdsClass = conf.getClass(BLOCK_RESOLVER_CLASS,
+          FixedBlockResolver.class, BlockResolver.class);
+    }
+
+    @Override
+    public Configuration getConf() {
+      return conf;
+    }
+
+    public Options output(String out) {
+      this.outdir = new Path(out);
+      return this;
+    }
+
+    public Options outStream(OutputStream outStream) {
+      this.outStream = outStream;
+      return this;
+    }
+
+    public Options codec(String codec) throws IOException {
+      this.compress = FSImageCompression.createCompression(getConf(), codec);
+      return this;
+    }
+
+    public Options cache(int nDirEntries) {
+      this.maxdircache = nDirEntries;
+      return this;
+    }
+
+    public Options ugi(UGIResolver ugis) {
+      this.ugis = ugis;
+      return this;
+    }
+
+    public Options ugi(Class<? extends UGIResolver> ugisClass) {
+      this.ugisClass = ugisClass;
+      return this;
+    }
+
+    public Options blockIds(BlockResolver blockIds) {
+      this.blockIds = blockIds;
+      return this;
+    }
+
+    public Options blockIds(Class<? extends BlockResolver> blockIdsClass) {
+      this.blockIdsClass = blockIdsClass;
+      return this;
+    }
+
+    public Options blocks(BlockFormat<FileRegion> blocks) {
+      this.blocks = blocks;
+      return this;
+    }
+
+    @SuppressWarnings("rawtypes")
+    public Options blocks(Class<? extends BlockFormat> blocksClass) {
+      this.blockFormatClass = blocksClass;
+      return this;
+    }
+
+  }
+
+}
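
A programmatic sketch (illustrative only, not part of this patch) mirroring FileSystemImage#run; the output and source directories are placeholders.

// Illustrative sketch, not part of this patch, mirroring FileSystemImage#run;
// the directories are placeholders.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.server.namenode.FSTreeWalk;
import org.apache.hadoop.hdfs.server.namenode.ImageWriter;
import org.apache.hadoop.hdfs.server.namenode.TreePath;
import org.apache.hadoop.util.ReflectionUtils;

public class WriteImageExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    ImageWriter.Options opts =
        ReflectionUtils.newInstance(ImageWriter.Options.class, conf);
    opts.output("file:///tmp/name")     // becomes <outdir>/current/fsimage_...
        .cache(10000);                  // directory entries kept in memory
    try (ImageWriter w = new ImageWriter(opts)) {
      for (TreePath e : new FSTreeWalk(new Path("file:///data"), conf)) {
        w.accept(e);                    // assigns an inode id and writes it out
      }
    }
  }
}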

http://git-wip-us.apache.org/repos/asf/hadoop/blob/616765e9/hadoop-tools/hadoop-fs2img/src/main/java/org/apache/hadoop/hdfs/server/namenode/NullBlockFormat.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-fs2img/src/main/java/org/apache/hadoop/hdfs/server/namenode/NullBlockFormat.java b/hadoop-tools/hadoop-fs2img/src/main/java/org/apache/hadoop/hdfs/server/namenode/NullBlockFormat.java
new file mode 100644
index 0000000..aabdf74
--- /dev/null
+++ b/hadoop-tools/hadoop-fs2img/src/main/java/org/apache/hadoop/hdfs/server/namenode/NullBlockFormat.java
@@ -0,0 +1,87 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode;
+
+import java.io.IOException;
+import java.util.Iterator;
+import java.util.NoSuchElementException;
+
+import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.server.common.BlockFormat;
+import org.apache.hadoop.hdfs.server.common.BlockFormat.Reader.Options;
+import org.apache.hadoop.hdfs.server.common.FileRegion;
+
+/**
+ * Null sink for region information emitted from FSImage.
+ */
+public class NullBlockFormat extends BlockFormat<FileRegion> {
+
+  @Override
+  public Reader<FileRegion> getReader(Options opts) throws IOException {
+    return new Reader<FileRegion>() {
+      @Override
+      public Iterator<FileRegion> iterator() {
+        return new Iterator<FileRegion>() {
+          @Override
+          public boolean hasNext() {
+            return false;
+          }
+          @Override
+          public FileRegion next() {
+            throw new NoSuchElementException();
+          }
+          @Override
+          public void remove() {
+            throw new UnsupportedOperationException();
+          }
+        };
+      }
+
+      @Override
+      public void close() throws IOException {
+        // do nothing
+      }
+
+      @Override
+      public FileRegion resolve(Block ident) throws IOException {
+        throw new UnsupportedOperationException();
+      }
+    };
+  }
+
+  @Override
+  public Writer<FileRegion> getWriter(Writer.Options opts) throws IOException {
+    return new Writer<FileRegion>() {
+      @Override
+      public void store(FileRegion token) throws IOException {
+        // do nothing
+      }
+
+      @Override
+      public void close() throws IOException {
+        // do nothing
+      }
+    };
+  }
+
+  @Override
+  public void refresh() throws IOException {
+    // do nothing
+  }
+
+}
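
A configuration sketch (illustrative only, not part of this patch). NullBlockFormat is already the default block format in ImageWriter.Options#setConf; setting it explicitly just documents the knob.

// Illustrative sketch, not part of this patch.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.server.common.BlockFormat;
import org.apache.hadoop.hdfs.server.namenode.NullBlockFormat;

public class BlockFormatConfig {
  public static Configuration discardRegions() {
    Configuration conf = new Configuration();
    conf.setClass(DFSConfigKeys.DFS_PROVIDER_BLK_FORMAT_CLASS,
        NullBlockFormat.class, BlockFormat.class);
    return conf;
  }
}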

http://git-wip-us.apache.org/repos/asf/hadoop/blob/616765e9/hadoop-tools/hadoop-fs2img/src/main/java/org/apache/hadoop/hdfs/server/namenode/SingleUGIResolver.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-fs2img/src/main/java/org/apache/hadoop/hdfs/server/namenode/SingleUGIResolver.java b/hadoop-tools/hadoop-fs2img/src/main/java/org/apache/hadoop/hdfs/server/namenode/SingleUGIResolver.java
new file mode 100644
index 0000000..0fd3f2b
--- /dev/null
+++ b/hadoop-tools/hadoop-fs2img/src/main/java/org/apache/hadoop/hdfs/server/namenode/SingleUGIResolver.java
@@ -0,0 +1,90 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode;
+
+import java.io.IOException;
+
+import org.apache.hadoop.conf.Configurable;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.security.UserGroupInformation;
+
+/**
+ * Map all owners/groups in external system to a single user in FSImage.
+ */
+public class SingleUGIResolver extends UGIResolver implements Configurable {
+
+  public static final String UID   = "hdfs.image.writer.ugi.single.uid";
+  public static final String USER  = "hdfs.image.writer.ugi.single.user";
+  public static final String GID   = "hdfs.image.writer.ugi.single.gid";
+  public static final String GROUP = "hdfs.image.writer.ugi.single.group";
+
+  private int uid;
+  private int gid;
+  private String user;
+  private String group;
+  private Configuration conf;
+
+  @Override
+  public void setConf(Configuration conf) {
+    this.conf = conf;
+    uid = conf.getInt(UID, 0);
+    user = conf.get(USER);
+    if (null == user) {
+      try {
+        user = UserGroupInformation.getCurrentUser().getShortUserName();
+      } catch (IOException e) {
+        user = "hadoop";
+      }
+    }
+    gid = conf.getInt(GID, 1);
+    group = conf.get(GROUP);
+    if (null == group) {
+      group = user;
+    }
+
+    resetUGInfo();
+    addUser(user, uid);
+    addGroup(group, gid);
+  }
+
+  @Override
+  public Configuration getConf() {
+    return conf;
+  }
+
+  @Override
+  public String user(FileStatus s) {
+    return user;
+  }
+
+  @Override
+  public String group(FileStatus s) {
+    return group;
+  }
+
+  @Override
+  public void addUser(String name) {
+    //do nothing
+  }
+
+  @Override
+  public void addGroup(String name) {
+    //do nothing
+  }
+}
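
A configuration sketch (illustrative only, not part of this patch; the uid/gid and names are made up): every inode in the generated image will be owned by this single user and group.

// Illustrative sketch, not part of this patch; uid/gid and names are made up.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.server.namenode.SingleUGIResolver;
import org.apache.hadoop.hdfs.server.namenode.UGIResolver;
import org.apache.hadoop.util.ReflectionUtils;

public class SingleOwnerExample {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    conf.setInt(SingleUGIResolver.UID, 10001);
    conf.set(SingleUGIResolver.USER, "hdfs");
    conf.setInt(SingleUGIResolver.GID, 10002);
    conf.set(SingleUGIResolver.GROUP, "hadoop");
    // newInstance() calls setConf(), which registers the single user/group
    UGIResolver ugis =
        ReflectionUtils.newInstance(SingleUGIResolver.class, conf);
    System.out.println(ugis.ugiMap());  // e.g. {10001=hdfs, 10002=hadoop}
  }
}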

http://git-wip-us.apache.org/repos/asf/hadoop/blob/616765e9/hadoop-tools/hadoop-fs2img/src/main/java/org/apache/hadoop/hdfs/server/namenode/TreePath.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-fs2img/src/main/java/org/apache/hadoop/hdfs/server/namenode/TreePath.java b/hadoop-tools/hadoop-fs2img/src/main/java/org/apache/hadoop/hdfs/server/namenode/TreePath.java
new file mode 100644
index 0000000..14e6bed
--- /dev/null
+++ b/hadoop-tools/hadoop-fs2img/src/main/java/org/apache/hadoop/hdfs/server/namenode/TreePath.java
@@ -0,0 +1,167 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode;
+
+import java.io.IOException;
+
+import com.google.protobuf.ByteString;
+
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto;
+import org.apache.hadoop.hdfs.server.common.BlockFormat;
+import org.apache.hadoop.hdfs.server.common.FileRegion;
+import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode;
+import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory;
+import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile;
+import static org.apache.hadoop.hdfs.DFSUtil.string2Bytes;
+import static org.apache.hadoop.hdfs.server.namenode.DirectoryWithQuotaFeature.DEFAULT_NAMESPACE_QUOTA;
+import static org.apache.hadoop.hdfs.server.namenode.DirectoryWithQuotaFeature.DEFAULT_STORAGE_SPACE_QUOTA;
+
+/**
+ * Traversal cursor in external filesystem.
+ * TODO: generalize, move FS/FileRegion to FSTreePath
+ */
+public class TreePath {
+  private long id = -1;
+  private final long parentId;
+  private final FileStatus stat;
+  private final TreeWalk.TreeIterator i;
+
+  protected TreePath(FileStatus stat, long parentId, TreeWalk.TreeIterator i) {
+    this.i = i;
+    this.stat = stat;
+    this.parentId = parentId;
+  }
+
+  public FileStatus getFileStatus() {
+    return stat;
+  }
+
+  public long getParentId() {
+    return parentId;
+  }
+
+  public long getId() {
+    if (id < 0) {
+      throw new IllegalStateException();
+    }
+    return id;
+  }
+
+  void accept(long id) {
+    this.id = id;
+    i.onAccept(this, id);
+  }
+
+  public INode toINode(UGIResolver ugi, BlockResolver blk,
+      BlockFormat.Writer<FileRegion> out, String blockPoolID)
+          throws IOException {
+    if (stat.isFile()) {
+      return toFile(ugi, blk, out, blockPoolID);
+    } else if (stat.isDirectory()) {
+      return toDirectory(ugi);
+    } else if (stat.isSymlink()) {
+      throw new UnsupportedOperationException("symlinks not supported");
+    } else {
+      throw new UnsupportedOperationException("Unknown type: " + stat);
+    }
+  }
+
+  @Override
+  public boolean equals(Object other) {
+    if (!(other instanceof TreePath)) {
+      return false;
+    }
+    TreePath o = (TreePath) other;
+    return getParentId() == o.getParentId()
+      && getFileStatus().equals(o.getFileStatus());
+  }
+
+  @Override
+  public int hashCode() {
+    long pId = getParentId() * getFileStatus().hashCode();
+    return (int)(pId ^ (pId >>> 32));
+  }
+
+  void writeBlock(long blockId, long offset, long length,
+      long genStamp, String blockPoolID,
+      BlockFormat.Writer<FileRegion> out) throws IOException {
+    FileStatus s = getFileStatus();
+    out.store(new FileRegion(blockId, s.getPath(), offset, length,
+        blockPoolID, genStamp));
+  }
+
+  INode toFile(UGIResolver ugi, BlockResolver blk,
+      BlockFormat.Writer<FileRegion> out, String blockPoolID)
+          throws IOException {
+    final FileStatus s = getFileStatus();
+    // TODO should this store resolver's user/group?
+    ugi.addUser(s.getOwner());
+    ugi.addGroup(s.getGroup());
+    INodeFile.Builder b = INodeFile.newBuilder()
+        .setReplication(blk.getReplication(s))
+        .setModificationTime(s.getModificationTime())
+        .setAccessTime(s.getAccessTime())
+        .setPreferredBlockSize(blk.preferredBlockSize(s))
+        .setPermission(ugi.resolve(s))
+        .setStoragePolicyID(HdfsConstants.PROVIDED_STORAGE_POLICY_ID);
+    //TODO: storage policy should be configurable per path; use BlockResolver
+    long off = 0L;
+    for (BlockProto block : blk.resolve(s)) {
+      b.addBlocks(block);
+      writeBlock(block.getBlockId(), off, block.getNumBytes(),
+          block.getGenStamp(), blockPoolID, out);
+      off += block.getNumBytes();
+    }
+    INode.Builder ib = INode.newBuilder()
+        .setType(INode.Type.FILE)
+        .setId(id)
+        .setName(ByteString.copyFrom(string2Bytes(s.getPath().getName())))
+        .setFile(b);
+    return ib.build();
+  }
+
+  INode toDirectory(UGIResolver ugi) {
+    final FileStatus s = getFileStatus();
+    ugi.addUser(s.getOwner());
+    ugi.addGroup(s.getGroup());
+    INodeDirectory.Builder b = INodeDirectory.newBuilder()
+        .setModificationTime(s.getModificationTime())
+        .setNsQuota(DEFAULT_NAMESPACE_QUOTA)
+        .setDsQuota(DEFAULT_STORAGE_SPACE_QUOTA)
+        .setPermission(ugi.resolve(s));
+    INode.Builder ib = INode.newBuilder()
+        .setType(INode.Type.DIRECTORY)
+        .setId(id)
+        .setName(ByteString.copyFrom(string2Bytes(s.getPath().getName())))
+        .setDirectory(b);
+    return ib.build();
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder();
+    sb.append("{ stat=\"").append(getFileStatus()).append("\"");
+    sb.append(", id=").append(getId());
+    sb.append(", parentId=").append(getParentId());
+    sb.append(", iterObjId=").append(System.identityHashCode(i));
+    sb.append(" }");
+    return sb.toString();
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/616765e9/hadoop-tools/hadoop-fs2img/src/main/java/org/apache/hadoop/hdfs/server/namenode/TreeWalk.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-fs2img/src/main/java/org/apache/hadoop/hdfs/server/namenode/TreeWalk.java b/hadoop-tools/hadoop-fs2img/src/main/java/org/apache/hadoop/hdfs/server/namenode/TreeWalk.java
new file mode 100644
index 0000000..7fd26f9
--- /dev/null
+++ b/hadoop-tools/hadoop-fs2img/src/main/java/org/apache/hadoop/hdfs/server/namenode/TreeWalk.java
@@ -0,0 +1,103 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode;
+
+import java.util.ArrayDeque;
+import java.util.Deque;
+import java.util.Iterator;
+
+/**
+ * Traversal yielding a hierarchical sequence of paths.
+ */
+public abstract class TreeWalk implements Iterable<TreePath> {
+
+  /**
+   * @param path path to the node being explored.
+   * @param id the id of the node.
+   * @param iterator the {@link TreeIterator} to use.
+   * @return paths representing the children of the current node.
+   */
+  protected abstract Iterable<TreePath> getChildren(
+      TreePath path, long id, TreeWalk.TreeIterator iterator);
+
+  public abstract TreeIterator iterator();
+
+  /**
+   * Enumerator class for hierarchies. Implementations SHOULD support a fork()
+   * operation yielding a subtree of the current cursor.
+   */
+  public abstract class TreeIterator implements Iterator<TreePath> {
+
+    private final Deque<TreePath> pending;
+
+    TreeIterator() {
+      this(new ArrayDeque<TreePath>());
+    }
+
+    protected TreeIterator(Deque<TreePath> pending) {
+      this.pending = pending;
+    }
+
+    public abstract TreeIterator fork();
+
+    @Override
+    public boolean hasNext() {
+      return !pending.isEmpty();
+    }
+
+    @Override
+    public TreePath next() {
+      return pending.removeFirst();
+    }
+
+    @Override
+    public void remove() {
+      throw new UnsupportedOperationException();
+    }
+
+    protected void onAccept(TreePath p, long id) {
+      for (TreePath k : getChildren(p, id, this)) {
+        pending.addFirst(k);
+      }
+    }
+
+    /**
+     * @return the Deque containing the pending paths.
+     */
+    protected Deque<TreePath> getPendingQueue() {
+      return pending;
+    }
+
+    @Override
+    public String toString() {
+      StringBuilder sb = new StringBuilder();
+      sb.append("{ Treewalk=\"").append(TreeWalk.this.toString());
+      sb.append(", pending=[");
+      Iterator<TreePath> i = pending.iterator();
+      if (i.hasNext()) {
+        sb.append("\"").append(i.next()).append("\"");
+      }
+      while (i.hasNext()) {
+        sb.append(", \"").append(i.next()).append("\"");
+      }
+      sb.append("]");
+      sb.append(" }");
+      return sb.toString();
+    }
+  }
+}
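
A sketch of the fork() contract (illustrative only, not part of this patch): fork() takes one pending subtree from the current cursor so another worker can drain it independently. As with the earlier walk example, this assumes the org.apache.hadoop.hdfs.server.namenode package because TreePath#accept(long) is package-private, and the path is a placeholder.

// Illustrative sketch, not part of this patch; same-package because
// TreePath#accept(long) is package-private, and the path is a placeholder.
package org.apache.hadoop.hdfs.server.namenode;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;

public class ForkExample {
  public static void main(String[] args) throws Exception {
    TreeWalk walk =
        new FSTreeWalk(new Path("file:///tmp/data"), new Configuration());
    TreeWalk.TreeIterator left = walk.iterator();
    left.next().accept(1);                      // expand the root
    TreeWalk.TreeIterator right = left.fork();  // takes one pending subtree
    // left and right now hold disjoint work and can be drained independently
  }
}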

http://git-wip-us.apache.org/repos/asf/hadoop/blob/616765e9/hadoop-tools/hadoop-fs2img/src/main/java/org/apache/hadoop/hdfs/server/namenode/UGIResolver.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-fs2img/src/main/java/org/apache/hadoop/hdfs/server/namenode/UGIResolver.java b/hadoop-tools/hadoop-fs2img/src/main/java/org/apache/hadoop/hdfs/server/namenode/UGIResolver.java
new file mode 100644
index 0000000..2d50668
--- /dev/null
+++ b/hadoop-tools/hadoop-fs2img/src/main/java/org/apache/hadoop/hdfs/server/namenode/UGIResolver.java
@@ -0,0 +1,131 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode;
+
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.Map;
+
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.permission.FsPermission;
+
+/**
+ * Pluggable class for mapping ownership and permissions from an external
+ * store to an FSImage.
+ */
+public abstract class UGIResolver {
+
+  static final int USER_STRID_OFFSET = 40;
+  static final int GROUP_STRID_OFFSET = 16;
+  static final long USER_GROUP_STRID_MASK = (1 << 24) - 1;
+
+  /**
+   * Permission is serialized as a 64-bit long. [0:24):[24:48):[48:64) (in Big
+   * Endian).
+   * The first and the second parts are the string ids of the user and
+   * group name, and the last 16 bits are the permission bits.
+   * @param owner name of owner
+   * @param group name of group
+   * @param permission Permission octets
+   * @return FSImage encoding of permissions
+   */
+  protected final long buildPermissionStatus(
+      String owner, String group, short permission) {
+
+    long userId = users.get(owner);
+    if (0L != ((~USER_GROUP_STRID_MASK) & userId)) {
+      throw new IllegalArgumentException("UID must fit in 24 bits");
+    }
+
+    long groupId = groups.get(group);
+    if (0L != ((~USER_GROUP_STRID_MASK) & groupId)) {
+      throw new IllegalArgumentException("GID must fit in 24 bits");
+    }
+    return ((userId & USER_GROUP_STRID_MASK) << USER_STRID_OFFSET)
+        | ((groupId & USER_GROUP_STRID_MASK) << GROUP_STRID_OFFSET)
+        | permission;
+  }
+
+  private final Map<String, Integer> users;
+  private final Map<String, Integer> groups;
+
+  public UGIResolver() {
+    this(new HashMap<String, Integer>(), new HashMap<String, Integer>());
+  }
+
+  UGIResolver(Map<String, Integer> users, Map<String, Integer> groups) {
+    this.users = users;
+    this.groups = groups;
+  }
+
+  public Map<Integer, String> ugiMap() {
+    Map<Integer, String> ret = new HashMap<>();
+    for (Map<String, Integer> m : Arrays.asList(users, groups)) {
+      for (Map.Entry<String, Integer> e : m.entrySet()) {
+        String s = ret.put(e.getValue(), e.getKey());
+        if (s != null) {
+          throw new IllegalStateException("Duplicate mapping: " +
+              e.getValue() + " " + s + " " + e.getKey());
+        }
+      }
+    }
+    return ret;
+  }
+
+  public abstract void addUser(String name);
+
+  protected void addUser(String name, int id) {
+    Integer uid = users.put(name, id);
+    if (uid != null) {
+      throw new IllegalArgumentException("Duplicate mapping: " + name +
+          " " + uid + " " + id);
+    }
+  }
+
+  public abstract void addGroup(String name);
+
+  protected void addGroup(String name, int id) {
+    Integer gid = groups.put(name, id);
+    if (gid != null) {
+      throw new IllegalArgumentException("Duplicate mapping: " + name +
+          " " + gid + " " + id);
+    }
+  }
+
+  protected void resetUGInfo() {
+    users.clear();
+    groups.clear();
+  }
+
+  public long resolve(FileStatus s) {
+    return buildPermissionStatus(user(s), group(s), permission(s).toShort());
+  }
+
+  public String user(FileStatus s) {
+    return s.getOwner();
+  }
+
+  public String group(FileStatus s) {
+    return s.getGroup();
+  }
+
+  public FsPermission permission(FileStatus s) {
+    return s.getPermission();
+  }
+
+}
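
The packing done by buildPermissionStatus() can be seen in isolation with a standalone
sketch (not part of the patch); the shifts and mask mirror USER_STRID_OFFSET (40),
GROUP_STRID_OFFSET (16) and the 24-bit string-id mask above:

    // Sketch only: pack and unpack a permission status the same way as above.
    long userId = 1, groupId = 2;                        // 24-bit serial ids from the maps
    short perm = 0755;                                   // rwxr-xr-x
    long encoded = (userId << 40) | (groupId << 16) | perm;

    long user  = (encoded >>> 40) & ((1L << 24) - 1);    // == 1
    long group = (encoded >>> 16) & ((1L << 24) - 1);    // == 2
    int mode   = (int) (encoded & 0xFFFF);               // == 0755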

http://git-wip-us.apache.org/repos/asf/hadoop/blob/616765e9/hadoop-tools/hadoop-fs2img/src/main/java/org/apache/hadoop/hdfs/server/namenode/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-fs2img/src/main/java/org/apache/hadoop/hdfs/server/namenode/package-info.java b/hadoop-tools/hadoop-fs2img/src/main/java/org/apache/hadoop/hdfs/server/namenode/package-info.java
new file mode 100644
index 0000000..956292e
--- /dev/null
+++ b/hadoop-tools/hadoop-fs2img/src/main/java/org/apache/hadoop/hdfs/server/namenode/package-info.java
@@ -0,0 +1,23 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Unstable
+package org.apache.hadoop.hdfs.server.namenode;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/616765e9/hadoop-tools/hadoop-fs2img/src/test/java/org/apache/hadoop/hdfs/server/namenode/RandomTreeWalk.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-fs2img/src/test/java/org/apache/hadoop/hdfs/server/namenode/RandomTreeWalk.java b/hadoop-tools/hadoop-fs2img/src/test/java/org/apache/hadoop/hdfs/server/namenode/RandomTreeWalk.java
new file mode 100644
index 0000000..c82c489
--- /dev/null
+++ b/hadoop-tools/hadoop-fs2img/src/test/java/org/apache/hadoop/hdfs/server/namenode/RandomTreeWalk.java
@@ -0,0 +1,186 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Random;
+
+import org.apache.hadoop.fs.BlockLocation;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.LocatedFileStatus;
+import org.apache.hadoop.fs.Path;
+
+/**
+ * Random, repeatable hierarchy generator.
+ */
+public class RandomTreeWalk extends TreeWalk {
+
+  private final Path root;
+  private final long seed;
+  private final float depth;
+  private final int children;
+  private final Map<Long, Long> mSeed;
+  //private final AtomicLong blockIds = new AtomicLong(1L << 30);
+
+  RandomTreeWalk(long seed) {
+    this(seed, 10);
+  }
+
+  RandomTreeWalk(long seed, int children) {
+    this(seed, children, 0.15f);
+  }
+
+  RandomTreeWalk(long seed, int children, float depth) {
+    this(randomRoot(seed), seed, children, depth);
+  }
+
+  RandomTreeWalk(Path root, long seed, int children, float depth) {
+    this.seed = seed;
+    this.depth = depth;
+    this.children = children;
+    mSeed = Collections.synchronizedMap(new HashMap<Long, Long>());
+    mSeed.put(-1L, seed);
+    this.root = root;
+  }
+
+  static Path randomRoot(long seed) {
+    Random r = new Random(seed);
+    String scheme;
+    do {
+      scheme = genName(r, 3, 5).toLowerCase();
+    } while (Character.isDigit(scheme.charAt(0)));
+    String authority = genName(r, 3, 15).toLowerCase();
+    int port = r.nextInt(1 << 13) + 1000;
+    return new Path(scheme, authority + ":" + port, "/");
+  }
+
+  @Override
+  public TreeIterator iterator() {
+    return new RandomTreeIterator(seed);
+  }
+
+  @Override
+  protected Iterable<TreePath> getChildren(TreePath p, long id,
+      TreeIterator walk) {
+    final FileStatus pFs = p.getFileStatus();
+    if (pFs.isFile()) {
+      return Collections.emptyList();
+    }
+    // seed is f(parent seed, attrib)
+    long cseed = mSeed.get(p.getParentId()) * p.getFileStatus().hashCode();
+    mSeed.put(p.getId(), cseed);
+    Random r = new Random(cseed);
+
+    int nChildren = r.nextInt(children);
+    ArrayList<TreePath> ret = new ArrayList<TreePath>();
+    for (int i = 0; i < nChildren; ++i) {
+      ret.add(new TreePath(genFileStatus(p, r), p.getId(), walk));
+    }
+    return ret;
+  }
+
+  FileStatus genFileStatus(TreePath parent, Random r) {
+    final int blocksize = 128 * (1 << 20);
+    final Path name;
+    final boolean isDir;
+    if (null == parent) {
+      name = root;
+      isDir = true;
+    } else {
+      Path p = parent.getFileStatus().getPath();
+      name = new Path(p, genName(r, 3, 10));
+      isDir = r.nextFloat() < depth;
+    }
+    final long len = isDir ? 0 : r.nextInt(Integer.MAX_VALUE);
+    final int nblocks = 0 == len ? 0 : (((int)((len - 1) / blocksize)) + 1);
+    BlockLocation[] blocks = genBlocks(r, nblocks, blocksize, len);
+    try {
+      return new LocatedFileStatus(new FileStatus(
+          len,              /* long length,             */
+          isDir,            /* boolean isdir,           */
+          1,                /* int block_replication,   */
+          blocksize,        /* long blocksize,          */
+          0L,               /* long modification_time,  */
+          0L,               /* long access_time,        */
+          null,             /* FsPermission permission, */
+          "hadoop",         /* String owner,            */
+          "hadoop",         /* String group,            */
+          name),            /* Path path                */
+          blocks);
+    } catch (IOException e) {
+      throw new RuntimeException(e);
+    }
+  }
+
+  BlockLocation[] genBlocks(Random r, int nblocks, int blocksize, long len) {
+    BlockLocation[] blocks = new BlockLocation[nblocks];
+    if (0 == nblocks) {
+      return blocks;
+    }
+    for (int i = 0; i < nblocks - 1; ++i) {
+      blocks[i] = new BlockLocation(null, null, i * blocksize, blocksize);
+    }
+    blocks[nblocks - 1] = new BlockLocation(null, null,
+        (nblocks - 1) * blocksize,
+        0 == (len % blocksize) ? blocksize : len % blocksize);
+    return blocks;
+  }
+
+  static String genName(Random r, int min, int max) {
+    int len = r.nextInt(max - min + 1) + min;
+    char[] ret = new char[len];
+    while (len > 0) {
+      int c = r.nextInt() & 0x7F; // restrict to ASCII
+      if (Character.isLetterOrDigit(c)) {
+        ret[--len] = (char) c;
+      }
+    }
+    return new String(ret);
+  }
+
+  class RandomTreeIterator extends TreeIterator {
+
+    RandomTreeIterator() {
+    }
+
+    RandomTreeIterator(long seed) {
+      Random r = new Random(seed);
+      FileStatus iroot = genFileStatus(null, r);
+      getPendingQueue().addFirst(new TreePath(iroot, -1, this));
+    }
+
+    RandomTreeIterator(TreePath p) {
+      getPendingQueue().addFirst(
+          new TreePath(p.getFileStatus(), p.getParentId(), this));
+    }
+
+    @Override
+    public TreeIterator fork() {
+      if (getPendingQueue().isEmpty()) {
+        return new RandomTreeIterator();
+      }
+      return new RandomTreeIterator(getPendingQueue().removeFirst());
+    }
+
+  }
+
+}
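
fork() in the base class hands the next pending path, and therefore its whole subtree, to
a new iterator, which is what lets one traversal be split across consumers. A sketch under
the same assumptions as above (RandomTreeWalk and TreePath from this commit; the seed and
the cut-over point are arbitrary):

    // Sketch only: peel one subtree off the main traversal and walk it separately.
    RandomTreeWalk walk = new RandomTreeWalk(42L, 10, 0.15f);
    TreeWalk.TreeIterator main = walk.iterator();
    long id = 0;
    while (main.hasNext()) {
      TreePath p = main.next();
      p.accept(id++);
      if (id == 16) {                              // at some point, split off a subtree
        TreeWalk.TreeIterator sub = main.fork();   // removes the head of main's queue
        while (sub.hasNext()) {
          sub.next().accept(id++);                 // sub visits only that subtree
        }
      }
    }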

http://git-wip-us.apache.org/repos/asf/hadoop/blob/616765e9/hadoop-tools/hadoop-fs2img/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFixedBlockResolver.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-fs2img/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFixedBlockResolver.java b/hadoop-tools/hadoop-fs2img/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFixedBlockResolver.java
new file mode 100644
index 0000000..8b52ffd
--- /dev/null
+++ b/hadoop-tools/hadoop-fs2img/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFixedBlockResolver.java
@@ -0,0 +1,121 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode;
+
+import java.util.Iterator;
+import java.util.Random;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto;
+
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TestName;
+import static org.junit.Assert.*;
+
+/**
+ * Validate fixed-size block partitioning.
+ */
+public class TestFixedBlockResolver {
+
+  @Rule public TestName name = new TestName();
+
+  private final FixedBlockResolver blockId = new FixedBlockResolver();
+
+  @Before
+  public void setup() {
+    Configuration conf = new Configuration(false);
+    conf.setLong(FixedBlockResolver.BLOCKSIZE, 512L * (1L << 20));
+    conf.setLong(FixedBlockResolver.START_BLOCK, 512L * (1L << 20));
+    blockId.setConf(conf);
+    System.out.println(name.getMethodName());
+  }
+
+  @Test
+  public void testExactBlock() throws Exception {
+    FileStatus f = file(512, 256);
+    int nblocks = 0;
+    for (BlockProto b : blockId.resolve(f)) {
+      ++nblocks;
+      assertEquals(512L * (1L << 20), b.getNumBytes());
+    }
+    assertEquals(1, nblocks);
+
+    FileStatus g = file(1024, 256);
+    nblocks = 0;
+    for (BlockProto b : blockId.resolve(g)) {
+      ++nblocks;
+      assertEquals(512L * (1L << 20), b.getNumBytes());
+    }
+    assertEquals(2, nblocks);
+
+    FileStatus h = file(5120, 256);
+    nblocks = 0;
+    for (BlockProto b : blockId.resolve(h)) {
+      ++nblocks;
+      assertEquals(512L * (1L << 20), b.getNumBytes());
+    }
+    assertEquals(10, nblocks);
+  }
+
+  @Test
+  public void testEmpty() throws Exception {
+    FileStatus f = file(0, 100);
+    Iterator<BlockProto> b = blockId.resolve(f).iterator();
+    assertTrue(b.hasNext());
+    assertEquals(0, b.next().getNumBytes());
+    assertFalse(b.hasNext());
+  }
+
+  @Test
+  public void testRandomFile() throws Exception {
+    Random r = new Random();
+    long seed = r.nextLong();
+    System.out.println("seed: " + seed);
+    r.setSeed(seed);
+
+    int len = r.nextInt(4096) + 512;
+    int blk = r.nextInt(len - 128) + 128;
+    FileStatus s = file(len, blk);
+    long nbytes = 0;
+    for (BlockProto b : blockId.resolve(s)) {
+      nbytes += b.getNumBytes();
+      assertTrue(512L * (1L << 20) >= b.getNumBytes());
+    }
+    assertEquals(s.getLen(), nbytes);
+  }
+
+  FileStatus file(long lenMB, long blocksizeMB) {
+    Path p = new Path("foo://bar:4344/baz/dingo");
+    return new FileStatus(
+          lenMB * (1 << 20),       /* long length,             */
+          false,                   /* boolean isdir,           */
+          1,                       /* int block_replication,   */
+          blocksizeMB * (1 << 20), /* long blocksize,          */
+          0L,                      /* long modification_time,  */
+          0L,                      /* long access_time,        */
+          null,                    /* FsPermission permission, */
+          "hadoop",                /* String owner,            */
+          "hadoop",                /* String group,            */
+          p);                      /* Path path                */
+  }
+
+}
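
The partition these tests expect from FixedBlockResolver can be written down directly. A
hypothetical helper (not in the patch; name and placement are illustrative) that mirrors
what the assertions check: full blocks of the configured size, the remainder in the last
block, and a single zero-length block for an empty file:

    // Hypothetical helper mirroring the partition the tests above assert.
    static long[] expectedBlockLengths(long len, long blocksize) {
      if (len == 0) {
        return new long[] {0L};                  // testEmpty: one empty block
      }
      int n = (int) ((len - 1) / blocksize) + 1; // ceil(len / blocksize)
      long[] lengths = new long[n];
      java.util.Arrays.fill(lengths, 0, n - 1, blocksize);
      lengths[n - 1] = len - (n - 1) * blocksize;
      return lengths;
    }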

http://git-wip-us.apache.org/repos/asf/hadoop/blob/616765e9/hadoop-tools/hadoop-fs2img/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestRandomTreeWalk.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-fs2img/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestRandomTreeWalk.java b/hadoop-tools/hadoop-fs2img/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestRandomTreeWalk.java
new file mode 100644
index 0000000..b8e6ac9
--- /dev/null
+++ b/hadoop-tools/hadoop-fs2img/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestRandomTreeWalk.java
@@ -0,0 +1,130 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.Random;
+import java.util.Set;
+
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.Path;
+
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TestName;
+import static org.junit.Assert.*;
+
+/**
+ * Validate randomly generated hierarchies, including fork() support in
+ * base class.
+ */
+public class TestRandomTreeWalk {
+
+  @Rule public TestName name = new TestName();
+
+  private Random r = new Random();
+
+  @Before
+  public void setSeed() {
+    long seed = r.nextLong();
+    r.setSeed(seed);
+    System.out.println(name.getMethodName() + " seed: " + seed);
+  }
+
+  @Test
+  public void testRandomTreeWalkRepeat() throws Exception {
+    Set<TreePath> ns = new HashSet<>();
+    final long seed = r.nextLong();
+    RandomTreeWalk t1 = new RandomTreeWalk(seed, 10, .1f);
+    int i = 0;
+    for (TreePath p : t1) {
+      p.accept(i++);
+      assertTrue(ns.add(p));
+    }
+
+    RandomTreeWalk t2 = new RandomTreeWalk(seed, 10, .1f);
+    int j = 0;
+    for (TreePath p : t2) {
+      p.accept(j++);
+      assertTrue(ns.remove(p));
+    }
+    assertTrue(ns.isEmpty());
+  }
+
+  @Test
+  public void testRandomTreeWalkFork() throws Exception {
+    Set<FileStatus> ns = new HashSet<>();
+
+    final long seed = r.nextLong();
+    RandomTreeWalk t1 = new RandomTreeWalk(seed, 10, .15f);
+    int i = 0;
+    for (TreePath p : t1) {
+      p.accept(i++);
+      assertTrue(ns.add(p.getFileStatus()));
+    }
+
+    RandomTreeWalk t2 = new RandomTreeWalk(seed, 10, .15f);
+    int j = 0;
+    ArrayList<TreeWalk.TreeIterator> iters = new ArrayList<>();
+    iters.add(t2.iterator());
+    while (!iters.isEmpty()) {
+      for (TreeWalk.TreeIterator sub = iters.remove(iters.size() - 1);
+           sub.hasNext();) {
+        TreePath p = sub.next();
+        if (0 == (r.nextInt() % 4)) {
+          iters.add(sub.fork());
+          Collections.shuffle(iters, r);
+        }
+        p.accept(j++);
+        assertTrue(ns.remove(p.getFileStatus()));
+      }
+    }
+    assertTrue(ns.isEmpty());
+  }
+
+  @Test
+  public void testRandomRootWalk() throws Exception {
+    Set<FileStatus> ns = new HashSet<>();
+    final long seed = r.nextLong();
+    Path root = new Path("foo://bar:4344/dingos");
+    String sroot = root.toString();
+    int nroot = sroot.length();
+    RandomTreeWalk t1 = new RandomTreeWalk(root, seed, 10, .1f);
+    int i = 0;
+    for (TreePath p : t1) {
+      p.accept(i++);
+      FileStatus stat = p.getFileStatus();
+      assertTrue(ns.add(stat));
+      assertEquals(sroot, stat.getPath().toString().substring(0, nroot));
+    }
+
+    RandomTreeWalk t2 = new RandomTreeWalk(root, seed, 10, .1f);
+    int j = 0;
+    for (TreePath p : t2) {
+      p.accept(j++);
+      FileStatus stat = p.getFileStatus();
+      assertTrue(ns.remove(stat));
+      assertEquals(sroot, stat.getPath().toString().substring(0, nroot));
+    }
+    assertTrue(ns.isEmpty());
+  }
+
+}


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[06/29] hadoop git commit: HDFS-11818. TestBlockManager.testSufficientlyReplBlocksUsesNewRack fails intermittently. Contributed by Nathan Roberts

Posted by vi...@apache.org.
HDFS-11818. TestBlockManager.testSufficientlyReplBlocksUsesNewRack fails intermittently. Contributed by Nathan Roberts


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2397a262
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2397a262
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2397a262

Branch: refs/heads/HDFS-9806
Commit: 2397a2626e22d002174f4a36891d713a7e1f1b20
Parents: 7bc2172
Author: Jason Lowe <jl...@yahoo-inc.com>
Authored: Fri May 12 17:42:47 2017 -0500
Committer: Jason Lowe <jl...@yahoo-inc.com>
Committed: Fri May 12 17:42:47 2017 -0500

----------------------------------------------------------------------
 .../hadoop/hdfs/server/blockmanagement/TestBlockManager.java      | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2397a262/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
index beaef4a..3088b7b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
@@ -159,7 +159,6 @@ public class TestBlockManager {
     Mockito.when(haState.shouldPopulateReplQueues()).thenReturn(true);
     Mockito.when(fsn.getHAContext()).thenReturn(haContext);
     bm = new BlockManager(fsn, false, conf);
-    bm.setInitializedReplQueues(true);
     CacheManager cm = Mockito.mock(CacheManager.class);
     Mockito.doReturn(cm).when(fsn).getCacheManager();
     GSet<CachedBlock, CachedBlock> cb =
@@ -967,10 +966,12 @@ public class TestBlockManager {
     StorageReceivedDeletedBlocks srdb =
         new StorageReceivedDeletedBlocks(new DatanodeStorage(ds.getStorageID()),
             rdbiList.toArray(new ReceivedDeletedBlockInfo[rdbiList.size()]));
+    bm.setInitializedReplQueues(true);
     bm.processIncrementalBlockReport(node, srdb);
     // Needed replications should still be 0.
     assertEquals("UC block was incorrectly added to needed Replications",
         0, bm.neededReconstruction.size());
+    bm.setInitializedReplQueues(false);
   }
 
   private BlockInfo addBlockToBM(long blkId) {


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[21/29] hadoop git commit: HADOOP-14415. Use java.lang.AssertionError instead of junit.framework.AssertionFailedError. Contributed by Chen Liang.

Posted by vi...@apache.org.
HADOOP-14415. Use java.lang.AssertionError instead of junit.framework.AssertionFailedError. Contributed by Chen Liang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d4aa9e3c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d4aa9e3c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d4aa9e3c

Branch: refs/heads/HDFS-9806
Commit: d4aa9e3c699f6a584ed020851189a81f825ce915
Parents: 035d468
Author: Akira Ajisaka <aa...@apache.org>
Authored: Wed May 17 15:38:28 2017 -0400
Committer: Akira Ajisaka <aa...@apache.org>
Committed: Wed May 17 15:38:28 2017 -0400

----------------------------------------------------------------------
 .../src/test/java/org/apache/hadoop/fs/TestFsShell.java   |  3 +--
 .../src/test/java/org/apache/hadoop/net/TestNetUtils.java | 10 ++++------
 .../hadoop/fs/swift/TestSwiftFileSystemContract.java      |  3 +--
 3 files changed, 6 insertions(+), 10 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d4aa9e3c/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFsShell.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFsShell.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFsShell.java
index 162a942..da93196 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFsShell.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFsShell.java
@@ -17,7 +17,6 @@
  */
 package org.apache.hadoop.fs;
 
-import junit.framework.AssertionFailedError;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.shell.Command;
 import org.apache.hadoop.fs.shell.CommandFactory;
@@ -45,7 +44,7 @@ public class TestFsShell {
     }
 
     if (!(th instanceof RuntimeException)) {
-      throw new AssertionFailedError("Expected Runtime exception, got: " + th)
+      throw new AssertionError("Expected Runtime exception, got: " + th)
           .initCause(th);
     }
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d4aa9e3c/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestNetUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestNetUtils.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestNetUtils.java
index e59ac77..1375d9b 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestNetUtils.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestNetUtils.java
@@ -37,8 +37,6 @@ import java.util.Enumeration;
 import java.util.List;
 import java.util.concurrent.TimeUnit;
 
-import junit.framework.AssertionFailedError;
-
 import org.apache.commons.lang.StringUtils;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -328,7 +326,7 @@ public class TestNetUtils {
   private void assertInException(Exception e, String text) throws Throwable {
     String message = extractExceptionMessage(e);
     if (!(message.contains(text))) {
-      throw new AssertionFailedError("Wrong text in message "
+      throw new AssertionError("Wrong text in message "
         + "\"" + message + "\""
         + " expected \"" + text + "\"")
           .initCause(e);
@@ -339,7 +337,7 @@ public class TestNetUtils {
     assertNotNull("Null Exception", e);
     String message = e.getMessage();
     if (message == null) {
-      throw new AssertionFailedError("Empty text in exception " + e)
+      throw new AssertionError("Empty text in exception " + e)
           .initCause(e);
     }
     return message;
@@ -349,7 +347,7 @@ public class TestNetUtils {
       throws Throwable{
     String message = extractExceptionMessage(e);
     if (message.contains(text)) {
-      throw new AssertionFailedError("Wrong text in message "
+      throw new AssertionError("Wrong text in message "
            + "\"" + message + "\""
            + " did not expect \"" + text + "\"")
           .initCause(e);
@@ -364,7 +362,7 @@ public class TestNetUtils {
          "localhost", LOCAL_PORT, e);
     LOG.info(wrapped.toString(), wrapped);
     if(!(wrapped.getClass().equals(expectedClass))) {
-      throw new AssertionFailedError("Wrong exception class; expected "
+      throw new AssertionError("Wrong exception class; expected "
          + expectedClass
          + " got " + wrapped.getClass() + ": " + wrapped).initCause(wrapped);
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d4aa9e3c/hadoop-tools/hadoop-openstack/src/test/java/org/apache/hadoop/fs/swift/TestSwiftFileSystemContract.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-openstack/src/test/java/org/apache/hadoop/fs/swift/TestSwiftFileSystemContract.java b/hadoop-tools/hadoop-openstack/src/test/java/org/apache/hadoop/fs/swift/TestSwiftFileSystemContract.java
index 46a5f0f..76716b2 100644
--- a/hadoop-tools/hadoop-openstack/src/test/java/org/apache/hadoop/fs/swift/TestSwiftFileSystemContract.java
+++ b/hadoop-tools/hadoop-openstack/src/test/java/org/apache/hadoop/fs/swift/TestSwiftFileSystemContract.java
@@ -18,7 +18,6 @@
 
 package org.apache.hadoop.fs.swift;
 
-import junit.framework.AssertionFailedError;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -119,7 +118,7 @@ public class TestSwiftFileSystemContract
   public void testWriteReadAndDeleteEmptyFile() throws Exception {
     try {
       super.testWriteReadAndDeleteEmptyFile();
-    } catch (AssertionFailedError e) {
+    } catch (AssertionError e) {
       SwiftTestUtils.downgrade("empty files get mistaken for directories", e);
     }
   }
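
The replacement keeps the existing initCause() idiom, which returns Throwable and is why
the surrounding helpers declare "throws Throwable" (as in TestNetUtils above). A sketch of
the two equivalent forms (not part of the patch; method names are illustrative) -- since
Java 7 the two-argument AssertionError constructor attaches the cause directly:

    // The pattern the patch keeps: initCause() returns Throwable.
    static void failWithCause(String message, Throwable cause) throws Throwable {
      throw new AssertionError(message).initCause(cause);
    }

    // Java 7+ alternative: the cause is set by the constructor, so the thrown
    // type stays AssertionError and no "throws Throwable" is needed.
    static void failWithCauseJava7(String message, Throwable cause) {
      throw new AssertionError(message, cause);
    }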


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[11/29] hadoop git commit: HDFS-11641. Reduce cost of audit logging by using FileStatus instead of HdfsFileStatus. Contributed by Daryn Sharp.

Posted by vi...@apache.org.
HDFS-11641. Reduce cost of audit logging by using FileStatus instead of HdfsFileStatus. Contributed by Daryn Sharp.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9b90e52f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9b90e52f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9b90e52f

Branch: refs/heads/HDFS-9806
Commit: 9b90e52f1ec22c18cd535af2a569defcef65b093
Parents: 1d1c52b
Author: Kihwal Lee <ki...@apache.org>
Authored: Tue May 16 11:28:04 2017 -0500
Committer: Kihwal Lee <ki...@apache.org>
Committed: Tue May 16 11:28:46 2017 -0500

----------------------------------------------------------------------
 .../hadoop/hdfs/server/namenode/FSDirAclOp.java | 13 +++--
 .../hdfs/server/namenode/FSDirAttrOp.java       | 14 ++---
 .../hdfs/server/namenode/FSDirConcatOp.java     |  4 +-
 .../server/namenode/FSDirEncryptionZoneOp.java  | 10 ++--
 .../server/namenode/FSDirErasureCodingOp.java   | 10 ++--
 .../hdfs/server/namenode/FSDirMkdirOp.java      |  4 +-
 .../hdfs/server/namenode/FSDirRenameOp.java     |  8 +--
 .../hdfs/server/namenode/FSDirSymlinkOp.java    |  4 +-
 .../hdfs/server/namenode/FSDirTruncateOp.java   |  8 +--
 .../hdfs/server/namenode/FSDirXAttrOp.java      |  6 +-
 .../hdfs/server/namenode/FSDirectory.java       | 43 +++++++++++++-
 .../hdfs/server/namenode/FSNamesystem.java      | 60 +++++++++++---------
 12 files changed, 115 insertions(+), 69 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9b90e52f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAclOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAclOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAclOp.java
index efededd..cc51430 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAclOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAclOp.java
@@ -18,6 +18,8 @@
 package org.apache.hadoop.hdfs.server.namenode;
 
 import com.google.common.base.Preconditions;
+
+import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclEntryScope;
 import org.apache.hadoop.fs.permission.AclEntryType;
@@ -26,7 +28,6 @@ import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.protocol.AclException;
-import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory.DirOp;
 
 import java.io.IOException;
@@ -34,7 +35,7 @@ import java.util.Collections;
 import java.util.List;
 
 class FSDirAclOp {
-  static HdfsFileStatus modifyAclEntries(
+  static FileStatus modifyAclEntries(
       FSDirectory fsd, final String srcArg, List<AclEntry> aclSpec)
       throws IOException {
     String src = srcArg;
@@ -59,7 +60,7 @@ class FSDirAclOp {
     return fsd.getAuditFileInfo(iip);
   }
 
-  static HdfsFileStatus removeAclEntries(
+  static FileStatus removeAclEntries(
       FSDirectory fsd, final String srcArg, List<AclEntry> aclSpec)
       throws IOException {
     String src = srcArg;
@@ -84,7 +85,7 @@ class FSDirAclOp {
     return fsd.getAuditFileInfo(iip);
   }
 
-  static HdfsFileStatus removeDefaultAcl(FSDirectory fsd, final String srcArg)
+  static FileStatus removeDefaultAcl(FSDirectory fsd, final String srcArg)
       throws IOException {
     String src = srcArg;
     checkAclsConfigFlag(fsd);
@@ -108,7 +109,7 @@ class FSDirAclOp {
     return fsd.getAuditFileInfo(iip);
   }
 
-  static HdfsFileStatus removeAcl(FSDirectory fsd, final String srcArg)
+  static FileStatus removeAcl(FSDirectory fsd, final String srcArg)
       throws IOException {
     String src = srcArg;
     checkAclsConfigFlag(fsd);
@@ -127,7 +128,7 @@ class FSDirAclOp {
     return fsd.getAuditFileInfo(iip);
   }
 
-  static HdfsFileStatus setAcl(
+  static FileStatus setAcl(
       FSDirectory fsd, final String srcArg, List<AclEntry> aclSpec)
       throws IOException {
     String src = srcArg;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9b90e52f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
index 4d26885..d4b24f5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hdfs.server.namenode;
 
 import org.apache.hadoop.HadoopIllegalArgumentException;
+import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.InvalidPathException;
 import org.apache.hadoop.fs.PathIsNotDirectoryException;
 import org.apache.hadoop.fs.StorageType;
@@ -28,7 +29,6 @@ import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
-import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
 import org.apache.hadoop.hdfs.protocol.SnapshotAccessControlException;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
@@ -50,7 +50,7 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_QUOTA_BY_STORAGETYPE_ENAB
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_STORAGE_POLICY_ENABLED_KEY;
 
 public class FSDirAttrOp {
-  static HdfsFileStatus setPermission(
+  static FileStatus setPermission(
       FSDirectory fsd, final String src, FsPermission permission)
       throws IOException {
     if (FSDirectory.isExactReservedName(src)) {
@@ -70,7 +70,7 @@ public class FSDirAttrOp {
     return fsd.getAuditFileInfo(iip);
   }
 
-  static HdfsFileStatus setOwner(
+  static FileStatus setOwner(
       FSDirectory fsd, String src, String username, String group)
       throws IOException {
     if (FSDirectory.isExactReservedName(src)) {
@@ -100,7 +100,7 @@ public class FSDirAttrOp {
     return fsd.getAuditFileInfo(iip);
   }
 
-  static HdfsFileStatus setTimes(
+  static FileStatus setTimes(
       FSDirectory fsd, String src, long mtime, long atime)
       throws IOException {
     FSPermissionChecker pc = fsd.getPermissionChecker();
@@ -153,13 +153,13 @@ public class FSDirAttrOp {
     return isFile;
   }
 
-  static HdfsFileStatus unsetStoragePolicy(FSDirectory fsd, BlockManager bm,
+  static FileStatus unsetStoragePolicy(FSDirectory fsd, BlockManager bm,
       String src) throws IOException {
     return setStoragePolicy(fsd, bm, src,
         HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED, "unset");
   }
 
-  static HdfsFileStatus setStoragePolicy(FSDirectory fsd, BlockManager bm,
+  static FileStatus setStoragePolicy(FSDirectory fsd, BlockManager bm,
       String src, final String policyName) throws IOException {
     // get the corresponding policy and make sure the policy name is valid
     BlockStoragePolicy policy = bm.getStoragePolicy(policyName);
@@ -171,7 +171,7 @@ public class FSDirAttrOp {
     return setStoragePolicy(fsd, bm, src, policy.getId(), "set");
   }
 
-  static HdfsFileStatus setStoragePolicy(FSDirectory fsd, BlockManager bm,
+  static FileStatus setStoragePolicy(FSDirectory fsd, BlockManager bm,
       String src, final byte policyId, final String operation)
       throws IOException {
     if (!fsd.isStoragePolicyEnabled()) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9b90e52f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java
index 40df120..6a41cd8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java
@@ -21,9 +21,9 @@ import com.google.common.base.Preconditions;
 
 import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.fs.permission.FsAction;
+import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
-import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
 import org.apache.hadoop.hdfs.protocol.SnapshotException;
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory.DirOp;
@@ -48,7 +48,7 @@ import static org.apache.hadoop.util.Time.now;
  */
 class FSDirConcatOp {
 
-  static HdfsFileStatus concat(FSDirectory fsd, String target, String[] srcs,
+  static FileStatus concat(FSDirectory fsd, String target, String[] srcs,
     boolean logRetryCache) throws IOException {
     validatePath(target, srcs);
     assert srcs != null;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9b90e52f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirEncryptionZoneOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirEncryptionZoneOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirEncryptionZoneOp.java
index d5f6be0..22039d1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirEncryptionZoneOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirEncryptionZoneOp.java
@@ -34,6 +34,7 @@ import org.apache.hadoop.crypto.key.KeyProvider;
 import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension;
 import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension.EncryptedKeyVersion;
 import org.apache.hadoop.fs.FileEncryptionInfo;
+import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.UnresolvedLinkException;
 import org.apache.hadoop.fs.XAttr;
 import org.apache.hadoop.fs.XAttrSetFlag;
@@ -41,7 +42,6 @@ import org.apache.hadoop.fs.BatchedRemoteIterator.BatchedListEntries;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.hdfs.XAttrHelper;
 import org.apache.hadoop.hdfs.protocol.EncryptionZone;
-import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.SnapshotAccessControlException;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos;
 import org.apache.hadoop.hdfs.protocolPB.PBHelperClient;
@@ -143,10 +143,10 @@ final class FSDirEncryptionZoneOp {
    *                KeyProvider
    * @param logRetryCache whether to record RPC ids in editlog for retry cache
    *                      rebuilding
-   * @return HdfsFileStatus
+   * @return FileStatus
    * @throws IOException
    */
-  static HdfsFileStatus createEncryptionZone(final FSDirectory fsd,
+  static FileStatus createEncryptionZone(final FSDirectory fsd,
       final String srcArg, final FSPermissionChecker pc, final String cipher,
       final String keyName, final boolean logRetryCache) throws IOException {
     final CipherSuite suite = CipherSuite.convert(cipher);
@@ -177,7 +177,7 @@ final class FSDirEncryptionZoneOp {
    * @param pc permission checker to check fs permission
    * @return the EZ with file status.
    */
-  static Map.Entry<EncryptionZone, HdfsFileStatus> getEZForPath(
+  static Map.Entry<EncryptionZone, FileStatus> getEZForPath(
       final FSDirectory fsd, final String srcArg, final FSPermissionChecker pc)
       throws IOException {
     final INodesInPath iip;
@@ -192,7 +192,7 @@ final class FSDirEncryptionZoneOp {
     } finally {
       fsd.readUnlock();
     }
-    HdfsFileStatus auditStat = fsd.getAuditFileInfo(iip);
+    FileStatus auditStat = fsd.getAuditFileInfo(iip);
     return new AbstractMap.SimpleImmutableEntry<>(ret, auditStat);
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9b90e52f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java
index aa0babd..60a89e2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java
@@ -32,6 +32,7 @@ import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;
 
 import org.apache.hadoop.HadoopIllegalArgumentException;
+import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.XAttr;
 import org.apache.hadoop.fs.XAttrSetFlag;
 import org.apache.hadoop.fs.permission.FsAction;
@@ -39,7 +40,6 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.XAttrHelper;
 import org.apache.hadoop.hdfs.protocol.IllegalECPolicyException;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
-import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory.DirOp;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.WritableUtils;
@@ -100,12 +100,12 @@ final class FSDirErasureCodingOp {
    *                    directory.
    * @param logRetryCache whether to record RPC ids in editlog for retry
    *          cache rebuilding
-   * @return {@link HdfsFileStatus}
+   * @return {@link FileStatus}
    * @throws IOException
    * @throws HadoopIllegalArgumentException if the policy is not enabled
    * @throws AccessControlException if the user does not have write access
    */
-  static HdfsFileStatus setErasureCodingPolicy(final FSNamesystem fsn,
+  static FileStatus setErasureCodingPolicy(final FSNamesystem fsn,
       final String srcArg, final String ecPolicyName,
       final FSPermissionChecker pc, final boolean logRetryCache)
       throws IOException, AccessControlException {
@@ -179,11 +179,11 @@ final class FSDirErasureCodingOp {
    * @param srcArg The path of the target directory.
    * @param logRetryCache whether to record RPC ids in editlog for retry
    *          cache rebuilding
-   * @return {@link HdfsFileStatus}
+   * @return {@link FileStatus}
    * @throws IOException
    * @throws AccessControlException if the user does not have write access
    */
-  static HdfsFileStatus unsetErasureCodingPolicy(final FSNamesystem fsn,
+  static FileStatus unsetErasureCodingPolicy(final FSNamesystem fsn,
       final String srcArg, final FSPermissionChecker pc,
       final boolean logRetryCache) throws IOException {
     assert fsn.hasWriteLock();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9b90e52f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirMkdirOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirMkdirOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirMkdirOp.java
index 6f7c5eb..89fd8a3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirMkdirOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirMkdirOp.java
@@ -19,6 +19,7 @@ package org.apache.hadoop.hdfs.server.namenode;
 
 import com.google.common.base.Preconditions;
 import org.apache.hadoop.fs.FileAlreadyExistsException;
+import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.ParentNotDirectoryException;
 import org.apache.hadoop.fs.UnresolvedLinkException;
 import org.apache.hadoop.fs.permission.AclEntry;
@@ -27,7 +28,6 @@ import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.protocol.AclException;
-import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory.DirOp;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
@@ -39,7 +39,7 @@ import static org.apache.hadoop.util.Time.now;
 
 class FSDirMkdirOp {
 
-  static HdfsFileStatus mkdirs(FSNamesystem fsn, String src,
+  static FileStatus mkdirs(FSNamesystem fsn, String src,
       PermissionStatus permissions, boolean createParent) throws IOException {
     FSDirectory fsd = fsn.getFSDirectory();
     if(NameNode.stateChangeLog.isDebugEnabled()) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9b90e52f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
index 3beb3c0..bbbb724 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
@@ -19,13 +19,13 @@ package org.apache.hadoop.hdfs.server.namenode;
 
 import com.google.common.base.Preconditions;
 import org.apache.hadoop.fs.FileAlreadyExistsException;
+import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.InvalidPathException;
 import org.apache.hadoop.fs.Options;
 import org.apache.hadoop.fs.ParentNotDirectoryException;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
-import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
 import org.apache.hadoop.hdfs.protocol.SnapshotException;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
@@ -781,18 +781,18 @@ class FSDirRenameOp {
       INodesInPath dst, boolean filesDeleted,
       BlocksMapUpdateInfo collectedBlocks) throws IOException {
     boolean success = (dst != null);
-    HdfsFileStatus auditStat = success ? fsd.getAuditFileInfo(dst) : null;
+    FileStatus auditStat = success ? fsd.getAuditFileInfo(dst) : null;
     return new RenameResult(
         success, auditStat, filesDeleted, collectedBlocks);
   }
 
   static class RenameResult {
     final boolean success;
-    final HdfsFileStatus auditStat;
+    final FileStatus auditStat;
     final boolean filesDeleted;
     final BlocksMapUpdateInfo collectedBlocks;
 
-    RenameResult(boolean success, HdfsFileStatus auditStat,
+    RenameResult(boolean success, FileStatus auditStat,
         boolean filesDeleted, BlocksMapUpdateInfo collectedBlocks) {
       this.success = success;
       this.auditStat = auditStat;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9b90e52f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSymlinkOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSymlinkOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSymlinkOp.java
index 3b5f19d..ff7a3d8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSymlinkOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSymlinkOp.java
@@ -17,13 +17,13 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
+import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.InvalidPathException;
 import org.apache.hadoop.fs.UnresolvedLinkException;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.hdfs.DFSUtil;
-import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory.DirOp;
 
@@ -33,7 +33,7 @@ import static org.apache.hadoop.util.Time.now;
 
 class FSDirSymlinkOp {
 
-  static HdfsFileStatus createSymlinkInt(
+  static FileStatus createSymlinkInt(
       FSNamesystem fsn, String target, final String linkArg,
       PermissionStatus dirPerms, boolean createParent, boolean logRetryCache)
       throws IOException {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9b90e52f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirTruncateOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirTruncateOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirTruncateOp.java
index 9546c59..a6305f4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirTruncateOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirTruncateOp.java
@@ -21,11 +21,11 @@ import java.io.IOException;
 import java.util.Set;
 
 import org.apache.hadoop.HadoopIllegalArgumentException;
+import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.UnresolvedLinkException;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
-import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
 import org.apache.hadoop.hdfs.protocol.SnapshotAccessControlException;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
@@ -348,9 +348,9 @@ final class FSDirTruncateOp {
    */
   static class TruncateResult {
     private final boolean result;
-    private final HdfsFileStatus stat;
+    private final FileStatus stat;
 
-    public TruncateResult(boolean result, HdfsFileStatus stat) {
+    public TruncateResult(boolean result, FileStatus stat) {
       this.result = result;
       this.stat = stat;
     }
@@ -366,7 +366,7 @@ final class FSDirTruncateOp {
     /**
      * @return file information.
      */
-    HdfsFileStatus getFileStatus() {
+    FileStatus getFileStatus() {
       return stat;
     }
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9b90e52f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
index f676f36..e5243ee 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
@@ -21,13 +21,13 @@ import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;
 import org.apache.hadoop.HadoopIllegalArgumentException;
+import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.XAttr;
 import org.apache.hadoop.fs.XAttrSetFlag;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.XAttrHelper;
-import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos;
 import org.apache.hadoop.hdfs.protocolPB.PBHelperClient;
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory.DirOp;
@@ -59,7 +59,7 @@ class FSDirXAttrOp {
    *          - xAttrs flags
    * @throws IOException
    */
-  static HdfsFileStatus setXAttr(
+  static FileStatus setXAttr(
       FSDirectory fsd, String src, XAttr xAttr, EnumSet<XAttrSetFlag> flag,
       boolean logRetryCache)
       throws IOException {
@@ -153,7 +153,7 @@ class FSDirXAttrOp {
    *          - xAttr to remove
    * @throws IOException
    */
-  static HdfsFileStatus removeXAttr(
+  static FileStatus removeXAttr(
       FSDirectory fsd, String src, XAttr xAttr, boolean logRetryCache)
       throws IOException {
     FSDirXAttrOp.checkXAttrsConfigFlag(fsd);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9b90e52f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
index 9ac6149..07dc5c1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
@@ -28,6 +28,7 @@ import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension;
+import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.InvalidPathException;
 import org.apache.hadoop.fs.ParentNotDirectoryException;
 import org.apache.hadoop.fs.Path;
@@ -38,6 +39,7 @@ import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.protocol.FSLimitException.MaxDirectoryItemsExceededException;
@@ -1796,10 +1798,45 @@ public class FSDirectory implements Closeable {
     }
   }
 
-  HdfsFileStatus getAuditFileInfo(INodesInPath iip)
+  FileStatus getAuditFileInfo(INodesInPath iip)
       throws IOException {
-    return (namesystem.isAuditEnabled() && namesystem.isExternalInvocation())
-        ? FSDirStatAndListingOp.getFileInfo(this, iip, false) : null;
+    if (!namesystem.isAuditEnabled() || !namesystem.isExternalInvocation()) {
+      return null;
+    }
+
+    final INode inode = iip.getLastINode();
+    if (inode == null) {
+      return null;
+    }
+    final int snapshot = iip.getPathSnapshotId();
+
+    Path symlink = null;
+    long size = 0;     // length is zero for directories
+    short replication = 0;
+    long blocksize = 0;
+
+    if (inode.isFile()) {
+      final INodeFile fileNode = inode.asFile();
+      size = fileNode.computeFileSize(snapshot);
+      replication = fileNode.getFileReplication(snapshot);
+      blocksize = fileNode.getPreferredBlockSize();
+    } else if (inode.isSymlink()) {
+      symlink = new Path(
+          DFSUtilClient.bytes2String(inode.asSymlink().getSymlink()));
+    }
+
+    return new FileStatus(
+        size,
+        inode.isDirectory(),
+        replication,
+        blocksize,
+        inode.getModificationTime(snapshot),
+        inode.getAccessTime(snapshot),
+        inode.getFsPermission(snapshot),
+        inode.getUserName(snapshot),
+        inode.getGroupName(snapshot),
+        symlink,
+        new Path(iip.getPath()));
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9b90e52f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index afcc717..eb423cb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -351,26 +351,34 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
   }
   
   private void logAuditEvent(boolean succeeded, String cmd, String src,
-      String dst, HdfsFileStatus stat) throws IOException {
+      String dst, FileStatus stat) throws IOException {
     if (isAuditEnabled() && isExternalInvocation()) {
       logAuditEvent(succeeded, Server.getRemoteUser(), Server.getRemoteIp(),
                     cmd, src, dst, stat);
     }
   }
 
-  private void logAuditEvent(boolean succeeded,
-      UserGroupInformation ugi, InetAddress addr, String cmd, String src,
-      String dst, HdfsFileStatus stat) {
+  private void logAuditEvent(boolean succeeded, String cmd, String src,
+      HdfsFileStatus stat) throws IOException {
+    if (!isAuditEnabled() || !isExternalInvocation()) {
+      return;
+    }
     FileStatus status = null;
     if (stat != null) {
       Path symlink = stat.isSymlink() ? new Path(stat.getSymlink()) : null;
-      Path path = dst != null ? new Path(dst) : new Path(src);
+      Path path = new Path(src);
       status = new FileStatus(stat.getLen(), stat.isDir(),
           stat.getReplication(), stat.getBlockSize(),
           stat.getModificationTime(),
           stat.getAccessTime(), stat.getPermission(), stat.getOwner(),
           stat.getGroup(), symlink, path);
     }
+    logAuditEvent(succeeded, cmd, src, null, status);
+  }
+
+  private void logAuditEvent(boolean succeeded,
+      UserGroupInformation ugi, InetAddress addr, String cmd, String src,
+      String dst, FileStatus status) {
     final String ugiStr = ugi.toString();
     for (AuditLogger logger : auditLoggers) {
       if (logger instanceof HdfsAuditLogger) {
@@ -1725,7 +1733,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
    */
   void setPermission(String src, FsPermission permission) throws IOException {
     final String operationName = "setPermission";
-    HdfsFileStatus auditStat;
+    FileStatus auditStat;
     checkOperation(OperationCategory.WRITE);
     writeLock();
     try {
@@ -1749,7 +1757,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
   void setOwner(String src, String username, String group)
       throws IOException {
     final String operationName = "setOwner";
-    HdfsFileStatus auditStat;
+    FileStatus auditStat;
     checkOperation(OperationCategory.WRITE);
     writeLock();
     try {
@@ -1886,7 +1894,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
   void concat(String target, String [] srcs, boolean logRetryCache)
       throws IOException {
     final String operationName = "concat";
-    HdfsFileStatus stat = null;
+    FileStatus stat = null;
     boolean success = false;
     writeLock();
     try {
@@ -1914,7 +1922,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
    */
   void setTimes(String src, long mtime, long atime) throws IOException {
     final String operationName = "setTimes";
-    HdfsFileStatus auditStat;
+    FileStatus auditStat;
     checkOperation(OperationCategory.WRITE);
     writeLock();
     try {
@@ -1989,7 +1997,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
     if (!FileSystem.areSymlinksEnabled()) {
       throw new UnsupportedOperationException("Symlinks not supported");
     }
-    HdfsFileStatus auditStat = null;
+    FileStatus auditStat = null;
     writeLock();
     try {
       checkOperation(OperationCategory.WRITE);
@@ -2050,7 +2058,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
    */
   void setStoragePolicy(String src, String policyName) throws IOException {
     final String operationName = "setStoragePolicy";
-    HdfsFileStatus auditStat;
+    FileStatus auditStat;
     checkOperation(OperationCategory.WRITE);
     writeLock();
     try {
@@ -2075,7 +2083,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
    */
   void unsetStoragePolicy(String src) throws IOException {
     final String operationName = "unsetStoragePolicy";
-    HdfsFileStatus auditStat;
+    FileStatus auditStat;
     checkOperation(OperationCategory.WRITE);
     writeLock();
     try {
@@ -2195,7 +2203,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
       logAuditEvent(false, "create", src);
       throw e;
     }
-    logAuditEvent(true, "create", src, null, status);
+    logAuditEvent(true, "create", src, status);
     return status;
   }
 
@@ -2954,7 +2962,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
   boolean mkdirs(String src, PermissionStatus permissions,
       boolean createParent) throws IOException {
     final String operationName = "mkdirs";
-    HdfsFileStatus auditStat = null;
+    FileStatus auditStat = null;
     checkOperation(OperationCategory.WRITE);
     writeLock();
     try {
@@ -6589,7 +6597,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
   void modifyAclEntries(final String src, List<AclEntry> aclSpec)
       throws IOException {
     final String operationName = "modifyAclEntries";
-    HdfsFileStatus auditStat = null;
+    FileStatus auditStat = null;
     checkOperation(OperationCategory.WRITE);
     writeLock();
     try {
@@ -6610,7 +6618,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
       throws IOException {
     final String operationName = "removeAclEntries";
     checkOperation(OperationCategory.WRITE);
-    HdfsFileStatus auditStat = null;
+    FileStatus auditStat = null;
     writeLock();
     try {
       checkOperation(OperationCategory.WRITE);
@@ -6628,7 +6636,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
 
   void removeDefaultAcl(final String src) throws IOException {
     final String operationName = "removeDefaultAcl";
-    HdfsFileStatus auditStat = null;
+    FileStatus auditStat = null;
     checkOperation(OperationCategory.WRITE);
     writeLock();
     try {
@@ -6647,7 +6655,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
 
   void removeAcl(final String src) throws IOException {
     final String operationName = "removeAcl";
-    HdfsFileStatus auditStat = null;
+    FileStatus auditStat = null;
     checkOperation(OperationCategory.WRITE);
     writeLock();
     try {
@@ -6666,7 +6674,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
 
   void setAcl(final String src, List<AclEntry> aclSpec) throws IOException {
     final String operationName = "setAcl";
-    HdfsFileStatus auditStat = null;
+    FileStatus auditStat = null;
     checkOperation(OperationCategory.WRITE);
     writeLock();
     try {
@@ -6722,7 +6730,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
       checkSuperuserPrivilege();
       FSPermissionChecker pc = getPermissionChecker();
       checkOperation(OperationCategory.WRITE);
-      final HdfsFileStatus resultingStat;
+      final FileStatus resultingStat;
       writeLock();
       try {
         checkSuperuserPrivilege();
@@ -6753,7 +6761,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
   EncryptionZone getEZForPath(final String srcArg)
     throws AccessControlException, UnresolvedLinkException, IOException {
     final String operationName = "getEZForPath";
-    HdfsFileStatus resultingStat = null;
+    FileStatus resultingStat = null;
     boolean success = false;
     EncryptionZone encryptionZone;
     final FSPermissionChecker pc = getPermissionChecker();
@@ -6761,7 +6769,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
     readLock();
     try {
       checkOperation(OperationCategory.READ);
-      Entry<EncryptionZone, HdfsFileStatus> ezForPath = FSDirEncryptionZoneOp
+      Entry<EncryptionZone, FileStatus> ezForPath = FSDirEncryptionZoneOp
           .getEZForPath(dir, srcArg, pc);
       success = true;
       resultingStat = ezForPath.getValue();
@@ -6810,7 +6818,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
       UnresolvedLinkException, SafeModeException, AccessControlException {
     final String operationName = "setErasureCodingPolicy";
     checkOperation(OperationCategory.WRITE);
-    HdfsFileStatus resultingStat = null;
+    FileStatus resultingStat = null;
     final FSPermissionChecker pc = getPermissionChecker();
     boolean success = false;
     writeLock();
@@ -6868,7 +6876,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
       UnresolvedLinkException, SafeModeException, AccessControlException {
     final String operationName = "unsetErasureCodingPolicy";
     checkOperation(OperationCategory.WRITE);
-    HdfsFileStatus resultingStat = null;
+    FileStatus resultingStat = null;
     final FSPermissionChecker pc = getPermissionChecker();
     boolean success = false;
     writeLock();
@@ -6926,7 +6934,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
                 boolean logRetryCache)
       throws IOException {
     final String operationName = "setXAttr";
-    HdfsFileStatus auditStat = null;
+    FileStatus auditStat = null;
     writeLock();
     try {
       checkOperation(OperationCategory.WRITE);
@@ -6982,7 +6990,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
   void removeXAttr(String src, XAttr xAttr, boolean logRetryCache)
       throws IOException {
     final String operationName = "removeXAttr";
-    HdfsFileStatus auditStat = null;
+    FileStatus auditStat = null;
     writeLock();
     try {
       checkOperation(OperationCategory.WRITE);



[22/29] hadoop git commit: HDFS-10675. Datanode support to read from external stores.

Posted by vi...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2630e4fd/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsVolumeSpi.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsVolumeSpi.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsVolumeSpi.java
index adec209..15e71f0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsVolumeSpi.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsVolumeSpi.java
@@ -33,6 +33,7 @@ import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.server.datanode.FileIoProvider;
+import org.apache.hadoop.hdfs.server.common.FileRegion;
 import org.apache.hadoop.hdfs.server.datanode.DirectoryScanner.ReportCompiler;
 import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
 import org.apache.hadoop.hdfs.server.datanode.checker.Checkable;
@@ -241,10 +242,11 @@ public interface FsVolumeSpi
 
     private final FsVolumeSpi volume;
 
+    private final FileRegion fileRegion;
     /**
      * Get the file's length in async block scan
      */
-    private final long blockFileLength;
+    private final long blockLength;
 
     private final static Pattern CONDENSED_PATH_REGEX =
         Pattern.compile("(?<!^)(\\\\|/){2,}");
@@ -294,13 +296,30 @@ public interface FsVolumeSpi
      */
     public ScanInfo(long blockId, File blockFile, File metaFile,
         FsVolumeSpi vol) {
+      this(blockId, blockFile, metaFile, vol, null,
+          (blockFile != null) ? blockFile.length() : 0);
+    }
+
+    /**
+     * Create a ScanInfo object for a block. The block length is supplied
+     * by the caller rather than read from the block data file.
+     *
+     * @param blockId the block ID
+     * @param blockFile the path to the block data file
+     * @param metaFile the path to the block meta-data file
+     * @param vol the volume that contains the block
+     * @param fileRegion the file region (for provided blocks)
+     * @param length the length of the block data
+     */
+    public ScanInfo(long blockId, File blockFile, File metaFile,
+        FsVolumeSpi vol, FileRegion fileRegion, long length) {
       this.blockId = blockId;
       String condensedVolPath =
           (vol == null || vol.getBaseURI() == null) ? null :
             getCondensedPath(new File(vol.getBaseURI()).getAbsolutePath());
       this.blockSuffix = blockFile == null ? null :
         getSuffix(blockFile, condensedVolPath);
-      this.blockFileLength = (blockFile != null) ? blockFile.length() : 0;
+      this.blockLength = length;
       if (metaFile == null) {
         this.metaSuffix = null;
       } else if (blockFile == null) {
@@ -310,6 +329,7 @@ public interface FsVolumeSpi
             condensedVolPath + blockSuffix);
       }
       this.volume = vol;
+      this.fileRegion = fileRegion;
     }
 
     /**
@@ -328,8 +348,8 @@ public interface FsVolumeSpi
      *
      * @return the length of the data block
      */
-    public long getBlockFileLength() {
-      return blockFileLength;
+    public long getBlockLength() {
+      return blockLength;
     }
 
     /**
@@ -399,6 +419,10 @@ public interface FsVolumeSpi
           getMetaFile().getName()) :
             HdfsConstants.GRANDFATHER_GENERATION_STAMP;
     }
+
+    public FileRegion getFileRegion() {
+      return fileRegion;
+    }
   }
 
   /**

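A minimal sketch of how the new six-argument constructor can describe a provided block that has no local files, taking its length from the FileRegion rather than from File#length(). Only methods visible in this diff are used; the helper name and its placement are hypothetical.

  // Hypothetical helper: wrap a FileRegion from a provided store in a ScanInfo.
  // blockFile and metaFile are null because the data lives in the external
  // store; the length comes from the region, not from a local file.
  static FsVolumeSpi.ScanInfo toScanInfo(FileRegion region, FsVolumeSpi vol) {
    return new FsVolumeSpi.ScanInfo(region.getBlock().getBlockId(),
        null /* blockFile */, null /* metaFile */,
        vol, region, region.getLength());
  }
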
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2630e4fd/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/DefaultProvidedVolumeDF.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/DefaultProvidedVolumeDF.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/DefaultProvidedVolumeDF.java
new file mode 100644
index 0000000..24921c4
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/DefaultProvidedVolumeDF.java
@@ -0,0 +1,58 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl;
+
+import org.apache.hadoop.conf.Configurable;
+import org.apache.hadoop.conf.Configuration;
+
+/**
+ * The default usage statistics for a provided volume.
+ */
+public class DefaultProvidedVolumeDF
+    implements ProvidedVolumeDF, Configurable {
+
+  @Override
+  public void setConf(Configuration conf) {
+  }
+
+  @Override
+  public Configuration getConf() {
+    return null;
+  }
+
+  @Override
+  public long getCapacity() {
+    return Long.MAX_VALUE;
+  }
+
+  @Override
+  public long getSpaceUsed() {
+    return 0;
+  }
+
+  @Override
+  public long getBlockPoolUsed(String bpid) {
+    return 0;
+  }
+
+  @Override
+  public long getAvailable() {
+    return Long.MAX_VALUE;
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2630e4fd/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
index e7d4d25..f740c1f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
@@ -85,6 +85,7 @@ import org.apache.hadoop.hdfs.server.datanode.UnexpectedReplicaStateException;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
+import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi.ScanInfo;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.LengthInputStream;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.ReplicaInputStreams;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.ReplicaOutputStreams;
@@ -1702,6 +1703,10 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
 
       Set<String> missingVolumesReported = new HashSet<>();
       for (ReplicaInfo b : volumeMap.replicas(bpid)) {
+        //skip blocks in PROVIDED storage
+        if (b.getVolume().getStorageType() == StorageType.PROVIDED) {
+          continue;
+        }
         String volStorageID = b.getVolume().getStorageID();
         if (!builders.containsKey(volStorageID)) {
           if (!missingVolumesReported.contains(volStorageID)) {
@@ -1837,7 +1842,6 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
     try (AutoCloseableLock lock = datasetLock.acquire()) {
       r = volumeMap.get(bpid, blockId);
     }
-
     if (r != null) {
       if (r.blockDataExists()) {
         return r;
@@ -2178,13 +2182,20 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
    * @param vol Volume of the block file
    */
   @Override
-  public void checkAndUpdate(String bpid, long blockId, File diskFile,
-      File diskMetaFile, FsVolumeSpi vol) throws IOException {
+  public void checkAndUpdate(String bpid, ScanInfo scanInfo)
+      throws IOException {
+
+    long blockId = scanInfo.getBlockId();
+    File diskFile = scanInfo.getBlockFile();
+    File diskMetaFile = scanInfo.getMetaFile();
+    FsVolumeSpi vol = scanInfo.getVolume();
+
     Block corruptBlock = null;
     ReplicaInfo memBlockInfo;
     try (AutoCloseableLock lock = datasetLock.acquire()) {
       memBlockInfo = volumeMap.get(bpid, blockId);
-      if (memBlockInfo != null && memBlockInfo.getState() != ReplicaState.FINALIZED) {
+      if (memBlockInfo != null &&
+          memBlockInfo.getState() != ReplicaState.FINALIZED) {
         // Block is not finalized - ignore the difference
         return;
       }
@@ -2199,6 +2210,26 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
           Block.getGenerationStamp(diskMetaFile.getName()) :
           HdfsConstants.GRANDFATHER_GENERATION_STAMP;
 
+      if (vol.getStorageType() == StorageType.PROVIDED) {
+        if (memBlockInfo == null) {
+          //replica exists on provided store but not in memory
+          ReplicaInfo diskBlockInfo =
+              new ReplicaBuilder(ReplicaState.FINALIZED)
+              .setFileRegion(scanInfo.getFileRegion())
+              .setFsVolume(vol)
+              .setConf(conf)
+              .build();
+
+          volumeMap.add(bpid, diskBlockInfo);
+          LOG.warn("Added missing block to memory " + diskBlockInfo);
+        } else {
+          //replica exists in memory but not in the provided store
+          volumeMap.remove(bpid, blockId);
+          LOG.warn("Deleting missing provided block " + memBlockInfo);
+        }
+        return;
+      }
+
       if (!diskFileExists) {
         if (memBlockInfo == null) {
           // Block file does not exist and block does not exist in memory
@@ -2974,7 +3005,6 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
           newReplicaInfo =
               replicaState.getLazyPersistVolume().activateSavedReplica(bpid,
                   replicaInfo, replicaState);
-
           // Update the volumeMap entry.
           volumeMap.add(bpid, newReplicaInfo);
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2630e4fd/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetUtil.java
index 32759c4..9f115a0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetUtil.java
@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl;
 
+import java.io.ByteArrayOutputStream;
+import java.io.DataOutputStream;
 import java.io.File;
 import java.io.FileDescriptor;
 import java.io.FileInputStream;
@@ -32,10 +34,12 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.server.datanode.BlockMetadataHeader;
 import org.apache.hadoop.hdfs.server.datanode.DatanodeUtil;
 import org.apache.hadoop.hdfs.server.datanode.FinalizedReplica;
 import org.apache.hadoop.hdfs.server.datanode.ReplicaInfo;
 import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.util.DataChecksum;
 
 /** Utility methods. */
 @InterfaceAudience.Private
@@ -44,6 +48,22 @@ public class FsDatasetUtil {
     return f.getName().endsWith(DatanodeUtil.UNLINK_BLOCK_SUFFIX);
   }
 
+  public static byte[] createNullChecksumByteArray() {
+    DataChecksum csum =
+        DataChecksum.newDataChecksum(DataChecksum.Type.NULL, 512);
+    ByteArrayOutputStream out = new ByteArrayOutputStream();
+    DataOutputStream dataOut = new DataOutputStream(out);
+    try {
+      BlockMetadataHeader.writeHeader(dataOut, csum);
+      dataOut.close();
+    } catch (IOException e) {
+      FsVolumeImpl.LOG.error(
+          "Exception in creating null checksum stream: " + e);
+      return null;
+    }
+    return out.toByteArray();
+  }
+
   static File getOrigFile(File unlinkTmpFile) {
     final String name = unlinkTmpFile.getName();
     if (!name.endsWith(DatanodeUtil.UNLINK_BLOCK_SUFFIX)) {
@@ -135,8 +155,9 @@ public class FsDatasetUtil {
    * Compute the checksum for a block file that does not already have
    * its checksum computed, and save it to dstMeta file.
    */
-  public static void computeChecksum(File srcMeta, File dstMeta, File blockFile,
-      int smallBufferSize, Configuration conf) throws IOException {
+  public static void computeChecksum(File srcMeta, File dstMeta,
+      File blockFile, int smallBufferSize, Configuration conf)
+          throws IOException {
     Preconditions.checkNotNull(srcMeta);
     Preconditions.checkNotNull(dstMeta);
     Preconditions.checkNotNull(blockFile);

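A small usage sketch for createNullChecksumByteArray() above, assuming a caller that wants to materialize a local meta file carrying only the NULL-type checksum header; the method name writeNullMetaFile and its File argument are hypothetical.

  // Hypothetical usage: persist a meta file whose header declares a NULL
  // checksum type, i.e. no per-chunk checksums are stored for the block data.
  static void writeNullMetaFile(java.io.File metaFile)
      throws java.io.IOException {
    byte[] header = FsDatasetUtil.createNullChecksumByteArray();
    if (header != null) {
      java.nio.file.Files.write(metaFile.toPath(), header);
    }
  }
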
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2630e4fd/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
index b948fb7..267a5cc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
@@ -155,18 +155,24 @@ public class FsVolumeImpl implements FsVolumeSpi {
     this.reservedForReplicas = new AtomicLong(0L);
     this.storageLocation = sd.getStorageLocation();
     this.currentDir = sd.getCurrentDir();
-    File parent = currentDir.getParentFile();
-    this.usage = new DF(parent, conf);
     this.storageType = storageLocation.getStorageType();
     this.reserved = conf.getLong(DFSConfigKeys.DFS_DATANODE_DU_RESERVED_KEY
         + "." + StringUtils.toLowerCase(storageType.toString()), conf.getLong(
         DFSConfigKeys.DFS_DATANODE_DU_RESERVED_KEY,
         DFSConfigKeys.DFS_DATANODE_DU_RESERVED_DEFAULT));
     this.configuredCapacity = -1;
+    if (currentDir != null) {
+      File parent = currentDir.getParentFile();
+      this.usage = new DF(parent, conf);
+      cacheExecutor = initializeCacheExecutor(parent);
+      this.metrics = DataNodeVolumeMetrics.create(conf, parent.getPath());
+    } else {
+      this.usage = null;
+      cacheExecutor = null;
+      this.metrics = null;
+    }
     this.conf = conf;
     this.fileIoProvider = fileIoProvider;
-    cacheExecutor = initializeCacheExecutor(parent);
-    this.metrics = DataNodeVolumeMetrics.create(conf, getBaseURI().getPath());
   }
 
   protected ThreadPoolExecutor initializeCacheExecutor(File parent) {
@@ -446,7 +452,8 @@ public class FsVolumeImpl implements FsVolumeSpi {
   /**
    * Unplanned Non-DFS usage, i.e. Extra usage beyond reserved.
    *
-   * @return
+   * @return Disk usage excluding space used by HDFS and excluding space
+   * reserved for blocks open for write.
    * @throws IOException
    */
   public long getNonDfsUsed() throws IOException {
@@ -524,7 +531,7 @@ public class FsVolumeImpl implements FsVolumeSpi {
   public String[] getBlockPoolList() {
     return bpSlices.keySet().toArray(new String[bpSlices.keySet().size()]);   
   }
-    
+
   /**
    * Temporary files. They get moved to the finalized block directory when
    * the block is finalized.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2630e4fd/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImplBuilder.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImplBuilder.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImplBuilder.java
index 427f81b..2da9170 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImplBuilder.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImplBuilder.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl;
 import java.io.IOException;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
 import org.apache.hadoop.hdfs.server.datanode.FileIoProvider;
 
@@ -67,6 +68,11 @@ public class FsVolumeImplBuilder {
   }
 
   FsVolumeImpl build() throws IOException {
+    if (sd.getStorageLocation().getStorageType() == StorageType.PROVIDED) {
+      return new ProvidedVolumeImpl(dataset, storageID, sd,
+          fileIoProvider != null ? fileIoProvider :
+            new FileIoProvider(null, null), conf);
+    }
     return new FsVolumeImpl(
         dataset, storageID, sd,
         fileIoProvider != null ? fileIoProvider :

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2630e4fd/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/ProvidedVolumeDF.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/ProvidedVolumeDF.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/ProvidedVolumeDF.java
new file mode 100644
index 0000000..4d28883
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/ProvidedVolumeDF.java
@@ -0,0 +1,34 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl;
+
+/**
+ * This interface is used to define the usage statistics
+ * of the provided storage.
+ */
+public interface ProvidedVolumeDF {
+
+  long getCapacity();
+
+  long getSpaceUsed();
+
+  long getBlockPoolUsed(String bpid);
+
+  long getAvailable();
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2630e4fd/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/ProvidedVolumeImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/ProvidedVolumeImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/ProvidedVolumeImpl.java
new file mode 100644
index 0000000..a48e117
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/ProvidedVolumeImpl.java
@@ -0,0 +1,526 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl;
+
+import java.io.File;
+import java.io.IOException;
+import java.net.URI;
+import java.util.Iterator;
+import java.util.LinkedList;
+import java.util.Map;
+import java.util.Set;
+import java.util.Map.Entry;
+import java.util.concurrent.ConcurrentHashMap;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.server.common.FileRegion;
+import org.apache.hadoop.hdfs.server.common.FileRegionProvider;
+import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
+import org.apache.hadoop.hdfs.server.common.TextFileRegionProvider;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
+import org.apache.hadoop.hdfs.server.datanode.ReplicaInPipeline;
+import org.apache.hadoop.hdfs.server.datanode.ReplicaInfo;
+import org.apache.hadoop.hdfs.server.datanode.DirectoryScanner.ReportCompiler;
+import org.apache.hadoop.hdfs.server.datanode.checker.VolumeCheckResult;
+import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
+import org.apache.hadoop.hdfs.server.datanode.FileIoProvider;
+import org.apache.hadoop.hdfs.server.datanode.ReplicaBuilder;
+import org.apache.hadoop.util.Timer;
+import org.apache.hadoop.util.DiskChecker.DiskErrorException;
+import org.apache.hadoop.util.AutoCloseableLock;
+import org.codehaus.jackson.annotate.JsonProperty;
+import org.codehaus.jackson.map.ObjectMapper;
+import org.codehaus.jackson.map.ObjectReader;
+import org.codehaus.jackson.map.ObjectWriter;
+
+import com.google.common.annotations.VisibleForTesting;
+
+import org.apache.hadoop.util.ReflectionUtils;
+import org.apache.hadoop.util.Time;
+
+/**
+ * This class is used to create provided volumes.
+ */
+public class ProvidedVolumeImpl extends FsVolumeImpl {
+
+  static class ProvidedBlockPoolSlice {
+    private FsVolumeImpl providedVolume;
+
+    private FileRegionProvider provider;
+    private Configuration conf;
+    private String bpid;
+    private ReplicaMap bpVolumeMap;
+
+    ProvidedBlockPoolSlice(String bpid, ProvidedVolumeImpl volume,
+        Configuration conf) {
+      this.providedVolume = volume;
+      bpVolumeMap = new ReplicaMap(new AutoCloseableLock());
+      Class<? extends FileRegionProvider> fmt =
+          conf.getClass(DFSConfigKeys.DFS_PROVIDER_CLASS,
+              TextFileRegionProvider.class, FileRegionProvider.class);
+      provider = ReflectionUtils.newInstance(fmt, conf);
+      this.conf = conf;
+      this.bpid = bpid;
+      bpVolumeMap.initBlockPool(bpid);
+      LOG.info("Created provider: " + provider.getClass());
+    }
+
+    FileRegionProvider getFileRegionProvider() {
+      return provider;
+    }
+
+    public void getVolumeMap(ReplicaMap volumeMap,
+        RamDiskReplicaTracker ramDiskReplicaMap) throws IOException {
+      Iterator<FileRegion> iter = provider.iterator();
+      while(iter.hasNext()) {
+        FileRegion region = iter.next();
+        if (region.getBlockPoolId() != null &&
+            region.getBlockPoolId().equals(bpid)) {
+          ReplicaInfo newReplica = new ReplicaBuilder(ReplicaState.FINALIZED)
+              .setBlockId(region.getBlock().getBlockId())
+              .setURI(region.getPath().toUri())
+              .setOffset(region.getOffset())
+              .setLength(region.getBlock().getNumBytes())
+              .setGenerationStamp(region.getBlock().getGenerationStamp())
+              .setFsVolume(providedVolume)
+              .setConf(conf).build();
+
+          ReplicaInfo oldReplica =
+              volumeMap.get(bpid, newReplica.getBlockId());
+          if (oldReplica == null) {
+            volumeMap.add(bpid, newReplica);
+            bpVolumeMap.add(bpid, newReplica);
+          } else {
+            throw new IOException(
+                "A block with id " + newReplica.getBlockId() +
+                " already exists in the volumeMap");
+          }
+        }
+      }
+    }
+
+    public boolean isEmpty() {
+      return bpVolumeMap.replicas(bpid).size() == 0;
+    }
+
+    public void shutdown(BlockListAsLongs blocksListsAsLongs) {
+      //nothing to do!
+    }
+
+    public void compileReport(LinkedList<ScanInfo> report,
+        ReportCompiler reportCompiler)
+            throws IOException, InterruptedException {
+      /* refresh the provider and return the list of blocks found.
+       * the assumption here is that the block ids in the external
+       * block map, after the refresh, are consistent with those
+       * from before the refresh, i.e., for blocks which did not change,
+       * the ids remain the same.
+       */
+      provider.refresh();
+      Iterator<FileRegion> iter = provider.iterator();
+      while(iter.hasNext()) {
+        reportCompiler.throttle();
+        FileRegion region = iter.next();
+        if (region.getBlockPoolId().equals(bpid)) {
+          LOG.info("Adding ScanInfo for blkid " +
+              region.getBlock().getBlockId());
+          report.add(new ScanInfo(region.getBlock().getBlockId(), null, null,
+              providedVolume, region, region.getLength()));
+        }
+      }
+    }
+  }
+
+  private URI baseURI;
+  private final Map<String, ProvidedBlockPoolSlice> bpSlices =
+      new ConcurrentHashMap<String, ProvidedBlockPoolSlice>();
+
+  private ProvidedVolumeDF df;
+
+  ProvidedVolumeImpl(FsDatasetImpl dataset, String storageID,
+      StorageDirectory sd, FileIoProvider fileIoProvider,
+      Configuration conf) throws IOException {
+    super(dataset, storageID, sd, fileIoProvider, conf);
+    assert getStorageLocation().getStorageType() == StorageType.PROVIDED:
+      "Only provided storages must use ProvidedVolume";
+
+    baseURI = getStorageLocation().getUri();
+    Class<? extends ProvidedVolumeDF> dfClass =
+        conf.getClass(DFSConfigKeys.DFS_PROVIDER_DF_CLASS,
+            DefaultProvidedVolumeDF.class, ProvidedVolumeDF.class);
+    df = ReflectionUtils.newInstance(dfClass, conf);
+  }
+
+  @Override
+  public String[] getBlockPoolList() {
+    return bpSlices.keySet().toArray(new String[bpSlices.keySet().size()]);
+  }
+
+  @Override
+  public long getCapacity() {
+    if (configuredCapacity < 0) {
+      return df.getCapacity();
+    }
+    return configuredCapacity;
+  }
+
+  @Override
+  public long getDfsUsed() throws IOException {
+    return df.getSpaceUsed();
+  }
+
+  @Override
+  long getBlockPoolUsed(String bpid) throws IOException {
+    return df.getBlockPoolUsed(bpid);
+  }
+
+  @Override
+  public long getAvailable() throws IOException {
+    return df.getAvailable();
+  }
+
+  @Override
+  long getActualNonDfsUsed() throws IOException {
+    return df.getSpaceUsed();
+  }
+
+  @Override
+  public long getNonDfsUsed() throws IOException {
+    return 0L;
+  }
+
+  @Override
+  public URI getBaseURI() {
+    return baseURI;
+  }
+
+  @Override
+  public File getFinalizedDir(String bpid) throws IOException {
+    return null;
+  }
+
+  @Override
+  public void reserveSpaceForReplica(long bytesToReserve) {
+    throw new UnsupportedOperationException(
+        "ProvidedVolume does not yet support writes");
+  }
+
+  @Override
+  public void releaseReservedSpace(long bytesToRelease) {
+    throw new UnsupportedOperationException(
+        "ProvidedVolume does not yet support writes");
+  }
+
+  private static final ObjectWriter WRITER =
+      new ObjectMapper().writerWithDefaultPrettyPrinter();
+  private static final ObjectReader READER =
+      new ObjectMapper().reader(ProvidedBlockIteratorState.class);
+
+  private static class ProvidedBlockIteratorState {
+    ProvidedBlockIteratorState() {
+      iterStartMs = Time.now();
+      lastSavedMs = iterStartMs;
+      atEnd = false;
+      lastBlockId = -1;
+    }
+
+    // The wall-clock ms since the epoch at which this iterator was last saved.
+    @JsonProperty
+    private long lastSavedMs;
+
+    // The wall-clock ms since the epoch at which this iterator was created.
+    @JsonProperty
+    private long iterStartMs;
+
+    @JsonProperty
+    private boolean atEnd;
+
+    //The id of the last block read when the state of the iterator is saved.
+    //This implementation assumes that provided blocks are returned
+    //in sorted order of the block ids.
+    @JsonProperty
+    private long lastBlockId;
+  }
+
+  private class ProviderBlockIteratorImpl
+      implements FsVolumeSpi.BlockIterator {
+
+    private String bpid;
+    private String name;
+    private FileRegionProvider provider;
+    private Iterator<FileRegion> blockIterator;
+    private ProvidedBlockIteratorState state;
+
+    ProviderBlockIteratorImpl(String bpid, String name,
+        FileRegionProvider provider) {
+      this.bpid = bpid;
+      this.name = name;
+      this.provider = provider;
+      rewind();
+    }
+
+    @Override
+    public void close() throws IOException {
+      //No action needed
+    }
+
+    @Override
+    public ExtendedBlock nextBlock() throws IOException {
+      if (null == blockIterator || !blockIterator.hasNext()) {
+        return null;
+      }
+      FileRegion nextRegion = null;
+      while (null == nextRegion && blockIterator.hasNext()) {
+        FileRegion temp = blockIterator.next();
+        if (temp.getBlock().getBlockId() < state.lastBlockId) {
+          continue;
+        }
+        if (temp.getBlockPoolId().equals(bpid)) {
+          nextRegion = temp;
+        }
+      }
+      if (null == nextRegion) {
+        return null;
+      }
+      state.lastBlockId = nextRegion.getBlock().getBlockId();
+      return new ExtendedBlock(bpid, nextRegion.getBlock());
+    }
+
+    @Override
+    public boolean atEnd() {
+      return blockIterator != null ? !blockIterator.hasNext(): true;
+    }
+
+    @Override
+    public void rewind() {
+      blockIterator = provider.iterator();
+      state = new ProvidedBlockIteratorState();
+    }
+
+    @Override
+    public void save() throws IOException {
+      //We do not persist the state of this iterator anywhere, locally.
+      //We just re-scan provided volumes as necessary.
+      state.lastSavedMs = Time.now();
+    }
+
+    @Override
+    public void setMaxStalenessMs(long maxStalenessMs) {
+      //do not use max staleness
+    }
+
+    @Override
+    public long getIterStartMs() {
+      return state.iterStartMs;
+    }
+
+    @Override
+    public long getLastSavedMs() {
+      return state.lastSavedMs;
+    }
+
+    @Override
+    public String getBlockPoolId() {
+      return bpid;
+    }
+
+    public void load() throws IOException {
+      //on load, we just rewind the iterator for provided volumes.
+      rewind();
+      LOG.trace("load({}, {}): loaded iterator {}: {}", getStorageID(),
+          bpid, name, WRITER.writeValueAsString(state));
+    }
+  }
+
+  @Override
+  public BlockIterator newBlockIterator(String bpid, String name) {
+    return new ProviderBlockIteratorImpl(bpid, name,
+        bpSlices.get(bpid).getFileRegionProvider());
+  }
+
+  @Override
+  public BlockIterator loadBlockIterator(String bpid, String name)
+      throws IOException {
+    ProviderBlockIteratorImpl iter = new ProviderBlockIteratorImpl(bpid, name,
+        bpSlices.get(bpid).getFileRegionProvider());
+    iter.load();
+    return iter;
+  }
+
+  @Override
+  ReplicaInfo addFinalizedBlock(String bpid, Block b,
+      ReplicaInfo replicaInfo, long bytesReserved) throws IOException {
+    throw new UnsupportedOperationException(
+        "ProvidedVolume does not yet support writes");
+  }
+
+  @Override
+  public VolumeCheckResult check(VolumeCheckContext ignored)
+      throws DiskErrorException {
+    return VolumeCheckResult.HEALTHY;
+  }
+
+  @Override
+  void getVolumeMap(ReplicaMap volumeMap,
+      final RamDiskReplicaTracker ramDiskReplicaMap)
+          throws IOException {
+    LOG.info("Creating volumemap for provided volume " + this);
+    for(ProvidedBlockPoolSlice s : bpSlices.values()) {
+      s.getVolumeMap(volumeMap, ramDiskReplicaMap);
+    }
+  }
+
+  private ProvidedBlockPoolSlice getProvidedBlockPoolSlice(String bpid)
+      throws IOException {
+    ProvidedBlockPoolSlice bp = bpSlices.get(bpid);
+    if (bp == null) {
+      throw new IOException("block pool " + bpid + " is not found");
+    }
+    return bp;
+  }
+
+  @Override
+  void getVolumeMap(String bpid, ReplicaMap volumeMap,
+      final RamDiskReplicaTracker ramDiskReplicaMap)
+          throws IOException {
+    getProvidedBlockPoolSlice(bpid).getVolumeMap(volumeMap, ramDiskReplicaMap);
+  }
+
+  @VisibleForTesting
+  FileRegionProvider getFileRegionProvider(String bpid) throws IOException {
+    return getProvidedBlockPoolSlice(bpid).getFileRegionProvider();
+  }
+
+  @Override
+  public String toString() {
+    return this.baseURI.toString();
+  }
+
+  @Override
+  void addBlockPool(String bpid, Configuration conf) throws IOException {
+    addBlockPool(bpid, conf, null);
+  }
+
+  @Override
+  void addBlockPool(String bpid, Configuration conf, Timer timer)
+      throws IOException {
+    LOG.info("Adding block pool " + bpid +
+        " to volume with id " + getStorageID());
+    ProvidedBlockPoolSlice bp;
+    bp = new ProvidedBlockPoolSlice(bpid, this, conf);
+    bpSlices.put(bpid, bp);
+  }
+
+  void shutdown() {
+    if (cacheExecutor != null) {
+      cacheExecutor.shutdown();
+    }
+    Set<Entry<String, ProvidedBlockPoolSlice>> set = bpSlices.entrySet();
+    for (Entry<String, ProvidedBlockPoolSlice> entry : set) {
+      entry.getValue().shutdown(null);
+    }
+  }
+
+  @Override
+  void shutdownBlockPool(String bpid, BlockListAsLongs blocksListsAsLongs) {
+    ProvidedBlockPoolSlice bp = bpSlices.get(bpid);
+    if (bp != null) {
+      bp.shutdown(blocksListsAsLongs);
+    }
+    bpSlices.remove(bpid);
+  }
+
+  @Override
+  boolean isBPDirEmpty(String bpid) throws IOException {
+    return getProvidedBlockPoolSlice(bpid).isEmpty();
+  }
+
+  @Override
+  void deleteBPDirectories(String bpid, boolean force) throws IOException {
+    throw new UnsupportedOperationException(
+        "ProvidedVolume does not yet support writes");
+  }
+
+  @Override
+  public LinkedList<ScanInfo> compileReport(String bpid,
+      LinkedList<ScanInfo> report, ReportCompiler reportCompiler)
+          throws InterruptedException, IOException {
+    LOG.info("Compiling report for volume: " + this + " bpid " + bpid);
+    //get the report from the appropriate block pool.
+    if(bpSlices.containsKey(bpid)) {
+      bpSlices.get(bpid).compileReport(report, reportCompiler);
+    }
+    return report;
+  }
+
+  @Override
+  public ReplicaInPipeline append(String bpid, ReplicaInfo replicaInfo,
+      long newGS, long estimateBlockLen) throws IOException {
+    throw new UnsupportedOperationException(
+        "ProvidedVolume does not yet support writes");
+  }
+
+  @Override
+  public ReplicaInPipeline createRbw(ExtendedBlock b) throws IOException {
+    throw new UnsupportedOperationException(
+        "ProvidedVolume does not yet support writes");
+  }
+
+  @Override
+  public ReplicaInPipeline convertTemporaryToRbw(ExtendedBlock b,
+      ReplicaInfo temp) throws IOException {
+    throw new UnsupportedOperationException(
+        "ProvidedVolume does not yet support writes");
+  }
+
+  @Override
+  public ReplicaInPipeline createTemporary(ExtendedBlock b)
+      throws IOException {
+    throw new UnsupportedOperationException(
+        "ProvidedVolume does not yet support writes");
+  }
+
+  @Override
+  public ReplicaInPipeline updateRURCopyOnTruncate(ReplicaInfo rur,
+      String bpid, long newBlockId, long recoveryId, long newlength)
+          throws IOException {
+    throw new UnsupportedOperationException(
+        "ProvidedVolume does not yet support writes");
+  }
+
+  @Override
+  public ReplicaInfo moveBlockToTmpLocation(ExtendedBlock block,
+      ReplicaInfo replicaInfo, int smallBufferSize,
+      Configuration conf) throws IOException {
+    throw new UnsupportedOperationException(
+        "ProvidedVolume does not yet support writes");
+  }
+
+  @Override
+  public File[] copyBlockToLazyPersistLocation(String bpId, long blockId,
+      long genStamp, ReplicaInfo replicaInfo, int smallBufferSize,
+      Configuration conf) throws IOException {
+    throw new UnsupportedOperationException(
+        "ProvidedVolume does not yet support writes");
+  }
+}

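A minimal sketch, assuming a caller that already holds a ProvidedVolumeImpl (or any FsVolumeSpi) and a block pool id, of walking the replicas exposed by the block iterator above; the helper name is hypothetical and only methods shown in this class are used.

  // Hypothetical helper: list the block ids a provided volume reports for a
  // block pool. nextBlock() returns null once the region iterator is exhausted.
  static void dumpProvidedBlocks(FsVolumeSpi volume, String bpid)
      throws IOException {
    FsVolumeSpi.BlockIterator iter =
        volume.newBlockIterator(bpid, "provided-dump");
    try {
      ExtendedBlock block;
      while ((block = iter.nextBlock()) != null) {
        System.out.println(block.getBlockId());
      }
    } finally {
      iter.close();
    }
  }
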
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2630e4fd/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/Mover.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/Mover.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/Mover.java
index 7eac87d..24ef80c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/Mover.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/Mover.java
@@ -685,7 +685,7 @@ public class Mover {
     }
   }
 
-  static class Cli extends Configured implements Tool {
+  public static class Cli extends Configured implements Tool {
     private static final String USAGE = "Usage: hdfs mover "
         + "[-p <files/dirs> | -f <local file>]"
         + "\n\t-p <files/dirs>\ta space separated list of HDFS files/dirs to migrate."

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2630e4fd/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageCompression.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageCompression.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageCompression.java
index 872ee74..45e001d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageCompression.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageCompression.java
@@ -39,7 +39,7 @@ import org.apache.hadoop.io.compress.CompressionCodecFactory;
  */
 @InterfaceAudience.Private
 @InterfaceStability.Evolving
-class FSImageCompression {
+public class FSImageCompression {
 
   /** Codec to use to save or load image, or null if the image is not compressed */
   private CompressionCodec imageCodec;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2630e4fd/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java
index 63d1a28..4aae7d8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java
@@ -658,6 +658,10 @@ public class NNStorage extends Storage implements Closeable,
   void readProperties(StorageDirectory sd, StartupOption startupOption)
       throws IOException {
     Properties props = readPropertiesFile(sd.getVersionFile());
+    if (props == null) {
+      throw new IOException(
+          "Properties not found  for storage directory " + sd);
+    }
     if (HdfsServerConstants.RollingUpgradeStartupOption.ROLLBACK
         .matches(startupOption)) {
       int lv = Integer.parseInt(getProperty(props, sd, "layoutVersion"));
@@ -975,7 +979,11 @@ public class NNStorage extends Storage implements Closeable,
       StorageDirectory sd = sdit.next();
       try {
         Properties props = readPropertiesFile(sd.getVersionFile());
-        cid = props.getProperty("clusterID");
+        if (props == null) {
+          cid = null;
+        } else {
+          cid = props.getProperty("clusterID");
+        }
         LOG.info("current cluster id for sd="+sd.getCurrentDir() + 
             ";lv=" + layoutVersion + ";cid=" + cid);
         

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2630e4fd/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
index f0f2220..6df243f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
@@ -4461,6 +4461,84 @@
   </property>
 
   <property>
+    <name>dfs.provider.class</name>
+    <value>org.apache.hadoop.hdfs.server.common.TextFileRegionProvider</value>
+    <description>
+        The class that is used to load information about blocks stored in
+        provided storages.
+        org.apache.hadoop.hdfs.server.common.TextFileRegionProvider
+        is used as the default, which expects the blocks to be specified
+        using a delimited text file.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.provided.df.class</name>
+    <value>org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.DefaultProvidedVolumeDF</value>
+    <description>
+        The class that is used to measure usage statistics of provided stores.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.provided.storage.id</name>
+    <value>DS-PROVIDED</value>
+    <description>
+        The storage ID used for provided stores.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.provided.blockformat.class</name>
+    <value>org.apache.hadoop.hdfs.server.common.TextFileRegionFormat</value>
+    <description>
+      The class that is used to specify the input format of the blocks on
+      provided storages. The default is
+      org.apache.hadoop.hdfs.server.common.TextFileRegionFormat which uses
+      file regions to describe blocks. The file regions are specified as a
+      delimited text file. Each file region is a 6-tuple containing the
+      block id, remote file path, offset into file, length of block, the
+      block pool id containing the block, and the generation stamp of the
+      block.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.provided.textprovider.delimiter</name>
+    <value>,</value>
+    <description>
+        The delimiter used when the provided block map is specified as
+        a text file.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.provided.textprovider.read.path</name>
+    <value></value>
+    <description>
+        The path specifying the provided block map as a text file, specified as
+        a URI.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.provided.textprovider.read.codec</name>
+    <value></value>
+    <description>
+        The codec used to decompress the provided block map.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.provided.textprovider.write.path</name>
+    <value></value>
+    <description>
+        The path to which the provided block map should be written as a text
+        file, specified as a URI.
+    </description>
+  </property>
+
+  <property>
     <name>dfs.lock.suppress.warning.interval</name>
     <value>10s</value>
     <description>Instrumentation reporting long critical sections will suppress

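For illustration, a provided block map consumed by the default
org.apache.hadoop.hdfs.server.common.TextFileRegionFormat with the default ","
delimiter could look like the lines below; the block IDs, remote paths, block
pool ID and generation stamps are made-up values, and the column order follows
the 6-tuple listed in the dfs.provided.blockformat.class description above:

    4344,hdfs://remote-store/data/part-00000,0,134217728,BP-1234-10.10.0.1-1495040000000,1001
    4345,hdfs://remote-store/data/part-00000,134217728,134217728,BP-1234-10.10.0.1-1495040000000,1001

Each line maps one provided block to a byte range of a remote file;
dfs.provided.textprovider.read.path points at the URI of such a file and
dfs.provided.textprovider.delimiter controls the separator.
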
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2630e4fd/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRollback.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRollback.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRollback.java
index 25eb5b6..8bc8b0d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRollback.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRollback.java
@@ -208,7 +208,7 @@ public class TestDFSRollback {
       UpgradeUtilities.createDataNodeVersionFile(
           dataCurrentDirs,
           storageInfo,
-          UpgradeUtilities.getCurrentBlockPoolID(cluster));
+          UpgradeUtilities.getCurrentBlockPoolID(cluster), conf);
 
       cluster.startDataNodes(conf, 1, false, StartupOption.ROLLBACK, null);
       assertTrue(cluster.isDataNodeUp());
@@ -256,7 +256,7 @@ public class TestDFSRollback {
           NodeType.DATA_NODE);
       
       UpgradeUtilities.createDataNodeVersionFile(baseDirs, storageInfo,
-          UpgradeUtilities.getCurrentBlockPoolID(cluster));
+          UpgradeUtilities.getCurrentBlockPoolID(cluster), conf);
       
       startBlockPoolShouldFail(StartupOption.ROLLBACK, 
           cluster.getNamesystem().getBlockPoolId());
@@ -283,7 +283,7 @@ public class TestDFSRollback {
           NodeType.DATA_NODE);
      
       UpgradeUtilities.createDataNodeVersionFile(baseDirs, storageInfo,
-          UpgradeUtilities.getCurrentBlockPoolID(cluster));
+          UpgradeUtilities.getCurrentBlockPoolID(cluster), conf);
       
       startBlockPoolShouldFail(StartupOption.ROLLBACK, 
           cluster.getNamesystem().getBlockPoolId());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2630e4fd/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStartupVersions.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStartupVersions.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStartupVersions.java
index d202223..0c09eda 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStartupVersions.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStartupVersions.java
@@ -265,7 +265,7 @@ public class TestDFSStartupVersions {
           conf.getStrings(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY), "current");
       log("DataNode version info", DATA_NODE, i, versions[i]);
       UpgradeUtilities.createDataNodeVersionFile(storage,
-          versions[i].storageInfo, bpid, versions[i].blockPoolId);
+          versions[i].storageInfo, bpid, versions[i].blockPoolId, conf);
       try {
         cluster.startDataNodes(conf, 1, false, StartupOption.REGULAR, null);
       } catch (Exception ignore) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2630e4fd/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgrade.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgrade.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgrade.java
index fe1ede0..0d9f502 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgrade.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgrade.java
@@ -290,7 +290,7 @@ public class TestDFSUpgrade {
           UpgradeUtilities.getCurrentFsscTime(cluster), NodeType.DATA_NODE);
       
       UpgradeUtilities.createDataNodeVersionFile(baseDirs, storageInfo,
-          UpgradeUtilities.getCurrentBlockPoolID(cluster));
+          UpgradeUtilities.getCurrentBlockPoolID(cluster), conf);
       
       startBlockPoolShouldFail(StartupOption.REGULAR, UpgradeUtilities
           .getCurrentBlockPoolID(null));
@@ -308,7 +308,7 @@ public class TestDFSUpgrade {
           NodeType.DATA_NODE);
           
       UpgradeUtilities.createDataNodeVersionFile(baseDirs, storageInfo, 
-          UpgradeUtilities.getCurrentBlockPoolID(cluster));
+          UpgradeUtilities.getCurrentBlockPoolID(cluster), conf);
       // Ensure corresponding block pool failed to initialize
       startBlockPoolShouldFail(StartupOption.REGULAR, UpgradeUtilities
           .getCurrentBlockPoolID(null));

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2630e4fd/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/UpgradeUtilities.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/UpgradeUtilities.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/UpgradeUtilities.java
index b0504f0..174dea8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/UpgradeUtilities.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/UpgradeUtilities.java
@@ -386,8 +386,10 @@ public class UpgradeUtilities {
           new File(datanodeStorage.toString()));
       sd.setStorageUuid(DatanodeStorage.generateUuid());
       Properties properties = Storage.readPropertiesFile(sd.getVersionFile());
-      properties.setProperty("storageID", sd.getStorageUuid());
-      Storage.writeProperties(sd.getVersionFile(), properties);
+      if (properties != null) {
+        properties.setProperty("storageID", sd.getStorageUuid());
+        Storage.writeProperties(sd.getVersionFile(), properties);
+      }
 
       retVal[i] = newDir;
     }
@@ -463,8 +465,9 @@ public class UpgradeUtilities {
    * @param bpid Block pool Id
    */
   public static void createDataNodeVersionFile(File[] parent,
-      StorageInfo version, String bpid) throws IOException {
-    createDataNodeVersionFile(parent, version, bpid, bpid);
+      StorageInfo version, String bpid, Configuration conf)
+          throws IOException {
+    createDataNodeVersionFile(parent, version, bpid, bpid, conf);
   }
   
   /**
@@ -479,7 +482,8 @@ public class UpgradeUtilities {
    * @param bpidToWrite Block pool Id to write into the version file
    */
   public static void createDataNodeVersionFile(File[] parent,
-      StorageInfo version, String bpid, String bpidToWrite) throws IOException {
+      StorageInfo version, String bpid, String bpidToWrite, Configuration conf)
+          throws IOException {
     DataStorage storage = new DataStorage(version);
     storage.setDatanodeUuid("FixedDatanodeUuid");
 
@@ -487,7 +491,7 @@ public class UpgradeUtilities {
     for (int i = 0; i < parent.length; i++) {
       File versionFile = new File(parent[i], "VERSION");
       StorageDirectory sd = new StorageDirectory(parent[i].getParentFile());
-      DataStorage.createStorageID(sd, false);
+      DataStorage.createStorageID(sd, false, conf);
       storage.writeProperties(versionFile, sd);
       versionFiles[i] = versionFile;
       File bpDir = BlockPoolSliceStorage.getBpRoot(bpid, parent[i]);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2630e4fd/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/TestTextBlockFormat.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/TestTextBlockFormat.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/TestTextBlockFormat.java
new file mode 100644
index 0000000..eaaac22
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/TestTextBlockFormat.java
@@ -0,0 +1,160 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.common;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStreamWriter;
+import java.util.Iterator;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.server.common.TextFileRegionFormat.*;
+import org.apache.hadoop.io.DataInputBuffer;
+import org.apache.hadoop.io.DataOutputBuffer;
+import org.apache.hadoop.io.compress.CompressionCodec;
+
+import org.junit.Test;
+import static org.junit.Assert.*;
+
+/**
+ * Test for the text based block format for provided block maps.
+ */
+public class TestTextBlockFormat {
+
+  static final Path OUTFILE = new Path("hdfs://dummyServer:0000/dummyFile.txt");
+
+  void check(TextWriter.Options opts, final Path vp,
+      final Class<? extends CompressionCodec> vc) throws IOException {
+    TextFileRegionFormat mFmt = new TextFileRegionFormat() {
+      @Override
+      public TextWriter createWriter(Path file, CompressionCodec codec,
+          String delim, Configuration conf) throws IOException {
+        assertEquals(vp, file);
+        if (null == vc) {
+          assertNull(codec);
+        } else {
+          assertEquals(vc, codec.getClass());
+        }
+        return null; // ignored
+      }
+    };
+    mFmt.getWriter(opts);
+  }
+
+  @Test
+  public void testWriterOptions() throws Exception {
+    TextWriter.Options opts = TextWriter.defaults();
+    assertTrue(opts instanceof WriterOptions);
+    WriterOptions wopts = (WriterOptions) opts;
+    Path def = new Path(DFSConfigKeys.DFS_PROVIDED_BLOCK_MAP_PATH_DEFAULT);
+    assertEquals(def, wopts.getFile());
+    assertNull(wopts.getCodec());
+
+    opts.filename(OUTFILE);
+    check(opts, OUTFILE, null);
+
+    opts.filename(OUTFILE);
+    opts.codec("gzip");
+    Path cp = new Path(OUTFILE.getParent(), OUTFILE.getName() + ".gz");
+    check(opts, cp, org.apache.hadoop.io.compress.GzipCodec.class);
+
+  }
+
+  @Test
+  public void testCSVReadWrite() throws Exception {
+    final DataOutputBuffer out = new DataOutputBuffer();
+    FileRegion r1 = new FileRegion(4344L, OUTFILE, 0, 1024);
+    FileRegion r2 = new FileRegion(4345L, OUTFILE, 1024, 1024);
+    FileRegion r3 = new FileRegion(4346L, OUTFILE, 2048, 512);
+    try (TextWriter csv = new TextWriter(new OutputStreamWriter(out), ",")) {
+      csv.store(r1);
+      csv.store(r2);
+      csv.store(r3);
+    }
+    Iterator<FileRegion> i3;
+    try (TextReader csv = new TextReader(null, null, null, ",") {
+      @Override
+      public InputStream createStream() {
+        DataInputBuffer in = new DataInputBuffer();
+        in.reset(out.getData(), 0, out.getLength());
+        return in;
+        }}) {
+      Iterator<FileRegion> i1 = csv.iterator();
+      assertEquals(r1, i1.next());
+      Iterator<FileRegion> i2 = csv.iterator();
+      assertEquals(r1, i2.next());
+      assertEquals(r2, i2.next());
+      assertEquals(r3, i2.next());
+      assertEquals(r2, i1.next());
+      assertEquals(r3, i1.next());
+
+      assertFalse(i1.hasNext());
+      assertFalse(i2.hasNext());
+      i3 = csv.iterator();
+    }
+    try {
+      i3.next();
+    } catch (IllegalStateException e) {
+      return;
+    }
+    fail("Invalid iterator");
+  }
+
+  @Test
+  public void testCSVReadWriteTsv() throws Exception {
+    final DataOutputBuffer out = new DataOutputBuffer();
+    FileRegion r1 = new FileRegion(4344L, OUTFILE, 0, 1024);
+    FileRegion r2 = new FileRegion(4345L, OUTFILE, 1024, 1024);
+    FileRegion r3 = new FileRegion(4346L, OUTFILE, 2048, 512);
+    try (TextWriter csv = new TextWriter(new OutputStreamWriter(out), "\t")) {
+      csv.store(r1);
+      csv.store(r2);
+      csv.store(r3);
+    }
+    Iterator<FileRegion> i3;
+    try (TextReader csv = new TextReader(null, null, null, "\t") {
+      @Override
+      public InputStream createStream() {
+        DataInputBuffer in = new DataInputBuffer();
+        in.reset(out.getData(), 0, out.getLength());
+        return in;
+      }}) {
+      Iterator<FileRegion> i1 = csv.iterator();
+      assertEquals(r1, i1.next());
+      Iterator<FileRegion> i2 = csv.iterator();
+      assertEquals(r1, i2.next());
+      assertEquals(r2, i2.next());
+      assertEquals(r3, i2.next());
+      assertEquals(r2, i1.next());
+      assertEquals(r3, i1.next());
+
+      assertFalse(i1.hasNext());
+      assertFalse(i2.hasNext());
+      i3 = csv.iterator();
+    }
+    try {
+      i3.next();
+    } catch (IllegalStateException e) {
+      return;
+    }
+    fail("Invalid iterator");
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2630e4fd/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
index 18b4922..2c0775b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
@@ -54,6 +54,7 @@ import org.apache.hadoop.hdfs.server.datanode.fsdataset.DataNodeVolumeMetrics;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
+import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi.ScanInfo;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.LengthInputStream;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.ReplicaInputStreams;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.ReplicaOutputStreams;
@@ -616,7 +617,7 @@ public class SimulatedFSDataset implements FsDatasetSpi<FsVolumeSpi> {
     this.datanode = datanode;
     if (storage != null) {
       for (int i = 0; i < storage.getNumStorageDirs(); ++i) {
-        DataStorage.createStorageID(storage.getStorageDir(i), false);
+        DataStorage.createStorageID(storage.getStorageDir(i), false, conf);
       }
       this.datanodeUuid = storage.getDatanodeUuid();
     } else {
@@ -1351,8 +1352,7 @@ public class SimulatedFSDataset implements FsDatasetSpi<FsVolumeSpi> {
   }
 
   @Override
-  public void checkAndUpdate(String bpid, long blockId, File diskFile,
-      File diskMetaFile, FsVolumeSpi vol) throws IOException {
+  public void checkAndUpdate(String bpid, ScanInfo info) throws IOException {
     throw new UnsupportedOperationException();
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2630e4fd/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/extdataset/ExternalDatasetImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/extdataset/ExternalDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/extdataset/ExternalDatasetImpl.java
index 2e439d6..4539481 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/extdataset/ExternalDatasetImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/extdataset/ExternalDatasetImpl.java
@@ -32,6 +32,7 @@ import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
 import org.apache.hadoop.hdfs.server.datanode.*;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
+import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi.ScanInfo;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.LengthInputStream;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.ReplicaInputStreams;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.ReplicaOutputStreams;
@@ -94,8 +95,8 @@ public class ExternalDatasetImpl implements FsDatasetSpi<ExternalVolumeImpl> {
   }
 
   @Override
-  public void checkAndUpdate(String bpid, long blockId, File diskFile,
-      File diskMetaFile, FsVolumeSpi vol) {
+  public void checkAndUpdate(String bpid, ScanInfo info) {
+    return;
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2630e4fd/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java
index 3293561..25ff1e7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java
@@ -119,11 +119,12 @@ public class TestFsDatasetImpl {
   
   private final static String BLOCKPOOL = "BP-TEST";
 
-  private static Storage.StorageDirectory createStorageDirectory(File root)
+  private static Storage.StorageDirectory createStorageDirectory(File root,
+      Configuration conf)
       throws SecurityException, IOException {
     Storage.StorageDirectory sd = new Storage.StorageDirectory(
         StorageLocation.parse(root.toURI().toString()));
-    DataStorage.createStorageID(sd, false);
+    DataStorage.createStorageID(sd, false, conf);
     return sd;
   }
 
@@ -137,7 +138,7 @@ public class TestFsDatasetImpl {
       File loc = new File(BASE_DIR + "/data" + i);
       dirStrings.add(new Path(loc.toString()).toUri().toString());
       loc.mkdirs();
-      dirs.add(createStorageDirectory(loc));
+      dirs.add(createStorageDirectory(loc, conf));
       when(storage.getStorageDir(i)).thenReturn(dirs.get(i));
     }
 
@@ -197,7 +198,8 @@ public class TestFsDatasetImpl {
       String pathUri = new Path(path).toUri().toString();
       expectedVolumes.add(new File(pathUri).getAbsolutePath());
       StorageLocation loc = StorageLocation.parse(pathUri);
-      Storage.StorageDirectory sd = createStorageDirectory(new File(path));
+      Storage.StorageDirectory sd = createStorageDirectory(
+          new File(path), conf);
       DataStorage.VolumeBuilder builder =
           new DataStorage.VolumeBuilder(storage, sd);
       when(storage.prepareVolume(eq(datanode), eq(loc),
@@ -315,7 +317,8 @@ public class TestFsDatasetImpl {
     String newVolumePath = BASE_DIR + "/newVolumeToRemoveLater";
     StorageLocation loc = StorageLocation.parse(newVolumePath);
 
-    Storage.StorageDirectory sd = createStorageDirectory(new File(newVolumePath));
+    Storage.StorageDirectory sd = createStorageDirectory(
+        new File(newVolumePath), conf);
     DataStorage.VolumeBuilder builder =
         new DataStorage.VolumeBuilder(storage, sd);
     when(storage.prepareVolume(eq(datanode), eq(loc),
@@ -348,7 +351,7 @@ public class TestFsDatasetImpl {
         any(ReplicaMap.class),
         any(RamDiskReplicaLruTracker.class));
 
-    Storage.StorageDirectory sd = createStorageDirectory(badDir);
+    Storage.StorageDirectory sd = createStorageDirectory(badDir, conf);
     sd.lock();
     DataStorage.VolumeBuilder builder = new DataStorage.VolumeBuilder(storage, sd);
     when(storage.prepareVolume(eq(datanode),
@@ -492,7 +495,7 @@ public class TestFsDatasetImpl {
     String path = BASE_DIR + "/newData0";
     String pathUri = new Path(path).toUri().toString();
     StorageLocation loc = StorageLocation.parse(pathUri);
-    Storage.StorageDirectory sd = createStorageDirectory(new File(path));
+    Storage.StorageDirectory sd = createStorageDirectory(new File(path), conf);
     DataStorage.VolumeBuilder builder =
         new DataStorage.VolumeBuilder(storage, sd);
     when(

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2630e4fd/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestProvidedImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestProvidedImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestProvidedImpl.java
new file mode 100644
index 0000000..2c119fe
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestProvidedImpl.java
@@ -0,0 +1,426 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl;
+
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SCAN_PERIOD_HOURS_KEY;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.FileNotFoundException;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStreamWriter;
+import java.io.Writer;
+import java.nio.ByteBuffer;
+import java.nio.channels.Channels;
+import java.nio.channels.ReadableByteChannel;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+import org.apache.commons.io.FileUtils;
+import org.apache.hadoop.conf.Configurable;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystemTestHelper;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.server.common.FileRegion;
+import org.apache.hadoop.hdfs.server.common.FileRegionProvider;
+import org.apache.hadoop.hdfs.server.common.Storage;
+import org.apache.hadoop.hdfs.server.datanode.BlockScanner;
+import org.apache.hadoop.hdfs.server.datanode.DNConf;
+import org.apache.hadoop.hdfs.server.datanode.DataNode;
+import org.apache.hadoop.hdfs.server.datanode.DataStorage;
+import org.apache.hadoop.hdfs.server.datanode.DirectoryScanner;
+import org.apache.hadoop.hdfs.server.datanode.ReplicaInfo;
+import org.apache.hadoop.hdfs.server.datanode.ShortCircuitRegistry;
+import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
+import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
+import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
+import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi.BlockIterator;
+import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi.FsVolumeReferences;
+import org.apache.hadoop.util.AutoCloseableLock;
+import org.apache.hadoop.util.StringUtils;
+import org.junit.Before;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Basic test cases for provided implementation.
+ */
+public class TestProvidedImpl {
+  private static final Logger LOG =
+      LoggerFactory.getLogger(TestFsDatasetImpl.class);
+  private static final String BASE_DIR =
+      new FileSystemTestHelper().getTestRootDir();
+  private static final int NUM_LOCAL_INIT_VOLUMES = 1;
+  private static final int NUM_PROVIDED_INIT_VOLUMES = 1;
+  private static final String[] BLOCK_POOL_IDS = {"bpid-0", "bpid-1"};
+  private static final int NUM_PROVIDED_BLKS = 10;
+  private static final long BLK_LEN = 128 * 1024;
+  private static final int MIN_BLK_ID = 0;
+  private static final int CHOSEN_BP_ID = 0;
+
+  private static String providedBasePath = BASE_DIR;
+
+  private Configuration conf;
+  private DataNode datanode;
+  private DataStorage storage;
+  private FsDatasetImpl dataset;
+  private static Map<Long, String> blkToPathMap;
+  private static List<FsVolumeImpl> providedVolumes;
+
+  /**
+   * A simple FileRegion iterator for tests.
+   */
+  public static class TestFileRegionIterator implements Iterator<FileRegion> {
+
+    private int numBlocks;
+    private int currentCount;
+    private String basePath;
+
+    public TestFileRegionIterator(String basePath, int minID, int numBlocks) {
+      this.currentCount = minID;
+      this.numBlocks = numBlocks;
+      this.basePath = basePath;
+    }
+
+    @Override
+    public boolean hasNext() {
+      return currentCount < numBlocks;
+    }
+
+    @Override
+    public FileRegion next() {
+      FileRegion region = null;
+      if (hasNext()) {
+        File newFile = new File(basePath, "file" + currentCount);
+        if(!newFile.exists()) {
+          try {
+            LOG.info("Creating file for blkid " + currentCount);
+            blkToPathMap.put((long) currentCount, newFile.getAbsolutePath());
+            LOG.info("Block id " + currentCount + " corresponds to file " +
+                newFile.getAbsolutePath());
+            newFile.createNewFile();
+            Writer writer = new OutputStreamWriter(
+                new FileOutputStream(newFile.getAbsolutePath()), "utf-8");
+            for(int i=0; i< BLK_LEN/(Integer.SIZE/8); i++) {
+              writer.write(currentCount);
+            }
+            writer.flush();
+            writer.close();
+          } catch (IOException e) {
+            e.printStackTrace();
+          }
+        }
+        region = new FileRegion(currentCount, new Path(newFile.toString()),
+            0, BLK_LEN, BLOCK_POOL_IDS[CHOSEN_BP_ID]);
+        currentCount++;
+      }
+      return region;
+    }
+
+    @Override
+    public void remove() {
+      //do nothing.
+    }
+
+    public void resetMinBlockId(int minId) {
+      currentCount = minId;
+    }
+
+    public void resetBlockCount(int numBlocks) {
+      this.numBlocks = numBlocks;
+    }
+
+  }
+
+  /**
+   * A simple FileRegion provider for tests.
+   */
+  public static class TestFileRegionProvider
+      extends FileRegionProvider implements Configurable {
+
+    private Configuration conf;
+    private int minId;
+    private int numBlocks;
+
+    TestFileRegionProvider() {
+      minId = MIN_BLK_ID;
+      numBlocks = NUM_PROVIDED_BLKS;
+    }
+
+    @Override
+    public Iterator<FileRegion> iterator() {
+      return new TestFileRegionIterator(providedBasePath, minId, numBlocks);
+    }
+
+    @Override
+    public void setConf(Configuration conf) {
+      this.conf = conf;
+    }
+
+    @Override
+    public Configuration getConf() {
+      return conf;
+    }
+
+    @Override
+    public void refresh() {
+      //do nothing!
+    }
+
+    public void setMinBlkId(int minId) {
+      this.minId = minId;
+    }
+
+    public void setBlockCount(int numBlocks) {
+      this.numBlocks = numBlocks;
+    }
+  }
+
+  private static Storage.StorageDirectory createLocalStorageDirectory(
+      File root, Configuration conf)
+      throws SecurityException, IOException {
+    Storage.StorageDirectory sd =
+        new Storage.StorageDirectory(
+            StorageLocation.parse(root.toURI().toString()));
+    DataStorage.createStorageID(sd, false, conf);
+    return sd;
+  }
+
+  private static Storage.StorageDirectory createProvidedStorageDirectory(
+      String confString, Configuration conf)
+      throws SecurityException, IOException {
+    Storage.StorageDirectory sd =
+        new Storage.StorageDirectory(StorageLocation.parse(confString));
+    DataStorage.createStorageID(sd, false, conf);
+    return sd;
+  }
+
+  private static void createStorageDirs(DataStorage storage,
+      Configuration conf, int numDirs, int numProvidedDirs)
+          throws IOException {
+    List<Storage.StorageDirectory> dirs =
+        new ArrayList<Storage.StorageDirectory>();
+    List<String> dirStrings = new ArrayList<String>();
+    FileUtils.deleteDirectory(new File(BASE_DIR));
+    for (int i = 0; i < numDirs; i++) {
+      File loc = new File(BASE_DIR, "data" + i);
+      dirStrings.add(new Path(loc.toString()).toUri().toString());
+      loc.mkdirs();
+      dirs.add(createLocalStorageDirectory(loc, conf));
+      when(storage.getStorageDir(i)).thenReturn(dirs.get(i));
+    }
+
+    for (int i = numDirs; i < numDirs + numProvidedDirs; i++) {
+      File loc = new File(BASE_DIR, "data" + i);
+      providedBasePath = loc.getAbsolutePath();
+      loc.mkdirs();
+      String dirString = "[PROVIDED]" +
+          new Path(loc.toString()).toUri().toString();
+      dirStrings.add(dirString);
+      dirs.add(createProvidedStorageDirectory(dirString, conf));
+      when(storage.getStorageDir(i)).thenReturn(dirs.get(i));
+    }
+
+    String dataDir = StringUtils.join(",", dirStrings);
+    conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, dataDir);
+    when(storage.dirIterator()).thenReturn(dirs.iterator());
+    when(storage.getNumStorageDirs()).thenReturn(numDirs + numProvidedDirs);
+  }
+
+  private int getNumVolumes() {
+    try (FsDatasetSpi.FsVolumeReferences volumes =
+        dataset.getFsVolumeReferences()) {
+      return volumes.size();
+    } catch (IOException e) {
+      return 0;
+    }
+  }
+
+  private void compareBlkFile(InputStream ins, String filepath)
+      throws FileNotFoundException, IOException {
+    try (ReadableByteChannel i = Channels.newChannel(
+        new FileInputStream(new File(filepath)))) {
+      try (ReadableByteChannel j = Channels.newChannel(ins)) {
+        ByteBuffer ib = ByteBuffer.allocate(4096);
+        ByteBuffer jb = ByteBuffer.allocate(4096);
+        while (true) {
+          int il = i.read(ib);
+          int jl = j.read(jb);
+          if (il < 0 || jl < 0) {
+            assertEquals(il, jl);
+            break;
+          }
+          ib.flip();
+          jb.flip();
+          int cmp = Math.min(ib.remaining(), jb.remaining());
+          for (int k = 0; k < cmp; ++k) {
+            assertEquals(ib.get(), jb.get());
+          }
+          ib.compact();
+          jb.compact();
+        }
+      }
+    }
+  }
+
+  @Before
+  public void setUp() throws IOException {
+    datanode = mock(DataNode.class);
+    storage = mock(DataStorage.class);
+    this.conf = new Configuration();
+    this.conf.setLong(DFS_DATANODE_SCAN_PERIOD_HOURS_KEY, 0);
+
+    when(datanode.getConf()).thenReturn(conf);
+    final DNConf dnConf = new DNConf(datanode);
+    when(datanode.getDnConf()).thenReturn(dnConf);
+
+    final BlockScanner disabledBlockScanner = new BlockScanner(datanode, conf);
+    when(datanode.getBlockScanner()).thenReturn(disabledBlockScanner);
+    final ShortCircuitRegistry shortCircuitRegistry =
+        new ShortCircuitRegistry(conf);
+    when(datanode.getShortCircuitRegistry()).thenReturn(shortCircuitRegistry);
+
+    this.conf.setClass(DFSConfigKeys.DFS_PROVIDER_CLASS,
+        TestFileRegionProvider.class, FileRegionProvider.class);
+
+    blkToPathMap = new HashMap<Long, String>();
+    providedVolumes = new LinkedList<FsVolumeImpl>();
+
+    createStorageDirs(
+        storage, conf, NUM_LOCAL_INIT_VOLUMES, NUM_PROVIDED_INIT_VOLUMES);
+
+    dataset = new FsDatasetImpl(datanode, storage, conf);
+    FsVolumeReferences volumes = dataset.getFsVolumeReferences();
+    for (int i = 0; i < volumes.size(); i++) {
+      FsVolumeSpi vol = volumes.get(i);
+      if (vol.getStorageType() == StorageType.PROVIDED) {
+        providedVolumes.add((FsVolumeImpl) vol);
+      }
+    }
+
+    for (String bpid : BLOCK_POOL_IDS) {
+      dataset.addBlockPool(bpid, conf);
+    }
+
+    assertEquals(NUM_LOCAL_INIT_VOLUMES + NUM_PROVIDED_INIT_VOLUMES,
+        getNumVolumes());
+    assertEquals(0, dataset.getNumFailedVolumes());
+  }
+
+  @Test
+  public void testProvidedStorageID() throws IOException {
+    for (int i = 0; i < providedVolumes.size(); i++) {
+      assertEquals(DFSConfigKeys.DFS_PROVIDER_STORAGEUUID_DEFAULT,
+          providedVolumes.get(i).getStorageID());
+    }
+  }
+
+  @Test
+  public void testBlockLoad() throws IOException {
+    for (int i = 0; i < providedVolumes.size(); i++) {
+      FsVolumeImpl vol = providedVolumes.get(i);
+      ReplicaMap volumeMap = new ReplicaMap(new AutoCloseableLock());
+      vol.getVolumeMap(volumeMap, null);
+
+      assertEquals(vol.getBlockPoolList().length, BLOCK_POOL_IDS.length);
+      for (int j = 0; j < BLOCK_POOL_IDS.length; j++) {
+        if (j != CHOSEN_BP_ID) {
+          //this block pool should not have any blocks
+          assertEquals(null, volumeMap.replicas(BLOCK_POOL_IDS[j]));
+        }
+      }
+      assertEquals(NUM_PROVIDED_BLKS,
+          volumeMap.replicas(BLOCK_POOL_IDS[CHOSEN_BP_ID]).size());
+    }
+  }
+
+  @Test
+  public void testProvidedBlockRead() throws IOException {
+    for (int id = 0; id < NUM_PROVIDED_BLKS; id++) {
+      ExtendedBlock eb = new ExtendedBlock(
+          BLOCK_POOL_IDS[CHOSEN_BP_ID], id, BLK_LEN,
+          HdfsConstants.GRANDFATHER_GENERATION_STAMP);
+      InputStream ins = dataset.getBlockInputStream(eb, 0);
+      String filepath = blkToPathMap.get((long) id);
+      compareBlkFile(ins, filepath);
+    }
+  }
+
+  @Test
+  public void testProvidedBlockIterator() throws IOException {
+    for (int i = 0; i < providedVolumes.size(); i++) {
+      FsVolumeImpl vol = providedVolumes.get(i);
+      BlockIterator iter =
+          vol.newBlockIterator(BLOCK_POOL_IDS[CHOSEN_BP_ID], "temp");
+      Set<Long> blockIdsUsed = new HashSet<Long>();
+      while(!iter.atEnd()) {
+        ExtendedBlock eb = iter.nextBlock();
+        long blkId = eb.getBlockId();
+        assertTrue(blkId >= MIN_BLK_ID && blkId < NUM_PROVIDED_BLKS);
+        //all block ids must be unique!
+        assertTrue(!blockIdsUsed.contains(blkId));
+        blockIdsUsed.add(blkId);
+      }
+      assertEquals(NUM_PROVIDED_BLKS, blockIdsUsed.size());
+    }
+  }
+
+
+  @Test
+  public void testRefresh() throws IOException {
+    conf.setInt(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_THREADS_KEY, 1);
+    for (int i = 0; i < providedVolumes.size(); i++) {
+      ProvidedVolumeImpl vol = (ProvidedVolumeImpl) providedVolumes.get(i);
+      TestFileRegionProvider provider = (TestFileRegionProvider)
+          vol.getFileRegionProvider(BLOCK_POOL_IDS[CHOSEN_BP_ID]);
+      //equivalent to two new blocks appearing
+      provider.setBlockCount(NUM_PROVIDED_BLKS + 2);
+      //equivalent to deleting the first block
+      provider.setMinBlkId(MIN_BLK_ID + 1);
+
+      DirectoryScanner scanner = new DirectoryScanner(datanode, dataset, conf);
+      scanner.reconcile();
+      ReplicaInfo info = dataset.getBlockReplica(
+          BLOCK_POOL_IDS[CHOSEN_BP_ID], NUM_PROVIDED_BLKS + 1);
+      //new replica should be added to the dataset
+      assertTrue(info != null);
+      try {
+        info = dataset.getBlockReplica(BLOCK_POOL_IDS[CHOSEN_BP_ID], 0);
+      } catch(Exception ex) {
+        LOG.info("Exception expected: " + ex);
+      }
+    }
+  }
+}

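TestProvidedImpl above selects its TestFileRegionProvider through the
configuration; a minimal sketch of the same wiring for a custom provider is
shown below. The MyFileRegionProvider class is hypothetical, and the sketch
assumes the DFSConfigKeys.DFS_PROVIDER_CLASS key and the
org.apache.hadoop.hdfs.server.common.FileRegionProvider base class introduced
on this branch, as used in the test's setUp():

    import java.util.Collections;
    import java.util.Iterator;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSConfigKeys;
    import org.apache.hadoop.hdfs.server.common.FileRegion;
    import org.apache.hadoop.hdfs.server.common.FileRegionProvider;

    /** Hypothetical provider used only to illustrate the wiring. */
    public class MyFileRegionProvider extends FileRegionProvider {

      @Override
      public Iterator<FileRegion> iterator() {
        // Enumerate the provided blocks; an empty iterator stands in for
        // reading an external block map.
        return Collections.<FileRegion>emptyIterator();
      }

      @Override
      public void refresh() {
        // Re-read the external block map if it can change at runtime.
      }

      /** Mirrors the conf.setClass(...) call in TestProvidedImpl#setUp. */
      public static Configuration configure(Configuration conf) {
        conf.setClass(DFSConfigKeys.DFS_PROVIDER_CLASS,
            MyFileRegionProvider.class, FileRegionProvider.class);
        return conf;
      }
    }

Whether such a provider also needs to implement Configurable, as the test
provider does, depends on whether it needs access to the Configuration at
runtime.
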
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2630e4fd/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestClusterId.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestClusterId.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestClusterId.java
index fa3399b..236627e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestClusterId.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestClusterId.java
@@ -64,7 +64,10 @@ public class TestClusterId {
       fsImage.getStorage().dirIterator(NNStorage.NameNodeDirType.IMAGE);
     StorageDirectory sd = sdit.next();
     Properties props = Storage.readPropertiesFile(sd.getVersionFile());
-    String cid = props.getProperty("clusterID");
+    String cid = null;
+    if (props != null) {
+      cid = props.getProperty("clusterID");
+    }
     LOG.info("successfully formated : sd="+sd.getCurrentDir() + ";cid="+cid);
     return cid;
   }

