Posted to common-commits@hadoop.apache.org by zh...@apache.org on 2015/01/29 19:05:44 UTC

[01/34] hadoop git commit: HADOOP-4297. Enable Java assertions when running tests. Contributed by Tsz Wo Nicholas Sze.

Repository: hadoop
Updated Branches:
  refs/heads/HDFS-EC 7c85bb3a4 -> 570914e8a


HADOOP-4297. Enable Java assertions when running tests. Contributed by Tsz Wo Nicholas Sze.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/803dd112
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/803dd112
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/803dd112

Branch: refs/heads/HDFS-EC
Commit: 803dd1129ea142456088cd0b63cbed31e5637cc0
Parents: cfb00e7
Author: Haohui Mai <wh...@apache.org>
Authored: Tue Jan 27 11:05:55 2015 -0800
Committer: Zhe Zhang <zh...@apache.org>
Committed: Thu Jan 29 10:05:23 2015 -0800

----------------------------------------------------------------------
 hadoop-common-project/hadoop-common/CHANGES.txt |  3 ++
 .../org/apache/hadoop/test/TestJUnitSetup.java  | 39 ++++++++++++++++++++
 2 files changed, 42 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/803dd112/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index f1aab62..fce2c81 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -502,6 +502,9 @@ Release 2.7.0 - UNRELEASED
     HADOOP-11450. Cleanup DistCpV1 not to use deprecated methods and fix
     javadocs. (Varun Saxena via ozawa)
 
+    HADOOP-4297. Enable Java assertions when running tests.
+    (Tsz Wo Nicholas Sze via wheat9)
+
   OPTIMIZATIONS
 
     HADOOP-11323. WritableComparator#compare keeps reference to byte array.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/803dd112/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/TestJUnitSetup.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/TestJUnitSetup.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/TestJUnitSetup.java
new file mode 100644
index 0000000..d6ae04d
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/TestJUnitSetup.java
@@ -0,0 +1,39 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.test;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.junit.Assert;
+import org.junit.Test;
+
+public class TestJUnitSetup {
+  public static final Log LOG = LogFactory.getLog(TestJUnitSetup.class);
+
+  @Test
+  public void testJavaAssert() {
+    try {
+      assert false : "Good! Java assert is on.";
+    } catch(AssertionError ae) {
+      LOG.info("The AssertionError is expected.", ae);
+      return;
+    }
+    Assert.fail("Java assert does not work.");
+  }
+}

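For reference, a minimal sketch of the standard idiom for checking from plain
Java whether assertions are enabled in the running JVM (the class name is
hypothetical and not part of this patch); it relies on the fact that an assert
statement, including its side effect, is skipped entirely unless the JVM was
started with -ea:

public class AssertStatusCheck {
  public static void main(String[] args) {
    boolean assertionsEnabled = false;
    // The assignment below only runs when assertions are enabled.
    assert assertionsEnabled = true;
    System.out.println("Java assertions enabled: " + assertionsEnabled);
  }
}

Run with "java -ea AssertStatusCheck" this prints true, which is the condition
TestJUnitSetup#testJavaAssert above now verifies for the test JVMs.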

[25/34] hadoop git commit: HADOOP-11316. "mvn package -Pdist,docs -DskipTests -Dtar" fails because of non-ascii characters. (ozawa)

Posted by zh...@apache.org.
HADOOP-11316. "mvn package -Pdist,docs -DskipTests -Dtar" fails because of non-ascii characters. (ozawa)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3a5a90de
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3a5a90de
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3a5a90de

Branch: refs/heads/HDFS-EC
Commit: 3a5a90de82bf36b9e66ab1f44d6485b9c5690b67
Parents: 3397c23
Author: Tsuyoshi Ozawa <oz...@apache.org>
Authored: Wed Jan 28 22:01:16 2015 +0900
Committer: Zhe Zhang <zh...@apache.org>
Committed: Thu Jan 29 10:05:26 2015 -0800

----------------------------------------------------------------------
 hadoop-common-project/hadoop-common/CHANGES.txt                   | 3 +++
 .../java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java  | 2 +-
 .../src/main/java/org/apache/hadoop/util/ComparableVersion.java   | 2 --
 3 files changed, 4 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3a5a90de/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 7ba6148..f78de23 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -779,6 +779,9 @@ Release 2.7.0 - UNRELEASED
     HADOOP-11469. KMS should skip default.key.acl and whitelist.key.acl when
     loading key acl. (Dian Fu via yliu)
 
+    HADOOP-11316. "mvn package -Pdist,docs -DskipTests -Dtar" fails because
+    of non-ascii characters. (ozawa)
+
 Release 2.6.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3a5a90de/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
index e4ee78a..36989bd 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
@@ -307,7 +307,7 @@ public class CommonConfigurationKeysPublic {
   public static final String  HADOOP_SECURITY_IMPERSONATION_PROVIDER_CLASS =
     "hadoop.security.impersonation.provider.class";
 
-  //  <!--- KMSClientProvider configurations —>
+  //  <!-- KMSClientProvider configurations -->
   /** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
   public static final String KMS_CLIENT_ENC_KEY_CACHE_SIZE =
       "hadoop.security.kms.client.encrypted.key.cache.size";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3a5a90de/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ComparableVersion.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ComparableVersion.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ComparableVersion.java
index ebe46d8..65d85f7 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ComparableVersion.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ComparableVersion.java
@@ -9,8 +9,6 @@
 //      to
 //        package org.apache.hadoop.util;
 //   2. Removed author tags to clear hadoop author tag warning
-//        author <a href="mailto:kenney@apache.org">Kenney Westerhof</a>
-//        author <a href="mailto:hboutemy@apache.org">Hervé Boutemy</a>
 //
 package org.apache.hadoop.util;
 

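Because failures like this one are caused by stray non-ASCII bytes in source
files, a small scanning sketch may be useful context. It is not part of the
patch; the class name and argument handling are illustrative only:

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Paths;

public class NonAsciiScan {
  public static void main(String[] args) throws IOException {
    byte[] bytes = Files.readAllBytes(Paths.get(args[0]));
    for (int i = 0, line = 1, col = 1; i < bytes.length; i++, col++) {
      if (bytes[i] == '\n') {
        line++;
        col = 0;  // the loop increment moves this to 1 for the next character
        continue;
      }
      if ((bytes[i] & 0xFF) > 0x7F) {  // anything outside 7-bit US-ASCII
        System.out.printf("non-ASCII byte 0x%02X at line %d, column %d%n",
            bytes[i] & 0xFF, line, col);
      }
    }
  }
}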

[32/34] hadoop git commit: MAPREDUCE-6231. Grep example job is not working on a fully-distributed cluster. (aajisaka)

Posted by zh...@apache.org.
MAPREDUCE-6231. Grep example job is not working on a fully-distributed cluster. (aajisaka)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/dd72a41a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/dd72a41a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/dd72a41a

Branch: refs/heads/HDFS-EC
Commit: dd72a41ae7e83c3797119e1365b69d5598ceedde
Parents: 899b5e1
Author: Akira Ajisaka <aa...@apache.org>
Authored: Fri Jan 30 01:10:02 2015 +0900
Committer: Zhe Zhang <zh...@apache.org>
Committed: Thu Jan 29 10:05:27 2015 -0800

----------------------------------------------------------------------
 hadoop-mapreduce-project/CHANGES.txt                              | 3 +++
 .../src/main/java/org/apache/hadoop/examples/Grep.java            | 2 ++
 2 files changed, 5 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/dd72a41a/hadoop-mapreduce-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt
index b576c29..39ff8cc 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -317,6 +317,9 @@ Release 2.7.0 - UNRELEASED
     MAPREDUCE-6230. Fixed RMContainerAllocator to update the new AMRMToken
     service name properly. (Jason Lowe via jianhe)
 
+    MAPREDUCE-6231. Grep example job is not working on a fully-distributed
+    cluster. (aajisaka)
+
 Release 2.6.0 - 2014-11-18
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dd72a41a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/Grep.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/Grep.java b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/Grep.java
index 421afcd..810d0dd 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/Grep.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/Grep.java
@@ -61,6 +61,7 @@ public class Grep extends Configured implements Tool {
     try {
       
       grepJob.setJobName("grep-search");
+      grepJob.setJarByClass(Grep.class);
 
       FileInputFormat.setInputPaths(grepJob, args[0]);
 
@@ -78,6 +79,7 @@ public class Grep extends Configured implements Tool {
 
       Job sortJob = new Job(conf);
       sortJob.setJobName("grep-sort");
+      sortJob.setJarByClass(Grep.class);
 
       FileInputFormat.setInputPaths(sortJob, tempDir);
       sortJob.setInputFormatClass(SequenceFileInputFormat.class);

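The two setJarByClass calls added above work because Job#setJarByClass tells
the MapReduce framework which jar contains the given class, so that jar can be
shipped to the cluster; without it, tasks on a fully-distributed cluster fail
to load the example classes. A minimal, self-contained driver following the
same pattern is sketched below (the class name, mapper/reducer choices, and
paths are illustrative and not taken from the patch):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.map.TokenCounterMapper;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.reduce.IntSumReducer;

public class WordCountDriver {
  public static void main(String[] args) throws Exception {
    Job job = Job.getInstance(new Configuration(), "word-count");
    // Without this call the job jar is not localized on the cluster and
    // tasks cannot find the mapper/reducer classes.
    job.setJarByClass(WordCountDriver.class);
    job.setMapperClass(TokenCounterMapper.class);
    job.setCombinerClass(IntSumReducer.class);
    job.setReducerClass(IntSumReducer.class);
    job.setOutputKeyClass(Text.class);
    job.setOutputValueClass(IntWritable.class);
    FileInputFormat.addInputPath(job, new Path(args[0]));
    FileOutputFormat.setOutputPath(job, new Path(args[1]));
    System.exit(job.waitForCompletion(true) ? 0 : 1);
  }
}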

[15/34] hadoop git commit: HDFS-7675. Remove unused member DFSClient.spanReceiverHost (cmccabe)

Posted by zh...@apache.org.
HDFS-7675. Remove unused member DFSClient.spanReceiverHost (cmccabe)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f05809cb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f05809cb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f05809cb

Branch: refs/heads/HDFS-EC
Commit: f05809cb6163fd8079da3d50894d3d5fea5c3f47
Parents: 93f6e7a
Author: Colin Patrick Mccabe <cm...@cloudera.com>
Authored: Tue Jan 27 17:04:20 2015 -0800
Committer: Zhe Zhang <zh...@apache.org>
Committed: Thu Jan 29 10:05:25 2015 -0800

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt                       | 2 ++
 .../src/main/java/org/apache/hadoop/hdfs/DFSClient.java           | 3 +--
 2 files changed, 3 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f05809cb/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 1f036b8..973f2f0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -554,6 +554,8 @@ Release 2.7.0 - UNRELEASED
     HDFS-7683. Combine usages and percent stats in NameNode UI.
     (Vinayakumar B via wheat9)
 
+    HDFS-7675. Remove unused member DFSClient#spanReceiverHost (cmccabe)
+
   OPTIMIZATIONS
 
     HDFS-7454. Reduce memory footprint for AclEntries in NameNode.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f05809cb/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index 8512156..b4d541e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -275,7 +275,6 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
   private static ThreadPoolExecutor HEDGED_READ_THREAD_POOL;
   @VisibleForTesting
   KeyProvider provider;
-  private final SpanReceiverHost spanReceiverHost;
   private final Sampler<?> traceSampler;
 
   /**
@@ -620,7 +619,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
   public DFSClient(URI nameNodeUri, ClientProtocol rpcNamenode,
       Configuration conf, FileSystem.Statistics stats)
     throws IOException {
-    spanReceiverHost = SpanReceiverHost.getInstance(conf);
+    SpanReceiverHost.getInstance(conf);
     traceSampler = TraceSamplerFactory.createSampler(conf);
     // Copy only the required DFSClient configuration
     this.dfsClientConf = new Conf(conf);


[34/34] hadoop git commit: HADOOP-9907. Webapp http://hostname:port/metrics link is not working. (aajisaka)

Posted by zh...@apache.org.
HADOOP-9907. Webapp http://hostname:port/metrics link is not working. (aajisaka)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/570914e8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/570914e8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/570914e8

Branch: refs/heads/HDFS-EC
Commit: 570914e8a5ef250816fb899cd85e73ef7f79d344
Parents: 4941e5e
Author: Akira Ajisaka <aa...@apache.org>
Authored: Fri Jan 30 02:49:10 2015 +0900
Committer: Zhe Zhang <zh...@apache.org>
Committed: Thu Jan 29 10:05:28 2015 -0800

----------------------------------------------------------------------
 hadoop-common-project/hadoop-common/CHANGES.txt                   | 3 +++
 .../java/org/apache/hadoop/mapreduce/v2/app/webapp/NavBlock.java  | 2 +-
 .../java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsNavBlock.java | 2 +-
 .../main/java/org/apache/hadoop/yarn/webapp/view/NavBlock.java    | 2 +-
 .../apache/hadoop/yarn/server/nodemanager/webapp/NavBlock.java    | 2 +-
 .../hadoop/yarn/server/resourcemanager/webapp/NavBlock.java       | 2 +-
 6 files changed, 8 insertions(+), 5 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/570914e8/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 6550135..890a087 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -788,6 +788,9 @@ Release 2.7.0 - UNRELEASED
     HADOOP-11316. "mvn package -Pdist,docs -DskipTests -Dtar" fails because
     of non-ascii characters. (ozawa)
 
+    HADOOP-9907. Webapp http://hostname:port/metrics link is not working.
+    (aajisaka)
+
 Release 2.6.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/570914e8/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/NavBlock.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/NavBlock.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/NavBlock.java
index 0edeb16..4eed7e3 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/NavBlock.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/NavBlock.java
@@ -83,6 +83,6 @@ public class NavBlock extends HtmlBlock {
         li().a("/conf", "Configuration")._().
         li().a("/logs", "Local logs")._().
         li().a("/stacks", "Server stacks")._().
-        li().a("/metrics", "Server metrics")._()._()._();
+        li().a("/jmx?qry=Hadoop:*", "Server metrics")._()._()._();
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/570914e8/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsNavBlock.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsNavBlock.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsNavBlock.java
index 231e362..7e49d52 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsNavBlock.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsNavBlock.java
@@ -70,6 +70,6 @@ public class HsNavBlock extends HtmlBlock {
           li().a("/conf", "Configuration")._().
           li().a("/logs", "Local logs")._().
           li().a("/stacks", "Server stacks")._().
-          li().a("/metrics", "Server metrics")._()._()._();
+          li().a("/jmx?qry=Hadoop:*", "Server metrics")._()._()._();
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/570914e8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/NavBlock.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/NavBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/NavBlock.java
index febc818..cdc13eb 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/NavBlock.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/NavBlock.java
@@ -36,6 +36,6 @@ public class NavBlock extends HtmlBlock {
           li().a("/conf", "Configuration")._().
           li().a("/stacks", "Thread dump")._().
           li().a("/logs", "Logs")._().
-          li().a("/metrics", "Metrics")._()._()._();
+          li().a("/jmx?qry=Hadoop:*", "Metrics")._()._()._();
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/570914e8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NavBlock.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NavBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NavBlock.java
index c198ae6..1c3b63b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NavBlock.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NavBlock.java
@@ -59,7 +59,7 @@ public class NavBlock extends HtmlBlock implements YarnWebParams {
           .li().a("/conf", "Configuration")._()
           .li().a("/logs", "Local logs")._()
           .li().a("/stacks", "Server stacks")._()
-          .li().a("/metrics", "Server metrics")._()._()._();
+          .li().a("/jmx?qry=Hadoop:*", "Server metrics")._()._()._();
   }
 
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/570914e8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NavBlock.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NavBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NavBlock.java
index db00bb0..48df391 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NavBlock.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NavBlock.java
@@ -51,6 +51,6 @@ public class NavBlock extends HtmlBlock {
           li().a("/conf", "Configuration")._().
           li().a("/logs", "Local logs")._().
           li().a("/stacks", "Server stacks")._().
-          li().a("/metrics", "Server metrics")._()._()._();
+          li().a("/jmx?qry=Hadoop:*", "Server metrics")._()._()._();
   }
 }

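The links now point at the JMX JSON servlet that Hadoop daemons already serve
under /jmx, with the query string restricting output to Hadoop MBeans. A
minimal sketch of fetching the same data programmatically (class name
hypothetical; host and port are placeholders for whichever daemon web UI is
being queried):

import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.HttpURLConnection;
import java.net.URL;
import java.nio.charset.StandardCharsets;

public class JmxMetricsFetch {
  public static void main(String[] args) throws Exception {
    // Same query string the navigation blocks above now link to.
    URL url = new URL("http://localhost:8088/jmx?qry=Hadoop:*");
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    try (BufferedReader in = new BufferedReader(
        new InputStreamReader(conn.getInputStream(), StandardCharsets.UTF_8))) {
      String line;
      while ((line = in.readLine()) != null) {
        System.out.println(line);  // JSON describing the matching MBeans
      }
    } finally {
      conn.disconnect();
    }
  }
}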

[22/34] hadoop git commit: HDFS-6673. Add delimited format support to PB OIV tool. Contributed by Eddy Xu.

Posted by zh...@apache.org.
HDFS-6673. Add delimited format support to PB OIV tool. Contributed by Eddy Xu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5227f6f8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5227f6f8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5227f6f8

Branch: refs/heads/HDFS-EC
Commit: 5227f6f84a5843e6851b96361f32037db508bd01
Parents: 3a5a90d
Author: Andrew Wang <wa...@apache.org>
Authored: Wed Jan 28 12:36:29 2015 -0800
Committer: Zhe Zhang <zh...@apache.org>
Committed: Thu Jan 29 10:05:26 2015 -0800

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |   2 +
 hadoop-hdfs-project/hadoop-hdfs/pom.xml         |   5 +
 .../tools/offlineImageViewer/FSImageLoader.java |   4 +-
 .../OfflineImageViewerPB.java                   |  21 +-
 .../PBImageDelimitedTextWriter.java             | 132 +++++
 .../offlineImageViewer/PBImageTextWriter.java   | 586 +++++++++++++++++++
 .../TestOfflineImageViewer.java                 |  54 +-
 7 files changed, 799 insertions(+), 5 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5227f6f8/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 973f2f0..4bd2c55 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -309,6 +309,8 @@ Release 2.7.0 - UNRELEASED
 
     HDFS-7056. Snapshot support for truncate. (Plamen Jeliazkov and shv)
 
+    HDFS-6673. Add delimited format support to PB OIV tool. (Eddy Xu via wang)
+
   IMPROVEMENTS
 
     HDFS-7055. Add tracing to DFSInputStream (cmccabe)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5227f6f8/hadoop-hdfs-project/hadoop-hdfs/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/pom.xml b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
index bad1792..d5c1f35 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/pom.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
@@ -198,6 +198,11 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
       <type>test-jar</type>
       <scope>test</scope>
     </dependency>
+    <dependency>
+      <groupId>org.fusesource.leveldbjni</groupId>
+      <artifactId>leveldbjni-all</artifactId>
+      <version>1.8</version>
+    </dependency>
     <!-- 'mvn dependency:analyze' fails to detect use of this dependency -->
     <dependency>
       <groupId>org.bouncycastle</groupId>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5227f6f8/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FSImageLoader.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FSImageLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FSImageLoader.java
index 2f2fa5f..fd29106 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FSImageLoader.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FSImageLoader.java
@@ -237,7 +237,7 @@ class FSImageLoader {
     return inodes;
   }
 
-  private static String[] loadStringTable(InputStream in) throws
+  static String[] loadStringTable(InputStream in) throws
   IOException {
     FsImageProto.StringTableSection s = FsImageProto.StringTableSection
         .parseDelimitedFrom(in);
@@ -479,7 +479,7 @@ class FSImageLoader {
     }
   }
 
-  private long getFileSize(FsImageProto.INodeSection.INodeFile f) {
+  static long getFileSize(FsImageProto.INodeSection.INodeFile f) {
     long size = 0;
     for (HdfsProtos.BlockProto p : f.getBlocksList()) {
       size += p.getNumBytes();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5227f6f8/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageViewerPB.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageViewerPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageViewerPB.java
index 4fce6a3..6590366 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageViewerPB.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageViewerPB.java
@@ -65,6 +65,10 @@ public class OfflineImageViewerPB {
       + "    -step defines the granularity of the distribution. (2MB by default)\n"
       + "  * Web: Run a viewer to expose read-only WebHDFS API.\n"
       + "    -addr specifies the address to listen. (localhost:5978 by default)\n"
+      + "  * Delimited: Generate a text file with all of the elements common\n"
+      + "    to both inodes and inodes-under-construction, separated by a\n"
+      + "    delimiter. The default delimiter is \\t, though this may be\n"
+      + "    changed via the -delimiter argument.\n"
       + "\n"
       + "Required command line arguments:\n"
       + "-i,--inputFile <arg>   FSImage file to process.\n"
@@ -74,8 +78,12 @@ public class OfflineImageViewerPB {
       + "                       file exists, it will be overwritten.\n"
       + "                       (output to stdout by default)\n"
       + "-p,--processor <arg>   Select which type of processor to apply\n"
-      + "                       against image file. (XML|FileDistribution|Web)\n"
+      + "                       against image file. (XML|FileDistribution|Web|Delimited)\n"
       + "                       (Web by default)\n"
+      + "-delimiter <arg>       Delimiting string to use with Delimited processor\n"
+      + "-t,--temp <arg>        Use temporary dir to cache intermediate result to generate\n"
+      + "                       Delimited outputs. If not set, Delimited processor constructs\n"
+      + "                       the namespace in memory before outputting text.\n"
       + "-h,--help              Display usage information and exit\n";
 
   /**
@@ -97,6 +105,8 @@ public class OfflineImageViewerPB {
     options.addOption("maxSize", true, "");
     options.addOption("step", true, "");
     options.addOption("addr", true, "");
+    options.addOption("delimiter", true, "");
+    options.addOption("t", "temp", true, "");
 
     return options;
   }
@@ -141,6 +151,9 @@ public class OfflineImageViewerPB {
     String inputFile = cmd.getOptionValue("i");
     String processor = cmd.getOptionValue("p", "Web");
     String outputFile = cmd.getOptionValue("o", "-");
+    String delimiter = cmd.getOptionValue("delimiter",
+        PBImageDelimitedTextWriter.DEFAULT_DELIMITER);
+    String tempPath = cmd.getOptionValue("t", "");
 
     Configuration conf = new Configuration();
     try (PrintStream out = outputFile.equals("-") ?
@@ -163,6 +176,12 @@ public class OfflineImageViewerPB {
             viewer.start(inputFile);
           }
           break;
+        case "Delimited":
+          try (PBImageDelimitedTextWriter writer =
+              new PBImageDelimitedTextWriter(out, delimiter, tempPath)) {
+            writer.visit(new RandomAccessFile(inputFile, "r"));
+          }
+          break;
       }
       return 0;
     } catch (EOFException e) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5227f6f8/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageDelimitedTextWriter.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageDelimitedTextWriter.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageDelimitedTextWriter.java
new file mode 100644
index 0000000..350967d
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageDelimitedTextWriter.java
@@ -0,0 +1,132 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.tools.offlineImageViewer;
+
+import org.apache.hadoop.fs.permission.PermissionStatus;
+import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode;
+import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory;
+import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile;
+import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink;
+
+import java.io.File;
+import java.io.IOException;
+import java.io.PrintStream;
+import java.text.SimpleDateFormat;
+import java.util.Date;
+
+/**
+ * A PBImageDelimitedTextWriter generates a text representation of the PB fsimage,
+ * with each element separated by a delimiter string.  All of the elements
+ * common to both inodes and inodes-under-construction are included. When
+ * processing an fsimage with a layout version that did not include an
+ * element, such as AccessTime, the output file will include a column
+ * for the value, but no value will be included.
+ *
+ * Individual block information for each file is not currently included.
+ *
+ * The default delimiter is tab, as this is an unlikely value to be included in
+ * an inode path or other text metadata. The delimiter value can be changed
+ * via the constructor.
+ */
+public class PBImageDelimitedTextWriter extends PBImageTextWriter {
+  static final String DEFAULT_DELIMITER = "\t";
+  private static final String DATE_FORMAT="yyyy-MM-dd HH:mm";
+  private final SimpleDateFormat dateFormatter =
+      new SimpleDateFormat(DATE_FORMAT);
+
+  private final String delimiter;
+
+  PBImageDelimitedTextWriter(PrintStream out, String delimiter, String tempPath)
+      throws IOException {
+    super(out, tempPath);
+    this.delimiter = delimiter;
+  }
+
+  private String formatDate(long date) {
+    return dateFormatter.format(new Date(date));
+  }
+
+  private void append(StringBuffer buffer, int field) {
+    buffer.append(delimiter);
+    buffer.append(field);
+  }
+
+  private void append(StringBuffer buffer, long field) {
+    buffer.append(delimiter);
+    buffer.append(field);
+  }
+
+  private void append(StringBuffer buffer, String field) {
+    buffer.append(delimiter);
+    buffer.append(field);
+  }
+
+  @Override
+  public String getEntry(String parent, INode inode) {
+    StringBuffer buffer = new StringBuffer();
+    String path = new File(parent, inode.getName().toStringUtf8()).toString();
+    buffer.append(path);
+    PermissionStatus p = null;
+
+    switch (inode.getType()) {
+    case FILE:
+      INodeFile file = inode.getFile();
+      p = getPermission(file.getPermission());
+      append(buffer, file.getReplication());
+      append(buffer, formatDate(file.getModificationTime()));
+      append(buffer, formatDate(file.getAccessTime()));
+      append(buffer, file.getPreferredBlockSize());
+      append(buffer, file.getBlocksCount());
+      append(buffer, FSImageLoader.getFileSize(file));
+      append(buffer, 0);  // NS_QUOTA
+      append(buffer, 0);  // DS_QUOTA
+      break;
+    case DIRECTORY:
+      INodeDirectory dir = inode.getDirectory();
+      p = getPermission(dir.getPermission());
+      append(buffer, 0);  // Replication
+      append(buffer, formatDate(dir.getModificationTime()));
+      append(buffer, formatDate(0));  // Access time.
+      append(buffer, 0);  // Block size.
+      append(buffer, 0);  // Num blocks.
+      append(buffer, 0);  // Num bytes.
+      append(buffer, dir.getNsQuota());
+      append(buffer, dir.getDsQuota());
+      break;
+    case SYMLINK:
+      INodeSymlink s = inode.getSymlink();
+      p = getPermission(s.getPermission());
+      append(buffer, 0);  // Replication
+      append(buffer, formatDate(s.getModificationTime()));
+      append(buffer, formatDate(s.getAccessTime()));
+      append(buffer, 0);  // Block size.
+      append(buffer, 0);  // Num blocks.
+      append(buffer, 0);  // Num bytes.
+      append(buffer, 0);  // NS_QUOTA
+      append(buffer, 0);  // DS_QUOTA
+      break;
+    default:
+      break;
+    }
+    assert p != null;
+    append(buffer, p.getPermission().toString());
+    append(buffer, p.getUserName());
+    append(buffer, p.getGroupName());
+    return buffer.toString();
+  }
+}

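For consumers of the Delimited output, a minimal reader is sketched below. It
is not part of the patch, and it assumes the default tab delimiter and the
field order produced by getEntry() above (path, replication, modification
time, access time, preferred block size, block count, file size, NS quota,
DS quota, permission, user, group):

import java.io.BufferedReader;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Paths;

public class DelimitedImageReader {
  public static void main(String[] args) throws IOException {
    try (BufferedReader r = Files.newBufferedReader(
        Paths.get(args[0]), StandardCharsets.UTF_8)) {
      String line;
      while ((line = r.readLine()) != null) {
        // Keep trailing empty fields so column positions stay stable.
        String[] f = line.split("\t", -1);
        System.out.println(f[0] + " owned by " + f[10] + ":" + f[11]
            + ", " + f[6] + " bytes");
      }
    }
  }
}

Any change to the switch statement in getEntry() would shift these column
positions, so the indices here are tied to the version shown above.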
http://git-wip-us.apache.org/repos/asf/hadoop/blob/5227f6f8/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageTextWriter.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageTextWriter.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageTextWriter.java
new file mode 100644
index 0000000..0da263d
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageTextWriter.java
@@ -0,0 +1,586 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.tools.offlineImageViewer;
+
+import com.google.common.base.Preconditions;
+import com.google.common.collect.Lists;
+import com.google.common.io.LimitInputStream;
+import org.apache.commons.io.FileUtils;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.permission.PermissionStatus;
+import org.apache.hadoop.hdfs.server.namenode.FSImageFormatPBINode;
+import org.apache.hadoop.hdfs.server.namenode.FSImageFormatProtobuf;
+import org.apache.hadoop.hdfs.server.namenode.FSImageFormatProtobuf.SectionName;
+import org.apache.hadoop.hdfs.server.namenode.FSImageUtil;
+import org.apache.hadoop.hdfs.server.namenode.FsImageProto;
+import org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary;
+import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection;
+import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode;
+import org.apache.hadoop.hdfs.server.namenode.INodeId;
+import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.util.Time;
+import org.fusesource.leveldbjni.JniDBFactory;
+import org.iq80.leveldb.DB;
+import org.iq80.leveldb.Options;
+import org.iq80.leveldb.WriteBatch;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.BufferedInputStream;
+import java.io.Closeable;
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.PrintStream;
+import java.io.RandomAccessFile;
+import java.io.UnsupportedEncodingException;
+import java.nio.ByteBuffer;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.HashMap;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * This class reads the protobuf-based fsimage and generates text output
+ * for each inode to {@link PBImageTextWriter#out}. The subclass can override
+ * {@link getEntry()} to generate a formatted string for each inode.
+ *
+ * Since protobuf-based fsimage does not guarantee the order of inodes and
+ * directories, PBImageTextWriter runs two-phase scans:
+ *
+ * <ol>
+ *   <li>In the first phase, PBImageTextWriter scans the INode sections to read
+ *   the filename of each directory. It also scans the INode_Dir sections to load
+ *   the relationships between a directory and its children. It uses this metadata
+ *   to build the FS namespace and stores it in {@link MetadataMap}.</li>
+ *   <li>In the second phase, PBImageTextWriter re-scans the INode sections. For
+ *   each inode, it looks up the path of its parent directory in the
+ *   {@link MetadataMap} and generates the output.</li>
+ * </ol>
+ *
+ * Two variants of {@link MetadataMap} are provided. {@link InMemoryMetadataDB}
+ * stores all metadata in memory (O(n) memory) while
+ * {@link LevelDBMetadataMap} stores metadata in LevelDB on disk (O(1) memory).
+ * Users can choose between them based on the time/space tradeoff.
+ */
+abstract class PBImageTextWriter implements Closeable {
+  private static final Logger LOG =
+      LoggerFactory.getLogger(PBImageTextWriter.class);
+
+  /**
+   * This metadata map is used to construct the namespace before generating
+   * text outputs.
+   *
+   * It contains two mapping relationships:
+   * <p>
+   *   <li>It maps each inode (inode Id) to its parent directory (inode Id).</li>
+   *   <li>It maps each directory's inode Id to its name.</li>
+   * </p>
+   */
+  private static interface MetadataMap extends Closeable {
+    /**
+     * Associate an inode with its parent directory.
+     */
+    public void putDirChild(long parentId, long childId) throws IOException;
+
+    /**
+     * Associate a directory with its inode Id.
+     */
+    public void putDir(INode dir) throws IOException;
+
+    /** Get the full path of the parent directory for the given inode. */
+    public String getParentPath(long inode) throws IOException;
+
+    /** Synchronize metadata to persistent storage, if possible */
+    public void sync() throws IOException;
+  }
+
+  /**
+   * Maintain all the metadata in memory.
+   */
+  private static class InMemoryMetadataDB implements MetadataMap {
+    /**
+     * Represent a directory in memory.
+     */
+    private static class Dir {
+      private final long inode;
+      private Dir parent = null;
+      private String name;
+      private String path = null;  // cached full path of the directory.
+
+      Dir(long inode, String name) {
+        this.inode = inode;
+        this.name = name;
+      }
+
+      private void setParent(Dir parent) {
+        Preconditions.checkState(this.parent == null);
+        this.parent = parent;
+      }
+
+      /**
+       * Returns the full path of this directory.
+       */
+      private String getPath() {
+        if (this.parent == null) {
+          return "/";
+        }
+        if (this.path == null) {
+          this.path = new File(parent.getPath(), name).toString();
+          this.name = null;
+        }
+        return this.path;
+      }
+
+      @Override
+      public boolean equals(Object o) {
+        return o instanceof Dir && inode == ((Dir) o).inode;
+      }
+
+      @Override
+      public int hashCode() {
+        return Long.valueOf(inode).hashCode();
+      }
+    }
+
+    /** INode Id to Dir object mapping */
+    private Map<Long, Dir> dirMap = new HashMap<>();
+
+    /** Children to parent directory INode ID mapping. */
+    private Map<Long, Dir> dirChildMap = new HashMap<>();
+
+    InMemoryMetadataDB() {
+    }
+
+    @Override
+    public void close() throws IOException {
+    }
+
+    @Override
+    public void putDirChild(long parentId, long childId) {
+      Dir parent = dirMap.get(parentId);
+      Dir child = dirMap.get(childId);
+      if (child != null) {
+        child.setParent(parent);
+      }
+      Preconditions.checkState(!dirChildMap.containsKey(childId));
+      dirChildMap.put(childId, parent);
+    }
+
+    @Override
+    public void putDir(INode p) {
+      Preconditions.checkState(!dirMap.containsKey(p.getId()));
+      Dir dir = new Dir(p.getId(), p.getName().toStringUtf8());
+      dirMap.put(p.getId(), dir);
+    }
+
+    public String getParentPath(long inode) throws IOException {
+      if (inode == INodeId.ROOT_INODE_ID) {
+        return "";
+      }
+      Dir parent = dirChildMap.get(inode);
+      Preconditions.checkState(parent != null,
+          "Can not find parent directory for INode: %s", inode);
+      return parent.getPath();
+    }
+
+    @Override
+    public void sync() {
+    }
+  }
+
+  /**
+   * A MetadataMap that stores metadata in LevelDB.
+   */
+  private static class LevelDBMetadataMap implements MetadataMap {
+    /**
+     * Store metadata in LevelDB.
+     */
+    private static class LevelDBStore implements Closeable {
+      private DB db = null;
+      private WriteBatch batch = null;
+      private int writeCount = 0;
+      private static final int BATCH_SIZE = 1024;
+
+      LevelDBStore(final File dbPath) throws IOException {
+        Options options = new Options();
+        options.createIfMissing(true);
+        options.errorIfExists(true);
+        db = JniDBFactory.factory.open(dbPath, options);
+        batch = db.createWriteBatch();
+      }
+
+      @Override
+      public void close() throws IOException {
+        if (batch != null) {
+          IOUtils.cleanup(null, batch);
+          batch = null;
+        }
+        IOUtils.cleanup(null, db);
+        db = null;
+      }
+
+      public void put(byte[] key, byte[] value) throws IOException {
+        batch.put(key, value);
+        writeCount++;
+        if (writeCount >= BATCH_SIZE) {
+          sync();
+        }
+      }
+
+      public byte[] get(byte[] key) throws IOException {
+        return db.get(key);
+      }
+
+      public void sync() throws IOException {
+        try {
+          db.write(batch);
+        } finally {
+          batch.close();
+          batch = null;
+        }
+        batch = db.createWriteBatch();
+        writeCount = 0;
+      }
+    }
+
+    /**
+     * An LRU cache for directory path strings.
+     *
+     * The key of this LRU cache is the inode of a directory.
+     */
+    private static class DirPathCache extends LinkedHashMap<Long, String> {
+      private final static int CAPACITY = 16 * 1024;
+
+      DirPathCache() {
+        super(CAPACITY);
+      }
+
+      @Override
+      protected boolean removeEldestEntry(Map.Entry<Long, String> entry) {
+        return super.size() > CAPACITY;
+      }
+    }
+
+    /** Map the child inode to the parent directory inode. */
+    private LevelDBStore dirChildMap = null;
+    /** Directory entry map */
+    private LevelDBStore dirMap = null;
+    private DirPathCache dirPathCache = new DirPathCache();
+
+    LevelDBMetadataMap(String baseDir) throws IOException {
+      File dbDir = new File(baseDir);
+      if (dbDir.exists()) {
+        FileUtils.deleteDirectory(dbDir);
+      }
+      if (!dbDir.mkdirs()) {
+        throw new IOException("Failed to mkdir on " + dbDir);
+      }
+      try {
+        dirChildMap = new LevelDBStore(new File(dbDir, "dirChildMap"));
+        dirMap = new LevelDBStore(new File(dbDir, "dirMap"));
+      } catch (IOException e) {
+        LOG.error("Failed to open LevelDBs", e);
+        IOUtils.cleanup(null, this);
+      }
+    }
+
+    @Override
+    public void close() throws IOException {
+      IOUtils.cleanup(null, dirChildMap, dirMap);
+      dirChildMap = null;
+      dirMap = null;
+    }
+
+    private static byte[] toBytes(long value) {
+      return ByteBuffer.allocate(8).putLong(value).array();
+    }
+
+    private static byte[] toBytes(String value)
+        throws UnsupportedEncodingException {
+      return value.getBytes("UTF-8");
+    }
+
+    private static long toLong(byte[] bytes) {
+      Preconditions.checkArgument(bytes.length == 8);
+      return ByteBuffer.wrap(bytes).getLong();
+    }
+
+    private static String toString(byte[] bytes) throws IOException {
+      try {
+        return new String(bytes, "UTF-8");
+      } catch (UnsupportedEncodingException e) {
+        throw new IOException(e);
+      }
+    }
+
+    @Override
+    public void putDirChild(long parentId, long childId) throws IOException {
+      dirChildMap.put(toBytes(childId), toBytes(parentId));
+    }
+
+    @Override
+    public void putDir(INode dir) throws IOException {
+      Preconditions.checkArgument(dir.hasDirectory(),
+          "INode %s (%s) is not a directory.", dir.getId(), dir.getName());
+      dirMap.put(toBytes(dir.getId()), toBytes(dir.getName().toStringUtf8()));
+    }
+
+    @Override
+    public String getParentPath(long inode) throws IOException {
+      if (inode == INodeId.ROOT_INODE_ID) {
+        return "/";
+      }
+      byte[] bytes = dirChildMap.get(toBytes(inode));
+      Preconditions.checkState(bytes != null && bytes.length == 8,
+          "Can not find parent directory for inode %s, "
+              + "fsimage might be corrupted", inode);
+      long parent = toLong(bytes);
+      if (!dirPathCache.containsKey(parent)) {
+        bytes = dirMap.get(toBytes(parent));
+        if (parent != INodeId.ROOT_INODE_ID) {
+          Preconditions.checkState(bytes != null,
+              "Can not find parent directory for inode %s, "
+                  + "the fsimage might be corrupted.", parent);
+        }
+        String parentName = toString(bytes);
+        String parentPath =
+            new File(getParentPath(parent), parentName).toString();
+        dirPathCache.put(parent, parentPath);
+      }
+      return dirPathCache.get(parent);
+    }
+
+    @Override
+    public void sync() throws IOException {
+      dirChildMap.sync();
+      dirMap.sync();
+    }
+  }
+
+  private String[] stringTable;
+  private PrintStream out;
+  private MetadataMap metadataMap = null;
+
+  /**
+   * Construct a PB FsImage writer to generate text file.
+   * @param out the writer to output text information of fsimage.
+   * @param tempPath the path to store metadata. If it is empty, store metadata
+   *                 in memory instead.
+   */
+  PBImageTextWriter(PrintStream out, String tempPath) throws IOException {
+    this.out = out;
+    if (tempPath.isEmpty()) {
+      metadataMap = new InMemoryMetadataDB();
+    } else {
+      metadataMap = new LevelDBMetadataMap(tempPath);
+    }
+  }
+
+  @Override
+  public void close() throws IOException {
+    IOUtils.cleanup(null, metadataMap);
+  }
+
+  /**
+   * Get text output for the given inode.
+   * @param parent the path of parent directory
+   * @param inode the INode object to output.
+   */
+  abstract protected String getEntry(String parent, INode inode);
+
+  public void visit(RandomAccessFile file) throws IOException {
+    Configuration conf = new Configuration();
+    if (!FSImageUtil.checkFileFormat(file)) {
+      throw new IOException("Unrecognized FSImage");
+    }
+
+    FileSummary summary = FSImageUtil.loadSummary(file);
+
+    try (FileInputStream fin = new FileInputStream(file.getFD())) {
+      InputStream is;
+      ArrayList<FileSummary.Section> sections =
+          Lists.newArrayList(summary.getSectionsList());
+      Collections.sort(sections,
+          new Comparator<FileSummary.Section>() {
+            @Override
+            public int compare(FsImageProto.FileSummary.Section s1,
+                FsImageProto.FileSummary.Section s2) {
+              FSImageFormatProtobuf.SectionName n1 =
+                  FSImageFormatProtobuf.SectionName.fromString(s1.getName());
+              FSImageFormatProtobuf.SectionName n2 =
+                  FSImageFormatProtobuf.SectionName.fromString(s2.getName());
+              if (n1 == null) {
+                return n2 == null ? 0 : -1;
+              } else if (n2 == null) {
+                return -1;
+              } else {
+                return n1.ordinal() - n2.ordinal();
+              }
+            }
+          });
+
+      for (FileSummary.Section section : sections) {
+        fin.getChannel().position(section.getOffset());
+        is = FSImageUtil.wrapInputStreamForCompression(conf,
+            summary.getCodec(), new BufferedInputStream(new LimitInputStream(
+                fin, section.getLength())));
+        switch (SectionName.fromString(section.getName())) {
+        case STRING_TABLE:
+          stringTable = FSImageLoader.loadStringTable(is);
+          break;
+        default:
+          break;
+        }
+      }
+
+      loadDirectories(fin, sections, summary, conf);
+      loadINodeDirSection(fin, sections, summary, conf);
+      metadataMap.sync();
+      output(conf, summary, fin, sections);
+    }
+  }
+
+  private void output(Configuration conf, FileSummary summary,
+      FileInputStream fin, ArrayList<FileSummary.Section> sections)
+      throws IOException {
+    InputStream is;
+    long startTime = Time.monotonicNow();
+    for (FileSummary.Section section : sections) {
+      if (SectionName.fromString(section.getName()) == SectionName.INODE) {
+        fin.getChannel().position(section.getOffset());
+        is = FSImageUtil.wrapInputStreamForCompression(conf,
+            summary.getCodec(), new BufferedInputStream(new LimitInputStream(
+                fin, section.getLength())));
+        outputINodes(is);
+      }
+    }
+    long timeTaken = Time.monotonicNow() - startTime;
+    LOG.debug("Time to output inodes: {}ms", timeTaken);
+  }
+
+  protected PermissionStatus getPermission(long perm) {
+    return FSImageFormatPBINode.Loader.loadPermission(perm, stringTable);
+  }
+
+  /** Load the directories in the INode section. */
+  private void loadDirectories(
+      FileInputStream fin, List<FileSummary.Section> sections,
+      FileSummary summary, Configuration conf)
+      throws IOException {
+    LOG.info("Loading directories");
+    long startTime = Time.monotonicNow();
+    for (FileSummary.Section section : sections) {
+      if (SectionName.fromString(section.getName())
+          == SectionName.INODE) {
+        fin.getChannel().position(section.getOffset());
+        InputStream is = FSImageUtil.wrapInputStreamForCompression(conf,
+            summary.getCodec(), new BufferedInputStream(new LimitInputStream(
+                fin, section.getLength())));
+        loadDirectoriesInINodeSection(is);
+      }
+    }
+    long timeTaken = Time.monotonicNow() - startTime;
+    LOG.info("Finished loading directories in {}ms", timeTaken);
+  }
+
+  private void loadINodeDirSection(
+      FileInputStream fin, List<FileSummary.Section> sections,
+      FileSummary summary, Configuration conf)
+      throws IOException {
+    LOG.info("Loading INode directory section.");
+    long startTime = Time.monotonicNow();
+    for (FileSummary.Section section : sections) {
+      if (SectionName.fromString(section.getName())
+          == SectionName.INODE_DIR) {
+        fin.getChannel().position(section.getOffset());
+        InputStream is = FSImageUtil.wrapInputStreamForCompression(conf,
+            summary.getCodec(), new BufferedInputStream(
+                new LimitInputStream(fin, section.getLength())));
+        buildNamespace(is);
+      }
+    }
+    long timeTaken = Time.monotonicNow() - startTime;
+    LOG.info("Finished loading INode directory section in {}ms", timeTaken);
+  }
+
+  /**
+   * Load the filenames of the directories from the INode section.
+   */
+  private void loadDirectoriesInINodeSection(InputStream in) throws IOException {
+    INodeSection s = INodeSection.parseDelimitedFrom(in);
+    LOG.info("Loading directories in INode section.");
+    int numDirs = 0;
+    for (int i = 0; i < s.getNumInodes(); ++i) {
+      INode p = INode.parseDelimitedFrom(in);
+      if (LOG.isDebugEnabled() && i % 10000 == 0) {
+        LOG.debug("Scanned {} inodes.", i);
+      }
+      if (p.hasDirectory()) {
+        metadataMap.putDir(p);
+        numDirs++;
+      }
+    }
+    LOG.info("Found {} directories in INode section.", numDirs);
+  }
+
+  /**
+   * Scan the INodeDirectory section to construct the namespace.
+   */
+  private void buildNamespace(InputStream in) throws IOException {
+    int count = 0;
+    while (true) {
+      FsImageProto.INodeDirectorySection.DirEntry e =
+          FsImageProto.INodeDirectorySection.DirEntry.parseDelimitedFrom(in);
+      if (e == null) {
+        break;
+      }
+      count++;
+      if (LOG.isDebugEnabled() && count % 10000 == 0) {
+        LOG.debug("Scanned {} directories.", count);
+      }
+      long parentId = e.getParent();
+      // Referred INodes are not supported for now.
+      for (int i = 0; i < e.getChildrenCount(); i++) {
+        long childId = e.getChildren(i);
+        metadataMap.putDirChild(parentId, childId);
+      }
+      Preconditions.checkState(e.getRefChildrenCount() == 0);
+    }
+    LOG.info("Scanned {} INode directories to build namespace.", count);
+  }
+
+  private void outputINodes(InputStream in) throws IOException {
+    INodeSection s = INodeSection.parseDelimitedFrom(in);
+    LOG.info("Found {} INodes in the INode section", s.getNumInodes());
+    for (int i = 0; i < s.getNumInodes(); ++i) {
+      INode p = INode.parseDelimitedFrom(in);
+      String parentPath = metadataMap.getParentPath(p.getId());
+      out.println(getEntry(parentPath, p));
+
+      if (LOG.isDebugEnabled() && i % 100000 == 0) {
+        LOG.debug("Outputted {} INodes.", i);
+      }
+    }
+    LOG.info("Outputted {} INodes.", s.getNumInodes());
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5227f6f8/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java
index 4bb2b79..a8d1d54 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java
@@ -20,22 +20,26 @@ package org.apache.hadoop.hdfs.tools.offlineImageViewer;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 
+import java.io.BufferedReader;
+import java.io.ByteArrayInputStream;
 import java.io.ByteArrayOutputStream;
 import java.io.File;
 import java.io.FileInputStream;
 import java.io.FileOutputStream;
 import java.io.IOException;
+import java.io.InputStreamReader;
 import java.io.PrintStream;
-import java.io.PrintWriter;
 import java.io.RandomAccessFile;
 import java.io.StringReader;
-import java.io.StringWriter;
 import java.net.HttpURLConnection;
 import java.net.URI;
 import java.net.URL;
 import java.util.Collections;
 import java.util.Comparator;
 import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.Set;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
@@ -51,6 +55,7 @@ import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileSystemTestHelper;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
@@ -329,6 +334,51 @@ public class TestOfflineImageViewer {
     }
   }
 
+  @Test
+  public void testPBDelimitedWriter() throws IOException, InterruptedException {
+    testPBDelimitedWriter("");  // Test in memory db.
+    testPBDelimitedWriter(
+        new FileSystemTestHelper().getTestRootDir() + "/delimited.db");
+  }
+
+  private void testPBDelimitedWriter(String db)
+      throws IOException, InterruptedException {
+    final String DELIMITER = "\t";
+    ByteArrayOutputStream output = new ByteArrayOutputStream();
+
+    try (PrintStream o = new PrintStream(output)) {
+      PBImageDelimitedTextWriter v =
+          new PBImageDelimitedTextWriter(o, DELIMITER, db);
+      v.visit(new RandomAccessFile(originalFsimage, "r"));
+    }
+
+    Set<String> fileNames = new HashSet<>();
+    try (
+        ByteArrayInputStream input =
+            new ByteArrayInputStream(output.toByteArray());
+        BufferedReader reader =
+            new BufferedReader(new InputStreamReader(input))) {
+      String line;
+      while ((line = reader.readLine()) != null) {
+        System.out.println(line);
+        String[] fields = line.split(DELIMITER);
+        assertEquals(12, fields.length);
+        fileNames.add(fields[0]);
+      }
+    }
+
+    // writtenFiles does not contain the root directory or the "invalid XML char" dir.
+    for (Iterator<String> it = fileNames.iterator(); it.hasNext(); ) {
+      String filename = it.next();
+      if (filename.startsWith("/dirContainingInvalidXMLChar")) {
+        it.remove();
+      } else if (filename.equals("/")) {
+        it.remove();
+      }
+    }
+    assertEquals(writtenFiles.keySet(), fileNames);
+  }
+
   private static void compareFile(FileStatus expected, FileStatus status) {
     assertEquals(expected.getAccessTime(), status.getAccessTime());
     assertEquals(expected.getBlockSize(), status.getBlockSize());

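For reference, a minimal sketch of driving the new delimited writer directly, outside of the test above. The fsimage path and output file are illustrative, not from the patch; the class is placed in the writer's own package so constructor visibility is not a concern, and an empty db argument keeps the metadata map in memory, as in the first test case.

  package org.apache.hadoop.hdfs.tools.offlineImageViewer;

  import java.io.FileOutputStream;
  import java.io.PrintStream;
  import java.io.RandomAccessFile;

  public class DelimitedDump {
    public static void main(String[] args) throws Exception {
      // args[0]: fsimage file, args[1]: destination for the tab-separated dump
      try (PrintStream out = new PrintStream(new FileOutputStream(args[1]))) {
        PBImageDelimitedTextWriter writer =
            new PBImageDelimitedTextWriter(out, "\t", "");  // "" selects the in-memory map
        writer.visit(new RandomAccessFile(args[0], "r"));
      }
    }
  }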

[04/34] hadoop git commit: HADOOP-11509. Change parsing sequence in GenericOptionsParser to parse -D parameters before -files. Contributed by Xuan Gong

Posted by zh...@apache.org.
HADOOP-11509. Change parsing sequence in GenericOptionsParser to parse
-D parameters before -files. Contributed by Xuan Gong


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/79d0787c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/79d0787c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/79d0787c

Branch: refs/heads/HDFS-EC
Commit: 79d0787c58d930fcf210ed5d87a7290e20ef4d72
Parents: 0111b57
Author: Xuan <xg...@apache.org>
Authored: Mon Jan 26 15:35:35 2015 -0800
Committer: Zhe Zhang <zh...@apache.org>
Committed: Thu Jan 29 10:05:23 2015 -0800

----------------------------------------------------------------------
 hadoop-common-project/hadoop-common/CHANGES.txt |  3 +++
 .../hadoop/util/GenericOptionsParser.java       | 20 +++++++++++---------
 2 files changed, 14 insertions(+), 9 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/79d0787c/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 2806ee2..f1aab62 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -765,6 +765,9 @@ Release 2.7.0 - UNRELEASED
 
     HADOOP-6221 RPC Client operations cannot be interrupted. (stevel)
 
+    HADOOP-11509. Change parsing sequence in GenericOptionsParser to parse -D
+    parameters before -files. (xgong)
+
 Release 2.6.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/79d0787c/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/GenericOptionsParser.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/GenericOptionsParser.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/GenericOptionsParser.java
index d0e7655..0a46a7a 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/GenericOptionsParser.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/GenericOptionsParser.java
@@ -284,6 +284,17 @@ public class GenericOptionsParser {
         conf.addResource(new Path(value));
       }
     }
+
+    if (line.hasOption('D')) {
+      String[] property = line.getOptionValues('D');
+      for(String prop : property) {
+        String[] keyval = prop.split("=", 2);
+        if (keyval.length == 2) {
+          conf.set(keyval[0], keyval[1], "from command line");
+        }
+      }
+    }
+
     if (line.hasOption("libjars")) {
       conf.set("tmpjars", 
                validateFiles(line.getOptionValue("libjars"), conf),
@@ -307,15 +318,6 @@ public class GenericOptionsParser {
                 validateFiles(line.getOptionValue("archives"), conf),
                 "from -archives command line option");
     }
-    if (line.hasOption('D')) {
-      String[] property = line.getOptionValues('D');
-      for(String prop : property) {
-        String[] keyval = prop.split("=", 2);
-        if (keyval.length == 2) {
-          conf.set(keyval[0], keyval[1], "from command line");
-        }
-      }
-    }
     conf.setBoolean("mapreduce.client.genericoptionsparser.used", true);
     
     // tokensFile

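To make the new ordering concrete: the -files/-libjars/-archives handling validates paths against the Configuration, so any properties supplied with -D (a default filesystem, for instance) must already be applied when that validation runs. A small standalone sketch with a made-up property name; the temp file only exists so -files has something to validate.

  import java.io.File;
  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.util.GenericOptionsParser;

  public class ParseOrderDemo {
    public static void main(String[] args) throws Exception {
      File extra = File.createTempFile("extra", ".txt");
      Configuration conf = new Configuration();
      new GenericOptionsParser(conf,
          new String[] {"-D", "demo.prop=from-D", "-files", extra.getAbsolutePath()});
      System.out.println(conf.get("demo.prop"));  // set before -files was processed
      System.out.println(conf.get("tmpfiles"));   // URI recorded for the -files entry
    }
  }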

[17/34] hadoop git commit: HADOOP-11469. KMS should skip default.key.acl and whitelist.key.acl when loading key acl. (Dian Fu via yliu)

Posted by zh...@apache.org.
HADOOP-11469. KMS should skip default.key.acl and whitelist.key.acl when loading key acl. (Dian Fu via yliu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/93f6e7a3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/93f6e7a3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/93f6e7a3

Branch: refs/heads/HDFS-EC
Commit: 93f6e7a39e06cffdb92b7f73e4e6df2d5c964fd3
Parents: dad98fb
Author: yliu <yl...@apache.org>
Authored: Wed Jan 28 00:07:21 2015 +0800
Committer: Zhe Zhang <zh...@apache.org>
Committed: Thu Jan 29 10:05:25 2015 -0800

----------------------------------------------------------------------
 hadoop-common-project/hadoop-common/CHANGES.txt   |  3 +++
 .../hadoop/crypto/key/kms/server/KMSACLs.java     |  7 +++++--
 .../crypto/key/kms/server/KMSConfiguration.java   |  1 +
 .../hadoop/crypto/key/kms/server/TestKMSACLs.java | 18 +++++++++++++++---
 4 files changed, 24 insertions(+), 5 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/93f6e7a3/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 0396e7d..b87c9ae 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -774,6 +774,9 @@ Release 2.7.0 - UNRELEASED
     HADOOP-11509. Change parsing sequence in GenericOptionsParser to parse -D
     parameters before -files. (xgong)
 
+    HADOOP-11469. KMS should skip default.key.acl and whitelist.key.acl when
+    loading key acl. (Dian Fu via yliu)
+
 Release 2.6.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/93f6e7a3/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSACLs.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSACLs.java b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSACLs.java
index c33dd4b..5b67950 100644
--- a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSACLs.java
+++ b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSACLs.java
@@ -36,6 +36,8 @@ import java.util.concurrent.ScheduledExecutorService;
 import java.util.concurrent.TimeUnit;
 import java.util.regex.Pattern;
 
+import com.google.common.annotations.VisibleForTesting;
+
 /**
  * Provides access to the <code>AccessControlList</code>s used by KMS,
  * hot-reloading them if the <code>kms-acls.xml</code> file where the ACLs
@@ -70,7 +72,8 @@ public class KMSACLs implements Runnable, KeyACLs {
 
   private volatile Map<Type, AccessControlList> acls;
   private volatile Map<Type, AccessControlList> blacklistedAcls;
-  private volatile Map<String, HashMap<KeyOpType, AccessControlList>> keyAcls;
+  @VisibleForTesting
+  volatile Map<String, HashMap<KeyOpType, AccessControlList>> keyAcls;
   private final Map<KeyOpType, AccessControlList> defaultKeyAcls =
       new HashMap<KeyOpType, AccessControlList>();
   private final Map<KeyOpType, AccessControlList> whitelistKeyAcls =
@@ -112,7 +115,7 @@ public class KMSACLs implements Runnable, KeyACLs {
     Map<String, HashMap<KeyOpType, AccessControlList>> tempKeyAcls =
         new HashMap<String, HashMap<KeyOpType,AccessControlList>>();
     Map<String, String> allKeyACLS =
-        conf.getValByRegex(Pattern.quote(KMSConfiguration.KEY_ACL_PREFIX));
+        conf.getValByRegex(KMSConfiguration.KEY_ACL_PREFIX_REGEX);
     for (Map.Entry<String, String> keyAcl : allKeyACLS.entrySet()) {
       String k = keyAcl.getKey();
       // this should be of type "key.acl.<KEY_NAME>.<OP_TYPE>"

http://git-wip-us.apache.org/repos/asf/hadoop/blob/93f6e7a3/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSConfiguration.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSConfiguration.java b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSConfiguration.java
index a67c68e..23c983f 100644
--- a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSConfiguration.java
+++ b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSConfiguration.java
@@ -38,6 +38,7 @@ public class KMSConfiguration {
   public static final String CONFIG_PREFIX = "hadoop.kms.";
 
   public static final String KEY_ACL_PREFIX = "key.acl.";
+  public static final String KEY_ACL_PREFIX_REGEX = "^key\\.acl\\..+";
   public static final String DEFAULT_KEY_ACL_PREFIX = "default.key.acl.";
   public static final String WHITELIST_KEY_ACL_PREFIX = "whitelist.key.acl.";
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/93f6e7a3/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMSACLs.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMSACLs.java b/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMSACLs.java
index abdf3c2..b4bf504 100644
--- a/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMSACLs.java
+++ b/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMSACLs.java
@@ -26,7 +26,7 @@ public class TestKMSACLs {
 
   @Test
   public void testDefaults() {
-    KMSACLs acls = new KMSACLs(new Configuration(false));
+    final KMSACLs acls = new KMSACLs(new Configuration(false));
     for (KMSACLs.Type type : KMSACLs.Type.values()) {
       Assert.assertTrue(acls.hasAccess(type,
           UserGroupInformation.createRemoteUser("foo")));
@@ -35,11 +35,11 @@ public class TestKMSACLs {
 
   @Test
   public void testCustom() {
-    Configuration conf = new Configuration(false);
+    final Configuration conf = new Configuration(false);
     for (KMSACLs.Type type : KMSACLs.Type.values()) {
       conf.set(type.getAclConfigKey(), type.toString() + " ");
     }
-    KMSACLs acls = new KMSACLs(conf);
+    final KMSACLs acls = new KMSACLs(conf);
     for (KMSACLs.Type type : KMSACLs.Type.values()) {
       Assert.assertTrue(acls.hasAccess(type,
           UserGroupInformation.createRemoteUser(type.toString())));
@@ -48,4 +48,16 @@ public class TestKMSACLs {
     }
   }
 
+  @Test
+  public void testKeyAclConfigurationLoad() {
+    final Configuration conf = new Configuration(false);
+    conf.set(KeyAuthorizationKeyProvider.KEY_ACL + "test_key_1.MANAGEMENT", "CREATE");
+    conf.set(KeyAuthorizationKeyProvider.KEY_ACL + "test_key_2.ALL", "CREATE");
+    conf.set(KeyAuthorizationKeyProvider.KEY_ACL + "test_key_3.NONEXISTOPERATION", "CREATE");
+    conf.set(KMSConfiguration.DEFAULT_KEY_ACL_PREFIX + "MANAGEMENT", "ROLLOVER");
+    conf.set(KMSConfiguration.WHITELIST_KEY_ACL_PREFIX + "MANAGEMENT", "DECRYPT_EEK");
+    final KMSACLs acls = new KMSACLs(conf);
+    Assert.assertTrue("expected key ACL size is 2 but got " + acls.keyAcls.size(),
+        acls.keyAcls.size() == 2);
+  }
 }

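The effect of the anchored regex is easy to see with Configuration#getValByRegex on its own (key names below are made up): the old Pattern.quote(KEY_ACL_PREFIX) pattern matched anywhere inside a key, so default.key.acl.* and whitelist.key.acl.* entries were swept into the per-key map as well.

  import java.util.regex.Pattern;
  import org.apache.hadoop.conf.Configuration;

  public class KeyAclRegexDemo {
    public static void main(String[] args) {
      Configuration conf = new Configuration(false);
      conf.set("key.acl.test_key_1.MANAGEMENT", "alice");
      conf.set("default.key.acl.MANAGEMENT", "bob");
      conf.set("whitelist.key.acl.MANAGEMENT", "carol");

      // Unanchored: matches all three keys (the old behaviour).
      System.out.println(conf.getValByRegex(Pattern.quote("key.acl.")).keySet());
      // Anchored: matches only the per-key ACL (the fix).
      System.out.println(conf.getValByRegex("^key\\.acl\\..+").keySet());
    }
  }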

[26/34] hadoop git commit: YARN-3103. AMRMClientImpl does not update AMRM token properly. Contributed by Jason Lowe

Posted by zh...@apache.org.
YARN-3103. AMRMClientImpl does not update AMRM token properly. Contributed by Jason Lowe


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/620dd747
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/620dd747
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/620dd747

Branch: refs/heads/HDFS-EC
Commit: 620dd747fe8652a957493932f2035241f29ea15d
Parents: 5227f6f
Author: Jian He <ji...@apache.org>
Authored: Wed Jan 28 15:23:13 2015 -0800
Committer: Zhe Zhang <zh...@apache.org>
Committed: Thu Jan 29 10:05:26 2015 -0800

----------------------------------------------------------------------
 hadoop-yarn-project/CHANGES.txt                       |  3 +++
 .../hadoop/yarn/client/api/impl/AMRMClientImpl.java   |  8 ++++----
 .../hadoop/yarn/client/api/impl/TestAMRMClient.java   | 14 +++++++++++---
 3 files changed, 18 insertions(+), 7 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/620dd747/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 225e126..3f56cfc 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -431,6 +431,9 @@ Release 2.7.0 - UNRELEASED
     YARN-3011. Possible IllegalArgumentException in ResourceLocalizationService
     might lead NM to crash. (Varun Saxena via jianhe)
 
+    YARN-3103. AMRMClientImpl does not update AMRM token properly. (Jason Lowe
+    via jianhe)
+
 Release 2.6.0 - 2014-11-18
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/620dd747/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/AMRMClientImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/AMRMClientImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/AMRMClientImpl.java
index 3cf18ba..ab8aaa8 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/AMRMClientImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/AMRMClientImpl.java
@@ -755,11 +755,11 @@ public class AMRMClientImpl<T extends ContainerRequest> extends AMRMClient<T> {
         new org.apache.hadoop.security.token.Token<AMRMTokenIdentifier>(token
           .getIdentifier().array(), token.getPassword().array(), new Text(
           token.getKind()), new Text(token.getService()));
-    amrmToken.setService(ClientRMProxy.getAMRMTokenService(getConfig()));
+    // Preserve the token service sent by the RM when adding the token
+    // to ensure we replace the previous token set up by the RM.
+    // Afterwards we can update the service address for the RPC layer.
     UserGroupInformation currentUGI = UserGroupInformation.getCurrentUser();
-    if (UserGroupInformation.isSecurityEnabled()) {
-      currentUGI = UserGroupInformation.getLoginUser();
-    }
     currentUGI.addToken(amrmToken);
+    amrmToken.setService(ClientRMProxy.getAMRMTokenService(getConfig()));
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/620dd747/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClient.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClient.java
index e24b5f6..3c6918c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClient.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClient.java
@@ -200,8 +200,11 @@ public class TestAMRMClient {
     // of testing.
     UserGroupInformation.setLoginUser(UserGroupInformation
       .createRemoteUser(UserGroupInformation.getCurrentUser().getUserName()));
-    appAttempt.getAMRMToken().setService(ClientRMProxy.getAMRMTokenService(conf));
+
+    // emulate RM setup of AMRM token in credentials by adding the token
+    // *before* setting the token service
     UserGroupInformation.getCurrentUser().addToken(appAttempt.getAMRMToken());
+    appAttempt.getAMRMToken().setService(ClientRMProxy.getAMRMTokenService(conf));
   }
   
   @After
@@ -1026,13 +1029,18 @@ public class TestAMRMClient {
         UserGroupInformation.getCurrentUser().getCredentials();
     Iterator<org.apache.hadoop.security.token.Token<?>> iter =
         credentials.getAllTokens().iterator();
+    org.apache.hadoop.security.token.Token<AMRMTokenIdentifier> result = null;
     while (iter.hasNext()) {
       org.apache.hadoop.security.token.Token<?> token = iter.next();
       if (token.getKind().equals(AMRMTokenIdentifier.KIND_NAME)) {
-        return (org.apache.hadoop.security.token.Token<AMRMTokenIdentifier>)
+        if (result != null) {
+          Assert.fail("credentials has more than one AMRM token."
+              + " token1: " + result + " token2: " + token);
+        }
+        result = (org.apache.hadoop.security.token.Token<AMRMTokenIdentifier>)
             token;
       }
     }
-    return null;
+    return result;
   }
 }

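The ordering matters because Credentials keys tokens by their service: adding the refreshed AMRM token while it still carries the service the RM issued replaces the existing entry, whereas rewriting the service first leaves two AMRM tokens behind, which is the bug this change fixes. A standalone sketch with illustrative service strings:

  import org.apache.hadoop.io.Text;
  import org.apache.hadoop.security.Credentials;
  import org.apache.hadoop.security.token.Token;
  import org.apache.hadoop.security.token.TokenIdentifier;

  public class TokenReplaceDemo {
    public static void main(String[] args) {
      Credentials creds = new Credentials();

      Token<TokenIdentifier> oldToken = new Token<TokenIdentifier>();
      oldToken.setService(new Text("rm-amrm-service"));
      creds.addToken(oldToken.getService(), oldToken);

      Token<TokenIdentifier> newToken = new Token<TokenIdentifier>();
      newToken.setService(new Text("rm-amrm-service"));    // as issued by the RM
      creds.addToken(newToken.getService(), newToken);      // replaces the old entry
      newToken.setService(new Text("10.0.0.1:8030"));       // safe to rewrite afterwards

      System.out.println(creds.numberOfTokens());           // prints 1
    }
  }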

[30/34] hadoop git commit: YARN-2428. LCE default banned user list should have yarn (Varun Saxena via aw)

Posted by zh...@apache.org.
YARN-2428. LCE default banned user list should have yarn (Varun Saxena via aw)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4941e5ee
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4941e5ee
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4941e5ee

Branch: refs/heads/HDFS-EC
Commit: 4941e5eea37ac825f74c4c1b28b57f4eb126e20a
Parents: a64335e
Author: Allen Wittenauer <aw...@apache.org>
Authored: Thu Jan 29 09:29:59 2015 -0800
Committer: Zhe Zhang <zh...@apache.org>
Committed: Thu Jan 29 10:05:27 2015 -0800

----------------------------------------------------------------------
 hadoop-yarn-project/CHANGES.txt                                   | 3 +++
 .../src/main/native/container-executor/impl/container-executor.c  | 2 +-
 2 files changed, 4 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4941e5ee/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index ae3832b..b582f22 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -34,6 +34,9 @@ Trunk - Unreleased
 
     YARN-3002. YARN documentation needs updating post-shell rewrite (aw)
 
+    YARN-2428. LCE default banned user list should have yarn (Varun
+    Saxena via aw)
+
 Release 2.7.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4941e5ee/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
index 4fc78b6..f73776e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
@@ -38,7 +38,7 @@
 
 static const int DEFAULT_MIN_USERID = 1000;
 
-static const char* DEFAULT_BANNED_USERS[] = {"mapred", "hdfs", "bin", 0};
+static const char* DEFAULT_BANNED_USERS[] = {"yarn", "mapred", "hdfs", "bin", 0};
 
 //struct to store the user details
 struct passwd *user_detail = NULL;


[16/34] hadoop git commit: HADOOP-11317. Increment SLF4J version to 1.7.10. Contributed by Tim Robertson.

Posted by zh...@apache.org.
HADOOP-11317. Increment SLF4J version to 1.7.10. Contributed by Tim Robertson.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3397c233
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3397c233
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3397c233

Branch: refs/heads/HDFS-EC
Commit: 3397c233b2a1e2bb18a2b78c2e0d9b8c72305e03
Parents: f05809c
Author: Tsuyoshi Ozawa <oz...@apache.org>
Authored: Wed Jan 28 21:55:09 2015 +0900
Committer: Zhe Zhang <zh...@apache.org>
Committed: Thu Jan 29 10:05:25 2015 -0800

----------------------------------------------------------------------
 hadoop-common-project/hadoop-common/CHANGES.txt | 2 ++
 hadoop-project/pom.xml                          | 6 +++---
 2 files changed, 5 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3397c233/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index b87c9ae..7ba6148 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -508,6 +508,8 @@ Release 2.7.0 - UNRELEASED
     HADOOP-10626. Limit Returning Attributes for LDAP search. (Jason Hubbard
     via atm)
 
+    HADOOP-11317. Increment SLF4J version to 1.7.10. (Tim Robertson via ozawa)
+
   OPTIMIZATIONS
 
     HADOOP-11323. WritableComparator#compare keeps reference to byte array.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3397c233/hadoop-project/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index c3881e8..a13168d 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -675,17 +675,17 @@
       <dependency>
         <groupId>org.slf4j</groupId>
         <artifactId>slf4j-api</artifactId>
-        <version>1.7.5</version>
+        <version>1.7.10</version>
       </dependency>
       <dependency>
         <groupId>org.slf4j</groupId>
         <artifactId>slf4j-log4j12</artifactId>
-        <version>1.7.5</version>
+        <version>1.7.10</version>
       </dependency>
       <dependency>
         <groupId>org.slf4j</groupId>
         <artifactId>jul-to-slf4j</artifactId>
-        <version>1.7.5</version>
+        <version>1.7.10</version>
       </dependency>
       <dependency>
         <groupId>org.eclipse.jdt</groupId>


[02/34] hadoop git commit: YARN-3086. Make NodeManager memory configurable in MiniYARNCluster. Contributed by Robert Metzger.

Posted by zh...@apache.org.
YARN-3086. Make NodeManager memory configurable in MiniYARNCluster. Contributed by Robert Metzger.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cfb00e73
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cfb00e73
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cfb00e73

Branch: refs/heads/HDFS-EC
Commit: cfb00e7320c21e446f2832da8c7bef772cdb359b
Parents: 6b67115
Author: Tsuyoshi Ozawa <oz...@apache.org>
Authored: Wed Jan 28 00:15:34 2015 +0900
Committer: Zhe Zhang <zh...@apache.org>
Committed: Thu Jan 29 10:05:23 2015 -0800

----------------------------------------------------------------------
 hadoop-yarn-project/CHANGES.txt                                | 3 +++
 .../java/org/apache/hadoop/yarn/conf/YarnConfiguration.java    | 5 +++++
 .../java/org/apache/hadoop/yarn/server/MiniYARNCluster.java    | 6 ++++--
 3 files changed, 12 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cfb00e73/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 924bfa6..52b3cab 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -206,6 +206,9 @@ Release 2.7.0 - UNRELEASED
     YARN-3092. Created a common ResourceUsage class to track labeled resource
     usages in Capacity Scheduler. (Wangda Tan via jianhe)
 
+    YARN-3086. Make NodeManager memory configurable in MiniYARNCluster.
+    (Robert Metzger via ozawa)
+
   OPTIMIZATIONS
 
   BUG FIXES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cfb00e73/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 2ccd894..015baa1 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -1205,6 +1205,11 @@ public class YarnConfiguration extends Configuration {
   public static final boolean
       DEFAULT_YARN_MINICLUSTER_CONTROL_RESOURCE_MONITORING = false;
 
+  /** Allow changing the memory for the NodeManager in the MiniYARNCluster */
+  public static final String YARN_MINICLUSTER_NM_PMEM_MB =
+      YARN_MC_PREFIX + YarnConfiguration.NM_PMEM_MB;
+  public static final int DEFAULT_YARN_MINICLUSTER_NM_PMEM_MB = 4 * 1024;
+
   /** The log directory for the containers */
   public static final String YARN_APP_CONTAINER_LOG_DIR =
       YARN_PREFIX + "app.container.log.dir";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cfb00e73/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/MiniYARNCluster.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/MiniYARNCluster.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/MiniYARNCluster.java
index 4bdf33a..365e0bb 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/MiniYARNCluster.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/MiniYARNCluster.java
@@ -508,8 +508,10 @@ public class MiniYARNCluster extends CompositeService {
       String logDirsString = prepareDirs("log", numLogDirs);
       config.set(YarnConfiguration.NM_LOG_DIRS, logDirsString);
 
-      // By default AM + 2 containers
-      config.setInt(YarnConfiguration.NM_PMEM_MB, 4*1024);
+      config.setInt(YarnConfiguration.NM_PMEM_MB, config.getInt(
+          YarnConfiguration.YARN_MINICLUSTER_NM_PMEM_MB,
+          YarnConfiguration.DEFAULT_YARN_MINICLUSTER_NM_PMEM_MB));
+
       config.set(YarnConfiguration.NM_ADDRESS,
           MiniYARNCluster.getHostname() + ":0");
       config.set(YarnConfiguration.NM_LOCALIZER_ADDRESS,

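From a test's point of view, the new knob is used like this (cluster name and sizes are illustrative):

  import org.apache.hadoop.yarn.conf.YarnConfiguration;
  import org.apache.hadoop.yarn.server.MiniYARNCluster;

  public class MiniClusterMemoryDemo {
    public static void main(String[] args) {
      YarnConfiguration conf = new YarnConfiguration();
      // Give each NodeManager 8 GB instead of the 4 GB default.
      conf.setInt(YarnConfiguration.YARN_MINICLUSTER_NM_PMEM_MB, 8 * 1024);

      MiniYARNCluster cluster = new MiniYARNCluster("memory-demo", 1, 1, 1);
      cluster.init(conf);
      cluster.start();
      // ... run tests that need containers larger than the old hard-coded limit ...
      cluster.stop();
    }
  }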

[21/34] hadoop git commit: HDFS-7677. DistributedFileSystem#truncate should resolve symlinks. (yliu)

Posted by zh...@apache.org.
HDFS-7677. DistributedFileSystem#truncate should resolve symlinks. (yliu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/dad98fb5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/dad98fb5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/dad98fb5

Branch: refs/heads/HDFS-EC
Commit: dad98fb5332cc8290711bcd0bd16d4f896e5264c
Parents: c690f6a
Author: yliu <yl...@apache.org>
Authored: Tue Jan 27 23:50:11 2015 +0800
Committer: Zhe Zhang <zh...@apache.org>
Committed: Thu Jan 29 10:05:25 2015 -0800

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |  2 ++
 .../hadoop/hdfs/DistributedFileSystem.java      | 14 ++++++++-
 .../hdfs/server/namenode/TestFileTruncate.java  | 30 ++++++++++++++++++++
 3 files changed, 45 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/dad98fb5/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index beea13b..1f036b8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -801,6 +801,8 @@ Release 2.7.0 - UNRELEASED
     HDFS-7566. Remove obsolete entries from hdfs-default.xml (Ray Chiang
     via aw)
 
+    HDFS-7677. DistributedFileSystem#truncate should resolve symlinks. (yliu)
+
 Release 2.6.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dad98fb5/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
index 710ab18..6d5684e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
@@ -636,7 +636,19 @@ public class DistributedFileSystem extends FileSystem {
   @Override
   public boolean truncate(Path f, final long newLength) throws IOException {
     statistics.incrementWriteOps(1);
-    return dfs.truncate(getPathName(f), newLength);
+    Path absF = fixRelativePart(f);
+    return new FileSystemLinkResolver<Boolean>() {
+      @Override
+      public Boolean doCall(final Path p)
+          throws IOException, UnresolvedLinkException {
+        return dfs.truncate(getPathName(p), newLength);
+      }
+      @Override
+      public Boolean next(final FileSystem fs, final Path p)
+          throws IOException {
+        return fs.truncate(p, newLength);
+      }
+    }.resolve(this, absF);
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dad98fb5/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
index e8250a2..579e718 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
@@ -759,6 +759,36 @@ public class TestFileTruncate {
     }
   }
 
+  @Test
+  public void testTruncate4Symlink() throws IOException {
+    final int fileLength = 3 * BLOCK_SIZE;
+
+    final Path parent = new Path("/test");
+    fs.mkdirs(parent);
+    final byte[] contents = AppendTestUtil.initBuffer(fileLength);
+    final Path file = new Path(parent, "testTruncate4Symlink");
+    writeContents(contents, fileLength, file);
+
+    final Path link = new Path(parent, "link");
+    fs.createSymlink(file, link, false);
+
+    final int newLength = fileLength/3;
+    boolean isReady = fs.truncate(link, newLength);
+
+    assertTrue("Recovery is not expected.", isReady);
+
+    FileStatus fileStatus = fs.getFileStatus(file);
+    assertThat(fileStatus.getLen(), is((long) newLength));
+
+    ContentSummary cs = fs.getContentSummary(parent);
+    assertEquals("Bad disk space usage",
+        cs.getSpaceConsumed(), newLength * REPLICATION);
+    // validate the file content
+    checkFullFile(file, newLength, contents);
+
+    fs.delete(parent, true);
+  }
+
   static void writeContents(byte[] contents, int fileLength, Path p)
       throws IOException {
     FSDataOutputStream out = fs.create(p, true, BLOCK_SIZE, REPLICATION,

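In client terms, a truncate issued against a symlink now shortens the link target, as the new test shows. A hedged sketch: the paths are illustrative, a Configuration pointing at a running HDFS cluster is assumed, and symlink creation must be enabled for the client.

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.FileSystem;
  import org.apache.hadoop.fs.Path;

  public class TruncateLinkDemo {
    public static void main(String[] args) throws Exception {
      Configuration conf = new Configuration();        // assumes fs.defaultFS points at HDFS
      FileSystem fs = FileSystem.get(conf);

      Path target = new Path("/demo/data.bin");        // an existing file
      Path link = new Path("/demo/data.link");
      fs.createSymlink(target, link, false);

      // Resolved through the symlink since HDFS-7677; shortens /demo/data.bin.
      boolean done = fs.truncate(link, 1024L);
      System.out.println(done ? "truncated in place" : "block recovery in progress");
    }
  }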

[05/34] hadoop git commit: HADOOP-6221 RPC Client operations cannot be interrupted (stevel)

Posted by zh...@apache.org.
HADOOP-6221 RPC Client operations cannot be interrupted (stevel)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0111b57e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0111b57e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0111b57e

Branch: refs/heads/HDFS-EC
Commit: 0111b57e19e78f612107952c41bd95cfbf1ce883
Parents: 9feb6b3
Author: Steve Loughran <st...@apache.org>
Authored: Mon Jan 26 22:04:45 2015 +0000
Committer: Zhe Zhang <zh...@apache.org>
Committed: Thu Jan 29 10:05:23 2015 -0800

----------------------------------------------------------------------
 hadoop-common-project/hadoop-common/CHANGES.txt |   2 +
 .../main/java/org/apache/hadoop/ipc/Client.java |   6 +
 .../main/java/org/apache/hadoop/ipc/RPC.java    |   9 +-
 .../apache/hadoop/net/SocketIOWithTimeout.java  |  12 +-
 .../apache/hadoop/ipc/TestRPCWaitForProxy.java  | 130 +++++++++++++++++++
 5 files changed, 152 insertions(+), 7 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0111b57e/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index e0da851..2806ee2 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -763,6 +763,8 @@ Release 2.7.0 - UNRELEASED
     HADOOP-11499. Check of executorThreadsStarted in
     ValueQueue#submitRefillTask() evades lock acquisition (Ted Yu via jlowe)
 
+    HADOOP-6221 RPC Client operations cannot be interrupted. (stevel)
+
 Release 2.6.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0111b57e/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
index 45a4660..dfde136 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
@@ -849,6 +849,12 @@ public class Client {
         throw ioe;
       }
 
+      // Throw the exception if the thread is interrupted
+      if (Thread.currentThread().isInterrupted()) {
+        LOG.warn("Interrupted while trying to connect");
+        throw ioe;
+      }
+
       try {
         Thread.sleep(action.delayMillis);
       } catch (InterruptedException e) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0111b57e/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java
index 40f6515..8ada0ff 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java
@@ -412,11 +412,18 @@ public class RPC {
         throw ioe;
       }
 
+      if (Thread.currentThread().isInterrupted()) {
+        // interrupted during some IO; this may not have been caught
+        throw new InterruptedIOException("Interrupted waiting for the proxy");
+      }
+
       // wait for retry
       try {
         Thread.sleep(1000);
       } catch (InterruptedException ie) {
-        // IGNORE
+        Thread.currentThread().interrupt();
+        throw (IOException) new InterruptedIOException(
+            "Interrupted waiting for the proxy").initCause(ioe);
       }
     }
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0111b57e/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/SocketIOWithTimeout.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/SocketIOWithTimeout.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/SocketIOWithTimeout.java
index ed12b3c..b50f7e9 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/SocketIOWithTimeout.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/SocketIOWithTimeout.java
@@ -338,6 +338,12 @@ abstract class SocketIOWithTimeout {
             return ret;
           }
           
+          if (Thread.currentThread().isInterrupted()) {
+            throw new InterruptedIOException("Interrupted while waiting for "
+                + "IO on channel " + channel + ". " + timeout
+                + " millis timeout left.");
+          }
+
           /* Sometimes select() returns 0 much before timeout for 
            * unknown reasons. So select again if required.
            */
@@ -348,12 +354,6 @@ abstract class SocketIOWithTimeout {
             }
           }
           
-          if (Thread.currentThread().isInterrupted()) {
-            throw new InterruptedIOException("Interruped while waiting for " +
-                                             "IO on channel " + channel +
-                                             ". " + timeout + 
-                                             " millis timeout left.");
-          }
         }
       } finally {
         if (key != null) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0111b57e/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPCWaitForProxy.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPCWaitForProxy.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPCWaitForProxy.java
new file mode 100644
index 0000000..5807998
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPCWaitForProxy.java
@@ -0,0 +1,130 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ipc;
+
+import org.apache.hadoop.conf.Configuration;
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.*;
+import org.apache.hadoop.ipc.TestRPC.TestProtocol;
+import org.junit.Assert;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.InterruptedIOException;
+import java.net.ConnectException;
+import java.net.InetSocketAddress;
+import java.nio.channels.ClosedByInterruptException;
+
+/**
+ * tests that the proxy can be interrupted
+ */
+public class TestRPCWaitForProxy extends Assert {
+  private static final String ADDRESS = "0.0.0.0";
+  private static final Logger
+      LOG = LoggerFactory.getLogger(TestRPCWaitForProxy.class);
+
+  private static final Configuration conf = new Configuration();
+
+  /**
+   * This tests that the time-bounded wait for a proxy operation works, and
+   * times out.
+   *
+   * @throws Throwable any exception other than that which was expected
+   */
+  @Test(timeout = 10000)
+  public void testWaitForProxy() throws Throwable {
+    RpcThread worker = new RpcThread(0);
+    worker.start();
+    worker.join();
+    Throwable caught = worker.getCaught();
+    assertNotNull("No exception was raised", caught);
+    if (!(caught instanceof ConnectException)) {
+      throw caught;
+    }
+  }
+
+  /**
+   * This test sets off a blocking thread and then interrupts it, before
+   * checking that the thread was interrupted
+   *
+   * @throws Throwable any exception other than that which was expected
+   */
+  @Test(timeout = 10000)
+  public void testInterruptedWaitForProxy() throws Throwable {
+    RpcThread worker = new RpcThread(100);
+    worker.start();
+    Thread.sleep(1000);
+    assertTrue("worker hasn't started", worker.waitStarted);
+    worker.interrupt();
+    worker.join();
+    Throwable caught = worker.getCaught();
+    assertNotNull("No exception was raised", caught);
+    // looking for the root cause here, which can be wrapped
+    // as part of the NetUtils work. Having this test look
+    // at the type of exception there would be brittle to improvements
+    // in exception diagnostics.
+    Throwable cause = caught.getCause();
+    if (cause == null) {
+      // no inner cause, use outer exception as root cause.
+      cause = caught;
+    }
+    if (!(cause instanceof InterruptedIOException)
+        && !(cause instanceof ClosedByInterruptException)) {
+      throw caught;
+    }
+  }
+
+  /**
+   * This thread waits for a proxy for the specified timeout, and retains any
+   * throwable that was raised in the process
+   */
+
+  private class RpcThread extends Thread {
+    private Throwable caught;
+    private int connectRetries;
+    private volatile boolean waitStarted = false;
+
+    private RpcThread(int connectRetries) {
+      this.connectRetries = connectRetries;
+    }
+    @Override
+    public void run() {
+      try {
+        Configuration config = new Configuration(conf);
+        config.setInt(IPC_CLIENT_CONNECT_MAX_RETRIES_KEY,
+            connectRetries);
+        config.setInt(
+            IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SOCKET_TIMEOUTS_KEY,
+            connectRetries);
+        waitStarted = true;
+        TestProtocol proxy = RPC.waitForProxy(TestProtocol.class,
+            TestProtocol.versionID,
+            new InetSocketAddress(ADDRESS, 20),
+            config,
+            15000L);
+        proxy.echo("");
+      } catch (Throwable throwable) {
+        caught = throwable;
+      }
+    }
+
+    public Throwable getCaught() {
+      return caught;
+    }
+  }
+}

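Practically, a thread blocked in RPC.waitForProxy() can now be interrupted by its owner, and the interrupt surfaces as an InterruptedIOException (or a ClosedByInterruptException cause) instead of being swallowed. A sketch along the lines of the new test; TestProtocol is the test-jar protocol used above, and the address is one nothing listens on.

  import java.net.InetSocketAddress;
  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.ipc.RPC;
  import org.apache.hadoop.ipc.TestRPC.TestProtocol;

  public class InterruptWaitDemo {
    public static void main(String[] args) throws Exception {
      Thread caller = new Thread(new Runnable() {
        @Override
        public void run() {
          try {
            // Nothing listens on this port, so the wait retries until interrupted.
            RPC.waitForProxy(TestProtocol.class, TestProtocol.versionID,
                new InetSocketAddress("0.0.0.0", 20), new Configuration(), 15000L);
          } catch (Exception e) {
            System.out.println("wait ended with: " + e);
          }
        }
      });
      caller.start();
      Thread.sleep(1000);
      caller.interrupt();   // previously ignored; now ends the wait promptly
      caller.join();
    }
  }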

[11/34] hadoop git commit: HDFS-3689. Add support for variable length block. Contributed by Jing Zhao.

Posted by zh...@apache.org.
HDFS-3689. Add support for variable length block. Contributed by Jing Zhao.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/32548f4b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/32548f4b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/32548f4b

Branch: refs/heads/HDFS-EC
Commit: 32548f4b4a7150317dd1469787806e90d3373893
Parents: 2323609
Author: Jing Zhao <ji...@apache.org>
Authored: Tue Jan 27 12:58:10 2015 -0800
Committer: Zhe Zhang <zh...@apache.org>
Committed: Thu Jan 29 10:05:24 2015 -0800

----------------------------------------------------------------------
 .../java/org/apache/hadoop/fs/CreateFlag.java   |  24 +-
 .../org/apache/hadoop/fs/FSOutputSummer.java    |   2 +-
 .../hadoop/hdfs/nfs/nfs3/WriteManager.java      |   5 +-
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |   2 +
 .../java/org/apache/hadoop/hdfs/DFSClient.java  |  48 +-
 .../org/apache/hadoop/hdfs/DFSOutputStream.java |  69 +--
 .../hadoop/hdfs/DistributedFileSystem.java      |  10 +-
 .../hdfs/client/HdfsDataOutputStream.java       |   8 +-
 .../org/apache/hadoop/hdfs/inotify/Event.java   |  12 +
 .../hadoop/hdfs/protocol/ClientProtocol.java    |   9 +-
 ...tNamenodeProtocolServerSideTranslatorPB.java |  14 +-
 .../ClientNamenodeProtocolTranslatorPB.java     |  17 +-
 .../apache/hadoop/hdfs/protocolPB/PBHelper.java |  25 +-
 .../datanode/web/webhdfs/WebHdfsHandler.java    |   3 +-
 .../hdfs/server/namenode/FSDirConcatOp.java     | 259 +++++------
 .../hdfs/server/namenode/FSDirectory.java       |   4 +-
 .../hadoop/hdfs/server/namenode/FSEditLog.java  |  20 +-
 .../hdfs/server/namenode/FSEditLogLoader.java   |  64 ++-
 .../hdfs/server/namenode/FSEditLogOp.java       | 101 ++++-
 .../hdfs/server/namenode/FSEditLogOpCodes.java  |   1 +
 .../hdfs/server/namenode/FSNamesystem.java      |  56 ++-
 .../hadoop/hdfs/server/namenode/INodeFile.java  |   2 +-
 .../namenode/InotifyFSEditLogOpTranslator.java  |   4 +
 .../server/namenode/NameNodeLayoutVersion.java  |   3 +-
 .../hdfs/server/namenode/NameNodeRpcServer.java |   9 +-
 .../src/main/proto/ClientNamenodeProtocol.proto |   2 +
 .../hadoop-hdfs/src/main/proto/inotify.proto    |   1 +
 .../org/apache/hadoop/hdfs/AppendTestUtil.java  |  16 +
 .../org/apache/hadoop/hdfs/DFSTestUtil.java     |   3 +
 .../hdfs/TestDFSInotifyEventInputStream.java    |   9 +-
 .../org/apache/hadoop/hdfs/TestFileAppend.java  | 162 +++++++
 .../org/apache/hadoop/hdfs/TestFileAppend2.java | 193 +++++++-
 .../org/apache/hadoop/hdfs/TestFileAppend3.java | 212 +++++++--
 .../hadoop/hdfs/TestFileAppendRestart.java      |  10 +-
 .../java/org/apache/hadoop/hdfs/TestHFlush.java | 128 +++++-
 .../apache/hadoop/hdfs/TestLeaseRecovery.java   |   6 +-
 .../fsdataset/impl/TestLazyPersistFiles.java    |   5 +-
 .../hdfs/server/namenode/TestHDFSConcat.java    |  78 +++-
 .../server/namenode/TestNamenodeRetryCache.java |  16 +-
 .../namenode/ha/TestRetryCacheWithHA.java       |  10 +-
 .../hadoop-hdfs/src/test/resources/editsStored  | Bin 5586 -> 5803 bytes
 .../src/test/resources/editsStored.xml          | 437 ++++++++++---------
 42 files changed, 1509 insertions(+), 550 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/32548f4b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CreateFlag.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CreateFlag.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CreateFlag.java
index c5d23b4..e008ecc 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CreateFlag.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CreateFlag.java
@@ -47,6 +47,10 @@ import org.apache.hadoop.classification.InterfaceStability;
  * <li> SYNC_BLOCK - to force closed blocks to the disk device.
  * In addition {@link Syncable#hsync()} should be called after each write,
  * if true synchronous behavior is required.</li>
+ * <li> LAZY_PERSIST - Create the block on transient storage (RAM) if
+ * available.</li>
+ * <li> NEW_BLOCK - Append data to a new block instead of the end of the last
+ * partial block.</li>
  * </ol>
  * 
  * Following combination is not valid and will result in 
@@ -93,7 +97,13 @@ public enum CreateFlag {
    * This flag must only be used for intermediate data whose loss can be
    * tolerated by the application.
    */
-  LAZY_PERSIST((short) 0x10);
+  LAZY_PERSIST((short) 0x10),
+
+  /**
+   * Append data to a new block instead of the end of the last partial block.
+   * This is only useful for APPEND.
+   */
+  NEW_BLOCK((short) 0x20);
 
   private final short mode;
 
@@ -149,4 +159,16 @@ public enum CreateFlag {
           + ". Create option is not specified in " + flag);
     }
   }
+
+  /**
+   * Validate the CreateFlag for the append operation. The flag must contain
+   * APPEND, and cannot contain OVERWRITE.
+   */
+  public static void validateForAppend(EnumSet<CreateFlag> flag) {
+    validate(flag);
+    if (!flag.contains(APPEND)) {
+      throw new HadoopIllegalArgumentException(flag
+          + " does not contain APPEND");
+    }
+  }
 }
\ No newline at end of file

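A quick standalone illustration of the new flag (not part of the patch): validateForAppend accepts any flag set that includes APPEND, so callers opt into writing to a fresh block by passing NEW_BLOCK alongside it.

  import java.util.EnumSet;
  import org.apache.hadoop.fs.CreateFlag;

  public class AppendFlagDemo {
    public static void main(String[] args) {
      EnumSet<CreateFlag> flags = EnumSet.of(CreateFlag.APPEND, CreateFlag.NEW_BLOCK);
      CreateFlag.validateForAppend(flags);   // fine: APPEND is present

      try {
        CreateFlag.validateForAppend(EnumSet.of(CreateFlag.NEW_BLOCK));
      } catch (Exception e) {
        // HadoopIllegalArgumentException: the flag set does not contain APPEND
        System.out.println(e.getMessage());
      }
    }
  }
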
http://git-wip-us.apache.org/repos/asf/hadoop/blob/32548f4b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSOutputSummer.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSOutputSummer.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSOutputSummer.java
index 934421a..13a5e26 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSOutputSummer.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSOutputSummer.java
@@ -165,7 +165,7 @@ abstract public class FSOutputSummer extends OutputStream {
         count = partialLen;
         System.arraycopy(buf, bufLen - count, buf, 0, count);
       } else {
-      count = 0;
+        count = 0;
       }
     }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/32548f4b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/WriteManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/WriteManager.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/WriteManager.java
index df02e04..52c75ed 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/WriteManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/WriteManager.java
@@ -18,10 +18,12 @@
 package org.apache.hadoop.hdfs.nfs.nfs3;
 
 import java.io.IOException;
+import java.util.EnumSet;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
+import org.apache.hadoop.fs.CreateFlag;
 import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
 import org.apache.hadoop.hdfs.nfs.conf.NfsConfigKeys;
@@ -147,7 +149,8 @@ public class WriteManager {
             CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY,
             CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT);
         
-        fos = dfsClient.append(fileIdPath, bufferSize, null, null);
+        fos = dfsClient.append(fileIdPath, bufferSize,
+            EnumSet.of(CreateFlag.APPEND), null, null);
 
         latestAttr = Nfs3Utils.getFileAttr(dfsClient, fileIdPath, iug);
       } catch (RemoteException e) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/32548f4b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 1e1af97..b867a70 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -18,6 +18,8 @@ Trunk (Unreleased)
 
     HDFS-3125. Add JournalService to enable Journal Daemon. (suresh)
 
+    HDFS-3689. Add support for variable length block. (jing9)
+
   IMPROVEMENTS
 
     HDFS-4665. Move TestNetworkTopologyWithNodeGroup to common.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/32548f4b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index 21f75a5..8512156 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -1656,9 +1656,8 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
    * @param checksumOpt checksum options
    * 
    * @return output stream
-   * 
-   * @see ClientProtocol#create(String, FsPermission, String, EnumSetWritable,
-   * boolean, short, long) for detailed description of exceptions thrown
+   *
+   * @see ClientProtocol#create for detailed description of exceptions thrown
    */
   public DFSOutputStream create(String src, 
                              FsPermission permission,
@@ -1732,7 +1731,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
         }
         return null;
       }
-      return callAppend(src, buffersize, progress);
+      return callAppend(src, buffersize, flag, progress);
     }
     return null;
   }
@@ -1810,11 +1809,16 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
   }
 
   /** Method to get stream returned by append call */
-  private DFSOutputStream callAppend(String src,
-      int buffersize, Progressable progress) throws IOException {
-    LastBlockWithStatus lastBlockWithStatus = null;
-    try {
-      lastBlockWithStatus = namenode.append(src, clientName);
+  private DFSOutputStream callAppend(String src, int buffersize,
+      EnumSet<CreateFlag> flag, Progressable progress) throws IOException {
+    CreateFlag.validateForAppend(flag);
+    try {
+      LastBlockWithStatus blkWithStatus = namenode.append(src, clientName,
+          new EnumSetWritable<>(flag, CreateFlag.class));
+      return DFSOutputStream.newStreamForAppend(this, src,
+          flag.contains(CreateFlag.NEW_BLOCK),
+          buffersize, progress, blkWithStatus.getLastBlock(),
+          blkWithStatus.getFileStatus(), dfsClientConf.createChecksum());
     } catch(RemoteException re) {
       throw re.unwrapRemoteException(AccessControlException.class,
                                      FileNotFoundException.class,
@@ -1824,10 +1828,6 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
                                      UnresolvedPathException.class,
                                      SnapshotAccessControlException.class);
     }
-    HdfsFileStatus newStat = lastBlockWithStatus.getFileStatus();
-    return DFSOutputStream.newStreamForAppend(this, src, buffersize, progress,
-        lastBlockWithStatus.getLastBlock(), newStat,
-        dfsClientConf.createChecksum());
   }
   
   /**
@@ -1835,23 +1835,25 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
    * 
    * @param src file name
    * @param buffersize buffer size
+   * @param flag indicates whether to append data to a new block instead of
+   *             the last block
    * @param progress for reporting write-progress; null is acceptable.
    * @param statistics file system statistics; null is acceptable.
    * @return an output stream for writing into the file
    * 
-   * @see ClientProtocol#append(String, String) 
+   * @see ClientProtocol#append(String, String, EnumSetWritable)
    */
   public HdfsDataOutputStream append(final String src, final int buffersize,
-      final Progressable progress, final FileSystem.Statistics statistics
-      ) throws IOException {
-    final DFSOutputStream out = append(src, buffersize, progress);
+      EnumSet<CreateFlag> flag, final Progressable progress,
+      final FileSystem.Statistics statistics) throws IOException {
+    final DFSOutputStream out = append(src, buffersize, flag, progress);
     return createWrappedOutputStream(out, statistics, out.getInitialLen());
   }
 
-  private DFSOutputStream append(String src, int buffersize, Progressable progress) 
-      throws IOException {
+  private DFSOutputStream append(String src, int buffersize,
+      EnumSet<CreateFlag> flag, Progressable progress) throws IOException {
     checkOpen();
-    final DFSOutputStream result = callAppend(src, buffersize, progress);
+    final DFSOutputStream result = callAppend(src, buffersize, flag, progress);
     beginFileLease(result.getFileId(), result);
     return result;
   }
@@ -1938,7 +1940,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
 
   /**
    * Move blocks from src to trg and delete src
-   * See {@link ClientProtocol#concat(String, String [])}. 
+   * See {@link ClientProtocol#concat}.
    */
   public void concat(String trg, String [] srcs) throws IOException {
     checkOpen();
@@ -1980,7 +1982,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
 
   /**
    * Truncate a file to an indicated size
-   * See {@link ClientProtocol#truncate(String, long)}. 
+   * See {@link ClientProtocol#truncate}.
    */
   public boolean truncate(String src, long newLength) throws IOException {
     checkOpen();
@@ -3005,7 +3007,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
   
   /**
    * Get {@link ContentSummary} rooted at the specified directory.
-   * @param path The string representation of the path
+   * @param src The string representation of the path
    * 
    * @see ClientProtocol#getContentSummary(String)
    */
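
For illustration only (not part of this patch): a hypothetical client-side sketch of the flag-aware append path added above, using the new DistributedFileSystem#append overload that forwards the flag set to DFSClient#append and callAppend. The cluster URI, path and buffer size are placeholders.

  import java.net.URI;
  import java.util.EnumSet;
  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.CreateFlag;
  import org.apache.hadoop.fs.FSDataOutputStream;
  import org.apache.hadoop.fs.FileSystem;
  import org.apache.hadoop.fs.Path;
  import org.apache.hadoop.hdfs.DistributedFileSystem;

  class VariableLengthAppendSketch {
    public static void main(String[] args) throws Exception {
      Configuration conf = new Configuration();
      DistributedFileSystem dfs = (DistributedFileSystem)
          FileSystem.get(URI.create("hdfs://nn.example.com:8020"), conf);
      Path file = new Path("/tmp/append-demo");
      // APPEND + NEW_BLOCK: the last block of the file keeps its current
      // (possibly short) length and the appended bytes start a new block.
      try (FSDataOutputStream out = dfs.append(file,
          EnumSet.of(CreateFlag.APPEND, CreateFlag.NEW_BLOCK), 4096, null)) {
        out.write("more data".getBytes("UTF-8"));
      }
    }
  }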

http://git-wip-us.apache.org/repos/asf/hadoop/blob/32548f4b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
index 67d3143..8cebda1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
@@ -426,15 +426,16 @@ public class DFSOutputStream extends FSOutputSummer
     /**
      * construction with tracing info
      */
-    private DataStreamer(HdfsFileStatus stat, Span span) {
+    private DataStreamer(HdfsFileStatus stat, ExtendedBlock block, Span span) {
       isAppend = false;
       isLazyPersistFile = isLazyPersist(stat);
+      this.block = block;
       stage = BlockConstructionStage.PIPELINE_SETUP_CREATE;
       traceSpan = span;
     }
     
     /**
-     * Construct a data streamer for append
+     * Construct a data streamer for appending to the last partial block
      * @param lastBlock last block of the file to be appended
      * @param stat status of the file to be appended
      * @param bytesPerChecksum number of bytes per checksum
@@ -1716,7 +1717,7 @@ public class DFSOutputStream extends FSOutputSummer
     if (Trace.isTracing()) {
       traceSpan = Trace.startSpan(this.getClass().getSimpleName()).detach();
     }
-    streamer = new DataStreamer(stat, traceSpan);
+    streamer = new DataStreamer(stat, null, traceSpan);
     if (favoredNodes != null && favoredNodes.length != 0) {
       streamer.setFavoredNodes(favoredNodes);
     }
@@ -1773,7 +1774,7 @@ public class DFSOutputStream extends FSOutputSummer
   }
 
   /** Construct a new output stream for append. */
-  private DFSOutputStream(DFSClient dfsClient, String src,
+  private DFSOutputStream(DFSClient dfsClient, String src, boolean toNewBlock,
       Progressable progress, LocatedBlock lastBlock, HdfsFileStatus stat,
       DataChecksum checksum) throws IOException {
     this(dfsClient, src, progress, stat, checksum);
@@ -1785,21 +1786,24 @@ public class DFSOutputStream extends FSOutputSummer
     }
 
     // The last partial block of the file has to be filled.
-    if (lastBlock != null) {
+    if (!toNewBlock && lastBlock != null) {
       // indicate that we are appending to an existing block
       bytesCurBlock = lastBlock.getBlockSize();
       streamer = new DataStreamer(lastBlock, stat, bytesPerChecksum, traceSpan);
     } else {
-      computePacketChunkSize(dfsClient.getConf().writePacketSize, bytesPerChecksum);
-      streamer = new DataStreamer(stat, traceSpan);
+      computePacketChunkSize(dfsClient.getConf().writePacketSize,
+          bytesPerChecksum);
+      streamer = new DataStreamer(stat,
+          lastBlock != null ? lastBlock.getBlock() : null, traceSpan);
     }
     this.fileEncryptionInfo = stat.getFileEncryptionInfo();
   }
 
   static DFSOutputStream newStreamForAppend(DFSClient dfsClient, String src,
-      int buffersize, Progressable progress, LocatedBlock lastBlock,
-      HdfsFileStatus stat, DataChecksum checksum) throws IOException {
-    final DFSOutputStream out = new DFSOutputStream(dfsClient, src,
+      boolean toNewBlock, int bufferSize, Progressable progress,
+      LocatedBlock lastBlock, HdfsFileStatus stat, DataChecksum checksum)
+      throws IOException {
+    final DFSOutputStream out = new DFSOutputStream(dfsClient, src, toNewBlock,
         progress, lastBlock, stat, checksum);
     out.start();
     return out;
@@ -1995,35 +1999,37 @@ public class DFSOutputStream extends FSOutputSummer
       long toWaitFor;
       long lastBlockLength = -1L;
       boolean updateLength = syncFlags.contains(SyncFlag.UPDATE_LENGTH);
+      boolean endBlock = syncFlags.contains(SyncFlag.END_BLOCK);
       synchronized (this) {
-        // flush checksum buffer, but keep checksum buffer intact
-        int numKept = flushBuffer(true, true);
+        // flush checksum buffer, but keep checksum buffer intact if we do not
+        // need to end the current block
+        int numKept = flushBuffer(!endBlock, true);
         // bytesCurBlock potentially incremented if there was buffered data
 
         if (DFSClient.LOG.isDebugEnabled()) {
-          DFSClient.LOG.debug(
-            "DFSClient flush() :" +
-            " bytesCurBlock " + bytesCurBlock +
-            " lastFlushOffset " + lastFlushOffset);
+          DFSClient.LOG.debug("DFSClient flush():"
+              + " bytesCurBlock=" + bytesCurBlock
+              + " lastFlushOffset=" + lastFlushOffset
+              + " createNewBlock=" + endBlock);
         }
         // Flush only if we haven't already flushed till this offset.
         if (lastFlushOffset != bytesCurBlock) {
           assert bytesCurBlock > lastFlushOffset;
           // record the valid offset of this flush
           lastFlushOffset = bytesCurBlock;
-          if (isSync && currentPacket == null) {
+          if (isSync && currentPacket == null && !endBlock) {
             // Nothing to send right now,
             // but sync was requested.
-            // Send an empty packet
+            // Send an empty packet if we do not end the block right now
             currentPacket = createPacket(packetSize, chunksPerPacket,
                 bytesCurBlock, currentSeqno++);
           }
         } else {
-          if (isSync && bytesCurBlock > 0) {
+          if (isSync && bytesCurBlock > 0 && !endBlock) {
             // Nothing to send right now,
             // and the block was partially written,
             // and sync was requested.
-            // So send an empty sync packet.
+            // So send an empty sync packet if we do not end the block right now
             currentPacket = createPacket(packetSize, chunksPerPacket,
                 bytesCurBlock, currentSeqno++);
           } else if (currentPacket != null) {
@@ -2036,10 +2042,21 @@ public class DFSOutputStream extends FSOutputSummer
           currentPacket.syncBlock = isSync;
           waitAndQueueCurrentPacket();          
         }
-        // Restore state of stream. Record the last flush offset 
-        // of the last full chunk that was flushed.
-        //
-        bytesCurBlock -= numKept;
+        if (endBlock && bytesCurBlock > 0) {
+          // Need to end the current block, thus send an empty packet to
+          // indicate this is the end of the block and reset bytesCurBlock
+          currentPacket = createPacket(0, 0, bytesCurBlock, currentSeqno++);
+          currentPacket.lastPacketInBlock = true;
+          currentPacket.syncBlock = shouldSyncBlock || isSync;
+          waitAndQueueCurrentPacket();
+          bytesCurBlock = 0;
+          lastFlushOffset = 0;
+        } else {
+          // Restore state of stream. Record the last flush offset
+          // of the last full chunk that was flushed.
+          bytesCurBlock -= numKept;
+        }
+
         toWaitFor = lastQueuedSeqno;
       } // end synchronized
 
@@ -2058,8 +2075,8 @@ public class DFSOutputStream extends FSOutputSummer
       // namenode.
       if (persistBlocks.getAndSet(false) || updateLength) {
         try {
-          dfsClient.namenode.fsync(src, fileId,
-              dfsClient.clientName, lastBlockLength);
+          dfsClient.namenode.fsync(src, fileId, dfsClient.clientName,
+              lastBlockLength);
         } catch (IOException ioe) {
           DFSClient.LOG.warn("Unable to persist blocks in hflush for " + src, ioe);
           // If we got an error here, it might be because some other thread called

http://git-wip-us.apache.org/repos/asf/hadoop/blob/32548f4b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
index 654e2f9..710ab18 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
@@ -314,13 +314,19 @@ public class DistributedFileSystem extends FileSystem {
   @Override
   public FSDataOutputStream append(Path f, final int bufferSize,
       final Progressable progress) throws IOException {
+    return append(f, EnumSet.of(CreateFlag.APPEND), bufferSize, progress);
+  }
+
+  public FSDataOutputStream append(Path f, final EnumSet<CreateFlag> flag,
+      final int bufferSize, final Progressable progress) throws IOException {
     statistics.incrementWriteOps(1);
     Path absF = fixRelativePart(f);
     return new FileSystemLinkResolver<FSDataOutputStream>() {
       @Override
       public FSDataOutputStream doCall(final Path p)
-          throws IOException, UnresolvedLinkException {
-        return dfs.append(getPathName(p), bufferSize, progress, statistics);
+          throws IOException {
+        return dfs.append(getPathName(p), bufferSize, flag, progress,
+            statistics);
       }
       @Override
       public FSDataOutputStream next(final FileSystem fs, final Path p)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/32548f4b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsDataOutputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsDataOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsDataOutputStream.java
index 2149678..745ca7e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsDataOutputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsDataOutputStream.java
@@ -101,6 +101,12 @@ public class HdfsDataOutputStream extends FSDataOutputStream {
      * When doing sync to DataNodes, also update the metadata (block length) in
      * the NameNode.
      */
-    UPDATE_LENGTH;
+    UPDATE_LENGTH,
+
+    /**
+     * Sync the data to the DataNodes, close the current block, and allocate
+     * a new block.
+     */
+    END_BLOCK;
   }
 }
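
For illustration only (not part of this patch): a hypothetical sketch of driving the new END_BLOCK flag through hsync on an HdfsDataOutputStream, so that the block being written is finished and the next write starts a new one (stream creation elided).

  import java.io.IOException;
  import java.util.EnumSet;
  import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
  import org.apache.hadoop.hdfs.client.HdfsDataOutputStream.SyncFlag;

  class EndBlockSketch {
    static void writeAndCutBlock(HdfsDataOutputStream out, byte[] data)
        throws IOException {
      out.write(data);
      // Persist the data on the DataNodes, close the current block so the
      // next write allocates a new one, and update the file length on the
      // NameNode.
      out.hsync(EnumSet.of(SyncFlag.END_BLOCK, SyncFlag.UPDATE_LENGTH));
    }
  }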

http://git-wip-us.apache.org/repos/asf/hadoop/blob/32548f4b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/inotify/Event.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/inotify/Event.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/inotify/Event.java
index 5ceff1b..a6de289 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/inotify/Event.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/inotify/Event.java
@@ -463,15 +463,22 @@ public abstract class Event {
    */
   public static class AppendEvent extends Event {
     private String path;
+    private boolean newBlock;
 
     public static class Builder {
       private String path;
+      private boolean newBlock;
 
       public Builder path(String path) {
         this.path = path;
         return this;
       }
 
+      public Builder newBlock(boolean newBlock) {
+        this.newBlock = newBlock;
+        return this;
+      }
+
       public AppendEvent build() {
         return new AppendEvent(this);
       }
@@ -480,11 +487,16 @@ public abstract class Event {
     private AppendEvent(Builder b) {
       super(EventType.APPEND);
       this.path = b.path;
+      this.newBlock = b.newBlock;
     }
 
     public String getPath() {
       return path;
     }
+
+    public boolean toNewBlock() {
+      return newBlock;
+    }
   }
 
   /**
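
For illustration only (not part of this patch): a hypothetical inotify consumer reading the new flag off an AppendEvent.

  import org.apache.hadoop.hdfs.inotify.Event;

  class AppendEventSketch {
    static void handle(Event e) {
      if (e.getEventType() == Event.EventType.APPEND) {
        Event.AppendEvent append = (Event.AppendEvent) e;
        // toNewBlock() is true when the append was issued with
        // CreateFlag.NEW_BLOCK, i.e. the data went into a fresh block.
        System.out.println("append to " + append.getPath()
            + " newBlock=" + append.toNewBlock());
      }
    }
  }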

http://git-wip-us.apache.org/repos/asf/hadoop/blob/32548f4b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
index cfd1c67..cba1982 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
@@ -203,6 +203,7 @@ public interface ClientProtocol {
    * Append to the end of the file. 
    * @param src path of the file being created.
    * @param clientName name of the current client.
+   * @param flag indicates whether the data is appended to a new block.
    * @return wrapper with information about the last partial block and file
    *    status if any
    * @throws AccessControlException if permission to append file is 
@@ -225,10 +226,10 @@ public interface ClientProtocol {
    * @throws UnsupportedOperationException if append is not supported
    */
   @AtMostOnce
-  public LastBlockWithStatus append(String src, String clientName)
-      throws AccessControlException, DSQuotaExceededException,
-      FileNotFoundException, SafeModeException, UnresolvedLinkException,
-      SnapshotAccessControlException, IOException;
+  public LastBlockWithStatus append(String src, String clientName,
+      EnumSetWritable<CreateFlag> flag) throws AccessControlException,
+      DSQuotaExceededException, FileNotFoundException, SafeModeException,
+      UnresolvedLinkException, SnapshotAccessControlException, IOException;
 
   /**
    * Set replication for an existing file.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/32548f4b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
index 8bcc1eb..dbb8b85 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
@@ -18,12 +18,14 @@
 package org.apache.hadoop.hdfs.protocolPB;
 
 import java.io.IOException;
+import java.util.EnumSet;
 import java.util.List;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.fs.BatchedRemoteIterator.BatchedEntries;
 import org.apache.hadoop.fs.ContentSummary;
+import org.apache.hadoop.fs.CreateFlag;
 import org.apache.hadoop.fs.FsServerDefaults;
 import org.apache.hadoop.fs.Options.Rename;
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
@@ -65,6 +67,8 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AllowS
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AllowSnapshotResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CheckAccessRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CheckAccessResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CompleteRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CompleteResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatRequestProto;
@@ -187,8 +191,6 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Update
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdateBlockForPipelineResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineResponseProto;
-import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CheckAccessRequestProto;
-import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CheckAccessResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.CreateEncryptionZoneResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.CreateEncryptionZoneRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.GetEZForPathResponseProto;
@@ -209,6 +211,7 @@ import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.SetXAttrResponseProto;
 import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.hdfs.server.namenode.INodeId;
+import org.apache.hadoop.io.EnumSetWritable;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.security.proto.SecurityProtos.CancelDelegationTokenRequestProto;
 import org.apache.hadoop.security.proto.SecurityProtos.CancelDelegationTokenResponseProto;
@@ -412,8 +415,11 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
   public AppendResponseProto append(RpcController controller,
       AppendRequestProto req) throws ServiceException {
     try {
+      EnumSetWritable<CreateFlag> flags = req.hasFlag() ?
+          PBHelper.convertCreateFlag(req.getFlag()) :
+          new EnumSetWritable<>(EnumSet.of(CreateFlag.APPEND));
       LastBlockWithStatus result = server.append(req.getSrc(),
-          req.getClientName());
+          req.getClientName(), flags);
       AppendResponseProto.Builder builder = AppendResponseProto.newBuilder();
       if (result.getLastBlock() != null) {
         builder.setBlock(PBHelper.convert(result.getLastBlock()));
@@ -522,7 +528,7 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
       throw new ServiceException(e);
     }
   }
-  
+
   @Override
   public CompleteResponseProto complete(RpcController controller,
       CompleteRequestProto req) throws ServiceException {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/32548f4b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
index f3826af..1d6c0ee 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
@@ -28,7 +28,6 @@ import com.google.common.collect.Lists;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.crypto.CipherSuite;
 import org.apache.hadoop.crypto.CryptoProtocolVersion;
 import org.apache.hadoop.fs.BatchedRemoteIterator.BatchedEntries;
 import org.apache.hadoop.fs.CacheFlag;
@@ -85,6 +84,7 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AllowS
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolEntryProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CheckAccessRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CompleteRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateRequestProto;
@@ -158,13 +158,11 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTim
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.TruncateRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdateBlockForPipelineRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineRequestProto;
-import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CheckAccessRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetStoragePolicyRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos;
 import org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.CreateEncryptionZoneRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.GetEZForPathRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.ListEncryptionZonesRequestProto;
-import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos;
 import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.GetXAttrsRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.ListXAttrsRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.RemoveXAttrRequestProto;
@@ -318,13 +316,12 @@ public class ClientNamenodeProtocolTranslatorPB implements
   }
 
   @Override
-  public LastBlockWithStatus append(String src, String clientName)
-      throws AccessControlException, DSQuotaExceededException,
-      FileNotFoundException, SafeModeException, UnresolvedLinkException,
-      IOException {
-    AppendRequestProto req = AppendRequestProto.newBuilder()
-        .setSrc(src)
-        .setClientName(clientName)
+  public LastBlockWithStatus append(String src, String clientName,
+      EnumSetWritable<CreateFlag> flag) throws AccessControlException,
+      DSQuotaExceededException, FileNotFoundException, SafeModeException,
+      UnresolvedLinkException, IOException {
+    AppendRequestProto req = AppendRequestProto.newBuilder().setSrc(src)
+        .setClientName(clientName).setFlag(PBHelper.convertCreateFlag(flag))
         .build();
     try {
       AppendResponseProto res = rpcProxy.append(null, req);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/32548f4b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
index 7187838..e4746cc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
@@ -1373,6 +1373,9 @@ public class PBHelper {
     if (flag.contains(CreateFlag.LAZY_PERSIST)) {
       value |= CreateFlagProto.LAZY_PERSIST.getNumber();
     }
+    if (flag.contains(CreateFlag.NEW_BLOCK)) {
+      value |= CreateFlagProto.NEW_BLOCK.getNumber();
+    }
     return value;
   }
   
@@ -1393,7 +1396,11 @@ public class PBHelper {
         == CreateFlagProto.LAZY_PERSIST_VALUE) {
       result.add(CreateFlag.LAZY_PERSIST);
     }
-    return new EnumSetWritable<CreateFlag>(result);
+    if ((flag & CreateFlagProto.NEW_BLOCK_VALUE)
+        == CreateFlagProto.NEW_BLOCK_VALUE) {
+      result.add(CreateFlag.NEW_BLOCK);
+    }
+    return new EnumSetWritable<CreateFlag>(result, CreateFlag.class);
   }
 
   public static int convertCacheFlags(EnumSet<CacheFlag> flags) {
@@ -2605,11 +2612,11 @@ public class PBHelper {
                   .build());
             break;
           case EVENT_APPEND:
-            InotifyProtos.AppendEventProto reopen =
+            InotifyProtos.AppendEventProto append =
                 InotifyProtos.AppendEventProto.parseFrom(p.getContents());
-            events.add(new Event.AppendEvent.Builder()
-                  .path(reopen.getPath())
-                  .build());
+            events.add(new Event.AppendEvent.Builder().path(append.getPath())
+                .newBlock(append.hasNewBlock() && append.getNewBlock())
+                .build());
             break;
           case EVENT_UNLINK:
             InotifyProtos.UnlinkEventProto unlink =
@@ -2710,10 +2717,10 @@ public class PBHelper {
             Event.AppendEvent re2 = (Event.AppendEvent) e;
             events.add(InotifyProtos.EventProto.newBuilder()
                 .setType(InotifyProtos.EventType.EVENT_APPEND)
-                .setContents(
-                    InotifyProtos.AppendEventProto.newBuilder()
-                        .setPath(re2.getPath()).build().toByteString()
-                ).build());
+                .setContents(InotifyProtos.AppendEventProto.newBuilder()
+                    .setPath(re2.getPath())
+                    .setNewBlock(re2.toNewBlock()).build().toByteString())
+                .build());
             break;
           case UNLINK:
             Event.UnlinkEvent ue = (Event.UnlinkEvent) e;
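
For illustration only (not part of this patch): a rough sketch of the round trip PBHelper performs between the CreateFlag set and the wire integer. Assuming the usual bit values (APPEND = 0x04, plus NEW_BLOCK = 0x20 as declared above), APPEND together with NEW_BLOCK encodes to 0x24.

  import java.util.EnumSet;
  import org.apache.hadoop.fs.CreateFlag;
  import org.apache.hadoop.hdfs.protocolPB.PBHelper;
  import org.apache.hadoop.io.EnumSetWritable;

  class CreateFlagWireSketch {
    static void roundTrip() {
      EnumSetWritable<CreateFlag> flags = new EnumSetWritable<>(
          EnumSet.of(CreateFlag.APPEND, CreateFlag.NEW_BLOCK), CreateFlag.class);
      // Encode to the protobuf bit mask and decode it back; the decoded
      // set should again contain both APPEND and NEW_BLOCK.
      int wire = PBHelper.convertCreateFlag(flags);
      EnumSetWritable<CreateFlag> decoded = PBHelper.convertCreateFlag(wire);
    }
  }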

http://git-wip-us.apache.org/repos/asf/hadoop/blob/32548f4b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/WebHdfsHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/WebHdfsHandler.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/WebHdfsHandler.java
index f02780a..be1faec 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/WebHdfsHandler.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/WebHdfsHandler.java
@@ -176,7 +176,8 @@ public class WebHdfsHandler extends SimpleChannelInboundHandler<HttpRequest> {
     final int bufferSize = params.bufferSize();
 
     DFSClient dfsClient = newDfsClient(nnId, conf);
-    OutputStream out = dfsClient.append(path, bufferSize, null, null);
+    OutputStream out = dfsClient.append(path, bufferSize,
+        EnumSet.of(CreateFlag.APPEND), null, null);
     DefaultHttpResponse resp = new DefaultHttpResponse(HTTP_1_1, OK);
     resp.headers().set(CONTENT_LENGTH, 0);
     ctx.pipeline().replace(this, HdfsWriter.class.getSimpleName(),

http://git-wip-us.apache.org/repos/asf/hadoop/blob/32548f4b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java
index 43d3b20..ecfd2e1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java
@@ -19,11 +19,10 @@ package org.apache.hadoop.hdfs.server.namenode;
 
 import com.google.common.base.Preconditions;
 import org.apache.hadoop.HadoopIllegalArgumentException;
-import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
+import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
 import org.apache.hadoop.hdfs.protocol.SnapshotException;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 
 import java.io.IOException;
 import java.util.Arrays;
@@ -33,201 +32,171 @@ import java.util.Set;
 import static org.apache.hadoop.util.Time.now;
 
 class FSDirConcatOp {
-  static HdfsFileStatus concat(
-    FSDirectory fsd, String target, String[] srcs,
+
+  static HdfsFileStatus concat(FSDirectory fsd, String target, String[] srcs,
     boolean logRetryCache) throws IOException {
     Preconditions.checkArgument(!target.isEmpty(), "Target file name is empty");
     Preconditions.checkArgument(srcs != null && srcs.length > 0,
       "No sources given");
     assert srcs != null;
-
-    FSDirectory.LOG.debug("concat {} to {}", Arrays.toString(srcs), target);
-    // We require all files be in the same directory
-    String trgParent =
-      target.substring(0, target.lastIndexOf(Path.SEPARATOR_CHAR));
-    for (String s : srcs) {
-      String srcParent = s.substring(0, s.lastIndexOf(Path.SEPARATOR_CHAR));
-      if (!srcParent.equals(trgParent)) {
-        throw new IllegalArgumentException(
-           "Sources and target are not in the same directory");
-      }
+    if (FSDirectory.LOG.isDebugEnabled()) {
+      FSDirectory.LOG.debug("concat {} to {}", Arrays.toString(srcs), target);
     }
 
-    final INodesInPath trgIip = fsd.getINodesInPath4Write(target);
+    final INodesInPath targetIIP = fsd.getINodesInPath4Write(target);
     // write permission for the target
+    FSPermissionChecker pc = null;
     if (fsd.isPermissionEnabled()) {
-      FSPermissionChecker pc = fsd.getPermissionChecker();
-      fsd.checkPathAccess(pc, trgIip, FsAction.WRITE);
-
-      // and srcs
-      for(String aSrc: srcs) {
-        final INodesInPath srcIip = fsd.getINodesInPath4Write(aSrc);
-        fsd.checkPathAccess(pc, srcIip, FsAction.READ); // read the file
-        fsd.checkParentAccess(pc, srcIip, FsAction.WRITE); // for delete
-      }
+      pc = fsd.getPermissionChecker();
+      fsd.checkPathAccess(pc, targetIIP, FsAction.WRITE);
     }
 
-    // to make sure no two files are the same
-    Set<INode> si = new HashSet<INode>();
+    // check the target
+    verifyTargetFile(fsd, target, targetIIP);
+    // check the srcs
+    INodeFile[] srcFiles = verifySrcFiles(fsd, srcs, targetIIP, pc);
 
-    // we put the following prerequisite for the operation
-    // replication and blocks sizes should be the same for ALL the blocks
+    if(NameNode.stateChangeLog.isDebugEnabled()) {
+      NameNode.stateChangeLog.debug("DIR* NameSystem.concat: " +
+          Arrays.toString(srcs) + " to " + target);
+    }
+
+    long timestamp = now();
+    fsd.writeLock();
+    try {
+      unprotectedConcat(fsd, targetIIP, srcFiles, timestamp);
+    } finally {
+      fsd.writeUnlock();
+    }
+    fsd.getEditLog().logConcat(target, srcs, timestamp, logRetryCache);
+    return fsd.getAuditFileInfo(targetIIP);
+  }
 
+  private static void verifyTargetFile(FSDirectory fsd, final String target,
+      final INodesInPath targetIIP) throws IOException {
     // check the target
-    if (fsd.getEZForPath(trgIip) != null) {
+    if (fsd.getEZForPath(targetIIP) != null) {
       throw new HadoopIllegalArgumentException(
           "concat can not be called for files in an encryption zone.");
     }
-    final INodeFile trgInode = INodeFile.valueOf(trgIip.getLastINode(), target);
-    if(trgInode.isUnderConstruction()) {
+    final INodeFile targetINode = INodeFile.valueOf(targetIIP.getLastINode(),
+        target);
+    if(targetINode.isUnderConstruction()) {
       throw new HadoopIllegalArgumentException("concat: target file "
           + target + " is under construction");
     }
-    // per design target shouldn't be empty and all the blocks same size
-    if(trgInode.numBlocks() == 0) {
-      throw new HadoopIllegalArgumentException("concat: target file "
-          + target + " is empty");
-    }
-    if (trgInode.isWithSnapshot()) {
-      throw new HadoopIllegalArgumentException("concat: target file "
-          + target + " is in a snapshot");
-    }
-
-    long blockSize = trgInode.getPreferredBlockSize();
-
-    // check the end block to be full
-    final BlockInfo last = trgInode.getLastBlock();
-    if(blockSize != last.getNumBytes()) {
-      throw new HadoopIllegalArgumentException("The last block in " + target
-          + " is not full; last block size = " + last.getNumBytes()
-          + " but file block size = " + blockSize);
-    }
-
-    si.add(trgInode);
-    final short repl = trgInode.getFileReplication();
+  }
 
+  private static INodeFile[] verifySrcFiles(FSDirectory fsd, String[] srcs,
+      INodesInPath targetIIP, FSPermissionChecker pc) throws IOException {
+    // to make sure no two files are the same
+    Set<INodeFile> si = new HashSet<>();
+    final INodeFile targetINode = targetIIP.getLastINode().asFile();
+    final INodeDirectory targetParent = targetINode.getParent();
     // now check the srcs
-    boolean endSrc = false; // final src file doesn't have to have full end block
-    for(int i=0; i< srcs.length; i++) {
-      String src = srcs[i];
-      if(i== srcs.length-1)
-        endSrc=true;
-
-      final INodeFile srcInode = INodeFile.valueOf(fsd.getINode4Write(src), src);
-      if(src.isEmpty()
-          || srcInode.isUnderConstruction()
-          || srcInode.numBlocks() == 0) {
-        throw new HadoopIllegalArgumentException("concat: source file " + src
-            + " is invalid or empty or underConstruction");
+    for(String src : srcs) {
+      final INodesInPath iip = fsd.getINodesInPath4Write(src);
+      // permission check for srcs
+      if (pc != null) {
+        fsd.checkPathAccess(pc, iip, FsAction.READ); // read the file
+        fsd.checkParentAccess(pc, iip, FsAction.WRITE); // for delete
       }
-
-      // check replication and blocks size
-      if(repl != srcInode.getBlockReplication()) {
-        throw new HadoopIllegalArgumentException("concat: the source file "
-            + src + " and the target file " + target
-            + " should have the same replication: source replication is "
-            + srcInode.getBlockReplication()
-            + " but target replication is " + repl);
+      final INode srcINode = iip.getLastINode();
+      final INodeFile srcINodeFile = INodeFile.valueOf(srcINode, src);
+      // make sure the src file and the target file are in the same dir
+      if (srcINodeFile.getParent() != targetParent) {
+        throw new HadoopIllegalArgumentException("Source file " + src
+            + " is not in the same directory with the target "
+            + targetIIP.getPath());
       }
-
-      //boolean endBlock=false;
-      // verify that all the blocks are of the same length as target
-      // should be enough to check the end blocks
-      final BlockInfo[] srcBlocks = srcInode.getBlocks();
-      int idx = srcBlocks.length-1;
-      if(endSrc)
-        idx = srcBlocks.length-2; // end block of endSrc is OK not to be full
-      if(idx >= 0 && srcBlocks[idx].getNumBytes() != blockSize) {
-        throw new HadoopIllegalArgumentException("concat: the source file "
-            + src + " and the target file " + target
-            + " should have the same blocks sizes: target block size is "
-            + blockSize + " but the size of source block " + idx + " is "
-            + srcBlocks[idx].getNumBytes());
+      // make sure all the source files are not in snapshot
+      if (srcINode.isInLatestSnapshot(iip.getLatestSnapshotId())) {
+        throw new SnapshotException("Concat: the source file " + src
+            + " is in snapshot");
       }
-
-      si.add(srcInode);
+      // check if the file has other references.
+      if (srcINode.isReference() && ((INodeReference.WithCount)
+          srcINode.asReference().getReferredINode()).getReferenceCount() > 1) {
+        throw new SnapshotException("Concat: the source file " + src
+            + " is referred by some other reference in some snapshot.");
+      }
+      if (srcINode == targetINode) {
+        throw new HadoopIllegalArgumentException("concat: the src file " + src
+            + " is the same with the target file " + targetIIP.getPath());
+      }
+      if(srcINodeFile.isUnderConstruction() || srcINodeFile.numBlocks() == 0) {
+        throw new HadoopIllegalArgumentException("concat: source file " + src
+            + " is invalid or empty or underConstruction");
+      }
+      si.add(srcINodeFile);
     }
 
     // make sure no two files are the same
-    if(si.size() < srcs.length+1) { // trg + srcs
+    if(si.size() < srcs.length) {
       // it means at least two files are the same
       throw new HadoopIllegalArgumentException(
           "concat: at least two of the source files are the same");
     }
+    return si.toArray(new INodeFile[si.size()]);
+  }
 
-    if(NameNode.stateChangeLog.isDebugEnabled()) {
-      NameNode.stateChangeLog.debug("DIR* NameSystem.concat: " +
-          Arrays.toString(srcs) + " to " + target);
+  private static long computeQuotaDelta(INodeFile target, INodeFile[] srcList) {
+    long delta = 0;
+    short targetRepl = target.getBlockReplication();
+    for (INodeFile src : srcList) {
+      if (targetRepl != src.getBlockReplication()) {
+        delta += src.computeFileSize() *
+            (targetRepl - src.getBlockReplication());
+      }
     }
+    return delta;
+  }
 
-    long timestamp = now();
-    fsd.writeLock();
-    try {
-      unprotectedConcat(fsd, target, srcs, timestamp);
-    } finally {
-      fsd.writeUnlock();
+  private static void verifyQuota(FSDirectory fsd, INodesInPath targetIIP,
+      long delta) throws QuotaExceededException {
+    if (!fsd.getFSNamesystem().isImageLoaded() || fsd.shouldSkipQuotaChecks()) {
+      // Do not check quota if editlog is still being processed
+      return;
     }
-    fsd.getEditLog().logConcat(target, srcs, timestamp, logRetryCache);
-    return fsd.getAuditFileInfo(trgIip);
+    FSDirectory.verifyQuota(targetIIP, targetIIP.length() - 1, 0, delta, null);
   }
 
   /**
    * Concat all the blocks from srcs to trg and delete the srcs files
    * @param fsd FSDirectory
-   * @param target target file to move the blocks to
-   * @param srcs list of file to move the blocks from
    */
-  static void unprotectedConcat(
-    FSDirectory fsd, String target, String[] srcs, long timestamp)
-    throws IOException {
+  static void unprotectedConcat(FSDirectory fsd, INodesInPath targetIIP,
+      INodeFile[] srcList, long timestamp) throws IOException {
     assert fsd.hasWriteLock();
     if (NameNode.stateChangeLog.isDebugEnabled()) {
-      NameNode.stateChangeLog.debug("DIR* FSNamesystem.concat to "+target);
+      NameNode.stateChangeLog.debug("DIR* FSNamesystem.concat to "
+          + targetIIP.getPath());
     }
-    // do the move
-
-    final INodesInPath trgIIP = fsd.getINodesInPath4Write(target, true);
-    final INodeFile trgInode = trgIIP.getLastINode().asFile();
-    INodeDirectory trgParent = trgIIP.getINode(-2).asDirectory();
-    final int trgLatestSnapshot = trgIIP.getLatestSnapshotId();
-
-    final INodeFile [] allSrcInodes = new INodeFile[srcs.length];
-    for(int i = 0; i < srcs.length; i++) {
-      final INodesInPath iip = fsd.getINodesInPath4Write(srcs[i]);
-      final int latest = iip.getLatestSnapshotId();
-      final INode inode = iip.getLastINode();
-
-      // check if the file in the latest snapshot
-      if (inode.isInLatestSnapshot(latest)) {
-        throw new SnapshotException("Concat: the source file " + srcs[i]
-            + " is in snapshot " + latest);
-      }
 
-      // check if the file has other references.
-      if (inode.isReference() && ((INodeReference.WithCount)
-          inode.asReference().getReferredINode()).getReferenceCount() > 1) {
-        throw new SnapshotException("Concat: the source file " + srcs[i]
-            + " is referred by some other reference in some snapshot.");
-      }
+    final INodeFile trgInode = targetIIP.getLastINode().asFile();
+    long delta = computeQuotaDelta(trgInode, srcList);
+    verifyQuota(fsd, targetIIP, delta);
 
-      allSrcInodes[i] = inode.asFile();
-    }
-    trgInode.concatBlocks(allSrcInodes);
+    // the target file can be included in a snapshot
+    trgInode.recordModification(targetIIP.getLatestSnapshotId());
+    INodeDirectory trgParent = targetIIP.getINode(-2).asDirectory();
+    trgInode.concatBlocks(srcList);
 
     // since we are in the same dir - we can use same parent to remove files
     int count = 0;
-    for(INodeFile nodeToRemove: allSrcInodes) {
-      if(nodeToRemove == null) continue;
-
-      nodeToRemove.setBlocks(null);
-      trgParent.removeChild(nodeToRemove, trgLatestSnapshot);
-      fsd.getINodeMap().remove(nodeToRemove);
-      count++;
+    for (INodeFile nodeToRemove : srcList) {
+      if(nodeToRemove != null) {
+        nodeToRemove.setBlocks(null);
+        nodeToRemove.getParent().removeChild(nodeToRemove);
+        fsd.getINodeMap().remove(nodeToRemove);
+        count++;
+      }
     }
 
-    trgInode.setModificationTime(timestamp, trgLatestSnapshot);
-    trgParent.updateModificationTime(timestamp, trgLatestSnapshot);
+    trgInode.setModificationTime(timestamp, targetIIP.getLatestSnapshotId());
+    trgParent.updateModificationTime(timestamp, targetIIP.getLatestSnapshotId());
     // update quota on the parent directory ('count' files removed, 0 space)
-    FSDirectory.unprotectedUpdateCount(trgIIP, trgIIP.length() - 1, -count, 0);
+    FSDirectory.unprotectedUpdateCount(targetIIP, targetIIP.length() - 1,
+        -count, delta);
   }
 }
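
A brief worked example of the quota arithmetic in computeQuotaDelta above (figures are hypothetical): if the concat target has replication 3 and a 128 MB source file has replication 2, that source contributes 128 MB * (3 - 2) = +128 MB of additional disk space once its blocks adopt the target's replication. verifyQuota checks this delta against the ancestor directories' diskspace quota before any blocks are moved, and the same delta is later passed to unprotectedUpdateCount so the recorded space consumption stays consistent.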

http://git-wip-us.apache.org/repos/asf/hadoop/blob/32548f4b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
index c171448..c012847 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
@@ -452,7 +452,7 @@ public class FSDirectory implements Closeable {
       Preconditions.checkState(fileINode.isUnderConstruction());
 
       // check quota limits and updated space consumed
-      updateCount(inodesInPath, 0, fileINode.getBlockDiskspace(), true);
+      updateCount(inodesInPath, 0, fileINode.getPreferredBlockDiskspace(), true);
 
       // associate new last block for the file
       BlockInfoUnderConstruction blockInfo =
@@ -508,7 +508,7 @@ public class FSDirectory implements Closeable {
     }
 
     // update space consumed
-    updateCount(iip, 0, -fileNode.getBlockDiskspace(), true);
+    updateCount(iip, 0, -fileNode.getPreferredBlockDiskspace(), true);
     return true;
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/32548f4b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
index 144be37..3c7eae4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
@@ -34,10 +34,10 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Options;
+import org.apache.hadoop.fs.XAttr;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.PermissionStatus;
-import org.apache.hadoop.fs.XAttr;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
@@ -52,9 +52,11 @@ import org.apache.hadoop.hdfs.server.common.StorageInfo;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.AddBlockOp;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.AddCacheDirectiveInfoOp;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.AddCachePoolOp;
+import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.AddCloseOp;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.AddOp;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.AllocateBlockIdOp;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.AllowSnapshotOp;
+import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.AppendOp;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.CancelDelegationTokenOp;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.CloseOp;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.ConcatDeleteOp;
@@ -76,6 +78,7 @@ import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RenameOldOp;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RenameOp;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RenameSnapshotOp;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RenewDelegationTokenOp;
+import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RollingUpgradeOp;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetAclOp;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetGenstampV1Op;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetGenstampV2Op;
@@ -90,7 +93,6 @@ import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.TimesOp;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.TruncateOp;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.UpdateBlocksOp;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.UpdateMasterKeyOp;
-import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RollingUpgradeOp;
 import org.apache.hadoop.hdfs.server.namenode.JournalSet.JournalAndStream;
 import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration;
@@ -702,7 +704,19 @@ public class FSEditLog implements LogsPurgeable {
       op.setRpcCallId(Server.getCallId());
     }
   }
-  
+
+  public void logAppendFile(String path, INodeFile file, boolean newBlock,
+      boolean toLogRpcIds) {
+    FileUnderConstructionFeature uc = file.getFileUnderConstructionFeature();
+    assert uc != null;
+    AppendOp op = AppendOp.getInstance(cache.get()).setPath(path)
+        .setClientName(uc.getClientName())
+        .setClientMachine(uc.getClientMachine())
+        .setNewBlock(newBlock);
+    logRpcIds(op, toLogRpcIds);
+    logEdit(op);
+  }
+
   /** 
    * Add open lease record to edit log. 
    * Records the block locations of the last block.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/32548f4b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
index 322e18c..7cb6486 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
@@ -34,6 +34,7 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.fs.XAttrSetFlag;
+import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
@@ -41,7 +42,6 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.LastBlockWithStatus;
 import org.apache.hadoop.hdfs.protocol.LayoutVersion;
-import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.RollingUpgradeStartupOption;
@@ -53,6 +53,7 @@ import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.AddCachePoolOp;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.AddCloseOp;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.AllocateBlockIdOp;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.AllowSnapshotOp;
+import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.AppendOp;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.BlockListUpdatingOp;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.CancelDelegationTokenOp;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.ClearNSQuotaOp;
@@ -68,6 +69,7 @@ import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.ModifyCachePoolOp;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.ReassignLeaseOp;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RemoveCacheDirectiveInfoOp;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RemoveCachePoolOp;
+import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RemoveXAttrOp;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RenameOldOp;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RenameOp;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RenameSnapshotOp;
@@ -83,7 +85,6 @@ import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetQuotaOp;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetReplicationOp;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetStoragePolicyOp;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetXAttrOp;
-import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RemoveXAttrOp;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SymlinkOp;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.TimesOp;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.UpdateBlocksOp;
@@ -325,22 +326,22 @@ public class FSEditLogLoader {
       LOG.trace("replaying edit log: " + op);
     }
     final boolean toAddRetryCache = fsNamesys.hasRetryCache() && op.hasRpcIds();
-    
+
     switch (op.opCode) {
     case OP_ADD: {
       AddCloseOp addCloseOp = (AddCloseOp)op;
       final String path =
           renameReservedPathsOnUpgrade(addCloseOp.path, logVersion);
-      if (LOG.isDebugEnabled()) {
-        LOG.debug(op.opCode + ": " + path +
+      if (FSNamesystem.LOG.isDebugEnabled()) {
+        FSNamesystem.LOG.debug(op.opCode + ": " + path +
             " numblocks : " + addCloseOp.blocks.length +
             " clientHolder " + addCloseOp.clientName +
             " clientMachine " + addCloseOp.clientMachine);
       }
-      // There three cases here:
+      // There are 3 cases here:
       // 1. OP_ADD to create a new file
       // 2. OP_ADD to update file blocks
-      // 3. OP_ADD to open file for append
+      // 3. OP_ADD to open file for append (old append)
 
       // See if the file already exists (persistBlocks call)
       INodesInPath iip = fsDir.getINodesInPath(path, true);
@@ -383,19 +384,17 @@ public class FSEditLogLoader {
           fsNamesys.addCacheEntryWithPayload(addCloseOp.rpcClientId,
               addCloseOp.rpcCallId, stat);
         }
-      } else { // This is OP_ADD on an existing file
+      } else { // This is OP_ADD on an existing file (old append)
         if (!oldFile.isUnderConstruction()) {
           // This is case 3: a call to append() on an already-closed file.
           if (FSNamesystem.LOG.isDebugEnabled()) {
             FSNamesystem.LOG.debug("Reopening an already-closed file " +
                 "for append");
           }
-          // Note we do not replace the INodeFile when converting it to
-          // under-construction
-          LocatedBlock lb = fsNamesys.prepareFileForWrite(path, iip,
-              addCloseOp.clientName, addCloseOp.clientMachine, false, false);
-          
-          // add the op into retry cache is necessary
+          LocatedBlock lb = fsNamesys.prepareFileForAppend(path, iip,
+              addCloseOp.clientName, addCloseOp.clientMachine, false, false,
+              false);
+          // add the op into retry cache if necessary
           if (toAddRetryCache) {
             HdfsFileStatus stat = FSDirStatAndListingOp.createFileStatus(
                 fsNamesys.dir,
@@ -453,6 +452,34 @@ public class FSEditLogLoader {
       }
       break;
     }
+    case OP_APPEND: {
+      AppendOp appendOp = (AppendOp) op;
+      final String path = renameReservedPathsOnUpgrade(appendOp.path,
+          logVersion);
+      if (FSNamesystem.LOG.isDebugEnabled()) {
+        FSNamesystem.LOG.debug(op.opCode + ": " + path +
+            " clientName " + appendOp.clientName +
+            " clientMachine " + appendOp.clientMachine +
+            " newBlock " + appendOp.newBlock);
+      }
+      INodesInPath iip = fsDir.getINodesInPath4Write(path);
+      INodeFile file = INodeFile.valueOf(iip.getLastINode(), path);
+      if (!file.isUnderConstruction()) {
+        LocatedBlock lb = fsNamesys.prepareFileForAppend(path, iip,
+            appendOp.clientName, appendOp.clientMachine, appendOp.newBlock,
+            false, false);
+        // add the op into retry cache if necessary
+        if (toAddRetryCache) {
+          HdfsFileStatus stat = FSDirStatAndListingOp.createFileStatus(
+              fsNamesys.dir, HdfsFileStatus.EMPTY_NAME, file,
+              BlockStoragePolicySuite.ID_UNSPECIFIED,
+              Snapshot.CURRENT_STATE_ID, false, iip);
+          fsNamesys.addCacheEntryWithPayload(appendOp.rpcClientId,
+              appendOp.rpcCallId, new LastBlockWithStatus(lb, stat));
+        }
+      }
+      break;
+    }
     case OP_UPDATE_BLOCKS: {
       UpdateBlocksOp updateOp = (UpdateBlocksOp)op;
       final String path =
@@ -499,7 +526,14 @@ public class FSEditLogLoader {
         srcs[i] =
             renameReservedPathsOnUpgrade(concatDeleteOp.srcs[i], logVersion);
       }
-      FSDirConcatOp.unprotectedConcat(fsDir, trg, srcs, concatDeleteOp.timestamp);
+      INodesInPath targetIIP = fsDir.getINodesInPath4Write(trg);
+      INodeFile[] srcFiles = new INodeFile[srcs.length];
+      for (int i = 0; i < srcs.length; i++) {
+        INodesInPath srcIIP = fsDir.getINodesInPath4Write(srcs[i]);
+        srcFiles[i] = srcIIP.getLastINode().asFile();
+      }
+      FSDirConcatOp.unprotectedConcat(fsDir, targetIIP, srcFiles,
+          concatDeleteOp.timestamp);
       
       if (toAddRetryCache) {
         fsNamesys.addCacheEntry(concatDeleteOp.rpcClientId,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/32548f4b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
index 9424156..1629d80 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hdfs.server.namenode;
 
 import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_ADD;
+import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_APPEND;
 import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_ADD_BLOCK;
 import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_ADD_CACHE_DIRECTIVE;
 import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_ADD_CACHE_POOL;
@@ -207,6 +208,7 @@ public abstract class FSEditLogOp {
       inst.put(OP_SET_XATTR, new SetXAttrOp());
       inst.put(OP_REMOVE_XATTR, new RemoveXAttrOp());
       inst.put(OP_SET_STORAGE_POLICY, new SetStoragePolicyOp());
+      inst.put(OP_APPEND, new AppendOp());
     }
     
     public FSEditLogOp get(FSEditLogOpCodes opcode) {
@@ -428,7 +430,7 @@ public abstract class FSEditLogOp {
     private AddCloseOp(FSEditLogOpCodes opCode) {
       super(opCode);
       storagePolicyId = BlockStoragePolicySuite.ID_UNSPECIFIED;
-      assert(opCode == OP_ADD || opCode == OP_CLOSE);
+      assert(opCode == OP_ADD || opCode == OP_CLOSE || opCode == OP_APPEND);
     }
 
     @Override
@@ -770,7 +772,7 @@ public abstract class FSEditLogOp {
     }
 
     static AddOp getInstance(OpInstanceCache cache) {
-      return (AddOp)cache.get(OP_ADD);
+      return (AddOp) cache.get(OP_ADD);
     }
 
     @Override
@@ -788,7 +790,7 @@ public abstract class FSEditLogOp {
   }
 
   /**
-   * Although {@link ClientProtocol#appendFile} may also log a close op, we do
+   * Although {@link ClientProtocol#append} may also log a close op, we do
    * not need to record the rpc ids here since a successful appendFile op will
    * finally log an AddOp.
    */
@@ -814,6 +816,97 @@ public abstract class FSEditLogOp {
       return builder.toString();
     }
   }
+
+  static class AppendOp extends FSEditLogOp {
+    String path;
+    String clientName;
+    String clientMachine;
+    boolean newBlock;
+
+    private AppendOp() {
+      super(OP_APPEND);
+    }
+
+    static AppendOp getInstance(OpInstanceCache cache) {
+      return (AppendOp) cache.get(OP_APPEND);
+    }
+
+    AppendOp setPath(String path) {
+      this.path = path;
+      return this;
+    }
+
+    AppendOp setClientName(String clientName) {
+      this.clientName = clientName;
+      return this;
+    }
+
+    AppendOp setClientMachine(String clientMachine) {
+      this.clientMachine = clientMachine;
+      return this;
+    }
+
+    AppendOp setNewBlock(boolean newBlock) {
+      this.newBlock = newBlock;
+      return this;
+    }
+
+    @Override
+    public String toString() {
+      StringBuilder builder = new StringBuilder();
+      builder.append("AppendOp ");
+      builder.append("[path=").append(path);
+      builder.append(", clientName=").append(clientName);
+      builder.append(", clientMachine=").append(clientMachine);
+      builder.append(", newBlock=").append(newBlock).append("]");
+      return builder.toString();
+    }
+
+    @Override
+    void resetSubFields() {
+      this.path = null;
+      this.clientName = null;
+      this.clientMachine = null;
+      this.newBlock = false;
+    }
+
+    @Override
+    void readFields(DataInputStream in, int logVersion) throws IOException {
+      this.path = FSImageSerialization.readString(in);
+      this.clientName = FSImageSerialization.readString(in);
+      this.clientMachine = FSImageSerialization.readString(in);
+      this.newBlock = FSImageSerialization.readBoolean(in);
+      readRpcIds(in, logVersion);
+    }
+
+    @Override
+    public void writeFields(DataOutputStream out) throws IOException {
+      FSImageSerialization.writeString(path, out);
+      FSImageSerialization.writeString(clientName, out);
+      FSImageSerialization.writeString(clientMachine, out);
+      FSImageSerialization.writeBoolean(newBlock, out);
+      writeRpcIds(rpcClientId, rpcCallId, out);
+    }
+
+    @Override
+    protected void toXml(ContentHandler contentHandler) throws SAXException {
+      XMLUtils.addSaxString(contentHandler, "PATH", path);
+      XMLUtils.addSaxString(contentHandler, "CLIENT_NAME", clientName);
+      XMLUtils.addSaxString(contentHandler, "CLIENT_MACHINE", clientMachine);
+      XMLUtils.addSaxString(contentHandler, "NEWBLOCK",
+          Boolean.toString(newBlock));
+      appendRpcIdsToXml(contentHandler, rpcClientId, rpcCallId);
+    }
+
+    @Override
+    void fromXml(Stanza st) throws InvalidXmlException {
+      this.path = st.getValue("PATH");
+      this.clientName = st.getValue("CLIENT_NAME");
+      this.clientMachine = st.getValue("CLIENT_MACHINE");
+      this.newBlock = Boolean.parseBoolean(st.getValue("NEWBLOCK"));
+      readRpcIdsFromXml(st);
+    }
+  }
   
   static class AddBlockOp extends FSEditLogOp {
     private String path;
@@ -1643,7 +1736,7 @@ public abstract class FSEditLogOp {
    * {@link ClientProtocol#updateBlockForPipeline},
    * {@link ClientProtocol#recoverLease}, {@link ClientProtocol#addBlock}) or
    * already bound with other editlog op which records rpc ids (
-   * {@link ClientProtocol#startFile}). Thus no need to record rpc ids here.
+   * {@link ClientProtocol#create}). Thus no need to record rpc ids here.
    */
   static class SetGenstampV1Op extends FSEditLogOp {
     long genStampV1;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/32548f4b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOpCodes.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOpCodes.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOpCodes.java
index 468e048..6cd1617 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOpCodes.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOpCodes.java
@@ -74,6 +74,7 @@ public enum FSEditLogOpCodes {
   OP_REMOVE_XATTR               ((byte) 44),
   OP_SET_STORAGE_POLICY         ((byte) 45),
   OP_TRUNCATE                   ((byte) 46),
+  OP_APPEND                     ((byte) 47),
 
   // Note that the current range of the valid OP code is 0~127
   OP_INVALID                    ((byte) -1);
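
For illustration, a minimal sketch of how the new OP_APPEND edit (opcode 47) is built and
logged, mirroring FSEditLog#logAppendFile above; the path and client values are examples only,
and "cache" is the existing per-thread OpInstanceCache:

  AppendOp op = AppendOp.getInstance(cache.get())
      .setPath("/foo/bar.log")                // example path
      .setClientName("DFSClient_example")     // example lease holder
      .setClientMachine("127.0.0.1")
      .setNewBlock(true);                     // append writes to a fresh block
  logRpcIds(op, toLogRpcIds);                 // record rpc ids for retry-cache rebuilding
  logEdit(op);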

http://git-wip-us.apache.org/repos/asf/hadoop/blob/32548f4b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index fae1641..ebdec1b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -250,6 +250,7 @@ import org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration;
 import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
 import org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks;
 import org.apache.hadoop.hdfs.server.protocol.StorageReport;
+import org.apache.hadoop.io.EnumSetWritable;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.ipc.RetriableException;
@@ -2586,12 +2587,12 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
    * <p>
    * 
    * For description of parameters and exceptions thrown see
-   * {@link ClientProtocol#append(String, String)}
-   * 
+   * {@link ClientProtocol#append(String, String, EnumSetWritable)}
+   *
    * @return the last block locations if the block is partial or null otherwise
    */
   private LocatedBlock appendFileInternal(FSPermissionChecker pc,
-      INodesInPath iip, String holder, String clientMachine,
+      INodesInPath iip, String holder, String clientMachine, boolean newBlock,
       boolean logRetryCache) throws IOException {
     assert hasWriteLock();
     // Verify that the destination does not exist as a directory already.
@@ -2613,7 +2614,6 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
       INodeFile myFile = INodeFile.valueOf(inode, src, true);
       final BlockStoragePolicy lpPolicy =
           blockManager.getStoragePolicy("LAZY_PERSIST");
-
       if (lpPolicy != null &&
           lpPolicy.getId() == myFile.getStoragePolicyID()) {
         throw new UnsupportedOperationException(
@@ -2629,8 +2629,8 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
         throw new IOException("append: lastBlock=" + lastBlock +
             " of src=" + src + " is not sufficiently replicated yet.");
       }
-      return prepareFileForWrite(src, iip, holder, clientMachine, true,
-              logRetryCache);
+      return prepareFileForAppend(src, iip, holder, clientMachine, newBlock,
+          true, logRetryCache);
     } catch (IOException ie) {
       NameNode.stateChangeLog.warn("DIR* NameSystem.append: " +ie.getMessage());
       throw ie;
@@ -2644,6 +2644,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
    * @param src path to the file
    * @param leaseHolder identifier of the lease holder on this file
    * @param clientMachine identifier of the client machine
+   * @param newBlock if the data is appended to a new block
    * @param writeToEditLog whether to persist this change to the edit log
    * @param logRetryCache whether to record RPC ids in editlog for retry cache
    *                      rebuilding
@@ -2651,26 +2652,34 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
    * @throws UnresolvedLinkException
    * @throws IOException
    */
-  LocatedBlock prepareFileForWrite(String src, INodesInPath iip,
-      String leaseHolder, String clientMachine, boolean writeToEditLog,
-      boolean logRetryCache) throws IOException {
+  LocatedBlock prepareFileForAppend(String src, INodesInPath iip,
+      String leaseHolder, String clientMachine, boolean newBlock,
+      boolean writeToEditLog, boolean logRetryCache) throws IOException {
     final INodeFile file = iip.getLastINode().asFile();
     file.recordModification(iip.getLatestSnapshotId());
     file.toUnderConstruction(leaseHolder, clientMachine);
 
     leaseManager.addLease(
         file.getFileUnderConstructionFeature().getClientName(), src);
-    
-    LocatedBlock ret =
-        blockManager.convertLastBlockToUnderConstruction(file, 0);
-    if (ret != null) {
-      // update the quota: use the preferred block size for UC block
-      final long diff = file.getPreferredBlockSize() - ret.getBlockSize();
-      dir.updateSpaceConsumed(iip, 0, diff * file.getBlockReplication());
+
+    LocatedBlock ret = null;
+    if (!newBlock) {
+      ret = blockManager.convertLastBlockToUnderConstruction(file, 0);
+      if (ret != null) {
+        // update the quota: use the preferred block size for UC block
+        final long diff = file.getPreferredBlockSize() - ret.getBlockSize();
+        dir.updateSpaceConsumed(iip, 0, diff * file.getBlockReplication());
+      }
+    } else {
+      BlockInfo lastBlock = file.getLastBlock();
+      if (lastBlock != null) {
+        ExtendedBlock blk = new ExtendedBlock(this.getBlockPoolId(), lastBlock);
+        ret = new LocatedBlock(blk, new DatanodeInfo[0]);
+      }
     }
 
     if (writeToEditLog) {
-      getEditLog().logOpenFile(src, file, false, logRetryCache);
+      getEditLog().logAppendFile(src, file, newBlock, logRetryCache);
     }
     return ret;
   }
@@ -2805,11 +2814,12 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
   /**
    * Append to an existing file in the namespace.
    */
-  LastBlockWithStatus appendFile(
-      String src, String holder, String clientMachine, boolean logRetryCache)
+  LastBlockWithStatus appendFile(String src, String holder,
+      String clientMachine, EnumSet<CreateFlag> flag, boolean logRetryCache)
       throws IOException {
     try {
-      return appendFileInt(src, holder, clientMachine, logRetryCache);
+      return appendFileInt(src, holder, clientMachine,
+          flag.contains(CreateFlag.NEW_BLOCK), logRetryCache);
     } catch (AccessControlException e) {
       logAuditEvent(false, "append", src);
       throw e;
@@ -2817,7 +2827,8 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
   }
 
   private LastBlockWithStatus appendFileInt(final String srcArg, String holder,
-      String clientMachine, boolean logRetryCache) throws IOException {
+      String clientMachine, boolean newBlock, boolean logRetryCache)
+      throws IOException {
     String src = srcArg;
     if (NameNode.stateChangeLog.isDebugEnabled()) {
       NameNode.stateChangeLog.debug("DIR* NameSystem.appendFile: src=" + src
@@ -2836,7 +2847,8 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
       checkNameNodeSafeMode("Cannot append to file" + src);
       src = dir.resolvePath(pc, src, pathComponents);
       final INodesInPath iip = dir.getINodesInPath4Write(src);
-      lb = appendFileInternal(pc, iip, holder, clientMachine, logRetryCache);
+      lb = appendFileInternal(pc, iip, holder, clientMachine, newBlock,
+          logRetryCache);
       stat = FSDirStatAndListingOp.getFileInfo(dir, src, false,
           FSDirectory.isReservedRawName(srcArg), true);
     } catch (StandbyException se) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/32548f4b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
index e871bdc..cbcdac9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
@@ -412,7 +412,7 @@ public class INodeFile extends INodeWithAdditionalFields
   }
 
   /** @return the diskspace required for a full block. */
-  final long getBlockDiskspace() {
+  final long getPreferredBlockDiskspace() {
     return getPreferredBlockSize() * getBlockReplication();
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/32548f4b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/InotifyFSEditLogOpTranslator.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/InotifyFSEditLogOpTranslator.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/InotifyFSEditLogOpTranslator.java
index f265340..5345b46 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/InotifyFSEditLogOpTranslator.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/InotifyFSEditLogOpTranslator.java
@@ -65,6 +65,10 @@ public class InotifyFSEditLogOpTranslator {
       FSEditLogOp.CloseOp cOp = (FSEditLogOp.CloseOp) op;
       return new EventBatch(op.txid, new Event[] {
           new Event.CloseEvent(cOp.path, getSize(cOp), cOp.mtime) });
+    case OP_APPEND:
+      FSEditLogOp.AppendOp appendOp = (FSEditLogOp.AppendOp) op;
+      return new EventBatch(op.txid, new Event[] {new Event.AppendEvent
+          .Builder().path(appendOp.path).newBlock(appendOp.newBlock).build()});
     case OP_SET_REPLICATION:
       FSEditLogOp.SetReplicationOp setRepOp = (FSEditLogOp.SetReplicationOp) op;
       return new EventBatch(op.txid,
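
For downstream consumers, a minimal sketch of reading the new newBlock flag from an inotify
AppendEvent, assuming the existing HdfsAdmin/DFSInotifyEventInputStream client API; error and
interrupt handling are omitted:

  HdfsAdmin admin = new HdfsAdmin(FileSystem.getDefaultUri(conf), conf);
  DFSInotifyEventInputStream events = admin.getInotifyEventStream();
  EventBatch batch = events.take();
  for (Event e : batch.getEvents()) {
    if (e.getEventType() == Event.EventType.APPEND) {
      Event.AppendEvent append = (Event.AppendEvent) e;
      System.out.println(append.getPath() + " newBlock=" + append.toNewBlock());
    }
  }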

http://git-wip-us.apache.org/repos/asf/hadoop/blob/32548f4b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeLayoutVersion.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeLayoutVersion.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeLayoutVersion.java
index d742c6d..848fa33 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeLayoutVersion.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeLayoutVersion.java
@@ -70,7 +70,8 @@ public class NameNodeLayoutVersion {
       "creating file with overwrite"),
     XATTRS_NAMESPACE_EXT(-59, "Increase number of xattr namespaces"),
     BLOCK_STORAGE_POLICY(-60, "Block Storage policy"),
-    TRUNCATE(-61, "Truncate");
+    TRUNCATE(-61, "Truncate"),
+    APPEND_NEW_BLOCK(-62, "Support appending to new block");
 
     private final FeatureInfo info;
 


[23/34] hadoop git commit: HDFS-7681. Change ReplicaInputStreams constructor to take InputStream(s) instead of FileDescriptor(s). Contributed by Joe Pallas

Posted by zh...@apache.org.
HDFS-7681. Change ReplicaInputStreams constructor to take InputStream(s) instead of FileDescriptor(s).  Contributed by Joe Pallas


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8ced72c8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8ced72c8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8ced72c8

Branch: refs/heads/HDFS-EC
Commit: 8ced72c81a01a126a37078e7f55fd631b6bef7ab
Parents: dd5946a
Author: Tsz-Wo Nicholas Sze <sz...@hortonworks.com>
Authored: Wed Jan 28 15:59:33 2015 -0800
Committer: Zhe Zhang <zh...@apache.org>
Committed: Thu Jan 29 10:05:26 2015 -0800

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt               |  3 +++
 .../server/datanode/fsdataset/ReplicaInputStreams.java    |  6 +++---
 .../server/datanode/fsdataset/impl/FsDatasetImpl.java     | 10 ++++++++--
 .../server/datanode/extdataset/ExternalDatasetImpl.java   |  2 +-
 4 files changed, 15 insertions(+), 6 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8ced72c8/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 4d2b41d..fd29408 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -141,6 +141,9 @@ Trunk (Unreleased)
     HDFS-7430. Rewrite the BlockScanner to use O(1) memory and use multiple
     threads (cmccabe)
 
+    HDFS-7681. Change ReplicaInputStreams constructor to take InputStream(s)
+    instead of FileDescriptor(s).  (Joe Pallas via szetszwo)
+
   OPTIMIZATIONS
 
   BUG FIXES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8ced72c8/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/ReplicaInputStreams.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/ReplicaInputStreams.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/ReplicaInputStreams.java
index a8bf622..e0928a4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/ReplicaInputStreams.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/ReplicaInputStreams.java
@@ -33,11 +33,11 @@ public class ReplicaInputStreams implements Closeable {
   private final FsVolumeReference volumeRef;
 
   /** Create an object with a data input stream and a checksum input stream. */
-  public ReplicaInputStreams(FileDescriptor dataFd, FileDescriptor checksumFd,
+  public ReplicaInputStreams(InputStream dataStream, InputStream checksumStream,
       FsVolumeReference volumeRef) {
     this.volumeRef = volumeRef;
-    this.dataIn = new FileInputStream(dataFd);
-    this.checksumIn = new FileInputStream(checksumFd);
+    this.dataIn = dataStream;
+    this.checksumIn = checksumStream;
   }
 
   /** @return the data input stream. */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8ced72c8/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
index c00d467..5c20102 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
@@ -642,8 +642,14 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
       if (ckoff > 0) {
         metaInFile.seek(ckoff);
       }
-      return new ReplicaInputStreams(
-          blockInFile.getFD(), metaInFile.getFD(), ref);
+      InputStream blockInStream = new FileInputStream(blockInFile.getFD());
+      try {
+        InputStream metaInStream = new FileInputStream(metaInFile.getFD());
+        return new ReplicaInputStreams(blockInStream, metaInStream, ref);
+      } catch (IOException e) {
+        IOUtils.cleanup(null, blockInStream);
+        throw e;
+      }
     } catch (IOException e) {
       IOUtils.cleanup(null, ref);
       throw e;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8ced72c8/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/extdataset/ExternalDatasetImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/extdataset/ExternalDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/extdataset/ExternalDatasetImpl.java
index 8fd51d2..aa868d5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/extdataset/ExternalDatasetImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/extdataset/ExternalDatasetImpl.java
@@ -137,7 +137,7 @@ public class ExternalDatasetImpl implements FsDatasetSpi<ExternalVolumeImpl> {
   @Override
   public ReplicaInputStreams getTmpInputStreams(ExtendedBlock b, long blkoff,
       long ckoff) throws IOException {
-    return new ReplicaInputStreams(FileDescriptor.in, FileDescriptor.in, null);
+    return new ReplicaInputStreams(null, null, null);
   }
 
   @Override
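
With this change a caller wraps its own streams rather than handing over raw file
descriptors. A minimal sketch mirroring the FsDatasetImpl change above; the File variables
and volume reference are examples only:

  InputStream blockIn = new FileInputStream(blockFile);
  InputStream metaIn = new FileInputStream(metaFile);
  ReplicaInputStreams streams = new ReplicaInputStreams(blockIn, metaIn, volumeRef);

This is also what lets the test-only ExternalDatasetImpl above pass plain nulls instead of
FileDescriptor.in.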


[29/34] hadoop git commit: YARN-3079. Scheduler should also update maximumAllocation when updateNodeResource. (Zhihai Xu via wangda)

Posted by zh...@apache.org.
YARN-3079. Scheduler should also update maximumAllocation when updateNodeResource. (Zhihai Xu via wangda)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1ca84c1f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1ca84c1f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1ca84c1f

Branch: refs/heads/HDFS-EC
Commit: 1ca84c1fea5b411756f9645bc9db38ce8ae475fb
Parents: 840d214
Author: Wangda Tan <wa...@apache.org>
Authored: Wed Jan 28 21:54:27 2015 -0800
Committer: Zhe Zhang <zh...@apache.org>
Committed: Thu Jan 29 10:05:27 2015 -0800

----------------------------------------------------------------------
 hadoop-yarn-project/CHANGES.txt                 |  3 +
 .../scheduler/AbstractYarnScheduler.java        | 47 ++++++++-------
 .../scheduler/TestAbstractYarnScheduler.java    | 62 ++++++++++++++++++++
 3 files changed, 92 insertions(+), 20 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1ca84c1f/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 3f56cfc..ae3832b 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -434,6 +434,9 @@ Release 2.7.0 - UNRELEASED
     YARN-3103. AMRMClientImpl does not update AMRM token properly. (Jason Lowe
     via jianhe)
 
+    YARN-3079. Scheduler should also update maximumAllocation when updateNodeResource.
+    (Zhihai Xu via wangda)
+
 Release 2.6.0 - 2014-11-18
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1ca84c1f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
index 5d4d7e2..753259c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
@@ -22,6 +22,8 @@ import java.io.IOException;
 import java.util.*;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
+import java.util.concurrent.locks.ReentrantReadWriteLock.ReadLock;
+import java.util.concurrent.locks.ReentrantReadWriteLock.WriteLock;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -82,8 +84,9 @@ public abstract class AbstractYarnScheduler
   private Resource configuredMaximumAllocation;
   private int maxNodeMemory = -1;
   private int maxNodeVCores = -1;
-  private ReentrantReadWriteLock maximumAllocationLock =
-      new ReentrantReadWriteLock();
+  private final ReadLock maxAllocReadLock;
+  private final WriteLock maxAllocWriteLock;
+
   private boolean useConfiguredMaximumAllocationOnly = true;
   private long configuredMaximumAllocationWaitTime;
 
@@ -103,6 +106,9 @@ public abstract class AbstractYarnScheduler
    */
   public AbstractYarnScheduler(String name) {
     super(name);
+    ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
+    this.maxAllocReadLock = lock.readLock();
+    this.maxAllocWriteLock = lock.writeLock();
   }
 
   @Override
@@ -157,8 +163,7 @@ public abstract class AbstractYarnScheduler
   @Override
   public Resource getMaximumResourceCapability() {
     Resource maxResource;
-    ReentrantReadWriteLock.ReadLock readLock = maximumAllocationLock.readLock();
-    readLock.lock();
+    maxAllocReadLock.lock();
     try {
       if (useConfiguredMaximumAllocationOnly) {
         if (System.currentTimeMillis() - ResourceManager.getClusterTimeStamp()
@@ -170,22 +175,20 @@ public abstract class AbstractYarnScheduler
         maxResource = Resources.clone(maximumAllocation);
       }
     } finally {
-      readLock.unlock();
+      maxAllocReadLock.unlock();
     }
     return maxResource;
   }
 
   protected void initMaximumResourceCapability(Resource maximumAllocation) {
-    ReentrantReadWriteLock.WriteLock writeLock =
-        maximumAllocationLock.writeLock();
-    writeLock.lock();
+    maxAllocWriteLock.lock();
     try {
       if (this.configuredMaximumAllocation == null) {
         this.configuredMaximumAllocation = Resources.clone(maximumAllocation);
         this.maximumAllocation = Resources.clone(maximumAllocation);
       }
     } finally {
-      writeLock.unlock();
+      maxAllocWriteLock.unlock();
     }
   }
 
@@ -535,19 +538,24 @@ public abstract class AbstractYarnScheduler
    */
   public synchronized void updateNodeResource(RMNode nm, 
       ResourceOption resourceOption) {
-  
     SchedulerNode node = getSchedulerNode(nm.getNodeID());
     Resource newResource = resourceOption.getResource();
     Resource oldResource = node.getTotalResource();
     if(!oldResource.equals(newResource)) {
       // Log resource change
-      LOG.info("Update resource on node: " + node.getNodeName() 
+      LOG.info("Update resource on node: " + node.getNodeName()
           + " from: " + oldResource + ", to: "
           + newResource);
 
+      nodes.remove(nm.getNodeID());
+      updateMaximumAllocation(node, false);
+
       // update resource to node
       node.setTotalResource(newResource);
-    
+
+      nodes.put(nm.getNodeID(), (N)node);
+      updateMaximumAllocation(node, true);
+
       // update resource to clusterResource
       Resources.subtractFrom(clusterResource, oldResource);
       Resources.addTo(clusterResource, newResource);
@@ -571,28 +579,27 @@ public abstract class AbstractYarnScheduler
   }
 
   protected void updateMaximumAllocation(SchedulerNode node, boolean add) {
-    ReentrantReadWriteLock.WriteLock writeLock =
-        maximumAllocationLock.writeLock();
-    writeLock.lock();
+    Resource totalResource = node.getTotalResource();
+    maxAllocWriteLock.lock();
     try {
       if (add) { // added node
-        int nodeMemory = node.getTotalResource().getMemory();
+        int nodeMemory = totalResource.getMemory();
         if (nodeMemory > maxNodeMemory) {
           maxNodeMemory = nodeMemory;
           maximumAllocation.setMemory(Math.min(
               configuredMaximumAllocation.getMemory(), maxNodeMemory));
         }
-        int nodeVCores = node.getTotalResource().getVirtualCores();
+        int nodeVCores = totalResource.getVirtualCores();
         if (nodeVCores > maxNodeVCores) {
           maxNodeVCores = nodeVCores;
           maximumAllocation.setVirtualCores(Math.min(
               configuredMaximumAllocation.getVirtualCores(), maxNodeVCores));
         }
       } else {  // removed node
-        if (maxNodeMemory == node.getTotalResource().getMemory()) {
+        if (maxNodeMemory == totalResource.getMemory()) {
           maxNodeMemory = -1;
         }
-        if (maxNodeVCores == node.getTotalResource().getVirtualCores()) {
+        if (maxNodeVCores == totalResource.getVirtualCores()) {
           maxNodeVCores = -1;
         }
         // We only have to iterate through the nodes if the current max memory
@@ -625,7 +632,7 @@ public abstract class AbstractYarnScheduler
         }
       }
     } finally {
-      writeLock.unlock();
+      maxAllocWriteLock.unlock();
     }
   }
 }
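
In short, a node-resource update now re-runs the maximum-allocation bookkeeping, so the value
returned by getMaximumResourceCapability() follows resize events as well as node add/remove.
Roughly the flow the new test below exercises (node1 and the resource values are examples only):

  scheduler.handle(new NodeAddedSchedulerEvent(node1));    // max tracks the largest node
  scheduler.updateNodeResource(node1,
      ResourceOption.newInstance(Resource.newInstance(4096, 10), 0));
  Resource max = scheduler.getMaximumResourceCapability(); // now 4096 MB / 10 vcores,
                                                           // capped by the configured maximum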

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1ca84c1f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestAbstractYarnScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestAbstractYarnScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestAbstractYarnScheduler.java
index 27b20d4..48ce822 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestAbstractYarnScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestAbstractYarnScheduler.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.yarn.server.resourcemanager.scheduler;
 
 import org.apache.hadoop.yarn.api.records.NodeId;
 import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.api.records.ResourceOption;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.server.resourcemanager.MockNodes;
 import org.apache.hadoop.yarn.server.resourcemanager.MockRM;
@@ -279,6 +280,67 @@ public class TestAbstractYarnScheduler extends ParameterizedSchedulerTestBase {
     }
   }
 
+  @Test
+  public void testMaxAllocationAfterUpdateNodeResource() throws IOException {
+    final int configuredMaxVCores = 20;
+    final int configuredMaxMemory = 10 * 1024;
+    Resource configuredMaximumResource = Resource.newInstance
+        (configuredMaxMemory, configuredMaxVCores);
+
+    configureScheduler();
+    YarnConfiguration conf = getConf();
+    conf.setInt(YarnConfiguration.RM_SCHEDULER_MAXIMUM_ALLOCATION_VCORES,
+        configuredMaxVCores);
+    conf.setInt(YarnConfiguration.RM_SCHEDULER_MAXIMUM_ALLOCATION_MB,
+        configuredMaxMemory);
+    conf.setLong(
+        YarnConfiguration.RM_WORK_PRESERVING_RECOVERY_SCHEDULING_WAIT_MS,
+        0);
+
+    MockRM rm = new MockRM(conf);
+    try {
+      rm.start();
+      AbstractYarnScheduler scheduler = (AbstractYarnScheduler) rm
+          .getResourceScheduler();
+      verifyMaximumResourceCapability(configuredMaximumResource, scheduler);
+
+      Resource resource1 = Resource.newInstance(2048, 5);
+      Resource resource2 = Resource.newInstance(4096, 10);
+      Resource resource3 = Resource.newInstance(512, 1);
+      Resource resource4 = Resource.newInstance(1024, 2);
+
+      RMNode node1 = MockNodes.newNodeInfo(
+          0, resource1, 1, "127.0.0.2");
+      scheduler.handle(new NodeAddedSchedulerEvent(node1));
+      RMNode node2 = MockNodes.newNodeInfo(
+          0, resource3, 2, "127.0.0.3");
+      scheduler.handle(new NodeAddedSchedulerEvent(node2));
+      verifyMaximumResourceCapability(resource1, scheduler);
+
+      // increase node1 resource
+      scheduler.updateNodeResource(node1, ResourceOption.newInstance(
+          resource2, 0));
+      verifyMaximumResourceCapability(resource2, scheduler);
+
+      // decrease node1 resource
+      scheduler.updateNodeResource(node1, ResourceOption.newInstance(
+          resource1, 0));
+      verifyMaximumResourceCapability(resource1, scheduler);
+
+      // increase node2 resource
+      scheduler.updateNodeResource(node2, ResourceOption.newInstance(
+          resource4, 0));
+      verifyMaximumResourceCapability(resource1, scheduler);
+
+      // decrease node2 resource
+      scheduler.updateNodeResource(node2, ResourceOption.newInstance(
+          resource3, 0));
+      verifyMaximumResourceCapability(resource1, scheduler);
+    } finally {
+      rm.stop();
+    }
+  }
+
   private void verifyMaximumResourceCapability(
       Resource expectedMaximumResource, AbstractYarnScheduler scheduler) {
 


[10/34] hadoop git commit: HDFS-3689. Add support for variable length block. Contributed by Jing Zhao.

Posted by zh...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/32548f4b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
index a3ac455..38fc637 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
@@ -633,15 +633,16 @@ class NameNodeRpcServer implements NamenodeProtocols {
   }
 
   @Override // ClientProtocol
-  public LastBlockWithStatus append(String src, String clientName) 
-      throws IOException {
+  public LastBlockWithStatus append(String src, String clientName,
+      EnumSetWritable<CreateFlag> flag) throws IOException {
     checkNNStartup();
     String clientMachine = getClientMachine();
     if (stateChangeLog.isDebugEnabled()) {
       stateChangeLog.debug("*DIR* NameNode.append: file "
           +src+" for "+clientName+" at "+clientMachine);
     }
-    CacheEntryWithPayload cacheEntry = RetryCache.waitForCompletion(retryCache, null);
+    CacheEntryWithPayload cacheEntry = RetryCache.waitForCompletion(retryCache,
+        null);
     if (cacheEntry != null && cacheEntry.isSuccess()) {
       return (LastBlockWithStatus) cacheEntry.getPayload();
     }
@@ -649,7 +650,7 @@ class NameNodeRpcServer implements NamenodeProtocols {
     LastBlockWithStatus info = null;
     boolean success = false;
     try {
-      info = namesystem.appendFile(src, clientName, clientMachine,
+      info = namesystem.appendFile(src, clientName, clientMachine, flag.get(),
           cacheEntry != null);
       success = true;
     } finally {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/32548f4b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto
index 5c9f752..34564d3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto
@@ -66,6 +66,7 @@ enum CreateFlagProto {
   OVERWRITE = 0x02; // Truncate/overwrite a file. Same as POSIX O_TRUNC
   APPEND = 0x04;    // Append to a file
   LAZY_PERSIST = 0x10; // File with reduced durability guarantees.
+  NEW_BLOCK = 0x20; // Write data to a new block when appending
 }
 
 message CreateRequestProto {
@@ -86,6 +87,7 @@ message CreateResponseProto {
 message AppendRequestProto {
   required string src = 1;
   required string clientName = 2;
+  optional uint32 flag = 3; // bits set using CreateFlag
 }
 
 message AppendResponseProto {
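
On the client side the new bit is set through the EnumSet overload of append (see the
TestFileAppend changes below); the flag field above then carries APPEND | NEW_BLOCK
(0x04 | 0x20) over the wire. A minimal sketch, with fs a DistributedFileSystem and the
path and data being examples only:

  FSDataOutputStream out = fs.append(path,
      EnumSet.of(CreateFlag.APPEND, CreateFlag.NEW_BLOCK), 4096, null);
  out.write(data);   // this data starts a new block instead of filling the last one
  out.close();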

http://git-wip-us.apache.org/repos/asf/hadoop/blob/32548f4b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/inotify.proto
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/inotify.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/inotify.proto
index e50f14b..5b78fe6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/inotify.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/inotify.proto
@@ -89,6 +89,7 @@ message CloseEventProto {
 
 message AppendEventProto {
   required string path = 1;
+  optional bool newBlock = 2 [default = false];
 }
 
 message RenameEventProto {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/32548f4b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/AppendTestUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/AppendTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/AppendTestUtil.java
index eab44be..68a85b6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/AppendTestUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/AppendTestUtil.java
@@ -136,6 +136,22 @@ public class AppendTestUtil {
     }
   }
 
+  public static void check(DistributedFileSystem fs, Path p, int position,
+      int length) throws IOException {
+    byte[] buf = new byte[length];
+    int i = 0;
+    try {
+      FSDataInputStream in = fs.open(p);
+      in.read(position, buf, 0, buf.length);
+      for(i = position; i < length + position; i++) {
+        assertEquals((byte) i, buf[i - position]);
+      }
+      in.close();
+    } catch(IOException ioe) {
+      throw new IOException("p=" + p + ", length=" + length + ", i=" + i, ioe);
+    }
+  }
+
   /**
    *  create a buffer that contains the entire test file data.
    */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/32548f4b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
index 0eef46f..126827a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
@@ -1132,6 +1132,9 @@ public class DFSTestUtil {
     FSDataOutputStream s = filesystem.create(pathFileCreate);
     // OP_CLOSE 9
     s.close();
+    // OP_APPEND 47
+    FSDataOutputStream s2 = filesystem.append(pathFileCreate, 4096, null);
+    s2.close();
     // OP_SET_STORAGE_POLICY 45
     filesystem.setStoragePolicy(pathFileCreate,
         HdfsConstants.HOT_STORAGE_POLICY_NAME);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/32548f4b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSInotifyEventInputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSInotifyEventInputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSInotifyEventInputStream.java
index 75a4ad4..4f449d1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSInotifyEventInputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSInotifyEventInputStream.java
@@ -21,6 +21,7 @@ package org.apache.hadoop.hdfs;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CreateFlag;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.XAttrSetFlag;
@@ -71,7 +72,7 @@ public class TestDFSInotifyEventInputStream {
    */
   @Test
   public void testOpcodeCount() {
-    Assert.assertEquals(48, FSEditLogOpCodes.values().length);
+    Assert.assertEquals(49, FSEditLogOpCodes.values().length);
   }
 
 
@@ -109,7 +110,8 @@ public class TestDFSInotifyEventInputStream {
       os.write(new byte[BLOCK_SIZE]);
       os.close(); // CloseOp -> CloseEvent
       // AddOp -> AppendEvent
-      os = client.append("/file2", BLOCK_SIZE, null, null);
+      os = client.append("/file2", BLOCK_SIZE, EnumSet.of(CreateFlag.APPEND),
+          null, null);
       os.write(new byte[BLOCK_SIZE]);
       os.close(); // CloseOp -> CloseEvent
       Thread.sleep(10); // so that the atime will get updated on the next line
@@ -182,13 +184,14 @@ public class TestDFSInotifyEventInputStream {
       Assert.assertTrue(ce2.getFileSize() > 0);
       Assert.assertTrue(ce2.getTimestamp() > 0);
 
-      // AddOp
+      // AppendOp
       batch = waitForNextEvents(eis);
       Assert.assertEquals(1, batch.getEvents().length);
       txid = checkTxid(batch, txid);
       Assert.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.APPEND);
       Event.AppendEvent append2 = (Event.AppendEvent)batch.getEvents()[0];
       Assert.assertEquals("/file2", append2.getPath());
+      Assert.assertFalse(append2.toNewBlock());
 
       // CloseOp
       batch = waitForNextEvents(eis);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/32548f4b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java
index 34c701d..3cb72ff 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java
@@ -25,10 +25,12 @@ import java.io.File;
 import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.net.InetSocketAddress;
+import java.util.EnumSet;
 import java.util.List;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.BlockLocation;
+import org.apache.hadoop.fs.CreateFlag;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.HardLink;
@@ -344,7 +346,46 @@ public class TestFileAppend{
       cluster.shutdown();
     }
   }
+
+  /** Test two consecutive appends on a file with a full block. */
+  @Test
+  public void testAppend2Twice() throws Exception {
+    Configuration conf = new HdfsConfiguration();
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
+    final DistributedFileSystem fs1 = cluster.getFileSystem();
+    final FileSystem fs2 = AppendTestUtil.createHdfsWithDifferentUsername(conf);
+    try {
+      final Path p = new Path("/testAppendTwice/foo");
+      final int len = 1 << 16;
+      final byte[] fileContents = AppendTestUtil.initBuffer(len);
+
+      {
+        // create a new file with a full block.
+        FSDataOutputStream out = fs2.create(p, true, 4096, (short)1, len);
+        out.write(fileContents, 0, len);
+        out.close();
+      }
   
+      //1st append does not add any data so that the last block remains full
+      //and the last block in INodeFileUnderConstruction is a BlockInfo
+      //but not BlockInfoUnderConstruction.
+      ((DistributedFileSystem) fs2).append(p,
+          EnumSet.of(CreateFlag.APPEND, CreateFlag.NEW_BLOCK), 4096, null);
+
+      // 2nd append should get AlreadyBeingCreatedException
+      fs1.append(p);
+      Assert.fail();
+    } catch(RemoteException re) {
+      AppendTestUtil.LOG.info("Got an exception:", re);
+      Assert.assertEquals(AlreadyBeingCreatedException.class.getName(),
+          re.getClassName());
+    } finally {
+      fs2.close();
+      fs1.close();
+      cluster.shutdown();
+    }
+  }
+
   /** Tests appending after soft-limit expires. */
   @Test
   public void testAppendAfterSoftLimit() 
@@ -386,6 +427,54 @@ public class TestFileAppend{
     }
   }
 
+  /** Tests appending to a new block after the soft-limit expires. */
+  @Test
+  public void testAppend2AfterSoftLimit() throws Exception {
+    Configuration conf = new HdfsConfiguration();
+    conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);
+    //Set small soft-limit for lease
+    final long softLimit = 1L;
+    final long hardLimit = 9999999L;
+
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
+        .build();
+    cluster.setLeasePeriod(softLimit, hardLimit);
+    cluster.waitActive();
+
+    DistributedFileSystem fs = cluster.getFileSystem();
+    DistributedFileSystem fs2 = new DistributedFileSystem();
+    fs2.initialize(fs.getUri(), conf);
+
+    final Path testPath = new Path("/testAppendAfterSoftLimit");
+    final byte[] fileContents = AppendTestUtil.initBuffer(32);
+
+    // create a new file without closing
+    FSDataOutputStream out = fs.create(testPath);
+    out.write(fileContents);
+
+    //Wait for > soft-limit
+    Thread.sleep(250);
+
+    try {
+      FSDataOutputStream appendStream2 = fs2.append(testPath,
+          EnumSet.of(CreateFlag.APPEND, CreateFlag.NEW_BLOCK), 4096, null);
+      appendStream2.write(fileContents);
+      appendStream2.close();
+      assertEquals(fileContents.length, fs.getFileStatus(testPath).getLen());
+      // make sure we now have 1 block since the first writer was revoked
+      LocatedBlocks blks = fs.getClient().getLocatedBlocks(testPath.toString(),
+          0L);
+      assertEquals(1, blks.getLocatedBlocks().size());
+      for (LocatedBlock blk : blks.getLocatedBlocks()) {
+        assertEquals(fileContents.length, blk.getBlockSize());
+      }
+    } finally {
+      fs.close();
+      fs2.close();
+      cluster.shutdown();
+    }
+  }
+
   /**
    * Old replica of the block should not be accepted as valid for append/read
    */
@@ -439,4 +528,77 @@ public class TestFileAppend{
     }
   }
 
+  /**
+   * Tests multiple appends with CreateFlag.NEW_BLOCK across datanode and namenode restarts.
+   */
+  @Test
+  public void testMultiAppend2() throws Exception {
+    Configuration conf = new HdfsConfiguration();
+    conf.set("dfs.client.block.write.replace-datanode-on-failure.enable",
+        "false");
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3)
+        .build();
+    DistributedFileSystem fs = null;
+    final String hello = "hello\n";
+    try {
+      fs = cluster.getFileSystem();
+      Path path = new Path("/test");
+      FSDataOutputStream out = fs.create(path);
+      out.writeBytes(hello);
+      out.close();
+
+      // stop one datanode
+      DataNodeProperties dnProp = cluster.stopDataNode(0);
+      String dnAddress = dnProp.datanode.getXferAddress().toString();
+      if (dnAddress.startsWith("/")) {
+        dnAddress = dnAddress.substring(1);
+      }
+
+      // append again to bump genstamps
+      for (int i = 0; i < 2; i++) {
+        out = fs.append(path,
+            EnumSet.of(CreateFlag.APPEND, CreateFlag.NEW_BLOCK), 4096, null);
+        out.writeBytes(hello);
+        out.close();
+      }
+
+      // re-open and make the block state as underconstruction
+      out = fs.append(path, EnumSet.of(CreateFlag.APPEND, CreateFlag.NEW_BLOCK),
+          4096, null);
+      cluster.restartDataNode(dnProp, true);
+      // wait till the block report comes
+      Thread.sleep(2000);
+      out.writeBytes(hello);
+      out.close();
+      // check the block locations
+      LocatedBlocks blocks = fs.getClient().getLocatedBlocks(path.toString(), 0L);
+      // since we appended the file 3 times, there should be 4 blocks
+      assertEquals(4, blocks.getLocatedBlocks().size());
+      for (LocatedBlock block : blocks.getLocatedBlocks()) {
+        assertEquals(hello.length(), block.getBlockSize());
+      }
+      StringBuilder sb = new StringBuilder();
+      for (int i = 0; i < 4; i++) {
+        sb.append(hello);
+      }
+      final byte[] content = sb.toString().getBytes();
+      AppendTestUtil.checkFullFile(fs, path, content.length, content,
+          "Read /test");
+
+      // restart namenode to make sure the editlog can be properly applied
+      cluster.restartNameNode(true);
+      cluster.waitActive();
+      AppendTestUtil.checkFullFile(fs, path, content.length, content,
+          "Read /test");
+      blocks = fs.getClient().getLocatedBlocks(path.toString(), 0L);
+      // since we appended to the file 3 times, there should be 4 blocks
+      assertEquals(4, blocks.getLocatedBlocks().size());
+      for (LocatedBlock block : blocks.getLocatedBlocks()) {
+        assertEquals(hello.length(), block.getBlockSize());
+      }
+    } finally {
+      IOUtils.closeStream(fs);
+      cluster.shutdown();
+    }
+  }
 }
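
For readers following the new tests above: the append-to-new-block behaviour they exercise comes down to the four-argument append overload with CreateFlag.NEW_BLOCK. A minimal sketch of the pattern (helper name and variables are illustrative, not part of the patch; it assumes an already-initialized DistributedFileSystem and mirrors the calls used in the tests):

import java.io.IOException;
import java.util.EnumSet;

import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;

/** Hypothetical helper: append data so that it starts a new block. */
static void appendToNewBlock(DistributedFileSystem fs, Path path, byte[] data)
    throws IOException {
  // CreateFlag.APPEND alone extends the last (possibly partial) block; adding
  // CreateFlag.NEW_BLOCK makes the appended bytes start a block of their own.
  FSDataOutputStream out = fs.append(path,
      EnumSet.of(CreateFlag.APPEND, CreateFlag.NEW_BLOCK), 4096, null);
  try {
    out.write(data);
  } finally {
    out.close();
  }
  // The last block in the file should now be exactly data.length bytes long.
  for (LocatedBlock blk : fs.getClient()
      .getLocatedBlocks(path.toString(), 0L).getLocatedBlocks()) {
    System.out.println("block size = " + blk.getBlockSize());
  }
}

getClient()/getLocatedBlocks() are internal, test-facing APIs; they appear here only because the tests above use them to verify the block layout.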

http://git-wip-us.apache.org/repos/asf/hadoop/blob/32548f4b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend2.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend2.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend2.java
index eecd23b..99d04dc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend2.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend2.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hdfs;
 
+import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
@@ -24,14 +25,18 @@ import static org.junit.Assert.fail;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.EnumSet;
+import java.util.List;
 
 import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CreateFlag;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
@@ -67,11 +72,7 @@ public class TestFileAppend2 {
   final int numberOfFiles = 50;
   final int numThreads = 10;
   final int numAppendsPerThread = 20;
-/***
-  int numberOfFiles = 1;
-  int numThreads = 1;
-  int numAppendsPerThread = 2000;
-****/
+
   Workload[] workload = null;
   final ArrayList<Path> testFiles = new ArrayList<Path>();
   volatile static boolean globalStatus = true;
@@ -229,16 +230,170 @@ public class TestFileAppend2 {
     }
   }
 
+  /**
+   * Creates one file, writes a few bytes to it, and then closes it.
+   * Reopens the same file for appending using the append2 API, writes the
+   * remaining data, and then closes it. Verifies that all data exists in the file.
+   */
+  @Test
+  public void testSimpleAppend2() throws Exception {
+    final Configuration conf = new HdfsConfiguration();
+    if (simulatedStorage) {
+      SimulatedFSDataset.setFactory(conf);
+    }
+    conf.setInt(DFSConfigKeys.DFS_DATANODE_HANDLER_COUNT_KEY, 50);
+    fileContents = AppendTestUtil.initBuffer(AppendTestUtil.FILE_SIZE);
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
+    DistributedFileSystem fs = cluster.getFileSystem();
+    try {
+      { // test appending to a file.
+        // create a new file.
+        Path file1 = new Path("/simpleAppend.dat");
+        FSDataOutputStream stm = AppendTestUtil.createFile(fs, file1, 1);
+        System.out.println("Created file simpleAppend.dat");
+
+        // write to file
+        int mid = 186;   // io.bytes.per.checksum bytes
+        System.out.println("Writing " + mid + " bytes to file " + file1);
+        stm.write(fileContents, 0, mid);
+        stm.close();
+        System.out.println("Wrote and Closed first part of file.");
+
+        // write to file
+        int mid2 = 607;   // io.bytes.per.checksum bytes
+        System.out.println("Writing " + (mid2 - mid) + " bytes to file " + file1);
+        stm = fs.append(file1,
+            EnumSet.of(CreateFlag.APPEND, CreateFlag.NEW_BLOCK), 4096, null);
+        stm.write(fileContents, mid, mid2-mid);
+        stm.close();
+        System.out.println("Wrote and Closed second part of file.");
+
+        // write the remainder of the file
+        stm = fs.append(file1,
+            EnumSet.of(CreateFlag.APPEND, CreateFlag.NEW_BLOCK), 4096, null);
+        // ensure getPos is set to reflect existing size of the file
+        assertTrue(stm.getPos() > 0);
+        System.out.println("Writing " + (AppendTestUtil.FILE_SIZE - mid2) +
+            " bytes to file " + file1);
+        stm.write(fileContents, mid2, AppendTestUtil.FILE_SIZE - mid2);
+        System.out.println("Written the remainder of the file");
+        stm.close();
+        System.out.println("Wrote and Closed the remainder of the file.");
+
+        // verify that entire file is good
+        AppendTestUtil.checkFullFile(fs, file1, AppendTestUtil.FILE_SIZE,
+            fileContents, "Read 2");
+        // also make sure the file is split into the expected blocks
+        List<LocatedBlock> blocks = fs.getClient().getLocatedBlocks(
+            file1.toString(), 0L).getLocatedBlocks();
+        assertEquals(12, blocks.size()); // the block size is 1024
+        assertEquals(mid, blocks.get(0).getBlockSize());
+        assertEquals(mid2 - mid, blocks.get(1).getBlockSize());
+        for (int i = 2; i < 11; i++) {
+          assertEquals(AppendTestUtil.BLOCK_SIZE, blocks.get(i).getBlockSize());
+        }
+        assertEquals((AppendTestUtil.FILE_SIZE - mid2)
+            % AppendTestUtil.BLOCK_SIZE, blocks.get(11).getBlockSize());
+      }
+
+      { // test appending to a non-existing file.
+        FSDataOutputStream out = null;
+        try {
+          out = fs.append(new Path("/non-existing.dat"),
+              EnumSet.of(CreateFlag.APPEND, CreateFlag.NEW_BLOCK), 4096, null);
+          fail("Expected to have FileNotFoundException");
+        } catch(java.io.FileNotFoundException fnfe) {
+          System.out.println("Good: got " + fnfe);
+          fnfe.printStackTrace(System.out);
+        } finally {
+          IOUtils.closeStream(out);
+        }
+      }
+
+      { // test append permission.
+        // set root to all writable
+        Path root = new Path("/");
+        fs.setPermission(root, new FsPermission((short)0777));
+        fs.close();
+
+        // login as a different user
+        final UserGroupInformation superuser =
+          UserGroupInformation.getCurrentUser();
+        String username = "testappenduser";
+        String group = "testappendgroup";
+        assertFalse(superuser.getShortUserName().equals(username));
+        assertFalse(Arrays.asList(superuser.getGroupNames()).contains(group));
+        UserGroupInformation appenduser = UserGroupInformation
+            .createUserForTesting(username, new String[] { group });
+
+        fs = (DistributedFileSystem) DFSTestUtil.getFileSystemAs(appenduser,
+            conf);
+
+        // create a file
+        Path dir = new Path(root, getClass().getSimpleName());
+        Path foo = new Path(dir, "foo.dat");
+        FSDataOutputStream out = null;
+        int offset = 0;
+        try {
+          out = fs.create(foo);
+          int len = 10 + AppendTestUtil.nextInt(100);
+          out.write(fileContents, offset, len);
+          offset += len;
+        } finally {
+          IOUtils.closeStream(out);
+        }
+
+        // change dir and foo to minimal permissions.
+        fs.setPermission(dir, new FsPermission((short)0100));
+        fs.setPermission(foo, new FsPermission((short)0200));
+
+        // try append, should succeed
+        out = null;
+        try {
+          out = fs.append(foo,
+              EnumSet.of(CreateFlag.APPEND, CreateFlag.NEW_BLOCK), 4096, null);
+          int len = 10 + AppendTestUtil.nextInt(100);
+          out.write(fileContents, offset, len);
+          offset += len;
+        } finally {
+          IOUtils.closeStream(out);
+        }
+
+        // change dir to full access, but remove write permission on foo.
+        fs.setPermission(foo, new FsPermission((short)0577));
+        fs.setPermission(dir, new FsPermission((short)0777));
+
+        // try append, should fail
+        out = null;
+        try {
+          out = fs.append(foo,
+              EnumSet.of(CreateFlag.APPEND, CreateFlag.NEW_BLOCK), 4096, null);
+          fail("Expected to have AccessControlException");
+        } catch(AccessControlException ace) {
+          System.out.println("Good: got " + ace);
+          ace.printStackTrace(System.out);
+        } finally {
+          IOUtils.closeStream(out);
+        }
+      }
+    } finally {
+      fs.close();
+      cluster.shutdown();
+    }
+  }
+
   //
   // an object that does a bunch of appends to files
   //
   class Workload extends Thread {
     private final int id;
     private final MiniDFSCluster cluster;
+    private final boolean appendToNewBlock;
 
-    Workload(MiniDFSCluster cluster, int threadIndex) {
+    Workload(MiniDFSCluster cluster, int threadIndex, boolean append2) {
       id = threadIndex;
       this.cluster = cluster;
+      this.appendToNewBlock = append2;
     }
 
     // create a bunch of files. Write to them and then verify.
@@ -261,7 +416,7 @@ public class TestFileAppend2 {
         long len = 0;
         int sizeToAppend = 0;
         try {
-          FileSystem fs = cluster.getFileSystem();
+          DistributedFileSystem fs = cluster.getFileSystem();
 
           // add a random number of bytes to file
           len = fs.getFileStatus(testfile).getLen();
@@ -285,7 +440,9 @@ public class TestFileAppend2 {
                              " appending " + sizeToAppend + " bytes " +
                              " to file " + testfile +
                              " of size " + len);
-          FSDataOutputStream stm = fs.append(testfile);
+          FSDataOutputStream stm = appendToNewBlock ? fs.append(testfile,
+                  EnumSet.of(CreateFlag.APPEND, CreateFlag.NEW_BLOCK), 4096, null)
+              : fs.append(testfile);
           stm.write(fileContents, (int)len, sizeToAppend);
           stm.close();
 
@@ -298,7 +455,7 @@ public class TestFileAppend2 {
                                  " expected size " + (len + sizeToAppend) +
                                  " waiting for namenode metadata update.");
               Thread.sleep(5000);
-            } catch (InterruptedException e) {;}
+            } catch (InterruptedException e) {}
           }
 
           assertTrue("File " + testfile + " size is " + 
@@ -306,7 +463,7 @@ public class TestFileAppend2 {
                      " but expected " + (len + sizeToAppend),
                     fs.getFileStatus(testfile).getLen() == (len + sizeToAppend));
 
-          AppendTestUtil.checkFullFile(fs, testfile, (int)(len + sizeToAppend),
+          AppendTestUtil.checkFullFile(fs, testfile, (int) (len + sizeToAppend),
               fileContents, "Read 2");
         } catch (Throwable e) {
           globalStatus = false;
@@ -331,10 +488,8 @@ public class TestFileAppend2 {
 
   /**
    * Test that appends to files at random offsets.
-   * @throws IOException an exception might be thrown
    */
-  @Test
-  public void testComplexAppend() throws IOException {
+  private void testComplexAppend(boolean appendToNewBlock) throws IOException {
     fileContents = AppendTestUtil.initBuffer(AppendTestUtil.FILE_SIZE);
     Configuration conf = new HdfsConfiguration();
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 2000);
@@ -366,7 +521,7 @@ public class TestFileAppend2 {
       // Create threads and make them run workload concurrently.
       workload = new Workload[numThreads];
       for (int i = 0; i < numThreads; i++) {
-        workload[i] = new Workload(cluster, i);
+        workload[i] = new Workload(cluster, i, appendToNewBlock);
         workload[i].start();
       }
 
@@ -390,4 +545,14 @@ public class TestFileAppend2 {
     //
     assertTrue("testComplexAppend Worker encountered exceptions.", globalStatus);
   }
+
+  @Test
+  public void testComplexAppend() throws IOException {
+    testComplexAppend(false);
+  }
+
+  @Test
+  public void testComplexAppend2() throws IOException {
+    testComplexAppend(true);
+  }
 }
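
A small point the tests above lean on (the assertTrue(stm.getPos() > 0) check in testSimpleAppend2, and assertEquals(2, stm.getPos()) later in TestFileAppend3): a stream reopened for append reports the existing file length via getPos(), so a caller can use it as the offset into its source buffer. A hedged sketch (variable names illustrative, fs being a DistributedFileSystem):

FSDataOutputStream out = fs.append(file,
    EnumSet.of(CreateFlag.APPEND, CreateFlag.NEW_BLOCK), 4096, null);
// getPos() equals the number of bytes already in the file, so it doubles as
// the offset of the not-yet-written remainder of the source buffer.
int alreadyWritten = (int) out.getPos();
out.write(fileContents, alreadyWritten, fileContents.length - alreadyWritten);
out.close();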

http://git-wip-us.apache.org/repos/asf/hadoop/blob/32548f4b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend3.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend3.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend3.java
index d5de0ff..9ebe115 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend3.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend3.java
@@ -24,7 +24,10 @@ import static org.junit.Assert.fail;
 import java.io.File;
 import java.io.IOException;
 import java.io.RandomAccessFile;
+import java.util.EnumSet;
+import java.util.List;
 
+import org.apache.hadoop.fs.CreateFlag;
 import org.mockito.invocation.InvocationOnMock;
 import static org.mockito.Mockito.spy;
 import static org.mockito.Mockito.when;
@@ -36,8 +39,6 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdfs.DFSClient;
-import org.apache.hadoop.hdfs.DFSClientAdapter;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
@@ -52,6 +53,7 @@ import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol;
 import org.apache.log4j.Level;
 import org.junit.AfterClass;
+import org.junit.Assert;
 import org.junit.BeforeClass;
 import org.junit.Test;
 
@@ -121,6 +123,32 @@ public class TestFileAppend3  {
     AppendTestUtil.check(fs, p, len1 + len2);
   }
 
+  @Test
+  public void testTC1ForAppend2() throws Exception {
+    final Path p = new Path("/TC1/foo2");
+
+    //a. Create file and write one block of data. Close file.
+    final int len1 = (int) BLOCK_SIZE;
+    {
+      FSDataOutputStream out = fs.create(p, false, buffersize, REPLICATION,
+          BLOCK_SIZE);
+      AppendTestUtil.write(out, 0, len1);
+      out.close();
+    }
+
+    // Reopen file to append. Append half block of data. Close file.
+    final int len2 = (int) BLOCK_SIZE / 2;
+    {
+      FSDataOutputStream out = fs.append(p,
+          EnumSet.of(CreateFlag.APPEND, CreateFlag.NEW_BLOCK), 4096, null);
+      AppendTestUtil.write(out, len1, len2);
+      out.close();
+    }
+
+    // b. Reopen file and read 1.5 blocks worth of data. Close file.
+    AppendTestUtil.check(fs, p, len1 + len2);
+  }
+
   /**
    * TC2: Append on non-block boundary.
    * @throws IOException an exception might be thrown
@@ -152,6 +180,40 @@ public class TestFileAppend3  {
     AppendTestUtil.check(fs, p, len1 + len2);
   }
 
+  @Test
+  public void testTC2ForAppend2() throws Exception {
+    final Path p = new Path("/TC2/foo2");
+
+    //a. Create file with one and a half blocks of data. Close file.
+    final int len1 = (int) (BLOCK_SIZE + BLOCK_SIZE / 2);
+    {
+      FSDataOutputStream out = fs.create(p, false, buffersize, REPLICATION,
+          BLOCK_SIZE);
+      AppendTestUtil.write(out, 0, len1);
+      out.close();
+    }
+
+    AppendTestUtil.check(fs, p, len1);
+
+    //   Reopen file to append quarter block of data. Close file.
+    final int len2 = (int) BLOCK_SIZE / 4;
+    {
+      FSDataOutputStream out = fs.append(p, EnumSet.of(CreateFlag.APPEND, CreateFlag.NEW_BLOCK),
+          4096, null);
+      AppendTestUtil.write(out, len1, len2);
+      out.close();
+    }
+
+    // b. Reopen file and read 1.75 blocks of data. Close file.
+    AppendTestUtil.check(fs, p, len1 + len2);
+    List<LocatedBlock> blocks = fs.getClient().getLocatedBlocks(
+        p.toString(), 0L).getLocatedBlocks();
+    Assert.assertEquals(3, blocks.size());
+    Assert.assertEquals(BLOCK_SIZE, blocks.get(0).getBlockSize());
+    Assert.assertEquals(BLOCK_SIZE / 2, blocks.get(1).getBlockSize());
+    Assert.assertEquals(BLOCK_SIZE / 4, blocks.get(2).getBlockSize());
+  }
+
   /**
    * TC5: Only one simultaneous append.
    * @throws IOException an exception might be thrown
@@ -179,18 +241,63 @@ public class TestFileAppend3  {
       AppendTestUtil.LOG.info("GOOD: got an exception", ioe);
     }
 
+    try {
+      ((DistributedFileSystem) AppendTestUtil
+          .createHdfsWithDifferentUsername(conf)).append(p,
+          EnumSet.of(CreateFlag.APPEND, CreateFlag.NEW_BLOCK), 4096, null);
+      fail("This should fail.");
+    } catch(IOException ioe) {
+      AppendTestUtil.LOG.info("GOOD: got an exception", ioe);
+    }
+
     //d. On Machine M1, close file.
     out.close();        
   }
 
+  @Test
+  public void testTC5ForAppend2() throws Exception {
+    final Path p = new Path("/TC5/foo2");
+
+    // a. Create file on Machine M1. Write half block to it. Close file.
+    {
+      FSDataOutputStream out = fs.create(p, false, buffersize, REPLICATION,
+          BLOCK_SIZE);
+      AppendTestUtil.write(out, 0, (int)(BLOCK_SIZE/2));
+      out.close();
+    }
+
+    // b. Reopen file in "append" mode on Machine M1.
+    FSDataOutputStream out = fs.append(p, EnumSet.of(CreateFlag.APPEND, CreateFlag.NEW_BLOCK),
+        4096, null);
+
+    // c. On Machine M2, reopen file in "append" mode. This should fail.
+    try {
+      ((DistributedFileSystem) AppendTestUtil
+          .createHdfsWithDifferentUsername(conf)).append(p,
+          EnumSet.of(CreateFlag.APPEND, CreateFlag.NEW_BLOCK), 4096, null);
+      fail("This should fail.");
+    } catch(IOException ioe) {
+      AppendTestUtil.LOG.info("GOOD: got an exception", ioe);
+    }
+
+    try {
+      AppendTestUtil.createHdfsWithDifferentUsername(conf).append(p);
+      fail("This should fail.");
+    } catch(IOException ioe) {
+      AppendTestUtil.LOG.info("GOOD: got an exception", ioe);
+    }
+
+    // d. On Machine M1, close file.
+    out.close();
+  }
+
   /**
    * TC7: Corrupted replicas are present.
    * @throws IOException an exception might be thrown
    */
-  @Test
-  public void testTC7() throws Exception {
+  private void testTC7(boolean appendToNewBlock) throws Exception {
     final short repl = 2;
-    final Path p = new Path("/TC7/foo");
+    final Path p = new Path("/TC7/foo" + (appendToNewBlock ? "0" : "1"));
     System.out.println("p=" + p);
     
     //a. Create file with replication factor of 2. Write half block of data. Close file.
@@ -224,7 +331,8 @@ public class TestFileAppend3  {
     //c. Open file in "append mode".  Append a new block worth of data. Close file.
     final int len2 = (int)BLOCK_SIZE; 
     {
-      FSDataOutputStream out = fs.append(p);
+      FSDataOutputStream out = appendToNewBlock ?
+          fs.append(p, EnumSet.of(CreateFlag.APPEND, CreateFlag.NEW_BLOCK), 4096, null) : fs.append(p);
       AppendTestUtil.write(out, len1, len2);
       out.close();
     }
@@ -233,13 +341,21 @@ public class TestFileAppend3  {
     AppendTestUtil.check(fs, p, len1 + len2);
   }
 
+  @Test
+  public void testTC7() throws Exception {
+    testTC7(false);
+  }
+
+  @Test
+  public void testTC7ForAppend2() throws Exception {
+    testTC7(true);
+  }
+
   /**
    * TC11: Racing rename
-   * @throws IOException an exception might be thrown
    */
-  @Test
-  public void testTC11() throws Exception {
-    final Path p = new Path("/TC11/foo");
+  private void testTC11(boolean appendToNewBlock) throws Exception {
+    final Path p = new Path("/TC11/foo" + (appendToNewBlock ? "0" : "1"));
     System.out.println("p=" + p);
 
     //a. Create file and write one block of data. Close file.
@@ -251,7 +367,9 @@ public class TestFileAppend3  {
     }
 
     //b. Reopen file in "append" mode. Append half block of data.
-    FSDataOutputStream out = fs.append(p);
+    FSDataOutputStream out = appendToNewBlock ?
+        fs.append(p, EnumSet.of(CreateFlag.APPEND, CreateFlag.NEW_BLOCK), 4096, null) :
+        fs.append(p);
     final int len2 = (int)BLOCK_SIZE/2; 
     AppendTestUtil.write(out, len1, len2);
     out.hflush();
@@ -283,13 +401,21 @@ public class TestFileAppend3  {
     }
   }
 
+  @Test
+  public void testTC11() throws Exception {
+    testTC11(false);
+  }
+
+  @Test
+  public void testTC11ForAppend2() throws Exception {
+    testTC11(true);
+  }
+
   /** 
    * TC12: Append to partial CRC chunk
-   * @throws IOException an exception might be thrown
    */
-  @Test
-  public void testTC12() throws Exception {
-    final Path p = new Path("/TC12/foo");
+  private void testTC12(boolean appendToNewBlock) throws Exception {
+    final Path p = new Path("/TC12/foo" + (appendToNewBlock ? "0" : "1"));
     System.out.println("p=" + p);
     
     //a. Create file with a block size of 64KB
@@ -305,23 +431,43 @@ public class TestFileAppend3  {
     //b. Reopen file in "append" mode. Append another 5877 bytes of data. Close file.
     final int len2 = 5877; 
     {
-      FSDataOutputStream out = fs.append(p);
+      FSDataOutputStream out = appendToNewBlock ?
+          fs.append(p, EnumSet.of(CreateFlag.APPEND, CreateFlag.NEW_BLOCK), 4096, null) :
+          fs.append(p);
       AppendTestUtil.write(out, len1, len2);
       out.close();
     }
 
     //c. Reopen file and read 25687+5877 bytes of data from file. Close file.
     AppendTestUtil.check(fs, p, len1 + len2);
+    if (appendToNewBlock) {
+      LocatedBlocks blks = fs.dfs.getLocatedBlocks(p.toString(), 0);
+      Assert.assertEquals(2, blks.getLocatedBlocks().size());
+      Assert.assertEquals(len1, blks.getLocatedBlocks().get(0).getBlockSize());
+      Assert.assertEquals(len2, blks.getLocatedBlocks().get(1).getBlockSize());
+      AppendTestUtil.check(fs, p, 0, len1);
+      AppendTestUtil.check(fs, p, len1, len2);
+    }
   }
-  
-  /** Append to a partial CRC chunk and 
-   * the first write does not fill up the partial CRC trunk
-   * *
-   * @throws IOException
-   */
+
   @Test
-  public void testAppendToPartialChunk() throws IOException {
-    final Path p = new Path("/partialChunk/foo");
+  public void testTC12() throws Exception {
+    testTC12(false);
+  }
+
+  @Test
+  public void testTC12ForAppend2() throws Exception {
+    testTC12(true);
+  }
+
+  /**
+   * Append to a partial CRC chunk and the first write does not fill up the
+   * partial CRC chunk
+   */
+  private void testAppendToPartialChunk(boolean appendToNewBlock)
+      throws IOException {
+    final Path p = new Path("/partialChunk/foo"
+        + (appendToNewBlock ? "0" : "1"));
     final int fileLen = 513;
     System.out.println("p=" + p);
     
@@ -336,7 +482,9 @@ public class TestFileAppend3  {
     System.out.println("Wrote 1 byte and closed the file " + p);
 
     // append to file
-    stm = fs.append(p);
+    stm = appendToNewBlock ?
+        fs.append(p, EnumSet.of(CreateFlag.APPEND, CreateFlag.NEW_BLOCK), 4096, null) :
+        fs.append(p);
     // Append to a partial CRC chunk
     stm.write(fileContents, 1, 1);
     stm.hflush();
@@ -345,7 +493,9 @@ public class TestFileAppend3  {
     System.out.println("Append 1 byte and closed the file " + p);
 
     // write the remainder of the file
-    stm = fs.append(p);
+    stm = appendToNewBlock ?
+        fs.append(p, EnumSet.of(CreateFlag.APPEND, CreateFlag.NEW_BLOCK), 4096, null) :
+        fs.append(p);
 
     // ensure getPos is set to reflect existing size of the file
     assertEquals(2, stm.getPos());
@@ -444,4 +594,14 @@ public class TestFileAppend3  {
     // if append was called with a stale file stat.
     doSmallAppends(file, fs, 20);
   }
+
+  @Test
+  public void testAppendToPartialChunk() throws IOException {
+    testAppendToPartialChunk(false);
+  }
+
+  @Test
+  public void testAppendToPartialChunkforAppend2() throws IOException {
+    testAppendToPartialChunk(true);
+  }
 }
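
TC5 above covers the single-writer rule for both append variants: while one client holds a file open for append, a second client's attempt to open it for append fails with an IOException, regardless of which overload it uses. A sketch of the scenario (otherFs stands for a second client instance, e.g. one created for a different user as AppendTestUtil.createHdfsWithDifferentUsername(conf) does; names are illustrative):

// Writer 1 holds the file open for append.
FSDataOutputStream out = fs.append(p,
    EnumSet.of(CreateFlag.APPEND, CreateFlag.NEW_BLOCK), 4096, null);
try {
  // Writer 2 tries to open the same file for append while writer 1 is active.
  otherFs.append(p, EnumSet.of(CreateFlag.APPEND, CreateFlag.NEW_BLOCK), 4096, null);
  // not reached: only one simultaneous appender is allowed
} catch (IOException expected) {
  // the second open-for-append is rejected while writer 1 holds the lease
}
out.close(); // once writer 1 closes, other clients may append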

http://git-wip-us.apache.org/repos/asf/hadoop/blob/32548f4b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppendRestart.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppendRestart.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppendRestart.java
index 0bca23d..a2b344c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppendRestart.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppendRestart.java
@@ -99,10 +99,11 @@ public class TestFileAppendRestart {
       // OP_ADD to create file
       // OP_ADD_BLOCK for first block
       // OP_CLOSE to close file
-      // OP_ADD to reopen file
+      // OP_APPEND to reopen file
       // OP_ADD_BLOCK for second block
       // OP_CLOSE to close file
-      assertEquals(2, (int)counts.get(FSEditLogOpCodes.OP_ADD).held);
+      assertEquals(1, (int)counts.get(FSEditLogOpCodes.OP_ADD).held);
+      assertEquals(1, (int)counts.get(FSEditLogOpCodes.OP_APPEND).held);
       assertEquals(2, (int)counts.get(FSEditLogOpCodes.OP_ADD_BLOCK).held);
       assertEquals(2, (int)counts.get(FSEditLogOpCodes.OP_CLOSE).held);
 
@@ -112,13 +113,14 @@ public class TestFileAppendRestart {
       // OP_ADD to create file
       // OP_ADD_BLOCK for first block
       // OP_CLOSE to close file
-      // OP_ADD to re-establish the lease
+      // OP_APPEND to re-establish the lease
       // OP_UPDATE_BLOCKS from the updatePipeline call (increments genstamp of last block)
       // OP_ADD_BLOCK at the start of the second block
       // OP_CLOSE to close file
       // Total: 1 OP_ADD, 1 OP_APPEND, 1 OP_UPDATE_BLOCKS, 2 OP_ADD_BLOCKs, and 2 OP_CLOSEs
        //       in addition to the ones above
-      assertEquals(2+2, (int)counts.get(FSEditLogOpCodes.OP_ADD).held);
+      assertEquals(2, (int)counts.get(FSEditLogOpCodes.OP_ADD).held);
+      assertEquals(2, (int)counts.get(FSEditLogOpCodes.OP_APPEND).held);
       assertEquals(1, (int)counts.get(FSEditLogOpCodes.OP_UPDATE_BLOCKS).held);
       assertEquals(2+2, (int)counts.get(FSEditLogOpCodes.OP_ADD_BLOCK).held);
       assertEquals(2+2, (int)counts.get(FSEditLogOpCodes.OP_CLOSE).held);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/32548f4b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHFlush.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHFlush.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHFlush.java
index 9ada95f..6bcfa71 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHFlush.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHFlush.java
@@ -31,7 +31,9 @@ import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.client.HdfsDataOutputStream.SyncFlag;
+import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
+import org.apache.hadoop.io.IOUtils;
 import org.apache.log4j.Level;
 import org.junit.Test;
 
@@ -121,7 +123,66 @@ public class TestHFlush {
       cluster.shutdown();
     }
   }
-  
+
+  /**
+   * Test hsync with END_BLOCK flag.
+   */
+  @Test
+  public void hSyncEndBlock_00() throws IOException {
+    final int preferredBlockSize = 1024;
+    Configuration conf = new HdfsConfiguration();
+    conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, preferredBlockSize);
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2)
+        .build();
+    DistributedFileSystem fileSystem = cluster.getFileSystem();
+    FSDataOutputStream stm = null;
+    try {
+      Path path = new Path("/" + fName);
+      stm = fileSystem.create(path, true, 4096, (short) 2,
+          AppendTestUtil.BLOCK_SIZE);
+      System.out.println("Created file " + path.toString());
+      ((DFSOutputStream) stm.getWrappedStream()).hsync(EnumSet
+          .of(SyncFlag.END_BLOCK));
+      long currentFileLength = fileSystem.getFileStatus(path).getLen();
+      assertEquals(0L, currentFileLength);
+      LocatedBlocks blocks = fileSystem.dfs.getLocatedBlocks(path.toString(), 0);
+      assertEquals(0, blocks.getLocatedBlocks().size());
+
+      // write a block and call hsync(end_block) at the block boundary
+      stm.write(new byte[preferredBlockSize]);
+      ((DFSOutputStream) stm.getWrappedStream()).hsync(EnumSet
+          .of(SyncFlag.END_BLOCK));
+      currentFileLength = fileSystem.getFileStatus(path).getLen();
+      assertEquals(preferredBlockSize, currentFileLength);
+      blocks = fileSystem.dfs.getLocatedBlocks(path.toString(), 0);
+      assertEquals(1, blocks.getLocatedBlocks().size());
+
+      // call hsync then call hsync(end_block) immediately
+      stm.write(new byte[preferredBlockSize / 2]);
+      stm.hsync();
+      ((DFSOutputStream) stm.getWrappedStream()).hsync(EnumSet
+          .of(SyncFlag.END_BLOCK));
+      currentFileLength = fileSystem.getFileStatus(path).getLen();
+      assertEquals(preferredBlockSize + preferredBlockSize / 2,
+          currentFileLength);
+      blocks = fileSystem.dfs.getLocatedBlocks(path.toString(), 0);
+      assertEquals(2, blocks.getLocatedBlocks().size());
+
+      stm.write(new byte[preferredBlockSize / 4]);
+      stm.hsync();
+      currentFileLength = fileSystem.getFileStatus(path).getLen();
+      assertEquals(preferredBlockSize + preferredBlockSize / 2
+          + preferredBlockSize / 4, currentFileLength);
+      blocks = fileSystem.dfs.getLocatedBlocks(path.toString(), 0);
+      assertEquals(3, blocks.getLocatedBlocks().size());
+    } finally {
+      IOUtils.cleanup(null, stm, fileSystem);
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
+
   /**
    * The test calls
    * {@link #doTheJob(Configuration, String, long, short, boolean, EnumSet)}
@@ -136,6 +197,29 @@ public class TestHFlush {
   /**
    * The test calls
    * {@link #doTheJob(Configuration, String, long, short, boolean, EnumSet)}
+   * while requiring the semantic of {@link SyncFlag#END_BLOCK}.
+   */
+  @Test
+  public void hSyncEndBlock_01() throws IOException {
+    doTheJob(new HdfsConfiguration(), fName, AppendTestUtil.BLOCK_SIZE,
+        (short) 2, true, EnumSet.of(SyncFlag.END_BLOCK));
+  }
+
+  /**
+   * The test calls
+   * {@link #doTheJob(Configuration, String, long, short, boolean, EnumSet)}
+   * while requiring the semantic of {@link SyncFlag#END_BLOCK} and
+   * {@link SyncFlag#UPDATE_LENGTH}.
+   */
+  @Test
+  public void hSyncEndBlockAndUpdateLength() throws IOException {
+    doTheJob(new HdfsConfiguration(), fName, AppendTestUtil.BLOCK_SIZE,
+        (short) 2, true, EnumSet.of(SyncFlag.END_BLOCK, SyncFlag.UPDATE_LENGTH));
+  }
+
+  /**
+   * The test calls
+   * {@link #doTheJob(Configuration, String, long, short, boolean, EnumSet)}
    * while requiring the semantic of {@link SyncFlag#UPDATE_LENGTH}.
    * Similar with {@link #hFlush_02()} , it writes a file with a custom block
    * size so the writes will be happening across block' boundaries
@@ -152,7 +236,20 @@ public class TestHFlush {
     doTheJob(conf, fName, customBlockSize, (short) 2, true,
         EnumSet.of(SyncFlag.UPDATE_LENGTH));
   }
-  
+
+  @Test
+  public void hSyncEndBlock_02() throws IOException {
+    Configuration conf = new HdfsConfiguration();
+    int customPerChecksumSize = 512;
+    int customBlockSize = customPerChecksumSize * 3;
+    // Modify default filesystem settings
+    conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, customPerChecksumSize);
+    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, customBlockSize);
+
+    doTheJob(conf, fName, customBlockSize, (short) 2, true,
+        EnumSet.of(SyncFlag.END_BLOCK));
+  }
+
   /**
    * The test calls
    * {@link #doTheJob(Configuration, String, long, short, boolean, EnumSet)}
@@ -173,7 +270,20 @@ public class TestHFlush {
     doTheJob(conf, fName, customBlockSize, (short) 2, true,
         EnumSet.of(SyncFlag.UPDATE_LENGTH));
   }
-  
+
+  @Test
+  public void hSyncEndBlock_03() throws IOException {
+    Configuration conf = new HdfsConfiguration();
+    int customPerChecksumSize = 400;
+    int customBlockSize = customPerChecksumSize * 3;
+    // Modify default filesystem settings
+    conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, customPerChecksumSize);
+    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, customBlockSize);
+
+    doTheJob(conf, fName, customBlockSize, (short) 2, true,
+        EnumSet.of(SyncFlag.END_BLOCK));
+  }
+
   /**
    * The method starts new cluster with defined Configuration; creates a file
    * with specified block_size and writes 10 equal sections in it; it also calls
@@ -197,12 +307,13 @@ public class TestHFlush {
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
                                                .numDataNodes(replicas).build();
     // Make sure we work with DFS in order to utilize all its functionality
-    DistributedFileSystem fileSystem =
-        cluster.getFileSystem();
+    DistributedFileSystem fileSystem = cluster.getFileSystem();
 
     FSDataInputStream is;
     try {
       Path path = new Path(fileName);
+      final String pathName = new Path(fileSystem.getWorkingDirectory(), path)
+          .toUri().getPath();
       FSDataOutputStream stm = fileSystem.create(path, false, 4096, replicas,
           block_size);
       System.out.println("Created file " + fileName);
@@ -210,7 +321,8 @@ public class TestHFlush {
       int tenth = AppendTestUtil.FILE_SIZE/SECTIONS;
       int rounding = AppendTestUtil.FILE_SIZE - tenth * SECTIONS;
       for (int i=0; i<SECTIONS; i++) {
-        System.out.println("Writing " + (tenth * i) + " to " + (tenth * (i+1)) + " section to file " + fileName);
+        System.out.println("Writing " + (tenth * i) + " to "
+            + (tenth * (i + 1)) + " section to file " + fileName);
         // write to the file
         stm.write(fileContent, tenth * i, tenth);
         
@@ -227,7 +339,11 @@ public class TestHFlush {
           assertEquals(
             "File size doesn't match for hsync/hflush with updating the length",
             tenth * (i + 1), currentFileLength);
+        } else if (isSync && syncFlags.contains(SyncFlag.END_BLOCK)) {
+          LocatedBlocks blocks = fileSystem.dfs.getLocatedBlocks(pathName, 0);
+          assertEquals(i + 1, blocks.getLocatedBlocks().size());
         }
+
         byte [] toRead = new byte[tenth];
         byte [] expected = new byte[tenth];
         System.arraycopy(fileContent, tenth * i, expected, 0, tenth);
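
The new hSyncEndBlock_* tests exercise SyncFlag.END_BLOCK: an hsync carrying this flag persists the outstanding data and finishes the block being written, so subsequent writes go into a new block (and, as hSyncEndBlockAndUpdateLength shows, it can be combined with SyncFlag.UPDATE_LENGTH). A minimal usage sketch mirroring the cast the tests use (path, sizes and buffers are illustrative):

FSDataOutputStream stm = fileSystem.create(new Path("/endBlockDemo"), true, 4096,
    (short) 2, blockSize);
stm.write(firstChunk);                          // goes into the current block
((DFSOutputStream) stm.getWrappedStream())
    .hsync(EnumSet.of(SyncFlag.END_BLOCK));     // flush and close out this block
stm.write(secondChunk);                         // starts a brand-new block
stm.close();

After the first hsync(END_BLOCK), getLocatedBlocks() on the file reports one finished block; once the stream is closed the second (possibly partial) block shows up as well.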

http://git-wip-us.apache.org/repos/asf/hadoop/blob/32548f4b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery.java
index b84989f..15580a5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery.java
@@ -22,8 +22,10 @@ import static org.junit.Assert.assertTrue;
 import java.io.File;
 import java.io.IOException;
 import java.io.RandomAccessFile;
+import java.util.EnumSet;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CreateFlag;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -40,6 +42,7 @@ import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.TestInterDatanodeProtocol;
 import org.apache.hadoop.hdfs.server.namenode.LeaseManager;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
+import org.apache.hadoop.io.EnumSetWritable;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.junit.After;
 import org.junit.Test;
@@ -124,7 +127,8 @@ public class TestLeaseRecovery {
     }
 
     DataNode.LOG.info("dfs.dfs.clientName=" + dfs.dfs.clientName);
-    cluster.getNameNodeRpc().append(filestr, dfs.dfs.clientName);
+    cluster.getNameNodeRpc().append(filestr, dfs.dfs.clientName,
+        new EnumSetWritable<>(EnumSet.of(CreateFlag.APPEND)));
 
     // expire lease to trigger block recovery.
     waitLeaseRecovery(cluster);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/32548f4b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyPersistFiles.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyPersistFiles.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyPersistFiles.java
index 84ac2a5..a4df4ab 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyPersistFiles.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyPersistFiles.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl;
 import com.google.common.util.concurrent.Uninterruptibles;
+import org.apache.hadoop.fs.CreateFlag;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
@@ -28,6 +29,7 @@ import org.junit.Assert;
 import org.junit.Test;
 
 import java.io.IOException;
+import java.util.EnumSet;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
@@ -234,7 +236,8 @@ public class TestLazyPersistFiles extends LazyPersistTestCase {
     makeTestFile(path, BLOCK_SIZE, true);
 
     try {
-      client.append(path.toString(), BUFFER_LENGTH, null, null).close();
+      client.append(path.toString(), BUFFER_LENGTH,
+          EnumSet.of(CreateFlag.APPEND), null, null).close();
       fail("Append to LazyPersist file did not fail as expected");
     } catch (Throwable t) {
       LOG.info("Got expected exception ", t);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/32548f4b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestHDFSConcat.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestHDFSConcat.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestHDFSConcat.java
index 6d1f452..ddf5a3e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestHDFSConcat.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestHDFSConcat.java
@@ -40,9 +40,12 @@ import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
+import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
+import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.junit.After;
+import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
 
@@ -99,7 +102,7 @@ public class TestHDFSConcat {
     HdfsFileStatus fStatus;
     FSDataInputStream stm;
     
-    String trg = new String("/trg");
+    String trg = "/trg";
     Path trgPath = new Path(trg);
     DFSTestUtil.createFile(dfs, trgPath, fileLen, REPL_FACTOR, 1);
     fStatus  = nn.getFileInfo(trg);
@@ -112,7 +115,7 @@ public class TestHDFSConcat {
     long [] lens = new long [numFiles];
     
     
-    int i = 0;
+    int i;
     for(i=0; i<files.length; i++) {
       files[i] = new Path("/file"+i);
       Path path = files[i];
@@ -385,6 +388,75 @@ public class TestHDFSConcat {
     } catch (Exception e) {
      // expected
     }
- 
+  }
+
+  /**
+   * Make sure we update the quota correctly after concat.
+   */
+  @Test
+  public void testConcatWithQuotaDecrease() throws IOException {
+    final short srcRepl = 3; // note this is different from REPL_FACTOR
+    final int srcNum = 10;
+    final Path foo = new Path("/foo");
+    final Path[] srcs = new Path[srcNum];
+    final Path target = new Path(foo, "target");
+    DFSTestUtil.createFile(dfs, target, blockSize, REPL_FACTOR, 0L);
+
+    dfs.setQuota(foo, Long.MAX_VALUE - 1, Long.MAX_VALUE - 1);
+
+    for (int i = 0; i < srcNum; i++) {
+      srcs[i] = new Path(foo, "src" + i);
+      DFSTestUtil.createFile(dfs, srcs[i], blockSize * 2, srcRepl, 0L);
+    }
+
+    ContentSummary summary = dfs.getContentSummary(foo);
+    Assert.assertEquals(11, summary.getFileCount());
+    Assert.assertEquals(blockSize * REPL_FACTOR +
+            blockSize * 2 * srcRepl * srcNum, summary.getSpaceConsumed());
+
+    dfs.concat(target, srcs);
+    summary = dfs.getContentSummary(foo);
+    Assert.assertEquals(1, summary.getFileCount());
+    Assert.assertEquals(
+        blockSize * REPL_FACTOR + blockSize * 2 * REPL_FACTOR * srcNum,
+        summary.getSpaceConsumed());
+  }
+
+  @Test
+  public void testConcatWithQuotaIncrease() throws IOException {
+    final short repl = 3;
+    final int srcNum = 10;
+    final Path foo = new Path("/foo");
+    final Path bar = new Path(foo, "bar");
+    final Path[] srcs = new Path[srcNum];
+    final Path target = new Path(bar, "target");
+    DFSTestUtil.createFile(dfs, target, blockSize, repl, 0L);
+
+    final long dsQuota = blockSize * repl + blockSize * srcNum * REPL_FACTOR;
+    dfs.setQuota(foo, Long.MAX_VALUE - 1, dsQuota);
+
+    for (int i = 0; i < srcNum; i++) {
+      srcs[i] = new Path(bar, "src" + i);
+      DFSTestUtil.createFile(dfs, srcs[i], blockSize, REPL_FACTOR, 0L);
+    }
+
+    ContentSummary summary = dfs.getContentSummary(bar);
+    Assert.assertEquals(11, summary.getFileCount());
+    Assert.assertEquals(dsQuota, summary.getSpaceConsumed());
+
+    try {
+      dfs.concat(target, srcs);
+      fail("QuotaExceededException expected");
+    } catch (RemoteException e) {
+      Assert.assertTrue(
+          e.unwrapRemoteException() instanceof QuotaExceededException);
+    }
+
+    dfs.setQuota(foo, Long.MAX_VALUE - 1, Long.MAX_VALUE - 1);
+    dfs.concat(target, srcs);
+    summary = dfs.getContentSummary(bar);
+    Assert.assertEquals(1, summary.getFileCount());
+    Assert.assertEquals(blockSize * repl * (srcNum + 1),
+        summary.getSpaceConsumed());
   }
 }
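
The quota tests above hinge on how concat re-charges the source blocks: after concat the sources' blocks belong to the target inode, so they are accounted at the target's replication factor rather than their own. With the numbers used in testConcatWithQuotaDecrease (10 sources of 2 blocks each at srcRepl = 3, one target block at REPL_FACTOR), the space consumed works out to:

  before concat:  blockSize * REPL_FACTOR  +  10 * (2 * blockSize) * 3
  after  concat:  blockSize * REPL_FACTOR  +  10 * (2 * blockSize) * REPL_FACTOR

which shrinks because srcRepl (3) is larger than REPL_FACTOR. testConcatWithQuotaIncrease runs the same move in the other direction (target replication 3, sources at REPL_FACTOR), so the post-concat usage grows to blockSize * 3 * (srcNum + 1) and, until the directory's disk-space quota is raised, the concat is rejected with a QuotaExceededException wrapped in a RemoteException.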

http://git-wip-us.apache.org/repos/asf/hadoop/blob/32548f4b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeRetryCache.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeRetryCache.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeRetryCache.java
index 3084f26..2e6b4a3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeRetryCache.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeRetryCache.java
@@ -232,14 +232,18 @@ public class TestNamenodeRetryCache {
     
     // Retried append requests succeed
     newCall();
-    LastBlockWithStatus b = nnRpc.append(src, "holder");
-    Assert.assertEquals(b, nnRpc.append(src, "holder"));
-    Assert.assertEquals(b, nnRpc.append(src, "holder"));
+    LastBlockWithStatus b = nnRpc.append(src, "holder",
+        new EnumSetWritable<>(EnumSet.of(CreateFlag.APPEND)));
+    Assert.assertEquals(b, nnRpc.append(src, "holder",
+        new EnumSetWritable<>(EnumSet.of(CreateFlag.APPEND))));
+    Assert.assertEquals(b, nnRpc.append(src, "holder",
+        new EnumSetWritable<>(EnumSet.of(CreateFlag.APPEND))));
     
     // non-retried call fails
     newCall();
     try {
-      nnRpc.append(src, "holder");
+      nnRpc.append(src, "holder",
+          new EnumSetWritable<>(EnumSet.of(CreateFlag.APPEND)));
       Assert.fail("testAppend - expected exception is not thrown");
     } catch (Exception e) {
       // Expected
@@ -409,7 +413,7 @@ public class TestNamenodeRetryCache {
 
     LightWeightCache<CacheEntry, CacheEntry> cacheSet = 
         (LightWeightCache<CacheEntry, CacheEntry>) namesystem.getRetryCache().getCacheSet();
-    assertEquals(24, cacheSet.size());
+    assertEquals(25, cacheSet.size());
     
     Map<CacheEntry, CacheEntry> oldEntries = 
         new HashMap<CacheEntry, CacheEntry>();
@@ -428,7 +432,7 @@ public class TestNamenodeRetryCache {
     assertTrue(namesystem.hasRetryCache());
     cacheSet = (LightWeightCache<CacheEntry, CacheEntry>) namesystem
         .getRetryCache().getCacheSet();
-    assertEquals(24, cacheSet.size());
+    assertEquals(25, cacheSet.size());
     iter = cacheSet.iterator();
     while (iter.hasNext()) {
       CacheEntry entry = iter.next();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/32548f4b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java
index 066fd66..916893c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java
@@ -163,7 +163,7 @@ public class TestRetryCacheWithHA {
     FSNamesystem fsn0 = cluster.getNamesystem(0);
     LightWeightCache<CacheEntry, CacheEntry> cacheSet = 
         (LightWeightCache<CacheEntry, CacheEntry>) fsn0.getRetryCache().getCacheSet();
-    assertEquals(24, cacheSet.size());
+    assertEquals(25, cacheSet.size());
     
     Map<CacheEntry, CacheEntry> oldEntries = 
         new HashMap<CacheEntry, CacheEntry>();
@@ -184,7 +184,7 @@ public class TestRetryCacheWithHA {
     FSNamesystem fsn1 = cluster.getNamesystem(1);
     cacheSet = (LightWeightCache<CacheEntry, CacheEntry>) fsn1
         .getRetryCache().getCacheSet();
-    assertEquals(24, cacheSet.size());
+    assertEquals(25, cacheSet.size());
     iter = cacheSet.iterator();
     while (iter.hasNext()) {
       CacheEntry entry = iter.next();
@@ -438,7 +438,8 @@ public class TestRetryCacheWithHA {
 
     @Override
     void invoke() throws Exception {
-      lbk = client.getNamenode().append(fileName, client.getClientName());
+      lbk = client.getNamenode().append(fileName, client.getClientName(),
+          new EnumSetWritable<>(EnumSet.of(CreateFlag.APPEND)));
     }
     
     // check if the inode of the file is under construction
@@ -701,7 +702,8 @@ public class TestRetryCacheWithHA {
       final Path filePath = new Path(file);
       DFSTestUtil.createFile(dfs, filePath, BlockSize, DataNodes, 0);
       // append to the file and leave the last block under construction
-      out = this.client.append(file, BlockSize, null, null);
+      out = this.client.append(file, BlockSize, EnumSet.of(CreateFlag.APPEND),
+          null, null);
       byte[] appendContent = new byte[100];
       new Random().nextBytes(appendContent);
       out.write(appendContent);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/32548f4b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored
index dce3f47..da8c190 100644
Binary files a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored and b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored differ


[31/34] hadoop git commit: HDFS-7423. various typos and message formatting fixes in nfs daemon and doc. (Charles Lamb via yliu)

Posted by zh...@apache.org.
HDFS-7423. various typos and message formatting fixes in nfs daemon and doc. (Charles Lamb via yliu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/840d2143
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/840d2143
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/840d2143

Branch: refs/heads/HDFS-EC
Commit: 840d21438a7b8a4a677a61ca641ac04af096f7ba
Parents: 8ced72c
Author: yliu <yl...@apache.org>
Authored: Thu Jan 29 04:56:04 2015 +0800
Committer: Zhe Zhang <zh...@apache.org>
Committed: Thu Jan 29 10:05:27 2015 -0800

----------------------------------------------------------------------
 .../hadoop/hdfs/nfs/nfs3/AsyncDataService.java  |  16 +--
 .../hadoop/hdfs/nfs/nfs3/OpenFileCtx.java       |  78 +++++------
 .../hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java    | 138 +++++++++----------
 .../hadoop/hdfs/nfs/nfs3/WriteManager.java      |  26 ++--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |   3 +
 .../org/apache/hadoop/hdfs/DFSOutputStream.java |  10 +-
 6 files changed, 136 insertions(+), 135 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/840d2143/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/AsyncDataService.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/AsyncDataService.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/AsyncDataService.java
index 429a457..ee3f90a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/AsyncDataService.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/AsyncDataService.java
@@ -22,12 +22,11 @@ import java.util.concurrent.ThreadFactory;
 import java.util.concurrent.ThreadPoolExecutor;
 import java.util.concurrent.TimeUnit;
 
-import org.apache.commons.lang.exception.ExceptionUtils;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 
 /**
- * This class is a thread pool to easily schedule async data operations.Current
+ * This class is a thread pool to easily schedule async data operations. Current
  * async data operation is write back operation. In the future, we could use it
  * for readahead operations too.
  */
@@ -69,8 +68,8 @@ public class AsyncDataService {
     }
     if (LOG.isDebugEnabled()) {
       LOG.debug("Current active thread number: " + executor.getActiveCount()
-          + " queue size:" + executor.getQueue().size()
-          + " scheduled task number:" + executor.getTaskCount());
+          + " queue size: " + executor.getQueue().size()
+          + " scheduled task number: " + executor.getTaskCount());
     }
     executor.execute(task);
   }
@@ -105,10 +104,9 @@ public class AsyncDataService {
   }
 
   /**
-   * A task for write data back to HDFS for a file. Since only one thread can
-   * write for a file, any time there should be only one task(in queue or
-   * executing) for one file existing, and this should be guaranteed by the
-   * caller.
+   * A task to write data back to HDFS for a file. Since only one thread can
+   * write to a file, there should only be one task at any time for a file
+   * (in queue or executing), and this should be guaranteed by the caller.
    */
   static class WriteBackTask implements Runnable {
 
@@ -135,7 +133,7 @@ public class AsyncDataService {
       try {
         openFileCtx.executeWriteBack();
       } catch (Throwable t) {
-        LOG.error("Asyn data service got error:", t);
+        LOG.error("Async data service got error: ", t);
       }
     }
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/840d2143/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java
index a06d1c5..9610f48 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java
@@ -211,7 +211,7 @@ class OpenFileCtx {
   private long updateNonSequentialWriteInMemory(long count) {
     long newValue = nonSequentialWriteInMemory.addAndGet(count);
     if (LOG.isDebugEnabled()) {
-      LOG.debug("Update nonSequentialWriteInMemory by " + count + " new value:"
+      LOG.debug("Update nonSequentialWriteInMemory by " + count + " new value: "
           + newValue);
     }
 
@@ -312,7 +312,7 @@ class OpenFileCtx {
     private void dump() {
       // Create dump outputstream for the first time
       if (dumpOut == null) {
-        LOG.info("Create dump file:" + dumpFilePath);
+        LOG.info("Create dump file: " + dumpFilePath);
         File dumpFile = new File(dumpFilePath);
         try {
           synchronized (this) {
@@ -367,8 +367,8 @@ class OpenFileCtx {
             updateNonSequentialWriteInMemory(-dumpedDataSize);
           }
         } catch (IOException e) {
-          LOG.error("Dump data failed:" + writeCtx + " with error:" + e
-              + " OpenFileCtx state:" + activeState);
+          LOG.error("Dump data failed: " + writeCtx + " with error: " + e
+              + " OpenFileCtx state: " + activeState);
           // Disable dump
           enabledDump = false;
           return;
@@ -428,8 +428,8 @@ class OpenFileCtx {
       return null;
     } else {
       if (xid != writeCtx.getXid()) {
-        LOG.warn("Got a repeated request, same range, with a different xid:"
-            + xid + " xid in old request:" + writeCtx.getXid());
+        LOG.warn("Got a repeated request, same range, with a different xid: "
+            + xid + " xid in old request: " + writeCtx.getXid());
         //TODO: better handling.
       }
       return writeCtx;  
@@ -441,7 +441,7 @@ class OpenFileCtx {
       IdMappingServiceProvider iug) {
     
     if (!activeState) {
-      LOG.info("OpenFileCtx is inactive, fileId:"
+      LOG.info("OpenFileCtx is inactive, fileId: "
           + request.getHandle().getFileId());
       WccData fileWcc = new WccData(latestAttr.getWccAttr(), latestAttr);
       WRITE3Response response = new WRITE3Response(Nfs3Status.NFS3ERR_IO,
@@ -523,7 +523,7 @@ class OpenFileCtx {
     int originalCount = WriteCtx.INVALID_ORIGINAL_COUNT;
     
     if (LOG.isDebugEnabled()) {
-      LOG.debug("requesed offset=" + offset + " and current offset="
+      LOG.debug("requested offset=" + offset + " and current offset="
           + cachedOffset);
     }
 
@@ -556,7 +556,7 @@ class OpenFileCtx {
     
     // Fail non-append call
     if (offset < cachedOffset) {
-      LOG.warn("(offset,count,nextOffset):" + "(" + offset + "," + count + ","
+      LOG.warn("(offset,count,nextOffset): " + "(" + offset + "," + count + ","
           + nextOffset + ")");
       return null;
     } else {
@@ -568,7 +568,7 @@ class OpenFileCtx {
           dataState);
       if (LOG.isDebugEnabled()) {
         LOG.debug("Add new write to the list with nextOffset " + cachedOffset
-            + " and requesed offset=" + offset);
+            + " and requested offset=" + offset);
       }
       if (writeCtx.getDataState() == WriteCtx.DataState.ALLOW_DUMP) {
         // update the memory size
@@ -584,7 +584,7 @@ class OpenFileCtx {
               + pendingWrites.size());
         }
       } else {
-        LOG.warn("Got a repeated request, same range, with xid:" + xid
+        LOG.warn("Got a repeated request, same range, with xid: " + xid
             + " nextOffset " + +cachedOffset + " req offset=" + offset);
       }
       return writeCtx;
@@ -662,7 +662,7 @@ class OpenFileCtx {
       // offset < nextOffset
       processOverWrite(dfsClient, request, channel, xid, iug);
     } else {
-      // The writes is added to pendingWrites.
+      // The write is added to pendingWrites.
       // Check and start writing back if necessary
       boolean startWriting = checkAndStartWrite(asyncDataService, writeCtx);
       if (!startWriting) {
@@ -674,7 +674,7 @@ class OpenFileCtx {
         // responses of the previous batch. So here send response immediately
         // for unstable non-sequential write
         if (stableHow != WriteStableHow.UNSTABLE) {
-          LOG.info("Have to change stable write to unstable write:"
+          LOG.info("Have to change stable write to unstable write: "
               + request.getStableHow());
           stableHow = WriteStableHow.UNSTABLE;
         }
@@ -719,7 +719,7 @@ class OpenFileCtx {
           + "Continue processing the perfect overwrite.");
     } catch (IOException e) {
       LOG.info("hsync failed when processing possible perfect overwrite, path="
-          + path + " error:" + e);
+          + path + " error: " + e);
       return new WRITE3Response(Nfs3Status.NFS3ERR_IO, wccData, 0, stableHow,
           Nfs3Constant.WRITE_COMMIT_VERF);
     }
@@ -728,7 +728,7 @@ class OpenFileCtx {
       fis = dfsClient.createWrappedInputStream(dfsClient.open(path));
       readCount = fis.read(offset, readbuffer, 0, count);
       if (readCount < count) {
-        LOG.error("Can't read back " + count + " bytes, partial read size:"
+        LOG.error("Can't read back " + count + " bytes, partial read size: "
             + readCount);
         return new WRITE3Response(Nfs3Status.NFS3ERR_IO, wccData, 0, stableHow,
             Nfs3Constant.WRITE_COMMIT_VERF);
@@ -757,7 +757,7 @@ class OpenFileCtx {
         postOpAttr = Nfs3Utils.getFileAttr(dfsClient, path, iug);
       } catch (IOException e) {
         LOG.info("Got error when processing perfect overwrite, path=" + path
-            + " error:" + e);
+            + " error: " + e);
         return new WRITE3Response(Nfs3Status.NFS3ERR_IO, wccData, 0, stableHow,
             Nfs3Constant.WRITE_COMMIT_VERF);
       }
@@ -808,7 +808,7 @@ class OpenFileCtx {
           ret = COMMIT_STATUS.COMMIT_ERROR;
         }
       } catch (IOException e) {
-        LOG.error("Got stream error during data sync:" + e);
+        LOG.error("Got stream error during data sync: " + e);
         // Do nothing. Stream will be closed eventually by StreamMonitor.
         // status = Nfs3Status.NFS3ERR_IO;
         ret = COMMIT_STATUS.COMMIT_ERROR;
@@ -972,7 +972,7 @@ class OpenFileCtx {
     // Check the stream timeout
     if (checkStreamTimeout(streamTimeout)) {
       if (LOG.isDebugEnabled()) {
-        LOG.debug("stream can be closed for fileId:" + fileId);
+        LOG.debug("stream can be closed for fileId: " + fileId);
       }
       flag = true;
     }
@@ -988,7 +988,7 @@ class OpenFileCtx {
   private synchronized WriteCtx offerNextToWrite() {
     if (pendingWrites.isEmpty()) {
       if (LOG.isDebugEnabled()) {
-        LOG.debug("The asyn write task has no pending writes, fileId: "
+        LOG.debug("The async write task has no pending writes, fileId: "
             + latestAttr.getFileId());
       }
       // process pending commit again to handle this race: a commit is added
@@ -1021,7 +1021,7 @@ class OpenFileCtx {
         this.asyncStatus = false;
       } else if (range.getMin() < offset && range.getMax() > offset) {
         // shouldn't happen since we do sync for overlapped concurrent writers
-        LOG.warn("Got a overlapping write (" + range.getMin() + ","
+        LOG.warn("Got an overlapping write (" + range.getMin() + ", "
             + range.getMax() + "), nextOffset=" + offset
             + ". Silently drop it now");
         pendingWrites.remove(range);
@@ -1044,10 +1044,10 @@ class OpenFileCtx {
     return null;
   }
   
-  /** Invoked by AsynDataService to write back to HDFS */
+  /** Invoked by AsyncDataService to write back to HDFS */
   void executeWriteBack() {
     Preconditions.checkState(asyncStatus,
-        "openFileCtx has false asyncStatus, fileId:" + latestAttr.getFileId());
+        "openFileCtx has false asyncStatus, fileId: " + latestAttr.getFileId());
     final long startOffset = asyncWriteBackStartOffset;  
     try {
       while (activeState) {
@@ -1072,10 +1072,10 @@ class OpenFileCtx {
         if (startOffset == asyncWriteBackStartOffset) {
           asyncStatus = false;
         } else {
-          LOG.info("Another asyn task is already started before this one"
-              + " is finalized. fileId:" + latestAttr.getFileId()
-              + " asyncStatus:" + asyncStatus + " original startOffset:"
-              + startOffset + " new startOffset:" + asyncWriteBackStartOffset
+          LOG.info("Another async task is already started before this one"
+              + " is finalized. fileId: " + latestAttr.getFileId()
+              + " asyncStatus: " + asyncStatus + " original startOffset: "
+              + startOffset + " new startOffset: " + asyncWriteBackStartOffset
               + ". Won't change asyncStatus here.");
         }
       }
@@ -1104,7 +1104,7 @@ class OpenFileCtx {
       }
       status = Nfs3Status.NFS3ERR_IO;
     } catch (IOException e) {
-      LOG.error("Got stream error during data sync:", e);
+      LOG.error("Got stream error during data sync: ", e);
       // Do nothing. Stream will be closed eventually by StreamMonitor.
       status = Nfs3Status.NFS3ERR_IO;
     }
@@ -1139,9 +1139,9 @@ class OpenFileCtx {
               new VerifierNone()), commit.getXid());
       
       if (LOG.isDebugEnabled()) {
-        LOG.debug("FileId: " + latestAttr.getFileId() + " Service time:"
+        LOG.debug("FileId: " + latestAttr.getFileId() + " Service time: "
             + Nfs3Utils.getElapsedTime(commit.startTime)
-            + "ns. Sent response for commit:" + commit);
+            + "ns. Sent response for commit: " + commit);
       }
       entry = pendingCommits.firstEntry();
     }
@@ -1158,7 +1158,7 @@ class OpenFileCtx {
     FileHandle handle = writeCtx.getHandle();
     if (LOG.isDebugEnabled()) {
       LOG.debug("do write, fileId: " + handle.getFileId() + " offset: "
-          + offset + " length:" + count + " stableHow:" + stableHow.name());
+          + offset + " length: " + count + " stableHow: " + stableHow.name());
     }
 
     try {
@@ -1183,7 +1183,7 @@ class OpenFileCtx {
             updateNonSequentialWriteInMemory(-count);
             if (LOG.isDebugEnabled()) {
               LOG.debug("After writing " + handle.getFileId() + " at offset "
-                  + offset + ", updated the memory count, new value:"
+                  + offset + ", updated the memory count, new value: "
                   + nonSequentialWriteInMemory.get());
             }
           }
@@ -1192,18 +1192,18 @@ class OpenFileCtx {
       
       if (!writeCtx.getReplied()) {
         if (stableHow != WriteStableHow.UNSTABLE) {
-          LOG.info("Do sync for stable write:" + writeCtx);
+          LOG.info("Do sync for stable write: " + writeCtx);
           try {
             if (stableHow == WriteStableHow.DATA_SYNC) {
               fos.hsync();
             } else {
               Preconditions.checkState(stableHow == WriteStableHow.FILE_SYNC,
-                  "Unknown WriteStableHow:" + stableHow);
+                  "Unknown WriteStableHow: " + stableHow);
               // Sync file data and length
               fos.hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
             }
           } catch (IOException e) {
-            LOG.error("hsync failed with writeCtx:" + writeCtx, e);
+            LOG.error("hsync failed with writeCtx: " + writeCtx, e);
             throw e;
           }
         }
@@ -1211,8 +1211,8 @@ class OpenFileCtx {
         WccAttr preOpAttr = latestAttr.getWccAttr();
         WccData fileWcc = new WccData(preOpAttr, latestAttr);
         if (writeCtx.getOriginalCount() != WriteCtx.INVALID_ORIGINAL_COUNT) {
-          LOG.warn("Return original count:" + writeCtx.getOriginalCount()
-              + " instead of real data count:" + count);
+          LOG.warn("Return original count: " + writeCtx.getOriginalCount()
+              + " instead of real data count: " + count);
           count = writeCtx.getOriginalCount();
         }
         WRITE3Response response = new WRITE3Response(Nfs3Status.NFS3_OK,
@@ -1263,8 +1263,8 @@ class OpenFileCtx {
         fos.close();
       }
     } catch (IOException e) {
-      LOG.info("Can't close stream for fileId:" + latestAttr.getFileId()
-          + ", error:" + e);
+      LOG.info("Can't close stream for fileId: " + latestAttr.getFileId()
+          + ", error: " + e);
     }
     
     // Reply error for pending writes
@@ -1272,7 +1272,7 @@ class OpenFileCtx {
     WccAttr preOpAttr = latestAttr.getWccAttr();
     while (!pendingWrites.isEmpty()) {
       OffsetRange key = pendingWrites.firstKey();
-      LOG.info("Fail pending write: (" + key.getMin() + "," + key.getMax()
+      LOG.info("Fail pending write: (" + key.getMin() + ", " + key.getMax()
           + "), nextOffset=" + nextOffset.get());
       
       WriteCtx writeCtx = pendingWrites.remove(key);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/840d2143/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java
index 9204c4d..7ca21e8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java
@@ -160,7 +160,6 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
   private final long blockSize;
   private final int bufferSize;
   private final boolean aixCompatMode;
-  private Statistics statistics;
   private String writeDumpDir; // The dir save dump files
 
   private final RpcCallCache rpcCallCache;
@@ -245,7 +244,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
       pauseMonitor.start();
       metrics.getJvmMetrics().setPauseMonitor(pauseMonitor);
     }
-    writeManager.startAsyncDataSerivce();
+    writeManager.startAsyncDataService();
     try {
       infoServer.start();
     } catch (IOException e) {
@@ -331,7 +330,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
 
     FileHandle handle = request.getHandle();
     if (LOG.isDebugEnabled()) {
-      LOG.debug("GETATTR for fileId: " + handle.getFileId() + " client:"
+      LOG.debug("GETATTR for fileId: " + handle.getFileId() + " client: "
           + remoteAddress);
     }
 
@@ -356,7 +355,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
       return response;
     }
     if (attrs == null) {
-      LOG.error("Can't get path for fileId:" + handle.getFileId());
+      LOG.error("Can't get path for fileId: " + handle.getFileId());
       response.setStatus(Nfs3Status.NFS3ERR_STALE);
       return response;
     }
@@ -372,7 +371,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
 
     if (setMode && updateFields.contains(SetAttrField.MODE)) {
       if (LOG.isDebugEnabled()) {
-        LOG.debug("set new mode:" + newAttr.getMode());
+        LOG.debug("set new mode: " + newAttr.getMode());
       }
       dfsClient.setPermission(fileIdPath,
           new FsPermission((short) (newAttr.getMode())));
@@ -392,7 +391,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
         .getMilliSeconds() : -1;
     if (atime != -1 || mtime != -1) {
       if (LOG.isDebugEnabled()) {
-        LOG.debug("set atime:" + +atime + " mtime:" + mtime);
+        LOG.debug("set atime: " + +atime + " mtime: " + mtime);
       }
       dfsClient.setTimes(fileIdPath, mtime, atime);
     }
@@ -424,7 +423,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
 
     FileHandle handle = request.getHandle();
     if (LOG.isDebugEnabled()) {
-      LOG.debug("NFS SETATTR fileId: " + handle.getFileId() + " client:"
+      LOG.debug("NFS SETATTR fileId: " + handle.getFileId() + " client: "
           + remoteAddress);
     }
 
@@ -440,7 +439,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
     try {
       preOpAttr = Nfs3Utils.getFileAttr(dfsClient, fileIdPath, iug);
       if (preOpAttr == null) {
-        LOG.info("Can't get path for fileId:" + handle.getFileId());
+        LOG.info("Can't get path for fileId: " + handle.getFileId());
         response.setStatus(Nfs3Status.NFS3ERR_STALE);
         return response;
       }
@@ -511,7 +510,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
     String fileName = request.getName();
     if (LOG.isDebugEnabled()) {
       LOG.debug("NFS LOOKUP dir fileId: " + dirHandle.getFileId() + " name: "
-          + fileName + " client:" + remoteAddress);
+          + fileName + " client: " + remoteAddress);
     }
 
     try {
@@ -520,7 +519,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
           dirHandle, fileName);
       if (postOpObjAttr == null) {
         if (LOG.isDebugEnabled()) {
-          LOG.debug("NFS LOOKUP fileId: " + dirHandle.getFileId() + " name:"
+          LOG.debug("NFS LOOKUP fileId: " + dirHandle.getFileId() + " name: "
               + fileName + " does not exist");
         }
         Nfs3FileAttributes postOpDirAttr = Nfs3Utils.getFileAttr(dfsClient,
@@ -532,7 +531,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
       Nfs3FileAttributes postOpDirAttr = Nfs3Utils.getFileAttr(dfsClient,
           dirFileIdPath, iug);
       if (postOpDirAttr == null) {
-        LOG.info("Can't get path for dir fileId:" + dirHandle.getFileId());
+        LOG.info("Can't get path for dir fileId: " + dirHandle.getFileId());
         return new LOOKUP3Response(Nfs3Status.NFS3ERR_STALE);
       }
       FileHandle fileHandle = new FileHandle(postOpObjAttr.getFileId());
@@ -579,7 +578,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
     Nfs3FileAttributes attrs;
 
     if (LOG.isDebugEnabled()) {
-      LOG.debug("NFS ACCESS fileId: " + handle.getFileId() + " client:"
+      LOG.debug("NFS ACCESS fileId: " + handle.getFileId() + " client: "
           + remoteAddress);
     }
 
@@ -588,7 +587,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
       attrs = writeManager.getFileAttr(dfsClient, handle, iug);
 
       if (attrs == null) {
-        LOG.error("Can't get path for fileId:" + handle.getFileId());
+        LOG.error("Can't get path for fileId: " + handle.getFileId());
         return new ACCESS3Response(Nfs3Status.NFS3ERR_STALE);
       }
       int access = Nfs3Utils.getAccessRightsForUserGroup(
@@ -646,7 +645,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
 
     FileHandle handle = request.getHandle();
     if (LOG.isDebugEnabled()) {
-      LOG.debug("NFS READLINK fileId: " + handle.getFileId() + " client:"
+      LOG.debug("NFS READLINK fileId: " + handle.getFileId() + " client: "
           + remoteAddress);
     }
 
@@ -657,15 +656,15 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
       Nfs3FileAttributes postOpAttr = Nfs3Utils.getFileAttr(dfsClient,
           fileIdPath, iug);
       if (postOpAttr == null) {
-        LOG.info("Can't get path for fileId:" + handle.getFileId());
+        LOG.info("Can't get path for fileId: " + handle.getFileId());
         return new READLINK3Response(Nfs3Status.NFS3ERR_STALE);
       }
       if (postOpAttr.getType() != NfsFileType.NFSLNK.toValue()) {
-        LOG.error("Not a symlink, fileId:" + handle.getFileId());
+        LOG.error("Not a symlink, fileId: " + handle.getFileId());
         return new READLINK3Response(Nfs3Status.NFS3ERR_INVAL);
       }
       if (target == null) {
-        LOG.error("Symlink target should not be null, fileId:"
+        LOG.error("Symlink target should not be null, fileId: "
             + handle.getFileId());
         return new READLINK3Response(Nfs3Status.NFS3ERR_SERVERFAULT);
       }
@@ -726,7 +725,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
     FileHandle handle = request.getHandle();
     if (LOG.isDebugEnabled()) {
       LOG.debug("NFS READ fileId: " + handle.getFileId() + " offset: " + offset
-          + " count: " + count + " client:" + remoteAddress);
+          + " count: " + count + " client: " + remoteAddress);
     }
 
     Nfs3FileAttributes attrs;
@@ -739,13 +738,13 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
             Nfs3Utils.getFileIdPath(handle), iug);
       } catch (IOException e) {
         if (LOG.isDebugEnabled()) {
-          LOG.debug("Get error accessing file, fileId:" + handle.getFileId(), e);
+          LOG.debug("Get error accessing file, fileId: " + handle.getFileId(), e);
         }
         return new READ3Response(Nfs3Status.NFS3ERR_IO);
       }
       if (attrs == null) {
         if (LOG.isDebugEnabled()) {
-          LOG.debug("Can't get path for fileId:" + handle.getFileId());
+          LOG.debug("Can't get path for fileId: " + handle.getFileId());
         }
         return new READ3Response(Nfs3Status.NFS3ERR_NOENT);
       }
@@ -806,8 +805,9 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
       attrs = Nfs3Utils.getFileAttr(dfsClient, Nfs3Utils.getFileIdPath(handle),
           iug);
       if (readCount < count) {
-        LOG.info("Partical read. Asked offset:" + offset + " count:" + count
-            + " and read back:" + readCount + "file size:" + attrs.getSize());
+        LOG.info("Partical read. Asked offset: " + offset + " count: " + count
+            + " and read back: " + readCount + " file size: "
+            + attrs.getSize());
       }
       // HDFS returns -1 for read beyond file size.
       if (readCount < 0) {
@@ -866,15 +866,15 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
     FileHandle handle = request.getHandle();
     if (LOG.isDebugEnabled()) {
       LOG.debug("NFS WRITE fileId: " + handle.getFileId() + " offset: "
-          + offset + " length:" + count + " stableHow:" + stableHow.getValue()
-          + " xid:" + xid + " client:" + remoteAddress);
+          + offset + " length: " + count + " stableHow: " + stableHow.getValue()
+          + " xid: " + xid + " client: " + remoteAddress);
     }
 
     Nfs3FileAttributes preOpAttr = null;
     try {
       preOpAttr = writeManager.getFileAttr(dfsClient, handle, iug);
       if (preOpAttr == null) {
-        LOG.error("Can't get path for fileId:" + handle.getFileId());
+        LOG.error("Can't get path for fileId: " + handle.getFileId());
         return new WRITE3Response(Nfs3Status.NFS3ERR_STALE);
       }
 
@@ -885,7 +885,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
       }
 
       if (LOG.isDebugEnabled()) {
-        LOG.debug("requesed offset=" + offset + " and current filesize="
+        LOG.debug("requested offset=" + offset + " and current filesize="
             + preOpAttr.getSize());
       }
 
@@ -940,7 +940,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
     String fileName = request.getName();
     if (LOG.isDebugEnabled()) {
       LOG.debug("NFS CREATE dir fileId: " + dirHandle.getFileId()
-          + " filename: " + fileName + " client:" + remoteAddress);
+          + " filename: " + fileName + " client: " + remoteAddress);
     }
 
     int createMode = request.getMode();
@@ -948,7 +948,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
         && request.getObjAttr().getUpdateFields().contains(SetAttrField.SIZE)
         && request.getObjAttr().getSize() != 0) {
       LOG.error("Setting file size is not supported when creating file: "
-          + fileName + " dir fileId:" + dirHandle.getFileId());
+          + fileName + " dir fileId: " + dirHandle.getFileId());
       return new CREATE3Response(Nfs3Status.NFS3ERR_INVAL);
     }
 
@@ -961,7 +961,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
     try {
       preOpDirAttr = Nfs3Utils.getFileAttr(dfsClient, dirFileIdPath, iug);
       if (preOpDirAttr == null) {
-        LOG.error("Can't get path for dirHandle:" + dirHandle);
+        LOG.error("Can't get path for dirHandle: " + dirHandle);
         return new CREATE3Response(Nfs3Status.NFS3ERR_STALE);
       }
 
@@ -985,7 +985,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
       fos = dfsClient.createWrappedOutputStream(
           dfsClient.create(fileIdPath, permission, flag, false, replication,
               blockSize, null, bufferSize, null),
-          statistics);
+          null);
 
       if ((createMode == Nfs3Constant.CREATE_UNCHECKED)
           || (createMode == Nfs3Constant.CREATE_GUARDED)) {
@@ -1013,7 +1013,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
         fos = null;
       } else {
         if (LOG.isDebugEnabled()) {
-          LOG.debug("Opened stream for file:" + fileName + ", fileId:"
+          LOG.debug("Opened stream for file: " + fileName + ", fileId: "
               + fileHandle.getFileId());
         }
       }
@@ -1024,7 +1024,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
         try {
           fos.close();
         } catch (IOException e1) {
-          LOG.error("Can't close stream for dirFileId:" + dirHandle.getFileId()
+          LOG.error("Can't close stream for dirFileId: " + dirHandle.getFileId()
               + " filename: " + fileName, e1);
         }
       }
@@ -1033,7 +1033,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
           dirWcc = Nfs3Utils.createWccData(Nfs3Utils.getWccAttr(preOpDirAttr),
               dfsClient, dirFileIdPath, iug);
         } catch (IOException e1) {
-          LOG.error("Can't get postOpDirAttr for dirFileId:"
+          LOG.error("Can't get postOpDirAttr for dirFileId: "
               + dirHandle.getFileId(), e1);
         }
       }
@@ -1073,7 +1073,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
     String fileName = request.getName();
     if (LOG.isDebugEnabled()) {
       LOG.debug("NFS MKDIR dirId: " + dirHandle.getFileId() + " filename: "
-          + fileName + " client:" + remoteAddress);
+          + fileName + " client: " + remoteAddress);
     }
 
     if (request.getObjAttr().getUpdateFields().contains(SetAttrField.SIZE)) {
@@ -1090,7 +1090,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
     try {
       preOpDirAttr = Nfs3Utils.getFileAttr(dfsClient, dirFileIdPath, iug);
       if (preOpDirAttr == null) {
-        LOG.info("Can't get path for dir fileId:" + dirHandle.getFileId());
+        LOG.info("Can't get path for dir fileId: " + dirHandle.getFileId());
         return new MKDIR3Response(Nfs3Status.NFS3ERR_STALE);
       }
 
@@ -1173,7 +1173,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
     String fileName = request.getName();
     if (LOG.isDebugEnabled()) {
       LOG.debug("NFS REMOVE dir fileId: " + dirHandle.getFileId()
-          + " fileName: " + fileName + " client:" + remoteAddress);
+          + " fileName: " + fileName + " client: " + remoteAddress);
     }
 
     String dirFileIdPath = Nfs3Utils.getFileIdPath(dirHandle);
@@ -1182,7 +1182,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
     try {
       preOpDirAttr =  Nfs3Utils.getFileAttr(dfsClient, dirFileIdPath, iug);
       if (preOpDirAttr == null) {
-        LOG.info("Can't get path for dir fileId:" + dirHandle.getFileId());
+        LOG.info("Can't get path for dir fileId: " + dirHandle.getFileId());
         return new REMOVE3Response(Nfs3Status.NFS3ERR_STALE);
       }
 
@@ -1254,7 +1254,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
 
     if (LOG.isDebugEnabled()) {
       LOG.debug("NFS RMDIR dir fileId: " + dirHandle.getFileId()
-          + " fileName: " + fileName + " client:" + remoteAddress);
+          + " fileName: " + fileName + " client: " + remoteAddress);
     }
 
     String dirFileIdPath = Nfs3Utils.getFileIdPath(dirHandle);
@@ -1263,7 +1263,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
     try {
       preOpDirAttr = Nfs3Utils.getFileAttr(dfsClient, dirFileIdPath, iug);
       if (preOpDirAttr == null) {
-        LOG.info("Can't get path for dir fileId:" + dirHandle.getFileId());
+        LOG.info("Can't get path for dir fileId: " + dirHandle.getFileId());
         return new RMDIR3Response(Nfs3Status.NFS3ERR_STALE);
       }
 
@@ -1341,7 +1341,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
     String toName = request.getToName();
     if (LOG.isDebugEnabled()) {
       LOG.debug("NFS RENAME from: " + fromHandle.getFileId() + "/" + fromName
-          + " to: " + toHandle.getFileId() + "/" + toName + " client:"
+          + " to: " + toHandle.getFileId() + "/" + toName + " client: "
           + remoteAddress);
     }
 
@@ -1354,14 +1354,14 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
     try {
       fromPreOpAttr = Nfs3Utils.getFileAttr(dfsClient, fromDirFileIdPath, iug);
       if (fromPreOpAttr == null) {
-        LOG.info("Can't get path for fromHandle fileId:"
+        LOG.info("Can't get path for fromHandle fileId: "
             + fromHandle.getFileId());
         return new RENAME3Response(Nfs3Status.NFS3ERR_STALE);
       }
 
       toPreOpAttr = Nfs3Utils.getFileAttr(dfsClient, toDirFileIdPath, iug);
       if (toPreOpAttr == null) {
-        LOG.info("Can't get path for toHandle fileId:" + toHandle.getFileId());
+        LOG.info("Can't get path for toHandle fileId: " + toHandle.getFileId());
         return new RENAME3Response(Nfs3Status.NFS3ERR_STALE);
       }
 
@@ -1441,7 +1441,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
     String linkIdPath = linkDirIdPath + "/" + name;
     if (LOG.isDebugEnabled()) {
       LOG.debug("NFS SYMLINK, target: " + symData + " link: " + linkIdPath
-          + " client:" + remoteAddress);
+          + " client: " + remoteAddress);
     }
 
     try {
@@ -1463,7 +1463,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
           objAttr.getFileId()), objAttr, dirWcc);
 
     } catch (IOException e) {
-      LOG.warn("Exception:" + e);
+      LOG.warn("Exception: " + e);
       int status = mapErrorStatus(e);
       response.setStatus(status);
       return response;
@@ -1529,18 +1529,18 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
     FileHandle handle = request.getHandle();
     long cookie = request.getCookie();
     if (cookie < 0) {
-      LOG.error("Invalid READDIR request, with negitve cookie:" + cookie);
+      LOG.error("Invalid READDIR request, with negative cookie: " + cookie);
       return new READDIR3Response(Nfs3Status.NFS3ERR_INVAL);
     }
     long count = request.getCount();
     if (count <= 0) {
-      LOG.info("Nonpositive count in invalid READDIR request:" + count);
+      LOG.info("Nonpositive count in invalid READDIR request: " + count);
       return new READDIR3Response(Nfs3Status.NFS3_OK);
     }
 
     if (LOG.isDebugEnabled()) {
       LOG.debug("NFS READDIR fileId: " + handle.getFileId() + " cookie: "
-          + cookie + " count: " + count + " client:" + remoteAddress);
+          + cookie + " count: " + count + " client: " + remoteAddress);
     }
 
     HdfsFileStatus dirStatus;
@@ -1551,11 +1551,11 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
       String dirFileIdPath = Nfs3Utils.getFileIdPath(handle);
       dirStatus = dfsClient.getFileInfo(dirFileIdPath);
       if (dirStatus == null) {
-        LOG.info("Can't get path for fileId:" + handle.getFileId());
+        LOG.info("Can't get path for fileId: " + handle.getFileId());
         return new READDIR3Response(Nfs3Status.NFS3ERR_STALE);
       }
       if (!dirStatus.isDir()) {
-        LOG.error("Can't readdir for regular file, fileId:"
+        LOG.error("Can't readdir for regular file, fileId: "
             + handle.getFileId());
         return new READDIR3Response(Nfs3Status.NFS3ERR_NOTDIR);
       }
@@ -1588,7 +1588,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
 
         if (dotdotStatus == null) {
           // This should not happen
-          throw new IOException("Can't get path for handle path:"
+          throw new IOException("Can't get path for handle path: "
               + dotdotFileIdPath);
         }
         dotdotFileId = dotdotStatus.getFileId();
@@ -1606,7 +1606,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
       dlisting = listPaths(dfsClient, dirFileIdPath, startAfter);
       postOpAttr = Nfs3Utils.getFileAttr(dfsClient, dirFileIdPath, iug);
       if (postOpAttr == null) {
-        LOG.error("Can't get path for fileId:" + handle.getFileId());
+        LOG.error("Can't get path for fileId: " + handle.getFileId());
         return new READDIR3Response(Nfs3Status.NFS3ERR_STALE);
       }
     } catch (IOException e) {
@@ -1687,24 +1687,24 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
     FileHandle handle = request.getHandle();
     long cookie = request.getCookie();
     if (cookie < 0) {
-      LOG.error("Invalid READDIRPLUS request, with negitve cookie:" + cookie);
+      LOG.error("Invalid READDIRPLUS request, with negative cookie: " + cookie);
       return new READDIRPLUS3Response(Nfs3Status.NFS3ERR_INVAL);
     }
     long dirCount = request.getDirCount();
     if (dirCount <= 0) {
-      LOG.info("Nonpositive dircount in invalid READDIRPLUS request:" + dirCount);
+      LOG.info("Nonpositive dircount in invalid READDIRPLUS request: " + dirCount);
       return new READDIRPLUS3Response(Nfs3Status.NFS3ERR_INVAL);
     }
     int maxCount = request.getMaxCount();
     if (maxCount <= 0) {
-      LOG.info("Nonpositive maxcount in invalid READDIRPLUS request:" + maxCount);
+      LOG.info("Nonpositive maxcount in invalid READDIRPLUS request: " + maxCount);
       return new READDIRPLUS3Response(Nfs3Status.NFS3ERR_INVAL);
     }
 
     if (LOG.isDebugEnabled()) {
       LOG.debug("NFS READDIRPLUS fileId: " + handle.getFileId() + " cookie: "
           + cookie + " dirCount: " + dirCount + " maxCount: " + maxCount
-          + " client:" + remoteAddress);
+          + " client: " + remoteAddress);
     }
 
     HdfsFileStatus dirStatus;
@@ -1716,11 +1716,11 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
       String dirFileIdPath = Nfs3Utils.getFileIdPath(handle);
       dirStatus = dfsClient.getFileInfo(dirFileIdPath);
       if (dirStatus == null) {
-        LOG.info("Can't get path for fileId:" + handle.getFileId());
+        LOG.info("Can't get path for fileId: " + handle.getFileId());
         return new READDIRPLUS3Response(Nfs3Status.NFS3ERR_STALE);
       }
       if (!dirStatus.isDir()) {
-        LOG.error("Can't readdirplus for regular file, fileId:"
+        LOG.error("Can't readdirplus for regular file, fileId: "
             + handle.getFileId());
         return new READDIRPLUS3Response(Nfs3Status.NFS3ERR_NOTDIR);
       }
@@ -1751,7 +1751,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
 
         if (dotdotStatus == null) {
           // This should not happen
-          throw new IOException("Can't get path for handle path:"
+          throw new IOException("Can't get path for handle path: "
               + dotdotFileIdPath);
         }
         dotdotFileId = dotdotStatus.getFileId();
@@ -1769,7 +1769,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
       dlisting = listPaths(dfsClient, dirFileIdPath, startAfter);
       postOpDirAttr = Nfs3Utils.getFileAttr(dfsClient, dirFileIdPath, iug);
       if (postOpDirAttr == null) {
-        LOG.info("Can't get path for fileId:" + handle.getFileId());
+        LOG.info("Can't get path for fileId: " + handle.getFileId());
         return new READDIRPLUS3Response(Nfs3Status.NFS3ERR_STALE);
       }
     } catch (IOException e) {
@@ -1801,7 +1801,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
         try {
           attr = writeManager.getFileAttr(dfsClient, childHandle, iug);
         } catch (IOException e) {
-          LOG.error("Can't get file attributes for fileId:" + fileId, e);
+          LOG.error("Can't get file attributes for fileId: " + fileId, e);
           continue;
         }
         entries[i] = new READDIRPLUS3Response.EntryPlus3(fileId,
@@ -1818,7 +1818,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
         try {
           attr = writeManager.getFileAttr(dfsClient, childHandle, iug);
         } catch (IOException e) {
-          LOG.error("Can't get file attributes for fileId:" + fileId, e);
+          LOG.error("Can't get file attributes for fileId: " + fileId, e);
           continue;
         }
         entries[i] = new READDIRPLUS3Response.EntryPlus3(fileId,
@@ -1863,7 +1863,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
 
     FileHandle handle = request.getHandle();
     if (LOG.isDebugEnabled()) {
-      LOG.debug("NFS FSSTAT fileId: " + handle.getFileId() + " client:"
+      LOG.debug("NFS FSSTAT fileId: " + handle.getFileId() + " client: "
           + remoteAddress);
     }
 
@@ -1875,7 +1875,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
       Nfs3FileAttributes attrs = writeManager.getFileAttr(dfsClient, handle,
           iug);
       if (attrs == null) {
-        LOG.info("Can't get path for fileId:" + handle.getFileId());
+        LOG.info("Can't get path for fileId: " + handle.getFileId());
         return new FSSTAT3Response(Nfs3Status.NFS3ERR_STALE);
       }
 
@@ -1938,7 +1938,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
 
     FileHandle handle = request.getHandle();
     if (LOG.isDebugEnabled()) {
-      LOG.debug("NFS FSINFO fileId: " + handle.getFileId() + " client:"
+      LOG.debug("NFS FSINFO fileId: " + handle.getFileId() + " client: "
           + remoteAddress);
     }
 
@@ -1956,7 +1956,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
       Nfs3FileAttributes attrs = Nfs3Utils.getFileAttr(dfsClient,
           Nfs3Utils.getFileIdPath(handle), iug);
       if (attrs == null) {
-        LOG.info("Can't get path for fileId:" + handle.getFileId());
+        LOG.info("Can't get path for fileId: " + handle.getFileId());
         return new FSINFO3Response(Nfs3Status.NFS3ERR_STALE);
       }
 
@@ -2005,7 +2005,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
     Nfs3FileAttributes attrs;
 
     if (LOG.isDebugEnabled()) {
-      LOG.debug("NFS PATHCONF fileId: " + handle.getFileId() + " client:"
+      LOG.debug("NFS PATHCONF fileId: " + handle.getFileId() + " client: "
           + remoteAddress);
     }
 
@@ -2013,7 +2013,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
       attrs = Nfs3Utils.getFileAttr(dfsClient, Nfs3Utils.getFileIdPath(handle),
           iug);
       if (attrs == null) {
-        LOG.info("Can't get path for fileId:" + handle.getFileId());
+        LOG.info("Can't get path for fileId: " + handle.getFileId());
         return new PATHCONF3Response(Nfs3Status.NFS3ERR_STALE);
       }
 
@@ -2057,7 +2057,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
     FileHandle handle = request.getHandle();
     if (LOG.isDebugEnabled()) {
       LOG.debug("NFS COMMIT fileId: " + handle.getFileId() + " offset="
-          + request.getOffset() + " count=" + request.getCount() + " client:"
+          + request.getOffset() + " count=" + request.getCount() + " client: "
           + remoteAddress);
     }
 
@@ -2066,7 +2066,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
     try {
       preOpAttr = Nfs3Utils.getFileAttr(dfsClient, fileIdPath, iug);
       if (preOpAttr == null) {
-        LOG.info("Can't get path for fileId:" + handle.getFileId());
+        LOG.info("Can't get path for fileId: " + handle.getFileId());
         return new COMMIT3Response(Nfs3Status.NFS3ERR_STALE);
       }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/840d2143/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/WriteManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/WriteManager.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/WriteManager.java
index 52c75ed..7810ce2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/WriteManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/WriteManager.java
@@ -101,7 +101,7 @@ public class WriteManager {
     this.fileContextCache = new OpenFileCtxCache(config, streamTimeout);
   }
 
-  void startAsyncDataSerivce() {
+  void startAsyncDataService() {
     if (asyncDataServiceStarted) {
       return;
     }
@@ -139,7 +139,7 @@ public class WriteManager {
     FileHandle fileHandle = request.getHandle();
     OpenFileCtx openFileCtx = fileContextCache.get(fileHandle);
     if (openFileCtx == null) {
-      LOG.info("No opened stream for fileId:" + fileHandle.getFileId());
+      LOG.info("No opened stream for fileId: " + fileHandle.getFileId());
 
       String fileIdPath = Nfs3Utils.getFileIdPath(fileHandle.getFileId());
       HdfsDataOutputStream fos = null;
@@ -156,14 +156,14 @@ public class WriteManager {
       } catch (RemoteException e) {
         IOException io = e.unwrapRemoteException();
         if (io instanceof AlreadyBeingCreatedException) {
-          LOG.warn("Can't append file:" + fileIdPath
-              + ". Possibly the file is being closed. Drop the request:"
+          LOG.warn("Can't append file: " + fileIdPath
+              + ". Possibly the file is being closed. Drop the request: "
               + request + ", wait for the client to retry...");
           return;
         }
         throw e;
       } catch (IOException e) {
-        LOG.error("Can't apapend to file:" + fileIdPath, e);
+        LOG.error("Can't append to file: " + fileIdPath, e);
         if (fos != null) {
           fos.close();
         }
@@ -188,7 +188,7 @@ public class WriteManager {
         try {
           fos.close();
         } catch (IOException e) {
-          LOG.error("Can't close stream for fileId:" + handle.getFileId(), e);
+          LOG.error("Can't close stream for fileId: " + handle.getFileId(), e);
         }
         // Notify client to retry
         WccData fileWcc = new WccData(latestAttr.getWccAttr(), latestAttr);
@@ -201,7 +201,7 @@ public class WriteManager {
       }
 
       if (LOG.isDebugEnabled()) {
-        LOG.debug("Opened stream for appending file:" + fileHandle.getFileId());
+        LOG.debug("Opened stream for appending file: " + fileHandle.getFileId());
       }
     }
 
@@ -220,7 +220,7 @@ public class WriteManager {
 
     if (openFileCtx == null) {
       if (LOG.isDebugEnabled()) {
-        LOG.debug("No opened stream for fileId:" + fileHandle.getFileId()
+        LOG.debug("No opened stream for fileId: " + fileHandle.getFileId()
             + " commitOffset=" + commitOffset
             + ". Return success in this case.");
       }
@@ -254,8 +254,8 @@ public class WriteManager {
         status = Nfs3Status.NFS3_OK;
         break;
       default:
-        LOG.error("Should not get commit return code:" + ret.name());
-        throw new RuntimeException("Should not get commit return code:"
+        LOG.error("Should not get commit return code: " + ret.name());
+        throw new RuntimeException("Should not get commit return code: "
             + ret.name());
       }
     }
@@ -269,7 +269,7 @@ public class WriteManager {
     OpenFileCtx openFileCtx = fileContextCache.get(fileHandle);
 
     if (openFileCtx == null) {
-      LOG.info("No opened stream for fileId:" + fileHandle.getFileId()
+      LOG.info("No opened stream for fileId: " + fileHandle.getFileId()
           + " commitOffset=" + commitOffset + ". Return success in this case.");
       status = Nfs3Status.NFS3_OK;
       
@@ -295,8 +295,8 @@ public class WriteManager {
         status = Nfs3Status.NFS3_OK;
         break;
       default:
-        LOG.error("Should not get commit return code:" + ret.name());
-        throw new RuntimeException("Should not get commit return code:"
+        LOG.error("Should not get commit return code: " + ret.name());
+        throw new RuntimeException("Should not get commit return code: "
             + ret.name());
       }
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/840d2143/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index fd29408..4932c80 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -813,6 +813,9 @@ Release 2.7.0 - UNRELEASED
     HDFS-7611. deleteSnapshot and delete of a file can leave orphaned blocks
     in the blocksMap on NameNode restart. (jing9 and Byron Wong)
 
+    HDFS-7423. various typos and message formatting fixes in nfs daemon and
+    doc. (Charles Lamb via yliu)
+
 Release 2.6.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/840d2143/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
index 8cebda1..71bf0d9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
@@ -357,9 +357,9 @@ public class DFSOutputStream extends FSOutputSummer
     
     @Override
     public String toString() {
-      return "packet seqno:" + this.seqno +
-      " offsetInBlock:" + this.offsetInBlock + 
-      " lastPacketInBlock:" + this.lastPacketInBlock +
+      return "packet seqno: " + this.seqno +
+      " offsetInBlock: " + this.offsetInBlock +
+      " lastPacketInBlock: " + this.lastPacketInBlock +
       " lastByteOffsetInBlock: " + this.getLastByteOffsetBlock();
     }
   }
@@ -2007,7 +2007,7 @@ public class DFSOutputStream extends FSOutputSummer
         // bytesCurBlock potentially incremented if there was buffered data
 
         if (DFSClient.LOG.isDebugEnabled()) {
-          DFSClient.LOG.debug("DFSClient flush():"
+          DFSClient.LOG.debug("DFSClient flush(): "
               + " bytesCurBlock=" + bytesCurBlock
               + " lastFlushOffset=" + lastFlushOffset
               + " createNewBlock=" + endBlock);
@@ -2103,7 +2103,7 @@ public class DFSOutputStream extends FSOutputSummer
       DFSClient.LOG.warn("Error while syncing", e);
       synchronized (this) {
         if (!isClosed()) {
-          lastException.set(new IOException("IOException flush:" + e));
+          lastException.set(new IOException("IOException flush: " + e));
           closeThreads(true);
         }
       }


[27/34] hadoop git commit: MAPREDUCE-6230. Fixed RMContainerAllocator to update the new AMRMToken service name properly. Contributed by Jason Lowe

Posted by zh...@apache.org.
MAPREDUCE-6230. Fixed RMContainerAllocator to update the new AMRMToken service name properly. Contributed by Jason Lowe


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/dd5946aa
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/dd5946aa
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/dd5946aa

Branch: refs/heads/HDFS-EC
Commit: dd5946aa4f9342a26f4fb69797c697a0541cdb62
Parents: d4edaed
Author: Jian He <ji...@apache.org>
Authored: Wed Jan 28 15:51:30 2015 -0800
Committer: Zhe Zhang <zh...@apache.org>
Committed: Thu Jan 29 10:05:26 2015 -0800

----------------------------------------------------------------------
 hadoop-mapreduce-project/CHANGES.txt            |  3 +
 .../v2/app/rm/RMContainerAllocator.java         |  5 +-
 .../v2/app/rm/TestRMContainerAllocator.java     | 93 ++++++++++++++++++++
 3 files changed, 98 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/dd5946aa/hadoop-mapreduce-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt
index 35ceb2e..b576c29 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -314,6 +314,9 @@ Release 2.7.0 - UNRELEASED
     MAPREDUCE-3283. mapred classpath CLI does not display the complete classpath
     (Varun Saxena via cnauroth)
 
+    MAPREDUCE-6230. Fixed RMContainerAllocator to update the new AMRMToken
+    service name properly. (Jason Lowe via jianhe)
+
 Release 2.6.0 - 2014-11-18
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dd5946aa/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerAllocator.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerAllocator.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerAllocator.java
index 0a4f2f3..1acfeec 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerAllocator.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerAllocator.java
@@ -75,6 +75,7 @@ import org.apache.hadoop.yarn.api.records.PreemptionMessage;
 import org.apache.hadoop.yarn.api.records.Priority;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.Token;
+import org.apache.hadoop.yarn.client.ClientRMProxy;
 import org.apache.hadoop.yarn.client.api.NMTokenCache;
 import org.apache.hadoop.yarn.exceptions.ApplicationAttemptNotFoundException;
 import org.apache.hadoop.yarn.exceptions.ApplicationMasterNotRegisteredException;
@@ -783,10 +784,8 @@ public class RMContainerAllocator extends RMContainerRequestor
           .getIdentifier().array(), token.getPassword().array(), new Text(
           token.getKind()), new Text(token.getService()));
     UserGroupInformation currentUGI = UserGroupInformation.getCurrentUser();
-    if (UserGroupInformation.isSecurityEnabled()) {
-      currentUGI = UserGroupInformation.getLoginUser();
-    }
     currentUGI.addToken(amrmToken);
+    amrmToken.setService(ClientRMProxy.getAMRMTokenService(getConfig()));
   }
 
   @VisibleForTesting
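
The RMContainerAllocator hunk above is the core of this fix: instead of switching to the login user when security is enabled, the rolled-over AMRMToken is added to the allocator's current UGI, and the token's service field is then set from the AMRM token service name derived from the configuration. A rough sketch of the resulting method is shown below for context; the method name and signature are assumptions, while the individual statements mirror the lines visible in the hunk.

    private void updateAMRMToken(Token token) throws IOException {
      // Convert the protocol-level token from the allocate response into a
      // security token (these lines are unchanged context in the hunk above).
      org.apache.hadoop.security.token.Token<AMRMTokenIdentifier> amrmToken =
          new org.apache.hadoop.security.token.Token<AMRMTokenIdentifier>(
              token.getIdentifier().array(), token.getPassword().array(),
              new Text(token.getKind()), new Text(token.getService()));
      // Attach the refreshed token to whatever UGI the allocator runs as,
      // rather than the login user, so it is used on the next allocate call.
      UserGroupInformation currentUGI = UserGroupInformation.getCurrentUser();
      currentUGI.addToken(amrmToken);
      // Rewrite the service so RPC clients resolve this token against the
      // AMRM token service name derived from the current configuration.
      amrmToken.setService(ClientRMProxy.getAMRMTokenService(getConfig()));
    }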

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dd5946aa/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/rm/TestRMContainerAllocator.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/rm/TestRMContainerAllocator.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/rm/TestRMContainerAllocator.java
index 3642670..4759693 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/rm/TestRMContainerAllocator.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/rm/TestRMContainerAllocator.java
@@ -31,6 +31,7 @@ import static org.mockito.Mockito.verify;
 import static org.mockito.Mockito.when;
 
 import java.io.IOException;
+import java.security.PrivilegedExceptionAction;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.EnumSet;
@@ -45,6 +46,7 @@ import java.util.concurrent.atomic.AtomicBoolean;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.io.Text;
 import org.apache.hadoop.mapreduce.MRJobConfig;
 import org.apache.hadoop.mapreduce.v2.api.records.JobId;
 import org.apache.hadoop.mapreduce.v2.api.records.JobState;
@@ -75,7 +77,9 @@ import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.net.NetworkTopology;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.security.token.TokenIdentifier;
 import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.util.Time;
 import org.apache.hadoop.yarn.api.ApplicationMasterProtocol;
 import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
@@ -110,6 +114,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.Allocation;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.SchedulerEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler;
+import org.apache.hadoop.yarn.server.resourcemanager.security.AMRMTokenSecretManager;
 import org.apache.hadoop.yarn.server.utils.BuilderUtils;
 import org.apache.hadoop.yarn.util.Clock;
 import org.apache.hadoop.yarn.util.ControlledClock;
@@ -2295,6 +2300,93 @@ public class TestRMContainerAllocator {
 
   }
 
+  @Test(timeout=60000)
+  public void testAMRMTokenUpdate() throws Exception {
+    LOG.info("Running testAMRMTokenUpdate");
+
+    final String rmAddr = "somermaddress:1234";
+    final Configuration conf = new YarnConfiguration();
+    conf.setLong(
+      YarnConfiguration.RM_AMRM_TOKEN_MASTER_KEY_ROLLING_INTERVAL_SECS, 8);
+    conf.setLong(YarnConfiguration.RM_AM_EXPIRY_INTERVAL_MS, 2000);
+    conf.set(YarnConfiguration.RM_SCHEDULER_ADDRESS, rmAddr);
+
+    final MyResourceManager rm = new MyResourceManager(conf);
+    rm.start();
+    AMRMTokenSecretManager secretMgr =
+        rm.getRMContext().getAMRMTokenSecretManager();
+    DrainDispatcher dispatcher = (DrainDispatcher) rm.getRMContext()
+        .getDispatcher();
+
+    // Submit the application
+    RMApp app = rm.submitApp(1024);
+    dispatcher.await();
+
+    MockNM amNodeManager = rm.registerNode("amNM:1234", 2048);
+    amNodeManager.nodeHeartbeat(true);
+    dispatcher.await();
+
+    final ApplicationAttemptId appAttemptId = app.getCurrentAppAttempt()
+        .getAppAttemptId();
+    final ApplicationId appId = app.getApplicationId();
+    rm.sendAMLaunched(appAttemptId);
+    dispatcher.await();
+
+    JobId jobId = MRBuilderUtils.newJobId(appAttemptId.getApplicationId(), 0);
+    final Job mockJob = mock(Job.class);
+    when(mockJob.getReport()).thenReturn(
+        MRBuilderUtils.newJobReport(jobId, "job", "user", JobState.RUNNING, 0,
+            0, 0, 0, 0, 0, 0, "jobfile", null, false, ""));
+
+    final Token<AMRMTokenIdentifier> oldToken = rm.getRMContext().getRMApps()
+        .get(appId).getRMAppAttempt(appAttemptId).getAMRMToken();
+    Assert.assertNotNull("app should have a token", oldToken);
+    UserGroupInformation testUgi = UserGroupInformation.createUserForTesting(
+        "someuser", new String[0]);
+    Token<AMRMTokenIdentifier> newToken = testUgi.doAs(
+        new PrivilegedExceptionAction<Token<AMRMTokenIdentifier>>() {
+          @Override
+          public Token<AMRMTokenIdentifier> run() throws Exception {
+            MyContainerAllocator allocator = new MyContainerAllocator(rm, conf,
+                appAttemptId, mockJob);
+
+            // Keep heartbeating until RM thinks the token has been updated
+            Token<AMRMTokenIdentifier> currentToken = oldToken;
+            long startTime = Time.monotonicNow();
+            while (currentToken == oldToken) {
+              if (Time.monotonicNow() - startTime > 20000) {
+                Assert.fail("Took too long to see AMRM token change");
+              }
+              Thread.sleep(100);
+              allocator.schedule();
+              currentToken = rm.getRMContext().getRMApps().get(appId)
+                  .getRMAppAttempt(appAttemptId).getAMRMToken();
+            }
+
+            return currentToken;
+          }
+        });
+
+    // verify there is only one AMRM token in the UGI and it matches the
+    // updated token from the RM
+    int tokenCount = 0;
+    Token<? extends TokenIdentifier> ugiToken = null;
+    for (Token<? extends TokenIdentifier> token : testUgi.getTokens()) {
+      if (AMRMTokenIdentifier.KIND_NAME.equals(token.getKind())) {
+        ugiToken = token;
+        ++tokenCount;
+      }
+    }
+
+    Assert.assertEquals("too many AMRM tokens", 1, tokenCount);
+    Assert.assertArrayEquals("token identifier not updated",
+        newToken.getIdentifier(), ugiToken.getIdentifier());
+    Assert.assertArrayEquals("token password not updated",
+        newToken.getPassword(), ugiToken.getPassword());
+    Assert.assertEquals("AMRM token service not updated",
+        new Text(rmAddr), ugiToken.getService());
+  }
+
   public static void main(String[] args) throws Exception {
     TestRMContainerAllocator t = new TestRMContainerAllocator();
     t.testSimple();
@@ -2304,6 +2396,7 @@ public class TestRMContainerAllocator {
     t.testReportedAppProgressWithOnlyMaps();
     t.testBlackListedNodes();
     t.testCompletedTasksRecalculateSchedule();
+    t.testAMRMTokenUpdate();
   }
 
 }


[06/34] hadoop git commit: YARN-3092. Created a common ResourceUsage class to track labeled resource usages in Capacity Scheduler. Contributed by Wangda Tan

Posted by zh...@apache.org.
YARN-3092. Created a common ResourceUsage class to track labeled resource usages in Capacity Scheduler. Contributed by Wangda Tan


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/21c74e62
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/21c74e62
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/21c74e62

Branch: refs/heads/HDFS-EC
Commit: 21c74e62a0d25936f096bae687ab96938a048916
Parents: 79d0787
Author: Jian He <ji...@apache.org>
Authored: Mon Jan 26 15:21:22 2015 -0800
Committer: Zhe Zhang <zh...@apache.org>
Committed: Thu Jan 29 10:05:23 2015 -0800

----------------------------------------------------------------------
 hadoop-yarn-project/CHANGES.txt                 |   3 +
 .../scheduler/ResourceUsage.java                | 332 +++++++++++++++++++
 .../scheduler/TestResourceUsage.java            | 138 ++++++++
 3 files changed, 473 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/21c74e62/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 872f16e..924bfa6 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -203,6 +203,9 @@ Release 2.7.0 - UNRELEASED
     YARN-3024. LocalizerRunner should give DIE action when all resources are
     localized. (Chengbing Liu via xgong)
 
+    YARN-3092. Created a common ResourceUsage class to track labeled resource
+    usages in Capacity Scheduler. (Wangda Tan via jianhe)
+
   OPTIMIZATIONS
 
   BUG FIXES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/21c74e62/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ResourceUsage.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ResourceUsage.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ResourceUsage.java
new file mode 100644
index 0000000..5a4cced
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ResourceUsage.java
@@ -0,0 +1,332 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler;
+
+import java.util.HashMap;
+import java.util.Map;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
+import java.util.concurrent.locks.ReentrantReadWriteLock.ReadLock;
+import java.util.concurrent.locks.ReentrantReadWriteLock.WriteLock;
+
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.nodelabels.CommonNodeLabelsManager;
+import org.apache.hadoop.yarn.util.resource.Resources;
+
+/**
+ * Resource usage tracked per node label for the following fields: AM resource
+ * (to enforce max-am-resource-by-label after YARN-2637), used resource (which
+ * includes AM resource usage), reserved resource, pending resource, headroom.
+ *
+ * This class can be used to track resource usage in queue/user/app.
+ *
+ * It is thread-safe.
+ */
+public class ResourceUsage {
+  private ReadLock readLock;
+  private WriteLock writeLock;
+  private Map<String, UsageByLabel> usages;
+  // short for no-label :)
+  private static final String NL = CommonNodeLabelsManager.NO_LABEL;
+
+  public ResourceUsage() {
+    ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
+    readLock = lock.readLock();
+    writeLock = lock.writeLock();
+
+    usages = new HashMap<String, UsageByLabel>();
+  }
+
+  // Usage enum here to make the implementation cleaner
+  private enum ResourceType {
+    USED(0), PENDING(1), AMUSED(2), RESERVED(3), HEADROOM(4);
+
+    private int value;
+
+    private ResourceType(int value) {
+      this.value = value;
+    }
+
+    public int getValue() {
+      return this.value;
+    }
+  }
+
+  private static class UsageByLabel {
+    // usage by label, contains all UsageType
+    private Resource[] resArr;
+
+    public UsageByLabel(String label) {
+      resArr = new Resource[ResourceType.values().length];
+      for (int i = 0; i < resArr.length; i++) {
+        resArr[i] = Resource.newInstance(0, 0);
+      }
+    }
+
+    public Resource get(ResourceType type) {
+      return resArr[type.getValue()];
+    }
+
+    public void set(ResourceType type, Resource res) {
+      resArr[type.getValue()] = res;
+    }
+
+    public void inc(ResourceType type, Resource res) {
+      Resources.addTo(resArr[type.getValue()], res);
+    }
+
+    public void dec(ResourceType type, Resource res) {
+      Resources.subtractFrom(resArr[type.getValue()], res);
+    }
+  }
+
+  /*
+   * Used
+   */
+  public Resource getUsed() {
+    return getUsed(NL);
+  }
+
+  public Resource getUsed(String label) {
+    return internalGet(label, ResourceType.USED);
+  }
+
+  public void incUsed(String label, Resource res) {
+    internalInc(label, ResourceType.USED, res);
+  }
+
+  public void incUsed(Resource res) {
+    incUsed(NL, res);
+  }
+
+  public void decUsed(Resource res) {
+    decUsed(NL, res);
+  }
+
+  public void decUsed(String label, Resource res) {
+    internalDec(label, ResourceType.USED, res);
+  }
+
+  public void setUsed(Resource res) {
+    setUsed(NL, res);
+  }
+
+  public void setUsed(String label, Resource res) {
+    internalSet(label, ResourceType.USED, res);
+  }
+
+  /*
+   * Pending
+   */
+  public Resource getPending() {
+    return getPending(NL);
+  }
+
+  public Resource getPending(String label) {
+    return internalGet(label, ResourceType.PENDING);
+  }
+
+  public void incPending(String label, Resource res) {
+    internalInc(label, ResourceType.PENDING, res);
+  }
+
+  public void incPending(Resource res) {
+    incPending(NL, res);
+  }
+
+  public void decPending(Resource res) {
+    decPending(NL, res);
+  }
+
+  public void decPending(String label, Resource res) {
+    internalDec(label, ResourceType.PENDING, res);
+  }
+
+  public void setPending(Resource res) {
+    setPending(NL, res);
+  }
+
+  public void setPending(String label, Resource res) {
+    internalSet(label, ResourceType.PENDING, res);
+  }
+
+  /*
+   * Reserved
+   */
+  public Resource getReserved() {
+    return getReserved(NL);
+  }
+
+  public Resource getReserved(String label) {
+    return internalGet(label, ResourceType.RESERVED);
+  }
+
+  public void incReserved(String label, Resource res) {
+    internalInc(label, ResourceType.RESERVED, res);
+  }
+
+  public void incReserved(Resource res) {
+    incReserved(NL, res);
+  }
+
+  public void decReserved(Resource res) {
+    decReserved(NL, res);
+  }
+
+  public void decReserved(String label, Resource res) {
+    internalDec(label, ResourceType.RESERVED, res);
+  }
+
+  public void setReserved(Resource res) {
+    setReserved(NL, res);
+  }
+
+  public void setReserved(String label, Resource res) {
+    internalSet(label, ResourceType.RESERVED, res);
+  }
+
+  /*
+   * Headroom
+   */
+  public Resource getHeadroom() {
+    return getHeadroom(NL);
+  }
+
+  public Resource getHeadroom(String label) {
+    return internalGet(label, ResourceType.HEADROOM);
+  }
+
+  public void incHeadroom(String label, Resource res) {
+    internalInc(label, ResourceType.HEADROOM, res);
+  }
+
+  public void incHeadroom(Resource res) {
+    incHeadroom(NL, res);
+  }
+
+  public void decHeadroom(Resource res) {
+    decHeadroom(NL, res);
+  }
+
+  public void decHeadroom(String label, Resource res) {
+    internalDec(label, ResourceType.HEADROOM, res);
+  }
+
+  public void setHeadroom(Resource res) {
+    setHeadroom(NL, res);
+  }
+
+  public void setHeadroom(String label, Resource res) {
+    internalSet(label, ResourceType.HEADROOM, res);
+  }
+
+  /*
+   * AM-Used
+   */
+  public Resource getAMUsed() {
+    return getAMUsed(NL);
+  }
+
+  public Resource getAMUsed(String label) {
+    return internalGet(label, ResourceType.AMUSED);
+  }
+
+  public void incAMUsed(String label, Resource res) {
+    internalInc(label, ResourceType.AMUSED, res);
+  }
+
+  public void incAMUsed(Resource res) {
+    incAMUsed(NL, res);
+  }
+
+  public void decAMUsed(Resource res) {
+    decAMUsed(NL, res);
+  }
+
+  public void decAMUsed(String label, Resource res) {
+    internalDec(label, ResourceType.AMUSED, res);
+  }
+
+  public void setAMUsed(Resource res) {
+    setAMUsed(NL, res);
+  }
+
+  public void setAMUsed(String label, Resource res) {
+    internalSet(label, ResourceType.AMUSED, res);
+  }
+
+  private static Resource normalize(Resource res) {
+    if (res == null) {
+      return Resources.none();
+    }
+    return res;
+  }
+
+  private Resource internalGet(String label, ResourceType type) {
+    try {
+      readLock.lock();
+      UsageByLabel usage = usages.get(label);
+      if (null == usage) {
+        return Resources.none();
+      }
+      return normalize(usage.get(type));
+    } finally {
+      readLock.unlock();
+    }
+  }
+
+  private UsageByLabel getAndAddIfMissing(String label) {
+    if (!usages.containsKey(label)) {
+      UsageByLabel u = new UsageByLabel(label);
+      usages.put(label, u);
+      return u;
+    }
+
+    return usages.get(label);
+  }
+
+  private void internalSet(String label, ResourceType type, Resource res) {
+    try {
+      writeLock.lock();
+      UsageByLabel usage = getAndAddIfMissing(label);
+      usage.set(type, res);
+    } finally {
+      writeLock.unlock();
+    }
+  }
+
+  private void internalInc(String label, ResourceType type, Resource res) {
+    try {
+      writeLock.lock();
+      UsageByLabel usage = getAndAddIfMissing(label);
+      usage.inc(type, res);
+    } finally {
+      writeLock.unlock();
+    }
+  }
+
+  private void internalDec(String label, ResourceType type, Resource res) {
+    try {
+      writeLock.lock();
+      UsageByLabel usage = getAndAddIfMissing(label);
+      usage.dec(type, res);
+    } finally {
+      writeLock.unlock();
+    }
+  }
+}
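A minimal usage sketch of the class above (separate from the commit; the label name "x" and the resource sizes are purely illustrative):

import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceUsage;

public class ResourceUsageExample {
  public static void main(String[] args) {
    ResourceUsage usage = new ResourceUsage();

    // Track usage under the default no-label partition.
    usage.incUsed(Resource.newInstance(1024, 1));
    usage.incPending(Resource.newInstance(2048, 2));

    // Track usage under a node label, e.g. "x".
    usage.incUsed("x", Resource.newInstance(512, 1));

    System.out.println("used, no label: " + usage.getUsed());
    System.out.println("used, label x : " + usage.getUsed("x"));
    System.out.println("pending       : " + usage.getPending());
  }
}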

http://git-wip-us.apache.org/repos/asf/hadoop/blob/21c74e62/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestResourceUsage.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestResourceUsage.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestResourceUsage.java
new file mode 100644
index 0000000..b6dfacb
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestResourceUsage.java
@@ -0,0 +1,138 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler;
+
+import java.lang.reflect.Method;
+import java.util.Arrays;
+import java.util.Collection;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.junit.Assert;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+
+@RunWith(Parameterized.class)
+public class TestResourceUsage {
+  private static final Log LOG = LogFactory.getLog(TestResourceUsage.class);
+  private String suffix;
+
+  @Parameterized.Parameters
+  public static Collection<String[]> getParameters() {
+    return Arrays.asList(new String[][] { { "Pending" }, { "Used" },
+        { "Headroom" }, { "Reserved" }, { "AMUsed" } });
+  }
+
+  public TestResourceUsage(String suffix) {
+    this.suffix = suffix;
+  }
+
+  private static void dec(ResourceUsage obj, String suffix, Resource res,
+      String label) throws Exception {
+    executeByName(obj, "dec" + suffix, res, label);
+  }
+
+  private static void inc(ResourceUsage obj, String suffix, Resource res,
+      String label) throws Exception {
+    executeByName(obj, "inc" + suffix, res, label);
+  }
+
+  private static void set(ResourceUsage obj, String suffix, Resource res,
+      String label) throws Exception {
+    executeByName(obj, "set" + suffix, res, label);
+  }
+
+  private static Resource get(ResourceUsage obj, String suffix, String label)
+      throws Exception {
+    return executeByName(obj, "get" + suffix, null, label);
+  }
+
+  // Use reflection to avoid too much duplicated code
+  private static Resource executeByName(ResourceUsage obj, String methodName,
+      Resource arg, String label) throws Exception {
+    // We have 4 kinds of methods:
+    // 1. getXXX() : Resource
+    // 2. getXXX(label) : Resource
+    // 3. set/inc/decXXX(res) : void
+    // 4. set/inc/decXXX(label, res) : void
+    if (methodName.startsWith("get")) {
+      Resource result;
+      if (label == null) {
+        // 1.
+        Method method = ResourceUsage.class.getDeclaredMethod(methodName);
+        result = (Resource) method.invoke(obj);
+      } else {
+        // 2.
+        Method method =
+            ResourceUsage.class.getDeclaredMethod(methodName, String.class);
+        result = (Resource) method.invoke(obj, label);
+      }
+      return result;
+    } else {
+      if (label == null) {
+        // 3.
+        Method method =
+            ResourceUsage.class.getDeclaredMethod(methodName, Resource.class);
+        method.invoke(obj, arg);
+      } else {
+        // 4.
+        Method method =
+            ResourceUsage.class.getDeclaredMethod(methodName, String.class,
+                Resource.class);
+        method.invoke(obj, label, arg);
+      }
+      return null;
+    }
+  }
+
+  private void internalTestModifyAndRead(String label) throws Exception {
+    ResourceUsage usage = new ResourceUsage();
+    Resource res;
+
+    // The first get always returns 0
+    res = get(usage, suffix, label);
+    check(0, 0, res);
+
+    // Adding 1,1 should return 1,1
+    inc(usage, suffix, Resource.newInstance(1, 1), label);
+    check(1, 1, get(usage, suffix, label));
+
+    // Set 2,2
+    set(usage, suffix, Resource.newInstance(2, 2), label);
+    check(2, 2, get(usage, suffix, label));
+
+    // dec 2,2
+    dec(usage, suffix, Resource.newInstance(2, 2), label);
+    check(0, 0, get(usage, suffix, label));
+  }
+
+  void check(int mem, int cpu, Resource res) {
+    Assert.assertEquals(mem, res.getMemory());
+    Assert.assertEquals(cpu, res.getVirtualCores());
+  }
+
+  @Test
+  public void testModifyAndRead() throws Exception {
+    LOG.info("Test - " + suffix);
+    internalTestModifyAndRead(null);
+    internalTestModifyAndRead("label");
+  }
+}


[07/34] hadoop git commit: HADOOP-11466: move to 2.6.1

Posted by zh...@apache.org.
HADOOP-11466: move to 2.6.1


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9feb6b36
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9feb6b36
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9feb6b36

Branch: refs/heads/HDFS-EC
Commit: 9feb6b3666f766721ddc3e25e765ed21af0d075e
Parents: 7c85bb3
Author: Colin Patrick Mccabe <cm...@cloudera.com>
Authored: Mon Jan 26 11:28:02 2015 -0800
Committer: Zhe Zhang <zh...@apache.org>
Committed: Thu Jan 29 10:05:23 2015 -0800

----------------------------------------------------------------------
 hadoop-common-project/hadoop-common/CHANGES.txt | 20 ++++++++++++++++----
 1 file changed, 16 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9feb6b36/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 598f750..e0da851 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -742,10 +742,6 @@ Release 2.7.0 - UNRELEASED
     HADOOP-11327. BloomFilter#not() omits the last bit, resulting in an
     incorrect filter (Eric Payne via jlowe)
 
-    HADOOP-11466. FastByteComparisons: do not use UNSAFE_COMPARER on the SPARC
-    architecture because it is slower there (Suman Somasundar via Colin P.
-    McCabe)
-
     HADOOP-11209. Configuration#updatingResource/finalParameters are not
     thread-safe. (Varun Saxena via ozawa)
 
@@ -767,6 +763,22 @@ Release 2.7.0 - UNRELEASED
     HADOOP-11499. Check of executorThreadsStarted in
     ValueQueue#submitRefillTask() evades lock acquisition (Ted Yu via jlowe)
 
+Release 2.6.1 - UNRELEASED
+
+  INCOMPATIBLE CHANGES
+
+  NEW FEATURES
+
+  IMPROVEMENTS
+
+  OPTIMIZATIONS
+
+  BUG FIXES
+
+    HADOOP-11466. FastByteComparisons: do not use UNSAFE_COMPARER on the SPARC
+    architecture because it is slower there (Suman Somasundar via Colin P.
+    McCabe)
+
 Release 2.6.0 - 2014-11-18
 
   INCOMPATIBLE CHANGES


[08/34] hadoop git commit: YARN-2897. CrossOriginFilter needs more log statements (Mit Desai via jeagles)

Posted by zh...@apache.org.
YARN-2897. CrossOriginFilter needs more log statements (Mit Desai via jeagles)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/94f3ffeb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/94f3ffeb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/94f3ffeb

Branch: refs/heads/HDFS-EC
Commit: 94f3ffeb6eafc53fc0dee76533e40910d5034f5d
Parents: a966dff
Author: Jonathan Eagles <je...@gmail.com>
Authored: Tue Jan 27 16:06:39 2015 -0600
Committer: Zhe Zhang <zh...@apache.org>
Committed: Thu Jan 29 10:05:24 2015 -0800

----------------------------------------------------------------------
 hadoop-yarn-project/CHANGES.txt                   |  3 +++
 .../server/timeline/webapp/CrossOriginFilter.java | 18 ++++++++++++++++++
 2 files changed, 21 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/94f3ffeb/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 054b394..56ffe97 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -209,6 +209,9 @@ Release 2.7.0 - UNRELEASED
     YARN-3086. Make NodeManager memory configurable in MiniYARNCluster.
     (Robert Metzger via ozawa)
 
+    YARN-2897. CrossOriginFilter needs more log statements (Mit Desai via
+    jeagles)
+
   OPTIMIZATIONS
 
   BUG FIXES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/94f3ffeb/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/webapp/CrossOriginFilter.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/webapp/CrossOriginFilter.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/webapp/CrossOriginFilter.java
index d5fab7a..9edaefb 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/webapp/CrossOriginFilter.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/webapp/CrossOriginFilter.java
@@ -106,25 +106,43 @@ public class CrossOriginFilter implements Filter {
 
     String originsList = encodeHeader(req.getHeader(ORIGIN));
     if (!isCrossOrigin(originsList)) {
+      if(LOG.isDebugEnabled()) {
+        LOG.debug("Header origin is null. Returning");
+      }
       return;
     }
 
     if (!areOriginsAllowed(originsList)) {
+      if(LOG.isDebugEnabled()) {
+        LOG.debug("Header origins '" + originsList + "' not allowed. Returning");
+      }
       return;
     }
 
     String accessControlRequestMethod =
         req.getHeader(ACCESS_CONTROL_REQUEST_METHOD);
     if (!isMethodAllowed(accessControlRequestMethod)) {
+      if(LOG.isDebugEnabled()) {
+        LOG.debug("Access control method '" + accessControlRequestMethod +
+            "' not allowed. Returning");
+      }
       return;
     }
 
     String accessControlRequestHeaders =
         req.getHeader(ACCESS_CONTROL_REQUEST_HEADERS);
     if (!areHeadersAllowed(accessControlRequestHeaders)) {
+      if(LOG.isDebugEnabled()) {
+        LOG.debug("Access control headers '" + accessControlRequestHeaders +
+            "' not allowed. Returning");
+      }
       return;
     }
 
+    if(LOG.isDebugEnabled()) {
+      LOG.debug("Completed cross origin filter checks. Populating " +
+          "HttpServletResponse");
+    }
     res.setHeader(ACCESS_CONTROL_ALLOW_ORIGIN, originsList);
     res.setHeader(ACCESS_CONTROL_ALLOW_CREDENTIALS, Boolean.TRUE.toString());
     res.setHeader(ACCESS_CONTROL_ALLOW_METHODS, getAllowedMethodsHeader());
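Each new statement above follows the usual commons-logging guard pattern; in isolation it looks like the sketch below (not part of the commit; class and method names are illustrative). The point of the isDebugEnabled() check is to skip the string concatenation entirely when debug logging is off.

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;

public class DebugGuardExample {
  private static final Log LOG = LogFactory.getLog(DebugGuardExample.class);

  static void rejectOrigin(String originsList) {
    if (LOG.isDebugEnabled()) {
      // The concatenation only happens when debug logging is enabled.
      LOG.debug("Header origins '" + originsList + "' not allowed. Returning");
    }
  }
}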


[18/34] hadoop git commit: HDFS-7566. Remove obsolete entries from hdfs-default.xml (Ray Chiang via aw)

Posted by zh...@apache.org.
HDFS-7566. Remove obsolete entries from hdfs-default.xml (Ray Chiang via aw)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/928bb8ea
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/928bb8ea
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/928bb8ea

Branch: refs/heads/HDFS-EC
Commit: 928bb8ea4088c827842a8b719fab55938aeb5fe2
Parents: 94f3ffe
Author: Allen Wittenauer <aw...@apache.org>
Authored: Tue Jan 27 14:31:27 2015 -0800
Committer: Zhe Zhang <zh...@apache.org>
Committed: Thu Jan 29 10:05:25 2015 -0800

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |  3 +++
 .../src/main/resources/hdfs-default.xml         | 26 --------------------
 .../resources/job_1329348432655_0001_conf.xml   |  2 --
 .../src/main/data/2jobs2min-rumen-jh.json       |  6 -----
 4 files changed, 3 insertions(+), 34 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/928bb8ea/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index b867a70..beea13b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -798,6 +798,9 @@ Release 2.7.0 - UNRELEASED
     HDFS-49. MiniDFSCluster.stopDataNode will always shut down a node in
     the cluster if a matching name is not found. (stevel)
 
+    HDFS-7566. Remove obsolete entries from hdfs-default.xml (Ray Chiang
+    via aw)
+
 Release 2.6.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/928bb8ea/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
index c24f7be..966f5f0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
@@ -31,16 +31,6 @@
 </property>
 
 <property>
-  <name>dfs.namenode.logging.level</name>
-  <value>info</value>
-  <description>
-    The logging level for dfs namenode. Other values are "dir" (trace
-    namespace mutations), "block" (trace block under/over replications
-    and block creations/deletions), or "all".
-  </description>
-</property>
-
-<property>
   <name>dfs.namenode.rpc-address</name>
   <value></value>
   <description>
@@ -155,14 +145,6 @@
 </property>
 
 <property>
-  <name>dfs.https.enable</name>
-  <value>false</value>
-  <description>
-    Deprecated. Use "dfs.http.policy" instead.
-  </description>
-</property>
-
-<property>
   <name>dfs.http.policy</name>
   <value>HTTP_ONLY</value>
   <description>Decide if HTTPS(SSL) is supported on HDFS
@@ -1245,14 +1227,6 @@
 </property>
 
 <property>
-  <name>dfs.support.append</name>
-  <value>true</value>
-  <description>
-    Does HDFS allow appends to files?
-  </description>
-</property>
-
-<property>
   <name>dfs.client.use.datanode.hostname</name>
   <value>false</value>
   <description>Whether clients should use datanode hostnames when

http://git-wip-us.apache.org/repos/asf/hadoop/blob/928bb8ea/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/resources/job_1329348432655_0001_conf.xml
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/resources/job_1329348432655_0001_conf.xml b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/resources/job_1329348432655_0001_conf.xml
index d886e89..608b8ab 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/resources/job_1329348432655_0001_conf.xml
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/resources/job_1329348432655_0001_conf.xml
@@ -113,7 +113,6 @@
 <property><!--Loaded from job.xml--><name>hadoop.proxyuser.user.groups</name><value>users</value></property>
 <property><!--Loaded from job.xml--><name>dfs.namenode.name.dir.restore</name><value>false</value></property>
 <property><!--Loaded from job.xml--><name>io.seqfile.lazydecompress</name><value>true</value></property>
-<property><!--Loaded from job.xml--><name>dfs.https.enable</name><value>false</value></property>
 <property><!--Loaded from job.xml--><name>mapreduce.reduce.merge.inmem.threshold</name><value>1000</value></property>
 <property><!--Loaded from job.xml--><name>mapreduce.input.fileinputformat.split.minsize</name><value>0</value></property>
 <property><!--Loaded from job.xml--><name>dfs.replication</name><value>3</value></property>
@@ -209,7 +208,6 @@
 <property><!--Loaded from job.xml--><name>mapreduce.job.dir</name><value>/tmp/hadoop-yarn/staging/user/.staging/job_1329348432655_0001</value></property>
 <property><!--Loaded from job.xml--><name>io.map.index.skip</name><value>0</value></property>
 <property><!--Loaded from job.xml--><name>net.topology.node.switch.mapping.impl</name><value>org.apache.hadoop.net.ScriptBasedMapping</value></property>
-<property><!--Loaded from job.xml--><name>dfs.namenode.logging.level</name><value>info</value></property>
 <property><!--Loaded from job.xml--><name>fs.s3.maxRetries</name><value>4</value></property>
 <property><!--Loaded from job.xml--><name>s3native.client-write-packet-size</name><value>65536</value></property>
 <property><!--Loaded from job.xml--><name>yarn.resourcemanager.amliveliness-monitor.interval-ms</name><value>1000</value></property>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/928bb8ea/hadoop-tools/hadoop-sls/src/main/data/2jobs2min-rumen-jh.json
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-sls/src/main/data/2jobs2min-rumen-jh.json b/hadoop-tools/hadoop-sls/src/main/data/2jobs2min-rumen-jh.json
index 59ae8d7..70ff8af 100644
--- a/hadoop-tools/hadoop-sls/src/main/data/2jobs2min-rumen-jh.json
+++ b/hadoop-tools/hadoop-sls/src/main/data/2jobs2min-rumen-jh.json
@@ -4657,7 +4657,6 @@
     "mapreduce.tasktracker.taskcontroller" : "org.apache.hadoop.mapred.DefaultTaskController",
     "yarn.scheduler.fair.preemption" : "true",
     "mapreduce.reduce.shuffle.parallelcopies" : "5",
-    "dfs.support.append" : "true",
     "yarn.nodemanager.env-whitelist" : "JAVA_HOME,HADOOP_COMMON_HOME,HADOOP_HDFS_HOME,HADOOP_CONF_DIR,YARN_HOME",
     "mapreduce.jobtracker.heartbeats.in.second" : "100",
     "mapreduce.job.maxtaskfailures.per.tracker" : "3",
@@ -4674,7 +4673,6 @@
     "dfs.datanode.hdfs-blocks-metadata.enabled" : "true",
     "ha.zookeeper.parent-znode" : "/hadoop-ha",
     "io.seqfile.lazydecompress" : "true",
-    "dfs.https.enable" : "false",
     "mapreduce.reduce.merge.inmem.threshold" : "1000",
     "mapreduce.input.fileinputformat.split.minsize" : "0",
     "dfs.replication" : "3",
@@ -4783,7 +4781,6 @@
     "io.map.index.skip" : "0",
     "net.topology.node.switch.mapping.impl" : "org.apache.hadoop.net.ScriptBasedMapping",
     "fs.s3.maxRetries" : "4",
-    "dfs.namenode.logging.level" : "info",
     "ha.failover-controller.new-active.rpc-timeout.ms" : "60000",
     "s3native.client-write-packet-size" : "65536",
     "yarn.resourcemanager.amliveliness-monitor.interval-ms" : "1000",
@@ -9770,7 +9767,6 @@
     "mapreduce.tasktracker.taskcontroller" : "org.apache.hadoop.mapred.DefaultTaskController",
     "yarn.scheduler.fair.preemption" : "true",
     "mapreduce.reduce.shuffle.parallelcopies" : "5",
-    "dfs.support.append" : "true",
     "yarn.nodemanager.env-whitelist" : "JAVA_HOME,HADOOP_COMMON_HOME,HADOOP_HDFS_HOME,HADOOP_CONF_DIR,YARN_HOME",
     "mapreduce.jobtracker.heartbeats.in.second" : "100",
     "mapreduce.job.maxtaskfailures.per.tracker" : "3",
@@ -9787,7 +9783,6 @@
     "dfs.datanode.hdfs-blocks-metadata.enabled" : "true",
     "ha.zookeeper.parent-znode" : "/hadoop-ha",
     "io.seqfile.lazydecompress" : "true",
-    "dfs.https.enable" : "false",
     "mapreduce.reduce.merge.inmem.threshold" : "1000",
     "mapreduce.input.fileinputformat.split.minsize" : "0",
     "dfs.replication" : "3",
@@ -9896,7 +9891,6 @@
     "io.map.index.skip" : "0",
     "net.topology.node.switch.mapping.impl" : "org.apache.hadoop.net.ScriptBasedMapping",
     "fs.s3.maxRetries" : "4",
-    "dfs.namenode.logging.level" : "info",
     "ha.failover-controller.new-active.rpc-timeout.ms" : "60000",
     "s3native.client-write-packet-size" : "65536",
     "yarn.resourcemanager.amliveliness-monitor.interval-ms" : "1000",


[24/34] hadoop git commit: HDFS-7611. deleteSnapshot and delete of a file can leave orphaned blocks in the blocksMap on NameNode restart. Contributed by Jing Zhao and Byron Wong.

Posted by zh...@apache.org.
HDFS-7611. deleteSnapshot and delete of a file can leave orphaned blocks in the blocksMap on NameNode restart. Contributed by Jing Zhao and Byron Wong.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d4edaedf
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d4edaedf
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d4edaedf

Branch: refs/heads/HDFS-EC
Commit: d4edaedf1d51c6cea2271df1a07dbaf5958b985f
Parents: 620dd74
Author: Jing Zhao <ji...@apache.org>
Authored: Wed Jan 28 15:24:28 2015 -0800
Committer: Zhe Zhang <zh...@apache.org>
Committed: Thu Jan 29 10:05:26 2015 -0800

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |  3 ++
 .../hdfs/server/namenode/FSDirDeleteOp.java     | 11 ++++--
 .../hdfs/server/namenode/FSDirRenameOp.java     | 11 ++++--
 .../hdfs/server/namenode/FSDirectory.java       | 36 ++++++++++++--------
 .../org/apache/hadoop/hdfs/MiniDFSCluster.java  |  4 ++-
 .../namenode/snapshot/TestSnapshotDeletion.java | 27 +++++++++++++++
 6 files changed, 71 insertions(+), 21 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d4edaedf/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 4bd2c55..4d2b41d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -807,6 +807,9 @@ Release 2.7.0 - UNRELEASED
 
     HDFS-7677. DistributedFileSystem#truncate should resolve symlinks. (yliu)
 
+    HDFS-7611. deleteSnapshot and delete of a file can leave orphaned blocks
+    in the blocksMap on NameNode restart. (jing9 and Byron Wong)
+
 Release 2.6.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d4edaedf/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirDeleteOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirDeleteOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirDeleteOp.java
index c93d1f6..978451c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirDeleteOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirDeleteOp.java
@@ -223,20 +223,25 @@ class FSDirDeleteOp {
     // set the parent's modification time
     final INodeDirectory parent = targetNode.getParent();
     parent.updateModificationTime(mtime, latestSnapshot);
+
+    fsd.updateCountForDelete(targetNode, iip);
     if (removed == 0) {
       return 0;
     }
 
-    // collect block
+    // collect block and update quota
     if (!targetNode.isInLatestSnapshot(latestSnapshot)) {
       targetNode.destroyAndCollectBlocks(collectedBlocks, removedINodes);
     } else {
       Quota.Counts counts = targetNode.cleanSubtree(CURRENT_STATE_ID,
           latestSnapshot, collectedBlocks, removedINodes, true);
-      parent.addSpaceConsumed(-counts.get(Quota.NAMESPACE),
-          -counts.get(Quota.DISKSPACE), true);
       removed = counts.get(Quota.NAMESPACE);
+      // TODO: quota verification may fail the deletion here. We should not
+      // count the snapshot diff into quota usage in the future.
+      fsd.updateCount(iip, -counts.get(Quota.NAMESPACE),
+          -counts.get(Quota.DISKSPACE), true);
     }
+
     if (NameNode.stateChangeLog.isDebugEnabled()) {
       NameNode.stateChangeLog.debug("DIR* FSDirectory.unprotectedDelete: "
           + iip.getPath() + " is removed");

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d4edaedf/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
index b994104..9ed2492 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
@@ -625,9 +625,12 @@ class FSDirRenameOp {
         NameNode.stateChangeLog.warn("DIR* FSDirRenameOp.unprotectedRenameTo:" +
             error);
         throw new IOException(error);
+      } else {
+        // update the quota count if necessary
+        fsd.updateCountForDelete(srcChild, srcIIP);
+        srcIIP = INodesInPath.replace(srcIIP, srcIIP.length() - 1, null);
+        return removedNum;
       }
-      srcIIP = INodesInPath.replace(srcIIP, srcIIP.length() - 1, null);
-      return removedNum;
     }
 
     boolean removeSrc4OldRename() throws IOException {
@@ -638,6 +641,8 @@ class FSDirRenameOp {
             " can not be removed");
         return false;
       } else {
+        // update the quota count if necessary
+        fsd.updateCountForDelete(srcChild, srcIIP);
         srcIIP = INodesInPath.replace(srcIIP, srcIIP.length() - 1, null);
         return true;
       }
@@ -647,6 +652,8 @@ class FSDirRenameOp {
       long removedNum = fsd.removeLastINode(dstIIP);
       if (removedNum != -1) {
         oldDstChild = dstIIP.getLastINode();
+        // update the quota count if necessary
+        fsd.updateCountForDelete(oldDstChild, dstIIP);
         dstIIP = INodesInPath.replace(dstIIP, dstIIP.length() - 1, null);
       }
       return removedNum;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d4edaedf/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
index c012847..7242cca 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
@@ -601,7 +601,22 @@ public class FSDirectory implements Closeable {
       writeUnlock();
     }
   }
-  
+
+  /**
+   * Update the quota usage after deletion. The quota update is only necessary
+   * when image/edits have been loaded and the file/dir to be deleted is not
+   * contained in snapshots.
+   */
+  void updateCountForDelete(final INode inode, final INodesInPath iip)
+      throws QuotaExceededException {
+    if (getFSNamesystem().isImageLoaded() &&
+        !inode.isInLatestSnapshot(iip.getLatestSnapshotId())) {
+      Quota.Counts counts = inode.computeQuotaUsage();
+      updateCount(iip, -counts.get(Quota.NAMESPACE),
+          -counts.get(Quota.DISKSPACE), false);
+    }
+  }
+
   void updateCount(INodesInPath iip, long nsDelta, long dsDelta,
       boolean checkQuota) throws QuotaExceededException {
     updateCount(iip, iip.length() - 1, nsDelta, dsDelta, checkQuota);
@@ -904,11 +919,12 @@ public class FSDirectory implements Closeable {
 
   /**
    * Remove the last inode in the path from the namespace.
-   * Count of each ancestor with quota is also updated.
+   * Note: the caller needs to update the ancestors' quota count.
+   *
    * @return -1 for failing to remove;
    *          0 for removing a reference whose referred inode has other 
    *            reference nodes;
-   *         >0 otherwise. 
+   *          1 otherwise.
    */
   long removeLastINode(final INodesInPath iip) throws QuotaExceededException {
     final int latestSnapshot = iip.getLatestSnapshotId();
@@ -917,19 +933,9 @@ public class FSDirectory implements Closeable {
     if (!parent.removeChild(last, latestSnapshot)) {
       return -1;
     }
-    
-    if (!last.isInLatestSnapshot(latestSnapshot)) {
-      final Quota.Counts counts = last.computeQuotaUsage();
-      updateCountNoQuotaCheck(iip, iip.length() - 1,
-          -counts.get(Quota.NAMESPACE), -counts.get(Quota.DISKSPACE));
 
-      if (INodeReference.tryRemoveReference(last) > 0) {
-        return 0;
-      } else {
-        return counts.get(Quota.NAMESPACE);
-      }
-    }
-    return 1;
+    return (!last.isInLatestSnapshot(latestSnapshot)
+        && INodeReference.tryRemoveReference(last) > 0) ? 0 : 1;
   }
 
   static String normalizePath(String src) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d4edaedf/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
index 8551263..33bd4e5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
@@ -1191,7 +1191,9 @@ public class MiniDFSCluster {
         } catch (InterruptedException e) {
         }
         if (++i > 10) {
-          throw new IOException("Timed out waiting for Mini HDFS Cluster to start");
+          final String msg = "Timed out waiting for Mini HDFS Cluster to start";
+          LOG.error(msg);
+          throw new IOException(msg);
         }
       }
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d4edaedf/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java
index 1450a7d..b616891 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java
@@ -1122,4 +1122,31 @@ public class TestSnapshotDeletion {
     // wait till the cluster becomes active
     cluster.waitClusterUp();
   }
+
+  @Test
+  public void testCorrectNumberOfBlocksAfterRestart() throws IOException {
+    final Path foo = new Path("/foo");
+    final Path bar = new Path(foo, "bar");
+    final Path file = new Path(foo, "file");
+    final String snapshotName = "ss0";
+
+    DFSTestUtil.createFile(hdfs, file, BLOCKSIZE, REPLICATION, seed);
+    hdfs.mkdirs(bar);
+    hdfs.setQuota(foo, Long.MAX_VALUE - 1, Long.MAX_VALUE - 1);
+    hdfs.setQuota(bar, Long.MAX_VALUE - 1, Long.MAX_VALUE - 1);
+    hdfs.allowSnapshot(foo);
+
+    hdfs.createSnapshot(foo, snapshotName);
+    hdfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
+    hdfs.saveNamespace();
+
+    hdfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
+    hdfs.deleteSnapshot(foo, snapshotName);
+    hdfs.delete(bar, true);
+    hdfs.delete(foo, true);
+
+    long numberOfBlocks = cluster.getNamesystem().getBlocksTotal();
+    cluster.restartNameNode(0);
+    assertEquals(numberOfBlocks, cluster.getNamesystem().getBlocksTotal());
+  }
 }


[14/34] hadoop git commit: HADOOP-10626. Limit Returning Attributes for LDAP search. Contributed by Jason Hubbard.

Posted by zh...@apache.org.
HADOOP-10626. Limit Returning Attributes for LDAP search. Contributed by Jason Hubbard.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a966dff2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a966dff2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a966dff2

Branch: refs/heads/HDFS-EC
Commit: a966dff2cbf4481e9623c6cb829e53dadc8e4f9b
Parents: ec5b307
Author: Aaron T. Myers <at...@apache.org>
Authored: Tue Jan 27 13:50:45 2015 -0800
Committer: Zhe Zhang <zh...@apache.org>
Committed: Thu Jan 29 10:05:24 2015 -0800

----------------------------------------------------------------------
 hadoop-common-project/hadoop-common/CHANGES.txt                   | 3 +++
 .../main/java/org/apache/hadoop/security/LdapGroupsMapping.java   | 2 ++
 2 files changed, 5 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a966dff2/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index fce2c81..0396e7d 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -505,6 +505,9 @@ Release 2.7.0 - UNRELEASED
     HADOOP-4297. Enable Java assertions when running tests.
     (Tsz Wo Nicholas Sze via wheat9)
 
+    HADOOP-10626. Limit Returning Attributes for LDAP search. (Jason Hubbard
+    via atm)
+
   OPTIMIZATIONS
 
     HADOOP-11323. WritableComparator#compare keeps reference to byte array.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a966dff2/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/LdapGroupsMapping.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/LdapGroupsMapping.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/LdapGroupsMapping.java
index c0c8d2b..d463ac7 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/LdapGroupsMapping.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/LdapGroupsMapping.java
@@ -341,6 +341,8 @@ public class LdapGroupsMapping
 
     int dirSearchTimeout = conf.getInt(DIRECTORY_SEARCH_TIMEOUT, DIRECTORY_SEARCH_TIMEOUT_DEFAULT);
     SEARCH_CONTROLS.setTimeLimit(dirSearchTimeout);
+    // Limit the attributes returned to only those required to speed up the search. See HADOOP-10626 for more details.
+    SEARCH_CONTROLS.setReturningAttributes(new String[] {groupNameAttr});
 
     this.conf = conf;
   }
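Outside of LdapGroupsMapping, the same SearchControls tuning looks like the sketch below (not part of the commit; the "cn" attribute name and the timeout value are illustrative). Restricting the returned attributes means the LDAP server only ships back the group-name attribute instead of every attribute of each matching entry.

import javax.naming.directory.SearchControls;

public class LdapSearchControlsExample {
  public static SearchControls buildGroupSearchControls() {
    SearchControls controls = new SearchControls();
    controls.setSearchScope(SearchControls.SUBTREE_SCOPE);
    controls.setTimeLimit(10000); // directory search timeout, in milliseconds
    // Only the group name attribute is needed, so do not return anything else.
    controls.setReturningAttributes(new String[] {"cn"});
    return controls;
  }
}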


[20/34] hadoop git commit: YARN-3028. Better syntax for replaceLabelsOnNode in RMAdmin CLI. Contributed by Rohith Sharmaks

Posted by zh...@apache.org.
YARN-3028. Better syntax for replaceLabelsOnNode in RMAdmin CLI. Contributed by Rohith Sharmaks


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c994cbc1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c994cbc1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c994cbc1

Branch: refs/heads/HDFS-EC
Commit: c994cbc1a1b74aa15e7bd0a0f1b32dafc5802389
Parents: 928bb8e
Author: Wangda Tan <wa...@apache.org>
Authored: Tue Jan 27 15:23:45 2015 -0800
Committer: Zhe Zhang <zh...@apache.org>
Committed: Thu Jan 29 10:05:25 2015 -0800

----------------------------------------------------------------------
 hadoop-yarn-project/CHANGES.txt                 |  3 ++
 .../hadoop/yarn/client/cli/RMAdminCLI.java      | 19 ++++++--
 .../hadoop/yarn/client/cli/TestRMAdminCLI.java  | 48 ++++++++++----------
 .../nodelabels/TestCommonNodeLabelsManager.java | 23 +++++++++-
 4 files changed, 63 insertions(+), 30 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c994cbc1/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 56ffe97..2752824 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -212,6 +212,9 @@ Release 2.7.0 - UNRELEASED
     YARN-2897. CrossOriginFilter needs more log statements (Mit Desai via
     jeagles)
 
+    YARN-3028. Better syntax for replaceLabelsOnNode in RMAdmin CLI
+    (Rohith Sharmaks via wangda)
+
   OPTIMIZATIONS
 
   BUG FIXES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c994cbc1/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/RMAdminCLI.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/RMAdminCLI.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/RMAdminCLI.java
index 9ea333c..6f1bbd0 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/RMAdminCLI.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/RMAdminCLI.java
@@ -100,7 +100,8 @@ public class RMAdminCLI extends HAAdmin {
               new UsageInfo("[label1,label2,label3] (label splitted by \",\")",
                   "remove from cluster node labels"))
           .put("-replaceLabelsOnNode",
-              new UsageInfo("[node1:port,label1,label2 node2:port,label1,label2]",
+              new UsageInfo(
+                  "[node1[:port]=label1,label2 node2[:port]=label1,label2]",
                   "replace labels on nodes"))
           .put("-directlyAccessNodeLabelStore",
               new UsageInfo("", "Directly access node label store, "
@@ -199,7 +200,7 @@ public class RMAdminCLI extends HAAdmin {
       " [-getGroup [username]]" +
       " [[-addToClusterNodeLabels [label1,label2,label3]]" +
       " [-removeFromClusterNodeLabels [label1,label2,label3]]" +
-      " [-replaceLabelsOnNode [node1:port,label1,label2 node2:port,label1]" +
+      " [-replaceLabelsOnNode [node1[:port]=label1,label2 node2[:port]=label1]" +
       " [-directlyAccessNodeLabelStore]]");
     if (isHAEnabled) {
       appendHAUsage(summary);
@@ -398,8 +399,18 @@ public class RMAdminCLI extends HAAdmin {
         continue;
       }
 
-      String[] splits = nodeToLabels.split(",");
+      // "," also supported for compatibility
+      String[] splits = nodeToLabels.split("=");
+      int index = 0;
+      if (splits.length != 2) {
+        splits = nodeToLabels.split(",");
+        index = 1;
+      }
+
       String nodeIdStr = splits[0];
+      if (index == 0) {
+        splits = splits[1].split(",");
+      }
 
       if (nodeIdStr.trim().isEmpty()) {
         throw new IOException("node name cannot be empty");
@@ -408,7 +419,7 @@ public class RMAdminCLI extends HAAdmin {
       NodeId nodeId = ConverterUtils.toNodeIdWithDefaultPort(nodeIdStr);
       map.put(nodeId, new HashSet<String>());
 
-      for (int i = 1; i < splits.length; i++) {
+      for (int i = index; i < splits.length; i++) {
         if (!splits[i].trim().isEmpty()) {
           map.get(nodeId).add(splits[i].trim());
         }
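A standalone sketch of the syntax accepted after this change (separate from the commit; the class below only mirrors the parsing logic added above, including the old comma-separated form kept for compatibility):

import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

public class ReplaceLabelsSyntaxExample {
  // Parses entries such as "node1:8000=x,y node2=y node3,x" into node -> labels.
  public static Map<String, Set<String>> parse(String args) {
    Map<String, Set<String>> nodeToLabels = new HashMap<String, Set<String>>();
    for (String entry : args.split(" ")) {
      if (entry.trim().isEmpty()) {
        continue;
      }
      String[] splits = entry.split("=");
      int index = 0;
      if (splits.length != 2) {
        // fall back to the old "node[:port],label1,label2" form
        splits = entry.split(",");
        index = 1;
      }
      String node = splits[0];
      if (index == 0) {
        splits = splits[1].split(",");
      }
      Set<String> labels = new HashSet<String>();
      for (int i = index; i < splits.length; i++) {
        if (!splits[i].trim().isEmpty()) {
          labels.add(splits[i].trim());
        }
      }
      nodeToLabels.put(node, labels);
    }
    return nodeToLabels;
  }
}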

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c994cbc1/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestRMAdminCLI.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestRMAdminCLI.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestRMAdminCLI.java
index 92af27d..1dfeac2 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestRMAdminCLI.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestRMAdminCLI.java
@@ -73,7 +73,6 @@ public class TestRMAdminCLI {
   @Before
   public void configure() throws IOException, YarnException {
     remoteAdminServiceAccessed = false;
-    dummyNodeLabelsManager = new DummyCommonNodeLabelsManager();
     admin = mock(ResourceManagerAdministrationProtocol.class);
     when(admin.addToClusterNodeLabels(any(AddToClusterNodeLabelsRequest.class)))
         .thenAnswer(new Answer<AddToClusterNodeLabelsResponse>() {
@@ -105,6 +104,7 @@ public class TestRMAdminCLI {
         return haServiceTarget;
       }
     };
+    initDummyNodeLabelsManager();
     rmAdminCLI.localNodeLabelsManager = dummyNodeLabelsManager;
 
     YarnConfiguration conf = new YarnConfiguration();
@@ -124,6 +124,13 @@ public class TestRMAdminCLI {
     };
   }
   
+  private void initDummyNodeLabelsManager() {
+    Configuration conf = new YarnConfiguration();
+    conf.setBoolean(YarnConfiguration.NODE_LABELS_ENABLED, true);
+    dummyNodeLabelsManager = new DummyCommonNodeLabelsManager();
+    dummyNodeLabelsManager.init(conf);
+  }
+  
   @Test(timeout=500)
   public void testRefreshQueues() throws Exception {
     String[] args = { "-refreshQueues" };
@@ -281,7 +288,7 @@ public class TestRMAdminCLI {
               "[-refreshAdminAcls] [-refreshServiceAcl] [-getGroup" +
               " [username]] [[-addToClusterNodeLabels [label1,label2,label3]]" +
               " [-removeFromClusterNodeLabels [label1,label2,label3]] [-replaceLabelsOnNode " +
-              "[node1:port,label1,label2 node2:port,label1] [-directlyAccessNodeLabelStore]] " +
+              "[node1[:port]=label1,label2 node2[:port]=label1] [-directlyAccessNodeLabelStore]] " +
               "[-help [cmd]]"));
       assertTrue(dataOut
           .toString()
@@ -361,7 +368,7 @@ public class TestRMAdminCLI {
               "[-refreshAdminAcls] [-refreshServiceAcl] [-getGroup" +
               " [username]] [[-addToClusterNodeLabels [label1,label2,label3]]" +
               " [-removeFromClusterNodeLabels [label1,label2,label3]] [-replaceLabelsOnNode " +
-              "[node1:port,label1,label2 node2:port,label1] [-directlyAccessNodeLabelStore]] " +
+              "[node1[:port]=label1,label2 node2[:port]=label1] [-directlyAccessNodeLabelStore]] " +
               "[-transitionToActive [--forceactive] <serviceId>] " + 
               "[-transitionToStandby <serviceId>] [-failover" +
               " [--forcefence] [--forceactive] <serviceId> <serviceId>] " +
@@ -501,24 +508,29 @@ public class TestRMAdminCLI {
   @Test
   public void testReplaceLabelsOnNode() throws Exception {
     // Successfully replace labels
-    dummyNodeLabelsManager.addToCluserNodeLabels(ImmutableSet.of("x", "Y"));
+    dummyNodeLabelsManager
+        .addToCluserNodeLabels(ImmutableSet.of("x", "y", "Y"));
     String[] args =
-        { "-replaceLabelsOnNode", "node1,x,Y node2,Y",
+        { "-replaceLabelsOnNode",
+            "node1:8000,x,y node2:8000=y node3,x,Y node4=Y",
             "-directlyAccessNodeLabelStore" };
     assertEquals(0, rmAdminCLI.run(args));
     assertTrue(dummyNodeLabelsManager.getNodeLabels().containsKey(
-        NodeId.newInstance("node1", 0)));
+        NodeId.newInstance("node1", 8000)));
     assertTrue(dummyNodeLabelsManager.getNodeLabels().containsKey(
-        NodeId.newInstance("node2", 0)));
-    
+        NodeId.newInstance("node2", 8000)));
+    assertTrue(dummyNodeLabelsManager.getNodeLabels().containsKey(
+        NodeId.newInstance("node3", 0)));
+    assertTrue(dummyNodeLabelsManager.getNodeLabels().containsKey(
+        NodeId.newInstance("node4", 0)));
+
     // no labels, should fail
     args = new String[] { "-replaceLabelsOnNode" };
     assertTrue(0 != rmAdminCLI.run(args));
-    
+
     // no labels, should fail
     args =
-        new String[] { "-replaceLabelsOnNode",
-            "-directlyAccessNodeLabelStore" };
+        new String[] { "-replaceLabelsOnNode", "-directlyAccessNodeLabelStore" };
     assertTrue(0 != rmAdminCLI.run(args));
 
     // no labels, should fail
@@ -529,20 +541,6 @@ public class TestRMAdminCLI {
     assertTrue(0 != rmAdminCLI.run(args));
   }
 
-  @Test
-  public void testReplaceLabelsOnNodeWithPort() throws Exception {
-    // Successfully replace labels
-    dummyNodeLabelsManager.addToCluserNodeLabels(ImmutableSet.of("x", "y"));
-    String[] args =
-      { "-replaceLabelsOnNode", "node1:8000,x,y node2:8000,y",
-      "-directlyAccessNodeLabelStore" };
-    assertEquals(0, rmAdminCLI.run(args));
-    assertTrue(dummyNodeLabelsManager.getNodeLabels().containsKey(
-        NodeId.newInstance("node1", 8000)));
-    assertTrue(dummyNodeLabelsManager.getNodeLabels().containsKey(
-        NodeId.newInstance("node2", 8000)));
-  }
-
   private void testError(String[] args, String template,
       ByteArrayOutputStream data, int resultCode) throws Exception {
     int actualResultCode = rmAdminCLI.run(args);
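
The tests above exercise the new -replaceLabelsOnNode argument syntax, where each entry is node[:port] optionally followed by =label1,label2, and the older comma-separated form (node[:port],label1,label2) is still accepted. As a rough, standalone sketch of how such an argument string could be split into a node-to-labels map (a hypothetical helper for illustration, not the actual RMAdminCLI parsing code):

  import java.util.*;

  // Hypothetical helper: splits "node[:port][=|,]label1,label2 ..." into a map.
  // The real parsing lives in RMAdminCLI and may differ in detail.
  public class NodeLabelArgSketch {
    public static Map<String, Set<String>> parse(String arg) {
      Map<String, Set<String>> result = new HashMap<>();
      for (String entry : arg.trim().split("\\s+")) {
        String node;
        String labelPart;
        int eq = entry.indexOf('=');
        if (eq >= 0) {                       // new syntax: node[:port]=labels
          node = entry.substring(0, eq);
          labelPart = entry.substring(eq + 1);
        } else {                             // legacy syntax: node[:port],labels
          int comma = entry.indexOf(',');
          node = comma >= 0 ? entry.substring(0, comma) : entry;
          labelPart = comma >= 0 ? entry.substring(comma + 1) : "";
        }
        Set<String> labels = new HashSet<>();
        for (String label : labelPart.split(",")) {
          if (!label.isEmpty()) {
            labels.add(label);
          }
        }
        result.put(node, labels);            // node keeps its optional ":port"
      }
      return result;
    }

    public static void main(String[] args) {
      // Same argument string as testReplaceLabelsOnNode above.
      System.out.println(parse("node1:8000,x,y node2:8000=y node3,x,Y node4=Y"));
    }
  }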

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c994cbc1/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/nodelabels/TestCommonNodeLabelsManager.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/nodelabels/TestCommonNodeLabelsManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/nodelabels/TestCommonNodeLabelsManager.java
index 242f59c..0ab1115 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/nodelabels/TestCommonNodeLabelsManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/nodelabels/TestCommonNodeLabelsManager.java
@@ -333,23 +333,32 @@ public class TestCommonNodeLabelsManager extends NodeLabelTestBase {
   public void testNodeLabelsDisabled() throws IOException {
     DummyCommonNodeLabelsManager mgr = new DummyCommonNodeLabelsManager();
     Configuration conf = new YarnConfiguration();
-    conf.setBoolean(YarnConfiguration.NODE_LABELS_ENABLED, true);
+    conf.setBoolean(YarnConfiguration.NODE_LABELS_ENABLED, false);
     mgr.init(conf);
     mgr.start();
+    boolean caught = false;
     
     // add labels
     try {
       mgr.addToCluserNodeLabels(ImmutableSet.of("x"));
     } catch (IOException e) {
       assertNodeLabelsDisabledErrorMessage(e);
+      caught = true;
     }
+    // check exception caught
+    Assert.assertTrue(caught);
+    caught = false;
     
     // remove labels
     try {
       mgr.removeFromClusterNodeLabels(ImmutableSet.of("x"));
     } catch (IOException e) {
       assertNodeLabelsDisabledErrorMessage(e);
+      caught = true;
     }
+    // check exception caught
+    Assert.assertTrue(caught);
+    caught = false;
     
     // add labels to node
     try {
@@ -357,7 +366,11 @@ public class TestCommonNodeLabelsManager extends NodeLabelTestBase {
           CommonNodeLabelsManager.EMPTY_STRING_SET));
     } catch (IOException e) {
       assertNodeLabelsDisabledErrorMessage(e);
+      caught = true;
     }
+    // check exception caught
+    Assert.assertTrue(caught);
+    caught = false;
     
     // remove labels from node
     try {
@@ -365,7 +378,11 @@ public class TestCommonNodeLabelsManager extends NodeLabelTestBase {
           CommonNodeLabelsManager.EMPTY_STRING_SET));
     } catch (IOException e) {
       assertNodeLabelsDisabledErrorMessage(e);
+      caught = true;
     }
+    // check exception caught
+    Assert.assertTrue(caught);
+    caught = false;
     
     // replace labels on node
     try {
@@ -373,7 +390,11 @@ public class TestCommonNodeLabelsManager extends NodeLabelTestBase {
           CommonNodeLabelsManager.EMPTY_STRING_SET));
     } catch (IOException e) {
       assertNodeLabelsDisabledErrorMessage(e);
+      caught = true;
     }
+    // check exception caught
+    Assert.assertTrue(caught);
+    caught = false;
     
     mgr.close();
   }
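
The caught flag added in each block closes a gap in the original test: if the disabled manager ever stopped throwing, the bare try/catch would pass silently. A minimal, self-contained illustration of the same idiom, using a hypothetical Feature class and the JUnit 4 style of this patch:

  import static org.junit.Assert.assertTrue;

  import java.io.IOException;
  import org.junit.Test;

  public class DisabledFeatureTest {
    // Hypothetical stand-in for a manager whose feature is switched off.
    static class Feature {
      void doSomething() throws IOException {
        throw new IOException("feature is disabled");
      }
    }

    @Test
    public void testOperationFailsWhenDisabled() {
      boolean caught = false;
      try {
        new Feature().doSomething();
      } catch (IOException e) {
        assertTrue(e.getMessage().contains("disabled"));
        caught = true;                  // same flag idiom as the patch
      }
      // Fails if the call unexpectedly succeeded.
      assertTrue("expected an IOException", caught);
    }
  }

Calling Assert.fail() right after the statement under test is a common alternative; the flag form keeps the blocks symmetric when one test checks several operations in sequence, as testNodeLabelsDisabled does.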


[12/34] hadoop git commit: HDFS-7683. Combine usages and percent stats in NameNode UI. Contributed by Vinayakumar B.

Posted by zh...@apache.org.
HDFS-7683. Combine usages and percent stats in NameNode UI. Contributed by Vinayakumar B.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2323609b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2323609b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2323609b

Branch: refs/heads/HDFS-EC
Commit: 2323609bd9e688acfcf82acbb9e0270ce2582c13
Parents: 803dd11
Author: Haohui Mai <wh...@apache.org>
Authored: Tue Jan 27 11:09:29 2015 -0800
Committer: Zhe Zhang <zh...@apache.org>
Committed: Thu Jan 29 10:05:24 2015 -0800

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt                 | 3 +++
 .../hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html        | 9 +++------
 2 files changed, 6 insertions(+), 6 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2323609b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 5189eab..1e1af97 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -549,6 +549,9 @@ Release 2.7.0 - UNRELEASED
     HDFS-7224. Allow reuse of NN connections via webhdfs (Eric Payne via
     kihwal)
 
+    HDFS-7683. Combine usages and percent stats in NameNode UI.
+    (Vinayakumar B via wheat9)
+
   OPTIMIZATIONS
 
     HDFS-7454. Reduce memory footprint for AclEntries in NameNode.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2323609b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
index 4971e90..9c83f3a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
@@ -153,13 +153,10 @@
 {#nn}
 <table class="table table-bordered table-striped">
   <tr><th> Configured Capacity:</th><td>{Total|fmt_bytes}</td></tr>
-  <tr><th> DFS Used:</th><td>{Used|fmt_bytes}</td></tr>
+  <tr><th> DFS Used:</th><td>{Used|fmt_bytes} ({PercentUsed|fmt_percentage})</td></tr>
   <tr><th> Non DFS Used:</th><td>{NonDfsUsedSpace|fmt_bytes}</td></tr>
-  <tr><th> DFS Remaining:</th><td>{Free|fmt_bytes}</td></tr>
-  <tr><th> DFS Used%:</th><td>{PercentUsed|fmt_percentage}</td></tr>
-  <tr><th> DFS Remaining%:</th><td>{PercentRemaining|fmt_percentage}</td></tr>
-  <tr><th> Block Pool Used:</th><td>{BlockPoolUsedSpace|fmt_bytes}</td></tr>
-  <tr><th> Block Pool Used%:</th><td>{PercentBlockPoolUsed|fmt_percentage}</td></tr>
+  <tr><th> DFS Remaining:</th><td>{Free|fmt_bytes} ({PercentRemaining|fmt_percentage})</td></tr>
+  <tr><th> Block Pool Used:</th><td>{BlockPoolUsedSpace|fmt_bytes} ({PercentBlockPoolUsed|fmt_percentage})</td></tr>
   <tr><th> DataNodes usages% (Min/Median/Max/stdDev): </th>
 	<td>{#NodeUsage.nodeUsage}{min} / {median} / {max} / {stdDev}{/NodeUsage.nodeUsage}</td></tr>
 {/nn}


[19/34] hadoop git commit: YARN-2932. Add entry for preemptable status (enabled/disabled) to scheduler web UI and queue initialize/refresh logging. (Eric Payne via wangda)

Posted by zh...@apache.org.
YARN-2932. Add entry for preemptable status (enabled/disabled) to scheduler web UI and queue initialize/refresh logging. (Eric Payne via wangda)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c690f6a8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c690f6a8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c690f6a8

Branch: refs/heads/HDFS-EC
Commit: c690f6a86359a8177ed5a517f1fb46a7c7e57a32
Parents: c994cbc
Author: Wangda Tan <wa...@apache.org>
Authored: Tue Jan 27 15:36:09 2015 -0800
Committer: Zhe Zhang <zh...@apache.org>
Committed: Thu Jan 29 10:05:25 2015 -0800

----------------------------------------------------------------------
 hadoop-yarn-project/CHANGES.txt                 |  4 ++
 .../ProportionalCapacityPreemptionPolicy.java   | 20 ++----
 .../scheduler/capacity/AbstractCSQueue.java     | 52 +++++++++++++--
 .../scheduler/capacity/CSQueue.java             |  6 ++
 .../CapacitySchedulerConfiguration.java         | 31 +++++++++
 .../scheduler/capacity/LeafQueue.java           |  3 +-
 .../webapp/CapacitySchedulerPage.java           |  3 +-
 .../dao/CapacitySchedulerLeafQueueInfo.java     |  6 ++
 ...estProportionalCapacityPreemptionPolicy.java | 68 +++++++++++---------
 .../capacity/TestCapacityScheduler.java         | 53 +++++++++++++++
 .../webapp/TestRMWebServicesCapacitySched.java  |  2 +-
 11 files changed, 196 insertions(+), 52 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c690f6a8/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 2752824..225e126 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -215,6 +215,10 @@ Release 2.7.0 - UNRELEASED
     YARN-3028. Better syntax for replaceLabelsOnNode in RMAdmin CLI
     (Rohith Sharmaks via wangda)
 
+    YARN-2932. Add entry for "preemptable" status (enabled/disabled) to 
+    scheduler web UI and queue initialize/refresh logging. 
+    (Eric Payne via wangda)
+
   OPTIMIZATIONS
 
   BUG FIXES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c690f6a8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java
index 0743f60..738f527 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java
@@ -116,9 +116,6 @@ public class ProportionalCapacityPreemptionPolicy implements SchedulingEditPolic
   public static final String NATURAL_TERMINATION_FACTOR =
       "yarn.resourcemanager.monitor.capacity.preemption.natural_termination_factor";
 
-  public static final String BASE_YARN_RM_PREEMPTION = "yarn.scheduler.capacity.";
-  public static final String SUFFIX_DISABLE_PREEMPTION = ".disable_preemption";
-
   // the dispatcher to send preempt and kill events
   public EventHandler<ContainerPreemptEvent> dispatcher;
 
@@ -227,7 +224,7 @@ public class ProportionalCapacityPreemptionPolicy implements SchedulingEditPolic
     // extract a summary of the queues from scheduler
     TempQueue tRoot;
     synchronized (scheduler) {
-      tRoot = cloneQueues(root, clusterResources, false);
+      tRoot = cloneQueues(root, clusterResources);
     }
 
     // compute the ideal distribution of resources among queues
@@ -728,11 +725,9 @@ public class ProportionalCapacityPreemptionPolicy implements SchedulingEditPolic
    *
    * @param root the root of the CapacityScheduler queue hierarchy
    * @param clusterResources the total amount of resources in the cluster
-   * @param parentDisablePreempt true if disable preemption is set for parent
    * @return the root of the cloned queue hierarchy
    */
-  private TempQueue cloneQueues(CSQueue root, Resource clusterResources,
-      boolean parentDisablePreempt) {
+  private TempQueue cloneQueues(CSQueue root, Resource clusterResources) {
     TempQueue ret;
     synchronized (root) {
       String queueName = root.getQueueName();
@@ -744,12 +739,6 @@ public class ProportionalCapacityPreemptionPolicy implements SchedulingEditPolic
       Resource guaranteed = Resources.multiply(clusterResources, absCap);
       Resource maxCapacity = Resources.multiply(clusterResources, absMaxCap);
 
-      boolean queueDisablePreemption = false;
-      String queuePropName = BASE_YARN_RM_PREEMPTION + root.getQueuePath()
-                               + SUFFIX_DISABLE_PREEMPTION;
-      queueDisablePreemption = scheduler.getConfiguration()
-                              .getBoolean(queuePropName, parentDisablePreempt);
-
       Resource extra = Resource.newInstance(0, 0);
       if (Resources.greaterThan(rc, clusterResources, current, guaranteed)) {
         extra = Resources.subtract(current, guaranteed);
@@ -759,7 +748,7 @@ public class ProportionalCapacityPreemptionPolicy implements SchedulingEditPolic
         Resource pending = l.getTotalResourcePending();
         ret = new TempQueue(queueName, current, pending, guaranteed,
             maxCapacity);
-        if (queueDisablePreemption) {
+        if (root.getPreemptionDisabled()) {
           ret.untouchableExtra = extra;
         } else {
           ret.preemptableExtra = extra;
@@ -771,8 +760,7 @@ public class ProportionalCapacityPreemptionPolicy implements SchedulingEditPolic
             maxCapacity);
         Resource childrensPreemptable = Resource.newInstance(0, 0);
         for (CSQueue c : root.getChildQueues()) {
-          TempQueue subq =
-                cloneQueues(c, clusterResources, queueDisablePreemption);
+          TempQueue subq = cloneQueues(c, clusterResources);
           Resources.addTo(childrensPreemptable, subq.preemptableExtra);
           ret.addChild(subq);
         }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c690f6a8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java
index fec3a56..5ac6058 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java
@@ -31,6 +31,7 @@ import org.apache.hadoop.yarn.api.records.QueueACL;
 import org.apache.hadoop.yarn.api.records.QueueInfo;
 import org.apache.hadoop.yarn.api.records.QueueState;
 import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.factories.RecordFactory;
 import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
 import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager;
@@ -38,14 +39,12 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueMetrics;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerUtils;
 import org.apache.hadoop.yarn.util.resource.ResourceCalculator;
 import org.apache.hadoop.yarn.util.resource.Resources;
-
 import com.google.common.collect.Sets;
 
 public abstract class AbstractCSQueue implements CSQueue {
   
   CSQueue parent;
   final String queueName;
-  
   float capacity;
   float maximumCapacity;
   float absoluteCapacity;
@@ -74,10 +73,12 @@ public abstract class AbstractCSQueue implements CSQueue {
   Map<QueueACL, AccessControlList> acls = 
       new HashMap<QueueACL, AccessControlList>();
   boolean reservationsContinueLooking;
-  
+  private boolean preemptionDisabled;
+
   private final RecordFactory recordFactory = 
       RecordFactoryProvider.getRecordFactory(null);
-  
+  private CapacitySchedulerContext csContext;
+
   public AbstractCSQueue(CapacitySchedulerContext cs, 
       String queueName, CSQueue parent, CSQueue old) throws IOException {
     this.minimumAllocation = cs.getMinimumResourceCapability();
@@ -120,6 +121,8 @@ public abstract class AbstractCSQueue implements CSQueue {
     maxCapacityByNodeLabels =
         cs.getConfiguration().getMaximumNodeLabelCapacities(getQueuePath(),
             accessibleLabels, labelManager);
+
+    this.csContext = cs;
   }
   
   @Override
@@ -318,6 +321,8 @@ public abstract class AbstractCSQueue implements CSQueue {
         absoluteCapacityByNodeLabels, absoluteCapacityByNodeLabels);
     
     this.reservationsContinueLooking = reservationContinueLooking;
+
+    this.preemptionDisabled = isQueueHierarchyPreemptionDisabled(this);
   }
   
   protected QueueInfo getQueueInfo() {
@@ -454,4 +459,43 @@ public abstract class AbstractCSQueue implements CSQueue {
   public Resource getUsedResourceByLabel(String nodeLabel) {
     return usedResourcesByNodeLabels.get(nodeLabel);
   }
+
+  @Private
+  public boolean getPreemptionDisabled() {
+    return preemptionDisabled;
+  }
+
+  /**
+   * The specified queue is preemptable if system-wide preemption is turned on
+   * unless any queue in the <em>qPath</em> hierarchy has explicitly turned
+   * preemption off.
+   * NOTE: Preemptability is inherited from a queue's parent.
+   * 
+   * @return true if queue has preemption disabled, false otherwise
+   */
+  private boolean isQueueHierarchyPreemptionDisabled(CSQueue q) {
+    CapacitySchedulerConfiguration csConf = csContext.getConfiguration();
+    boolean systemWidePreemption =
+        csConf.getBoolean(YarnConfiguration.RM_SCHEDULER_ENABLE_MONITORS,
+                       YarnConfiguration.DEFAULT_RM_SCHEDULER_ENABLE_MONITORS);
+    CSQueue parentQ = q.getParent();
+
+    // If the system-wide preemption switch is turned off, all of the queues in
+    // the qPath hierarchy have preemption disabled, so return true.
+    if (!systemWidePreemption) return true;
+
+    // If q is the root queue and the system-wide preemption switch is turned
+    // on, then q does not have preemption disabled (default=false, below)
+    // unless the preemption_disabled property is explicitly set.
+    if (parentQ == null) {
+      return csConf.getPreemptionDisabled(q.getQueuePath(), false);
+    }
+
+    // If this is not the root queue, inherit the default value for the
+    // preemption_disabled property from the parent. Preemptability will be
+    // inherited from the parent's hierarchy unless explicitly overridden at
+    // this level.
+    return csConf.getPreemptionDisabled(q.getQueuePath(),
+                                        parentQ.getPreemptionDisabled());
+  }
 }
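
The rule implemented by isQueueHierarchyPreemptionDisabled can be summarized as: if the system-wide monitor switch is off, every queue is non-preemptable; otherwise walk from the root down to the queue and let the nearest explicitly set disable_preemption value win, defaulting to false. A small standalone sketch of that resolution order (a hypothetical PreemptionResolver, not the scheduler code itself):

  import java.util.*;

  // Hypothetical, simplified model of the inheritance rule described above.
  public class PreemptionResolver {
    private final Map<String, Boolean> explicit = new HashMap<>();  // "root.a" -> true means disabled
    private final boolean monitorsEnabled;

    public PreemptionResolver(boolean monitorsEnabled) {
      this.monitorsEnabled = monitorsEnabled;
    }

    public void setDisabled(String queuePath, boolean disabled) {
      explicit.put(queuePath, disabled);
    }

    public boolean isPreemptionDisabled(String queuePath) {
      if (!monitorsEnabled) {
        return true;                    // no monitors: nothing is preemptable
      }
      boolean disabled = false;         // default at the root
      String path = "";
      for (String part : queuePath.split("\\.")) {
        path = path.isEmpty() ? part : path + "." + part;
        disabled = explicit.getOrDefault(path, disabled);   // child inherits parent
      }
      return disabled;
    }

    public static void main(String[] args) {
      PreemptionResolver r = new PreemptionResolver(true);
      r.setDisabled("root.b", true);
      System.out.println(r.isPreemptionDisabled("root.b.b2"));  // true, inherited
      r.setDisabled("root.b.b2", false);
      System.out.println(r.isPreemptionDisabled("root.b.b2"));  // false, overridden
    }
  }

The same walk is what the mockPreemptionStatus helper added to TestProportionalCapacityPreemptionPolicy later in this patch performs over the mocked queue paths.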

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c690f6a8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CSQueue.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CSQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CSQueue.java
index 07a7e0e..46ee93c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CSQueue.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CSQueue.java
@@ -300,4 +300,10 @@ extends org.apache.hadoop.yarn.server.resourcemanager.scheduler.Queue {
    * @return capacity by node label
    */
   public float getCapacityByNodeLabel(String nodeLabel);
+
+  /**
+   * Check whether <em>disable_preemption</em> property is set for this queue
+   * @return true if <em>disable_preemption</em> is set, false if not
+   */
+  public boolean getPreemptionDisabled();
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c690f6a8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java
index 5bbb436..55c6c0c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java
@@ -181,6 +181,9 @@ public class CapacitySchedulerConfiguration extends ReservationSchedulerConfigur
   public static final boolean DEFAULT_ENABLE_QUEUE_MAPPING_OVERRIDE = false;
 
   @Private
+  public static final String QUEUE_PREEMPTION_DISABLED = "disable_preemption";
+
+  @Private
   public static class QueueMapping {
 
     public enum MappingType {
@@ -802,4 +805,32 @@ public class CapacitySchedulerConfiguration extends ReservationSchedulerConfigur
             DEFAULT_RESERVATION_ENFORCEMENT_WINDOW);
     return enforcementWindow;
   }
+
+  /**
+   * Sets the <em>disable_preemption</em> property in order to indicate
+   * whether or not container preemption will be disabled for the specified
+   * queue.
+   * 
+   * @param queue queue path
+   * @param preemptionDisabled true if preemption is disabled on queue
+   */
+  public void setPreemptionDisabled(String queue, boolean preemptionDisabled) {
+    setBoolean(getQueuePrefix(queue) + QUEUE_PREEMPTION_DISABLED,
+               preemptionDisabled); 
+  }
+
+  /**
+   * Indicates whether preemption is disabled on the specified queue.
+   * 
+   * @param queue queue path to query
+   * @param defaultVal used as default if the <em>disable_preemption</em>
+   * is not set in the configuration
+   * @return true if preemption is disabled on <em>queue</em>, false otherwise
+   */
+  public boolean getPreemptionDisabled(String queue, boolean defaultVal) {
+    boolean preemptionDisabled =
+        getBoolean(getQueuePrefix(queue) + QUEUE_PREEMPTION_DISABLED,
+                   defaultVal);
+    return preemptionDisabled;
+  }
 }
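
With getQueuePrefix() expanding to "yarn.scheduler.capacity." + queuePath + ".", the property these accessors read and write is yarn.scheduler.capacity.<queue-path>.disable_preemption, the same key the removed BASE_YARN_RM_PREEMPTION / SUFFIX_DISABLE_PREEMPTION constants produced. A short usage sketch mirroring the tests elsewhere in this patch (assumes the resourcemanager classes are on the classpath):

  import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration;

  public class PreemptionConfigExample {
    public static void main(String[] args) {
      CapacitySchedulerConfiguration conf = new CapacitySchedulerConfiguration();
      // Opt root.queueA (and, by inheritance, its children) out of preemption.
      conf.setPreemptionDisabled("root.queueA", true);
      // Second argument is the default used when the property is not set.
      System.out.println(conf.getPreemptionDisabled("root.queueA", false));  // true
    }
  }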

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c690f6a8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
index fd8a7ee..9ae7e60 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
@@ -296,7 +296,8 @@ public class LeafQueue extends AbstractCSQueue {
         "labels=" + labelStrBuilder.toString() + "\n" +
         "nodeLocalityDelay = " +  nodeLocalityDelay + "\n" +
         "reservationsContinueLooking = " +
-        reservationsContinueLooking + "\n");
+        reservationsContinueLooking + "\n" +
+        "preemptionDisabled = " + getPreemptionDisabled() + "\n");
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c690f6a8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/CapacitySchedulerPage.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/CapacitySchedulerPage.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/CapacitySchedulerPage.java
index 1f65b88..83df72b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/CapacitySchedulerPage.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/CapacitySchedulerPage.java
@@ -121,7 +121,8 @@ class CapacitySchedulerPage extends RmView {
           _("Configured Minimum User Limit Percent:", Integer.toString(lqinfo.getUserLimit()) + "%").
           _("Configured User Limit Factor:", String.format("%.1f", lqinfo.getUserLimitFactor())).
           _r("Active Users: ", activeUserList.toString()).
-          _("Accessible Node Labels:", StringUtils.join(",", lqinfo.getNodeLabels()));
+          _("Accessible Node Labels:", StringUtils.join(",", lqinfo.getNodeLabels())).
+          _("Preemption:", lqinfo.getPreemptionDisabled() ? "disabled" : "enabled");
 
       html._(InfoBlock.class);
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c690f6a8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/CapacitySchedulerLeafQueueInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/CapacitySchedulerLeafQueueInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/CapacitySchedulerLeafQueueInfo.java
index bb4c749..a8b0d32 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/CapacitySchedulerLeafQueueInfo.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/CapacitySchedulerLeafQueueInfo.java
@@ -37,6 +37,7 @@ public class CapacitySchedulerLeafQueueInfo extends CapacitySchedulerQueueInfo {
   protected float userLimitFactor;
   protected ResourceInfo aMResourceLimit;
   protected ResourceInfo userAMResourceLimit;
+  protected boolean preemptionDisabled;
 
   CapacitySchedulerLeafQueueInfo() {
   };
@@ -53,6 +54,7 @@ public class CapacitySchedulerLeafQueueInfo extends CapacitySchedulerQueueInfo {
     userLimitFactor = q.getUserLimitFactor();
     aMResourceLimit = new ResourceInfo(q.getAMResourceLimit());
     userAMResourceLimit = new ResourceInfo(q.getUserAMResourceLimit());
+    preemptionDisabled = q.getPreemptionDisabled();
   }
 
   public int getNumActiveApplications() {
@@ -95,4 +97,8 @@ public class CapacitySchedulerLeafQueueInfo extends CapacitySchedulerQueueInfo {
   public ResourceInfo getUserAMResourceLimit() {
     return userAMResourceLimit; 
   }
+
+  public boolean getPreemptionDisabled() {
+    return preemptionDisabled;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c690f6a8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicy.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicy.java
index 0a147f4..696b9bb 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicy.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicy.java
@@ -17,12 +17,10 @@
  */
 package org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity;
 
-import static org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity.ProportionalCapacityPreemptionPolicy.BASE_YARN_RM_PREEMPTION;
 import static org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity.ProportionalCapacityPreemptionPolicy.MAX_IGNORED_OVER_CAPACITY;
 import static org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity.ProportionalCapacityPreemptionPolicy.MONITORING_INTERVAL;
 import static org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity.ProportionalCapacityPreemptionPolicy.NATURAL_TERMINATION_FACTOR;
 import static org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity.ProportionalCapacityPreemptionPolicy.OBSERVE_ONLY;
-import static org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity.ProportionalCapacityPreemptionPolicy.SUFFIX_DISABLE_PREEMPTION;
 import static org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity.ProportionalCapacityPreemptionPolicy.TOTAL_PREEMPTION_PER_ROUND;
 import static org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity.ProportionalCapacityPreemptionPolicy.WAIT_TIME_BEFORE_KILL;
 import static org.apache.hadoop.yarn.server.resourcemanager.scheduler.ContainerPreemptEventType.KILL_CONTAINER;
@@ -52,6 +50,7 @@ import java.util.Map;
 import java.util.NavigableSet;
 import java.util.Random;
 import java.util.Set;
+import java.util.StringTokenizer;
 import java.util.TreeSet;
 
 import org.apache.commons.collections.map.HashedMap;
@@ -322,24 +321,22 @@ public class TestProportionalCapacityPreemptionPolicy {
         {   3,   0,   0,  0 },  // subqueues
       };
 
-    schedConf.setBoolean(BASE_YARN_RM_PREEMPTION
-        + "root.queueB" + SUFFIX_DISABLE_PREEMPTION, true);
+    schedConf.setPreemptionDisabled("root.queueB", true);
 
     ProportionalCapacityPreemptionPolicy policy = buildPolicy(qData);
     policy.editSchedule();
-    // With PREEMPTION_DISABLED set for queueB, get resources from queueC
+    // Since queueB is not preemptable, get resources from queueC
     verify(mDisp, times(10)).handle(argThat(new IsPreemptionRequestFor(appC)));
     verify(mDisp, never()).handle(argThat(new IsPreemptionRequestFor(appB)));
 
-    // With no PREEMPTION_DISABLED set for queueB, resources will be preempted
-    // from both queueB and queueC. Test must be reset for so that the mDisp
+    // Since queueB is preemptable, resources will be preempted
+    // from both queueB and queueC. Test must be reset so that the mDisp
     // event handler will count only events from the following test and not the
     // previous one.
     setup();
+    schedConf.setPreemptionDisabled("root.queueB", false);
     ProportionalCapacityPreemptionPolicy policy2 = buildPolicy(qData);
-    
-    schedConf.setBoolean(BASE_YARN_RM_PREEMPTION
-        + "root.queueB" + SUFFIX_DISABLE_PREEMPTION, false);
+
     policy2.editSchedule();
 
     verify(mDisp, times(4)).handle(argThat(new IsPreemptionRequestFor(appB)));
@@ -375,9 +372,8 @@ public class TestProportionalCapacityPreemptionPolicy {
 
     // Need to call setup() again to reset mDisp
     setup();
-    // Disable preemption for queueB and it's children
-    schedConf.setBoolean(BASE_YARN_RM_PREEMPTION
-        + "root.queueA.queueB" + SUFFIX_DISABLE_PREEMPTION, true);
+    // Turn off preemption for queueB and it's children
+    schedConf.setPreemptionDisabled("root.queueA.queueB", true);
     ProportionalCapacityPreemptionPolicy policy2 = buildPolicy(qData);
     policy2.editSchedule();
     ApplicationAttemptId expectedAttemptOnQueueC = 
@@ -423,9 +419,8 @@ public class TestProportionalCapacityPreemptionPolicy {
 
     // Need to call setup() again to reset mDisp
     setup();
-    // Disable preemption for queueB(appA)
-    schedConf.setBoolean(BASE_YARN_RM_PREEMPTION
-        + "root.queueA.queueB" + SUFFIX_DISABLE_PREEMPTION, true);
+    // Turn off preemption for queueB(appA)
+    schedConf.setPreemptionDisabled("root.queueA.queueB", true);
     ProportionalCapacityPreemptionPolicy policy2 = buildPolicy(qData);
     policy2.editSchedule();
     // Now that queueB(appA) is not preemptable, verify that resources come
@@ -434,11 +429,9 @@ public class TestProportionalCapacityPreemptionPolicy {
     verify(mDisp, never()).handle(argThat(new IsPreemptionRequestFor(appA)));
 
     setup();
-    // Disable preemption for two of the 3 queues with over-capacity.
-    schedConf.setBoolean(BASE_YARN_RM_PREEMPTION
-        + "root.queueD.queueE" + SUFFIX_DISABLE_PREEMPTION, true);
-    schedConf.setBoolean(BASE_YARN_RM_PREEMPTION
-        + "root.queueA.queueB" + SUFFIX_DISABLE_PREEMPTION, true);
+    // Turn off preemption for two of the 3 queues with over-capacity.
+    schedConf.setPreemptionDisabled("root.queueD.queueE", true);
+    schedConf.setPreemptionDisabled("root.queueA.queueB", true);
     ProportionalCapacityPreemptionPolicy policy3 = buildPolicy(qData);
     policy3.editSchedule();
 
@@ -476,11 +469,10 @@ public class TestProportionalCapacityPreemptionPolicy {
     verify(mDisp, times(16)).handle(argThat(new IsPreemptionRequestFor(appA)));
     verify(mDisp, times(182)).handle(argThat(new IsPreemptionRequestFor(appB)));
 
-    // Disable preemption for queueA and it's children. queueF(appC)'s request
+    // Turn off preemption for queueA and it's children. queueF(appC)'s request
     // should starve.
     setup(); // Call setup() to reset mDisp
-    schedConf.setBoolean(BASE_YARN_RM_PREEMPTION
-        + "root.queueA" + SUFFIX_DISABLE_PREEMPTION, true);
+    schedConf.setPreemptionDisabled("root.queueA", true);
     ProportionalCapacityPreemptionPolicy policy2 = buildPolicy(qData);
     policy2.editSchedule();
     verify(mDisp, never()).handle(argThat(new IsPreemptionRequestFor(appA))); // queueC
@@ -504,8 +496,7 @@ public class TestProportionalCapacityPreemptionPolicy {
       {   -1,   -1,    1,    1,    1,   -1,    1,    1,    1 },  // req granularity
       {    2,    3,    0,    0,    0,    3,    0,    0,    0 },  // subqueues
     };
-    schedConf.setBoolean(BASE_YARN_RM_PREEMPTION
-        + "root.queueA.queueC" + SUFFIX_DISABLE_PREEMPTION, true);
+    schedConf.setPreemptionDisabled("root.queueA.queueC", true);
     ProportionalCapacityPreemptionPolicy policy = buildPolicy(qData);
     policy.editSchedule();
     // Although queueC(appB) is way over capacity and is untouchable,
@@ -529,9 +520,8 @@ public class TestProportionalCapacityPreemptionPolicy {
         {   3,   2,   0,   0,   2,   0,   0,   2,   0,   0 },  // subqueues
    };
 
+    schedConf.setPreemptionDisabled("root", true);
     ProportionalCapacityPreemptionPolicy policy = buildPolicy(qData);
-    schedConf.setBoolean(BASE_YARN_RM_PREEMPTION
-        + "root" + SUFFIX_DISABLE_PREEMPTION, true);
     policy.editSchedule();
     // All queues should be non-preemptable, so request should starve.
     verify(mDisp, never()).handle(argThat(new IsPreemptionRequestFor(appB))); // queueC
@@ -893,7 +883,7 @@ public class TestProportionalCapacityPreemptionPolicy {
     verify(mDisp, times(4)).handle(argThat(new IsPreemptionRequestFor(appA)));
     setAMContainer = false;
   }
-  
+
   static class IsPreemptionRequestFor
       extends ArgumentMatcher<ContainerPreemptEvent> {
     private final ApplicationAttemptId appAttId;
@@ -952,6 +942,8 @@ public class TestProportionalCapacityPreemptionPolicy {
     when(root.getAbsoluteCapacity()).thenReturn(abs[0] / tot);
     when(root.getAbsoluteMaximumCapacity()).thenReturn(maxCap[0] / tot);
     when(root.getQueuePath()).thenReturn("root");
+    boolean preemptionDisabled = mockPreemptionStatus("root");
+    when(root.getPreemptionDisabled()).thenReturn(preemptionDisabled);
 
     for (int i = 1; i < queues.length; ++i) {
       final CSQueue q;
@@ -971,11 +963,29 @@ public class TestProportionalCapacityPreemptionPolicy {
       parentPathName = (parentPathName == null) ? "root" : parentPathName;
       String queuePathName = (parentPathName+"."+queueName).replace("/","root");
       when(q.getQueuePath()).thenReturn(queuePathName);
+      preemptionDisabled = mockPreemptionStatus(queuePathName);
+      when(q.getPreemptionDisabled()).thenReturn(preemptionDisabled);
     }
     assert 0 == pqs.size();
     return root;
   }
 
+  // Determine if any of the elements in the queupath have preemption disabled.
+  // Also must handle the case where preemption disabled property is explicitly
+  // set to something other than the default. Assumes system-wide preemption
+  // property is true.
+  private boolean mockPreemptionStatus(String queuePathName) {
+    boolean preemptionDisabled = false;
+    StringTokenizer tokenizer = new StringTokenizer(queuePathName, ".");
+    String qName = "";
+    while(tokenizer.hasMoreTokens()) {
+      qName += tokenizer.nextToken();
+      preemptionDisabled = schedConf.getPreemptionDisabled(qName, preemptionDisabled);
+      qName += ".";
+    }
+    return preemptionDisabled;
+  }
+
   ParentQueue mockParentQueue(ParentQueue p, int subqueues,
       Deque<ParentQueue> pqs) {
     ParentQueue pq = mock(ParentQueue.class);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c690f6a8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
index 85696a1..b6da94d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
@@ -19,6 +19,7 @@
 package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity;
 
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 import static org.mockito.Mockito.mock;
@@ -2071,4 +2072,56 @@ public class TestCapacityScheduler {
     Assert.assertEquals(0, report.getNumReservedContainers());
     rm.stop();
   }
+
+  @Test
+  public void testPreemptionDisabled() throws Exception {
+    CapacityScheduler cs = new CapacityScheduler();
+    CapacitySchedulerConfiguration conf = new CapacitySchedulerConfiguration();
+    conf.setBoolean(YarnConfiguration.RM_SCHEDULER_ENABLE_MONITORS, true);
+    RMContextImpl rmContext =  new RMContextImpl(null, null, null, null, null,
+        null, new RMContainerTokenSecretManager(conf),
+        new NMTokenSecretManagerInRM(conf),
+        new ClientToAMTokenSecretManagerInRM(), null);
+    setupQueueConfiguration(conf);
+    cs.setConf(new YarnConfiguration());
+    cs.setRMContext(resourceManager.getRMContext());
+    cs.init(conf);
+    cs.start();
+    cs.reinitialize(conf, rmContext);
+
+    CSQueue rootQueue = cs.getRootQueue();
+    CSQueue queueB = findQueue(rootQueue, B);
+    CSQueue queueB2 = findQueue(queueB, B2);
+
+    // When preemption turned on for the whole system
+    // (yarn.resourcemanager.scheduler.monitor.enable=true), and with no other 
+    // preemption properties set, queue root.b.b2 should be preemptable.
+    assertFalse("queue " + B2 + " should default to preemptable",
+               queueB2.getPreemptionDisabled());
+
+    // Disable preemption at the root queue level.
+    // The preemption property should be inherited from root all the
+    // way down so that root.b.b2 should NOT be preemptable.
+    conf.setPreemptionDisabled(rootQueue.getQueuePath(), true);
+    cs.reinitialize(conf, rmContext);
+    assertTrue(
+        "queue " + B2 + " should have inherited non-preemptability from root",
+        queueB2.getPreemptionDisabled());
+
+    // Enable preemption for root (grandparent) but disable for root.b (parent).
+    // root.b.b2 should inherit property from parent and NOT be preemptable
+    conf.setPreemptionDisabled(rootQueue.getQueuePath(), false);
+    conf.setPreemptionDisabled(queueB.getQueuePath(), true);
+    cs.reinitialize(conf, rmContext);
+    assertTrue(
+        "queue " + B2 + " should have inherited non-preemptability from parent",
+        queueB2.getPreemptionDisabled());
+
+    // When preemption is turned on for root.b.b2, it should be preemptable
+    // even though preemption is disabled on root.b (parent).
+    conf.setPreemptionDisabled(queueB2.getQueuePath(), false);
+    cs.reinitialize(conf, rmContext);
+    assertFalse("queue " + B2 + " should have been preemptable",
+        queueB2.getPreemptionDisabled());
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c690f6a8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesCapacitySched.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesCapacitySched.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesCapacitySched.java
index ef7435a..94040b5 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesCapacitySched.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesCapacitySched.java
@@ -347,7 +347,7 @@ public class TestRMWebServicesCapacitySched extends JerseyTestBase {
     int numExpectedElements = 13;
     boolean isParentQueue = true;
     if (!info.has("queues")) {
-      numExpectedElements = 23;
+      numExpectedElements = 24;
       isParentQueue = false;
     }
     assertEquals("incorrect number of elements", numExpectedElements, info.length());


[33/34] hadoop git commit: HADOOP-10525. Remove DRFA.MaxBackupIndex config from log4j.properties (aajisaka)

Posted by zh...@apache.org.
HADOOP-10525. Remove DRFA.MaxBackupIndex config from log4j.properties (aajisaka)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/899b5e16
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/899b5e16
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/899b5e16

Branch: refs/heads/HDFS-EC
Commit: 899b5e166f1ae94e7bcd47c0efea5bde811b926d
Parents: 1ca84c1
Author: Akira Ajisaka <aa...@apache.org>
Authored: Thu Jan 29 16:13:59 2015 +0900
Committer: Zhe Zhang <zh...@apache.org>
Committed: Thu Jan 29 10:05:27 2015 -0800

----------------------------------------------------------------------
 hadoop-common-project/hadoop-common/CHANGES.txt                  | 3 +++
 .../hadoop-common/src/main/conf/log4j.properties                 | 4 +---
 2 files changed, 4 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/899b5e16/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index f78de23..f0b4799 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -510,6 +510,9 @@ Release 2.7.0 - UNRELEASED
 
     HADOOP-11317. Increment SLF4J version to 1.7.10. (Tim Robertson via ozawa)
 
+    HADOOP-10525. Remove DRFA.MaxBackupIndex config from log4j.properties
+    (aajisaka)
+
   OPTIMIZATIONS
 
     HADOOP-11323. WritableComparator#compare keeps reference to byte array.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/899b5e16/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties b/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties
index 5fa21fa..316c48e 100644
--- a/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties
+++ b/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties
@@ -54,11 +54,9 @@ log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
 log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender
 log4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}
 
-# Rollver at midnight
+# Rollover at midnight
 log4j.appender.DRFA.DatePattern=.yyyy-MM-dd
 
-# 30-day backup
-#log4j.appender.DRFA.MaxBackupIndex=30
 log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout
 
 # Pattern format: Date LogLevel LoggerName LogMessage


[28/34] hadoop git commit: HADOOP-10574. Bump the maven plugin versions too -moving the numbers into properties. (aajisaka)

Posted by zh...@apache.org.
HADOOP-10574. Bump the maven plugin versions too -moving the numbers into properties. (aajisaka)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a64335ee
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a64335ee
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a64335ee

Branch: refs/heads/HDFS-EC
Commit: a64335eebdfb08d0454ae9b7940229e09a01b067
Parents: dd72a41
Author: Akira Ajisaka <aa...@apache.org>
Authored: Fri Jan 30 01:47:59 2015 +0900
Committer: Zhe Zhang <zh...@apache.org>
Committed: Thu Jan 29 10:05:27 2015 -0800

----------------------------------------------------------------------
 hadoop-common-project/hadoop-common/CHANGES.txt |  3 ++
 hadoop-project/pom.xml                          | 43 +++++++++++++-------
 pom.xml                                         | 40 ++++++++++++------
 3 files changed, 59 insertions(+), 27 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a64335ee/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index f0b4799..6550135 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -513,6 +513,9 @@ Release 2.7.0 - UNRELEASED
     HADOOP-10525. Remove DRFA.MaxBackupIndex config from log4j.properties
     (aajisaka)
 
+    HADOOP-10574. Bump the maven plugin versions too -moving the numbers into
+    properties. (aajisaka)
+
   OPTIMIZATIONS
 
     HADOOP-11323. WritableComparator#compare keeps reference to byte array.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a64335ee/hadoop-project/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index a13168d..a77c090 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -91,6 +91,21 @@
     <maven-surefire-plugin.version>2.17</maven-surefire-plugin.version>
     <maven-surefire-report-plugin.version>${maven-surefire-plugin.version}</maven-surefire-report-plugin.version>
     <maven-failsafe-plugin.version>${maven-surefire-plugin.version}</maven-failsafe-plugin.version>
+
+    <maven-clean-plugin.version>2.5</maven-clean-plugin.version>
+    <maven-compiler-plugin.version>3.1</maven-compiler-plugin.version>
+    <maven-install-plugin.version>2.5.1</maven-install-plugin.version>
+    <maven-resources-plugin.version>2.6</maven-resources-plugin.version>
+    <maven-jar-plugin.version>2.5</maven-jar-plugin.version>
+    <maven-war-plugin.version>2.4</maven-war-plugin.version>
+    <maven-source-plugin.version>2.3</maven-source-plugin.version>
+    <maven-checkstyle-plugin.version>2.12.1</maven-checkstyle-plugin.version>
+    <maven-project-info-reports-plugin.version>2.7</maven-project-info-reports-plugin.version>
+    <maven-pdf-plugin.version>1.2</maven-pdf-plugin.version>
+    <build-helper-maven-plugin.version>1.9</build-helper-maven-plugin.version>
+    <exec-maven-plugin.version>1.3.1</exec-maven-plugin.version>
+    <make-maven-plugin.version>1.0-beta-1</make-maven-plugin.version>
+    <native-maven-plugin.version>1.0-alpha-8</native-maven-plugin.version>
   </properties>
 
   <dependencyManagement>
@@ -933,12 +948,12 @@
       <plugins>
         <plugin>
           <artifactId>maven-clean-plugin</artifactId>
-          <version>2.4.1</version>
+          <version>${maven-clean-plugin.version}</version>
         </plugin>
         <plugin>
           <groupId>org.apache.maven.plugins</groupId>
           <artifactId>maven-compiler-plugin</artifactId>
-          <version>2.5.1</version>
+          <version>${maven-compiler-plugin.version}</version>
           <configuration>
             <source>${javac.version}</source>
             <target>${javac.version}</target>
@@ -952,7 +967,7 @@
         <plugin>
           <groupId>org.codehaus.mojo</groupId>
           <artifactId>build-helper-maven-plugin</artifactId>
-          <version>1.5</version>
+          <version>${build-helper-maven-plugin.version}</version>
         </plugin>
         <plugin>
           <groupId>org.apache.maven.plugins</groupId>
@@ -962,12 +977,12 @@
         <plugin>
           <groupId>org.apache.maven.plugins</groupId>
           <artifactId>maven-install-plugin</artifactId>
-          <version>2.3.1</version>
+          <version>${maven-install-plugin.version}</version>
         </plugin>
         <plugin>
           <groupId>org.apache.maven.plugins</groupId>
           <artifactId>maven-jar-plugin</artifactId>
-          <version>2.3.1</version>
+          <version>${maven-jar-plugin.version}</version>
         </plugin>
         <plugin>
           <groupId>org.apache.maven.plugins</groupId>
@@ -977,7 +992,7 @@
         <plugin>
           <groupId>org.apache.maven.plugins</groupId>
           <artifactId>maven-war-plugin</artifactId>
-          <version>2.1</version>
+          <version>${maven-war-plugin.version}</version>
         </plugin>
         <plugin>
           <groupId>org.codehaus.mojo</groupId>
@@ -987,22 +1002,22 @@
         <plugin>
           <groupId>org.apache.maven.plugins</groupId>
           <artifactId>maven-checkstyle-plugin</artifactId>
-          <version>2.6</version>
+          <version>${maven-checkstyle-plugin.version}</version>
         </plugin>
         <plugin>
           <groupId>org.codehaus.mojo</groupId>
           <artifactId>native-maven-plugin</artifactId>
-          <version>1.0-alpha-7</version>
+          <version>${native-maven-plugin.version}</version>
         </plugin>
         <plugin>
           <groupId>org.codehaus.mojo</groupId>
           <artifactId>make-maven-plugin</artifactId>
-          <version>1.0-beta-1</version>
+          <version>${make-maven-plugin.version}</version>
         </plugin>
         <plugin>
           <groupId>org.apache.maven.plugins</groupId>
           <artifactId>maven-source-plugin</artifactId>
-          <version>2.1.2</version>
+          <version>${maven-source-plugin.version}</version>
         </plugin>
         <plugin>
           <groupId>org.apache.avro</groupId>
@@ -1012,22 +1027,22 @@
         <plugin>
           <groupId>org.apache.maven.plugins</groupId>
           <artifactId>maven-project-info-reports-plugin</artifactId>
-          <version>2.4</version>
+          <version>${maven-project-info-reports-plugin.version}</version>
         </plugin>
         <plugin>
           <groupId>org.apache.maven.plugins</groupId>
           <artifactId>maven-resources-plugin</artifactId>
-          <version>2.2</version>
+          <version>${maven-resources-plugin.version}</version>
         </plugin>
         <plugin>
           <groupId>org.codehaus.mojo</groupId>
           <artifactId>exec-maven-plugin</artifactId>
-          <version>1.2</version>
+          <version>${exec-maven-plugin.version}</version>
         </plugin>
         <plugin>
           <groupId>org.apache.maven.plugins</groupId>
           <artifactId>maven-pdf-plugin</artifactId>
-          <version>1.1</version>
+          <version>${maven-pdf-plugin.version}</version>
         </plugin>
         <plugin>
           <groupId>org.apache.hadoop</groupId>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a64335ee/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index e33865f..3bad969 100644
--- a/pom.xml
+++ b/pom.xml
@@ -89,6 +89,20 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs
     <!-- platform encoding override -->
     <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
     <project.reporting.outputEncoding>UTF-8</project.reporting.outputEncoding>
+
+    <!-- maven plugin versions -->
+    <maven-deploy-plugin.version>2.8.1</maven-deploy-plugin.version>
+    <maven-site-plugin.version>3.4</maven-site-plugin.version>
+    <maven-antrun-plugin.version>1.7</maven-antrun-plugin.version>
+    <maven-assembly-plugin.version>2.4</maven-assembly-plugin.version>
+    <maven-dependency-plugin.version>2.8</maven-dependency-plugin.version>
+    <maven-enforcer-plugin.version>1.3.1</maven-enforcer-plugin.version>
+    <maven-javadoc-plugin.version>2.9.1</maven-javadoc-plugin.version>
+    <apache-rat-plugin.version>0.10</apache-rat-plugin.version>
+    <wagon-ssh.version>1.0</wagon-ssh.version>
+    <maven-clover2-plugin.version>3.3.0</maven-clover2-plugin.version>
+    <maven-bundle-plugin.version>2.5.0</maven-bundle-plugin.version>
+    <lifecycle-mapping.version>1.0.0</lifecycle-mapping.version>
   </properties>
 
   <modules>
@@ -112,12 +126,12 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs
         <plugin>
           <groupId>org.apache.maven.plugins</groupId>
           <artifactId>maven-dependency-plugin</artifactId>
-          <version>2.4</version>
+          <version>${maven-dependency-plugin.version}</version>
         </plugin>
         <plugin>
           <groupId>org.apache.maven.plugins</groupId>
           <artifactId>maven-enforcer-plugin</artifactId>
-          <version>1.3.1</version>
+          <version>${maven-enforcer-plugin.version}</version>
           <configuration>
             <rules>
               <requireMavenVersion>
@@ -132,32 +146,32 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs
         <plugin>
           <groupId>org.apache.maven.plugins</groupId>
           <artifactId>maven-assembly-plugin</artifactId>
-          <version>2.3</version>
+          <version>${maven-assembly-plugin.version}</version>
         </plugin>
         <plugin>
           <groupId>org.apache.maven.plugins</groupId>
           <artifactId>maven-deploy-plugin</artifactId>
-          <version>2.5</version>
+          <version>${maven-deploy-plugin.version}</version>
         </plugin>
         <plugin>
           <groupId>org.apache.rat</groupId>
           <artifactId>apache-rat-plugin</artifactId>
-          <version>0.7</version>
+          <version>${apache-rat-plugin.version}</version>
         </plugin>
         <plugin>
           <groupId>org.apache.maven.plugins</groupId>
           <artifactId>maven-antrun-plugin</artifactId>
-          <version>1.7</version>
+          <version>${maven-antrun-plugin.version}</version>
         </plugin>
         <plugin>
           <groupId>org.apache.maven.plugins</groupId>
           <artifactId>maven-site-plugin</artifactId>
-          <version>3.4</version>
+          <version>${maven-site-plugin.version}</version>
           <dependencies>
             <dependency><!-- add support for ssh/scp -->
               <groupId>org.apache.maven.wagon</groupId>
               <artifactId>wagon-ssh</artifactId>
-              <version>1.0</version>
+              <version>${wagon-ssh.version}</version>
             </dependency>
           </dependencies>
         </plugin>
@@ -166,7 +180,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs
         <plugin>
           <groupId>org.eclipse.m2e</groupId>
           <artifactId>lifecycle-mapping</artifactId>
-          <version>1.0.0</version>
+          <version>${lifecycle-mapping.version}</version>
           <configuration>
             <lifecycleMappingMetadata>
               <pluginExecutions>
@@ -271,12 +285,12 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs
         <plugin>
           <groupId>com.atlassian.maven.plugins</groupId>
           <artifactId>maven-clover2-plugin</artifactId>
-          <version>3.0.5</version>
+          <version>${maven-clover2-plugin.version}</version>
         </plugin>
         <plugin>
           <groupId>org.apache.felix</groupId>
           <artifactId>maven-bundle-plugin</artifactId>
-          <version>2.4.0</version>
+          <version>${maven-bundle-plugin.version}</version>
         </plugin>
       </plugins>
     </pluginManagement>
@@ -352,7 +366,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs
       <plugin>
         <groupId>org.apache.maven.plugins</groupId>
         <artifactId>maven-javadoc-plugin</artifactId>
-        <version>2.8.1</version>
+        <version>${maven-javadoc-plugin.version}</version>
         <inherited>false</inherited>
         <reportSets>
           <reportSet>
@@ -415,7 +429,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs
       <plugin>
         <groupId>org.apache.maven.plugins</groupId>
         <artifactId>maven-dependency-plugin</artifactId>
-        <version>2.4</version>
+        <version>${maven-dependency-plugin.version}</version>
         <reportSets>
           <reportSet>
             <reports>


[03/34] hadoop git commit: HDFS-49. MiniDFSCluster.stopDataNode will always shut down a node in the cluster if a matching name is not found. (stevel)

Posted by zh...@apache.org.
HDFS-49. MiniDFSCluster.stopDataNode will always shut down a node in the cluster if a matching name is not found. (stevel)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6b67115f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6b67115f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6b67115f

Branch: refs/heads/HDFS-EC
Commit: 6b67115f88ca72e732d47fd9103567302e2fcf5a
Parents: 21c74e6
Author: Steve Loughran <st...@apache.org>
Authored: Tue Jan 27 11:10:46 2015 +0000
Committer: Zhe Zhang <zh...@apache.org>
Committed: Thu Jan 29 10:05:23 2015 -0800

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt              |  3 +++
 .../test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java | 11 ++++++++---
 2 files changed, 11 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6b67115f/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 39453d1..5189eab 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -790,6 +790,9 @@ Release 2.7.0 - UNRELEASED
 
     HDFS-7676. Fix TestFileTruncate to avoid bug of HDFS-7611. (shv)
 
+    HDFS-49. MiniDFSCluster.stopDataNode will always shut down a node in
+    the cluster if a matching name is not found. (stevel)
+
 Release 2.6.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6b67115f/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
index 7367ffb..8551263 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
@@ -1923,6 +1923,9 @@ public class MiniDFSCluster {
 
   /*
    * Shutdown a particular datanode
+   * @param i node index
+   * @return null if the node index is out of range, else the properties of the
+   * removed node
    */
   public synchronized DataNodeProperties stopDataNode(int i) {
     if (i < 0 || i >= dataNodes.size()) {
@@ -1941,18 +1944,20 @@ public class MiniDFSCluster {
 
   /*
    * Shutdown a datanode by name.
+   * @return the removed datanode or null if there was no match
    */
   public synchronized DataNodeProperties stopDataNode(String dnName) {
-    int i;
-    for (i = 0; i < dataNodes.size(); i++) {
+    int node = -1;
+    for (int i = 0; i < dataNodes.size(); i++) {
       DataNode dn = dataNodes.get(i).datanode;
       LOG.info("DN name=" + dnName + " found DN=" + dn +
           " with name=" + dn.getDisplayName());
       if (dnName.equals(dn.getDatanodeId().getXferAddr())) {
+        node = i;
         break;
       }
     }
-    return stopDataNode(i);
+    return stopDataNode(node);
   }
 
   /**

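As a quick illustration of the new contract (a hedged usage sketch, not part of the commit; the running cluster and the bogus address are assumed): stopDataNode(String) now returns null when no datanode matches the given transfer address, instead of falling through and stopping whichever node the stale loop index happened to point at.

    // assumes a running MiniDFSCluster named "cluster" (setup omitted)
    String addr = cluster.getDataNodes().get(0).getDatanodeId().getXferAddr();
    assert cluster.stopDataNode(addr) != null;              // matching name: that node is shut down
    assert cluster.stopDataNode("no-such-host:0") == null;  // no match: nothing is stopped anymore
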

[13/34] hadoop git commit: YARN-3011. Possible IllegalArgumentException in ResourceLocalizationService might lead NM to crash. Contributed by Varun Saxena

Posted by zh...@apache.org.
YARN-3011. Possible IllegalArgumentException in ResourceLocalizationService might lead NM to crash. Contributed by Varun Saxena


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ec5b3071
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ec5b3071
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ec5b3071

Branch: refs/heads/HDFS-EC
Commit: ec5b307145b0762003f9d8a1ccb4b34fd1a658f0
Parents: 32548f4
Author: Jian He <ji...@apache.org>
Authored: Tue Jan 27 13:31:22 2015 -0800
Committer: Zhe Zhang <zh...@apache.org>
Committed: Thu Jan 29 10:05:24 2015 -0800

----------------------------------------------------------------------
 hadoop-yarn-project/CHANGES.txt                 |  3 +++
 .../localizer/ResourceLocalizationService.java  | 10 +++++++++
 .../TestResourceLocalizationService.java        | 23 ++++++++++++++++++--
 3 files changed, 34 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ec5b3071/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 52b3cab..054b394 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -418,6 +418,9 @@ Release 2.7.0 - UNRELEASED
     YARN-3088. LinuxContainerExecutor.deleteAsUser can throw NPE if native
     executor returns an error (Eric Payne via jlowe)
 
+    YARN-3011. Possible IllegalArgumentException in ResourceLocalizationService
+    might lead NM to crash. (Varun Saxena via jianhe)
+
 Release 2.6.0 - 2014-11-18
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ec5b3071/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java
index 2f4fa5e..8c84132 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java
@@ -794,6 +794,13 @@ public class ResourceLocalizationService extends CompositeService
               .getResource().getRequest(), e.getMessage()));
             LOG.error("Local path for public localization is not found. "
                 + " May be disks failed.", e);
+          } catch (IllegalArgumentException ie) {
+            rsrc.unlock();
+            publicRsrc.handle(new ResourceFailedLocalizationEvent(request
+                .getResource().getRequest(), ie.getMessage()));
+            LOG.error("Local path for public localization is not found. "
+                + " Incorrect path. " + request.getResource().getRequest()
+                .getPath(), ie);
           } catch (RejectedExecutionException re) {
             rsrc.unlock();
             publicRsrc.handle(new ResourceFailedLocalizationEvent(request
@@ -1015,6 +1022,9 @@ public class ResourceLocalizationService extends CompositeService
         } catch (IOException e) {
           LOG.error("local path for PRIVATE localization could not be " +
             "found. Disks might have failed.", e);
+        } catch (IllegalArgumentException e) {
+          LOG.error("Incorrect path for PRIVATE localization. "
+              + next.getResource().getFile(), e);
         } catch (URISyntaxException e) {
             //TODO fail? Already translated several times...
         }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ec5b3071/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestResourceLocalizationService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestResourceLocalizationService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestResourceLocalizationService.java
index 9ed18dd..30af5a4 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestResourceLocalizationService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestResourceLocalizationService.java
@@ -1253,14 +1253,33 @@ public class TestResourceLocalizationService {
             user, appId);
       Assert.assertNull(tracker.getLocalizedResource(pubReq));
 
-      // test RejectedExecutionException
+      // test IllegalArgumentException
+      String name = Long.toHexString(r.nextLong());
+      URL url = getPath("/local/PRIVATE/" + name + "/");
+      final LocalResource rsrc =
+          BuilderUtils.newLocalResource(url, LocalResourceType.FILE,
+          LocalResourceVisibility.PUBLIC, r.nextInt(1024) + 1024L,
+          r.nextInt(1024) + 2048L, false);
+      final LocalResourceRequest pubReq1 = new LocalResourceRequest(rsrc);
+      Map<LocalResourceVisibility, Collection<LocalResourceRequest>> req1 =
+          new HashMap<LocalResourceVisibility, 
+          Collection<LocalResourceRequest>>();
+      req1.put(LocalResourceVisibility.PUBLIC,
+          Collections.singletonList(pubReq1));
       Mockito
         .doCallRealMethod()
         .when(dirsHandlerSpy)
         .getLocalPathForWrite(isA(String.class), Mockito.anyLong(),
           Mockito.anyBoolean());
+      // send request
+      spyService.handle(new ContainerLocalizationRequestEvent(c, req1));
+      dispatcher.await();
+      tracker =
+          spyService.getLocalResourcesTracker(LocalResourceVisibility.PUBLIC,
+          user, appId);
+      Assert.assertNull(tracker.getLocalizedResource(pubReq));
 
-      // shutdown the thread pool
+      // test RejectedExecutionException by shutting down the thread pool
       PublicLocalizer publicLocalizer = spyService.getPublicLocalizer();
       publicLocalizer.threadPool.shutdown();
 

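The shape of the fix, reduced to an excerpt-style sketch (hedged: the identifiers follow the patch above, but the surrounding method is simplified and this is not the literal NM code): an IllegalArgumentException thrown while resolving the local write path is converted into a ResourceFailedLocalizationEvent for that one resource, so the dispatcher thread driving ResourceLocalizationService keeps running instead of dying and bringing the NodeManager down.

    try {
      // getLocalPathForWrite(String, long, boolean) may throw
      // IllegalArgumentException when the requested resource path is malformed
      publicDirDestPath = dirsHandler.getLocalPathForWrite(
          "filecache", resourceSize, true);   // sub-dir name and size are placeholders
    } catch (IllegalArgumentException ie) {
      rsrc.unlock();
      // fail only this localization request; the dispatcher thread stays alive
      publicRsrc.handle(new ResourceFailedLocalizationEvent(
          request.getResource().getRequest(), ie.getMessage()));
      LOG.error("Incorrect path for public localization: "
          + request.getResource().getRequest().getPath(), ie);
    }
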

[09/34] hadoop git commit: HDFS-3689. Add support for variable length block. Contributed by Jing Zhao.

Posted by zh...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/32548f4b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored.xml b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored.xml
index 6e8078b..2a1b549 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored.xml
@@ -1,6 +1,6 @@
 <?xml version="1.0" encoding="UTF-8"?>
 <EDITS>
-  <EDITS_VERSION>-61</EDITS_VERSION>
+  <EDITS_VERSION>-62</EDITS_VERSION>
   <RECORD>
     <OPCODE>OP_START_LOG_SEGMENT</OPCODE>
     <DATA>
@@ -13,8 +13,8 @@
       <TXID>2</TXID>
       <DELEGATION_KEY>
         <KEY_ID>1</KEY_ID>
-        <EXPIRY_DATE>1421826999207</EXPIRY_DATE>
-        <KEY>ca9a0c8b240570b3</KEY>
+        <EXPIRY_DATE>1422569009939</EXPIRY_DATE>
+        <KEY>907cb34000041937</KEY>
       </DELEGATION_KEY>
     </DATA>
   </RECORD>
@@ -24,8 +24,8 @@
       <TXID>3</TXID>
       <DELEGATION_KEY>
         <KEY_ID>2</KEY_ID>
-        <EXPIRY_DATE>1421826999210</EXPIRY_DATE>
-        <KEY>833c25a6fb2b0a6f</KEY>
+        <EXPIRY_DATE>1422569009941</EXPIRY_DATE>
+        <KEY>178fa1bd83474b43</KEY>
       </DELEGATION_KEY>
     </DATA>
   </RECORD>
@@ -37,19 +37,19 @@
       <INODEID>16386</INODEID>
       <PATH>/file_create</PATH>
       <REPLICATION>1</REPLICATION>
-      <MTIME>1421135800328</MTIME>
-      <ATIME>1421135800328</ATIME>
+      <MTIME>1421877810832</MTIME>
+      <ATIME>1421877810832</ATIME>
       <BLOCKSIZE>512</BLOCKSIZE>
-      <CLIENT_NAME>DFSClient_NONMAPREDUCE_240777107_1</CLIENT_NAME>
+      <CLIENT_NAME>DFSClient_NONMAPREDUCE_-986598042_1</CLIENT_NAME>
       <CLIENT_MACHINE>127.0.0.1</CLIENT_MACHINE>
       <OVERWRITE>true</OVERWRITE>
       <PERMISSION_STATUS>
-        <USERNAME>shv</USERNAME>
+        <USERNAME>jing</USERNAME>
         <GROUPNAME>supergroup</GROUPNAME>
         <MODE>420</MODE>
       </PERMISSION_STATUS>
-      <RPC_CLIENTID>cb20a92a-2c2f-4305-a838-2a01c6e73e18</RPC_CLIENTID>
-      <RPC_CALLID>9</RPC_CALLID>
+      <RPC_CLIENTID>1730855b-1f27-4f17-9f72-b9f92eb3a8bd</RPC_CLIENTID>
+      <RPC_CALLID>6</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
@@ -60,60 +60,93 @@
       <INODEID>0</INODEID>
       <PATH>/file_create</PATH>
       <REPLICATION>1</REPLICATION>
-      <MTIME>1421135800357</MTIME>
-      <ATIME>1421135800328</ATIME>
+      <MTIME>1421877810888</MTIME>
+      <ATIME>1421877810832</ATIME>
       <BLOCKSIZE>512</BLOCKSIZE>
       <CLIENT_NAME></CLIENT_NAME>
       <CLIENT_MACHINE></CLIENT_MACHINE>
       <OVERWRITE>false</OVERWRITE>
       <PERMISSION_STATUS>
-        <USERNAME>shv</USERNAME>
+        <USERNAME>jing</USERNAME>
         <GROUPNAME>supergroup</GROUPNAME>
         <MODE>420</MODE>
       </PERMISSION_STATUS>
     </DATA>
   </RECORD>
   <RECORD>
-    <OPCODE>OP_SET_STORAGE_POLICY</OPCODE>
+    <OPCODE>OP_APPEND</OPCODE>
     <DATA>
       <TXID>6</TXID>
       <PATH>/file_create</PATH>
+      <CLIENT_NAME>DFSClient_NONMAPREDUCE_-986598042_1</CLIENT_NAME>
+      <CLIENT_MACHINE>127.0.0.1</CLIENT_MACHINE>
+      <NEWBLOCK>false</NEWBLOCK>
+      <RPC_CLIENTID>1730855b-1f27-4f17-9f72-b9f92eb3a8bd</RPC_CLIENTID>
+      <RPC_CALLID>8</RPC_CALLID>
+    </DATA>
+  </RECORD>
+  <RECORD>
+    <OPCODE>OP_CLOSE</OPCODE>
+    <DATA>
+      <TXID>7</TXID>
+      <LENGTH>0</LENGTH>
+      <INODEID>0</INODEID>
+      <PATH>/file_create</PATH>
+      <REPLICATION>1</REPLICATION>
+      <MTIME>1421877810899</MTIME>
+      <ATIME>1421877810832</ATIME>
+      <BLOCKSIZE>512</BLOCKSIZE>
+      <CLIENT_NAME></CLIENT_NAME>
+      <CLIENT_MACHINE></CLIENT_MACHINE>
+      <OVERWRITE>false</OVERWRITE>
+      <PERMISSION_STATUS>
+        <USERNAME>jing</USERNAME>
+        <GROUPNAME>supergroup</GROUPNAME>
+        <MODE>420</MODE>
+      </PERMISSION_STATUS>
+    </DATA>
+  </RECORD>
+  <RECORD>
+    <OPCODE>OP_SET_STORAGE_POLICY</OPCODE>
+    <DATA>
+      <TXID>8</TXID>
+      <PATH>/file_create</PATH>
       <POLICYID>7</POLICYID>
     </DATA>
   </RECORD>
   <RECORD>
     <OPCODE>OP_RENAME_OLD</OPCODE>
     <DATA>
-      <TXID>7</TXID>
+      <TXID>9</TXID>
       <LENGTH>0</LENGTH>
       <SRC>/file_create</SRC>
       <DST>/file_moved</DST>
-      <TIMESTAMP>1421135800368</TIMESTAMP>
-      <RPC_CLIENTID>cb20a92a-2c2f-4305-a838-2a01c6e73e18</RPC_CLIENTID>
-      <RPC_CALLID>12</RPC_CALLID>
+      <TIMESTAMP>1421877810907</TIMESTAMP>
+      <RPC_CLIENTID>1730855b-1f27-4f17-9f72-b9f92eb3a8bd</RPC_CLIENTID>
+      <RPC_CALLID>11</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
     <OPCODE>OP_DELETE</OPCODE>
     <DATA>
-      <TXID>8</TXID>
+      <TXID>10</TXID>
       <LENGTH>0</LENGTH>
       <PATH>/file_moved</PATH>
-      <TIMESTAMP>1421135800377</TIMESTAMP>
-      <RPC_CLIENTID>cb20a92a-2c2f-4305-a838-2a01c6e73e18</RPC_CLIENTID>
-      <RPC_CALLID>13</RPC_CALLID>
+      <TIMESTAMP>1421877810915</TIMESTAMP>
+      <RPC_CLIENTID>1730855b-1f27-4f17-9f72-b9f92eb3a8bd</RPC_CLIENTID>
+      <RPC_CALLID>12</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
     <OPCODE>OP_MKDIR</OPCODE>
     <DATA>
-      <TXID>9</TXID>
+      <TXID>11</TXID>
       <LENGTH>0</LENGTH>
       <INODEID>16387</INODEID>
       <PATH>/directory_mkdir</PATH>
-      <TIMESTAMP>1421135800394</TIMESTAMP>
+      <TIMESTAMP>1421877810923</TIMESTAMP>
       <PERMISSION_STATUS>
-        <USERNAME>shv</USERNAME>
+        <USERNAME>jing</USERNAME>
         <GROUPNAME>supergroup</GROUPNAME>
         <MODE>493</MODE>
       </PERMISSION_STATUS>
@@ -122,94 +155,94 @@
   <RECORD>
     <OPCODE>OP_ALLOW_SNAPSHOT</OPCODE>
     <DATA>
-      <TXID>10</TXID>
+      <TXID>12</TXID>
       <SNAPSHOTROOT>/directory_mkdir</SNAPSHOTROOT>
     </DATA>
   </RECORD>
   <RECORD>
     <OPCODE>OP_DISALLOW_SNAPSHOT</OPCODE>
     <DATA>
-      <TXID>11</TXID>
+      <TXID>13</TXID>
       <SNAPSHOTROOT>/directory_mkdir</SNAPSHOTROOT>
     </DATA>
   </RECORD>
   <RECORD>
     <OPCODE>OP_ALLOW_SNAPSHOT</OPCODE>
     <DATA>
-      <TXID>12</TXID>
+      <TXID>14</TXID>
       <SNAPSHOTROOT>/directory_mkdir</SNAPSHOTROOT>
     </DATA>
   </RECORD>
   <RECORD>
     <OPCODE>OP_CREATE_SNAPSHOT</OPCODE>
     <DATA>
-      <TXID>13</TXID>
+      <TXID>15</TXID>
       <SNAPSHOTROOT>/directory_mkdir</SNAPSHOTROOT>
       <SNAPSHOTNAME>snapshot1</SNAPSHOTNAME>
-      <RPC_CLIENTID>cb20a92a-2c2f-4305-a838-2a01c6e73e18</RPC_CLIENTID>
-      <RPC_CALLID>18</RPC_CALLID>
+      <RPC_CLIENTID>1730855b-1f27-4f17-9f72-b9f92eb3a8bd</RPC_CLIENTID>
+      <RPC_CALLID>17</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
     <OPCODE>OP_RENAME_SNAPSHOT</OPCODE>
     <DATA>
-      <TXID>14</TXID>
+      <TXID>16</TXID>
       <SNAPSHOTROOT>/directory_mkdir</SNAPSHOTROOT>
       <SNAPSHOTOLDNAME>snapshot1</SNAPSHOTOLDNAME>
       <SNAPSHOTNEWNAME>snapshot2</SNAPSHOTNEWNAME>
-      <RPC_CLIENTID>cb20a92a-2c2f-4305-a838-2a01c6e73e18</RPC_CLIENTID>
-      <RPC_CALLID>19</RPC_CALLID>
+      <RPC_CLIENTID>1730855b-1f27-4f17-9f72-b9f92eb3a8bd</RPC_CLIENTID>
+      <RPC_CALLID>18</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
     <OPCODE>OP_DELETE_SNAPSHOT</OPCODE>
     <DATA>
-      <TXID>15</TXID>
+      <TXID>17</TXID>
       <SNAPSHOTROOT>/directory_mkdir</SNAPSHOTROOT>
       <SNAPSHOTNAME>snapshot2</SNAPSHOTNAME>
-      <RPC_CLIENTID>cb20a92a-2c2f-4305-a838-2a01c6e73e18</RPC_CLIENTID>
-      <RPC_CALLID>20</RPC_CALLID>
+      <RPC_CLIENTID>1730855b-1f27-4f17-9f72-b9f92eb3a8bd</RPC_CLIENTID>
+      <RPC_CALLID>19</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
     <OPCODE>OP_ADD</OPCODE>
     <DATA>
-      <TXID>16</TXID>
+      <TXID>18</TXID>
       <LENGTH>0</LENGTH>
       <INODEID>16388</INODEID>
       <PATH>/file_create</PATH>
       <REPLICATION>1</REPLICATION>
-      <MTIME>1421135800442</MTIME>
-      <ATIME>1421135800442</ATIME>
+      <MTIME>1421877810946</MTIME>
+      <ATIME>1421877810946</ATIME>
       <BLOCKSIZE>512</BLOCKSIZE>
-      <CLIENT_NAME>DFSClient_NONMAPREDUCE_240777107_1</CLIENT_NAME>
+      <CLIENT_NAME>DFSClient_NONMAPREDUCE_-986598042_1</CLIENT_NAME>
       <CLIENT_MACHINE>127.0.0.1</CLIENT_MACHINE>
       <OVERWRITE>true</OVERWRITE>
       <PERMISSION_STATUS>
-        <USERNAME>shv</USERNAME>
+        <USERNAME>jing</USERNAME>
         <GROUPNAME>supergroup</GROUPNAME>
         <MODE>420</MODE>
       </PERMISSION_STATUS>
-      <RPC_CLIENTID>cb20a92a-2c2f-4305-a838-2a01c6e73e18</RPC_CLIENTID>
-      <RPC_CALLID>21</RPC_CALLID>
+      <RPC_CLIENTID>1730855b-1f27-4f17-9f72-b9f92eb3a8bd</RPC_CLIENTID>
+      <RPC_CALLID>20</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
     <OPCODE>OP_CLOSE</OPCODE>
     <DATA>
-      <TXID>17</TXID>
+      <TXID>19</TXID>
       <LENGTH>0</LENGTH>
       <INODEID>0</INODEID>
       <PATH>/file_create</PATH>
       <REPLICATION>1</REPLICATION>
-      <MTIME>1421135800445</MTIME>
-      <ATIME>1421135800442</ATIME>
+      <MTIME>1421877810948</MTIME>
+      <ATIME>1421877810946</ATIME>
       <BLOCKSIZE>512</BLOCKSIZE>
       <CLIENT_NAME></CLIENT_NAME>
       <CLIENT_MACHINE></CLIENT_MACHINE>
       <OVERWRITE>false</OVERWRITE>
       <PERMISSION_STATUS>
-        <USERNAME>shv</USERNAME>
+        <USERNAME>jing</USERNAME>
         <GROUPNAME>supergroup</GROUPNAME>
         <MODE>420</MODE>
       </PERMISSION_STATUS>
@@ -218,7 +251,7 @@
   <RECORD>
     <OPCODE>OP_SET_REPLICATION</OPCODE>
     <DATA>
-      <TXID>18</TXID>
+      <TXID>20</TXID>
       <PATH>/file_create</PATH>
       <REPLICATION>1</REPLICATION>
     </DATA>
@@ -226,7 +259,7 @@
   <RECORD>
     <OPCODE>OP_SET_PERMISSIONS</OPCODE>
     <DATA>
-      <TXID>19</TXID>
+      <TXID>21</TXID>
       <SRC>/file_create</SRC>
       <MODE>511</MODE>
     </DATA>
@@ -234,7 +267,7 @@
   <RECORD>
     <OPCODE>OP_SET_OWNER</OPCODE>
     <DATA>
-      <TXID>20</TXID>
+      <TXID>22</TXID>
       <SRC>/file_create</SRC>
       <USERNAME>newOwner</USERNAME>
     </DATA>
@@ -242,7 +275,7 @@
   <RECORD>
     <OPCODE>OP_TIMES</OPCODE>
     <DATA>
-      <TXID>21</TXID>
+      <TXID>23</TXID>
       <LENGTH>0</LENGTH>
       <PATH>/file_create</PATH>
       <MTIME>1285195527000</MTIME>
@@ -252,7 +285,7 @@
   <RECORD>
     <OPCODE>OP_SET_QUOTA</OPCODE>
     <DATA>
-      <TXID>22</TXID>
+      <TXID>24</TXID>
       <SRC>/directory_mkdir</SRC>
       <NSQUOTA>1000</NSQUOTA>
       <DSQUOTA>-1</DSQUOTA>
@@ -261,57 +294,57 @@
   <RECORD>
     <OPCODE>OP_RENAME</OPCODE>
     <DATA>
-      <TXID>23</TXID>
+      <TXID>25</TXID>
       <LENGTH>0</LENGTH>
       <SRC>/file_create</SRC>
       <DST>/file_moved</DST>
-      <TIMESTAMP>1421135800485</TIMESTAMP>
+      <TIMESTAMP>1421877810968</TIMESTAMP>
       <OPTIONS>NONE</OPTIONS>
-      <RPC_CLIENTID>cb20a92a-2c2f-4305-a838-2a01c6e73e18</RPC_CLIENTID>
-      <RPC_CALLID>28</RPC_CALLID>
+      <RPC_CLIENTID>1730855b-1f27-4f17-9f72-b9f92eb3a8bd</RPC_CLIENTID>
+      <RPC_CALLID>27</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
     <OPCODE>OP_ADD</OPCODE>
     <DATA>
-      <TXID>24</TXID>
+      <TXID>26</TXID>
       <LENGTH>0</LENGTH>
       <INODEID>16389</INODEID>
       <PATH>/file_concat_target</PATH>
       <REPLICATION>1</REPLICATION>
-      <MTIME>1421135800495</MTIME>
-      <ATIME>1421135800495</ATIME>
+      <MTIME>1421877810972</MTIME>
+      <ATIME>1421877810972</ATIME>
       <BLOCKSIZE>512</BLOCKSIZE>
-      <CLIENT_NAME>DFSClient_NONMAPREDUCE_240777107_1</CLIENT_NAME>
+      <CLIENT_NAME>DFSClient_NONMAPREDUCE_-986598042_1</CLIENT_NAME>
       <CLIENT_MACHINE>127.0.0.1</CLIENT_MACHINE>
       <OVERWRITE>true</OVERWRITE>
       <PERMISSION_STATUS>
-        <USERNAME>shv</USERNAME>
+        <USERNAME>jing</USERNAME>
         <GROUPNAME>supergroup</GROUPNAME>
         <MODE>420</MODE>
       </PERMISSION_STATUS>
-      <RPC_CLIENTID>cb20a92a-2c2f-4305-a838-2a01c6e73e18</RPC_CLIENTID>
-      <RPC_CALLID>30</RPC_CALLID>
+      <RPC_CLIENTID>1730855b-1f27-4f17-9f72-b9f92eb3a8bd</RPC_CLIENTID>
+      <RPC_CALLID>29</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
     <OPCODE>OP_ALLOCATE_BLOCK_ID</OPCODE>
     <DATA>
-      <TXID>25</TXID>
+      <TXID>27</TXID>
       <BLOCK_ID>1073741825</BLOCK_ID>
     </DATA>
   </RECORD>
   <RECORD>
     <OPCODE>OP_SET_GENSTAMP_V2</OPCODE>
     <DATA>
-      <TXID>26</TXID>
+      <TXID>28</TXID>
       <GENSTAMPV2>1001</GENSTAMPV2>
     </DATA>
   </RECORD>
   <RECORD>
     <OPCODE>OP_ADD_BLOCK</OPCODE>
     <DATA>
-      <TXID>27</TXID>
+      <TXID>29</TXID>
       <PATH>/file_concat_target</PATH>
       <BLOCK>
         <BLOCK_ID>1073741825</BLOCK_ID>
@@ -325,21 +358,21 @@
   <RECORD>
     <OPCODE>OP_ALLOCATE_BLOCK_ID</OPCODE>
     <DATA>
-      <TXID>28</TXID>
+      <TXID>30</TXID>
       <BLOCK_ID>1073741826</BLOCK_ID>
     </DATA>
   </RECORD>
   <RECORD>
     <OPCODE>OP_SET_GENSTAMP_V2</OPCODE>
     <DATA>
-      <TXID>29</TXID>
+      <TXID>31</TXID>
       <GENSTAMPV2>1002</GENSTAMPV2>
     </DATA>
   </RECORD>
   <RECORD>
     <OPCODE>OP_ADD_BLOCK</OPCODE>
     <DATA>
-      <TXID>30</TXID>
+      <TXID>32</TXID>
       <PATH>/file_concat_target</PATH>
       <BLOCK>
         <BLOCK_ID>1073741825</BLOCK_ID>
@@ -358,21 +391,21 @@
   <RECORD>
     <OPCODE>OP_ALLOCATE_BLOCK_ID</OPCODE>
     <DATA>
-      <TXID>31</TXID>
+      <TXID>33</TXID>
       <BLOCK_ID>1073741827</BLOCK_ID>
     </DATA>
   </RECORD>
   <RECORD>
     <OPCODE>OP_SET_GENSTAMP_V2</OPCODE>
     <DATA>
-      <TXID>32</TXID>
+      <TXID>34</TXID>
       <GENSTAMPV2>1003</GENSTAMPV2>
     </DATA>
   </RECORD>
   <RECORD>
     <OPCODE>OP_ADD_BLOCK</OPCODE>
     <DATA>
-      <TXID>33</TXID>
+      <TXID>35</TXID>
       <PATH>/file_concat_target</PATH>
       <BLOCK>
         <BLOCK_ID>1073741826</BLOCK_ID>
@@ -391,13 +424,13 @@
   <RECORD>
     <OPCODE>OP_CLOSE</OPCODE>
     <DATA>
-      <TXID>34</TXID>
+      <TXID>36</TXID>
       <LENGTH>0</LENGTH>
       <INODEID>0</INODEID>
       <PATH>/file_concat_target</PATH>
       <REPLICATION>1</REPLICATION>
-      <MTIME>1421135801050</MTIME>
-      <ATIME>1421135800495</ATIME>
+      <MTIME>1421877811083</MTIME>
+      <ATIME>1421877810972</ATIME>
       <BLOCKSIZE>512</BLOCKSIZE>
       <CLIENT_NAME></CLIENT_NAME>
       <CLIENT_MACHINE></CLIENT_MACHINE>
@@ -418,7 +451,7 @@
         <GENSTAMP>1003</GENSTAMP>
       </BLOCK>
       <PERMISSION_STATUS>
-        <USERNAME>shv</USERNAME>
+        <USERNAME>jing</USERNAME>
         <GROUPNAME>supergroup</GROUPNAME>
         <MODE>420</MODE>
       </PERMISSION_STATUS>
@@ -427,44 +460,44 @@
   <RECORD>
     <OPCODE>OP_ADD</OPCODE>
     <DATA>
-      <TXID>35</TXID>
+      <TXID>37</TXID>
       <LENGTH>0</LENGTH>
       <INODEID>16390</INODEID>
       <PATH>/file_concat_0</PATH>
       <REPLICATION>1</REPLICATION>
-      <MTIME>1421135801053</MTIME>
-      <ATIME>1421135801053</ATIME>
+      <MTIME>1421877811086</MTIME>
+      <ATIME>1421877811086</ATIME>
       <BLOCKSIZE>512</BLOCKSIZE>
-      <CLIENT_NAME>DFSClient_NONMAPREDUCE_240777107_1</CLIENT_NAME>
+      <CLIENT_NAME>DFSClient_NONMAPREDUCE_-986598042_1</CLIENT_NAME>
       <CLIENT_MACHINE>127.0.0.1</CLIENT_MACHINE>
       <OVERWRITE>true</OVERWRITE>
       <PERMISSION_STATUS>
-        <USERNAME>shv</USERNAME>
+        <USERNAME>jing</USERNAME>
         <GROUPNAME>supergroup</GROUPNAME>
         <MODE>420</MODE>
       </PERMISSION_STATUS>
-      <RPC_CLIENTID>cb20a92a-2c2f-4305-a838-2a01c6e73e18</RPC_CLIENTID>
-      <RPC_CALLID>41</RPC_CALLID>
+      <RPC_CLIENTID>1730855b-1f27-4f17-9f72-b9f92eb3a8bd</RPC_CLIENTID>
+      <RPC_CALLID>39</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
     <OPCODE>OP_ALLOCATE_BLOCK_ID</OPCODE>
     <DATA>
-      <TXID>36</TXID>
+      <TXID>38</TXID>
       <BLOCK_ID>1073741828</BLOCK_ID>
     </DATA>
   </RECORD>
   <RECORD>
     <OPCODE>OP_SET_GENSTAMP_V2</OPCODE>
     <DATA>
-      <TXID>37</TXID>
+      <TXID>39</TXID>
       <GENSTAMPV2>1004</GENSTAMPV2>
     </DATA>
   </RECORD>
   <RECORD>
     <OPCODE>OP_ADD_BLOCK</OPCODE>
     <DATA>
-      <TXID>38</TXID>
+      <TXID>40</TXID>
       <PATH>/file_concat_0</PATH>
       <BLOCK>
         <BLOCK_ID>1073741828</BLOCK_ID>
@@ -478,21 +511,21 @@
   <RECORD>
     <OPCODE>OP_ALLOCATE_BLOCK_ID</OPCODE>
     <DATA>
-      <TXID>39</TXID>
+      <TXID>41</TXID>
       <BLOCK_ID>1073741829</BLOCK_ID>
     </DATA>
   </RECORD>
   <RECORD>
     <OPCODE>OP_SET_GENSTAMP_V2</OPCODE>
     <DATA>
-      <TXID>40</TXID>
+      <TXID>42</TXID>
       <GENSTAMPV2>1005</GENSTAMPV2>
     </DATA>
   </RECORD>
   <RECORD>
     <OPCODE>OP_ADD_BLOCK</OPCODE>
     <DATA>
-      <TXID>41</TXID>
+      <TXID>43</TXID>
       <PATH>/file_concat_0</PATH>
       <BLOCK>
         <BLOCK_ID>1073741828</BLOCK_ID>
@@ -511,21 +544,21 @@
   <RECORD>
     <OPCODE>OP_ALLOCATE_BLOCK_ID</OPCODE>
     <DATA>
-      <TXID>42</TXID>
+      <TXID>44</TXID>
       <BLOCK_ID>1073741830</BLOCK_ID>
     </DATA>
   </RECORD>
   <RECORD>
     <OPCODE>OP_SET_GENSTAMP_V2</OPCODE>
     <DATA>
-      <TXID>43</TXID>
+      <TXID>45</TXID>
       <GENSTAMPV2>1006</GENSTAMPV2>
     </DATA>
   </RECORD>
   <RECORD>
     <OPCODE>OP_ADD_BLOCK</OPCODE>
     <DATA>
-      <TXID>44</TXID>
+      <TXID>46</TXID>
       <PATH>/file_concat_0</PATH>
       <BLOCK>
         <BLOCK_ID>1073741829</BLOCK_ID>
@@ -544,13 +577,13 @@
   <RECORD>
     <OPCODE>OP_CLOSE</OPCODE>
     <DATA>
-      <TXID>45</TXID>
+      <TXID>47</TXID>
       <LENGTH>0</LENGTH>
       <INODEID>0</INODEID>
       <PATH>/file_concat_0</PATH>
       <REPLICATION>1</REPLICATION>
-      <MTIME>1421135801091</MTIME>
-      <ATIME>1421135801053</ATIME>
+      <MTIME>1421877811108</MTIME>
+      <ATIME>1421877811086</ATIME>
       <BLOCKSIZE>512</BLOCKSIZE>
       <CLIENT_NAME></CLIENT_NAME>
       <CLIENT_MACHINE></CLIENT_MACHINE>
@@ -571,7 +604,7 @@
         <GENSTAMP>1006</GENSTAMP>
       </BLOCK>
       <PERMISSION_STATUS>
-        <USERNAME>shv</USERNAME>
+        <USERNAME>jing</USERNAME>
         <GROUPNAME>supergroup</GROUPNAME>
         <MODE>420</MODE>
       </PERMISSION_STATUS>
@@ -580,44 +613,44 @@
   <RECORD>
     <OPCODE>OP_ADD</OPCODE>
     <DATA>
-      <TXID>46</TXID>
+      <TXID>48</TXID>
       <LENGTH>0</LENGTH>
       <INODEID>16391</INODEID>
       <PATH>/file_concat_1</PATH>
       <REPLICATION>1</REPLICATION>
-      <MTIME>1421135801095</MTIME>
-      <ATIME>1421135801095</ATIME>
+      <MTIME>1421877811110</MTIME>
+      <ATIME>1421877811110</ATIME>
       <BLOCKSIZE>512</BLOCKSIZE>
-      <CLIENT_NAME>DFSClient_NONMAPREDUCE_240777107_1</CLIENT_NAME>
+      <CLIENT_NAME>DFSClient_NONMAPREDUCE_-986598042_1</CLIENT_NAME>
       <CLIENT_MACHINE>127.0.0.1</CLIENT_MACHINE>
       <OVERWRITE>true</OVERWRITE>
       <PERMISSION_STATUS>
-        <USERNAME>shv</USERNAME>
+        <USERNAME>jing</USERNAME>
         <GROUPNAME>supergroup</GROUPNAME>
         <MODE>420</MODE>
       </PERMISSION_STATUS>
-      <RPC_CLIENTID>cb20a92a-2c2f-4305-a838-2a01c6e73e18</RPC_CLIENTID>
-      <RPC_CALLID>50</RPC_CALLID>
+      <RPC_CLIENTID>1730855b-1f27-4f17-9f72-b9f92eb3a8bd</RPC_CLIENTID>
+      <RPC_CALLID>48</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
     <OPCODE>OP_ALLOCATE_BLOCK_ID</OPCODE>
     <DATA>
-      <TXID>47</TXID>
+      <TXID>49</TXID>
       <BLOCK_ID>1073741831</BLOCK_ID>
     </DATA>
   </RECORD>
   <RECORD>
     <OPCODE>OP_SET_GENSTAMP_V2</OPCODE>
     <DATA>
-      <TXID>48</TXID>
+      <TXID>50</TXID>
       <GENSTAMPV2>1007</GENSTAMPV2>
     </DATA>
   </RECORD>
   <RECORD>
     <OPCODE>OP_ADD_BLOCK</OPCODE>
     <DATA>
-      <TXID>49</TXID>
+      <TXID>51</TXID>
       <PATH>/file_concat_1</PATH>
       <BLOCK>
         <BLOCK_ID>1073741831</BLOCK_ID>
@@ -631,21 +664,21 @@
   <RECORD>
     <OPCODE>OP_ALLOCATE_BLOCK_ID</OPCODE>
     <DATA>
-      <TXID>50</TXID>
+      <TXID>52</TXID>
       <BLOCK_ID>1073741832</BLOCK_ID>
     </DATA>
   </RECORD>
   <RECORD>
     <OPCODE>OP_SET_GENSTAMP_V2</OPCODE>
     <DATA>
-      <TXID>51</TXID>
+      <TXID>53</TXID>
       <GENSTAMPV2>1008</GENSTAMPV2>
     </DATA>
   </RECORD>
   <RECORD>
     <OPCODE>OP_ADD_BLOCK</OPCODE>
     <DATA>
-      <TXID>52</TXID>
+      <TXID>54</TXID>
       <PATH>/file_concat_1</PATH>
       <BLOCK>
         <BLOCK_ID>1073741831</BLOCK_ID>
@@ -664,21 +697,21 @@
   <RECORD>
     <OPCODE>OP_ALLOCATE_BLOCK_ID</OPCODE>
     <DATA>
-      <TXID>53</TXID>
+      <TXID>55</TXID>
       <BLOCK_ID>1073741833</BLOCK_ID>
     </DATA>
   </RECORD>
   <RECORD>
     <OPCODE>OP_SET_GENSTAMP_V2</OPCODE>
     <DATA>
-      <TXID>54</TXID>
+      <TXID>56</TXID>
       <GENSTAMPV2>1009</GENSTAMPV2>
     </DATA>
   </RECORD>
   <RECORD>
     <OPCODE>OP_ADD_BLOCK</OPCODE>
     <DATA>
-      <TXID>55</TXID>
+      <TXID>57</TXID>
       <PATH>/file_concat_1</PATH>
       <BLOCK>
         <BLOCK_ID>1073741832</BLOCK_ID>
@@ -697,13 +730,13 @@
   <RECORD>
     <OPCODE>OP_CLOSE</OPCODE>
     <DATA>
-      <TXID>56</TXID>
+      <TXID>58</TXID>
       <LENGTH>0</LENGTH>
       <INODEID>0</INODEID>
       <PATH>/file_concat_1</PATH>
       <REPLICATION>1</REPLICATION>
-      <MTIME>1421135801126</MTIME>
-      <ATIME>1421135801095</ATIME>
+      <MTIME>1421877811131</MTIME>
+      <ATIME>1421877811110</ATIME>
       <BLOCKSIZE>512</BLOCKSIZE>
       <CLIENT_NAME></CLIENT_NAME>
       <CLIENT_MACHINE></CLIENT_MACHINE>
@@ -724,7 +757,7 @@
         <GENSTAMP>1009</GENSTAMP>
       </BLOCK>
       <PERMISSION_STATUS>
-        <USERNAME>shv</USERNAME>
+        <USERNAME>jing</USERNAME>
         <GROUPNAME>supergroup</GROUPNAME>
         <MODE>420</MODE>
       </PERMISSION_STATUS>
@@ -733,59 +766,59 @@
   <RECORD>
     <OPCODE>OP_CONCAT_DELETE</OPCODE>
     <DATA>
-      <TXID>57</TXID>
+      <TXID>59</TXID>
       <LENGTH>0</LENGTH>
       <TRG>/file_concat_target</TRG>
-      <TIMESTAMP>1421135801130</TIMESTAMP>
+      <TIMESTAMP>1421877811134</TIMESTAMP>
       <SOURCES>
         <SOURCE1>/file_concat_0</SOURCE1>
         <SOURCE2>/file_concat_1</SOURCE2>
       </SOURCES>
-      <RPC_CLIENTID>cb20a92a-2c2f-4305-a838-2a01c6e73e18</RPC_CLIENTID>
-      <RPC_CALLID>58</RPC_CALLID>
+      <RPC_CLIENTID>1730855b-1f27-4f17-9f72-b9f92eb3a8bd</RPC_CLIENTID>
+      <RPC_CALLID>56</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
     <OPCODE>OP_ADD</OPCODE>
     <DATA>
-      <TXID>58</TXID>
+      <TXID>60</TXID>
       <LENGTH>0</LENGTH>
       <INODEID>16392</INODEID>
       <PATH>/file_create</PATH>
       <REPLICATION>1</REPLICATION>
-      <MTIME>1421135810102</MTIME>
-      <ATIME>1421135810102</ATIME>
+      <MTIME>1421877811137</MTIME>
+      <ATIME>1421877811137</ATIME>
       <BLOCKSIZE>512</BLOCKSIZE>
-      <CLIENT_NAME>DFSClient_NONMAPREDUCE_240777107_1</CLIENT_NAME>
+      <CLIENT_NAME>DFSClient_NONMAPREDUCE_-986598042_1</CLIENT_NAME>
       <CLIENT_MACHINE>127.0.0.1</CLIENT_MACHINE>
       <OVERWRITE>true</OVERWRITE>
       <PERMISSION_STATUS>
-        <USERNAME>shv</USERNAME>
+        <USERNAME>jing</USERNAME>
         <GROUPNAME>supergroup</GROUPNAME>
         <MODE>420</MODE>
       </PERMISSION_STATUS>
-      <RPC_CLIENTID>cb20a92a-2c2f-4305-a838-2a01c6e73e18</RPC_CLIENTID>
-      <RPC_CALLID>63</RPC_CALLID>
+      <RPC_CLIENTID>1730855b-1f27-4f17-9f72-b9f92eb3a8bd</RPC_CLIENTID>
+      <RPC_CALLID>58</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
     <OPCODE>OP_ALLOCATE_BLOCK_ID</OPCODE>
     <DATA>
-      <TXID>59</TXID>
+      <TXID>61</TXID>
       <BLOCK_ID>1073741834</BLOCK_ID>
     </DATA>
   </RECORD>
   <RECORD>
     <OPCODE>OP_SET_GENSTAMP_V2</OPCODE>
     <DATA>
-      <TXID>60</TXID>
+      <TXID>62</TXID>
       <GENSTAMPV2>1010</GENSTAMPV2>
     </DATA>
   </RECORD>
   <RECORD>
     <OPCODE>OP_ADD_BLOCK</OPCODE>
     <DATA>
-      <TXID>61</TXID>
+      <TXID>63</TXID>
       <PATH>/file_create</PATH>
       <BLOCK>
         <BLOCK_ID>1073741834</BLOCK_ID>
@@ -799,21 +832,21 @@
   <RECORD>
     <OPCODE>OP_ALLOCATE_BLOCK_ID</OPCODE>
     <DATA>
-      <TXID>62</TXID>
+      <TXID>64</TXID>
       <BLOCK_ID>1073741835</BLOCK_ID>
     </DATA>
   </RECORD>
   <RECORD>
     <OPCODE>OP_SET_GENSTAMP_V2</OPCODE>
     <DATA>
-      <TXID>63</TXID>
+      <TXID>65</TXID>
       <GENSTAMPV2>1011</GENSTAMPV2>
     </DATA>
   </RECORD>
   <RECORD>
     <OPCODE>OP_ADD_BLOCK</OPCODE>
     <DATA>
-      <TXID>64</TXID>
+      <TXID>66</TXID>
       <PATH>/file_create</PATH>
       <BLOCK>
         <BLOCK_ID>1073741834</BLOCK_ID>
@@ -832,13 +865,13 @@
   <RECORD>
     <OPCODE>OP_CLOSE</OPCODE>
     <DATA>
-      <TXID>65</TXID>
+      <TXID>67</TXID>
       <LENGTH>0</LENGTH>
       <INODEID>0</INODEID>
       <PATH>/file_create</PATH>
       <REPLICATION>1</REPLICATION>
-      <MTIME>1421135810122</MTIME>
-      <ATIME>1421135810102</ATIME>
+      <MTIME>1421877811152</MTIME>
+      <ATIME>1421877811137</ATIME>
       <BLOCKSIZE>512</BLOCKSIZE>
       <CLIENT_NAME></CLIENT_NAME>
       <CLIENT_MACHINE></CLIENT_MACHINE>
@@ -854,7 +887,7 @@
         <GENSTAMP>1011</GENSTAMP>
       </BLOCK>
       <PERMISSION_STATUS>
-        <USERNAME>shv</USERNAME>
+        <USERNAME>jing</USERNAME>
         <GROUPNAME>supergroup</GROUPNAME>
         <MODE>420</MODE>
       </PERMISSION_STATUS>
@@ -863,74 +896,74 @@
   <RECORD>
     <OPCODE>OP_TRUNCATE</OPCODE>
     <DATA>
-      <TXID>66</TXID>
+      <TXID>68</TXID>
       <SRC>/file_create</SRC>
-      <CLIENTNAME>DFSClient_NONMAPREDUCE_240777107_1</CLIENTNAME>
+      <CLIENTNAME>DFSClient_NONMAPREDUCE_-986598042_1</CLIENTNAME>
       <CLIENTMACHINE>127.0.0.1</CLIENTMACHINE>
       <NEWLENGTH>512</NEWLENGTH>
-      <TIMESTAMP>1421135810125</TIMESTAMP>
+      <TIMESTAMP>1421877811154</TIMESTAMP>
     </DATA>
   </RECORD>
   <RECORD>
     <OPCODE>OP_SYMLINK</OPCODE>
     <DATA>
-      <TXID>67</TXID>
+      <TXID>69</TXID>
       <LENGTH>0</LENGTH>
       <INODEID>16393</INODEID>
       <PATH>/file_symlink</PATH>
       <VALUE>/file_concat_target</VALUE>
-      <MTIME>1421135810132</MTIME>
-      <ATIME>1421135810132</ATIME>
+      <MTIME>1421877811160</MTIME>
+      <ATIME>1421877811160</ATIME>
       <PERMISSION_STATUS>
-        <USERNAME>shv</USERNAME>
+        <USERNAME>jing</USERNAME>
         <GROUPNAME>supergroup</GROUPNAME>
         <MODE>511</MODE>
       </PERMISSION_STATUS>
-      <RPC_CLIENTID>cb20a92a-2c2f-4305-a838-2a01c6e73e18</RPC_CLIENTID>
-      <RPC_CALLID>70</RPC_CALLID>
+      <RPC_CLIENTID>1730855b-1f27-4f17-9f72-b9f92eb3a8bd</RPC_CLIENTID>
+      <RPC_CALLID>65</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
     <OPCODE>OP_ADD</OPCODE>
     <DATA>
-      <TXID>68</TXID>
+      <TXID>70</TXID>
       <LENGTH>0</LENGTH>
       <INODEID>16394</INODEID>
       <PATH>/hard-lease-recovery-test</PATH>
       <REPLICATION>1</REPLICATION>
-      <MTIME>1421135810135</MTIME>
-      <ATIME>1421135810135</ATIME>
+      <MTIME>1421877811163</MTIME>
+      <ATIME>1421877811163</ATIME>
       <BLOCKSIZE>512</BLOCKSIZE>
-      <CLIENT_NAME>DFSClient_NONMAPREDUCE_240777107_1</CLIENT_NAME>
+      <CLIENT_NAME>DFSClient_NONMAPREDUCE_-986598042_1</CLIENT_NAME>
       <CLIENT_MACHINE>127.0.0.1</CLIENT_MACHINE>
       <OVERWRITE>true</OVERWRITE>
       <PERMISSION_STATUS>
-        <USERNAME>shv</USERNAME>
+        <USERNAME>jing</USERNAME>
         <GROUPNAME>supergroup</GROUPNAME>
         <MODE>420</MODE>
       </PERMISSION_STATUS>
-      <RPC_CLIENTID>cb20a92a-2c2f-4305-a838-2a01c6e73e18</RPC_CLIENTID>
-      <RPC_CALLID>71</RPC_CALLID>
+      <RPC_CLIENTID>1730855b-1f27-4f17-9f72-b9f92eb3a8bd</RPC_CLIENTID>
+      <RPC_CALLID>66</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
     <OPCODE>OP_ALLOCATE_BLOCK_ID</OPCODE>
     <DATA>
-      <TXID>69</TXID>
+      <TXID>71</TXID>
       <BLOCK_ID>1073741836</BLOCK_ID>
     </DATA>
   </RECORD>
   <RECORD>
     <OPCODE>OP_SET_GENSTAMP_V2</OPCODE>
     <DATA>
-      <TXID>70</TXID>
+      <TXID>72</TXID>
       <GENSTAMPV2>1012</GENSTAMPV2>
     </DATA>
   </RECORD>
   <RECORD>
     <OPCODE>OP_ADD_BLOCK</OPCODE>
     <DATA>
-      <TXID>71</TXID>
+      <TXID>73</TXID>
       <PATH>/hard-lease-recovery-test</PATH>
       <BLOCK>
         <BLOCK_ID>1073741836</BLOCK_ID>
@@ -944,7 +977,7 @@
   <RECORD>
     <OPCODE>OP_UPDATE_BLOCKS</OPCODE>
     <DATA>
-      <TXID>72</TXID>
+      <TXID>74</TXID>
       <PATH>/hard-lease-recovery-test</PATH>
       <BLOCK>
         <BLOCK_ID>1073741836</BLOCK_ID>
@@ -958,15 +991,15 @@
   <RECORD>
     <OPCODE>OP_SET_GENSTAMP_V2</OPCODE>
     <DATA>
-      <TXID>73</TXID>
+      <TXID>75</TXID>
       <GENSTAMPV2>1013</GENSTAMPV2>
     </DATA>
   </RECORD>
   <RECORD>
     <OPCODE>OP_REASSIGN_LEASE</OPCODE>
     <DATA>
-      <TXID>74</TXID>
-      <LEASEHOLDER>DFSClient_NONMAPREDUCE_240777107_1</LEASEHOLDER>
+      <TXID>76</TXID>
+      <LEASEHOLDER>DFSClient_NONMAPREDUCE_-986598042_1</LEASEHOLDER>
       <PATH>/hard-lease-recovery-test</PATH>
       <NEWHOLDER>HDFS_NameNode</NEWHOLDER>
     </DATA>
@@ -974,13 +1007,13 @@
   <RECORD>
     <OPCODE>OP_CLOSE</OPCODE>
     <DATA>
-      <TXID>75</TXID>
+      <TXID>77</TXID>
       <LENGTH>0</LENGTH>
       <INODEID>0</INODEID>
       <PATH>/hard-lease-recovery-test</PATH>
       <REPLICATION>1</REPLICATION>
-      <MTIME>1421135812235</MTIME>
-      <ATIME>1421135810135</ATIME>
+      <MTIME>1421877813736</MTIME>
+      <ATIME>1421877811163</ATIME>
       <BLOCKSIZE>512</BLOCKSIZE>
       <CLIENT_NAME></CLIENT_NAME>
       <CLIENT_MACHINE></CLIENT_MACHINE>
@@ -991,7 +1024,7 @@
         <GENSTAMP>1013</GENSTAMP>
       </BLOCK>
       <PERMISSION_STATUS>
-        <USERNAME>shv</USERNAME>
+        <USERNAME>jing</USERNAME>
         <GROUPNAME>supergroup</GROUPNAME>
         <MODE>420</MODE>
       </PERMISSION_STATUS>
@@ -1000,72 +1033,72 @@
   <RECORD>
     <OPCODE>OP_ADD_CACHE_POOL</OPCODE>
     <DATA>
-      <TXID>76</TXID>
+      <TXID>78</TXID>
       <POOLNAME>pool1</POOLNAME>
-      <OWNERNAME>shv</OWNERNAME>
-      <GROUPNAME>shv</GROUPNAME>
+      <OWNERNAME>jing</OWNERNAME>
+      <GROUPNAME>staff</GROUPNAME>
       <MODE>493</MODE>
       <LIMIT>9223372036854775807</LIMIT>
       <MAXRELATIVEEXPIRY>2305843009213693951</MAXRELATIVEEXPIRY>
-      <RPC_CLIENTID>cb20a92a-2c2f-4305-a838-2a01c6e73e18</RPC_CLIENTID>
-      <RPC_CALLID>78</RPC_CALLID>
+      <RPC_CLIENTID>1730855b-1f27-4f17-9f72-b9f92eb3a8bd</RPC_CLIENTID>
+      <RPC_CALLID>73</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
     <OPCODE>OP_MODIFY_CACHE_POOL</OPCODE>
     <DATA>
-      <TXID>77</TXID>
+      <TXID>79</TXID>
       <POOLNAME>pool1</POOLNAME>
       <LIMIT>99</LIMIT>
-      <RPC_CLIENTID>cb20a92a-2c2f-4305-a838-2a01c6e73e18</RPC_CLIENTID>
-      <RPC_CALLID>79</RPC_CALLID>
+      <RPC_CLIENTID>1730855b-1f27-4f17-9f72-b9f92eb3a8bd</RPC_CLIENTID>
+      <RPC_CALLID>74</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
     <OPCODE>OP_ADD_CACHE_DIRECTIVE</OPCODE>
     <DATA>
-      <TXID>78</TXID>
+      <TXID>80</TXID>
       <ID>1</ID>
       <PATH>/path</PATH>
       <REPLICATION>1</REPLICATION>
       <POOL>pool1</POOL>
-      <EXPIRATION>2305844430349507141</EXPIRATION>
-      <RPC_CLIENTID>cb20a92a-2c2f-4305-a838-2a01c6e73e18</RPC_CLIENTID>
-      <RPC_CALLID>80</RPC_CALLID>
+      <EXPIRATION>2305844431091508160</EXPIRATION>
+      <RPC_CLIENTID>1730855b-1f27-4f17-9f72-b9f92eb3a8bd</RPC_CLIENTID>
+      <RPC_CALLID>75</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
     <OPCODE>OP_MODIFY_CACHE_DIRECTIVE</OPCODE>
     <DATA>
-      <TXID>79</TXID>
+      <TXID>81</TXID>
       <ID>1</ID>
       <REPLICATION>2</REPLICATION>
-      <RPC_CLIENTID>cb20a92a-2c2f-4305-a838-2a01c6e73e18</RPC_CLIENTID>
-      <RPC_CALLID>81</RPC_CALLID>
+      <RPC_CLIENTID>1730855b-1f27-4f17-9f72-b9f92eb3a8bd</RPC_CLIENTID>
+      <RPC_CALLID>76</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
     <OPCODE>OP_REMOVE_CACHE_DIRECTIVE</OPCODE>
     <DATA>
-      <TXID>80</TXID>
+      <TXID>82</TXID>
       <ID>1</ID>
-      <RPC_CLIENTID>cb20a92a-2c2f-4305-a838-2a01c6e73e18</RPC_CLIENTID>
-      <RPC_CALLID>82</RPC_CALLID>
+      <RPC_CLIENTID>1730855b-1f27-4f17-9f72-b9f92eb3a8bd</RPC_CLIENTID>
+      <RPC_CALLID>77</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
     <OPCODE>OP_REMOVE_CACHE_POOL</OPCODE>
     <DATA>
-      <TXID>81</TXID>
+      <TXID>83</TXID>
       <POOLNAME>pool1</POOLNAME>
-      <RPC_CLIENTID>cb20a92a-2c2f-4305-a838-2a01c6e73e18</RPC_CLIENTID>
-      <RPC_CALLID>83</RPC_CALLID>
+      <RPC_CLIENTID>1730855b-1f27-4f17-9f72-b9f92eb3a8bd</RPC_CLIENTID>
+      <RPC_CALLID>78</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
     <OPCODE>OP_SET_ACL</OPCODE>
     <DATA>
-      <TXID>82</TXID>
+      <TXID>84</TXID>
       <SRC>/file_concat_target</SRC>
       <ENTRY>
         <SCOPE>ACCESS</SCOPE>
@@ -1098,62 +1131,62 @@
   <RECORD>
     <OPCODE>OP_SET_XATTR</OPCODE>
     <DATA>
-      <TXID>83</TXID>
+      <TXID>85</TXID>
       <SRC>/file_concat_target</SRC>
       <XATTR>
         <NAMESPACE>USER</NAMESPACE>
         <NAME>a1</NAME>
         <VALUE>0x313233</VALUE>
       </XATTR>
-      <RPC_CLIENTID>cb20a92a-2c2f-4305-a838-2a01c6e73e18</RPC_CLIENTID>
-      <RPC_CALLID>85</RPC_CALLID>
+      <RPC_CLIENTID>1730855b-1f27-4f17-9f72-b9f92eb3a8bd</RPC_CLIENTID>
+      <RPC_CALLID>80</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
     <OPCODE>OP_SET_XATTR</OPCODE>
     <DATA>
-      <TXID>84</TXID>
+      <TXID>86</TXID>
       <SRC>/file_concat_target</SRC>
       <XATTR>
         <NAMESPACE>USER</NAMESPACE>
         <NAME>a2</NAME>
         <VALUE>0x373839</VALUE>
       </XATTR>
-      <RPC_CLIENTID>cb20a92a-2c2f-4305-a838-2a01c6e73e18</RPC_CLIENTID>
-      <RPC_CALLID>86</RPC_CALLID>
+      <RPC_CLIENTID>1730855b-1f27-4f17-9f72-b9f92eb3a8bd</RPC_CLIENTID>
+      <RPC_CALLID>81</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
     <OPCODE>OP_REMOVE_XATTR</OPCODE>
     <DATA>
-      <TXID>85</TXID>
+      <TXID>87</TXID>
       <SRC>/file_concat_target</SRC>
       <XATTR>
         <NAMESPACE>USER</NAMESPACE>
         <NAME>a2</NAME>
       </XATTR>
-      <RPC_CLIENTID>cb20a92a-2c2f-4305-a838-2a01c6e73e18</RPC_CLIENTID>
-      <RPC_CALLID>87</RPC_CALLID>
+      <RPC_CLIENTID>1730855b-1f27-4f17-9f72-b9f92eb3a8bd</RPC_CLIENTID>
+      <RPC_CALLID>82</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
     <OPCODE>OP_ROLLING_UPGRADE_START</OPCODE>
     <DATA>
-      <TXID>86</TXID>
-      <STARTTIME>1421135813268</STARTTIME>
+      <TXID>88</TXID>
+      <STARTTIME>1421877814254</STARTTIME>
     </DATA>
   </RECORD>
   <RECORD>
     <OPCODE>OP_ROLLING_UPGRADE_FINALIZE</OPCODE>
     <DATA>
-      <TXID>87</TXID>
-      <FINALIZETIME>1421135813268</FINALIZETIME>
+      <TXID>89</TXID>
+      <FINALIZETIME>1421877814254</FINALIZETIME>
     </DATA>
   </RECORD>
   <RECORD>
     <OPCODE>OP_END_LOG_SEGMENT</OPCODE>
     <DATA>
-      <TXID>88</TXID>
+      <TXID>90</TXID>
     </DATA>
   </RECORD>
 </EDITS>
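
The regenerated editsStored.xml above reflects the new OP_APPEND opcode (EDITS_VERSION bumped from -61 to -62): with variable-length block support, an append is logged as its own edit with a NEWBLOCK flag rather than being folded into OP_ADD. A hedged client-side sketch of the operation whose edit-log footprint the test resource captures (configuration and the appended bytes are illustrative, not from the commit; the path matches the records above):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import java.nio.charset.StandardCharsets;

    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);      // assumed to point at a running HDFS cluster
    Path file = new Path("/file_create");      // same path as in the edit log above
    try (FSDataOutputStream out = fs.append(file)) {
      // the NameNode records this reopen as an OP_APPEND edit (NEWBLOCK=false);
      // the file's last block may stay shorter than the configured block size
      out.write("more data".getBytes(StandardCharsets.UTF_8));
    }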