Posted to hdfs-commits@hadoop.apache.org by ar...@apache.org on 2013/08/21 19:47:17 UTC

svn commit: r1516230 [1/2] - in /hadoop/common/branches/HDFS-2832/hadoop-hdfs-project: hadoop-hdfs-nfs/ hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/ hadoop-hdfs/ hadoop-hdfs/src/contrib/bkjournal/ hadoop-hdfs/src/main/java/ hadoop-hdf...

Author: arp
Date: Wed Aug 21 17:47:10 2013
New Revision: 1516230

URL: http://svn.apache.org/r1516230
Log:
Merge all changes from trunk to branch HDFS-2832

Added:
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeMXBean.java
      - copied unchanged from r1516228, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeMXBean.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/dataNodeHome.jsp
      - copied unchanged from r1516228, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/dataNodeHome.jsp
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/index.html
      - copied unchanged from r1516228, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/index.html
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNodeMXBean.java
      - copied unchanged from r1516228, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNodeMXBean.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestBootstrapStandbyWithQJM.java
      - copied unchanged from r1516228, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestBootstrapStandbyWithQJM.java
Modified:
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/   (props changed)
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs-nfs/pom.xml
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/pom.xml
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/pom.xml
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/   (props changed)
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/AsyncLogger.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/AsyncLoggerSet.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumJournalManager.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/protocol/QJournalProtocol.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/protocolPB/QJournalProtocolServerSideTranslatorPB.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/protocolPB/QJournalProtocolTranslatorPB.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNode.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeHttpServer.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeRpcServer.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSecretManager.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyWithNodeGroup.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DatanodeJspHelper.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/resources/DatanodeWebHdfsMethods.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/GetImageServlet.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/JournalSet.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeMXBean.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/BootstrapStandby.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/StandbyCheckpointer.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/QJournalProtocol.proto
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/   (props changed)
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/   (props changed)
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/index.html
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestGlobPaths.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestHDFSFileContextMainOperations.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogFileInputStream.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileJournalManager.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeRetryCache.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestBootstrapStandby.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshot.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotRename.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsFileSystemContract.java

Propchange: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs:r1513717-1516228

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs-nfs/pom.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs-nfs/pom.xml?rev=1516230&r1=1516229&r2=1516230&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs-nfs/pom.xml (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs-nfs/pom.xml Wed Aug 21 17:47:10 2013
@@ -77,7 +77,6 @@ http://maven.apache.org/xsd/maven-4.0.0.
     <dependency>
       <groupId>org.apache.zookeeper</groupId>
       <artifactId>zookeeper</artifactId>
-      <version>3.4.2</version>
       <type>test-jar</type>
       <scope>test</scope>
     </dependency>

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java?rev=1516230&r1=1516229&r2=1516230&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java Wed Aug 21 17:47:10 2013
@@ -32,7 +32,6 @@ import java.util.concurrent.locks.Reentr
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.FSDataInputStream;
-import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
 import org.apache.hadoop.hdfs.client.HdfsDataOutputStream.SyncFlag;
@@ -70,7 +69,7 @@ class OpenFileCtx {
   // The stream write-back status. True means one thread is doing write back.
   private boolean asyncStatus;
 
-  private final FSDataOutputStream fos;
+  private final HdfsDataOutputStream fos;
   private final Nfs3FileAttributes latestAttr;
   private long nextOffset;
 
@@ -114,7 +113,7 @@ class OpenFileCtx {
     return nonSequentialWriteInMemory;
   }
   
-  OpenFileCtx(FSDataOutputStream fos, Nfs3FileAttributes latestAttr,
+  OpenFileCtx(HdfsDataOutputStream fos, Nfs3FileAttributes latestAttr,
       String dumpFilePath) {
     this.fos = fos;
     this.latestAttr = latestAttr;
@@ -438,7 +437,7 @@ class OpenFileCtx {
     FSDataInputStream fis = null;
     try {
       // Sync file data and length to avoid partial read failure
-      ((HdfsDataOutputStream) fos).hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
+      fos.hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
       
       fis = new FSDataInputStream(dfsClient.open(path));
       readCount = fis.read(offset, readbuffer, 0, count);
@@ -527,7 +526,7 @@ class OpenFileCtx {
     int ret = COMMIT_WAIT;
     try {
       // Sync file data and length
-      ((HdfsDataOutputStream) fos).hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
+      fos.hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
       // Nothing to do for metadata since attr related change is pass-through
       ret = COMMIT_FINISHED;
     } catch (IOException e) {
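
A note on the OpenFileCtx hunks above (HDFS-5110): keeping the field typed as HdfsDataOutputStream is what makes the cast-free hsync(SyncFlag.UPDATE_LENGTH) calls possible, since plain FSDataOutputStream does not expose that overload. Below is a minimal stand-alone sketch of the same call, not part of this commit; it assumes fs.defaultFS already points at an HDFS cluster, and the file path is made up for illustration.

  import java.util.EnumSet;

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.FileSystem;
  import org.apache.hadoop.fs.Path;
  import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
  import org.apache.hadoop.hdfs.client.HdfsDataOutputStream.SyncFlag;

  public class HsyncLengthSketch {
    public static void main(String[] args) throws Exception {
      Configuration conf = new Configuration();
      FileSystem fs = FileSystem.get(conf);
      // On HDFS, FileSystem#create returns an HdfsDataOutputStream, which is
      // the stream type that exposes hsync(EnumSet<SyncFlag>).
      HdfsDataOutputStream out =
          (HdfsDataOutputStream) fs.create(new Path("/tmp/hsync-demo"));
      out.write("some data".getBytes("UTF-8"));
      // Flush data to the DataNodes and also persist the new file length on
      // the NameNode, so a concurrent reader sees the bytes written so far.
      out.hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
      out.close();
    }
  }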

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java?rev=1516230&r1=1516229&r2=1516230&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java Wed Aug 21 17:47:10 2013
@@ -29,7 +29,7 @@ import org.apache.commons.logging.LogFac
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CreateFlag;
 import org.apache.hadoop.fs.FSDataInputStream;
-import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
 import org.apache.hadoop.fs.Options;
 import org.apache.hadoop.fs.FileSystem.Statistics;
 import org.apache.hadoop.fs.FileUtil;
@@ -629,7 +629,7 @@ public class RpcProgramNfs3 extends RpcP
       return new CREATE3Response(Nfs3Status.NFS3ERR_INVAL);
     }
 
-    FSDataOutputStream fos = null;
+    HdfsDataOutputStream fos = null;
     String dirFileIdPath = Nfs3Utils.getFileIdPath(dirHandle);
     WccAttr preOpDirAttr = null;
     Nfs3FileAttributes postOpObjAttr = null;
@@ -652,7 +652,8 @@ public class RpcProgramNfs3 extends RpcP
       EnumSet<CreateFlag> flag = (createMode != Nfs3Constant.CREATE_EXCLUSIVE) ? EnumSet
           .of(CreateFlag.CREATE, CreateFlag.OVERWRITE) : EnumSet
           .of(CreateFlag.CREATE);
-      fos = new FSDataOutputStream(dfsClient.create(fileIdPath, permission,
+      
+      fos = new HdfsDataOutputStream(dfsClient.create(fileIdPath, permission,
           flag, false, replication, blockSize, null, bufferSize, null),
           statistics);
       

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1516230&r1=1516229&r2=1516230&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Wed Aug 21 17:47:10 2013
@@ -6,6 +6,9 @@ Trunk (Unreleased)
 
     HDFS-3034. Remove the deprecated DFSOutputStream.sync() method.  (szetszwo)
 
+    HDFS-5079. Cleaning up NNHAStatusHeartbeat.State from
+    DatanodeProtocolProtos. (Tao Luo via shv)
+
   NEW FEATURES
 
     HDFS-3125. Add JournalService to enable Journal Daemon. (suresh)
@@ -117,9 +120,6 @@ Trunk (Unreleased)
 
     HDFS-4904. Remove JournalService. (Arpit Agarwal via cnauroth)
 
-    HDFS-5004. Add additional JMX bean for NameNode status data
-    (Trevor Lorimer via cos)
-
   OPTIMIZATIONS
 
   BUG FIXES
@@ -256,6 +256,12 @@ Release 2.3.0 - UNRELEASED
     HDFS-4817.  Make HDFS advisory caching configurable on a per-file basis.
     (Colin Patrick McCabe)
 
+    HDFS-5004. Add additional JMX bean for NameNode status data
+    (Trevor Lorimer via cos)
+
+    HDFS-5068. Convert NNThroughputBenchmark to a Tool to allow generic options.
+    (shv)
+
   OPTIMIZATIONS
 
   BUG FIXES
@@ -267,6 +273,9 @@ Release 2.3.0 - UNRELEASED
 
     HDFS-5065. TestSymlinkHdfsDisable fails on Windows. (ivanmi)
 
+    HDFS-4816. transitionToActive blocks if the SBN is doing checkpoint image
+    transfer. (Andrew Wang)
+
 Release 2.1.1-beta - UNRELEASED
 
   INCOMPATIBLE CHANGES
@@ -279,6 +288,17 @@ Release 2.1.1-beta - UNRELEASED
 
     HDFS-4763 Add script changes/utility for starting NFS gateway (brandonli)
 
+    HDFS-5076 Add MXBean methods to query NN's transaction information and 
+    JournalNode's journal status. (jing9)
+
+    HDFS-5104 Support dotdot name in NFS LOOKUP operation (brandonli)
+
+    HDFS-5107 Fix array copy error in Readdir and Readdirplus responses
+    (brandonli)
+
+    HDFS-5110 Change FSDataOutputStream to HdfsDataOutputStream for opened
+    streams to fix type cast error. (brandonli)
+
   IMPROVEMENTS
 
     HDFS-4513. Clarify in the WebHDFS REST API that all JSON respsonses may
@@ -296,6 +316,12 @@ Release 2.1.1-beta - UNRELEASED
     HDFS-5047. Supress logging of full stack trace of quota and lease
     exceptions. (Robert Parker via kihwal)
 
+    HDFS-2933. Improve DataNode Web UI Index Page. (Vivek Ganesan via
+    Arpit Agarwal)
+
+    HDFS-5111. Remove duplicated error message for snapshot commands when 
+    processing invalid arguments. (jing9)
+
   OPTIMIZATIONS
 
   BUG FIXES
@@ -312,7 +338,44 @@ Release 2.1.1-beta - UNRELEASED
     HDFS-5091. Support for spnego keytab separate from the JournalNode keytab 
     for secure HA. (jing9)
 
-Release 2.1.0-beta - 2013-08-06
+    HDFS-5051. nn fails to download checkpointed image from snn in some
+    setups. (Vinay and suresh via suresh)
+
+    HDFS-4898. BlockPlacementPolicyWithNodeGroup.chooseRemoteRack() fails to
+    properly fallback to local rack. (szetszwo)
+
+    HDFS-4632. globStatus using backslash for escaping does not work on Windows.
+    (Chuan Liu via cnauroth)
+
+    HDFS-5093. TestGlobPaths should re-use the MiniDFSCluster to avoid failure
+    on Windows. (Chuan Liu via cnauroth)
+
+    HDFS-5080. BootstrapStandby not working with QJM when the existing NN is 
+    active. (jing9)
+
+    HDFS-5099. Namenode#copyEditLogSegmentsToSharedDir should close
+    EditLogInputStreams upon finishing. (Chuan Liu via cnauroth)
+
+    HDFS-2994. If lease soft limit is recovered successfully
+    the append can fail. (Tao Luo via shv)
+
+    HDFS-5100. TestNamenodeRetryCache fails on Windows due to incorrect cleanup.
+    (Chuan Liu via cnauroth)
+
+    HDFS-5103. TestDirectoryScanner fails on Windows. (Chuan Liu via cnauroth)
+
+    HDFS-5102. Snapshot names should not be allowed to contain slash characters.
+    (jing9)
+
+    HDFS-5105. TestFsck fails on Windows. (Chuan Liu via arp)
+
+    HDFS-5106. TestDatanodeBlockScanner fails on Windows due to incorrect path
+    format. (Chuan Liu via cnauroth)
+
+    HDFS-4594. WebHDFS open sets Content-Length header to what is specified by
+    length parameter rather than how much data is actually returned. (cnauroth)
+
+Release 2.1.0-beta - 2013-08-22
 
   INCOMPATIBLE CHANGES
 

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/pom.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/pom.xml?rev=1516230&r1=1516229&r2=1516230&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/pom.xml (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/pom.xml Wed Aug 21 17:47:10 2013
@@ -63,7 +63,6 @@ http://maven.apache.org/xsd/maven-4.0.0.
     <dependency>
       <groupId>org.apache.zookeeper</groupId>
       <artifactId>zookeeper</artifactId>
-      <version>3.4.2</version>
       <type>test-jar</type>
       <scope>test</scope>
     </dependency>
@@ -417,7 +416,8 @@ http://maven.apache.org/xsd/maven-4.0.0.
               <goal>protoc</goal>
             </goals>
             <configuration>
-              <protocVersion>2.5.0</protocVersion>
+              <protocVersion>${protobuf.version}</protocVersion>
+              <protocCommand>${protoc.path}</protocCommand>
               <imports>
                 <param>${basedir}/../../hadoop-common-project/hadoop-common/src/main/proto</param>
                 <param>${basedir}/src/main/proto</param>
@@ -442,7 +442,8 @@ http://maven.apache.org/xsd/maven-4.0.0.
               <goal>protoc</goal>
             </goals>
             <configuration>
-              <protocVersion>2.5.0</protocVersion>
+              <protocVersion>${protobuf.version}</protocVersion>
+              <protocCommand>${protoc.path}</protocCommand>
               <imports>
                 <param>${basedir}/../../hadoop-common-project/hadoop-common/src/main/proto</param>
                 <param>${basedir}/src/main/proto</param>
@@ -464,7 +465,8 @@ http://maven.apache.org/xsd/maven-4.0.0.
               <goal>protoc</goal>
             </goals>
             <configuration>
-              <protocVersion>2.5.0</protocVersion>
+              <protocVersion>${protobuf.version}</protocVersion>
+              <protocCommand>${protoc.path}</protocCommand>
               <imports>
                 <param>${basedir}/../../hadoop-common-project/hadoop-common/src/main/proto</param>
                 <param>${basedir}/src/main/proto</param>
@@ -486,7 +488,8 @@ http://maven.apache.org/xsd/maven-4.0.0.
               <goal>protoc</goal>
             </goals>
             <configuration>
-              <protocVersion>2.5.0</protocVersion>
+              <protocVersion>${protobuf.version}</protocVersion>
+              <protocCommand>${protoc.path}</protocCommand>
               <imports>
                 <param>${basedir}/../../hadoop-common-project/hadoop-common/src/main/proto</param>
                 <param>${basedir}/src/main/proto</param>
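
As background on the pom.xml hunks above: the hard-coded protoc 2.5.0 is replaced by the ${protobuf.version} and ${protoc.path} build properties, so the protobuf compiler is selected in one place and can be overridden per build. An illustrative invocation only (the path is an example, not part of this commit):

  mvn install -DskipTests -Dprotoc.path=/usr/local/bin/protoc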

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/pom.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/pom.xml?rev=1516230&r1=1516229&r2=1516230&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/pom.xml (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/pom.xml Wed Aug 21 17:47:10 2013
@@ -103,7 +103,8 @@ http://maven.apache.org/xsd/maven-4.0.0.
               <goal>protoc</goal>
             </goals>
             <configuration>
-              <protocVersion>2.5.0</protocVersion>
+              <protocVersion>${protobuf.version}</protocVersion>
+              <protocCommand>${protoc.path}</protocCommand>
               <imports>
                 <param>${basedir}/../../../../../hadoop-common-project/hadoop-common/src/main/proto</param>
                 <param>${basedir}/../../../../../hadoop-hdfs-project/hadoop-hdfs/src/main/proto</param>

Propchange: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java:r1513717-1516228

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java?rev=1516230&r1=1516229&r2=1516230&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java Wed Aug 21 17:47:10 2013
@@ -70,6 +70,7 @@ import org.apache.hadoop.hdfs.protocol.H
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.protocolPB.ClientDatanodeProtocolTranslatorPB;
+import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.ipc.ProtobufRpcEngine;
 import org.apache.hadoop.ipc.RPC;
@@ -204,13 +205,20 @@ public class DFSUtil {
     String[] components = StringUtils.split(src, '/');
     for (int i = 0; i < components.length; i++) {
       String element = components[i];
-      if (element.equals("..") || 
-          element.equals(".")  ||
+      if (element.equals(".")  ||
           (element.indexOf(":") >= 0)  ||
           (element.indexOf("/") >= 0)) {
         return false;
       }
-      
+      // ".." is allowed in path starting with /.reserved/.inodes
+      if (element.equals("..")) {
+        if (components.length > 4
+            && components[1].equals(FSDirectory.DOT_RESERVED_STRING)
+            && components[2].equals(FSDirectory.DOT_INODES_STRING)) {
+          continue;
+        }
+        return false;
+      }
       // The string may start or end with a /, but not have
       // "//" in the middle.
       if (element.isEmpty() && i != components.length - 1 &&
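
To illustrate the DFSUtil.isValidName hunk above: ".." stays forbidden in ordinary paths and is only tolerated under /.reserved/.inodes, the inode-id addressing prefix. The following stand-alone sketch shows just that rule and is not part of this commit; it uses java.lang.String#split for brevity where the real code uses Hadoop's StringUtils.split, and the literals stand in for FSDirectory.DOT_RESERVED_STRING and FSDirectory.DOT_INODES_STRING.

  public class DotDotCheckSketch {
    // Stand-ins for FSDirectory.DOT_RESERVED_STRING / DOT_INODES_STRING.
    private static final String DOT_RESERVED = ".reserved";
    private static final String DOT_INODES = ".inodes";

    /** True if a ".." component is acceptable in this path. */
    static boolean isDotDotAllowed(String src) {
      // "/.reserved/.inodes/16387/.." -> ["", ".reserved", ".inodes", "16387", ".."]
      String[] components = src.split("/");
      return components.length > 4
          && components[1].equals(DOT_RESERVED)
          && components[2].equals(DOT_INODES);
    }

    public static void main(String[] args) {
      System.out.println(isDotDotAllowed("/.reserved/.inodes/16387/.."));  // true
      System.out.println(isDotDotAllowed("/user/foo/.."));                 // false
    }
  }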

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java?rev=1516230&r1=1516229&r2=1516230&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java Wed Aug 21 17:47:10 2013
@@ -30,6 +30,7 @@ import org.apache.hadoop.fs.CreateFlag;
 import org.apache.hadoop.fs.FsServerDefaults;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
+import org.apache.hadoop.ha.proto.HAServiceProtocolProtos;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
@@ -1311,10 +1312,10 @@ public class PBHelper {
       NNHAStatusHeartbeatProto.newBuilder();
     switch (hb.getState()) {
       case ACTIVE:
-        builder.setState(NNHAStatusHeartbeatProto.State.ACTIVE);
+        builder.setState(HAServiceProtocolProtos.HAServiceStateProto.ACTIVE);
         break;
       case STANDBY:
-        builder.setState(NNHAStatusHeartbeatProto.State.STANDBY);
+        builder.setState(HAServiceProtocolProtos.HAServiceStateProto.STANDBY);
         break;
       default:
         throw new IllegalArgumentException("Unexpected NNHAStatusHeartbeat.State:" +

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/AsyncLogger.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/AsyncLogger.java?rev=1516230&r1=1516229&r2=1516230&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/AsyncLogger.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/AsyncLogger.java Wed Aug 21 17:47:10 2013
@@ -109,7 +109,7 @@ interface AsyncLogger {
    * Fetch the list of edit logs available on the remote node.
    */
   public ListenableFuture<RemoteEditLogManifest> getEditLogManifest(
-      long fromTxnId, boolean forReading);
+      long fromTxnId, boolean forReading, boolean inProgressOk);
 
   /**
    * Prepare recovery. See the HDFS-3077 design document for details.

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/AsyncLoggerSet.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/AsyncLoggerSet.java?rev=1516230&r1=1516229&r2=1516230&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/AsyncLoggerSet.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/AsyncLoggerSet.java Wed Aug 21 17:47:10 2013
@@ -262,14 +262,14 @@ class AsyncLoggerSet {
     return QuorumCall.create(calls);
   }
 
-  public QuorumCall<AsyncLogger, RemoteEditLogManifest>
-      getEditLogManifest(long fromTxnId, boolean forReading) {
+  public QuorumCall<AsyncLogger, RemoteEditLogManifest> getEditLogManifest(
+      long fromTxnId, boolean forReading, boolean inProgressOk) {
     Map<AsyncLogger,
         ListenableFuture<RemoteEditLogManifest>> calls
         = Maps.newHashMap();
     for (AsyncLogger logger : loggers) {
       ListenableFuture<RemoteEditLogManifest> future =
-          logger.getEditLogManifest(fromTxnId, forReading);
+          logger.getEditLogManifest(fromTxnId, forReading, inProgressOk);
       calls.put(logger, future);
     }
     return QuorumCall.create(calls);

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java?rev=1516230&r1=1516229&r2=1516230&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java Wed Aug 21 17:47:10 2013
@@ -519,12 +519,13 @@ public class IPCLoggerChannel implements
 
   @Override
   public ListenableFuture<RemoteEditLogManifest> getEditLogManifest(
-      final long fromTxnId, final boolean forReading) {
+      final long fromTxnId, final boolean forReading, 
+      final boolean inProgressOk) {
     return executor.submit(new Callable<RemoteEditLogManifest>() {
       @Override
       public RemoteEditLogManifest call() throws IOException {
         GetEditLogManifestResponseProto ret = getProxy().getEditLogManifest(
-            journalId, fromTxnId, forReading);
+            journalId, fromTxnId, forReading, inProgressOk);
         // Update the http port, since we need this to build URLs to any of the
         // returned logs.
         httpPort = ret.getHttpPort();

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumJournalManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumJournalManager.java?rev=1516230&r1=1516229&r2=1516230&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumJournalManager.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumJournalManager.java Wed Aug 21 17:47:10 2013
@@ -456,7 +456,7 @@ public class QuorumJournalManager implem
       long fromTxnId, boolean inProgressOk, boolean forReading) throws IOException {
 
     QuorumCall<AsyncLogger, RemoteEditLogManifest> q =
-        loggers.getEditLogManifest(fromTxnId, forReading);
+        loggers.getEditLogManifest(fromTxnId, forReading, inProgressOk);
     Map<AsyncLogger, RemoteEditLogManifest> resps =
         loggers.waitForWriteQuorum(q, selectInputStreamsTimeoutMs,
             "selectInputStreams");
@@ -480,8 +480,7 @@ public class QuorumJournalManager implem
         allStreams.add(elis);
       }
     }
-    JournalSet.chainAndMakeRedundantStreams(
-        streams, allStreams, fromTxnId, inProgressOk);
+    JournalSet.chainAndMakeRedundantStreams(streams, allStreams, fromTxnId);
   }
   
   @Override

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/protocol/QJournalProtocol.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/protocol/QJournalProtocol.java?rev=1516230&r1=1516229&r2=1516230&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/protocol/QJournalProtocol.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/protocol/QJournalProtocol.java Wed Aug 21 17:47:10 2013
@@ -125,10 +125,13 @@ public interface QJournalProtocol {
    * @param sinceTxId the first transaction which the client cares about
    * @param forReading whether or not the caller intends to read from the edit
    *        logs
+   * @param inProgressOk whether or not to check the in-progress edit log 
+   *        segment       
    * @return a list of edit log segments since the given transaction ID.
    */
-  public GetEditLogManifestResponseProto getEditLogManifest(
-      String jid, long sinceTxId, boolean forReading) throws IOException;
+  public GetEditLogManifestResponseProto getEditLogManifest(String jid,
+      long sinceTxId, boolean forReading, boolean inProgressOk)
+      throws IOException;
   
   /**
    * Begin the recovery process for a given segment. See the HDFS-3077

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/protocolPB/QJournalProtocolServerSideTranslatorPB.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/protocolPB/QJournalProtocolServerSideTranslatorPB.java?rev=1516230&r1=1516229&r2=1516230&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/protocolPB/QJournalProtocolServerSideTranslatorPB.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/protocolPB/QJournalProtocolServerSideTranslatorPB.java Wed Aug 21 17:47:10 2013
@@ -203,7 +203,8 @@ public class QJournalProtocolServerSideT
       return impl.getEditLogManifest(
           request.getJid().getIdentifier(),
           request.getSinceTxId(),
-          request.getForReading());
+          request.getForReading(),
+          request.getInProgressOk());
     } catch (IOException e) {
       throw new ServiceException(e);
     }

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/protocolPB/QJournalProtocolTranslatorPB.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/protocolPB/QJournalProtocolTranslatorPB.java?rev=1516230&r1=1516229&r2=1516230&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/protocolPB/QJournalProtocolTranslatorPB.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/protocolPB/QJournalProtocolTranslatorPB.java Wed Aug 21 17:47:10 2013
@@ -228,13 +228,15 @@ public class QJournalProtocolTranslatorP
 
   @Override
   public GetEditLogManifestResponseProto getEditLogManifest(String jid,
-      long sinceTxId, boolean forReading) throws IOException {
+      long sinceTxId, boolean forReading, boolean inProgressOk)
+      throws IOException {
     try {
       return rpcProxy.getEditLogManifest(NULL_CONTROLLER,
           GetEditLogManifestRequestProto.newBuilder()
             .setJid(convertJournalId(jid))
             .setSinceTxId(sinceTxId)
             .setForReading(forReading)
+            .setInProgressOk(inProgressOk)
             .build());
     } catch (ServiceException e) {
       throw ProtobufHelper.getRemoteException(e);

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java?rev=1516230&r1=1516229&r2=1516230&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java Wed Aug 21 17:47:10 2013
@@ -25,10 +25,9 @@ import java.io.InputStream;
 import java.io.OutputStreamWriter;
 import java.net.URL;
 import java.security.PrivilegedExceptionAction;
+import java.util.Iterator;
 import java.util.List;
 import java.util.concurrent.TimeUnit;
-import java.util.regex.Matcher;
-import java.util.regex.Pattern;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -36,8 +35,8 @@ import org.apache.hadoop.conf.Configurat
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.qjournal.protocol.JournalNotFormattedException;
-import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocol;
 import org.apache.hadoop.hdfs.qjournal.protocol.JournalOutOfSyncException;
+import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocol;
 import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto;
 import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData;
 import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto;
@@ -50,6 +49,7 @@ import org.apache.hadoop.hdfs.server.nam
 import org.apache.hadoop.hdfs.server.namenode.JournalManager;
 import org.apache.hadoop.hdfs.server.namenode.TransferFsImage;
 import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
+import org.apache.hadoop.hdfs.server.protocol.RemoteEditLog;
 import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest;
 import org.apache.hadoop.hdfs.util.AtomicFileOutputStream;
 import org.apache.hadoop.hdfs.util.BestEffortLongFile;
@@ -630,14 +630,31 @@ class Journal implements Closeable {
    * @see QJournalProtocol#getEditLogManifest(String, long)
    */
   public RemoteEditLogManifest getEditLogManifest(long sinceTxId,
-      boolean forReading) throws IOException {
+      boolean forReading, boolean inProgressOk) throws IOException {
     // No need to checkRequest() here - anyone may ask for the list
     // of segments.
     checkFormatted();
     
-    RemoteEditLogManifest manifest = new RemoteEditLogManifest(
-        fjm.getRemoteEditLogs(sinceTxId, forReading));
-    return manifest;
+    // if this is for reading, ignore the in-progress editlog segment
+    inProgressOk = forReading ? false : inProgressOk;
+    List<RemoteEditLog> logs = fjm.getRemoteEditLogs(sinceTxId, forReading,
+        inProgressOk);
+    
+    if (inProgressOk) {
+      RemoteEditLog log = null;
+      for (Iterator<RemoteEditLog> iter = logs.iterator(); iter.hasNext();) {
+        log = iter.next();
+        if (log.isInProgress()) {
+          iter.remove();
+          break;
+        }
+      }
+      if (log != null && log.isInProgress()) {
+        logs.add(new RemoteEditLog(log.getStartTxId(), getHighestWrittenTxId()));
+      }
+    }
+    
+    return new RemoteEditLogManifest(logs);
   }
 
   /**

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNode.java?rev=1516230&r1=1516229&r2=1516230&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNode.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNode.java Wed Aug 21 17:47:10 2013
@@ -18,8 +18,10 @@
 package org.apache.hadoop.hdfs.qjournal.server;
 
 import java.io.File;
+import java.io.FileFilter;
 import java.io.IOException;
 import java.net.InetSocketAddress;
+import java.util.HashMap;
 import java.util.Map;
 
 import org.apache.commons.logging.Log;
@@ -34,11 +36,13 @@ import org.apache.hadoop.hdfs.server.com
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
 import org.apache.hadoop.metrics2.source.JvmMetrics;
+import org.apache.hadoop.metrics2.util.MBeans;
 import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.util.DiskChecker;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
+import org.mortbay.util.ajax.JSON;
 
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Maps;
@@ -51,7 +55,7 @@ import com.google.common.collect.Maps;
  * in the quorum protocol.
  */
 @InterfaceAudience.Private
-public class JournalNode implements Tool, Configurable {
+public class JournalNode implements Tool, Configurable, JournalNodeMXBean {
   public static final Log LOG = LogFactory.getLog(JournalNode.class);
   private Configuration conf;
   private JournalNodeRpcServer rpcServer;
@@ -128,6 +132,8 @@ public class JournalNode implements Tool
     SecurityUtil.login(conf, DFSConfigKeys.DFS_JOURNALNODE_KEYTAB_FILE_KEY,
         DFSConfigKeys.DFS_JOURNALNODE_USER_NAME_KEY, socAddr.getHostName());
     
+    registerJNMXBean();
+    
     httpServer = new JournalNodeHttpServer(conf, this);
     httpServer.start();
 
@@ -208,6 +214,50 @@ public class JournalNode implements Tool
     return new File(new File(dir), jid);
   }
 
+  @Override // JournalNodeMXBean
+  public String getJournalsStatus() {
+    // jid:{Formatted:True/False}
+    Map<String, Map<String, String>> status = 
+        new HashMap<String, Map<String, String>>();
+    synchronized (this) {
+      for (Map.Entry<String, Journal> entry : journalsById.entrySet()) {
+        Map<String, String> jMap = new HashMap<String, String>();
+        jMap.put("Formatted", Boolean.toString(entry.getValue().isFormatted()));
+        status.put(entry.getKey(), jMap);
+      }
+    }
+    
+    // It is possible that some journals have been formatted before, while the 
+    // corresponding journals are not in journalsById yet (because of restarting
+    // JN, e.g.). For simplicity, let's just assume a journal is formatted if
+    // there is a directory for it. We can also call analyzeStorage method for
+    // these directories if necessary.
+    // Also note that we do not need to check localDir here since
+    // validateAndCreateJournalDir has been called before we register the
+    // MXBean.
+    File[] journalDirs = localDir.listFiles(new FileFilter() {
+      @Override
+      public boolean accept(File file) {
+        return file.isDirectory();
+      }
+    });
+    for (File journalDir : journalDirs) {
+      String jid = journalDir.getName();
+      if (!status.containsKey(jid)) {
+        Map<String, String> jMap = new HashMap<String, String>();
+        jMap.put("Formatted", "true");
+        status.put(jid, jMap);
+      }
+    }
+    return JSON.toString(status);
+  }
+  
+  /**
+   * Register JournalNodeMXBean
+   */
+  private void registerJNMXBean() {
+    MBeans.register("JournalNode", "JournalNodeInfo", this);
+  }
   
   private class ErrorReporter implements StorageErrorReporter {
     @Override
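
For context on the JournalNode hunks above (HDFS-5076): getJournalsStatus() backs the new JournalNodeMXBean and reports, per journal id, whether that journal is formatted, encoded as a JSON string. Below is a hedged sketch of reading it in-process through the platform MBeanServer, not part of this commit; the object name follows the usual MBeans.register("JournalNode", "JournalNodeInfo", ...) naming and the attribute name is inferred from the getter.

  import java.lang.management.ManagementFactory;

  import javax.management.MBeanServer;
  import javax.management.ObjectName;

  public class JournalNodeStatusReaderSketch {
    public static void main(String[] args) throws Exception {
      MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
      // MBeans.register("JournalNode", "JournalNodeInfo", bean) registers the
      // bean under this name; it is also visible through the JN's /jmx servlet.
      ObjectName name =
          new ObjectName("Hadoop:service=JournalNode,name=JournalNodeInfo");
      // The getter getJournalsStatus() surfaces as the "JournalsStatus"
      // attribute and returns JSON such as {"myjournal":{"Formatted":"true"}}.
      String json = (String) mbs.getAttribute(name, "JournalsStatus");
      System.out.println(json);
    }
  }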

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeHttpServer.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeHttpServer.java?rev=1516230&r1=1516229&r2=1516230&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeHttpServer.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeHttpServer.java Wed Aug 21 17:47:10 2013
@@ -36,8 +36,8 @@ import org.apache.hadoop.hdfs.server.com
 import org.apache.hadoop.http.HttpServer;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.authorize.AccessControlList;
-import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.SecurityUtil;
+import org.apache.hadoop.security.UserGroupInformation;
 
 /**
  * Encapsulates the HTTP server started by the Journal Service.
@@ -69,16 +69,15 @@ public class JournalNodeHttpServer {
         bindAddr.getHostName()));
 
     int tmpInfoPort = bindAddr.getPort();
-    httpServer = new HttpServer("journal", bindAddr.getHostName(),
-        tmpInfoPort, tmpInfoPort == 0, conf, new AccessControlList(conf
-            .get(DFS_ADMIN, " "))) {
-      {
-        if (UserGroupInformation.isSecurityEnabled()) {
-          initSpnego(conf, DFS_JOURNALNODE_INTERNAL_SPNEGO_USER_NAME_KEY,
-              DFSUtil.getSpnegoKeytabKey(conf, DFS_JOURNALNODE_KEYTAB_FILE_KEY));
-        }
-      }
-    };
+    httpServer = new HttpServer.Builder().setName("journal")
+        .setBindAddress(bindAddr.getHostName()).setPort(tmpInfoPort)
+        .setFindPort(tmpInfoPort == 0).setConf(conf).setACL(
+            new AccessControlList(conf.get(DFS_ADMIN, " ")))
+        .setSecurityEnabled(UserGroupInformation.isSecurityEnabled())
+        .setUsernameConfKey(
+            DFS_JOURNALNODE_INTERNAL_SPNEGO_USER_NAME_KEY)
+        .setKeytabConfKey(DFSUtil.getSpnegoKeytabKey(conf,
+            DFS_JOURNALNODE_KEYTAB_FILE_KEY)).build();
     httpServer.setAttribute(JN_ATTRIBUTE_KEY, localJournalNode);
     httpServer.setAttribute(JspHelper.CURRENT_CONF, conf);
     httpServer.addInternalServlet("getJournal", "/getJournal",

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeRpcServer.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeRpcServer.java?rev=1516230&r1=1516229&r2=1516230&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeRpcServer.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeRpcServer.java Wed Aug 21 17:47:10 2013
@@ -175,10 +175,11 @@ class JournalNodeRpcServer implements QJ
 
   @Override
   public GetEditLogManifestResponseProto getEditLogManifest(String jid,
-      long sinceTxId, boolean forReading) throws IOException {
+      long sinceTxId, boolean forReading, boolean inProgressOk)
+      throws IOException {
     
     RemoteEditLogManifest manifest = jn.getOrCreateJournal(jid)
-        .getEditLogManifest(sinceTxId, forReading);
+        .getEditLogManifest(sinceTxId, forReading, inProgressOk);
     
     return GetEditLogManifestResponseProto.newBuilder()
         .setManifest(PBHelper.convert(manifest))

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSecretManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSecretManager.java?rev=1516230&r1=1516229&r2=1516230&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSecretManager.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSecretManager.java Wed Aug 21 17:47:10 2013
@@ -81,6 +81,28 @@ public class DelegationTokenSecretManage
     return new DelegationTokenIdentifier();
   }
   
+  @Override
+  public synchronized byte[] retrievePassword(
+      DelegationTokenIdentifier identifier) throws InvalidToken {
+    try {
+      // this check introduces inconsistency in the authentication to a
+      // HA standby NN.  non-token auths are allowed into the namespace which
+      // decides whether to throw a StandbyException.  tokens are a bit
+      // different in that a standby may be behind and thus not yet know
+      // of all tokens issued by the active NN.  the following check does
+      // not allow ANY token auth, however it should allow known tokens in
+      checkAvailableForRead();
+    } catch (StandbyException se) {
+      // FIXME: this is a hack to get around changing method signatures by
+      // tunneling a non-InvalidToken exception as the cause which the
+      // RPC server will unwrap before returning to the client
+      InvalidToken wrappedStandby = new InvalidToken("StandbyException");
+      wrappedStandby.initCause(se);
+      throw wrappedStandby;
+    }
+    return super.retrievePassword(identifier);
+  }
+  
   @Override //SecretManager
   public void checkAvailableForRead() throws StandbyException {
     namesystem.checkOperation(OperationCategory.READ);

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyWithNodeGroup.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyWithNodeGroup.java?rev=1516230&r1=1516229&r2=1516230&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyWithNodeGroup.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyWithNodeGroup.java Wed Aug 21 17:47:10 2013
@@ -169,16 +169,17 @@ public class BlockPlacementPolicyWithNod
       long blocksize, int maxReplicasPerRack, List<DatanodeDescriptor> results,
       boolean avoidStaleNodes) throws NotEnoughReplicasException {
     int oldNumOfReplicas = results.size();
-    // randomly choose one node from remote racks
+
+    final String rackLocation = NetworkTopology.getFirstHalf(
+        localMachine.getNetworkLocation());
     try {
-      chooseRandom(
-          numOfReplicas,
-          "~" + NetworkTopology.getFirstHalf(localMachine.getNetworkLocation()),
-          excludedNodes, blocksize, maxReplicasPerRack, results,
-          avoidStaleNodes);
+      // randomly choose from remote racks
+      chooseRandom(numOfReplicas, "~" + rackLocation, excludedNodes, blocksize,
+          maxReplicasPerRack, results, avoidStaleNodes);
     } catch (NotEnoughReplicasException e) {
+      // fall back to the local rack
       chooseRandom(numOfReplicas - (results.size() - oldNumOfReplicas),
-          localMachine.getNetworkLocation(), excludedNodes, blocksize,
+          rackLocation, excludedNodes, blocksize,
           maxReplicasPerRack, results, avoidStaleNodes);
     }
   }

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java?rev=1516230&r1=1516229&r2=1516230&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java Wed Aug 21 17:47:10 2013
@@ -385,11 +385,15 @@ public class DataNode extends Configured
     String infoHost = infoSocAddr.getHostName();
     int tmpInfoPort = infoSocAddr.getPort();
     this.infoServer = (secureResources == null) 
-       ? new HttpServer("datanode", infoHost, tmpInfoPort, tmpInfoPort == 0, 
-           conf, new AccessControlList(conf.get(DFS_ADMIN, " ")))
-       : new HttpServer("datanode", infoHost, tmpInfoPort, tmpInfoPort == 0,
-           conf, new AccessControlList(conf.get(DFS_ADMIN, " ")),
-           secureResources.getListener());
+        ? new HttpServer.Builder().setName("datanode")
+            .setBindAddress(infoHost).setPort(tmpInfoPort)
+            .setFindPort(tmpInfoPort == 0).setConf(conf)
+            .setACL(new AccessControlList(conf.get(DFS_ADMIN, " "))).build()
+        : new HttpServer.Builder().setName("datanode")
+            .setBindAddress(infoHost).setPort(tmpInfoPort)
+            .setFindPort(tmpInfoPort == 0).setConf(conf)
+            .setACL(new AccessControlList(conf.get(DFS_ADMIN, " ")))
+            .setConnector(secureResources.getListener()).build();
     LOG.info("Opened info server at " + infoHost + ":" + tmpInfoPort);
     if (conf.getBoolean(DFS_HTTPS_ENABLE_KEY, false)) {
       boolean needClientAuth = conf.getBoolean(DFS_CLIENT_HTTPS_NEED_AUTH_KEY,

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DatanodeJspHelper.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DatanodeJspHelper.java?rev=1516230&r1=1516229&r2=1516230&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DatanodeJspHelper.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DatanodeJspHelper.java Wed Aug 21 17:47:10 2013
@@ -19,7 +19,9 @@ package org.apache.hadoop.hdfs.server.da
 
 import java.io.File;
 import java.io.IOException;
+import java.net.InetAddress;
 import java.net.InetSocketAddress;
+import java.net.URI;
 import java.net.URL;
 import java.net.URLEncoder;
 import java.security.PrivilegedExceptionAction;
@@ -27,6 +29,7 @@ import java.text.SimpleDateFormat;
 import java.util.Date;
 import java.util.List;
 
+import javax.servlet.ServletContext;
 import javax.servlet.http.HttpServletRequest;
 import javax.servlet.http.HttpServletResponse;
 import javax.servlet.jsp.JspWriter;
@@ -36,6 +39,7 @@ import org.apache.hadoop.classification.
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
@@ -43,6 +47,9 @@ import org.apache.hadoop.hdfs.protocol.L
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager;
 import org.apache.hadoop.hdfs.server.common.JspHelper;
+import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
+import org.apache.hadoop.hdfs.server.namenode.NameNode;
+import org.apache.hadoop.hdfs.server.namenode.NameNodeHttpServer;
 import org.apache.hadoop.http.HtmlQuoting;
 import org.apache.hadoop.http.HttpConfig;
 import org.apache.hadoop.net.NetUtils;
@@ -50,6 +57,7 @@ import org.apache.hadoop.security.UserGr
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.util.ServletUtil;
 import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.util.VersionInfo;
 
 @InterfaceAudience.Private
 public class DatanodeJspHelper {
@@ -712,4 +720,24 @@ public class DatanodeJspHelper {
     final String nnAddr = request.getParameter(JspHelper.NAMENODE_ADDRESS);
     return getDFSClient(ugi, nnAddr, conf);
   }
+
+  /** Return a table containing version information. */
+  public static String getVersionTable(ServletContext context) {
+    StringBuilder sb = new StringBuilder();
+    final DataNode dataNode = (DataNode) context.getAttribute("datanode");
+    sb.append("<div class='dfstable'><table>");
+    sb.append("<tr><td class='col1'>Version:</td><td>");
+    sb.append(VersionInfo.getVersion() + ", " + VersionInfo.getRevision());
+    sb.append("</td></tr>\n" + "\n  <tr><td class='col1'>Compiled:</td><td>"
+        + VersionInfo.getDate());
+    sb.append(" by " + VersionInfo.getUser() + " from "
+        + VersionInfo.getBranch());
+    if (dataNode != null) {
+      sb.append("</td></tr>\n  <tr><td class='col1'>Cluster ID:</td><td>"
+          + dataNode.getClusterId());
+    }
+    sb.append("</td></tr>\n</table></div>");
+    return sb.toString();
+  }
+
 }

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/resources/DatanodeWebHdfsMethods.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/resources/DatanodeWebHdfsMethods.java?rev=1516230&r1=1516229&r2=1516230&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/resources/DatanodeWebHdfsMethods.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/resources/DatanodeWebHdfsMethods.java Wed Aug 21 17:47:10 2013
@@ -410,8 +410,9 @@ public class DatanodeWebHdfsMethods {
         throw ioe;
       }
       
-      final long n = length.getValue() != null? length.getValue()
-        : in.getVisibleLength() - offset.getValue();
+      final long n = length.getValue() != null ?
+        Math.min(length.getValue(), in.getVisibleLength() - offset.getValue()) :
+        in.getVisibleLength() - offset.getValue();
       return Response.ok(new OpenEntity(in, n, dfsclient)).type(
           MediaType.APPLICATION_OCTET_STREAM).build();
     }
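
The clamp above keeps a client-supplied length from running past the visible end of the file. The arithmetic, as a small self-contained check (method and variable names are illustrative):

    public class OpenLengthClamp {
      // Number of bytes to stream for an OPEN with an optional length parameter.
      static long responseLength(Long requestedLength, long visibleLength, long offset) {
        long remaining = visibleLength - offset;
        return requestedLength != null ? Math.min(requestedLength, remaining) : remaining;
      }

      public static void main(String[] args) {
        // A file with 100 visible bytes, read starting at offset 90:
        System.out.println(responseLength(null, 100, 90));   // 10 (no length given)
        System.out.println(responseLength(5L,   100, 90));   // 5  (length fits)
        System.out.println(responseLength(50L,  100, 90));   // 10 (length clamped)
      }
    }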

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java?rev=1516230&r1=1516229&r2=1516230&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java Wed Aug 21 17:47:10 2013
@@ -2093,6 +2093,10 @@ public class FSDirectory implements Clos
   /** Verify if the snapshot name is legal. */
   void verifySnapshotName(String snapshotName, String path)
       throws PathComponentTooLongException {
+    if (snapshotName.contains(Path.SEPARATOR)) {
+      throw new HadoopIllegalArgumentException(
+          "Snapshot name cannot contain \"" + Path.SEPARATOR + "\"");
+    }
     final byte[] bytes = DFSUtil.string2Bytes(snapshotName);
     verifyINodeName(bytes);
     verifyMaxComponentLength(bytes, path, 0);
@@ -2726,6 +2730,19 @@ public class FSDirectory implements Clos
       throw new FileNotFoundException(
           "File for given inode path does not exist: " + src);
     }
+    
+    // Handle single ".." for NFS lookup support.
+    if ((pathComponents.length > 4)
+        && DFSUtil.bytes2String(pathComponents[4]).equals("..")) {
+      INode parent = inode.getParent();
+      if (parent == null || parent.getId() == INodeId.ROOT_INODE_ID) {
+        // inode is root, or its parent is root.
+        return Path.SEPARATOR;
+      } else {
+        return parent.getFullPathName();
+      }
+    }
+
     StringBuilder path = id == INodeId.ROOT_INODE_ID ? new StringBuilder()
         : new StringBuilder(inode.getFullPathName());
     for (int i = 4; i < pathComponents.length; i++) {
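
Two behaviors are added to FSDirectory here: snapshot names containing the path separator are rejected up front, and a lone ".." component after the reserved inode-path prefix resolves to the parent (with root mapping to "/"). A sketch of the separator check, using a plain string constant in place of Path.SEPARATOR:

    public class SnapshotNameCheck {
      static final String SEPARATOR = "/";   // stands in for Path.SEPARATOR

      static void verifySnapshotName(String snapshotName) {
        if (snapshotName.contains(SEPARATOR)) {
          throw new IllegalArgumentException(
              "Snapshot name cannot contain \"" + SEPARATOR + "\"");
        }
      }

      public static void main(String[] args) {
        verifySnapshotName("s1");        // accepted
        try {
          verifySnapshotName("a/b");     // rejected
        } catch (IllegalArgumentException e) {
          System.out.println(e.getMessage());
        }
      }
    }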

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java?rev=1516230&r1=1516229&r2=1516230&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java Wed Aug 21 17:47:10 2013
@@ -1274,6 +1274,7 @@ public class FSEditLog implements LogsPu
     }
   }
   
+  @Override
   public void selectInputStreams(Collection<EditLogInputStream> streams,
       long fromTxId, boolean inProgressOk, boolean forReading) {
     journalSet.selectInputStreams(streams, fromTxId, inProgressOk, forReading);
@@ -1284,18 +1285,27 @@ public class FSEditLog implements LogsPu
     return selectInputStreams(fromTxId, toAtLeastTxId, null, true);
   }
 
+  /** Select a list of input streams to load */
+  public Collection<EditLogInputStream> selectInputStreams(
+      long fromTxId, long toAtLeastTxId, MetaRecoveryContext recovery,
+      boolean inProgressOk) throws IOException {
+    return selectInputStreams(fromTxId, toAtLeastTxId, recovery, inProgressOk,
+        true);
+  }
+  
   /**
-   * Select a list of input streams to load.
+   * Select a list of input streams.
    * 
    * @param fromTxId first transaction in the selected streams
    * @param toAtLeast the selected streams must contain this transaction
    * @param inProgessOk set to true if in-progress streams are OK
+   * @param forReading whether or not to use the streams to load the edit log
    */
   public synchronized Collection<EditLogInputStream> selectInputStreams(
       long fromTxId, long toAtLeastTxId, MetaRecoveryContext recovery,
-      boolean inProgressOk) throws IOException {
+      boolean inProgressOk, boolean forReading) throws IOException {
     List<EditLogInputStream> streams = new ArrayList<EditLogInputStream>();
-    selectInputStreams(streams, fromTxId, inProgressOk, true);
+    selectInputStreams(streams, fromTxId, inProgressOk, forReading);
 
     try {
       checkForGaps(streams, fromTxId, toAtLeastTxId, inProgressOk);
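
The old four-argument selectInputStreams survives as a thin wrapper that defaults forReading to true, so existing callers keep their behavior while new callers can opt out. The delegation pattern, reduced to its shape (only the method name mirrors the patch):

    import java.util.ArrayList;
    import java.util.Collection;

    public class SelectStreamsOverload {
      // Old signature: keeps the previous behavior (forReading = true).
      static Collection<String> selectInputStreams(long fromTxId, long toAtLeastTxId,
          boolean inProgressOk) {
        return selectInputStreams(fromTxId, toAtLeastTxId, inProgressOk, true);
      }

      // New signature: callers now say whether the streams will be used for reading.
      static Collection<String> selectInputStreams(long fromTxId, long toAtLeastTxId,
          boolean inProgressOk, boolean forReading) {
        Collection<String> streams = new ArrayList<String>();
        streams.add("streams from " + fromTxId + " (forReading=" + forReading + ")");
        return streams;
      }

      public static void main(String[] args) {
        System.out.println(selectInputStreams(1, 10, false));          // defaults to true
        System.out.println(selectInputStreams(1, 10, false, false));   // explicit false
      }
    }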

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java?rev=1516230&r1=1516229&r2=1516230&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java Wed Aug 21 17:47:10 2013
@@ -2153,10 +2153,15 @@ public class FSNamesystem implements Nam
         throw new FileNotFoundException("failed to append to non-existent file "
           + src + " on client " + clientMachine);
       }
-      final INodeFile myFile = INodeFile.valueOf(inode, src, true);
+      INodeFile myFile = INodeFile.valueOf(inode, src, true);
       // Opening an existing file for write - may need to recover lease.
       recoverLeaseInternal(myFile, src, holder, clientMachine, false);
-
+      
+      // recoverLeaseInternal may create a new InodeFile via 
+      // finalizeINodeFileUnderConstruction so we need to refresh 
+      // the referenced file.  
+      myFile = INodeFile.valueOf(dir.getINode(src), src, true);
+      
       final DatanodeDescriptor clientNode = 
           blockManager.getDatanodeManager().getDatanodeByHost(clientMachine);
       return prepareFileForWrite(src, myFile, holder, clientMachine, clientNode,
@@ -6364,6 +6369,16 @@ public class FSNamesystem implements Nam
     return JSON.toString(jasList);
   }
 
+  @Override // NameNodeMxBean
+  public String getJournalTransactionInfo() {
+    Map<String, String> txnIdMap = new HashMap<String, String>();
+    txnIdMap.put("LastAppliedOrWrittenTxId",
+        Long.toString(this.getFSImage().getLastAppliedOrWrittenTxId()));
+    txnIdMap.put("MostRecentCheckpointTxId",
+        Long.toString(this.getFSImage().getMostRecentCheckpointTxId()));
+    return JSON.toString(txnIdMap);
+  }
+  
   @Override  // NameNodeMXBean
   public String getNNStarted() {
     return getStartTime().toString();
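
getJournalTransactionInfo publishes two counters as a small JSON object on the NameNode MXBean. The sketch below builds the same shape by hand so it stays self-contained; the transaction IDs are made up and the real code uses the JSON helper shown in the hunk:

    import java.util.LinkedHashMap;
    import java.util.Map;

    public class JournalTxnInfoSketch {
      // Hand-rolled stand-in for the JSON helper used in the patch.
      static String toJson(Map<String, String> map) {
        StringBuilder sb = new StringBuilder("{");
        String sep = "";
        for (Map.Entry<String, String> e : map.entrySet()) {
          sb.append(sep).append('"').append(e.getKey()).append("\":\"")
            .append(e.getValue()).append('"');
          sep = ",";
        }
        return sb.append('}').toString();
      }

      public static void main(String[] args) {
        Map<String, String> txnIdMap = new LinkedHashMap<String, String>();
        txnIdMap.put("LastAppliedOrWrittenTxId", Long.toString(12345L));
        txnIdMap.put("MostRecentCheckpointTxId", Long.toString(12000L));
        // {"LastAppliedOrWrittenTxId":"12345","MostRecentCheckpointTxId":"12000"}
        System.out.println(toJson(txnIdMap));
      }
    }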

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java?rev=1516230&r1=1516229&r2=1516230&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java Wed Aug 21 17:47:10 2013
@@ -169,18 +169,26 @@ public class FileJournalManager implemen
    * @param fromTxId the txnid which to start looking
    * @param forReading whether or not the caller intends to read from the edit
    *        logs
+   * @param inProgressOk whether or not to include the in-progress edit log 
+   *        segment       
    * @return a list of remote edit logs
    * @throws IOException if edit logs cannot be listed.
    */
   public List<RemoteEditLog> getRemoteEditLogs(long firstTxId,
-      boolean forReading) throws IOException {
+      boolean forReading, boolean inProgressOk) throws IOException {
+    // make sure not reading in-progress edit log, i.e., if forReading is true,
+    // we should ignore the in-progress edit log.
+    Preconditions.checkArgument(!(forReading && inProgressOk));
+    
     File currentDir = sd.getCurrentDir();
     List<EditLogFile> allLogFiles = matchEditLogs(currentDir);
     List<RemoteEditLog> ret = Lists.newArrayListWithCapacity(
         allLogFiles.size());
 
     for (EditLogFile elf : allLogFiles) {
-      if (elf.hasCorruptHeader() || elf.isInProgress()) continue;
+      if (elf.hasCorruptHeader() || (!inProgressOk && elf.isInProgress())) {
+        continue;
+      }
       if (elf.getFirstTxId() >= firstTxId) {
         ret.add(new RemoteEditLog(elf.firstTxId, elf.lastTxId));
       } else if (elf.getFirstTxId() < firstTxId && firstTxId <= elf.getLastTxId()) {
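
The new precondition encodes an invariant rather than silently filtering: a caller may not ask for in-progress segments while also declaring the logs are for reading. Guava's Preconditions.checkArgument turns the invalid combination into an immediate IllegalArgumentException; a minimal sketch (the method body is illustrative):

    import com.google.common.base.Preconditions;

    public class ForReadingInvariant {
      static void getRemoteEditLogs(boolean forReading, boolean inProgressOk) {
        // Readers must never be handed an in-progress segment.
        Preconditions.checkArgument(!(forReading && inProgressOk),
            "forReading and inProgressOk cannot both be true");
        // ... list finalized (and, if allowed, in-progress) segments ...
      }

      public static void main(String[] args) {
        getRemoteEditLogs(true, false);     // ok
        getRemoteEditLogs(false, true);     // ok
        try {
          getRemoteEditLogs(true, true);    // invalid combination
        } catch (IllegalArgumentException e) {
          System.out.println(e.getMessage());
        }
      }
    }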

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/GetImageServlet.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/GetImageServlet.java?rev=1516230&r1=1516229&r2=1516230&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/GetImageServlet.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/GetImageServlet.java Wed Aug 21 17:47:10 2013
@@ -310,11 +310,14 @@ public class GetImageServlet extends Htt
   
   static String getParamStringToPutImage(long txid,
       InetSocketAddress imageListenAddress, Storage storage) {
-    
+    String machine = !imageListenAddress.isUnresolved()
+        && imageListenAddress.getAddress().isAnyLocalAddress() ? null
+        : imageListenAddress.getHostName();
     return "putimage=1" +
       "&" + TXID_PARAM + "=" + txid +
       "&port=" + imageListenAddress.getPort() +
-      "&" + STORAGEINFO_PARAM + "=" +
+      (machine != null ? "&machine=" + machine : "")
+      + "&" + STORAGEINFO_PARAM + "=" +
       storage.toColonSeparatedString();
   }
 
@@ -341,10 +344,6 @@ public class GetImageServlet extends Htt
       Map<String, String[]> pmap = request.getParameterMap();
       isGetImage = isGetEdit = isPutImage = fetchLatest = false;
       remoteport = 0;
-      machineName = request.getRemoteHost();
-      if (InetAddresses.isInetAddress(machineName)) {
-        machineName = NetUtils.getHostNameOfIP(machineName);
-      }
 
       for (Map.Entry<String, String[]> entry : pmap.entrySet()) {
         String key = entry.getKey();
@@ -369,11 +368,20 @@ public class GetImageServlet extends Htt
           txId = ServletUtil.parseLongParam(request, TXID_PARAM);
         } else if (key.equals("port")) { 
           remoteport = new Integer(val[0]).intValue();
+        } else if (key.equals("machine")) {
+          machineName = val[0];
         } else if (key.equals(STORAGEINFO_PARAM)) {
           storageInfoString = val[0];
         }
       }
 
+      if (machineName == null) {
+        machineName = request.getRemoteHost();
+        if (InetAddresses.isInetAddress(machineName)) {
+          machineName = NetUtils.getHostNameOfIP(machineName);
+        }
+      }
+
       int numGets = (isGetImage?1:0) + (isGetEdit?1:0);
       if ((numGets > 1) || (numGets == 0) && !isPutImage) {
         throw new IOException("Illegal parameters to TransferFsImage");
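
The change is two-sided: the checkpointer only advertises a machine parameter when its image listen address is a concrete host (a wildcard bind would advertise an address the NameNode could not call back), and the receiving side falls back to request.getRemoteHost() when the parameter is absent. A sketch of the parameter-string side; "txid" stands in for the TXID_PARAM constant and the storage-info piece is omitted:

    import java.net.InetSocketAddress;

    public class PutImageParams {
      // Omit "machine" on a wildcard bind; the peer then falls back to getRemoteHost().
      static String getParamStringToPutImage(long txid,
          InetSocketAddress imageListenAddress) {
        String machine = !imageListenAddress.isUnresolved()
            && imageListenAddress.getAddress().isAnyLocalAddress() ? null
            : imageListenAddress.getHostName();
        return "putimage=1"
            + "&txid=" + txid
            + "&port=" + imageListenAddress.getPort()
            + (machine != null ? "&machine=" + machine : "");
      }

      public static void main(String[] args) {
        // Wildcard bind: no machine parameter in the query string.
        System.out.println(getParamStringToPutImage(42, new InetSocketAddress(50090)));
        // Concrete host: machine parameter included.
        System.out.println(getParamStringToPutImage(42,
            new InetSocketAddress("checkpoint.example.com", 50090)));
      }
    }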

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/JournalSet.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/JournalSet.java?rev=1516230&r1=1516229&r2=1516230&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/JournalSet.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/JournalSet.java Wed Aug 21 17:47:10 2013
@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
+import static org.apache.hadoop.util.ExitUtil.terminate;
+
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collection;
@@ -31,14 +33,10 @@ import java.util.concurrent.CopyOnWriteA
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
-
-import static org.apache.hadoop.util.ExitUtil.terminate;
-
 import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
 import org.apache.hadoop.hdfs.server.protocol.RemoteEditLog;
 import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest;
 
-import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.ComparisonChain;
 import com.google.common.collect.ImmutableList;
@@ -257,13 +255,12 @@ public class JournalSet implements Journ
             ". Skipping.", ioe);
       }
     }
-    chainAndMakeRedundantStreams(streams, allStreams, fromTxId, inProgressOk);
+    chainAndMakeRedundantStreams(streams, allStreams, fromTxId);
   }
   
   public static void chainAndMakeRedundantStreams(
       Collection<EditLogInputStream> outStreams,
-      PriorityQueue<EditLogInputStream> allStreams,
-      long fromTxId, boolean inProgressOk) {
+      PriorityQueue<EditLogInputStream> allStreams, long fromTxId) {
     // We want to group together all the streams that start on the same start
     // transaction ID.  To do this, we maintain an accumulator (acc) of all
     // the streams we've seen at a given start transaction ID.  When we see a
@@ -598,7 +595,7 @@ public class JournalSet implements Journ
       if (j.getManager() instanceof FileJournalManager) {
         FileJournalManager fjm = (FileJournalManager)j.getManager();
         try {
-          allLogs.addAll(fjm.getRemoteEditLogs(fromTxId, forReading));
+          allLogs.addAll(fjm.getRemoteEditLogs(fromTxId, forReading, false));
         } catch (Throwable t) {
           LOG.warn("Cannot list edit logs in " + fjm, t);
         }

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java?rev=1516230&r1=1516229&r2=1516230&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java Wed Aug 21 17:47:10 2013
@@ -956,41 +956,49 @@ public class NameNode implements NameNod
     FSEditLog sourceEditLog = fsns.getFSImage().editLog;
     
     long fromTxId = fsns.getFSImage().getMostRecentCheckpointTxId();
-    Collection<EditLogInputStream> streams = sourceEditLog.selectInputStreams(
-        fromTxId+1, 0);
-
-    // Set the nextTxid to the CheckpointTxId+1
-    newSharedEditLog.setNextTxId(fromTxId + 1);
     
-    // Copy all edits after last CheckpointTxId to shared edits dir
-    for (EditLogInputStream stream : streams) {
-      LOG.debug("Beginning to copy stream " + stream + " to shared edits");
-      FSEditLogOp op;
-      boolean segmentOpen = false;
-      while ((op = stream.readOp()) != null) {
-        if (LOG.isTraceEnabled()) {
-          LOG.trace("copying op: " + op);
-        }
-        if (!segmentOpen) {
-          newSharedEditLog.startLogSegment(op.txid, false);
-          segmentOpen = true;
+    Collection<EditLogInputStream> streams = null;
+    try {
+      streams = sourceEditLog.selectInputStreams(fromTxId + 1, 0);
+
+      // Set the nextTxid to the CheckpointTxId+1
+      newSharedEditLog.setNextTxId(fromTxId + 1);
+
+      // Copy all edits after last CheckpointTxId to shared edits dir
+      for (EditLogInputStream stream : streams) {
+        LOG.debug("Beginning to copy stream " + stream + " to shared edits");
+        FSEditLogOp op;
+        boolean segmentOpen = false;
+        while ((op = stream.readOp()) != null) {
+          if (LOG.isTraceEnabled()) {
+            LOG.trace("copying op: " + op);
+          }
+          if (!segmentOpen) {
+            newSharedEditLog.startLogSegment(op.txid, false);
+            segmentOpen = true;
+          }
+
+          newSharedEditLog.logEdit(op);
+
+          if (op.opCode == FSEditLogOpCodes.OP_END_LOG_SEGMENT) {
+            newSharedEditLog.logSync();
+            newSharedEditLog.endCurrentLogSegment(false);
+            LOG.debug("ending log segment because of END_LOG_SEGMENT op in "
+                + stream);
+            segmentOpen = false;
+          }
         }
-        
-        newSharedEditLog.logEdit(op);
 
-        if (op.opCode == FSEditLogOpCodes.OP_END_LOG_SEGMENT) {
+        if (segmentOpen) {
+          LOG.debug("ending log segment because of end of stream in " + stream);
           newSharedEditLog.logSync();
           newSharedEditLog.endCurrentLogSegment(false);
-          LOG.debug("ending log segment because of END_LOG_SEGMENT op in " + stream);
           segmentOpen = false;
         }
       }
-      
-      if (segmentOpen) {
-        LOG.debug("ending log segment because of end of stream in " + stream);
-        newSharedEditLog.logSync();
-        newSharedEditLog.endCurrentLogSegment(false);
-        segmentOpen = false;
+    } finally {
+      if (streams != null) {
+        FSEditLog.closeAllStreams(streams);
       }
     }
   }
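
The restructuring above wraps the whole copy loop in try/finally so every selected input stream is closed even if reading or re-logging an op fails midway. The resource-safety shape, independent of the edit-log types (closeAll stands in for FSEditLog.closeAllStreams):

    import java.io.Closeable;
    import java.io.IOException;
    import java.util.Arrays;
    import java.util.Collection;

    public class CopyWithCleanup {
      static void closeAll(Collection<? extends Closeable> streams) {
        for (Closeable c : streams) {
          try { c.close(); } catch (IOException ignored) { }
        }
      }

      static void copySegments(Collection<? extends Closeable> source) throws IOException {
        Collection<? extends Closeable> streams = null;
        try {
          streams = source;            // selectInputStreams(...) in the real code
          // ... read ops from each stream and re-log them into the shared dir ...
        } finally {
          if (streams != null) {
            closeAll(streams);         // always runs, even if the copy loop threw
          }
        }
      }

      public static void main(String[] args) throws IOException {
        Closeable c = new Closeable() {
          @Override public void close() { System.out.println("closed"); }
        };
        copySegments(Arrays.asList(c));
      }
    }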

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java?rev=1516230&r1=1516229&r2=1516230&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java Wed Aug 21 17:47:10 2013
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hdfs.server.namenode;
 
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ADMIN;
+
 import java.io.IOException;
 import java.net.InetSocketAddress;
 import java.util.HashMap;
@@ -70,65 +71,30 @@ public class NameNodeHttpServer {
   public void start() throws IOException {
     final String infoHost = bindAddress.getHostName();
     int infoPort = bindAddress.getPort();
-
-    httpServer = new HttpServer("hdfs", infoHost, infoPort,
-                                infoPort == 0, conf,
-                                new AccessControlList(conf.get(DFS_ADMIN, " "))) {
-      {
-        // Add SPNEGO support to NameNode
-        if (UserGroupInformation.isSecurityEnabled()) {
-          initSpnego(conf,
-              DFSConfigKeys.DFS_NAMENODE_INTERNAL_SPNEGO_USER_NAME_KEY,
-              DFSUtil.getSpnegoKeytabKey(conf,
-                  DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY));
-        }
-        if (WebHdfsFileSystem.isEnabled(conf, LOG)) {
-          //add SPNEGO authentication filter for webhdfs
-          final String name = "SPNEGO";
-          final String classname = AuthFilter.class.getName();
-          final String pathSpec = WebHdfsFileSystem.PATH_PREFIX + "/*";
-          Map<String, String> params = getAuthFilterParams(conf);
-          defineFilter(webAppContext, name, classname, params,
-                       new String[]{pathSpec});
-          LOG.info("Added filter '" + name + "' (class=" + classname + ")");
-
-          // add webhdfs packages
-          addJerseyResourcePackage(
-            NamenodeWebHdfsMethods.class.getPackage().getName()
-            + ";" + Param.class.getPackage().getName(), pathSpec);
-        }
-      }
-
-      private Map<String, String> getAuthFilterParams(Configuration conf)
-        throws IOException {
-        Map<String, String> params = new HashMap<String, String>();
-        String principalInConf = conf
-          .get(DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY);
-        if (principalInConf != null && !principalInConf.isEmpty()) {
-          params
-            .put(
-              DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY,
-              SecurityUtil.getServerPrincipal(principalInConf,
-                                              bindAddress.getHostName()));
-        } else if (UserGroupInformation.isSecurityEnabled()) {
-          LOG.error("WebHDFS and security are enabled, but configuration property '" +
-                    DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY +
-                    "' is not set.");
-        }
-        String httpKeytab = conf.get(DFSUtil.getSpnegoKeytabKey(conf,
-            DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY));
-        if (httpKeytab != null && !httpKeytab.isEmpty()) {
-          params.put(
-            DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_KEYTAB_KEY,
-            httpKeytab);
-        } else if (UserGroupInformation.isSecurityEnabled()) {
-          LOG.error("WebHDFS and security are enabled, but configuration property '" +
-                    DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_KEYTAB_KEY +
-                    "' is not set.");
-        }
-        return params;
+    httpServer = new HttpServer.Builder().setName("hdfs")
+        .setBindAddress(infoHost).setPort(infoPort)
+        .setFindPort(infoPort == 0).setConf(conf).setACL(
+            new AccessControlList(conf.get(DFS_ADMIN, " ")))
+        .setSecurityEnabled(UserGroupInformation.isSecurityEnabled())
+        .setUsernameConfKey(
+            DFSConfigKeys.DFS_NAMENODE_INTERNAL_SPNEGO_USER_NAME_KEY)
+        .setKeytabConfKey(DFSUtil.getSpnegoKeytabKey(conf,
+            DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY)).build();
+    if (WebHdfsFileSystem.isEnabled(conf, HttpServer.LOG)) {
+      //add SPNEGO authentication filter for webhdfs
+      final String name = "SPNEGO";
+      final String classname = AuthFilter.class.getName();
+      final String pathSpec = WebHdfsFileSystem.PATH_PREFIX + "/*";
+      Map<String, String> params = getAuthFilterParams(conf);
+      httpServer.defineFilter(httpServer.getWebAppContext(), name, classname, params,
+          new String[]{pathSpec});
+      HttpServer.LOG.info("Added filter '" + name + "' (class=" + classname + ")");
+
+      // add webhdfs packages
+      httpServer.addJerseyResourcePackage(
+          NamenodeWebHdfsMethods.class.getPackage().getName()
+          + ";" + Param.class.getPackage().getName(), pathSpec);
       }
-    };
 
     boolean certSSL = conf.getBoolean(DFSConfigKeys.DFS_HTTPS_ENABLE_KEY, false);
     if (certSSL) {
@@ -153,6 +119,38 @@ public class NameNodeHttpServer {
     httpServer.start();
     httpAddress = new InetSocketAddress(bindAddress.getAddress(), httpServer.getPort());
   }
+  
+  private Map<String, String> getAuthFilterParams(Configuration conf)
+      throws IOException {
+    Map<String, String> params = new HashMap<String, String>();
+    String principalInConf = conf
+        .get(DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY);
+    if (principalInConf != null && !principalInConf.isEmpty()) {
+      params
+          .put(
+              DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY,
+              SecurityUtil.getServerPrincipal(principalInConf,
+                                              bindAddress.getHostName()));
+    } else if (UserGroupInformation.isSecurityEnabled()) {
+      HttpServer.LOG.error(
+          "WebHDFS and security are enabled, but configuration property '" +
+          DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY +
+          "' is not set.");
+    }
+    String httpKeytab = conf.get(DFSUtil.getSpnegoKeytabKey(conf,
+        DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY));
+    if (httpKeytab != null && !httpKeytab.isEmpty()) {
+      params.put(
+          DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_KEYTAB_KEY,
+          httpKeytab);
+    } else if (UserGroupInformation.isSecurityEnabled()) {
+      HttpServer.LOG.error(
+          "WebHDFS and security are enabled, but configuration property '" +
+          DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_KEYTAB_KEY +
+          "' is not set.");
+    }
+    return params;
+  }
 
 
   public void stop() throws Exception {
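
Worth noting: the setup that used to live in an anonymous HttpServer subclass with an instance initializer now happens through ordinary calls on the built server (defineFilter, addJerseyResourcePackage), and getAuthFilterParams becomes a private method. The structural difference in miniature; Server, defineFilter and the parameter key below are illustrative, not the Hadoop API:

    import java.util.HashMap;
    import java.util.Map;

    public class PostConstructionConfig {
      static class Server {
        void defineFilter(String name, Map<String, String> params) {
          System.out.println("filter " + name + " -> " + params);
        }
      }

      // Before: configuration buried in an anonymous subclass initializer.
      static Server oldStyle() {
        return new Server() {
          { defineFilter("SPNEGO", authFilterParams()); }
        };
      }

      // After: build first, then configure through the public surface.
      static Server newStyle() {
        Server server = new Server();
        server.defineFilter("SPNEGO", authFilterParams());
        return server;
      }

      static Map<String, String> authFilterParams() {
        Map<String, String> params = new HashMap<String, String>();
        params.put("kerberos.principal", "HTTP/_HOST@EXAMPLE.COM");   // illustrative
        return params;
      }

      public static void main(String[] args) {
        oldStyle();
        newStyle();
      }
    }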

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeMXBean.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeMXBean.java?rev=1516230&r1=1516229&r2=1516230&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeMXBean.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeMXBean.java Wed Aug 21 17:47:10 2013
@@ -188,6 +188,12 @@ public interface NameNodeMXBean {
    * @return the name journal status information, as a JSON string.
    */
   public String getNameJournalStatus();
+  
+  /**
+   * Get information about the transaction ID, including the last applied 
+   * transaction ID and the most recent checkpoint's transaction ID
+   */
+  public String getJournalTransactionInfo();
 
   /**
    * Gets the NN start time