Posted to common-commits@hadoop.apache.org by as...@apache.org on 2016/12/05 20:08:12 UTC

[1/6] hadoop git commit: HDFS-11181. Fuse wrapper has a typo. Contributed by Wei-Chiu Chuang.

Repository: hadoop
Updated Branches:
  refs/heads/YARN-5085 f885160f4 -> 8c4680852


HDFS-11181. Fuse wrapper has a typo. Contributed by Wei-Chiu Chuang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c51bfd29
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c51bfd29
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c51bfd29

Branch: refs/heads/YARN-5085
Commit: c51bfd29cd1e6ec619742f2c47ebfc8bbfb231b6
Parents: f885160
Author: Wei-Chiu Chuang <we...@apache.org>
Authored: Mon Dec 5 08:44:40 2016 -0800
Committer: Wei-Chiu Chuang <we...@apache.org>
Committed: Mon Dec 5 08:44:40 2016 -0800

----------------------------------------------------------------------
 .../src/main/native/fuse-dfs/fuse_dfs_wrapper.sh                   | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c51bfd29/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/fuse-dfs/fuse_dfs_wrapper.sh
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/fuse-dfs/fuse_dfs_wrapper.sh b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/fuse-dfs/fuse_dfs_wrapper.sh
index c52c5f9..d5bfd09 100755
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/fuse-dfs/fuse_dfs_wrapper.sh
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/fuse-dfs/fuse_dfs_wrapper.sh
@@ -43,7 +43,7 @@ done < <(find "$HADOOP_HOME/hadoop-client" -name "*.jar" -print0)
 while IFS= read -r -d '' file
 do
   export CLASSPATH=$CLASSPATH:$file
-done < <(find "$HADOOP_HOME/hhadoop-hdfs-project" -name "*.jar" -print0)
+done < <(find "$HADOOP_HOME/hadoop-hdfs-project" -name "*.jar" -print0)
 
 export CLASSPATH=$HADOOP_CONF_DIR:$CLASSPATH
 export PATH=$FUSEDFS_PATH:$PATH




[4/6] hadoop git commit: Revert "HDFS-11201. Spelling errors in the logging, help, assertions and exception messages. Contributed by Grant Sohn."

Posted by as...@apache.org.
Revert "HDFS-11201. Spelling errors in the logging, help, assertions and exception messages. Contributed by Grant Sohn."

This reverts commit b9522e86a55564c2ccb5ca3f1ca871965cbe74de.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1b5cceaf
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1b5cceaf
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1b5cceaf

Branch: refs/heads/YARN-5085
Commit: 1b5cceaffbdde50a87ede81552dc380832db8e79
Parents: b9522e8
Author: Wei-Chiu Chuang <we...@apache.org>
Authored: Mon Dec 5 10:54:43 2016 -0800
Committer: Wei-Chiu Chuang <we...@apache.org>
Committed: Mon Dec 5 10:54:43 2016 -0800

----------------------------------------------------------------------
 .../src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java     | 2 +-
 .../src/main/java/org/apache/hadoop/hdfs/util/LongBitFormat.java | 4 ++--
 .../main/java/org/apache/hadoop/lib/server/ServerException.java  | 2 +-
 .../java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtxCache.java   | 2 +-
 .../src/main/java/org/apache/hadoop/hdfs/DFSUtil.java            | 2 +-
 .../apache/hadoop/hdfs/server/blockmanagement/BlockManager.java  | 2 +-
 .../hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java      | 2 +-
 .../hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java | 2 +-
 .../hadoop/hdfs/server/diskbalancer/command/QueryCommand.java    | 2 +-
 .../org/apache/hadoop/hdfs/server/namenode/FSDirTruncateOp.java  | 2 +-
 .../org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java  | 2 +-
 .../java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java   | 4 ++--
 .../server/namenode/web/resources/NamenodeWebHdfsMethods.java    | 2 +-
 13 files changed, 15 insertions(+), 15 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1b5cceaf/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
index aabd6fd..5783f90 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
@@ -1052,7 +1052,7 @@ public class DFSInputStream extends FSInputStream
             reader.getNetworkDistance(), nread);
         if (nread != len) {
           throw new IOException("truncated return from reader.read(): " +
-              "expected " + len + ", got " + nread);
+              "excpected " + len + ", got " + nread);
         }
         DFSClientFaultInjector.get().readFromDatanodeDelay();
         return;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1b5cceaf/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/LongBitFormat.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/LongBitFormat.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/LongBitFormat.java
index db064e4..51ad08f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/LongBitFormat.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/LongBitFormat.java
@@ -57,11 +57,11 @@ public class LongBitFormat implements Serializable {
   public long combine(long value, long record) {
     if (value < MIN) {
       throw new IllegalArgumentException(
-          "Illegal value: " + NAME + " = " + value + " < MIN = " + MIN);
+          "Illagal value: " + NAME + " = " + value + " < MIN = " + MIN);
     }
     if (value > MAX) {
       throw new IllegalArgumentException(
-          "Illegal value: " + NAME + " = " + value + " > MAX = " + MAX);
+          "Illagal value: " + NAME + " = " + value + " > MAX = " + MAX);
     }
     return (record & ~MASK) | (value << OFFSET);
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1b5cceaf/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/server/ServerException.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/server/ServerException.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/server/ServerException.java
index fdca64e..e3759ce 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/server/ServerException.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/server/ServerException.java
@@ -38,7 +38,7 @@ public class ServerException extends XException {
     S04("Service [{0}] does not implement declared interface [{1}]"),
     S05("[{0}] is not a file"),
     S06("Could not load file [{0}], {1}"),
-    S07("Could not instantiate service class [{0}], {1}"),
+    S07("Could not instanciate service class [{0}], {1}"),
     S08("Could not load service classes, {0}"),
     S09("Could not set service [{0}] programmatically -server shutting down-, {1}"),
     S10("Service [{0}] requires service [{1}]"),

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1b5cceaf/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtxCache.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtxCache.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtxCache.java
index 2d8f676..e26fac5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtxCache.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtxCache.java
@@ -130,7 +130,7 @@ class OpenFileCtxCache {
           }
           toEvict = openFileMap.remove(pairs.getKey());
           Preconditions.checkState(toEvict == pairs.getValue(),
-              "The deleted entry is not the same as oldest found.");
+              "The deleted entry is not the same as odlest found.");
         }
       }
       openFileMap.put(h, context);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1b5cceaf/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
index 4664699..23166e2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
@@ -1364,7 +1364,7 @@ public class DFSUtil {
         DFSConfigKeys.DFS_HTTP_POLICY_DEFAULT);
     HttpConfig.Policy policy = HttpConfig.Policy.fromString(policyStr);
     if (policy == null) {
-      throw new HadoopIllegalArgumentException("Unrecognized value '"
+      throw new HadoopIllegalArgumentException("Unregonized value '"
           + policyStr + "' for " + DFSConfigKeys.DFS_HTTP_POLICY_KEY);
     }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1b5cceaf/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 05d538a..e60703b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -2772,7 +2772,7 @@ public class BlockManager implements BlockStatsMXBean {
       throws IOException {
     for (ReportedBlockInfo rbi : rbis) {
       if (LOG.isDebugEnabled()) {
-        LOG.debug("Processing previously queued message " + rbi);
+        LOG.debug("Processing previouly queued message " + rbi);
       }
       if (rbi.getReportedState() == null) {
         // This is a DELETE_BLOCK request

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1b5cceaf/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java
index 69dc9f9..8323140 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java
@@ -836,7 +836,7 @@ class BlockPoolSlice {
     } catch (Exception e) {
       // Any exception we need to revert back to read from disk
       // Log the error and return false
-      LOG.info("Exception occurred while reading the replicas cache file: "
+      LOG.info("Exception occured while reading the replicas cache file: "
           + replicaFile.getPath(), e );
       return false;
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1b5cceaf/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
index d41f9c3..08564de 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
@@ -1292,7 +1292,7 @@ public class FsVolumeImpl implements FsVolumeSpi {
     try {
       fileNames = IOUtils.listDirectory(dir, BlockDirFilter.INSTANCE);
     } catch (IOException ioe) {
-      LOG.warn("Exception occurred while compiling report: ", ioe);
+      LOG.warn("Exception occured while compiling report: ", ioe);
       // Initiate a check on disk failure.
       dataset.datanode.checkDiskErrorAsync();
       // Ignore this directory and proceed.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1b5cceaf/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/QueryCommand.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/QueryCommand.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/QueryCommand.java
index 9e60e48..a8adcbd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/QueryCommand.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/QueryCommand.java
@@ -97,7 +97,7 @@ public class QueryCommand extends Command {
     String header = "Query Plan queries a given data node about the " +
         "current state of disk balancer execution.\n\n";
 
-    String footer = "\nQuery command retrieves the plan ID and the current " +
+    String footer = "\nQuery command retrievs the plan ID and the current " +
         "running state. ";
 
     HelpFormatter helpFormatter = new HelpFormatter();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1b5cceaf/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirTruncateOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirTruncateOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirTruncateOp.java
index 41ec8e9..f2a1ee5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirTruncateOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirTruncateOp.java
@@ -134,7 +134,7 @@ final class FSDirTruncateOp {
       if (!onBlockBoundary) {
         // Open file for write, but don't log into edits
         long lastBlockDelta = file.computeFileSize() - newLength;
-        assert lastBlockDelta > 0 : "delta is 0 only if on block boundary";
+        assert lastBlockDelta > 0 : "delta is 0 only if on block bounday";
         truncateBlock = prepareFileForTruncate(fsn, iip, clientName,
             clientMachine, lastBlockDelta, null);
       }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1b5cceaf/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
index 11cdbc6..2990344 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
@@ -1082,7 +1082,7 @@ public class FSEditLogLoader {
       boolean removed = FSDirWriteFileOp.unprotectedRemoveBlock(
           fsDir, path, iip, file, oldBlock);
       if (!removed && !(op instanceof UpdateBlocksOp)) {
-        throw new IOException("Trying to delete non-existent block " + oldBlock);
+        throw new IOException("Trying to delete non-existant block " + oldBlock);
       }
     } else if (newBlocks.length > oldBlocks.length) {
       final boolean isStriped = ecPolicy != null;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1b5cceaf/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java
index f79130d..d6dd8ee 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java
@@ -1025,7 +1025,7 @@ public class NNStorage extends Storage implements Closeable,
     
     if (!blockpoolID.equals("") && !blockpoolID.equals(bpid)) {
       throw new InconsistentFSStateException(storage,
-          "Unexpected blockpoolID " + bpid + " . Expected " + blockpoolID);
+          "Unexepcted blockpoolID " + bpid + " . Expected " + blockpoolID);
     }
     setBlockPoolID(bpid);
   }
@@ -1154,4 +1154,4 @@ public class NNStorage extends Storage implements Closeable,
       }
     }
   }
-}
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1b5cceaf/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
index 2c31cd9..107d4ed 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
@@ -219,7 +219,7 @@ public class NamenodeWebHdfsMethods {
       final String remoteAddr) throws IOException {
     FSNamesystem fsn = namenode.getNamesystem();
     if (fsn == null) {
-      throw new IOException("Namesystem has not been initialized yet.");
+      throw new IOException("Namesystem has not been intialized yet.");
     }
     final BlockManager bm = fsn.getBlockManager();
     




[2/6] hadoop git commit: HADOOP-13847. KMSWebApp should close KeyProviderCryptoExtension. Contributed by John Zhuge.

Posted by as...@apache.org.
HADOOP-13847. KMSWebApp should close KeyProviderCryptoExtension. Contributed by John Zhuge.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/291df5c7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/291df5c7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/291df5c7

Branch: refs/heads/YARN-5085
Commit: 291df5c7fb713d5442ee29eb3f272127afb05a3c
Parents: c51bfd2
Author: Xiao Chen <xi...@apache.org>
Authored: Mon Dec 5 09:34:39 2016 -0800
Committer: Xiao Chen <xi...@apache.org>
Committed: Mon Dec 5 09:35:17 2016 -0800

----------------------------------------------------------------------
 .../apache/hadoop/crypto/key/KeyProviderCryptoExtension.java  | 5 +++--
 .../org/apache/hadoop/crypto/key/kms/server/KMSWebApp.java    | 7 ++++++-
 2 files changed, 9 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/291df5c7/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderCryptoExtension.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderCryptoExtension.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderCryptoExtension.java
index 1ecd9f6..0543222 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderCryptoExtension.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderCryptoExtension.java
@@ -427,8 +427,9 @@ public class KeyProviderCryptoExtension extends
 
   @Override
   public void close() throws IOException {
-    if (getKeyProvider() != null) {
-      getKeyProvider().close();
+    KeyProvider provider = getKeyProvider();
+    if (provider != null && provider != this) {
+      provider.close();
     }
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/291df5c7/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSWebApp.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSWebApp.java b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSWebApp.java
index cd773dd..40ae19f 100644
--- a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSWebApp.java
+++ b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSWebApp.java
@@ -40,9 +40,9 @@ import javax.servlet.ServletContextEvent;
 import javax.servlet.ServletContextListener;
 
 import java.io.File;
+import java.io.IOException;
 import java.net.URI;
 import java.net.URL;
-import java.util.List;
 
 @InterfaceAudience.Private
 public class KMSWebApp implements ServletContextListener {
@@ -215,6 +215,11 @@ public class KMSWebApp implements ServletContextListener {
 
   @Override
   public void contextDestroyed(ServletContextEvent sce) {
+    try {
+      keyProviderCryptoExtension.close();
+    } catch (IOException ioe) {
+      LOG.error("Error closing KeyProviderCryptoExtension", ioe);
+    }
     kmsAudit.shutdown();
     kmsAcls.stopReloader();
     jmxReporter.stop();
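
For context on the KeyProviderCryptoExtension#close() change above: getKeyProvider() can return the extension object itself, so closing the delegate unconditionally risks self-close recursion; the patch closes it only when it is a distinct, non-null provider. A minimal sketch of that guarded-close idiom, with illustrative names rather than the real Hadoop types:

  import java.io.Closeable;
  import java.io.IOException;

  // Sketch only: close the wrapped resource only when it is a distinct
  // object, mirroring the `provider != null && provider != this` guard.
  class GuardedWrapper implements Closeable {
    private Closeable delegate;      // may be null, or may alias this wrapper

    void setDelegate(Closeable d) { this.delegate = d; }

    @Override
    public void close() throws IOException {
      Closeable d = delegate;
      if (d != null && d != this) {  // avoid closing ourselves
        d.close();
      }
    }
  }

KMSWebApp then takes ownership of the extension's lifecycle: contextDestroyed() closes it before the rest of the teardown, logging any IOException rather than failing servlet shutdown.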




[3/6] hadoop git commit: HDFS-11201. Spelling errors in the logging, help, assertions and exception messages. Contributed by Grant Sohn.

Posted by as...@apache.org.
HDFS-11201. Spelling errors in the logging, help, assertions and exception messages. Contributed by Grant Sohn.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b9522e86
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b9522e86
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b9522e86

Branch: refs/heads/YARN-5085
Commit: b9522e86a55564c2ccb5ca3f1ca871965cbe74de
Parents: 291df5c
Author: Wei-Chiu Chuang <we...@apache.org>
Authored: Mon Dec 5 09:37:12 2016 -0800
Committer: Wei-Chiu Chuang <we...@apache.org>
Committed: Mon Dec 5 10:48:25 2016 -0800

----------------------------------------------------------------------
 .../src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java     | 2 +-
 .../src/main/java/org/apache/hadoop/hdfs/util/LongBitFormat.java | 4 ++--
 .../main/java/org/apache/hadoop/lib/server/ServerException.java  | 2 +-
 .../java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtxCache.java   | 2 +-
 .../src/main/java/org/apache/hadoop/hdfs/DFSUtil.java            | 2 +-
 .../apache/hadoop/hdfs/server/blockmanagement/BlockManager.java  | 2 +-
 .../hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java      | 2 +-
 .../hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java | 2 +-
 .../hadoop/hdfs/server/diskbalancer/command/QueryCommand.java    | 2 +-
 .../org/apache/hadoop/hdfs/server/namenode/FSDirTruncateOp.java  | 2 +-
 .../org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java  | 2 +-
 .../java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java   | 4 ++--
 .../server/namenode/web/resources/NamenodeWebHdfsMethods.java    | 2 +-
 13 files changed, 15 insertions(+), 15 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b9522e86/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
index 5783f90..aabd6fd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
@@ -1052,7 +1052,7 @@ public class DFSInputStream extends FSInputStream
             reader.getNetworkDistance(), nread);
         if (nread != len) {
           throw new IOException("truncated return from reader.read(): " +
-              "excpected " + len + ", got " + nread);
+              "expected " + len + ", got " + nread);
         }
         DFSClientFaultInjector.get().readFromDatanodeDelay();
         return;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b9522e86/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/LongBitFormat.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/LongBitFormat.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/LongBitFormat.java
index 51ad08f..db064e4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/LongBitFormat.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/LongBitFormat.java
@@ -57,11 +57,11 @@ public class LongBitFormat implements Serializable {
   public long combine(long value, long record) {
     if (value < MIN) {
       throw new IllegalArgumentException(
-          "Illagal value: " + NAME + " = " + value + " < MIN = " + MIN);
+          "Illegal value: " + NAME + " = " + value + " < MIN = " + MIN);
     }
     if (value > MAX) {
       throw new IllegalArgumentException(
-          "Illagal value: " + NAME + " = " + value + " > MAX = " + MAX);
+          "Illegal value: " + NAME + " = " + value + " > MAX = " + MAX);
     }
     return (record & ~MASK) | (value << OFFSET);
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b9522e86/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/server/ServerException.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/server/ServerException.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/server/ServerException.java
index e3759ce..fdca64e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/server/ServerException.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/server/ServerException.java
@@ -38,7 +38,7 @@ public class ServerException extends XException {
     S04("Service [{0}] does not implement declared interface [{1}]"),
     S05("[{0}] is not a file"),
     S06("Could not load file [{0}], {1}"),
-    S07("Could not instanciate service class [{0}], {1}"),
+    S07("Could not instantiate service class [{0}], {1}"),
     S08("Could not load service classes, {0}"),
     S09("Could not set service [{0}] programmatically -server shutting down-, {1}"),
     S10("Service [{0}] requires service [{1}]"),

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b9522e86/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtxCache.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtxCache.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtxCache.java
index e26fac5..2d8f676 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtxCache.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtxCache.java
@@ -130,7 +130,7 @@ class OpenFileCtxCache {
           }
           toEvict = openFileMap.remove(pairs.getKey());
           Preconditions.checkState(toEvict == pairs.getValue(),
-              "The deleted entry is not the same as odlest found.");
+              "The deleted entry is not the same as oldest found.");
         }
       }
       openFileMap.put(h, context);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b9522e86/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
index 23166e2..4664699 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
@@ -1364,7 +1364,7 @@ public class DFSUtil {
         DFSConfigKeys.DFS_HTTP_POLICY_DEFAULT);
     HttpConfig.Policy policy = HttpConfig.Policy.fromString(policyStr);
     if (policy == null) {
-      throw new HadoopIllegalArgumentException("Unregonized value '"
+      throw new HadoopIllegalArgumentException("Unrecognized value '"
           + policyStr + "' for " + DFSConfigKeys.DFS_HTTP_POLICY_KEY);
     }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b9522e86/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index e60703b..05d538a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -2772,7 +2772,7 @@ public class BlockManager implements BlockStatsMXBean {
       throws IOException {
     for (ReportedBlockInfo rbi : rbis) {
       if (LOG.isDebugEnabled()) {
-        LOG.debug("Processing previouly queued message " + rbi);
+        LOG.debug("Processing previously queued message " + rbi);
       }
       if (rbi.getReportedState() == null) {
         // This is a DELETE_BLOCK request

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b9522e86/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java
index 8323140..69dc9f9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java
@@ -836,7 +836,7 @@ class BlockPoolSlice {
     } catch (Exception e) {
       // Any exception we need to revert back to read from disk
       // Log the error and return false
-      LOG.info("Exception occured while reading the replicas cache file: "
+      LOG.info("Exception occurred while reading the replicas cache file: "
           + replicaFile.getPath(), e );
       return false;
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b9522e86/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
index 08564de..d41f9c3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
@@ -1292,7 +1292,7 @@ public class FsVolumeImpl implements FsVolumeSpi {
     try {
       fileNames = IOUtils.listDirectory(dir, BlockDirFilter.INSTANCE);
     } catch (IOException ioe) {
-      LOG.warn("Exception occured while compiling report: ", ioe);
+      LOG.warn("Exception occurred while compiling report: ", ioe);
       // Initiate a check on disk failure.
       dataset.datanode.checkDiskErrorAsync();
       // Ignore this directory and proceed.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b9522e86/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/QueryCommand.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/QueryCommand.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/QueryCommand.java
index a8adcbd..9e60e48 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/QueryCommand.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/QueryCommand.java
@@ -97,7 +97,7 @@ public class QueryCommand extends Command {
     String header = "Query Plan queries a given data node about the " +
         "current state of disk balancer execution.\n\n";
 
-    String footer = "\nQuery command retrievs the plan ID and the current " +
+    String footer = "\nQuery command retrieves the plan ID and the current " +
         "running state. ";
 
     HelpFormatter helpFormatter = new HelpFormatter();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b9522e86/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirTruncateOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirTruncateOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirTruncateOp.java
index f2a1ee5..41ec8e9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirTruncateOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirTruncateOp.java
@@ -134,7 +134,7 @@ final class FSDirTruncateOp {
       if (!onBlockBoundary) {
         // Open file for write, but don't log into edits
         long lastBlockDelta = file.computeFileSize() - newLength;
-        assert lastBlockDelta > 0 : "delta is 0 only if on block bounday";
+        assert lastBlockDelta > 0 : "delta is 0 only if on block boundary";
         truncateBlock = prepareFileForTruncate(fsn, iip, clientName,
             clientMachine, lastBlockDelta, null);
       }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b9522e86/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
index 2990344..11cdbc6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
@@ -1082,7 +1082,7 @@ public class FSEditLogLoader {
       boolean removed = FSDirWriteFileOp.unprotectedRemoveBlock(
           fsDir, path, iip, file, oldBlock);
       if (!removed && !(op instanceof UpdateBlocksOp)) {
-        throw new IOException("Trying to delete non-existant block " + oldBlock);
+        throw new IOException("Trying to delete non-existent block " + oldBlock);
       }
     } else if (newBlocks.length > oldBlocks.length) {
       final boolean isStriped = ecPolicy != null;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b9522e86/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java
index d6dd8ee..f79130d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java
@@ -1025,7 +1025,7 @@ public class NNStorage extends Storage implements Closeable,
     
     if (!blockpoolID.equals("") && !blockpoolID.equals(bpid)) {
       throw new InconsistentFSStateException(storage,
-          "Unexepcted blockpoolID " + bpid + " . Expected " + blockpoolID);
+          "Unexpected blockpoolID " + bpid + " . Expected " + blockpoolID);
     }
     setBlockPoolID(bpid);
   }
@@ -1154,4 +1154,4 @@ public class NNStorage extends Storage implements Closeable,
       }
     }
   }
-}
\ No newline at end of file
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b9522e86/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
index 107d4ed..2c31cd9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
@@ -219,7 +219,7 @@ public class NamenodeWebHdfsMethods {
       final String remoteAddr) throws IOException {
     FSNamesystem fsn = namenode.getNamesystem();
     if (fsn == null) {
-      throw new IOException("Namesystem has not been intialized yet.");
+      throw new IOException("Namesystem has not been initialized yet.");
     }
     final BlockManager bm = fsn.getBlockManager();
     




[5/6] hadoop git commit: YARN-5559. Analyse 2.8.0/3.0.0 jdiff reports and fix any issues. Contributed by Akira Ajisaka & Wangda Tan

Posted by as...@apache.org.
YARN-5559. Analyse 2.8.0/3.0.0 jdiff reports and fix any issues. Contributed by Akira Ajisaka & Wangda Tan


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/43ebff2e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/43ebff2e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/43ebff2e

Branch: refs/heads/YARN-5085
Commit: 43ebff2e354142bddcb42755766a965ae8a503a6
Parents: 1b5ccea
Author: Jian He <ji...@apache.org>
Authored: Mon Dec 5 11:39:34 2016 -0800
Committer: Jian He <ji...@apache.org>
Committed: Mon Dec 5 11:39:34 2016 -0800

----------------------------------------------------------------------
 .../GetClusterNodeLabelsResponse.java           | 50 ++++++++++++++++----
 .../yarn/client/api/impl/YarnClientImpl.java    |  2 +-
 .../pb/GetClusterNodeLabelsResponsePBImpl.java  | 41 ++++++++++++++--
 .../yarn/security/ContainerTokenIdentifier.java | 25 ++++++++++
 .../state/InvalidStateTransitionException.java  | 22 ++-------
 .../state/InvalidStateTransitonException.java   | 19 ++++++--
 .../resourcemanager/TestClientRMService.java    |  4 +-
 7 files changed, 125 insertions(+), 38 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/43ebff2e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetClusterNodeLabelsResponse.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetClusterNodeLabelsResponse.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetClusterNodeLabelsResponse.java
index cf6e683..cb2ccfb 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetClusterNodeLabelsResponse.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetClusterNodeLabelsResponse.java
@@ -18,7 +18,9 @@
 
 package org.apache.hadoop.yarn.api.protocolrecords;
 
+import java.util.ArrayList;
 import java.util.List;
+import java.util.Set;
 
 import org.apache.hadoop.classification.InterfaceAudience.Public;
 import org.apache.hadoop.classification.InterfaceStability.Evolving;
@@ -28,18 +30,48 @@ import org.apache.hadoop.yarn.util.Records;
 @Public
 @Evolving
 public abstract class GetClusterNodeLabelsResponse {
+  /**
+   * Creates a new instance.
+   *
+   * @param labels Node labels
+   * @return response
+   * @deprecated Use {@link #newInstance(List)} instead.
+   */
+  @Deprecated
+  public static GetClusterNodeLabelsResponse newInstance(Set<String> labels) {
+    List<NodeLabel> list = new ArrayList<>();
+    for (String label : labels) {
+      list.add(NodeLabel.newInstance(label));
+    }
+    return newInstance(list);
+  }
+
   public static GetClusterNodeLabelsResponse newInstance(List<NodeLabel> labels) {
-    GetClusterNodeLabelsResponse request =
+    GetClusterNodeLabelsResponse response =
         Records.newRecord(GetClusterNodeLabelsResponse.class);
-    request.setNodeLabels(labels);
-    return request;
+    response.setNodeLabelList(labels);
+    return response;
   }
 
-  @Public
-  @Evolving
-  public abstract void setNodeLabels(List<NodeLabel> labels);
+  public abstract void setNodeLabelList(List<NodeLabel> labels);
+
+  public abstract List<NodeLabel> getNodeLabelList();
+
+  /**
+   * Set node labels to the response.
+   *
+   * @param labels Node labels
+   * @deprecated Use {@link #setNodeLabelList(List)} instead.
+   */
+  @Deprecated
+  public abstract void setNodeLabels(Set<String> labels);
 
-  @Public
-  @Evolving
-  public abstract List<NodeLabel> getNodeLabels();
+  /**
+   * Get node labels of the response.
+   *
+   * @return Node labels
+   * @deprecated Use {@link #getNodeLabelList()} instead.
+   */
+  @Deprecated
+  public abstract Set<String> getNodeLabels();
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/43ebff2e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/YarnClientImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/YarnClientImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/YarnClientImpl.java
index a0f9678..50f1b490a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/YarnClientImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/YarnClientImpl.java
@@ -899,7 +899,7 @@ public class YarnClientImpl extends YarnClient {
   @Override
   public List<NodeLabel> getClusterNodeLabels() throws YarnException, IOException {
     return rmClient.getClusterNodeLabels(
-        GetClusterNodeLabelsRequest.newInstance()).getNodeLabels();
+        GetClusterNodeLabelsRequest.newInstance()).getNodeLabelList();
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hadoop/blob/43ebff2e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetClusterNodeLabelsResponsePBImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetClusterNodeLabelsResponsePBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetClusterNodeLabelsResponsePBImpl.java
index f569fb2..227abe9 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetClusterNodeLabelsResponsePBImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetClusterNodeLabelsResponsePBImpl.java
@@ -19,7 +19,9 @@
 package org.apache.hadoop.yarn.api.protocolrecords.impl.pb;
 
 import java.util.ArrayList;
+import java.util.HashSet;
 import java.util.List;
+import java.util.Set;
 
 import org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodeLabelsResponse;
 import org.apache.hadoop.yarn.api.records.NodeLabel;
@@ -46,7 +48,7 @@ public class GetClusterNodeLabelsResponsePBImpl extends
     viaProto = true;
   }
 
-  public GetClusterNodeLabelsResponseProto getProto() {
+  public synchronized GetClusterNodeLabelsResponseProto getProto() {
     mergeLocalToProto();
     proto = viaProto ? proto : builder.build();
     viaProto = true;
@@ -101,14 +103,43 @@ public class GetClusterNodeLabelsResponsePBImpl extends
   }
 
   @Override
-  public void setNodeLabels(List<NodeLabel> updatedNodeLabels) {
+  public synchronized void setNodeLabelList(List<NodeLabel> nodeLabels) {
     maybeInitBuilder();
     this.updatedNodeLabels = new ArrayList<>();
-    if (updatedNodeLabels == null) {
+    if (nodeLabels == null) {
       builder.clearNodeLabels();
       return;
     }
-    this.updatedNodeLabels.addAll(updatedNodeLabels);
+    this.updatedNodeLabels.addAll(nodeLabels);
+  }
+
+  /**
+   * @deprecated Use {@link #getNodeLabelList()} instead.
+   */
+  @Override
+  @Deprecated
+  public synchronized Set<String> getNodeLabels() {
+    Set<String> set = new HashSet<>();
+    List<NodeLabel> labelList = getNodeLabelList();
+    if (labelList != null) {
+      for (NodeLabel label : labelList) {
+        set.add(label.getName());
+      }
+    }
+    return set;
+  }
+
+  /**
+   * @deprecated Use {@link #setNodeLabelList(List)} instead.
+   */
+  @Override
+  @Deprecated
+  public void setNodeLabels(Set<String> labels) {
+    List<NodeLabel> list = new ArrayList<>();
+    for (String s : labels) {
+      list.add(NodeLabel.newInstance(s));
+    }
+    setNodeLabelList(list);
   }
 
   private void initLocalNodeLabels() {
@@ -121,7 +152,7 @@ public class GetClusterNodeLabelsResponsePBImpl extends
   }
 
   @Override
-  public List<NodeLabel> getNodeLabels() {
+  public synchronized List<NodeLabel> getNodeLabelList() {
     if (this.updatedNodeLabels != null) {
       return this.updatedNodeLabels;
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/43ebff2e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/ContainerTokenIdentifier.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/ContainerTokenIdentifier.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/ContainerTokenIdentifier.java
index f8e9463..4b34998 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/ContainerTokenIdentifier.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/ContainerTokenIdentifier.java
@@ -73,6 +73,31 @@ public class ContainerTokenIdentifier extends TokenIdentifier {
         CommonNodeLabelsManager.NO_LABEL, ContainerType.TASK);
   }
 
+  /**
+   * Creates a instance.
+   *
+   * @param appSubmitter appSubmitter
+   * @param containerID container ID
+   * @param creationTime creation time
+   * @param expiryTimeStamp expiry timestamp
+   * @param hostName hostname
+   * @param logAggregationContext log aggregation context
+   * @param masterKeyId master key ID
+   * @param priority priority
+   * @param r resource needed by the container
+   * @param rmIdentifier ResourceManager identifier
+   * @deprecated Use one of the other constructors instead.
+   */
+  @Deprecated
+  public ContainerTokenIdentifier(ContainerId containerID, String hostName,
+      String appSubmitter, Resource r, long expiryTimeStamp, int masterKeyId,
+      long rmIdentifier, Priority priority, long creationTime,
+      LogAggregationContext logAggregationContext) {
+    this(containerID, hostName, appSubmitter, r, expiryTimeStamp, masterKeyId,
+        rmIdentifier, priority, creationTime, logAggregationContext,
+        CommonNodeLabelsManager.NO_LABEL);
+  }
+
   public ContainerTokenIdentifier(ContainerId containerID, String hostName,
       String appSubmitter, Resource r, long expiryTimeStamp, int masterKeyId,
       long rmIdentifier, Priority priority, long creationTime,
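
The restored constructor above follows the standard deprecation recipe: the old overload stays as a thin shim that delegates to the fuller one, filling in the default (NO_LABEL) for the parameter it lacks. A toy sketch of the same recipe with hypothetical names:

  class Token {
    private final String label;
    private final long expiry;

    /** @deprecated Use {@link #Token(long, String)} instead. */
    @Deprecated
    Token(long expiry) {
      this(expiry, "");            // delegate, supplying the old default
    }

    Token(long expiry, String label) {
      this.expiry = expiry;
      this.label = label;
    }
  }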

http://git-wip-us.apache.org/repos/asf/hadoop/blob/43ebff2e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/state/InvalidStateTransitionException.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/state/InvalidStateTransitionException.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/state/InvalidStateTransitionException.java
index d10902a..51eafc9 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/state/InvalidStateTransitionException.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/state/InvalidStateTransitionException.java
@@ -20,7 +20,6 @@ package org.apache.hadoop.yarn.state;
 
 import org.apache.hadoop.classification.InterfaceAudience.Public;
 import org.apache.hadoop.classification.InterfaceStability.Evolving;
-import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
 
 /**
  * The exception that happens when you call invalid state transition.
@@ -28,24 +27,13 @@ import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
  */
 @Public
 @Evolving
-public class InvalidStateTransitionException extends YarnRuntimeException {
+@SuppressWarnings("deprecation")
+public class InvalidStateTransitionException extends
+    InvalidStateTransitonException {
 
-  private static final long serialVersionUID = -6188669113571351684L;
-  private Enum<?> currentState;
-  private Enum<?> event;
+  private static final long serialVersionUID = 8610511635996283691L;
 
   public InvalidStateTransitionException(Enum<?> currentState, Enum<?> event) {
-    super("Invalid event: " + event + " at " + currentState);
-    this.currentState = currentState;
-    this.event = event;
+    super(currentState, event);
   }
-
-  public Enum<?> getCurrentState() {
-    return currentState;
-  }
-
-  public Enum<?> getEvent() {
-    return event;
-  }
-
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/43ebff2e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/state/InvalidStateTransitonException.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/state/InvalidStateTransitonException.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/state/InvalidStateTransitonException.java
index eeb1b97..82f46cc 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/state/InvalidStateTransitonException.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/state/InvalidStateTransitonException.java
@@ -20,20 +20,31 @@ package org.apache.hadoop.yarn.state;
 
 import org.apache.hadoop.classification.InterfaceAudience.Public;
 import org.apache.hadoop.classification.InterfaceStability.Evolving;
+import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
 
 /** @deprecated Use {@link InvalidStateTransitionException} instead. */
 
 @Public
 @Evolving
 @Deprecated
-public class InvalidStateTransitonException extends
-    InvalidStateTransitionException {
+public class InvalidStateTransitonException extends YarnRuntimeException {
 
-  private static final long serialVersionUID = 8610511635996283691L;
+  private static final long serialVersionUID = -6188669113571351684L;
+  private Enum<?> currentState;
+  private Enum<?> event;
 
   public InvalidStateTransitonException(Enum<?> currentState, Enum<?> event) {
-    super(currentState, event);
+    super("Invalid event: " + event + " at " + currentState);
+    this.currentState = currentState;
+    this.event = event;
   }
 
+  public Enum<?> getCurrentState() {
+    return currentState;
+  }
+
+  public Enum<?> getEvent() {
+    return event;
+  }
 
 }
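
The flip above is the usual recipe for repairing a misspelled public class without breaking compiled clients: the old (deprecated) name becomes the supertype, so existing catch clauses and instanceof checks still match the corrected subclass. A toy, self-contained illustration with hypothetical names:

  // Old clients wrote `catch (OldMisspeltException e)`; because the old
  // name is now the supertype, throwing the corrected subclass still matches.
  class OldMisspeltException extends RuntimeException {
    OldMisspeltException(String msg) { super(msg); }
  }

  class CorrectException extends OldMisspeltException {
    CorrectException(String msg) { super(msg); }
  }

  class CompatDemo {
    public static void main(String[] args) {
      try {
        throw new CorrectException("invalid transition");
      } catch (OldMisspeltException e) {   // legacy handler still fires
        System.out.println("caught via old name: " + e.getMessage());
      }
    }
  }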

http://git-wip-us.apache.org/repos/asf/hadoop/blob/43ebff2e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java
index f35efa0..12cdcf1 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java
@@ -1534,7 +1534,7 @@ public class TestClientRMService {
     // Get node labels collection
     GetClusterNodeLabelsResponse response = client
         .getClusterNodeLabels(GetClusterNodeLabelsRequest.newInstance());
-    Assert.assertTrue(response.getNodeLabels().containsAll(
+    Assert.assertTrue(response.getNodeLabelList().containsAll(
         Arrays.asList(labelX, labelY)));
 
     // Get node labels mapping
@@ -1605,7 +1605,7 @@ public class TestClientRMService {
     // Get node labels collection
     GetClusterNodeLabelsResponse response = client
         .getClusterNodeLabels(GetClusterNodeLabelsRequest.newInstance());
-    Assert.assertTrue(response.getNodeLabels().containsAll(
+    Assert.assertTrue(response.getNodeLabelList().containsAll(
         Arrays.asList(labelX, labelY, labelZ)));
 
     // Get labels to nodes mapping



[6/6] hadoop git commit: HDFS-11094. Send back HAState along with NamespaceInfo during a versionRequest as an optional parameter. Contributed by Eric Badger

Posted by as...@apache.org.
HDFS-11094. Send back HAState along with NamespaceInfo during a versionRequest as an optional parameter. Contributed by Eric Badger


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8c468085
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8c468085
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8c468085

Branch: refs/heads/YARN-5085
Commit: 8c4680852b20ad0e65e77dd123c9ba5bb6f2fa39
Parents: 43ebff2
Author: Mingliang Liu <li...@apache.org>
Authored: Mon Dec 5 11:34:13 2016 -0800
Committer: Mingliang Liu <li...@apache.org>
Committed: Mon Dec 5 11:48:58 2016 -0800

----------------------------------------------------------------------
 .../apache/hadoop/hdfs/protocolPB/PBHelper.java | 76 +++++++++++++-------
 .../hdfs/server/datanode/BPOfferService.java    | 10 ++-
 .../hdfs/server/datanode/BPServiceActor.java    |  4 +-
 .../hdfs/server/namenode/FSNamesystem.java      |  8 ++-
 .../hdfs/server/protocol/NamespaceInfo.java     | 26 +++++++
 .../hadoop-hdfs/src/main/proto/HdfsServer.proto |  2 +
 .../server/datanode/TestBPOfferService.java     | 31 ++++++++
 .../hdfs/server/namenode/TestFSNamesystem.java  | 21 ++++++
 8 files changed, 148 insertions(+), 30 deletions(-)
----------------------------------------------------------------------

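In short: the DataNode can now learn a NameNode's HA state during the initial version handshake instead of waiting for the first heartbeat. A hedged condensation of the BPOfferService hunk below, with names taken from the diffs (not standalone code):

    // versionRequest() is the first handshake phase; its NamespaceInfo
    // now optionally carries the NN's HAServiceState.
    NamespaceInfo nsInfo = namenode.versionRequest();
    if (nsInfo.getState() == HAServiceState.ACTIVE
        && bpServiceToActive == null) {
      bpServiceToActive = actor;  // acknowledge the ACTIVE NN immediately
    }
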

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8c468085/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
index 78371f5..1e6d882 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
@@ -26,7 +26,7 @@ import com.google.protobuf.ByteString;
 
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
-import org.apache.hadoop.ha.proto.HAServiceProtocolProtos;
+import org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HAServiceStateProto;
 import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
@@ -338,7 +338,8 @@ public class PBHelper {
     StorageInfoProto storage = info.getStorageInfo();
     return new NamespaceInfo(storage.getNamespceID(), storage.getClusterID(),
         info.getBlockPoolID(), storage.getCTime(), info.getBuildVersion(),
-        info.getSoftwareVersion(), info.getCapabilities());
+        info.getSoftwareVersion(), info.getCapabilities(),
+        convert(info.getState()));
   }
 
   public static NamenodeCommand convert(NamenodeCommandProto cmd) {
@@ -744,43 +745,68 @@ public class PBHelper {
   }
   
   public static NamespaceInfoProto convert(NamespaceInfo info) {
-    return NamespaceInfoProto.newBuilder()
-        .setBlockPoolID(info.getBlockPoolID())
+    NamespaceInfoProto.Builder builder = NamespaceInfoProto.newBuilder();
+    builder.setBlockPoolID(info.getBlockPoolID())
         .setBuildVersion(info.getBuildVersion())
         .setUnused(0)
         .setStorageInfo(PBHelper.convert((StorageInfo)info))
         .setSoftwareVersion(info.getSoftwareVersion())
-        .setCapabilities(info.getCapabilities())
-        .build();
+        .setCapabilities(info.getCapabilities());
+    HAServiceState state = info.getState();
+    if (state != null) {
+      builder.setState(convert(info.getState()));
+    }
+    return builder.build();
   }
 
-  public static NNHAStatusHeartbeat convert(NNHAStatusHeartbeatProto s) {
-    if (s == null) return null;
-    switch (s.getState()) {
+  public static HAServiceState convert(HAServiceStateProto s) {
+    if (s == null) {
+      return null;
+    }
+    switch (s) {
+    case INITIALIZING:
+      return HAServiceState.INITIALIZING;
     case ACTIVE:
-      return new NNHAStatusHeartbeat(HAServiceState.ACTIVE, s.getTxid());
+      return HAServiceState.ACTIVE;
     case STANDBY:
-      return new NNHAStatusHeartbeat(HAServiceState.STANDBY, s.getTxid());
+      return HAServiceState.STANDBY;
     default:
-      throw new IllegalArgumentException("Unexpected NNHAStatusHeartbeat.State:" + s.getState());
+      throw new IllegalArgumentException("Unexpected HAServiceStateProto:"
+          + s);
     }
   }
 
+  public static HAServiceStateProto convert(HAServiceState s) {
+    if (s == null) {
+      return null;
+    }
+    switch (s) {
+    case INITIALIZING:
+      return HAServiceStateProto.INITIALIZING;
+    case ACTIVE:
+      return HAServiceStateProto.ACTIVE;
+    case STANDBY:
+      return HAServiceStateProto.STANDBY;
+    default:
+      throw new IllegalArgumentException("Unexpected HAServiceState:"
+          + s);
+    }
+  }
+
+  public static NNHAStatusHeartbeat convert(NNHAStatusHeartbeatProto s) {
+    if (s == null) {
+      return null;
+    }
+    return new NNHAStatusHeartbeat(convert(s.getState()), s.getTxid());
+  }
+
   public static NNHAStatusHeartbeatProto convert(NNHAStatusHeartbeat hb) {
-    if (hb == null) return null;
-    NNHAStatusHeartbeatProto.Builder builder =
-      NNHAStatusHeartbeatProto.newBuilder();
-    switch (hb.getState()) {
-      case ACTIVE:
-        builder.setState(HAServiceProtocolProtos.HAServiceStateProto.ACTIVE);
-        break;
-      case STANDBY:
-        builder.setState(HAServiceProtocolProtos.HAServiceStateProto.STANDBY);
-        break;
-      default:
-        throw new IllegalArgumentException("Unexpected NNHAStatusHeartbeat.State:" +
-            hb.getState());
+    if (hb == null) {
+      return null;
     }
+    NNHAStatusHeartbeatProto.Builder builder =
+        NNHAStatusHeartbeatProto.newBuilder();
+    builder.setState(convert(hb.getState()));
     builder.setTxid(hb.getTxId());
     return builder.build();
   }

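The converter symmetry above can be sanity-checked with a round trip. A hedged sketch using only constructors and convert() overloads visible in these hunks; the literal IDs are illustrative, and the generated class is assumed to be HdfsServerProtos.NamespaceInfoProto per HdfsServer.proto:

    NamespaceInfo withState = new NamespaceInfo(1, "cid", "bp-1", 0L,
        HAServiceState.ACTIVE);
    NamespaceInfoProto proto = PBHelper.convert(withState);
    // The optional state survives the round trip when it was set...
    assert PBHelper.convert(proto).getState() == HAServiceState.ACTIVE;
    // ...and the field is simply left unset when it was null.
    NamespaceInfo withoutState = new NamespaceInfo(1, "cid", "bp-1", 0L);
    assert !PBHelper.convert(withoutState).hasState();
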
http://git-wip-us.apache.org/repos/asf/hadoop/blob/8c468085/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
index 00102eb..00e6b3e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
@@ -307,8 +307,16 @@ class BPOfferService {
    * verifies that this namespace matches (eg to prevent a misconfiguration
    * where a StandbyNode from a different cluster is specified)
    */
-  void verifyAndSetNamespaceInfo(NamespaceInfo nsInfo) throws IOException {
+  void verifyAndSetNamespaceInfo(BPServiceActor actor, NamespaceInfo nsInfo)
+    throws IOException {
     writeLock();
+
+    if (nsInfo.getState() == HAServiceState.ACTIVE
+        && bpServiceToActive == null) {
+      LOG.info("Acknowledging ACTIVE Namenode during handshake " + actor);
+      bpServiceToActive = actor;
+    }
+
     try {
       if (this.bpNSInfo == null) {
         this.bpNSInfo = nsInfo;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8c468085/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
index f3247fc..dffe14f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
@@ -269,11 +269,11 @@ class BPServiceActor implements Runnable {
     // First phase of the handshake with NN - get the namespace
     // info.
     NamespaceInfo nsInfo = retrieveNamespaceInfo();
-    
+
     // Verify that this matches the other NN in this HA pair.
     // This also initializes our block pool in the DN if we are
     // the first NN connection for this BP.
-    bpos.verifyAndSetNamespaceInfo(nsInfo);
+    bpos.verifyAndSetNamespaceInfo(this, nsInfo);
     
     // Second phase of the handshake with the NN.
     register(nsInfo);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8c468085/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 8a750a0..90fb924 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -1594,7 +1594,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
   NamespaceInfo unprotectedGetNamespaceInfo() {
     return new NamespaceInfo(getFSImage().getStorage().getNamespaceID(),
         getClusterId(), getBlockPoolId(),
-        getFSImage().getStorage().getCTime());
+        getFSImage().getStorage().getCTime(), getState());
   }
 
   /**
@@ -4531,12 +4531,16 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
       return 0;
     }
   }
-  
+
   @Metric
   public int getBlockCapacity() {
     return blockManager.getCapacity();
   }
 
+  public HAServiceState getState() {
+    return haContext == null ? null : haContext.getState().getServiceState();
+  }
+
   @Override // FSNamesystemMBean
   public String getFSState() {
     return isInSafeMode() ? "safeMode" : "Operational";

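One inference worth spelling out (it follows from the hunks, not the commit message): getState() returns null until haContext is initialized, which is exactly why the PBHelper builder above guards setState() with a null check. A hedged sketch; unprotectedGetNamespaceInfo() is package-private, so this would live in test code under the same package:

    assert fsn.getState() == null;  // haContext not yet set
    NamespaceInfoProto proto =
        PBHelper.convert(fsn.unprotectedGetNamespaceInfo());
    assert !proto.hasState();       // optional field left unset, not defaulted
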
http://git-wip-us.apache.org/repos/asf/hadoop/blob/8c468085/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamespaceInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamespaceInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamespaceInfo.java
index 90d0aac..66ce9ee 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamespaceInfo.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamespaceInfo.java
@@ -22,6 +22,7 @@ import java.io.IOException;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.common.StorageInfo;
@@ -44,6 +45,7 @@ public class NamespaceInfo extends StorageInfo {
   String blockPoolID = "";    // id of the block pool
   String softwareVersion;
   long capabilities;
+  HAServiceState state;
 
   // only authoritative on the server-side to determine advertisement to
   // clients.  enum will update the supported values
@@ -88,6 +90,14 @@ public class NamespaceInfo extends StorageInfo {
         CAPABILITIES_SUPPORTED);
   }
 
+  public NamespaceInfo(int nsID, String clusterID, String bpID,
+      long cT, String buildVersion, String softwareVersion,
+      long capabilities, HAServiceState st) {
+    this(nsID, clusterID, bpID, cT, buildVersion, softwareVersion,
+        capabilities);
+    this.state = st;
+  }
+
   // for use by server and/or client
   public NamespaceInfo(int nsID, String clusterID, String bpID,
       long cT, String buildVersion, String softwareVersion,
@@ -105,6 +115,13 @@ public class NamespaceInfo extends StorageInfo {
     this(nsID, clusterID, bpID, cT, Storage.getBuildVersion(),
         VersionInfo.getVersion());
   }
+
+  public NamespaceInfo(int nsID, String clusterID, String bpID,
+      long cT, HAServiceState st) {
+    this(nsID, clusterID, bpID, cT, Storage.getBuildVersion(),
+        VersionInfo.getVersion());
+    this.state = st;
+  }
   
   public long getCapabilities() {
     return capabilities;
@@ -115,6 +132,11 @@ public class NamespaceInfo extends StorageInfo {
     this.capabilities = capabilities;
   }
 
+  @VisibleForTesting
+  public void setState(HAServiceState state) {
+    this.state = state;
+  }
+
   public boolean isCapabilitySupported(Capability capability) {
     Preconditions.checkArgument(capability != Capability.UNKNOWN,
         "cannot test for unknown capability");
@@ -134,6 +156,10 @@ public class NamespaceInfo extends StorageInfo {
     return softwareVersion;
   }
 
+  public HAServiceState getState() {
+    return state;
+  }
+
   @Override
   public String toString(){
     return super.toString() + ";bpid=" + blockPoolID;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8c468085/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/HdfsServer.proto
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/HdfsServer.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/HdfsServer.proto
index 910e03b..d7deebf 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/HdfsServer.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/HdfsServer.proto
@@ -32,6 +32,7 @@ option java_generate_equals_and_hash = true;
 package hadoop.hdfs;
 
 import "hdfs.proto";
+import "HAServiceProtocol.proto";
 
 /**
  * Block access token information
@@ -101,6 +102,7 @@ message NamespaceInfoProto {
   required StorageInfoProto storageInfo = 4;// Node information
   required string softwareVersion = 5;      // Software version number (e.g. 2.0.0)
   optional uint64 capabilities = 6 [default = 0]; // feature flags
+  optional hadoop.common.HAServiceStateProto state = 7;
 }
 
 /**

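Since field 7 is a proto2 optional, the change is wire-compatible both ways: an old NameNode never sets it and an old DataNode ignores it. Note that the PBHelper hunk above reads getState() unconditionally, so an absent field decodes as the proto default (INITIALIZING); a stricter reader could distinguish absence explicitly, as in this hedged sketch:

    // Unset optional enums read back as their first value, so hasState()
    // is the only reliable presence check on the wire.
    HAServiceState state = info.hasState()
        ? PBHelper.convert(info.getState())
        : null;
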
http://git-wip-us.apache.org/repos/asf/hadoop/blob/8c468085/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java
index 2d50c75..f8f0a3c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.hdfs.server.datanode;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertSame;
 import static org.junit.Assert.assertTrue;
 
@@ -799,4 +800,34 @@ public class TestBPOfferService {
     }
     return -1;
   }
+
+  /**
+   * DN should acknowledge the ACTIVE NN from the versionRequest HA state.
+   */
+  @Test
+  public void testNNHAStateUpdateFromVersionRequest() throws Exception {
+    final BPOfferService bpos = setupBPOSForNNs(mockNN1, mockNN2);
+    BPServiceActor actor = bpos.getBPServiceActors().get(0);
+    bpos.start();
+    waitForInitialization(bpos);
+    // Should start with neither NN as active.
+    assertNull(bpos.getActiveNN());
+
+    // getNamespaceInfo() will not include HAServiceState
+    NamespaceInfo nsInfo = mockNN1.versionRequest();
+    bpos.verifyAndSetNamespaceInfo(actor, nsInfo);
+
+    assertNull(bpos.getActiveNN());
+
+    // Change mock so getNamespaceInfo() will include HAServiceState
+    Mockito.doReturn(new NamespaceInfo(1, FAKE_CLUSTERID, FAKE_BPID, 0,
+        HAServiceState.ACTIVE)).when(mockNN1).versionRequest();
+
+    // Update the bpos NamespaceInfo
+    nsInfo = mockNN1.versionRequest();
+    bpos.verifyAndSetNamespaceInfo(actor, nsInfo);
+
+    assertNotNull(bpos.getActiveNN());
+
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8c468085/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystem.java
index f02c679..6a0dd6f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystem.java
@@ -33,6 +33,7 @@ import java.util.Collection;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.ha.HAServiceProtocol;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
@@ -43,6 +44,7 @@ import org.apache.hadoop.hdfs.server.namenode.ha.HAContext;
 import org.apache.hadoop.hdfs.server.namenode.ha.HAState;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
 import org.apache.hadoop.hdfs.server.namenode.top.TopAuditLogger;
+import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
 import org.junit.After;
 import org.junit.Test;
 import org.mockito.Mockito;
@@ -155,6 +157,25 @@ public class TestFSNamesystem {
   }
 
   @Test
+  public void testHAStateInNamespaceInfo() throws IOException {
+    Configuration conf = new Configuration();
+
+    FSEditLog fsEditLog = Mockito.mock(FSEditLog.class);
+    FSImage fsImage = Mockito.mock(FSImage.class);
+    Mockito.when(fsImage.getEditLog()).thenReturn(fsEditLog);
+    NNStorage nnStorage = Mockito.mock(NNStorage.class);
+    Mockito.when(fsImage.getStorage()).thenReturn(nnStorage);
+
+    FSNamesystem fsNamesystem = new FSNamesystem(conf, fsImage);
+    FSNamesystem fsn = Mockito.spy(fsNamesystem);
+    Mockito.when(fsn.getState()).thenReturn(
+        HAServiceProtocol.HAServiceState.ACTIVE);
+
+    NamespaceInfo nsInfo = fsn.unprotectedGetNamespaceInfo();
+    assertNotNull(nsInfo.getState());
+  }
+
+  @Test
   public void testReset() throws Exception {
     Configuration conf = new Configuration();
     FSEditLog fsEditLog = Mockito.mock(FSEditLog.class);

