Posted to common-commits@hadoop.apache.org by as...@apache.org on 2015/12/06 08:13:03 UTC

[01/38] hadoop git commit: HADOOP-11505. Various native parts use bswap incorrectly and unportably (Alan Burlison via aw)

Repository: hadoop
Updated Branches:
  refs/heads/yarn-2877 9f256d1d7 -> 742632e34


HADOOP-11505. Various native parts use bswap incorrectly and unportably (Alan Burlison via aw)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6725e7f1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6725e7f1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6725e7f1

Branch: refs/heads/yarn-2877
Commit: 6725e7f1beb96177b0b59a6082a05869aab2e37b
Parents: 9f256d1
Author: Allen Wittenauer <aw...@apache.org>
Authored: Fri Nov 27 18:25:45 2015 -0800
Committer: Allen Wittenauer <aw...@apache.org>
Committed: Fri Nov 27 18:25:45 2015 -0800

----------------------------------------------------------------------
 hadoop-common-project/hadoop-common/CHANGES.txt |   3 +
 .../hadoop-common/src/CMakeLists.txt            | 125 +++++++++++++++++++
 .../src/org/apache/hadoop/util/bulk_crc32.c     |  17 +--
 .../src/CMakeLists.txt                          |   1 +
 .../mapred/nativetask/INativeComparable.java    |   4 +-
 .../src/main/native/src/NativeTask.h            |   9 --
 .../src/main/native/src/codec/BlockCodec.cc     |   4 +-
 .../src/main/native/src/codec/Lz4Codec.cc       |   4 +-
 .../src/main/native/src/codec/SnappyCodec.cc    |   4 +-
 .../main/native/src/handler/CombineHandler.cc   |  15 +--
 .../src/handler/MCollectorOutputHandler.cc      |  10 +-
 .../src/handler/MCollectorOutputHandler.h       |   2 -
 .../src/main/native/src/lib/Buffers.h           |  13 +-
 .../src/main/native/src/lib/IFile.cc            |   4 +-
 .../src/main/native/src/lib/IFile.h             |   4 +-
 .../main/native/src/lib/NativeObjectFactory.cc  |  16 +--
 .../src/main/native/src/lib/SpillInfo.cc        |   4 +-
 .../src/main/native/src/lib/commons.h           |   1 +
 .../src/main/native/src/lib/primitives.h        |  50 ++------
 .../src/main/native/src/util/WritableUtils.cc   |  26 ++--
 .../src/main/native/test/TestIFile.cc           |   2 +-
 .../src/main/native/test/TestSort.cc            |   8 +-
 .../src/main/native/test/lib/TestKVBuffer.cc    |   4 +-
 .../native/test/lib/TestMemBlockIterator.cc     |   2 +-
 .../src/main/native/test/lib/TestMemoryBlock.cc |   6 +-
 .../main/native/test/lib/TestPartitionBucket.cc |  26 ++--
 26 files changed, 225 insertions(+), 139 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6725e7f1/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 7cdf21b..4f35432 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -537,6 +537,9 @@ Trunk (Unreleased)
 
     HADOOP-12553. [JDK8] Fix javadoc error caused by illegal tag. (aajisaka)
 
+    HADOOP-11505. Various native parts use bswap incorrectly and unportably
+    (Alan Burlison via aw)
+
   OPTIMIZATIONS
 
     HADOOP-7761. Improve the performance of raw comparisons. (todd)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6725e7f1/hadoop-common-project/hadoop-common/src/CMakeLists.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/CMakeLists.txt b/hadoop-common-project/hadoop-common/src/CMakeLists.txt
index 63bb773..a8762d5 100644
--- a/hadoop-common-project/hadoop-common/src/CMakeLists.txt
+++ b/hadoop-common-project/hadoop-common/src/CMakeLists.txt
@@ -41,6 +41,131 @@ endif()
 # Configure JNI.
 include(HadoopJNI)
 
+#
+# Endian configuration, as per http://austingroupbugs.net/view.php?id=162#c665
+#
+
+# Work out the endianness, set header macro values.
+include(TestBigEndian)
+include(CheckIncludeFile)
+include(CheckSymbolExists)
+test_big_endian(_bigendian)
+if(_bigendian)
+  set(HADOOP_BYTE_ORDER "HADOOP_BIG_ENDIAN")
+else()
+  set(HADOOP_BYTE_ORDER "HADOOP_LITTLE_ENDIAN")
+endif()
+
+# Linux, NetBSD, FreeBSD and OpenBSD all provide htoXXX definitions in endian.h or sys/endian.h.
+check_include_file("endian.h" _endian_h)
+if (_endian_h)
+  set(HADOOP_ENDIAN_H "endian.h")
+else()
+  check_include_file("sys/endian.h" _sys_endian_h)
+  if (_sys_endian_h)
+    set(HADOOP_ENDIAN_H "sys/endian.h")
+  endif()
+endif()
+if(DEFINED HADOOP_ENDIAN_H)
+check_symbol_exists("be64toh" ${HADOOP_ENDIAN_H} _be64toh)
+  if( _be64toh)
+    set(HADOOP_HTOBE16 "htobe16")
+    set(HADOOP_HTOLE16 "htole16")
+    set(HADOOP_BE16TOH "be16toh")
+    set(HADOOP_LE16TOH "le16toh")
+    set(HADOOP_HTOBE32 "htobe32")
+    set(HADOOP_HTOLE32 "htole32")
+    set(HADOOP_BE32TOH "be32toh")
+    set(HADOOP_LE32TOH "le32toh")
+    set(HADOOP_HTOBE64 "htobe64")
+    set(HADOOP_HTOLE64 "htole64")
+    set(HADOOP_BE64TOH "be64toh")
+    set(HADOOP_LE64TOH "le64toh")
+    set(_have_endian TRUE)
+    unset(_be64toh)
+  else()
+    message(FATAL_ERROR "endian.h located but doesn't contain be64toh")
+  endif()
+endif()
+
+# Solaris doesn't provide htoXXX, we have to provide alternatives.
+if(NOT _have_endian)
+  check_include_file("sys/byteorder.h" _sys_byteorder_h)
+  if(_sys_byteorder_h)
+    set(HADOOP_ENDIAN_H "sys/byteorder.h")
+    check_symbol_exists("BSWAP_64" ${HADOOP_ENDIAN_H} _bswap_64)
+  endif()
+  if(_sys_byteorder_h AND _bswap_64)
+    if(_bigendian)
+      set(HADOOP_HTOBE16 "")
+      set(HADOOP_HTOLE16 "BSWAP_16")
+      set(HADOOP_BE16TOH "")
+      set(HADOOP_LE16TOH "BSWAP_16")
+      set(HADOOP_HTOBE32 "")
+      set(HADOOP_HTOLE32 "BSWAP_32")
+      set(HADOOP_BE32TOH "")
+      set(HADOOP_LE32TOH "BSWAP_32")
+      set(HADOOP_HTOBE64 "")
+      set(HADOOP_HTOLE64 "BSWAP_64")
+      set(HADOOP_BE64TOH "")
+      set(HADOOP_LE64TOH "BSWAP_64")
+    else()
+      set(HADOOP_HTOBE16 "BSWAP_16")
+      set(HADOOP_HTOLE16 "")
+      set(HADOOP_BE16TOH "BSWAP_16")
+      set(HADOOP_LE16TOH "")
+      set(HADOOP_HTOBE32 "BSWAP_32")
+      set(HADOOP_HTOLE32 "")
+      set(HADOOP_BE32TOH "BSWAP_32")
+      set(HADOOP_LE32TOH "")
+      set(HADOOP_HTOBE64 "BSWAP_64")
+      set(HADOOP_HTOLE64 "")
+      set(HADOOP_BE64TOH "BSWAP_64")
+      set(HADOOP_LE64TOH "")
+    endif()
+    set(_have_endian TRUE)
+    unset(_sys_byteorder_h)
+    unset(_bswap_64)
+  endif()
+endif()
+
+# OSX uses libkern/OSByteOrder.h and OSSwapXtoY.
+if(NOT _have_endian)
+  check_include_file("libkern/OSByteOrder.h" _libkern_osbyteorder_h)
+  if(_libkern_osbyteorder_h)
+    set(HADOOP_ENDIAN_H "libkern/OSByteOrder.h")
+    check_symbol_exists("OSSwapHostToLittleInt64" ${HADOOP_ENDIAN_H} _osswaphosttolittleint64)
+  endif()
+  if(_libkern_osbyteorder_h AND _osswaphosttolittleint64)
+    set(HADOOP_HTOBE16 "OSSwapHostToBigInt16")
+    set(HADOOP_HTOLE16 "OSSwapHostToLittleInt16")
+    set(HADOOP_BE16TOH "OSSwapBigToHostInt16")
+    set(HADOOP_LE16TOH "OSSwapLittleToHostInt16")
+    set(HADOOP_HTOBE32 "OSSwapHostToBigInt32")
+    set(HADOOP_HTOLE32 "OSSwapHostToLittleInt32")
+    set(HADOOP_BE32TOH "OSSwapBigToHostInt32")
+    set(HADOOP_LE32TOH "OSSwapLittleToHostInt32")
+    set(HADOOP_HTOBE64 "OSSwapHostToBigInt64")
+    set(HADOOP_HTOLE64 "OSSwapHostToLittleInt64")
+    set(HADOOP_BE64TOH "OSSwapBigToHostInt64")
+    set(HADOOP_LE64TOH "OSSwapLittleToHostInt64")
+    set(_have_endian TRUE)
+    unset(_libkern_osbyteorder_h)
+    unset(_osswaphosttolittleint64)
+  endif()
+endif()
+
+# Bail if we don't know the endian definitions for this platform.
+if(NOT _have_endian)
+  message(FATAL_ERROR "Can't provide endianness definitions for this platform")
+endif()
+
+# Configure the hadoop_endian.h header file.
+configure_file(${CMAKE_SOURCE_DIR}/hadoop_endian.h.cmake ${CMAKE_BINARY_DIR}/hadoop_endian.h)
+unset(_bigendian)
+unset(_have_endian)
+unset(HADOOP_ENDIAN_H)
+
 # Require zlib.
 set(STORED_CMAKE_FIND_LIBRARY_SUFFIXES ${CMAKE_FIND_LIBRARY_SUFFIXES})
 hadoop_set_find_shared_library_version("1")
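
For context, the configure_file() call above expands hadoop_endian.h.cmake into hadoop_endian.h in the build tree; the template itself is not part of this diff. On a little-endian glibc host the generated header would plausibly reduce to the sketch below (the hadoop_* macro names match the ones used in the C/C++ changes later in this commit, but the include guard, the byte-order constants, and the exact template layout are assumptions):

    /* Hypothetical hadoop_endian.h as produced by configure_file() on a
     * little-endian Linux host. The real template, hadoop_endian.h.cmake,
     * is not shown in this commit excerpt. */
    #ifndef HADOOP_ENDIAN_H_GENERATED
    #define HADOOP_ENDIAN_H_GENERATED
    #include <endian.h>                /* HADOOP_ENDIAN_H resolved to endian.h */

    #define HADOOP_LITTLE_ENDIAN 1234  /* assumed constants */
    #define HADOOP_BIG_ENDIAN    4321
    #define HADOOP_BYTE_ORDER    HADOOP_LITTLE_ENDIAN

    /* 32- and 64-bit conversions; the 16-bit variants are omitted here. */
    #define hadoop_htobe32(x) htobe32(x)
    #define hadoop_htole32(x) htole32(x)
    #define hadoop_be32toh(x) be32toh(x)
    #define hadoop_le32toh(x) le32toh(x)
    #define hadoop_htobe64(x) htobe64(x)
    #define hadoop_htole64(x) htole64(x)
    #define hadoop_be64toh(x) be64toh(x)
    #define hadoop_le64toh(x) le64toh(x)
    #endif

On Solaris the same macros would expand to BSWAP_32/BSWAP_64 (or to nothing, depending on host byte order), and on OS X to the OSSwapXtoY functions, exactly as selected by the branches above.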

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6725e7f1/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/util/bulk_crc32.c
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/util/bulk_crc32.c b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/util/bulk_crc32.c
index b3bb699..988ccf2 100644
--- a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/util/bulk_crc32.c
+++ b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/util/bulk_crc32.c
@@ -37,6 +37,7 @@
 #include "crc32c_tables.h"
 #include "bulk_crc32.h"
 #include "gcc_optimizations.h"
+#include "hadoop_endian.h"
 
 #define CRC_INITIAL_VAL 0xffffffff
 
@@ -163,7 +164,7 @@ static uint32_t crc32c_sb8(uint32_t crc, const uint8_t *buf, size_t length) {
   for (li=0; li < running_length/8; li++) {
 	uint32_t term1;
 	uint32_t term2;
-    crc ^= *(uint32_t *)buf;
+    crc ^= hadoop_htole32(*(uint32_t *)buf);
     buf += 4;
     term1 = CRC32C_T8_7[crc & 0x000000FF] ^
         CRC32C_T8_6[(crc >> 8) & 0x000000FF];
@@ -171,10 +172,10 @@ static uint32_t crc32c_sb8(uint32_t crc, const uint8_t *buf, size_t length) {
     crc = term1 ^
         CRC32C_T8_5[term2 & 0x000000FF] ^ 
         CRC32C_T8_4[(term2 >> 8) & 0x000000FF];
-    term1 = CRC32C_T8_3[(*(uint32_t *)buf) & 0x000000FF] ^
-        CRC32C_T8_2[((*(uint32_t *)buf) >> 8) & 0x000000FF];
+    term1 = CRC32C_T8_3[hadoop_htole32(*(uint32_t *)buf) & 0x000000FF] ^
+        CRC32C_T8_2[(hadoop_htole32(*(uint32_t *)buf) >> 8) & 0x000000FF];
     
-    term2 = (*(uint32_t *)buf) >> 16;
+    term2 = hadoop_htole32((*(uint32_t *)buf)) >> 16;
     crc =  crc ^ 
         term1 ^    
         CRC32C_T8_1[term2  & 0x000000FF] ^  
@@ -209,7 +210,7 @@ static uint32_t crc32_zlib_sb8(
   for (li=0; li < running_length/8; li++) {
 	uint32_t term1;
 	uint32_t term2;
-    crc ^= *(uint32_t *)buf;
+    crc ^= hadoop_htole32(*(uint32_t *)buf);
     buf += 4;
     term1 = CRC32_T8_7[crc & 0x000000FF] ^
         CRC32_T8_6[(crc >> 8) & 0x000000FF];
@@ -217,10 +218,10 @@ static uint32_t crc32_zlib_sb8(
     crc = term1 ^
         CRC32_T8_5[term2 & 0x000000FF] ^ 
         CRC32_T8_4[(term2 >> 8) & 0x000000FF];
-    term1 = CRC32_T8_3[(*(uint32_t *)buf) & 0x000000FF] ^
-        CRC32_T8_2[((*(uint32_t *)buf) >> 8) & 0x000000FF];
+    term1 = CRC32_T8_3[hadoop_htole32(*(uint32_t *)buf) & 0x000000FF] ^
+        CRC32_T8_2[(hadoop_htole32(*(uint32_t *)buf) >> 8) & 0x000000FF];
     
-    term2 = (*(uint32_t *)buf) >> 16;
+    term2 = hadoop_htole32(*(uint32_t *)buf) >> 16;
     crc =  crc ^ 
         term1 ^    
         CRC32_T8_1[term2  & 0x000000FF] ^  
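
The sliced-by-8 CRC tables index the input through a uint32 whose low byte must be the first byte of the buffer, i.e. the word has to be read as little-endian. The old code loaded it in host order, which silently assumed a little-endian CPU; wrapping the load in hadoop_htole32() keeps it a no-op there and adds the byte swap on big-endian hosts (htole32 and le32toh perform the same swap, so either name yields the right value). A minimal standalone sketch of the effect, assuming the generated hadoop_endian.h header described earlier:

    /* Illustrative only: a portable "load 4 bytes as little-endian" helper,
     * which is what the hadoop_htole32() wrappers above give the CRC loops. */
    #include <stdint.h>
    #include <string.h>
    #include "hadoop_endian.h"

    static inline uint32_t load_le32(const uint8_t *p) {
      uint32_t v;
      memcpy(&v, p, sizeof(v));   /* native-order load */
      return hadoop_htole32(v);   /* identity on LE hosts, byte swap on BE */
    }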

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6725e7f1/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/CMakeLists.txt
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/CMakeLists.txt b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/CMakeLists.txt
index f878a94..99428b0 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/CMakeLists.txt
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/CMakeLists.txt
@@ -90,6 +90,7 @@ include_directories(
     ${SRC}/src/util
     ${SRC}/src/lib
     ${SRC}/test
+    ../../../../hadoop-common-project/hadoop-common/target/native
     ${CMAKE_CURRENT_SOURCE_DIR}
     ${CMAKE_BINARY_DIR}
     ${JNI_INCLUDE_DIRS}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6725e7f1/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/java/org/apache/hadoop/mapred/nativetask/INativeComparable.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/java/org/apache/hadoop/mapred/nativetask/INativeComparable.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/java/org/apache/hadoop/mapred/nativetask/INativeComparable.java
index 1ec05db..df6570a 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/java/org/apache/hadoop/mapred/nativetask/INativeComparable.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/java/org/apache/hadoop/mapred/nativetask/INativeComparable.java
@@ -42,8 +42,8 @@ import org.apache.hadoop.classification.InterfaceStability;
  * <code>
  *   int HivePlatform::HiveKeyComparator(const char * src, uint32_t srcLength,
  *   const char * dest, uint32_t destLength) {
- *     uint32_t sl = bswap(*(uint32_t*)src);
- *     uint32_t dl = bswap(*(uint32_t*)dest);
+ *     uint32_t sl = hadoop_be32toh(*(uint32_t*)src);
+ *     uint32_t dl = hadoop_be32toh(*(uint32_t*)dest);
  *     return NativeObjectFactory::BytesComparator(src + 4, sl, dest + 4, dl);
  *   }
  * </code>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6725e7f1/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/src/NativeTask.h
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/src/NativeTask.h b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/src/NativeTask.h
index ba026f5..f1336ef 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/src/NativeTask.h
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/src/NativeTask.h
@@ -40,15 +40,6 @@ enum NativeObjectType {
   BatchHandlerType = 1,
 };
 
-/**
- * Enduim setting
- *
- */
-enum Endium {
-  LITTLE_ENDIUM = 0,
-  LARGE_ENDIUM = 1
-};
-
 #define NATIVE_COMBINER "native.combiner.class"
 #define NATIVE_PARTITIONER "native.partitioner.class"
 #define NATIVE_MAPPER "native.mapper.class"

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6725e7f1/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/src/codec/BlockCodec.cc
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/src/codec/BlockCodec.cc b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/src/codec/BlockCodec.cc
index ce36239..7ce26f1 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/src/codec/BlockCodec.cc
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/src/codec/BlockCodec.cc
@@ -104,8 +104,8 @@ int32_t BlockDecompressStream::read(void * buff, uint32_t length) {
       THROW_EXCEPTION(IOException, "readFully get incomplete data");
     }
     _compressedBytesRead += rd;
-    sizes[0] = bswap(sizes[0]);
-    sizes[1] = bswap(sizes[1]);
+    sizes[0] = hadoop_be32toh(sizes[0]);
+    sizes[1] = hadoop_be32toh(sizes[1]);
     if (sizes[0] <= length) {
       uint32_t len = decompressOneBlock(sizes[1], buff, sizes[0]);
       if (len != sizes[0]) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6725e7f1/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/src/codec/Lz4Codec.cc
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/src/codec/Lz4Codec.cc b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/src/codec/Lz4Codec.cc
index 48c96b5..23c6c46 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/src/codec/Lz4Codec.cc
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/src/codec/Lz4Codec.cc
@@ -38,8 +38,8 @@ void Lz4CompressStream::compressOneBlock(const void * buff, uint32_t length) {
   int ret = LZ4_compress((char*)buff, _tempBuffer + 8, length);
   if (ret > 0) {
     compressedLength = ret;
-    ((uint32_t*)_tempBuffer)[0] = bswap(length);
-    ((uint32_t*)_tempBuffer)[1] = bswap((uint32_t)compressedLength);
+    ((uint32_t*)_tempBuffer)[0] = hadoop_be32toh(length);
+    ((uint32_t*)_tempBuffer)[1] = hadoop_be32toh((uint32_t)compressedLength);
     _stream->write(_tempBuffer, compressedLength + 8);
     _compressedBytesWritten += (compressedLength + 8);
   } else {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6725e7f1/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/src/codec/SnappyCodec.cc
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/src/codec/SnappyCodec.cc b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/src/codec/SnappyCodec.cc
index a0417e0..04380ac 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/src/codec/SnappyCodec.cc
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/src/codec/SnappyCodec.cc
@@ -37,8 +37,8 @@ void SnappyCompressStream::compressOneBlock(const void * buff, uint32_t length)
   snappy_status ret = snappy_compress((const char*)buff, length, _tempBuffer + 8,
       &compressedLength);
   if (ret == SNAPPY_OK) {
-    ((uint32_t*)_tempBuffer)[0] = bswap(length);
-    ((uint32_t*)_tempBuffer)[1] = bswap((uint32_t)compressedLength);
+    ((uint32_t*)_tempBuffer)[0] = hadoop_be32toh(length);
+    ((uint32_t*)_tempBuffer)[1] = hadoop_be32toh((uint32_t)compressedLength);
     _stream->write(_tempBuffer, compressedLength + 8);
     _compressedBytesWritten += (compressedLength + 8);
   } else if (ret == SNAPPY_INVALID_INPUT) {
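
Lz4CompressStream and SnappyCompressStream both emit the same 8-byte block header: the uncompressed length followed by the compressed length, each as a big-endian uint32, ahead of the compressed payload; BlockDecompressStream above reads those two fields back with hadoop_be32toh(). The swap is its own inverse, so the write path can use the same macro. A hedged standalone sketch of that framing (the helper names are hypothetical, not part of the commit):

    /* Sketch of the codec block framing: [orig len BE][comp len BE][payload]. */
    #include <stdint.h>
    #include <string.h>
    #include "hadoop_endian.h"

    static void write_block_header(uint8_t *out,
                                   uint32_t originalLen,
                                   uint32_t compressedLen) {
      uint32_t hdr[2];
      hdr[0] = hadoop_be32toh(originalLen);     /* host -> big-endian; same
                                                   macro the codecs use */
      hdr[1] = hadoop_be32toh(compressedLen);
      memcpy(out, hdr, sizeof(hdr));
    }

    static void read_block_header(const uint8_t *in,
                                  uint32_t *originalLen,
                                  uint32_t *compressedLen) {
      uint32_t hdr[2];
      memcpy(hdr, in, sizeof(hdr));
      *originalLen   = hadoop_be32toh(hdr[0]);  /* big-endian -> host */
      *compressedLen = hadoop_be32toh(hdr[1]);
    }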

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6725e7f1/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/src/handler/CombineHandler.cc
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/src/handler/CombineHandler.cc b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/src/handler/CombineHandler.cc
index 5f3863e..b18d057 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/src/handler/CombineHandler.cc
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/src/handler/CombineHandler.cc
@@ -15,6 +15,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
 #include "CombineHandler.h"
 
 namespace NativeTask {
@@ -48,8 +49,8 @@ uint32_t CombineHandler::feedDataToJavaInWritableSerialization() {
 
   if (_kvCached) {
     uint32_t kvLength = _key.outerLength + _value.outerLength + KVBuffer::headerLength();
-    outputInt(bswap(_key.outerLength));
-    outputInt(bswap(_value.outerLength));
+    outputInt(hadoop_be32toh(_key.outerLength));
+    outputInt(hadoop_be32toh(_value.outerLength));
     outputKeyOrValue(_key, _kType);
     outputKeyOrValue(_value, _vType);
 
@@ -73,8 +74,8 @@ uint32_t CombineHandler::feedDataToJavaInWritableSerialization() {
     } else {
       firstKV = false;
       //write final key length and final value length
-      outputInt(bswap(_key.outerLength));
-      outputInt(bswap(_value.outerLength));
+      outputInt(hadoop_be32toh(_key.outerLength));
+      outputInt(hadoop_be32toh(_value.outerLength));
       outputKeyOrValue(_key, _kType);
       outputKeyOrValue(_value, _vType);
 
@@ -101,7 +102,7 @@ void CombineHandler::outputKeyOrValue(SerializeInfo & KV, KeyValueType type) {
     output(KV.buffer.data(), KV.buffer.length());
     break;
   case BytesType:
-    outputInt(bswap(KV.buffer.length()));
+    outputInt(hadoop_be32toh(KV.buffer.length()));
     output(KV.buffer.data(), KV.buffer.length());
     break;
   default:
@@ -202,8 +203,8 @@ void CombineHandler::write(char * buf, uint32_t length) {
   uint32_t outputRecordCount = 0;
   while (remain > 0) {
     kv = (KVBuffer *)pos;
-    kv->keyLength = bswap(kv->keyLength);
-    kv->valueLength = bswap(kv->valueLength);
+    kv->keyLength = hadoop_be32toh(kv->keyLength);
+    kv->valueLength = hadoop_be32toh(kv->valueLength);
     _writer->write(kv->getKey(), kv->keyLength, kv->getValue(), kv->valueLength);
     outputRecordCount++;
     remain -= kv->length();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6725e7f1/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/src/handler/MCollectorOutputHandler.cc
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/src/handler/MCollectorOutputHandler.cc b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/src/handler/MCollectorOutputHandler.cc
index 7e4ae44..4921b33 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/src/handler/MCollectorOutputHandler.cc
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/src/handler/MCollectorOutputHandler.cc
@@ -30,7 +30,7 @@ using std::vector;
 namespace NativeTask {
 
 MCollectorOutputHandler::MCollectorOutputHandler()
-    : _collector(NULL), _dest(NULL), _endium(LARGE_ENDIUM) {
+    : _collector(NULL), _dest(NULL) {
 }
 
 MCollectorOutputHandler::~MCollectorOutputHandler() {
@@ -73,11 +73,9 @@ void MCollectorOutputHandler::handleInput(ByteBuffer & in) {
       THROW_EXCEPTION(IOException, "k/v meta information incomplete");
     }
 
-    if (_endium == LARGE_ENDIUM) {
-      kvBuffer->partitionId = bswap(kvBuffer->partitionId);
-      kvBuffer->buffer.keyLength = bswap(kvBuffer->buffer.keyLength);
-      kvBuffer->buffer.valueLength = bswap(kvBuffer->buffer.valueLength);
-    }
+    kvBuffer->partitionId = hadoop_be32toh(kvBuffer->partitionId);
+    kvBuffer->buffer.keyLength = hadoop_be32toh(kvBuffer->buffer.keyLength);
+    kvBuffer->buffer.valueLength = hadoop_be32toh(kvBuffer->buffer.valueLength);
 
     uint32_t kvLength = kvBuffer->buffer.length();
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6725e7f1/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/src/handler/MCollectorOutputHandler.h
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/src/handler/MCollectorOutputHandler.h b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/src/handler/MCollectorOutputHandler.h
index fe4635f..2e21806 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/src/handler/MCollectorOutputHandler.h
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/src/handler/MCollectorOutputHandler.h
@@ -35,8 +35,6 @@ private:
   // state info for large KV pairs
   char * _dest;
 
-  Endium _endium;
-
 public:
   MCollectorOutputHandler();
   virtual ~MCollectorOutputHandler();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6725e7f1/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/src/lib/Buffers.h
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/src/lib/Buffers.h b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/src/lib/Buffers.h
index 4929426..09606d8 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/src/lib/Buffers.h
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/src/lib/Buffers.h
@@ -115,7 +115,7 @@ public:
    * read uint32_t big endian
    */
   inline uint32_t read_uint32_be() {
-    return bswap(read_uint32_le());
+    return hadoop_be32toh(read_uint32_le());
   }
 };
 
@@ -198,7 +198,7 @@ public:
   }
 
   inline void write_uint32_be(uint32_t v) {
-    write_uint32_le(bswap(v));
+    write_uint32_le(hadoop_be32toh(v));
   }
 
   inline void write_uint64_le(uint64_t v) {
@@ -211,7 +211,7 @@ public:
   }
 
   inline void write_uint64_be(uint64_t v) {
-    write_uint64_le(bswap64(v));
+    write_uint64_le(hadoop_be64toh(v));
   }
 
   inline void write_vlong(int64_t v) {
@@ -278,12 +278,11 @@ struct KVBuffer {
   }
 
   uint32_t length() {
-    return keyLength + valueLength + SIZE_OF_KEY_LENGTH + SIZE_OF_VALUE_LENGTH;
+    return keyLength + valueLength + SIZE_OF_KV_LENGTH;
   }
 
   uint32_t lengthConvertEndium() {
-    long value = bswap64(*((long *)this));
-    return (value >> 32) + value + SIZE_OF_KEY_LENGTH + SIZE_OF_VALUE_LENGTH;
+    return hadoop_be32toh(keyLength) + hadoop_be32toh(valueLength) + SIZE_OF_KV_LENGTH;
   }
 
   void fill(const void * key, uint32_t keylen, const void * value, uint32_t vallen) {
@@ -299,7 +298,7 @@ struct KVBuffer {
   }
 
   static uint32_t headerLength() {
-    return SIZE_OF_KEY_LENGTH + SIZE_OF_VALUE_LENGTH;
+    return SIZE_OF_KV_LENGTH;
   }
 };
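
The old lengthConvertEndium() folded both 32-bit length fields into a single bswap64 of *(long *)this, which effectively assumed a 64-bit long, two adjacent fields with no padding, and a little-endian host; the replacement swaps each field explicitly. A hypothetical standalone rendering of the layout it relies on (SIZE_OF_KV_LENGTH is not defined in this excerpt; 8 bytes, the two uint32 headers, is assumed):

    /* Illustrative KVBuffer header: two big-endian uint32 lengths followed
     * by key bytes and then value bytes. Plain-C sketch, not the real class. */
    #include <stdint.h>
    #include "hadoop_endian.h"

    #define SIZE_OF_KV_LENGTH 8u  /* assumed: sizeof(keyLength) + sizeof(valueLength) */

    struct kv_header {
      uint32_t keyLength;    /* big-endian as received from the Java side */
      uint32_t valueLength;  /* big-endian as received from the Java side */
    };

    static uint32_t kv_length_convert_endian(const struct kv_header *h) {
      return hadoop_be32toh(h->keyLength)
           + hadoop_be32toh(h->valueLength)
           + SIZE_OF_KV_LENGTH;
    }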
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6725e7f1/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/src/lib/IFile.cc
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/src/lib/IFile.cc b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/src/lib/IFile.cc
index 2d3e0b5..cbe1b28 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/src/lib/IFile.cc
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/src/lib/IFile.cc
@@ -60,7 +60,7 @@ bool IFileReader::nextPartition() {
     if (4 != _stream->readFully(&chsum, 4)) {
       THROW_EXCEPTION(IOException, "read ifile checksum failed");
     }
-    uint32_t actual = bswap(chsum);
+    uint32_t actual = hadoop_be32toh(chsum);
     uint32_t expect = _source->getChecksum();
     if (actual != expect) {
       THROW_EXCEPTION_EX(IOException, "read ifile checksum not match, actual %x expect %x", actual,
@@ -130,7 +130,7 @@ void IFileWriter::endPartition() {
   }
 
   uint32_t chsum = _dest->getChecksum();
-  chsum = bswap(chsum);
+  chsum = hadoop_be32toh(chsum);
   _stream->write(&chsum, sizeof(chsum));
   _stream->flush();
   IFileSegment * info = &(_spillFileSegments[_spillFileSegments.size() - 1]);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6725e7f1/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/src/lib/IFile.h
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/src/lib/IFile.h b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/src/lib/IFile.h
index e397f90..414dc27 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/src/lib/IFile.h
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/src/lib/IFile.h
@@ -74,7 +74,7 @@ public:
       keyLen = WritableUtils::ReadVInt(kvbuff, len);
       break;
     case BytesType:
-      keyLen = bswap(*(uint32_t*)kvbuff);
+      keyLen = hadoop_be32toh(*(uint32_t*)kvbuff);
       len = 4;
       break;
     default:
@@ -89,7 +89,7 @@ public:
       _valuePos = vbuff + len;
       break;
     case BytesType:
-      _valueLen = bswap(*(uint32_t*)vbuff);
+      _valueLen = hadoop_be32toh(*(uint32_t*)vbuff);
       _valuePos = vbuff + 4;
       break;
     default:

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6725e7f1/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/src/lib/NativeObjectFactory.cc
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/src/lib/NativeObjectFactory.cc b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/src/lib/NativeObjectFactory.cc
index 2185798..5633fcf 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/src/lib/NativeObjectFactory.cc
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/src/lib/NativeObjectFactory.cc
@@ -317,8 +317,8 @@ int NativeObjectFactory::IntComparator(const char * src, uint32_t srcLength, con
     uint32_t destLength) {
   int result = (*src) - (*dest);
   if (result == 0) {
-    uint32_t from = bswap(*(uint32_t*)src);
-    uint32_t to = bswap(*(uint32_t*)dest);
+    uint32_t from = hadoop_be32toh(*(uint32_t*)src);
+    uint32_t to = hadoop_be32toh(*(uint32_t*)dest);
     if (from > to) {
       return 1;
     } else if (from == to) {
@@ -335,8 +335,8 @@ int NativeObjectFactory::LongComparator(const char * src, uint32_t srcLength, co
   int result = (int)(*src) - (int)(*dest);
   if (result == 0) {
 
-    uint64_t from = bswap64(*(uint64_t*)src);
-    uint64_t to = bswap64(*(uint64_t*)dest);
+    uint64_t from = hadoop_be64toh(*(uint64_t*)src);
+    uint64_t to = hadoop_be64toh(*(uint64_t*)dest);
     if (from > to) {
       return 1;
     } else if (from == to) {
@@ -380,8 +380,8 @@ int NativeObjectFactory::FloatComparator(const char * src, uint32_t srcLength, c
     THROW_EXCEPTION_EX(IOException, "float comparator, while src/dest lengt is not 4");
   }
 
-  uint32_t from = bswap(*(uint32_t*)src);
-  uint32_t to = bswap(*(uint32_t*)dest);
+  uint32_t from = hadoop_be32toh(*(uint32_t*)src);
+  uint32_t to = hadoop_be32toh(*(uint32_t*)dest);
 
   float * srcValue = (float *)(&from);
   float * destValue = (float *)(&to);
@@ -401,8 +401,8 @@ int NativeObjectFactory::DoubleComparator(const char * src, uint32_t srcLength,
     THROW_EXCEPTION_EX(IOException, "double comparator, while src/dest lengt is not 4");
   }
 
-  uint64_t from = bswap64(*(uint64_t*)src);
-  uint64_t to = bswap64(*(uint64_t*)dest);
+  uint64_t from = hadoop_be64toh(*(uint64_t*)src);
+  uint64_t to = hadoop_be64toh(*(uint64_t*)dest);
 
   double * srcValue = (double *)(&from);
   double * destValue = (double *)(&to);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6725e7f1/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/src/lib/SpillInfo.cc
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/src/lib/SpillInfo.cc b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/src/lib/SpillInfo.cc
index 9cff529..c1a36ce 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/src/lib/SpillInfo.cc
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/src/lib/SpillInfo.cc
@@ -58,10 +58,10 @@ void SingleSpillInfo::writeSpillInfo(const std::string & filepath) {
     appendBuffer.flush();
     uint32_t chsum = dest.getChecksum();
 #ifdef SPILLRECORD_CHECKSUM_UINT
-    chsum = bswap(chsum);
+    chsum = hadoop_be32toh(chsum);
     fout->write(&chsum, sizeof(uint32_t));
 #else
-    uint64_t wtchsum = bswap64((uint64_t)chsum);
+    uint64_t wtchsum = hadoop_be64toh((uint64_t)chsum);
     fout->write(&wtchsum, sizeof(uint64_t));
 #endif
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6725e7f1/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/src/lib/commons.h
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/src/lib/commons.h b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/src/lib/commons.h
index 57500b7..9c69f42 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/src/lib/commons.h
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/src/lib/commons.h
@@ -41,6 +41,7 @@
 #include <map>
 #include <algorithm>
 
+#include "hadoop_endian.h"
 #include "lib/primitives.h"
 #include "lib/Log.h"
 #include "NativeTask.h"

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6725e7f1/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/src/lib/primitives.h
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/src/lib/primitives.h b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/src/lib/primitives.h
index 3bf5f76..8a74a63 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/src/lib/primitives.h
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/src/lib/primitives.h
@@ -28,6 +28,7 @@
 #include <stdint.h>
 #include <assert.h>
 #include <string>
+#include "hadoop_endian.h"
 
 #ifdef __GNUC__
 #define likely(x)       __builtin_expect((x),1)
@@ -94,39 +95,6 @@ inline void simple_memcpy(void * dest, const void * src, size_t len) {
 #endif
 
 /**
- * little-endian to big-endian or vice versa
- */
-inline uint32_t bswap(uint32_t val) {
-#ifdef __aarch64__
-  __asm__("rev %w[dst], %w[src]" : [dst]"=r"(val) : [src]"r"(val));
-#else
-  __asm__("bswap %0" : "=r" (val) : "0" (val));
-#endif
-  return val;
-}
-
-inline uint64_t bswap64(uint64_t val) {
-#ifdef __aarch64__
-  __asm__("rev %[dst], %[src]" : [dst]"=r"(val) : [src]"r"(val));
-#else
-#ifdef __X64
-  __asm__("bswapq %0" : "=r" (val) : "0" (val));
-#else
-
-  uint64_t lower = val & 0xffffffffU;
-  uint32_t higher = (val >> 32) & 0xffffffffU;
-
-  lower = bswap(lower);
-  higher = bswap(higher);
-
-  return (lower << 32) + higher;
-
-#endif
-#endif
-  return val;
-}
-
-/**
  * Fast memcmp
  */
 inline int64_t fmemcmp(const char * src, const char * dest, uint32_t len) {
@@ -158,16 +126,16 @@ inline int64_t fmemcmp(const char * src, const char * dest, uint32_t len) {
     return ((int64_t)src8[2] - (int64_t)dest8[2]);
   }
   case 4: {
-    return (int64_t)bswap(*(uint32_t*)src) - (int64_t)bswap(*(uint32_t*)dest);
+    return (int64_t)hadoop_be32toh(*(uint32_t*)src) - (int64_t)hadoop_be32toh(*(uint32_t*)dest);
   }
   }
   if (len < 8) {
-    int64_t ret = ((int64_t)bswap(*(uint32_t*)src) - (int64_t)bswap(*(uint32_t*)dest));
+    int64_t ret = ((int64_t)hadoop_be32toh(*(uint32_t*)src) - (int64_t)hadoop_be32toh(*(uint32_t*)dest));
     if (ret) {
       return ret;
     }
-    return ((int64_t)bswap(*(uint32_t*)(src + len - 4))
-        - (int64_t)bswap(*(uint32_t*)(dest + len - 4)));
+    return ((int64_t)hadoop_be32toh(*(uint32_t*)(src + len - 4))
+        - (int64_t)hadoop_be32toh(*(uint32_t*)(dest + len - 4)));
   }
   uint32_t cur = 0;
   uint32_t end = len & (0xffffffffU << 3);
@@ -175,8 +143,8 @@ inline int64_t fmemcmp(const char * src, const char * dest, uint32_t len) {
     uint64_t l = *(uint64_t*)(src8 + cur);
     uint64_t r = *(uint64_t*)(dest8 + cur);
     if (l != r) {
-      l = bswap64(l);
-      r = bswap64(r);
+      l = hadoop_be64toh(l);
+      r = hadoop_be64toh(r);
       return l > r ? 1 : -1;
     }
     cur += 8;
@@ -184,8 +152,8 @@ inline int64_t fmemcmp(const char * src, const char * dest, uint32_t len) {
   uint64_t l = *(uint64_t*)(src8 + len - 8);
   uint64_t r = *(uint64_t*)(dest8 + len - 8);
   if (l != r) {
-    l = bswap64(l);
-    r = bswap64(r);
+    l = hadoop_be64toh(l);
+    r = hadoop_be64toh(r);
     return l > r ? 1 : -1;
   }
   return 0;
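
fmemcmp() compares 4- and 8-byte chunks numerically, and that only matches byte-wise memcmp() ordering when the first byte of the chunk is treated as the most significant, i.e. when the word is read as big-endian. The removed inline-assembly bswap/bswap64 gave that result on x86 and aarch64 only; hadoop_be32toh/hadoop_be64toh express the same intent portably. A small illustrative check (not part of the commit) of why the big-endian view is the right one:

    /* Sketch: the big-endian interpretation of a chunk always agrees with
     * memcmp(), which is the property fmemcmp() relies on. */
    #include <assert.h>
    #include <stdint.h>
    #include <string.h>
    #include "hadoop_endian.h"

    int main(void) {
      const uint8_t a[8] = {0x01, 0xFF, 0, 0, 0, 0, 0, 0};
      const uint8_t b[8] = {0x02, 0x00, 0, 0, 0, 0, 0, 0};
      assert(memcmp(a, b, 8) < 0);   /* ordered by the first byte */

      uint64_t la, lb;
      memcpy(&la, a, 8);
      memcpy(&lb, b, 8);
      /* The big-endian view preserves that ordering on any host, whereas the
       * raw host-order values compare the other way round on a little-endian
       * machine (la = 0xFF01 > lb = 0x0002 there). */
      assert(hadoop_be64toh(la) < hadoop_be64toh(lb));
      return 0;
    }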

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6725e7f1/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/src/util/WritableUtils.cc
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/src/util/WritableUtils.cc b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/src/util/WritableUtils.cc
index 8ed8dd2..b9f434e 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/src/util/WritableUtils.cc
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/src/util/WritableUtils.cc
@@ -120,29 +120,29 @@ void WritableUtils::WriteVLongInner(int64_t v, char * pos, uint32_t & len) {
     len = 4;
   } else if (value < (1ULL << 32)) {
     *(pos++) = base - 3;
-    *(uint32_t*)(pos) = bswap((uint32_t)value);
+    *(uint32_t*)(pos) = hadoop_be32toh((uint32_t)value);
     len = 5;
   } else if (value < (1ULL << 40)) {
     *(pos++) = base - 4;
-    *(uint32_t*)(pos) = bswap((uint32_t)(value >> 8));
+    *(uint32_t*)(pos) = hadoop_be32toh((uint32_t)(value >> 8));
     *(uint8_t*)(pos + 4) = value;
     len = 6;
   } else if (value < (1ULL << 48)) {
     *(pos++) = base - 5;
-    *(uint32_t*)(pos) = bswap((uint32_t)(value >> 16));
+    *(uint32_t*)(pos) = hadoop_be32toh((uint32_t)(value >> 16));
     *(uint8_t*)(pos + 4) = value >> 8;
     *(uint8_t*)(pos + 5) = value;
     len = 7;
   } else if (value < (1ULL << 56)) {
     *(pos++) = base - 6;
-    *(uint32_t*)(pos) = bswap((uint32_t)(value >> 24));
+    *(uint32_t*)(pos) = hadoop_be32toh((uint32_t)(value >> 24));
     *(uint8_t*)(pos + 4) = value >> 16;
     *(uint8_t*)(pos + 5) = value >> 8;
     *(uint8_t*)(pos + 6) = value;
     len = 8;
   } else {
     *(pos++) = base - 7;
-    *(uint64_t*)pos = bswap64(value);
+    *(uint64_t*)pos = hadoop_be64toh(value);
     len = 9;
   }
 }
@@ -168,7 +168,7 @@ int64_t WritableUtils::ReadLong(InputStream * stream) {
   if (stream->readFully(&ret, 8) != 8) {
     THROW_EXCEPTION(IOException, "ReadLong reach EOF");
   }
-  return (int64_t)bswap64(ret);
+  return (int64_t)hadoop_be64toh(ret);
 }
 
 int32_t WritableUtils::ReadInt(InputStream * stream) {
@@ -176,7 +176,7 @@ int32_t WritableUtils::ReadInt(InputStream * stream) {
   if (stream->readFully(&ret, 4) != 4) {
     THROW_EXCEPTION(IOException, "ReadInt reach EOF");
   }
-  return (int32_t)bswap(ret);
+  return (int32_t)hadoop_be32toh(ret);
 }
 
 int16_t WritableUtils::ReadShort(InputStream * stream) {
@@ -192,7 +192,7 @@ float WritableUtils::ReadFloat(InputStream * stream) {
   if (stream->readFully(&ret, 4) != 4) {
     THROW_EXCEPTION(IOException, "ReadFloat reach EOF");
   }
-  ret = bswap(ret);
+  ret = hadoop_be32toh(ret);
   return *(float*)&ret;
 }
 
@@ -232,12 +232,12 @@ void WritableUtils::WriteVLong(OutputStream * stream, int64_t v) {
 }
 
 void WritableUtils::WriteLong(OutputStream * stream, int64_t v) {
-  uint64_t be = bswap64((uint64_t)v);
+  uint64_t be = hadoop_be64toh((uint64_t)v);
   stream->write(&be, 8);
 }
 
 void WritableUtils::WriteInt(OutputStream * stream, int32_t v) {
-  uint32_t be = bswap((uint32_t)v);
+  uint32_t be = hadoop_be32toh((uint32_t)v);
   stream->write(&be, 4);
 }
 
@@ -249,7 +249,7 @@ void WritableUtils::WriteShort(OutputStream * stream, int16_t v) {
 
 void WritableUtils::WriteFloat(OutputStream * stream, float v) {
   uint32_t intv = *(uint32_t*)&v;
-  intv = bswap(intv);
+  intv = hadoop_be32toh(intv);
   stream->write(&intv, 4);
 }
 
@@ -286,10 +286,10 @@ void WritableUtils::toString(string & dest, KeyValueType type, const void * data
     dest.append(*(uint8_t*)data ? "true" : "false");
     break;
   case IntType:
-    dest.append(StringUtil::ToString((int32_t)bswap(*(uint32_t*)data)));
+    dest.append(StringUtil::ToString((int32_t)hadoop_be32toh(*(uint32_t*)data)));
     break;
   case LongType:
-    dest.append(StringUtil::ToString((int64_t)bswap64(*(uint64_t*)data)));
+    dest.append(StringUtil::ToString((int64_t)hadoop_be64toh(*(uint64_t*)data)));
     break;
   case FloatType:
     dest.append(StringUtil::ToString(*(float*)data));
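
WritableUtils mirrors Java's DataOutput conventions: fixed-width ints, longs and floats travel big-endian, so every read converts with hadoop_be32toh/hadoop_be64toh and every write does the same (a byte swap is its own inverse, so be32toh and htobe32 produce identical results and the code can use either direction). A minimal standalone sketch of that round trip, assuming only the generated hadoop_endian.h (the helper names are hypothetical):

    /* Sketch of the WriteInt/ReadInt wire convention: 4 bytes, big-endian. */
    #include <stdint.h>
    #include <string.h>
    #include "hadoop_endian.h"

    static void write_int_be(uint8_t out[4], int32_t v) {
      uint32_t be = hadoop_htobe32((uint32_t)v);  /* host -> big-endian */
      memcpy(out, &be, 4);
    }

    static int32_t read_int_be(const uint8_t in[4]) {
      uint32_t be;
      memcpy(&be, in, 4);
      return (int32_t)hadoop_be32toh(be);         /* big-endian -> host */
    }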

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6725e7f1/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/test/TestIFile.cc
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/test/TestIFile.cc b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/test/TestIFile.cc
index e1e32d4..93417b4 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/test/TestIFile.cc
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/test/TestIFile.cc
@@ -190,7 +190,7 @@ TEST(IFile, TestGlibCBug) {
   reader->nextPartition();
   uint32_t index = 0;
   while (NULL != (key = reader->nextKey(length))) {
-    int32_t realKey = (int32_t)bswap(*(uint32_t *)(key));
+    int32_t realKey = (int32_t)hadoop_be32toh(*(uint32_t *)(key));
     ASSERT_LT(index, 5);
     ASSERT_EQ(expect[index], realKey);
     index++;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6725e7f1/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/test/TestSort.cc
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/test/TestSort.cc b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/test/TestSort.cc
index 1c391a6..6d40dc2 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/test/TestSort.cc
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/test/TestSort.cc
@@ -38,8 +38,8 @@ inline int fmemcmporig(const char * src, const char * dest, uint32_t len) {
     uint64_t l = *src8;
     uint64_t r = *dest8;
     if (l != r) {
-      l = bswap64(l);
-      r = bswap64(r);
+      l = hadoop_be64toh(l);
+      r = hadoop_be64toh(r);
       return l > r ? 1 : -1;
     }
     ++src8;
@@ -59,8 +59,8 @@ inline int fmemcmporig(const char * src, const char * dest, uint32_t len) {
   if (l == r) {
     return 0;
   }
-  l = bswap64(l);
-  r = bswap64(r);
+  l = hadoop_be64toh(l);
+  r = hadoop_be64toh(r);
   return l > r ? 1 : -1;
 }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6725e7f1/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/test/lib/TestKVBuffer.cc
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/test/lib/TestKVBuffer.cc b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/test/lib/TestKVBuffer.cc
index e47e169..dac79ba 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/test/lib/TestKVBuffer.cc
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/test/lib/TestKVBuffer.cc
@@ -43,8 +43,8 @@ TEST(KVBuffer, test) {
   ASSERT_EQ(8, kv1->getKey() - buff);
   ASSERT_EQ(strlen(KEY) + 8, kv1->getValue() - buff);
 
-  kv1->keyLength = bswap(kv1->keyLength);
-  kv1->valueLength = bswap(kv1->valueLength);
+  kv1->keyLength = hadoop_be32toh(kv1->keyLength);
+  kv1->valueLength = hadoop_be32toh(kv1->valueLength);
 
   ASSERT_EQ(8, kv1->headerLength());
   ASSERT_EQ(strlen(KEY) + strlen(VALUE) + 8, kv1->lengthConvertEndium());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6725e7f1/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/test/lib/TestMemBlockIterator.cc
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/test/lib/TestMemBlockIterator.cc b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/test/lib/TestMemBlockIterator.cc
index 8d784fb..4025e3c 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/test/lib/TestMemBlockIterator.cc
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/test/lib/TestMemBlockIterator.cc
@@ -59,7 +59,7 @@ class MemoryBlockFactory {
       kv->keyLength = 4;
       kv->valueLength = 4;
       uint32_t * key = (uint32_t *)kv->getKey();
-      *key = bswap(index);
+      *key = hadoop_be32toh(index);
     }
     return block1;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6725e7f1/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/test/lib/TestMemoryBlock.cc
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/test/lib/TestMemoryBlock.cc b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/test/lib/TestMemoryBlock.cc
index 6af73c5..fd9c29b 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/test/lib/TestMemoryBlock.cc
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/test/lib/TestMemoryBlock.cc
@@ -85,17 +85,17 @@ TEST(MemoryBlock, sort) {
   medium->keyLength = 4;
   medium->valueLength = 4;
   uint32_t * mediumKey = (uint32_t *)medium->getKey();
-  *mediumKey = bswap(MEDIUM);
+  *mediumKey = hadoop_be32toh(MEDIUM);
 
   small->keyLength = 4;
   small->valueLength = 4;
   uint32_t * smallKey = (uint32_t *)small->getKey();
-  *smallKey = bswap(SMALL);
+  *smallKey = hadoop_be32toh(SMALL);
 
   big->keyLength = 4;
   big->valueLength = 4;
   uint32_t * bigKey = (uint32_t *)big->getKey();
-  *bigKey = bswap(BIG);
+  *bigKey = hadoop_be32toh(BIG);
 
   ComparatorPtr bytesComparator = NativeTask::get_comparator(BytesType, NULL);
   block.sort(CPPSORT, bytesComparator);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6725e7f1/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/test/lib/TestPartitionBucket.cc
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/test/lib/TestPartitionBucket.cc b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/test/lib/TestPartitionBucket.cc
index 79e1b5e..d13987a 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/test/lib/TestPartitionBucket.cc
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/test/lib/TestPartitionBucket.cc
@@ -16,7 +16,7 @@
  * limitations under the License.
  */
 
-#include "lib/commons.h"
+#include "hadoop_endian.h"
 #include "test_commons.h"
 #include "lib/PartitionBucket.h"
 #include "lib/PartitionBucketIterator.h"
@@ -129,15 +129,15 @@ TEST(PartitionBucket, sort) {
   const uint32_t BIG = 1000;
 
   kv1->keyLength = 4;
-  *((uint32_t *)kv1->getKey()) = bswap(BIG);
+  *((uint32_t *)kv1->getKey()) = hadoop_be32toh(BIG);
   kv1->valueLength = KV_SIZE - kv1->headerLength() - kv1->keyLength;
 
   kv2->keyLength = 4;
-  *((uint32_t *)kv2->getKey()) = bswap(SMALL);
+  *((uint32_t *)kv2->getKey()) = hadoop_be32toh(SMALL);
   kv2->valueLength = KV_SIZE - kv2->headerLength() - kv2->keyLength;
 
   kv3->keyLength = 4;
-  *((uint32_t *)kv3->getKey()) = bswap(MEDIUM);
+  *((uint32_t *)kv3->getKey()) = hadoop_be32toh(MEDIUM);
   kv3->valueLength = KV_SIZE - kv3->headerLength() - kv3->keyLength;
 
   bucket->sort(DUALPIVOTSORT);
@@ -148,13 +148,13 @@ TEST(PartitionBucket, sort) {
   Buffer value;
   iter->next(key, value);
 
-  ASSERT_EQ(SMALL, bswap(*(uint32_t * )key.data()));
+  ASSERT_EQ(SMALL, hadoop_be32toh(*(uint32_t * )key.data()));
 
   iter->next(key, value);
-  ASSERT_EQ(MEDIUM, bswap(*(uint32_t * )key.data()));
+  ASSERT_EQ(MEDIUM, hadoop_be32toh(*(uint32_t * )key.data()));
 
   iter->next(key, value);
-  ASSERT_EQ(BIG, bswap(*(uint32_t * )key.data()));
+  ASSERT_EQ(BIG, hadoop_be32toh(*(uint32_t * )key.data()));
 
   delete iter;
   delete bucket;
@@ -181,15 +181,15 @@ TEST(PartitionBucket, spill) {
   const uint32_t BIG = 1000;
 
   kv1->keyLength = 4;
-  *((uint32_t *)kv1->getKey()) = bswap(BIG);
+  *((uint32_t *)kv1->getKey()) = hadoop_be32toh(BIG);
   kv1->valueLength = KV_SIZE - KVBuffer::headerLength() - kv1->keyLength;
 
   kv2->keyLength = 4;
-  *((uint32_t *)kv2->getKey()) = bswap(SMALL);
+  *((uint32_t *)kv2->getKey()) = hadoop_be32toh(SMALL);
   kv2->valueLength = KV_SIZE - KVBuffer::headerLength() - kv2->keyLength;
 
   kv3->keyLength = 4;
-  *((uint32_t *)kv3->getKey()) = bswap(MEDIUM);
+  *((uint32_t *)kv3->getKey()) = hadoop_be32toh(MEDIUM);
   kv3->valueLength = KV_SIZE - KVBuffer::headerLength() - kv3->keyLength;
 
   bucket->sort(DUALPIVOTSORT);
@@ -203,17 +203,17 @@ TEST(PartitionBucket, spill) {
   KVBuffer * first = (KVBuffer *)writer.buff();
   ASSERT_EQ(4, first->keyLength);
   ASSERT_EQ(KV_SIZE - KVBuffer::headerLength() - 4, first->valueLength);
-  ASSERT_EQ(bswap(SMALL), (*(uint32_t * )(first->getKey())));
+  ASSERT_EQ(hadoop_be32toh(SMALL), (*(uint32_t * )(first->getKey())));
 
   KVBuffer * second = first->next();
   ASSERT_EQ(4, second->keyLength);
   ASSERT_EQ(KV_SIZE - KVBuffer::headerLength() - 4, second->valueLength);
-  ASSERT_EQ(bswap(MEDIUM), (*(uint32_t * )(second->getKey())));
+  ASSERT_EQ(hadoop_be32toh(MEDIUM), (*(uint32_t * )(second->getKey())));
 
   KVBuffer * third = second->next();
   ASSERT_EQ(4, third->keyLength);
   ASSERT_EQ(KV_SIZE - KVBuffer::headerLength() - 4, third->valueLength);
-  ASSERT_EQ(bswap(BIG), (*(uint32_t * )(third->getKey())));
+  ASSERT_EQ(hadoop_be32toh(BIG), (*(uint32_t * )(third->getKey())));
 
   delete [] buff;
   delete bucket;


[29/38] hadoop git commit: HDFS-9490. MiniDFSCluster should change block generation stamp via FsDatasetTestUtils. (Tony Wu via lei)

Posted by as...@apache.org.
HDFS-9490. MiniDFSCluster should change block generation stamp via FsDatasetTestUtils. (Tony Wu via lei)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0ac8fb4b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0ac8fb4b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0ac8fb4b

Branch: refs/heads/yarn-2877
Commit: 0ac8fb4b336dd03a037092c9712b962e7ed8f852
Parents: 3fa33b5
Author: Lei Xu <le...@apache.org>
Authored: Fri Dec 4 10:24:55 2015 -0800
Committer: Lei Xu <le...@apache.org>
Committed: Fri Dec 4 10:24:55 2015 -0800

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt           |  3 +++
 .../java/org/apache/hadoop/hdfs/MiniDFSCluster.java   |  8 +++-----
 .../hdfs/server/datanode/FsDatasetTestUtils.java      |  9 +++++++++
 .../fsdataset/impl/FsDatasetImplTestUtils.java        | 14 ++++++++++++++
 .../namenode/ha/TestPendingCorruptDnMessages.java     |  2 +-
 5 files changed, 30 insertions(+), 6 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0ac8fb4b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 17cbe29..89094bc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1710,6 +1710,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-9485. Make BlockManager#removeFromExcessReplicateMap accept BlockInfo
     instead of Block. (Mingliang Liu via jing9)
 
+    HDFS-9490. MiniDFSCluster should change block generation stamp via
+    FsDatasetTestUtils. (Tony Wu via lei)
+
   OPTIMIZATIONS
 
     HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0ac8fb4b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
index 6baea25..3e25177 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
@@ -2141,12 +2141,10 @@ public class MiniDFSCluster {
     getMaterializedReplica(i, blk).truncateMeta(newSize);
   }
 
-  public boolean changeGenStampOfBlock(int dnIndex, ExtendedBlock blk,
+  public void changeGenStampOfBlock(int dnIndex, ExtendedBlock blk,
       long newGenStamp) throws IOException {
-    File blockFile = getBlockFile(dnIndex, blk);
-    File metaFile = FsDatasetUtil.findMetaFile(blockFile);
-    return metaFile.renameTo(new File(DatanodeUtil.getMetaName(
-        blockFile.getAbsolutePath(), newGenStamp)));
+    getFsDatasetTestUtils(dnIndex)
+        .changeStoredGenerationStamp(blk, newGenStamp);
   }
 
   /*

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0ac8fb4b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/FsDatasetTestUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/FsDatasetTestUtils.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/FsDatasetTestUtils.java
index 07fb7ce..fd47705 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/FsDatasetTestUtils.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/FsDatasetTestUtils.java
@@ -242,4 +242,13 @@ public interface FsDatasetTestUtils {
    * Get the persistently stored generation stamp.
    */
   long getStoredGenerationStamp(ExtendedBlock block) throws IOException;
+
+  /**
+   * Change the persistently stored generation stamp.
+   * @param block the block whose generation stamp will be changed
+   * @param newGenStamp the new generation stamp
+   * @throws IOException
+   */
+  void changeStoredGenerationStamp(ExtendedBlock block, long newGenStamp)
+      throws IOException;
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0ac8fb4b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImplTestUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImplTestUtils.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImplTestUtils.java
index 8fce163..320ae9f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImplTestUtils.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImplTestUtils.java
@@ -29,6 +29,7 @@ import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
+import org.apache.hadoop.hdfs.server.datanode.DatanodeUtil;
 import org.apache.hadoop.hdfs.server.datanode.FinalizedReplica;
 import org.apache.hadoop.hdfs.server.datanode.FsDatasetTestUtils;
 import org.apache.hadoop.hdfs.server.datanode.Replica;
@@ -47,6 +48,7 @@ import java.io.IOException;
 import java.io.RandomAccessFile;
 import java.nio.channels.FileChannel;
 import java.nio.file.Files;
+import java.nio.file.StandardCopyOption;
 import java.util.Random;
 
 /**
@@ -363,4 +365,16 @@ public class FsDatasetImplTestUtils implements FsDatasetTestUtils {
     File[] files = FileUtil.listFiles(dir);
     return FsDatasetUtil.getGenerationStampFromFile(files, f);
   }
+
+  @Override
+  public void changeStoredGenerationStamp(
+      ExtendedBlock block, long newGenStamp) throws IOException {
+    File blockFile =
+        dataset.getBlockFile(block.getBlockPoolId(), block.getBlockId());
+    File metaFile = FsDatasetUtil.findMetaFile(blockFile);
+    File newMetaFile = new File(
+        DatanodeUtil.getMetaName(blockFile.getAbsolutePath(), newGenStamp));
+    Files.move(metaFile.toPath(), newMetaFile.toPath(),
+        StandardCopyOption.ATOMIC_MOVE);
+  }
 }
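
The new changeStoredGenerationStamp above replaces File#renameTo with java.nio.file.Files.move using ATOMIC_MOVE, so a failed rename now surfaces as an IOException instead of a silently dropped boolean, which is also why MiniDFSCluster#changeGenStampOfBlock earlier in this commit could become void. A minimal sketch of that rename idiom follows; the file names are hypothetical stand-ins, not real block meta paths produced by DatanodeUtil.getMetaName:

    import java.io.File;
    import java.io.IOException;
    import java.nio.file.Files;
    import java.nio.file.StandardCopyOption;

    public class AtomicRenameSketch {

      // Rename a block's meta file so its name carries a new generation stamp.
      // Files.move with ATOMIC_MOVE either succeeds or throws an IOException,
      // whereas File#renameTo only reports failure through a boolean result.
      static void renameMeta(File oldMeta, File newMeta) throws IOException {
        Files.move(oldMeta.toPath(), newMeta.toPath(),
            StandardCopyOption.ATOMIC_MOVE);
      }

      public static void main(String[] args) throws IOException {
        // Hypothetical names only; real meta file names are built by the
        // datanode utilities shown in the diff above.
        File oldMeta = File.createTempFile("blk_1073741825_1001", ".meta");
        File newMeta = new File(oldMeta.getParent(), "blk_1073741825_900.meta");
        renameMeta(oldMeta, newMeta);
        System.out.println("renamed to " + newMeta);
      }
    }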

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0ac8fb4b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestPendingCorruptDnMessages.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestPendingCorruptDnMessages.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestPendingCorruptDnMessages.java
index 443500c..5f116d9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestPendingCorruptDnMessages.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestPendingCorruptDnMessages.java
@@ -68,7 +68,7 @@ public class TestPendingCorruptDnMessages {
       // Change the gen stamp of the block on datanode to go back in time (gen
       // stamps start at 1000)
       ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, filePath);
-      assertTrue(cluster.changeGenStampOfBlock(0, block, 900));
+      cluster.changeGenStampOfBlock(0, block, 900);
       
       // Run directory dsscanner to update Datanode's volumeMap
       DataNodeTestUtils.runDirectoryScanner(cluster.getDataNodes().get(0));


[09/38] hadoop git commit: Creating 2.6.4 entries in CHANGES.txt files.

Posted by as...@apache.org.
Creating 2.6.4 entries in CHANGES.txt files.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/02889428
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/02889428
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/02889428

Branch: refs/heads/yarn-2877
Commit: 028894281bccd4e2f37eebff2e46409404ef64b7
Parents: 1c05393
Author: Junping Du <ju...@apache.org>
Authored: Tue Dec 1 08:16:39 2015 -0800
Committer: Junping Du <ju...@apache.org>
Committed: Tue Dec 1 08:16:39 2015 -0800

----------------------------------------------------------------------
 hadoop-common-project/hadoop-common/CHANGES.txt | 12 ++++++++++++
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     | 12 ++++++++++++
 hadoop-mapreduce-project/CHANGES.txt            | 12 ++++++++++++
 hadoop-yarn-project/CHANGES.txt                 | 12 ++++++++++++
 4 files changed, 48 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/02889428/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index d48479a..32f6905 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -2294,6 +2294,18 @@ Release 2.7.0 - 2015-04-20
     HADOOP-11837. AuthenticationFilter should destroy SignerSecretProvider in
     Tomcat deployments. (Bowen Zhang via wheat9)
 
+Release 2.6.4 - UNRELEASED
+
+  INCOMPATIBLE CHANGES
+
+  NEW FEATURES
+
+  IMPROVEMENTS
+
+  OPTIMIZATIONS
+
+  BUG FIXES
+
 Release 2.6.3 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/02889428/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index bf77e73..9e37b2d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -3542,6 +3542,18 @@ Release 2.7.0 - 2015-04-20
       HDFS-7700. Document quota support for storage types. (Xiaoyu Yao via
       Arpit Agarwal)
 
+Release 2.6.4 - UNRELEASED
+
+  INCOMPATIBLE CHANGES
+
+  NEW FEATURES
+
+  IMPROVEMENTS
+
+  OPTIMIZATIONS
+
+  BUG FIXES
+
 Release 2.6.3 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/02889428/hadoop-mapreduce-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt
index 2a995c7..c4d2e48 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -964,6 +964,18 @@ Release 2.7.0 - 2015-04-20
     MAPREDUCE-6285. ClientServiceDelegate should not retry upon
     AuthenticationException. (Jonathan Eagles via ozawa)
 
+Release 2.6.4 - UNRELEASED
+
+  INCOMPATIBLE CHANGES
+
+  NEW FEATURES
+
+  IMPROVEMENTS
+
+  OPTIMIZATIONS
+
+  BUG FIXES
+
 Release 2.6.3 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/02889428/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index eef241b..5992ee8 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -1957,6 +1957,18 @@ Release 2.7.0 - 2015-04-20
     YARN-3466. Fix RM nodes web page to sort by node HTTP-address, #containers 
     and node-label column (Jason Lowe via wangda)
 
+Release 2.6.4 - UNRELEASED
+
+  INCOMPATIBLE CHANGES
+
+  NEW FEATURES
+
+  IMPROVEMENTS
+
+  OPTIMIZATIONS
+
+  BUG FIXES
+
 Release 2.6.3 - UNRELEASED
 
   INCOMPATIBLE CHANGES


[27/38] hadoop git commit: Add missing file for YARN-4419

Posted by as...@apache.org.
Add missing file for YARN-4419


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e84d6ca2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e84d6ca2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e84d6ca2

Branch: refs/heads/yarn-2877
Commit: e84d6ca2df775bb4c93f6c08b345ac30b3a4525b
Parents: 755dda8
Author: Jian He <ji...@apache.org>
Authored: Thu Dec 3 19:07:28 2015 -0800
Committer: Jian He <ji...@apache.org>
Committed: Thu Dec 3 19:07:28 2015 -0800

----------------------------------------------------------------------
 .../NonAppendableFSNodeLabelStore.java          | 134 +++++++++++++++++++
 1 file changed, 134 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e84d6ca2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/NonAppendableFSNodeLabelStore.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/NonAppendableFSNodeLabelStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/NonAppendableFSNodeLabelStore.java
new file mode 100644
index 0000000..6be5715
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/NonAppendableFSNodeLabelStore.java
@@ -0,0 +1,134 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.nodelabels;
+
+import java.io.IOException;
+import java.util.Collection;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.api.records.NodeLabel;
+import org.apache.hadoop.yarn.exceptions.YarnException;
+import org.apache.hadoop.yarn.server.api.protocolrecords.AddToClusterNodeLabelsRequest;
+import org.apache.hadoop.yarn.server.api.protocolrecords.ReplaceLabelsOnNodeRequest;
+import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.AddToClusterNodeLabelsRequestPBImpl;
+import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.ReplaceLabelsOnNodeRequestPBImpl;
+
+public class NonAppendableFSNodeLabelStore extends FileSystemNodeLabelsStore {
+  protected static final Log
+      LOG = LogFactory.getLog(NonAppendableFSNodeLabelStore.class);
+
+  @Override
+  public void close() throws IOException {
+  }
+
+
+  @Override
+  public void recover() throws YarnException,
+      IOException {
+    Path newMirrorPath = new Path(fsWorkingPath, MIRROR_FILENAME + ".new");
+    Path oldMirrorPath = new Path(fsWorkingPath, MIRROR_FILENAME);
+    loadFromMirror(newMirrorPath, oldMirrorPath);
+    
+    // if new mirror exists, remove old mirror and rename new mirror
+    if (fs.exists(newMirrorPath)) {
+      // remove old mirror
+      try {
+        fs.delete(oldMirrorPath, false);
+      } catch (IOException e) {
+        // do nothing
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("Exception while removing old mirror", e);
+        }
+      }
+      
+      // rename new to old
+      fs.rename(newMirrorPath, oldMirrorPath);
+    }
+
+    LOG.info("Node label store recover is completed");
+  }
+  
+  @Override
+  public void updateNodeToLabelsMappings(
+      Map<NodeId, Set<String>> nodeToLabels) throws IOException {
+    writeNewMirror();
+  }
+
+  @Override
+  public void storeNewClusterNodeLabels(List<NodeLabel> labels)
+      throws IOException {
+    writeNewMirror();
+  }
+
+  @Override
+  public void removeClusterNodeLabels(Collection<String> labels)
+      throws IOException {
+    writeNewMirror();
+  }
+
+  private void writeNewMirror() throws IOException {
+    ReentrantReadWriteLock.ReadLock readLock = mgr.readLock;
+    try {
+      // Acquire readlock to make sure we get cluster node labels and
+      // node-to-labels mapping atomically.
+      readLock.lock();
+      List<NodeLabel> nodeLabels = mgr.getClusterNodeLabels();
+      Map<NodeId, Set<String>> nodeToLabels = mgr.getNodeLabels();
+      
+      // Write mirror to mirror.new.tmp file
+      Path newTmpPath = new Path(fsWorkingPath, MIRROR_FILENAME + ".new.tmp"); 
+      FSDataOutputStream os = fs
+          .create(newTmpPath, true);
+      ((AddToClusterNodeLabelsRequestPBImpl) AddToClusterNodeLabelsRequest
+          .newInstance(nodeLabels)).getProto().writeDelimitedTo(os);
+
+      if (mgr.isCentralizedConfiguration()) {
+        // Only save node-to-labels mapping while using centralized configuration
+        ((ReplaceLabelsOnNodeRequestPBImpl) ReplaceLabelsOnNodeRequest
+            .newInstance(nodeToLabels)).getProto().writeDelimitedTo(os);
+      }
+      
+      os.close();
+      
+      // Rename mirror.new.tmp to mirror.new (will remove .new if it already exists)
+      Path newPath = new Path(fsWorkingPath, MIRROR_FILENAME + ".new"); 
+      if (fs.exists(newPath)) {
+        fs.delete(newPath, false);
+      }
+      fs.rename(newTmpPath, newPath);
+      
+      // Remove existing mirror and rename mirror.new to mirror
+      Path mirrorPath = new Path(fsWorkingPath, MIRROR_FILENAME);
+      if (fs.exists(mirrorPath)) {
+        fs.delete(mirrorPath, false);
+      }
+      fs.rename(newPath, mirrorPath);
+    } finally {
+      readLock.unlock();
+    }
+  }
+}
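
writeNewMirror above relies on a write-then-rename sequence (mirror.new.tmp, then mirror.new, then mirror), so a crash at any step leaves either the old or the new mirror for recover() to load. Below is a rough local-filesystem sketch of the same sequence using plain java.nio; the real class goes through Hadoop's FileSystem API against fsWorkingPath, so the directory and payload here are illustrative only:

    import java.io.IOException;
    import java.nio.charset.StandardCharsets;
    import java.nio.file.Files;
    import java.nio.file.Path;

    public class MirrorSwapSketch {

      // Write the new image to mirror.new.tmp, promote it to mirror.new, then
      // swap it in for mirror, mimicking writeNewMirror/recover above.
      static void writeNewMirror(Path dir, byte[] contents) throws IOException {
        Path tmp = dir.resolve("mirror.new.tmp");
        Path fresh = dir.resolve("mirror.new");
        Path mirror = dir.resolve("mirror");

        Files.write(tmp, contents);       // 1. write the new image
        Files.deleteIfExists(fresh);      // 2. drop a stale .new, if any,
        Files.move(tmp, fresh);           //    and promote .tmp to .new
        Files.deleteIfExists(mirror);     // 3. remove the old mirror
        Files.move(fresh, mirror);        //    and rename .new into place
      }

      public static void main(String[] args) throws IOException {
        Path dir = Files.createTempDirectory("labelstore");
        writeNewMirror(dir, "labels".getBytes(StandardCharsets.UTF_8));
        System.out.println(new String(
            Files.readAllBytes(dir.resolve("mirror")), StandardCharsets.UTF_8));
      }
    }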


[02/38] hadoop git commit: HADOOP-11505. Various native parts use bswap incorrectly and unportably (Alan Burlison via aw)

Posted by as...@apache.org.
HADOOP-11505. Various native parts use bswap incorrectly and unportably (Alan Burlison via aw)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4c8125d6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4c8125d6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4c8125d6

Branch: refs/heads/yarn-2877
Commit: 4c8125d60d47e98b1ec84422888975111e0cbcec
Parents: 6725e7f
Author: Allen Wittenauer <aw...@apache.org>
Authored: Fri Nov 27 21:06:28 2015 -0800
Committer: Allen Wittenauer <aw...@apache.org>
Committed: Fri Nov 27 21:06:28 2015 -0800

----------------------------------------------------------------------
 .../hadoop-common/src/hadoop_endian.h.cmake     | 43 ++++++++++++++++++++
 1 file changed, 43 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4c8125d6/hadoop-common-project/hadoop-common/src/hadoop_endian.h.cmake
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/hadoop_endian.h.cmake b/hadoop-common-project/hadoop-common/src/hadoop_endian.h.cmake
new file mode 100644
index 0000000..b30d9bd
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/hadoop_endian.h.cmake
@@ -0,0 +1,43 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements.  See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership.  The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License.  You may obtain a copy of the License at
+*
+*     http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+/* Hadoop versions of http://austingroupbugs.net/view.php?id=162#c665 */
+
+#ifndef HADOOP_ENDIAN_H
+#define HADOOP_ENDIAN_H
+
+#include <@HADOOP_ENDIAN_H@>
+
+#define HADOOP_LITTLE_ENDIAN 1234
+#define HADOOP_BIG_ENDIAN    4321
+#cmakedefine HADOOP_BYTE_ORDER @HADOOP_BYTE_ORDER@
+
+#define hadoop_htobe16(X) @HADOOP_HTOBE16@(X)
+#define hadoop_htole16(X) @HADOOP_HTOLE16@(X)
+#define hadoop_be16toh(X) @HADOOP_BE16TOH@(X)
+#define hadoop_le16toh(X) @HADOOP_LE16TOH@(X)
+#define hadoop_htobe32(X) @HADOOP_HTOBE32@(X)
+#define hadoop_htole32(X) @HADOOP_HTOLE32@(X)
+#define hadoop_be32toh(X) @HADOOP_BE32TOH@(X)
+#define hadoop_le32toh(X) @HADOOP_LE32TOH@(X)
+#define hadoop_htobe64(X) @HADOOP_HTOBE64@(X)
+#define hadoop_htole64(X) @HADOOP_HTOLE64@(X)
+#define hadoop_be64toh(X) @HADOOP_BE64TOH@(X)
+#define hadoop_le64toh(X) @HADOOP_LE64TOH@(X)
+
+#endif


[13/38] hadoop git commit: HDFS-6533. TestBPOfferService#testBasicFunctionalitytest fails intermittently. (Contributed by Wei-Chiu Chuang)

Posted by as...@apache.org.
HDFS-6533. TestBPOfferService#testBasicFunctionalitytest fails intermittently. (Contributed by Wei-Chiu Chuang)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/58f6f54e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/58f6f54e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/58f6f54e

Branch: refs/heads/yarn-2877
Commit: 58f6f54eeac779428ac995d196b60ffb90563f97
Parents: 830eb25
Author: Arpit Agarwal <ar...@apache.org>
Authored: Tue Dec 1 13:32:32 2015 -0800
Committer: Arpit Agarwal <ar...@apache.org>
Committed: Tue Dec 1 13:32:32 2015 -0800

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |  3 ++
 .../hdfs/server/datanode/BPServiceActor.java    |  4 +++
 .../server/datanode/TestBPOfferService.java     | 32 +++++++++++++++++++-
 3 files changed, 38 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/58f6f54e/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 3f31f3e..3e1718d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -2424,6 +2424,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-9336. deleteSnapshot throws NPE when snapshotname is null.
     (Brahma Reddy Battula via aajisaka)
 
+    HDFS-6533. TestBPOfferService#testBasicFunctionalitytest fails
+    intermittently. (Wei-Chiu Chuang via Arpit Agarwal)
+
 Release 2.7.3 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/58f6f54e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
index 0316535..1b72961 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
@@ -127,6 +127,10 @@ class BPServiceActor implements Runnable {
     scheduler = new Scheduler(dnConf.heartBeatInterval, dnConf.blockReportInterval);
   }
 
+  public DatanodeRegistration getBpRegistration() {
+    return bpRegistration;
+  }
+
   boolean isAlive() {
     if (!shouldServiceRun || !bpThread.isAlive()) {
       return false;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/58f6f54e/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java
index ab69bb0..cb5f272 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java
@@ -180,7 +180,7 @@ public class TestBPOfferService {
     BPOfferService bpos = setupBPOSForNNs(mockNN1, mockNN2);
     bpos.start();
     try {
-      waitForInitialization(bpos);
+      waitForBothActors(bpos);
       
       // The DN should have register to both NNs.
       Mockito.verify(mockNN1).registerDatanode(
@@ -205,6 +205,7 @@ public class TestBPOfferService {
 
     } finally {
       bpos.stop();
+      bpos.join();
     }
   }
 
@@ -235,6 +236,7 @@ public class TestBPOfferService {
 
     } finally {
       bpos.stop();
+      bpos.join();
     }
     
     // Should ignore the delete command from the standby
@@ -260,6 +262,7 @@ public class TestBPOfferService {
       waitForOneToFail(bpos);
     } finally {
       bpos.stop();
+      bpos.join();
     }
   }
   
@@ -307,6 +310,7 @@ public class TestBPOfferService {
 
     } finally {
       bpos.stop();
+      bpos.join();
     }
   }
 
@@ -349,6 +353,7 @@ public class TestBPOfferService {
       waitForBlockReport(mockNN1, mockNN2);
     } finally {
       bpos.stop();
+      bpos.join();
     }
   }
 
@@ -403,6 +408,27 @@ public class TestBPOfferService {
       }
     }, 100, 10000);
   }
+
+  private void waitForBothActors(final BPOfferService bpos)
+      throws Exception {
+    GenericTestUtils.waitFor(new Supplier<Boolean>() {
+      @Override
+      public Boolean get() {
+        List<BPServiceActor> actors = bpos.getBPServiceActors();
+
+        return bpos.isAlive() && getRegisteredActors(actors) == 2;
+      }
+      private int getRegisteredActors(List<BPServiceActor> actors) {
+        int regActors = 0;
+        for (BPServiceActor actor : actors) {
+          if (actor.getBpRegistration() != null) {
+            regActors++;
+          }
+        }
+        return regActors;
+      }
+    }, 100, 10000);
+  }
   
   private void waitForBlockReport(final DatanodeProtocolClientSideTranslatorPB mockNN)
       throws Exception {
@@ -540,6 +566,7 @@ public class TestBPOfferService {
           difference < 5000);
     } finally {
       bpos.stop();
+      bpos.join();
     }
   }
 
@@ -579,6 +606,7 @@ public class TestBPOfferService {
           + " processing ", difference < 5000);
     } finally {
       bpos.stop();
+      bpos.join();
     }
   }
   /**
@@ -624,6 +652,7 @@ public class TestBPOfferService {
           + "when errorReport threw IOException", secondCallTime != 0);
     } finally {
       bpos.stop();
+      bpos.join();
     }
   } 
 
@@ -675,6 +704,7 @@ public class TestBPOfferService {
           .reportBadBlocks(Mockito.any(LocatedBlock[].class));
     } finally {
       bpos.stop();
+      bpos.join();
     }
   }
 }
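
The intermittent failure came from verifying NameNode registrations before both BPServiceActors had finished registering; the fix above polls with GenericTestUtils.waitFor until both actors report a registration and joins the BPOfferService after stop() so its threads cannot bleed into later tests. A dependency-free sketch of that polling idiom, assuming nothing beyond the JDK:

    import java.util.concurrent.TimeoutException;
    import java.util.function.Supplier;

    public class WaitForSketch {

      // Poll a condition every intervalMs until it holds or timeoutMs elapses,
      // the same shape as the GenericTestUtils.waitFor call in the test fix.
      static void waitFor(Supplier<Boolean> check, long intervalMs, long timeoutMs)
          throws InterruptedException, TimeoutException {
        long deadline = System.currentTimeMillis() + timeoutMs;
        while (!check.get()) {
          if (System.currentTimeMillis() > deadline) {
            throw new TimeoutException("condition not met within " + timeoutMs + " ms");
          }
          Thread.sleep(intervalMs);
        }
      }

      public static void main(String[] args) throws Exception {
        long start = System.currentTimeMillis();
        // Example condition: becomes true roughly half a second after start.
        waitFor(() -> System.currentTimeMillis() - start > 500, 100, 10000);
        System.out.println("condition met");
      }
    }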


[20/38] hadoop git commit: HADOOP-12565. Replace DSA with RSA for SSH key type in SingleCluster.md. Contributed by Mingliang Liu.

Posted by as...@apache.org.
HADOOP-12565. Replace DSA with RSA for SSH key type in SingleCluster.md. Contributed by Mingliang Liu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3857fed2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3857fed2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3857fed2

Branch: refs/heads/yarn-2877
Commit: 3857fed2c8fc601b46e8331f73a3104f4f33e498
Parents: e8bd1ba
Author: Akira Ajisaka <aa...@apache.org>
Authored: Thu Dec 3 11:45:45 2015 +0800
Committer: Akira Ajisaka <aa...@apache.org>
Committed: Thu Dec 3 11:50:31 2015 +0800

----------------------------------------------------------------------
 hadoop-common-project/hadoop-common/CHANGES.txt                  | 3 +++
 .../hadoop-common/src/site/markdown/SingleCluster.md.vm          | 4 ++--
 2 files changed, 5 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3857fed2/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index b19b703..de4dad0 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -1524,6 +1524,9 @@ Release 2.7.3 - UNRELEASED
 
     HADOOP-12482. Race condition in JMX cache update. (Tony Wu via lei)
 
+    HADOOP-12565. Replace DSA with RSA for SSH key type in SingleCluster.md.
+    (Mingliang Liu via aajisaka)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3857fed2/hadoop-common-project/hadoop-common/src/site/markdown/SingleCluster.md.vm
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/SingleCluster.md.vm b/hadoop-common-project/hadoop-common/src/site/markdown/SingleCluster.md.vm
index 2de8b2b..84789f6 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/SingleCluster.md.vm
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/SingleCluster.md.vm
@@ -138,8 +138,8 @@ Now check that you can ssh to the localhost without a passphrase:
 
 If you cannot ssh to localhost without a passphrase, execute the following commands:
 
-      $ ssh-keygen -t dsa -P '' -f ~/.ssh/id_dsa
-      $ cat ~/.ssh/id_dsa.pub >> ~/.ssh/authorized_keys
+      $ ssh-keygen -t rsa -P '' -f ~/.ssh/id_rsa
+      $ cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys
       $ chmod 0600 ~/.ssh/authorized_keys
 
 $H3 Execution


[19/38] hadoop git commit: HDFS-9294. DFSClient deadlock when close file and failed to renew lease. Contributed by Brahma Reddy Battula

Posted by as...@apache.org.
HDFS-9294. DFSClient deadlock when close file and failed to renew lease.  Contributed by Brahma Reddy Battula


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e8bd1ba7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e8bd1ba7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e8bd1ba7

Branch: refs/heads/yarn-2877
Commit: e8bd1ba74b2fc7a6a1b71d068ef01a0fb0bbe294
Parents: 6b9a5be
Author: Tsz-Wo Nicholas Sze <sz...@hortonworks.com>
Authored: Wed Dec 2 17:39:28 2015 -0800
Committer: Tsz-Wo Nicholas Sze <sz...@hortonworks.com>
Committed: Wed Dec 2 17:39:28 2015 -0800

----------------------------------------------------------------------
 .../org/apache/hadoop/hdfs/DFSOutputStream.java | 28 ++++++++++++--------
 .../hadoop/hdfs/DFSStripedOutputStream.java     | 22 ++++++++-------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |  3 +++
 3 files changed, 32 insertions(+), 21 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e8bd1ba7/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
index 836868b..f6a8981 100755
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
@@ -708,13 +708,17 @@ public class DFSOutputStream extends FSOutputSummer
    * Aborts this output stream and releases any system
    * resources associated with this stream.
    */
-  synchronized void abort() throws IOException {
-    if (isClosed()) {
-      return;
+  void abort() throws IOException {
+    synchronized (this) {
+      if (isClosed()) {
+        return;
+      }
+      getStreamer().getLastException().set(
+          new IOException("Lease timeout of "
+              + (dfsClient.getConf().getHdfsTimeout() / 1000)
+              + " seconds expired."));
+      closeThreads(true);
     }
-    getStreamer().getLastException().set(new IOException("Lease timeout of "
-        + (dfsClient.getConf().getHdfsTimeout() / 1000) + " seconds expired."));
-    closeThreads(true);
     dfsClient.endFileLease(fileId);
   }
 
@@ -747,11 +751,14 @@ public class DFSOutputStream extends FSOutputSummer
    * resources associated with this stream.
    */
   @Override
-  public synchronized void close() throws IOException {
-    try (TraceScope ignored =
-             dfsClient.newPathTraceScope("DFSOutputStream#close", src)) {
-      closeImpl();
+  public void close() throws IOException {
+    synchronized (this) {
+      try (TraceScope ignored = dfsClient.newPathTraceScope(
+          "DFSOutputStream#close", src)) {
+        closeImpl();
+      }
     }
+    dfsClient.endFileLease(fileId);
   }
 
   protected synchronized void closeImpl() throws IOException {
@@ -779,7 +786,6 @@ public class DFSOutputStream extends FSOutputSummer
                dfsClient.getTracer().newScope("completeFile")) {
         completeFile(lastBlock);
       }
-      dfsClient.endFileLease(fileId);
     } catch (ClosedChannelException ignored) {
     } finally {
       setClosed();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e8bd1ba7/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
index f5bae2a..9c98f9d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
@@ -757,16 +757,19 @@ public class DFSStripedOutputStream extends DFSOutputStream {
   }
 
   @Override
-  synchronized void abort() throws IOException {
-    if (isClosed()) {
-      return;
-    }
-    for (StripedDataStreamer streamer : streamers) {
-      streamer.getLastException().set(new IOException("Lease timeout of "
-          + (dfsClient.getConf().getHdfsTimeout()/1000) +
-          " seconds expired."));
+  void abort() throws IOException {
+    synchronized (this) {
+      if (isClosed()) {
+        return;
+      }
+      for (StripedDataStreamer streamer : streamers) {
+        streamer.getLastException().set(
+            new IOException("Lease timeout of "
+                + (dfsClient.getConf().getHdfsTimeout() / 1000)
+                + " seconds expired."));
+      }
+      closeThreads(true);
     }
-    closeThreads(true);
     dfsClient.endFileLease(fileId);
   }
 
@@ -954,7 +957,6 @@ public class DFSStripedOutputStream extends DFSOutputStream {
                dfsClient.getTracer().newScope("completeFile")) {
         completeFile(currentBlockGroup);
       }
-      dfsClient.endFileLease(fileId);
     } catch (ClosedChannelException ignored) {
     } finally {
       setClosed();
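
Both hunks above shrink the synchronized region so that endFileLease(), which needs the DFSClient/LeaseRenewer lock, runs only after the stream's own monitor has been released; holding the two locks in opposite orders from close() and from the lease-renewal thread's abort() is what produced the deadlock. A stripped-down sketch of the before and after locking shapes, with illustrative class and lock names rather than the actual HDFS types:

    public class LockOrderSketch {
      private final Object leaseLock = new Object();  // stands in for the lease renewer lock
      private boolean closed;

      // Deadlock-prone shape: the whole method holds the stream monitor while
      // calling into code that also needs leaseLock. If another thread already
      // holds leaseLock and then calls a synchronized method on this stream,
      // the two threads block each other forever.
      public synchronized void closeHoldingBothLocks() {
        closed = true;
        endFileLease();              // acquires leaseLock while holding "this"
      }

      // Shape after the fix: only the state update runs under the stream
      // monitor; the lease is released after the monitor has been dropped.
      public void close() {
        synchronized (this) {
          if (closed) {
            return;
          }
          closed = true;
        }
        endFileLease();              // acquires leaseLock without holding "this"
      }

      private void endFileLease() {
        synchronized (leaseLock) {
          // remove the stream from the lease renewer (elided)
        }
      }
    }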

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e8bd1ba7/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index bb3f148..b65c048 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -2535,6 +2535,9 @@ Release 2.7.2 - UNRELEASED
     HDFS-9426. Rollingupgrade finalization is not backward compatible
     (Kihwal Lee via vinayakumarb)
 
+    HDFS-9294. DFSClient deadlock when close file and failed to renew lease.
+    (Brahma Reddy Battula via szetszwo)
+
 Release 2.7.1 - 2015-07-06
 
   INCOMPATIBLE CHANGES


[07/38] hadoop git commit: HDFS-9470. Encryption zone on root not loaded from fsimage after NN restart. Xiao Chen via wang.

Posted by as...@apache.org.
HDFS-9470. Encryption zone on root not loaded from fsimage after NN restart. Xiao Chen via wang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9b8e50b4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9b8e50b4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9b8e50b4

Branch: refs/heads/yarn-2877
Commit: 9b8e50b424d060e16c1175b1811e7abc476e2468
Parents: 43acf9a
Author: Andrew Wang <wa...@apache.org>
Authored: Mon Nov 30 14:32:19 2015 -0800
Committer: Andrew Wang <wa...@apache.org>
Committed: Mon Nov 30 14:32:51 2015 -0800

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |  3 ++
 .../hdfs/server/namenode/FSDirectory.java       | 49 +++++++++++++-------
 .../server/namenode/FSImageFormatPBINode.java   |  1 +
 .../apache/hadoop/hdfs/TestEncryptionZones.java | 38 +++++++++++++++
 4 files changed, 74 insertions(+), 17 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9b8e50b4/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index d73dbd2..5ee5446 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -3560,6 +3560,9 @@ Release 2.6.3 - UNRELEASED
     HDFS-9434. Recommission a datanode with 500k blocks may pause NN for 30
     seconds for printing info log messages.  (szetszwo)
 
+    HDFS-9470. Encryption zone on root not loaded from fsimage after NN
+    restart. (Xiao Chen via wang)
+
 Release 2.6.2 - 2015-10-28
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9b8e50b4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
index 0f3011a..661d788 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
@@ -1163,28 +1163,43 @@ public class FSDirectory implements Closeable {
       inodeMap.put(inode);
       if (!inode.isSymlink()) {
         final XAttrFeature xaf = inode.getXAttrFeature();
-        if (xaf != null) {
-          XAttr xattr = xaf.getXAttr(CRYPTO_XATTR_ENCRYPTION_ZONE);
-          if (xattr != null) {
-            try {
-              final HdfsProtos.ZoneEncryptionInfoProto ezProto =
-                  HdfsProtos.ZoneEncryptionInfoProto.parseFrom(
-                      xattr.getValue());
-              ezManager.unprotectedAddEncryptionZone(inode.getId(),
-                  PBHelperClient.convert(ezProto.getSuite()),
-                  PBHelperClient.convert(ezProto.getCryptoProtocolVersion()),
-                  ezProto.getKeyName());
-            } catch (InvalidProtocolBufferException e) {
-              NameNode.LOG.warn("Error parsing protocol buffer of " +
-                  "EZ XAttr " + xattr.getName());
-            }
-          }
-        }
+        addEncryptionZone((INodeWithAdditionalFields) inode, xaf);
       }
     }
   }
+
+  private void addEncryptionZone(INodeWithAdditionalFields inode,
+      XAttrFeature xaf) {
+    if (xaf == null) {
+      return;
+    }
+    XAttr xattr = xaf.getXAttr(CRYPTO_XATTR_ENCRYPTION_ZONE);
+    if (xattr == null) {
+      return;
+    }
+    try {
+      final HdfsProtos.ZoneEncryptionInfoProto ezProto =
+          HdfsProtos.ZoneEncryptionInfoProto.parseFrom(
+              xattr.getValue());
+      ezManager.unprotectedAddEncryptionZone(inode.getId(),
+          PBHelperClient.convert(ezProto.getSuite()),
+          PBHelperClient.convert(ezProto.getCryptoProtocolVersion()),
+          ezProto.getKeyName());
+    } catch (InvalidProtocolBufferException e) {
+      NameNode.LOG.warn("Error parsing protocol buffer of " +
+          "EZ XAttr " + xattr.getName() + " dir:" + inode.getFullPathName());
+    }
+  }
   
   /**
+   * This is to handle encryption zone for rootDir when loading from
+   * fsimage, and should only be called during NN restart.
+   */
+  public final void addRootDirToEncryptionZone(XAttrFeature xaf) {
+    addEncryptionZone(rootDir, xaf);
+  }
+
+  /**
    * This method is always called with writeLock of FSDirectory held.
    */
   public final void removeFromInodeMap(List<? extends INode> inodes) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9b8e50b4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java
index cf7895b..2f74a2b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java
@@ -427,6 +427,7 @@ public final class FSImageFormatPBINode {
       if (f != null) {
         dir.rootDir.addXAttrFeature(f);
       }
+      dir.addRootDirToEncryptionZone(f);
     }
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9b8e50b4/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java
index 3630f19..90cbc0b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java
@@ -378,6 +378,44 @@ public class TestEncryptionZones {
     assertZonePresent(null, nonpersistZone.toString());
   }
 
+  @Test(timeout = 60000)
+  public void testBasicOperationsRootDir() throws Exception {
+    int numZones = 0;
+    final Path rootDir = new Path("/");
+    final Path zone1 = new Path(rootDir, "zone1");
+
+    /* Normal creation of an EZ on rootDir */
+    dfsAdmin.createEncryptionZone(rootDir, TEST_KEY);
+    assertNumZones(++numZones);
+    assertZonePresent(null, rootDir.toString());
+
+    /* create EZ on child of rootDir which is already an EZ should fail */
+    fsWrapper.mkdir(zone1, FsPermission.getDirDefault(), true);
+    try {
+      dfsAdmin.createEncryptionZone(zone1, TEST_KEY);
+      fail("EZ over an EZ");
+    } catch (IOException e) {
+      assertExceptionContains("already in an encryption zone", e);
+    }
+
+    // Verify rootDir ez is present after restarting the NameNode
+    // and saving/loading from fsimage.
+    fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
+    fs.saveNamespace();
+    fs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
+    cluster.restartNameNode(true);
+    assertNumZones(numZones);
+    assertZonePresent(null, rootDir.toString());
+
+    /* create EZ on child of rootDir which is already an EZ should fail */
+    try {
+      dfsAdmin.createEncryptionZone(zone1, TEST_KEY);
+      fail("EZ over an EZ");
+    } catch (IOException e) {
+      assertExceptionContains("already in an encryption zone", e);
+    }
+  }
+
   /**
    * Test listing encryption zones as a non super user.
    */


[30/38] hadoop git commit: HDFS-8831. Trash Support for deletion in HDFS encryption zone. Contributed by Xiaoyu Yao.

Posted by as...@apache.org.
HDFS-8831. Trash Support for deletion in HDFS encryption zone. Contributed by Xiaoyu Yao.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cbc7b6bf
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cbc7b6bf
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cbc7b6bf

Branch: refs/heads/yarn-2877
Commit: cbc7b6bf97a80c39d4bbb3005e42dacae6726baf
Parents: 0ac8fb4
Author: Xiaoyu Yao <xy...@apache.org>
Authored: Fri Dec 4 10:39:45 2015 -0800
Committer: Xiaoyu Yao <xy...@apache.org>
Committed: Fri Dec 4 10:39:45 2015 -0800

----------------------------------------------------------------------
 .../java/org/apache/hadoop/fs/FileSystem.java   |  51 ++++-
 .../org/apache/hadoop/fs/FilterFileSystem.java  |  11 ++
 .../main/java/org/apache/hadoop/fs/FsShell.java |  12 +-
 .../main/java/org/apache/hadoop/fs/Trash.java   |  14 +-
 .../java/org/apache/hadoop/fs/TrashPolicy.java  |  56 +++++-
 .../apache/hadoop/fs/TrashPolicyDefault.java    | 193 +++++++++++--------
 .../org/apache/hadoop/fs/TestHarFileSystem.java |   4 +
 .../java/org/apache/hadoop/fs/TestTrash.java    |   9 +
 .../hadoop/hdfs/DistributedFileSystem.java      |  62 ++++++
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |   2 +
 .../apache/hadoop/hdfs/TestEncryptionZones.java |  47 +++++
 11 files changed, 368 insertions(+), 93 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cbc7b6bf/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
index d3eb0ad..fdea387 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
@@ -105,6 +105,8 @@ public abstract class FileSystem extends Configured implements Closeable {
    */
   public static final int SHUTDOWN_HOOK_PRIORITY = 10;
 
+  public static final String TRASH_PREFIX = ".Trash";
+
   /** FileSystem cache */
   static final Cache CACHE = new Cache();
 
@@ -2663,6 +2665,53 @@ public abstract class FileSystem extends Configured implements Closeable {
         + " doesn't support getAllStoragePolicies");
   }
 
+  /**
+   * Get the root directory of Trash for current user when the path specified
+   * is deleted.
+   *
+   * @param path the path whose trash root is to be determined.
+   * @return the default implementation returns "/user/$USER/.Trash".
+   * @throws IOException
+   */
+  public Path getTrashRoot(Path path) throws IOException {
+    return this.makeQualified(new Path(getHomeDirectory().toUri().getPath(),
+        TRASH_PREFIX));
+  }
+
+  /**
+   * Get all the trash roots for current user or all users.
+   *
+   * @param allUsers return trash roots for all users if true.
+   * @return all the trash root directories.
+   *         Default FileSystem returns .Trash under users' home directories if
+   *         /user/$USER/.Trash exists.
+   * @throws IOException
+   */
+  public Collection<FileStatus> getTrashRoots(boolean allUsers)
+      throws IOException {
+    Path userHome = new Path(getHomeDirectory().toUri().getPath());
+    List<FileStatus> ret = new ArrayList<FileStatus>();
+    if (!allUsers) {
+      Path userTrash = new Path(userHome, TRASH_PREFIX);
+      if (exists(userTrash)) {
+        ret.add(getFileStatus(userTrash));
+      }
+    } else {
+      Path homeParent = userHome.getParent();
+      if (exists(homeParent)) {
+        FileStatus[] candidates = listStatus(homeParent);
+        for (FileStatus candidate : candidates) {
+          Path userTrash = new Path(candidate.getPath(), TRASH_PREFIX);
+          if (exists(userTrash)) {
+            candidate.setPath(userTrash);
+            ret.add(candidate);
+          }
+        }
+      }
+    }
+    return ret;
+  }
+
   // making it volatile to be able to do a double checked locking
   private volatile static boolean FILE_SYSTEMS_LOADED = false;
 
@@ -3183,7 +3232,7 @@ public abstract class FileSystem extends Configured implements Closeable {
      * For each StatisticsData object, we will call accept on the visitor.
      * Finally, at the end, we will call aggregate to get the final total. 
      *
-     * @param         The visitor to use.
+     * @param         visitor to use.
      * @return        The total.
      */
     private synchronized <T> T visitAll(StatisticsAggregator<T> visitor) {
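
With the two hooks added above, callers ask the FileSystem which trash root applies to a given path instead of hard-coding /user/$USER/.Trash, which is what lets DistributedFileSystem (later in this commit) return a per-encryption-zone trash directory. A small usage sketch against the local filesystem, assuming a Hadoop build that already includes this change; the probed path is only an example:

    import java.util.Collection;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class TrashRootSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.getLocal(conf);

        // Default implementation: /user/$USER/.Trash qualified for this fs;
        // DistributedFileSystem may instead return a trash root inside the
        // encryption zone that contains the path.
        Path trashRoot = fs.getTrashRoot(new Path("/tmp/some/file"));
        System.out.println("trash root: " + trashRoot);

        // Trash roots that already exist for the current user (allUsers = false).
        Collection<FileStatus> roots = fs.getTrashRoots(false);
        for (FileStatus root : roots) {
          System.out.println("existing trash root: " + root.getPath());
        }
      }
    }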

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cbc7b6bf/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java
index 00f6778..53678e0 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java
@@ -643,4 +643,15 @@ public class FilterFileSystem extends FileSystem {
       throws IOException {
     return fs.getAllStoragePolicies();
   }
+
+  @Override
+  public Path getTrashRoot(Path path) throws IOException {
+    return fs.getTrashRoot(path);
+  }
+
+  @Override
+  public Collection<FileStatus> getTrashRoots(boolean allUsers)
+      throws IOException {
+    return fs.getTrashRoots(allUsers);
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cbc7b6bf/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsShell.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsShell.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsShell.java
index 334b6bc..c0a3bea 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsShell.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsShell.java
@@ -121,6 +121,16 @@ public class FsShell extends Configured implements Tool {
     return getTrash().getCurrentTrashDir();
   }
 
+  /**
+   * Returns the current trash location for the path specified
+   * @param path to be deleted
+   * @return path to the trash
+   * @throws IOException
+   */
+  public Path getCurrentTrashDir(Path path) throws IOException {
+    return getTrash().getCurrentTrashDir(path);
+  }
+
   protected String getUsagePrefix() {
     return usagePrefix;
   }
@@ -129,7 +139,7 @@ public class FsShell extends Configured implements Tool {
   // that access commandFactory
   
   /**
-   *  Display help for commands with their short usage and long description
+   *  Display help for commands with their short usage and long description.
    */
    protected class Usage extends FsCommand {
     public static final String NAME = "usage";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cbc7b6bf/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Trash.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Trash.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Trash.java
index 13d0ec3..b771812 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Trash.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Trash.java
@@ -54,7 +54,7 @@ public class Trash extends Configured {
    */
   public Trash(FileSystem fs, Configuration conf) throws IOException {
     super(conf);
-    trashPolicy = TrashPolicy.getInstance(conf, fs, fs.getHomeDirectory());
+    trashPolicy = TrashPolicy.getInstance(conf, fs);
   }
 
   /**
@@ -92,11 +92,7 @@ public class Trash extends Configured {
       throw new IOException("Failed to get server trash configuration", e);
     }
     Trash trash = new Trash(fullyResolvedFs, conf);
-    boolean success = trash.moveToTrash(fullyResolvedPath);
-    if (success) {
-      LOG.info("Moved: '" + p + "' to trash at: " + trash.getCurrentTrashDir());
-    }
-    return success;
+    return trash.moveToTrash(fullyResolvedPath);
   }
   
   /**
@@ -124,7 +120,7 @@ public class Trash extends Configured {
   }
 
   /** get the current working directory */
-  Path getCurrentTrashDir() {
+  Path getCurrentTrashDir() throws IOException {
     return trashPolicy.getCurrentTrashDir();
   }
 
@@ -139,4 +135,8 @@ public class Trash extends Configured {
   public Runnable getEmptier() throws IOException {
     return trashPolicy.getEmptier();
   }
+
+  public Path getCurrentTrashDir(Path path) throws IOException {
+    return trashPolicy.getCurrentTrashDir(path);
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cbc7b6bf/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicy.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicy.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicy.java
index eab83b3..1d901c1 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicy.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicy.java
@@ -38,7 +38,7 @@ public abstract class TrashPolicy extends Configured {
 
   /**
    * Used to setup the trash policy. Must be implemented by all TrashPolicy
-   * implementations
+   * implementations.
    * @param conf the configuration to be used
    * @param fs the filesystem to be used
    * @param home the home directory
@@ -46,7 +46,19 @@ public abstract class TrashPolicy extends Configured {
   public abstract void initialize(Configuration conf, FileSystem fs, Path home);
 
   /**
-   * Returns whether the Trash Policy is enabled for this filesystem
+   * Used to setup the trash policy. Must be implemented by all TrashPolicy
+   * implementations. Different from initialize(conf, fs, home), this one does
+   * not assume trash always under /user/$USER due to HDFS encryption zone.
+   * @param conf the configuration to be used
+   * @param fs the filesystem to be used
+   * @throws IOException
+   */
+  public void initialize(Configuration conf, FileSystem fs) throws IOException{
+    throw new UnsupportedOperationException();
+  }
+
+  /**
+   * Returns whether the Trash Policy is enabled for this filesystem.
    */
   public abstract boolean isEnabled();
 
@@ -68,8 +80,27 @@ public abstract class TrashPolicy extends Configured {
 
   /**
    * Get the current working directory of the Trash Policy
+   * This API does not work with files deleted from an encryption zone when
+   * HDFS data encryption at rest is enabled, because renaming files between
+   * encryption zones, or between an encryption zone and a non-encryption
+   * zone, is not allowed.
+   *
+   * Callers are recommended to use the new API
+   * TrashPolicy#getCurrentTrashDir(Path path), which returns the correct
+   * trash location for the given path whether or not it is in an encryption zone.
+   */
+  public abstract Path getCurrentTrashDir() throws IOException;
+
+  /**
+   * Get the current trash directory for path specified based on the Trash
+   * Policy
+   * @param path path to be deleted
+   * @return current trash directory for the path to be deleted
+   * @throws IOException
    */
-  public abstract Path getCurrentTrashDir();
+  public Path getCurrentTrashDir(Path path) throws IOException {
+    throw new UnsupportedOperationException();
+  }
 
   /** 
    * Return a {@link Runnable} that periodically empties the trash of all
@@ -78,7 +109,7 @@ public abstract class TrashPolicy extends Configured {
   public abstract Runnable getEmptier() throws IOException;
 
   /**
-   * Get an instance of the configured TrashPolicy based on the value 
+   * Get an instance of the configured TrashPolicy based on the value
    * of the configuration parameter fs.trash.classname.
    *
    * @param conf the configuration to be used
@@ -93,4 +124,21 @@ public abstract class TrashPolicy extends Configured {
     trash.initialize(conf, fs, home); // initialize TrashPolicy
     return trash;
   }
+
+  /**
+   * Get an instance of the configured TrashPolicy based on the value
+   * of the configuration parameter fs.trash.classname.
+   *
+   * @param conf the configuration to be used
+   * @param fs the file system to be used
+   * @return an instance of TrashPolicy
+   */
+  public static TrashPolicy getInstance(Configuration conf, FileSystem fs)
+      throws IOException {
+    Class<? extends TrashPolicy> trashClass = conf.getClass(
+        "fs.trash.classname", TrashPolicyDefault.class, TrashPolicy.class);
+    TrashPolicy trash = ReflectionUtils.newInstance(trashClass, conf);
+    trash.initialize(conf, fs); // initialize TrashPolicy
+    return trash;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cbc7b6bf/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java
index 1ed8a46..b5d245b 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java
@@ -27,6 +27,7 @@ import java.io.IOException;
 import java.text.DateFormat;
 import java.text.ParseException;
 import java.text.SimpleDateFormat;
+import java.util.Collection;
 import java.util.Date;
 
 import org.apache.commons.logging.Log;
@@ -66,23 +67,18 @@ public class TrashPolicyDefault extends TrashPolicy {
       new SimpleDateFormat("yyMMddHHmm");
   private static final int MSECS_PER_MINUTE = 60*1000;
 
-  private Path current;
-  private Path homesParent;
   private long emptierInterval;
 
   public TrashPolicyDefault() { }
 
-  private TrashPolicyDefault(FileSystem fs, Path home, Configuration conf)
+  private TrashPolicyDefault(FileSystem fs, Configuration conf)
       throws IOException {
-    initialize(conf, fs, home);
+    initialize(conf, fs);
   }
 
   @Override
   public void initialize(Configuration conf, FileSystem fs, Path home) {
     this.fs = fs;
-    this.trash = new Path(home, TRASH);
-    this.homesParent = home.getParent();
-    this.current = new Path(trash, CURRENT);
     this.deletionInterval = (long)(conf.getFloat(
         FS_TRASH_INTERVAL_KEY, FS_TRASH_INTERVAL_DEFAULT)
         * MSECS_PER_MINUTE);
@@ -91,6 +87,17 @@ public class TrashPolicyDefault extends TrashPolicy {
         * MSECS_PER_MINUTE);
    }
 
+  @Override
+  public void initialize(Configuration conf, FileSystem fs) {
+    this.fs = fs;
+    this.deletionInterval = (long)(conf.getFloat(
+        FS_TRASH_INTERVAL_KEY, FS_TRASH_INTERVAL_DEFAULT)
+        * MSECS_PER_MINUTE);
+    this.emptierInterval = (long)(conf.getFloat(
+        FS_TRASH_CHECKPOINT_INTERVAL_KEY, FS_TRASH_CHECKPOINT_INTERVAL_DEFAULT)
+        * MSECS_PER_MINUTE);
+  }
+
   private Path makeTrashRelativePath(Path basePath, Path rmFilePath) {
     return Path.mergePaths(basePath, rmFilePath);
   }
@@ -113,17 +120,19 @@ public class TrashPolicyDefault extends TrashPolicy {
 
     String qpath = fs.makeQualified(path).toString();
 
-    if (qpath.startsWith(trash.toString())) {
+    Path trashRoot = fs.getTrashRoot(path);
+    Path trashCurrent = new Path(trashRoot, CURRENT);
+    if (qpath.startsWith(trashRoot.toString())) {
       return false;                               // already in trash
     }
 
-    if (trash.getParent().toString().startsWith(qpath)) {
+    if (trashRoot.getParent().toString().startsWith(qpath)) {
       throw new IOException("Cannot move \"" + path +
                             "\" to the trash, as it contains the trash");
     }
 
-    Path trashPath = makeTrashRelativePath(current, path);
-    Path baseTrashPath = makeTrashRelativePath(current, path.getParent());
+    Path trashPath = makeTrashRelativePath(trashCurrent, path);
+    Path baseTrashPath = makeTrashRelativePath(trashCurrent, path.getParent());
     
     IOException cause = null;
 
@@ -148,14 +157,16 @@ public class TrashPolicyDefault extends TrashPolicy {
           trashPath = new Path(orig + Time.now());
         }
         
-        if (fs.rename(path, trashPath))           // move to current trash
+        if (fs.rename(path, trashPath)) {           // move to current trash
+          LOG.info("Moved: '" + path + "' to trash at: " + trashPath);
           return true;
+        }
       } catch (IOException e) {
         cause = e;
       }
     }
     throw (IOException)
-      new IOException("Failed to move to trash: "+path).initCause(cause);
+      new IOException("Failed to move to trash: " + path).initCause(cause);
   }
 
   @SuppressWarnings("deprecation")
@@ -166,72 +177,32 @@ public class TrashPolicyDefault extends TrashPolicy {
 
   @SuppressWarnings("deprecation")
   public void createCheckpoint(Date date) throws IOException {
-
-    if (!fs.exists(current))                     // no trash, no checkpoint
-      return;
-
-    Path checkpointBase;
-    synchronized (CHECKPOINT) {
-      checkpointBase = new Path(trash, CHECKPOINT.format(date));
-
+    Collection<FileStatus> trashRoots = fs.getTrashRoots(false);
+    for (FileStatus trashRoot: trashRoots) {
+      LOG.info("TrashPolicyDefault#createCheckpoint for trashRoot: " +
+          trashRoot.getPath());
+      createCheckpoint(trashRoot.getPath(), date);
     }
-    Path checkpoint = checkpointBase;
-
-    int attempt = 0;
-    while (true) {
-      try {
-        fs.rename(current, checkpoint, Rename.NONE);
-        break;
-      } catch (FileAlreadyExistsException e) {
-        if (++attempt > 1000) {
-          throw new IOException("Failed to checkpoint trash: "+checkpoint);
-        }
-        checkpoint = checkpointBase.suffix("-" + attempt);
-      }
-    }
-
-    LOG.info("Created trash checkpoint: "+checkpoint.toUri().getPath());
   }
 
   @Override
   public void deleteCheckpoint() throws IOException {
-    FileStatus[] dirs = null;
-    
-    try {
-      dirs = fs.listStatus(trash);            // scan trash sub-directories
-    } catch (FileNotFoundException fnfe) {
-      return;
+    Collection<FileStatus> trashRoots = fs.getTrashRoots(false);
+    for (FileStatus trashRoot : trashRoots) {
+      LOG.info("TrashPolicyDefault#deleteCheckpoint for trashRoot: " +
+          trashRoot.getPath());
+      deleteCheckpoint(trashRoot.getPath());
     }
+  }
 
-    long now = Time.now();
-    for (int i = 0; i < dirs.length; i++) {
-      Path path = dirs[i].getPath();
-      String dir = path.toUri().getPath();
-      String name = path.getName();
-      if (name.equals(CURRENT.getName()))         // skip current
-        continue;
-
-      long time;
-      try {
-        time = getTimeFromCheckpoint(name);
-      } catch (ParseException e) {
-        LOG.warn("Unexpected item in trash: "+dir+". Ignoring.");
-        continue;
-      }
-
-      if ((now - deletionInterval) > time) {
-        if (fs.delete(path, true)) {
-          LOG.info("Deleted trash checkpoint: "+dir);
-        } else {
-          LOG.warn("Couldn't delete checkpoint: "+dir+" Ignoring.");
-        }
-      }
-    }
+  @Override
+  public Path getCurrentTrashDir() throws IOException {
+    return new Path(fs.getTrashRoot(null), CURRENT);
   }
 
   @Override
-  public Path getCurrentTrashDir() {
-    return current;
+  public Path getCurrentTrashDir(Path path) throws IOException {
+    return new Path(fs.getTrashRoot(path), CURRENT);
   }
 
   @Override
@@ -278,25 +249,24 @@ public class TrashPolicyDefault extends TrashPolicy {
         try {
           now = Time.now();
           if (now >= end) {
-
-            FileStatus[] homes = null;
+            Collection<FileStatus> trashRoots;
             try {
-              homes = fs.listStatus(homesParent);         // list all home dirs
+              trashRoots = fs.getTrashRoots(true);      // list all home dirs
             } catch (IOException e) {
-              LOG.warn("Trash can't list homes: "+e+" Sleeping.");
+              LOG.warn("Trash can't list all trash roots: "+e+" Sleeping.");
               continue;
             }
 
-            for (FileStatus home : homes) {         // dump each trash
-              if (!home.isDirectory())
+            for (FileStatus trashRoot : trashRoots) {   // dump each trash
+              if (!trashRoot.isDirectory())
                 continue;
               try {
-                TrashPolicyDefault trash = new TrashPolicyDefault(
-                    fs, home.getPath(), conf);
-                trash.deleteCheckpoint();
-                trash.createCheckpoint(new Date(now));
+                TrashPolicyDefault trash = new TrashPolicyDefault(fs, conf);
+                trash.deleteCheckpoint(trashRoot.getPath());
+                trash.createCheckpoint(trashRoot.getPath(), new Date(now));
               } catch (IOException e) {
-                LOG.warn("Trash caught: "+e+". Skipping "+home.getPath()+".");
+                LOG.warn("Trash caught: "+e+". Skipping " +
+                    trashRoot.getPath() + ".");
               } 
             }
           }
@@ -319,6 +289,69 @@ public class TrashPolicyDefault extends TrashPolicy {
     }
   }
 
+  private void createCheckpoint(Path trashRoot, Date date) throws IOException {
+    if (!fs.exists(new Path(trashRoot, CURRENT))) {
+      return;
+    }
+    Path checkpointBase;
+    synchronized (CHECKPOINT) {
+      checkpointBase = new Path(trashRoot, CHECKPOINT.format(date));
+    }
+    Path checkpoint = checkpointBase;
+    Path current = new Path(trashRoot, CURRENT);
+
+    int attempt = 0;
+    while (true) {
+      try {
+        fs.rename(current, checkpoint, Rename.NONE);
+        LOG.info("Created trash checkpoint: " + checkpoint.toUri().getPath());
+        break;
+      } catch (FileAlreadyExistsException e) {
+        if (++attempt > 1000) {
+          throw new IOException("Failed to checkpoint trash: " + checkpoint);
+        }
+        checkpoint = checkpointBase.suffix("-" + attempt);
+      }
+    }
+  }
+
+  private void deleteCheckpoint(Path trashRoot) throws IOException {
+    LOG.info("TrashPolicyDefault#deleteCheckpoint for trashRoot: " + trashRoot);
+
+    FileStatus[] dirs = null;
+    try {
+      dirs = fs.listStatus(trashRoot); // scan trash sub-directories
+    } catch (FileNotFoundException fnfe) {
+      return;
+    }
+
+    long now = Time.now();
+    for (int i = 0; i < dirs.length; i++) {
+      Path path = dirs[i].getPath();
+      String dir = path.toUri().getPath();
+      String name = path.getName();
+      if (name.equals(CURRENT.getName())) {         // skip current
+        continue;
+      }
+
+      long time;
+      try {
+        time = getTimeFromCheckpoint(name);
+      } catch (ParseException e) {
+        LOG.warn("Unexpected item in trash: "+dir+". Ignoring.");
+        continue;
+      }
+
+      if ((now - deletionInterval) > time) {
+        if (fs.delete(path, true)) {
+          LOG.info("Deleted trash checkpoint: "+dir);
+        } else {
+          LOG.warn("Couldn't delete checkpoint: " + dir + " Ignoring.");
+        }
+      }
+    }
+  }
+
   private long getTimeFromCheckpoint(String name) throws ParseException {
     long time;
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cbc7b6bf/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestHarFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestHarFileSystem.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestHarFileSystem.java
index b179c36..8ec9d80 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestHarFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestHarFileSystem.java
@@ -214,6 +214,10 @@ public class TestHarFileSystem {
 
     public Collection<? extends BlockStoragePolicySpi> getAllStoragePolicies()
         throws IOException;
+
+    public Path getTrashRoot(Path path) throws IOException;
+
+    public Collection<FileStatus> getTrashRoots(boolean allUsers) throws IOException;
   }
 
   @Test

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cbc7b6bf/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTrash.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTrash.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTrash.java
index 9a91733..88194fd 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTrash.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTrash.java
@@ -696,6 +696,10 @@ public class TestTrash extends TestCase {
     }
 
     @Override
+    public void initialize(Configuration conf, FileSystem fs) {
+    }
+
+    @Override
     public boolean isEnabled() {
       return false;
     }
@@ -719,6 +723,11 @@ public class TestTrash extends TestCase {
     }
 
     @Override
+    public Path getCurrentTrashDir(Path path) throws IOException {
+      return null;
+    }
+
+    @Override
     public Runnable getEmptier() throws IOException {
       return null;
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cbc7b6bf/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
index 57fac40..48efb1f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
@@ -25,6 +25,7 @@ import java.net.URI;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
+import java.util.Collections;
 import java.util.EnumSet;
 import java.util.List;
 import java.util.Map;
@@ -2305,4 +2306,65 @@ public class DistributedFileSystem extends FileSystem {
       throws IOException {
     return Arrays.asList(dfs.getErasureCodingPolicies());
   }
+
+  /**
+   * Get the root directory of Trash for a path in HDFS.
+   * 1. File in encryption zone returns /ez1/.Trash/username
+   * 2. File not in encryption zone returns /users/username/.Trash
+   * Caller appends either Current or checkpoint timestamp for trash destination
+   * @param path the path whose trash root is to be determined.
+   * @return trash root
+   * @throws IOException
+   */
+  @Override
+  public Path getTrashRoot(Path path) throws IOException {
+    if ((path == null) || !dfs.isHDFSEncryptionEnabled()) {
+      return super.getTrashRoot(path);
+    }
+
+    String absSrc = path.toUri().getPath();
+    EncryptionZone ez = dfs.getEZForPath(absSrc);
+    if ((ez != null) && !ez.getPath().equals(absSrc)) {
+      return this.makeQualified(
+          new Path(ez.getPath() + "/" + FileSystem.TRASH_PREFIX +
+              dfs.ugi.getShortUserName()));
+    } else {
+      return super.getTrashRoot(path);
+    }
+  }
+
+  /**
+   * Get all the trash roots of HDFS for current user or for all the users.
+   * 1. File deleted from non-encryption zone /user/username/.Trash
+   * 2. File deleted from encryption zones
+   *    e.g., ez1 rooted at /ez1 has its trash root at /ez1/.Trash/$USER
+   * @param allUsers return trashRoots of all users if true, used by emptier
+   * @return trash roots of HDFS
+   * @throws IOException
+   */
+  @Override
+  public Collection<FileStatus> getTrashRoots(boolean allUsers) throws IOException {
+    List<FileStatus> ret = new ArrayList<FileStatus>();
+    // Get normal trash roots
+    ret.addAll(super.getTrashRoots(allUsers));
+
+    // Get EZ Trash roots
+    final RemoteIterator<EncryptionZone> it = dfs.listEncryptionZones();
+    while (it.hasNext()) {
+      Path ezTrashRoot = new Path(it.next().getPath(), FileSystem.TRASH_PREFIX);
+      if (allUsers) {
+        for (FileStatus candidate : listStatus(ezTrashRoot)) {
+          if (exists(candidate.getPath())) {
+            ret.add(candidate);
+          }
+        }
+      } else {
+        Path userTrash = new Path(ezTrashRoot, System.getProperty("user.name"));
+        if (exists(userTrash)) {
+          ret.add(getFileStatus(userTrash));
+        }
+      }
+    }
+    return ret;
+  }
 }
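
As a rough illustration of the behaviour documented in the javadoc above (not code captured from this commit), the snippet below shows how getTrashRoot(Path) is expected to resolve for a path outside versus inside an encryption zone. The concrete paths, the zone layout, and the assumption that the default filesystem is a DistributedFileSystem with HDFS encryption enabled are all made-up for the example.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class TrashRootSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Assumed: fs is a DistributedFileSystem with an encryption zone at /ez1.
    FileSystem fs = FileSystem.get(conf);

    // For a file outside any encryption zone, the trash root stays under the
    // user's home directory, e.g. /user/<username>/.Trash
    Path plain = fs.getTrashRoot(new Path("/user/alice/report.csv"));

    // For a file inside the zone rooted at /ez1, the DistributedFileSystem
    // override returns a per-zone root, e.g. /ez1/.Trash/<username>
    Path inZone = fs.getTrashRoot(new Path("/ez1/secret/report.csv"));

    System.out.println("outside zone -> " + plain);
    System.out.println("inside zone  -> " + inZone);
  }
}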

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cbc7b6bf/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 89094bc..9e8b8a9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -209,6 +209,8 @@ Trunk (Unreleased)
     HDFS-9129. Move the safemode block count into BlockManager. (Mingliang Liu
     via jing9)
 
+    HDFS-8831. Trash Support for deletion in HDFS encryption zone. (xyao)
+
   OPTIMIZATIONS
 
   BUG FIXES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cbc7b6bf/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java
index 90cbc0b..b476483 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java
@@ -55,6 +55,7 @@ import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileSystemTestHelper;
 import org.apache.hadoop.fs.FileSystemTestWrapper;
+import org.apache.hadoop.fs.FsShell;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.RemoteIterator;
 import org.apache.hadoop.fs.permission.FsPermission;
@@ -96,6 +97,7 @@ import static org.mockito.Matchers.anyShort;
 import static org.mockito.Mockito.withSettings;
 import static org.mockito.Mockito.any;
 import static org.mockito.Mockito.anyString;
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_KEY;
 import static org.apache.hadoop.hdfs.DFSTestUtil.verifyFilesEqual;
 import static org.apache.hadoop.test.GenericTestUtils.assertExceptionContains;
 import static org.apache.hadoop.test.MetricsAsserts.assertGauge;
@@ -1407,4 +1409,49 @@ public class TestEncryptionZones {
       assertExceptionContains("Path not found: " + zoneFile, e);
     }
   }
+
+  @Test(timeout = 120000)
+  public void testEncryptionZoneWithTrash() throws Exception {
+    // Create the encryption zone1
+    final HdfsAdmin dfsAdmin =
+        new HdfsAdmin(FileSystem.getDefaultUri(conf), conf);
+    final Path zone1 = new Path("/zone1");
+    fs.mkdirs(zone1);
+    dfsAdmin.createEncryptionZone(zone1, TEST_KEY);
+
+    // Create the encrypted file in zone1
+    final Path encFile1 = new Path(zone1, "encFile1");
+    final int len = 8192;
+    DFSTestUtil.createFile(fs, encFile1, len, (short) 1, 0xFEED);
+
+    Configuration clientConf = new Configuration(conf);
+    clientConf.setLong(FS_TRASH_INTERVAL_KEY, 1);
+    FsShell shell = new FsShell(clientConf);
+
+    // Delete encrypted file from the shell with trash enabled
+    // Verify the file is moved to appropriate trash within the zone
+    verifyShellDeleteWithTrash(shell, encFile1);
+
+    // Delete encryption zone from the shell with trash enabled
+    // Verify the zone is moved to appropriate trash location in user's home dir
+    verifyShellDeleteWithTrash(shell, zone1);
+  }
+
+  private void verifyShellDeleteWithTrash(FsShell shell, Path path)
+      throws Exception{
+    try {
+      final Path trashFile =
+          new Path(shell.getCurrentTrashDir(path) + "/" + path);
+      String[] argv = new String[]{"-rm", "-r", path.toString()};
+      int res = ToolRunner.run(shell, argv);
+      assertEquals("rm failed", 0, res);
+      assertTrue("File not in trash : " + trashFile, fs.exists(trashFile));
+    } catch (IOException ioe) {
+      fail(ioe.getMessage());
+    } finally {
+      if (fs.exists(path)) {
+        fs.delete(path, true);
+      }
+    }
+  }
 }


[06/38] hadoop git commit: Update CHANGES.txt for commit of MR-6549 to branch-2.7 and branch-2.6.

Posted by as...@apache.org.
Update CHANGES.txt for commit of MR-6549 to branch-2.7 and branch-2.6.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/43acf9ab
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/43acf9ab
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/43acf9ab

Branch: refs/heads/yarn-2877
Commit: 43acf9ab8ef4eea7950fa99096cd99459cfa25ad
Parents: c37c3f4
Author: Jason Lowe <jl...@apache.org>
Authored: Mon Nov 30 17:54:39 2015 +0000
Committer: Jason Lowe <jl...@apache.org>
Committed: Mon Nov 30 17:54:39 2015 +0000

----------------------------------------------------------------------
 hadoop-mapreduce-project/CHANGES.txt | 6 ++++++
 1 file changed, 6 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/43acf9ab/hadoop-mapreduce-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt
index dc37bf1..2a995c7 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -686,6 +686,9 @@ Release 2.7.3 - UNRELEASED
     MAPREDUCE-5883. "Total megabyte-seconds" in job counters is slightly
     misleading (Nathan Roberts via jlowe)
 
+    MAPREDUCE-6549. multibyte delimiters with LineRecordReader cause
+    duplicate records (wilfreds via rkanter)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES
@@ -994,6 +997,9 @@ Release 2.6.3 - UNRELEASED
     MAPREDUCE-5883. "Total megabyte-seconds" in job counters is slightly
     misleading (Nathan Roberts via jlowe)
 
+    MAPREDUCE-6549. multibyte delimiters with LineRecordReader cause
+    duplicate records (wilfreds via rkanter)
+
 Release 2.6.2 - 2015-10-28
 
   INCOMPATIBLE CHANGES


[16/38] hadoop git commit: Move HDFS-9129 to trunk in CHANGES.txt

Posted by as...@apache.org.
Move HDFS-9129 to trunk in CHANGES.txt


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3c4a34e9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3c4a34e9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3c4a34e9

Branch: refs/heads/yarn-2877
Commit: 3c4a34e90c4734c1dce60cab113c42e55dcf0ed2
Parents: a49cc74
Author: Jing Zhao <ji...@apache.org>
Authored: Tue Dec 1 16:15:08 2015 -0800
Committer: Jing Zhao <ji...@apache.org>
Committed: Tue Dec 1 16:15:08 2015 -0800

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3c4a34e9/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index ee6d38f..89aaed4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -206,6 +206,9 @@ Trunk (Unreleased)
     HDFS-8968. Erasure coding: a comprehensive I/O throughput benchmark tool.
     (Rui Li via zhz)
 
+    HDFS-9129. Move the safemode block count into BlockManager. (Mingliang Liu
+    via jing9)
+
   OPTIMIZATIONS
 
   BUG FIXES
@@ -879,9 +882,6 @@ Release 2.9.0 - UNRELEASED
 
   IMPROVEMENTS
 
-      HDFS-9129. Move the safemode block count into BlockManager. (Mingliang Liu
-      via jing9)
-
   OPTIMIZATIONS
 
   BUG FIXES


[12/38] hadoop git commit: HDFS-9485. Make BlockManager#removeFromExcessReplicateMap accept BlockInfo instead of Block. Contributed by Mingliang Liu.

Posted by as...@apache.org.
HDFS-9485. Make BlockManager#removeFromExcessReplicateMap accept BlockInfo instead of Block. Contributed by Mingliang Liu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/830eb252
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/830eb252
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/830eb252

Branch: refs/heads/yarn-2877
Commit: 830eb252aaa4fec7ef2ec38cb66f669e8e1ecaa5
Parents: 1cc7e61
Author: Jing Zhao <ji...@apache.org>
Authored: Tue Dec 1 13:05:22 2015 -0800
Committer: Jing Zhao <ji...@apache.org>
Committed: Tue Dec 1 13:05:22 2015 -0800

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt                      | 3 +++
 .../apache/hadoop/hdfs/server/blockmanagement/BlockManager.java  | 4 ++--
 .../org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java | 2 +-
 3 files changed, 6 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/830eb252/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 77d5415..3f31f3e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1704,6 +1704,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-9269. Update the documentation and wrapper for fuse-dfs. 
     (Wei-Chiu Chuang via zhz)
 
+    HDFS-9485. Make BlockManager#removeFromExcessReplicateMap accept BlockInfo
+    instead of Block. (Mingliang Liu via jing9)
+
   OPTIMIZATIONS
 
     HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/830eb252/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index ef3a8cc..3033eaa 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -3930,7 +3930,7 @@ public class BlockManager implements BlockStatsMXBean {
     return corruptReplicas.numCorruptReplicas(block);
   }
 
-  public void removeBlockFromMap(Block block) {
+  public void removeBlockFromMap(BlockInfo block) {
     removeFromExcessReplicateMap(block);
     blocksMap.removeBlock(block);
     // If block is removed from blocksMap remove it from corruptReplicasMap
@@ -3940,7 +3940,7 @@ public class BlockManager implements BlockStatsMXBean {
   /**
    * If a block is removed from blocksMap, remove it from excessReplicateMap.
    */
-  private void removeFromExcessReplicateMap(Block block) {
+  private void removeFromExcessReplicateMap(BlockInfo block) {
     for (DatanodeStorageInfo info : blocksMap.getStorages(block)) {
       String uuid = info.getDatanodeDescriptor().getDatanodeUuid();
       LightWeightHashSet<BlockInfo> excessReplicas =

http://git-wip-us.apache.org/repos/asf/hadoop/blob/830eb252/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
index fa87bf2..0a71d78 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
@@ -82,7 +82,7 @@ class FSDirWriteFileOp {
     if (uc == null) {
       return false;
     }
-    fsd.getBlockManager().removeBlockFromMap(block);
+    fsd.getBlockManager().removeBlockFromMap(uc);
 
     if(NameNode.stateChangeLog.isDebugEnabled()) {
       NameNode.stateChangeLog.debug("DIR* FSDirectory.removeBlock: "


[36/38] hadoop git commit: HDFS-9414. Refactor reconfiguration of ClientDatanodeProtocol for reusability. (Contributed by Xiaobing Zhou)

Posted by as...@apache.org.
HDFS-9414. Refactor reconfiguration of ClientDatanodeProtocol for reusability. (Contributed by Xiaobing Zhou)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/86c95cb3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/86c95cb3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/86c95cb3

Branch: refs/heads/yarn-2877
Commit: 86c95cb31a392d2ee4dcf3cc36e924ad34000b27
Parents: 4265a85
Author: Arpit Agarwal <ar...@apache.org>
Authored: Fri Dec 4 20:24:08 2015 -0800
Committer: Arpit Agarwal <ar...@apache.org>
Committed: Fri Dec 4 20:24:08 2015 -0800

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs-client/pom.xml  |   1 +
 .../hdfs/protocol/ReconfigurationProtocol.java  |  36 +++++
 .../ClientDatanodeProtocolTranslatorPB.java     |  46 ++----
 .../protocolPB/ReconfigurationProtocolPB.java   |  43 ++++++
 .../ReconfigurationProtocolTranslatorPB.java    | 146 +++++++++++++++++++
 .../ReconfigurationProtocolUtils.java           |  64 ++++++++
 .../src/main/proto/ClientDatanodeProtocol.proto |  32 +---
 .../main/proto/ReconfigurationProtocol.proto    |  74 ++++++++++
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |  12 +-
 ...tDatanodeProtocolServerSideTranslatorPB.java |  58 ++------
 ...igurationProtocolServerSideTranslatorPB.java |  89 +++++++++++
 .../ReconfigurationProtocolServerSideUtils.java |  78 ++++++++++
 .../hadoop/hdfs/server/datanode/DataNode.java   |  21 ++-
 13 files changed, 579 insertions(+), 121 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/86c95cb3/hadoop-hdfs-project/hadoop-hdfs-client/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/pom.xml b/hadoop-hdfs-project/hadoop-hdfs-client/pom.xml
index c70b890..b0e0e1c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/pom.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/pom.xml
@@ -118,6 +118,7 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
                   <include>encryption.proto</include>
                   <include>inotify.proto</include>
                   <include>erasurecoding.proto</include>
+                  <include>ReconfigurationProtocol.proto</include>
                 </includes>
               </source>
               <output>${project.build.directory}/generated-sources/java</output>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/86c95cb3/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ReconfigurationProtocol.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ReconfigurationProtocol.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ReconfigurationProtocol.java
new file mode 100644
index 0000000..23fd57c
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ReconfigurationProtocol.java
@@ -0,0 +1,36 @@
+package org.apache.hadoop.hdfs.protocol;
+
+
+import java.io.IOException;
+import java.util.List;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.conf.ReconfigurationTaskStatus;
+
+/**********************************************************************
+ * ReconfigurationProtocol is used by HDFS admin to reload configuration
+ * for NN/DN without restarting them.
+ **********************************************************************/
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public interface ReconfigurationProtocol {
+
+  long versionID = 1L;
+
+  /**
+   * Asynchronously reload configuration on disk and apply changes.
+   */
+  void startReconfiguration() throws IOException;
+
+  /**
+   * Get the status of the previously issued reconfig task.
+   * @see {@link org.apache.hadoop.conf.ReconfigurationTaskStatus}.
+   */
+  ReconfigurationTaskStatus getReconfigurationStatus() throws IOException;
+
+  /**
+   * Get a list of allowed properties for reconfiguration.
+   */
+  List<String> listReconfigurableProperties() throws IOException;
+}
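
To make the contract above concrete, here is a toy, in-memory implementation of ReconfigurationProtocol. It is purely illustrative, it is not how DataNode or NameNode wire up reconfiguration, and the property name it reports is a made-up example.

import java.io.IOException;
import java.util.Collections;
import java.util.List;

import org.apache.hadoop.conf.ReconfigurationTaskStatus;
import org.apache.hadoop.hdfs.protocol.ReconfigurationProtocol;
import org.apache.hadoop.util.Time;

public class ToyReconfigurable implements ReconfigurationProtocol {
  private volatile long startTime;
  private volatile long endTime;

  @Override
  public void startReconfiguration() throws IOException {
    startTime = Time.now();   // pretend the async reload starts here
    endTime = Time.now();     // ...and finishes immediately
  }

  @Override
  public ReconfigurationTaskStatus getReconfigurationStatus() throws IOException {
    // A null change map means there are no per-property results to report.
    return new ReconfigurationTaskStatus(startTime, endTime, null);
  }

  @Override
  public List<String> listReconfigurableProperties() throws IOException {
    return Collections.singletonList("dfs.datanode.data.dir"); // example only
  }
}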

http://git-wip-us.apache.org/repos/asf/hadoop/blob/86c95cb3/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolTranslatorPB.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolTranslatorPB.java
index b9f7a4d..2fffffd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolTranslatorPB.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolTranslatorPB.java
@@ -21,17 +21,13 @@ import java.io.Closeable;
 import java.io.IOException;
 import java.net.InetSocketAddress;
 import java.util.List;
-import java.util.Map;
 
 import javax.net.SocketFactory;
 
-import com.google.common.base.Optional;
-import com.google.common.collect.Maps;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.ReconfigurationTaskStatus;
-import org.apache.hadoop.conf.ReconfigurationUtil.PropertyChange;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.hdfs.client.BlockReportOptions;
 import org.apache.hadoop.hdfs.protocol.BlockLocalPathInfo;
@@ -48,14 +44,12 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlo
 import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto;
-import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ListReconfigurablePropertiesRequestProto;
-import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ListReconfigurablePropertiesResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.ListReconfigurablePropertiesRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.ListReconfigurablePropertiesResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto;
-import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReconfigurationStatusRequestProto;
-import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReconfigurationStatusResponseProto;
-import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReconfigurationStatusConfigChangeProto;
+import org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.GetReconfigurationStatusRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeRequestProto;
-import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.StartReconfigurationRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.StartReconfigurationRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportRequestProto;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import org.apache.hadoop.ipc.ProtobufHelper;
@@ -273,39 +267,19 @@ public class ClientDatanodeProtocolTranslatorPB implements
   @Override
   public ReconfigurationTaskStatus getReconfigurationStatus()
       throws IOException {
-    GetReconfigurationStatusResponseProto response;
-    Map<PropertyChange, Optional<String>> statusMap = null;
-    long startTime;
-    long endTime = 0;
     try {
-      response = rpcProxy.getReconfigurationStatus(NULL_CONTROLLER,
-          VOID_GET_RECONFIG_STATUS);
-      startTime = response.getStartTime();
-      if (response.hasEndTime()) {
-        endTime = response.getEndTime();
-      }
-      if (response.getChangesCount() > 0) {
-        statusMap = Maps.newHashMap();
-        for (GetReconfigurationStatusConfigChangeProto change :
-            response.getChangesList()) {
-          PropertyChange pc = new PropertyChange(
-              change.getName(), change.getNewValue(), change.getOldValue());
-          String errorMessage = null;
-          if (change.hasErrorMessage()) {
-            errorMessage = change.getErrorMessage();
-          }
-          statusMap.put(pc, Optional.fromNullable(errorMessage));
-        }
-      }
+      return ReconfigurationProtocolUtils.getReconfigurationStatus(
+          rpcProxy
+          .getReconfigurationStatus(
+              NULL_CONTROLLER,
+              VOID_GET_RECONFIG_STATUS));
     } catch (ServiceException e) {
       throw ProtobufHelper.getRemoteException(e);
     }
-    return new ReconfigurationTaskStatus(startTime, endTime, statusMap);
   }
 
   @Override
-  public List<String> listReconfigurableProperties()
-      throws IOException {
+  public List<String> listReconfigurableProperties() throws IOException {
     ListReconfigurablePropertiesResponseProto response;
     try {
       response = rpcProxy.listReconfigurableProperties(NULL_CONTROLLER,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/86c95cb3/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ReconfigurationProtocolPB.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ReconfigurationProtocolPB.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ReconfigurationProtocolPB.java
new file mode 100644
index 0000000..659edac
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ReconfigurationProtocolPB.java
@@ -0,0 +1,43 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.protocolPB;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.ipc.ProtocolInfo;
+import org.apache.hadoop.security.KerberosInfo;
+import org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.ReconfigurationProtocolService;
+
+@KerberosInfo(serverPrincipal =
+    CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY)
+@ProtocolInfo(
+    protocolName = "org.apache.hadoop.hdfs.protocol.ReconfigurationProtocol",
+    protocolVersion = 1)
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+/**
+ * Protocol that clients use to communicate with the NN/DN to do
+ * reconfiguration on the fly.
+ *
+ * Note: This extends the protocolbuffer service based interface to
+ * add annotations required for security.
+ */
+public interface ReconfigurationProtocolPB extends
+    ReconfigurationProtocolService.BlockingInterface {
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/86c95cb3/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ReconfigurationProtocolTranslatorPB.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ReconfigurationProtocolTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ReconfigurationProtocolTranslatorPB.java
new file mode 100644
index 0000000..472f3d8
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ReconfigurationProtocolTranslatorPB.java
@@ -0,0 +1,146 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.protocolPB;
+
+import java.io.Closeable;
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.util.List;
+
+import javax.net.SocketFactory;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.conf.ReconfigurationTaskStatus;
+import org.apache.hadoop.hdfs.protocol.ReconfigurationProtocol;
+import org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.GetReconfigurationStatusRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.ListReconfigurablePropertiesRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.ListReconfigurablePropertiesResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.StartReconfigurationRequestProto;
+import org.apache.hadoop.ipc.ProtobufHelper;
+import org.apache.hadoop.ipc.ProtobufRpcEngine;
+import org.apache.hadoop.ipc.ProtocolMetaInterface;
+import org.apache.hadoop.ipc.ProtocolTranslator;
+import org.apache.hadoop.ipc.RPC;
+import org.apache.hadoop.ipc.RpcClientUtil;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.protobuf.RpcController;
+import com.google.protobuf.ServiceException;
+
+/**
+ * This class is the client side translator to translate the requests made on
+ * {@link ReconfigurationProtocol} interfaces to the RPC server implementing
+ * {@link ReconfigurationProtocolPB}.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Stable
+public class ReconfigurationProtocolTranslatorPB implements
+    ProtocolMetaInterface, ReconfigurationProtocol, ProtocolTranslator,
+    Closeable {
+  public static final Logger LOG = LoggerFactory
+      .getLogger(ReconfigurationProtocolTranslatorPB.class);
+
+  private static final RpcController NULL_CONTROLLER = null;
+  private static final StartReconfigurationRequestProto VOID_START_RECONFIG =
+      StartReconfigurationRequestProto.newBuilder().build();
+
+  private static final ListReconfigurablePropertiesRequestProto
+      VOID_LIST_RECONFIGURABLE_PROPERTIES =
+      ListReconfigurablePropertiesRequestProto.newBuilder().build();
+
+  private static final GetReconfigurationStatusRequestProto
+      VOID_GET_RECONFIG_STATUS =
+      GetReconfigurationStatusRequestProto.newBuilder().build();
+
+  private final ReconfigurationProtocolPB rpcProxy;
+
+  public ReconfigurationProtocolTranslatorPB(InetSocketAddress addr,
+      UserGroupInformation ticket, Configuration conf, SocketFactory factory)
+      throws IOException {
+    rpcProxy = createReconfigurationProtocolProxy(addr, ticket, conf, factory,
+        0);
+  }
+
+  static ReconfigurationProtocolPB createReconfigurationProtocolProxy(
+      InetSocketAddress addr, UserGroupInformation ticket, Configuration conf,
+      SocketFactory factory, int socketTimeout) throws IOException {
+    RPC.setProtocolEngine(conf, ReconfigurationProtocolPB.class,
+        ProtobufRpcEngine.class);
+    return RPC.getProxy(ReconfigurationProtocolPB.class,
+        RPC.getProtocolVersion(ReconfigurationProtocolPB.class),
+        addr, ticket, conf, factory, socketTimeout);
+  }
+
+  @Override
+  public void close() throws IOException {
+    RPC.stopProxy(rpcProxy);
+  }
+
+  @Override
+  public Object getUnderlyingProxyObject() {
+    return rpcProxy;
+  }
+
+  @Override
+  public void startReconfiguration() throws IOException {
+    try {
+      rpcProxy.startReconfiguration(NULL_CONTROLLER, VOID_START_RECONFIG);
+    } catch (ServiceException e) {
+      throw ProtobufHelper.getRemoteException(e);
+    }
+  }
+
+  @Override
+  public ReconfigurationTaskStatus getReconfigurationStatus()
+      throws IOException {
+    try {
+      return ReconfigurationProtocolUtils.getReconfigurationStatus(
+          rpcProxy
+          .getReconfigurationStatus(
+              NULL_CONTROLLER,
+              VOID_GET_RECONFIG_STATUS));
+    } catch (ServiceException e) {
+      throw ProtobufHelper.getRemoteException(e);
+    }
+  }
+
+  @Override
+  public List<String> listReconfigurableProperties() throws IOException {
+    ListReconfigurablePropertiesResponseProto response;
+    try {
+      response = rpcProxy.listReconfigurableProperties(NULL_CONTROLLER,
+          VOID_LIST_RECONFIGURABLE_PROPERTIES);
+      return response.getNameList();
+    } catch (ServiceException e) {
+      throw ProtobufHelper.getRemoteException(e);
+    }
+  }
+
+  @Override
+  public boolean isMethodSupported(String methodName) throws IOException {
+    return RpcClientUtil.isMethodSupported(rpcProxy,
+        ReconfigurationProtocolPB.class,
+        RPC.RpcKind.RPC_PROTOCOL_BUFFER,
+        RPC.getProtocolVersion(ReconfigurationProtocolPB.class),
+        methodName);
+  }
+}
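
A hedged sketch of how a client might drive the translator above: construct it against a remote address and issue the three calls. The host, port, and try-with-resources setup are assumptions made for the example, not code from this commit.

import java.net.InetSocketAddress;
import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.ReconfigurationTaskStatus;
import org.apache.hadoop.hdfs.protocolPB.ReconfigurationProtocolTranslatorPB;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.UserGroupInformation;

public class ReconfigClientSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    InetSocketAddress addr =
        new InetSocketAddress("dn-host.example.com", 50020); // hypothetical

    // The translator implements Closeable, so try-with-resources stops the proxy.
    try (ReconfigurationProtocolTranslatorPB client =
             new ReconfigurationProtocolTranslatorPB(addr,
                 UserGroupInformation.getCurrentUser(), conf,
                 NetUtils.getDefaultSocketFactory(conf))) {
      client.startReconfiguration();                 // kick off the async reload
      ReconfigurationTaskStatus status = client.getReconfigurationStatus();
      List<String> props = client.listReconfigurableProperties();
      // Callers can inspect 'status' for start/end times and per-property results.
      System.out.println("reconfigurable properties: " + props);
    }
  }
}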

http://git-wip-us.apache.org/repos/asf/hadoop/blob/86c95cb3/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ReconfigurationProtocolUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ReconfigurationProtocolUtils.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ReconfigurationProtocolUtils.java
new file mode 100644
index 0000000..a3fead9
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ReconfigurationProtocolUtils.java
@@ -0,0 +1,64 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs.protocolPB;
+
+import java.util.Map;
+
+import org.apache.hadoop.conf.ReconfigurationTaskStatus;
+import org.apache.hadoop.conf.ReconfigurationUtil.PropertyChange;
+import org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.GetReconfigurationStatusConfigChangeProto;
+import org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.GetReconfigurationStatusResponseProto;
+
+import com.google.common.base.Optional;
+import com.google.common.collect.Maps;
+
+/**
+ * This is a client side utility class that handles
+ * common logic related to parameter reconfiguration.
+ */
+public class ReconfigurationProtocolUtils {
+  private ReconfigurationProtocolUtils() {
+  }
+
+  public static ReconfigurationTaskStatus getReconfigurationStatus(
+      GetReconfigurationStatusResponseProto response) {
+    Map<PropertyChange, Optional<String>> statusMap = null;
+    long startTime;
+    long endTime = 0;
+
+    startTime = response.getStartTime();
+    if (response.hasEndTime()) {
+      endTime = response.getEndTime();
+    }
+    if (response.getChangesCount() > 0) {
+      statusMap = Maps.newHashMap();
+      for (GetReconfigurationStatusConfigChangeProto change : response
+          .getChangesList()) {
+        PropertyChange pc = new PropertyChange(change.getName(),
+            change.getNewValue(), change.getOldValue());
+        String errorMessage = null;
+        if (change.hasErrorMessage()) {
+          errorMessage = change.getErrorMessage();
+        }
+        statusMap.put(pc, Optional.fromNullable(errorMessage));
+      }
+    }
+    return new ReconfigurationTaskStatus(startTime, endTime, statusMap);
+  }
+}
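
A small, self-contained example of the shared helper above (illustrative only; the field values are arbitrary): it builds a status response proto by hand and converts it with ReconfigurationProtocolUtils, which is the same path both translators now take when a real response arrives over RPC.

import org.apache.hadoop.conf.ReconfigurationTaskStatus;
import org.apache.hadoop.hdfs.protocolPB.ReconfigurationProtocolUtils;
import org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.GetReconfigurationStatusResponseProto;

public class ReconfigStatusConversionSketch {
  public static void main(String[] args) {
    // Arbitrary example values; no per-property changes, so the resulting
    // status map inside ReconfigurationTaskStatus stays null.
    GetReconfigurationStatusResponseProto response =
        GetReconfigurationStatusResponseProto.newBuilder()
            .setStartTime(1L)
            .setEndTime(2L)
            .build();

    ReconfigurationTaskStatus status =
        ReconfigurationProtocolUtils.getReconfigurationStatus(response);
    System.out.println("converted status: " + status);
  }
}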

http://git-wip-us.apache.org/repos/asf/hadoop/blob/86c95cb3/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientDatanodeProtocol.proto
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientDatanodeProtocol.proto b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientDatanodeProtocol.proto
index dd39546..954fedc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientDatanodeProtocol.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientDatanodeProtocol.proto
@@ -33,6 +33,7 @@ package hadoop.hdfs;
 
 import "Security.proto";
 import "hdfs.proto";
+import "ReconfigurationProtocol.proto";
 
 /**
  * block - block for which visible length is requested
@@ -123,12 +124,6 @@ message GetDatanodeInfoResponseProto {
   required DatanodeLocalInfoProto localInfo = 1;
 }
 
-/** Asks DataNode to reload configuration file. */
-message StartReconfigurationRequestProto {
-}
-
-message StartReconfigurationResponseProto {
-}
 
 message TriggerBlockReportRequestProto {
   required bool incremental = 1;
@@ -137,31 +132,6 @@ message TriggerBlockReportRequestProto {
 message TriggerBlockReportResponseProto {
 }
 
-/** Query the running status of reconfiguration process */
-message GetReconfigurationStatusRequestProto {
-}
-
-message GetReconfigurationStatusConfigChangeProto {
-  required string name = 1;
-  required string oldValue = 2;
-  optional string newValue = 3;
-  optional string errorMessage = 4;  // It is empty if success.
-}
-
-message GetReconfigurationStatusResponseProto {
-  required int64 startTime = 1;
-  optional int64 endTime = 2;
-  repeated GetReconfigurationStatusConfigChangeProto changes = 3;
-}
-
-message ListReconfigurablePropertiesRequestProto {
-}
-
-/** Query the reconfigurable properties on DataNode. */
-message ListReconfigurablePropertiesResponseProto {
-  repeated string name = 1;
-}
-
 message GetBalancerBandwidthRequestProto {
 }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/86c95cb3/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ReconfigurationProtocol.proto
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ReconfigurationProtocol.proto b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ReconfigurationProtocol.proto
new file mode 100644
index 0000000..12a38b1
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ReconfigurationProtocol.proto
@@ -0,0 +1,74 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+ // This file contains protocol buffers that are used to reconfigure NameNode
+ // and DataNode by HDFS admin.
+
+option java_package = "org.apache.hadoop.hdfs.protocol.proto";
+option java_outer_classname = "ReconfigurationProtocolProtos";
+option java_generic_services = true;
+option java_generate_equals_and_hash = true;
+package hadoop.hdfs;
+
+/** Asks NN/DN to reload configuration file. */
+message StartReconfigurationRequestProto {
+}
+
+message StartReconfigurationResponseProto {
+}
+
+/** Query the running status of reconfiguration process */
+message GetReconfigurationStatusRequestProto {
+}
+
+message GetReconfigurationStatusConfigChangeProto {
+  required string name = 1;
+  required string oldValue = 2;
+  optional string newValue = 3;
+  optional string errorMessage = 4;  // It is empty if success.
+}
+
+message GetReconfigurationStatusResponseProto {
+  required int64 startTime = 1;
+  optional int64 endTime = 2;
+  repeated GetReconfigurationStatusConfigChangeProto changes = 3;
+}
+
+/** Query the reconfigurable properties on NN/DN. */
+message ListReconfigurablePropertiesRequestProto {
+}
+
+message ListReconfigurablePropertiesResponseProto {
+  repeated string name = 1;
+}
+
+/**
+ * Protocol used from client to the NN/DN.
+ * See the request and response for details of rpc call.
+ */
+service ReconfigurationProtocolService {
+  rpc getReconfigurationStatus(GetReconfigurationStatusRequestProto)
+      returns(GetReconfigurationStatusResponseProto);
+
+  rpc startReconfiguration(StartReconfigurationRequestProto)
+      returns(StartReconfigurationResponseProto);
+
+  rpc listReconfigurableProperties(
+      ListReconfigurablePropertiesRequestProto)
+      returns(ListReconfigurablePropertiesResponseProto);
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/86c95cb3/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index c7074b5..118df6e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -883,11 +883,15 @@ Release 2.9.0 - UNRELEASED
   NEW FEATURES
 
   IMPROVEMENTS
-      HDFS-9267. TestDiskError should get stored replicas through
-      FsDatasetTestUtils. (Lei (Eddy) Xu via Colin P. McCabe)
 
-      HDFS-9491. Tests should get the number of pending async delets via
-      FsDatasetTestUtils. (Tony Wu via lei)
+    HDFS-9267. TestDiskError should get stored replicas through
+    FsDatasetTestUtils. (Lei (Eddy) Xu via Colin P. McCabe)
+
+    HDFS-9491. Tests should get the number of pending async delets via
+    FsDatasetTestUtils. (Tony Wu via lei)
+
+    HDFS-9414. Refactor reconfiguration of ClientDatanodeProtocol for
+    reusability. (Xiaobing Zhou via Arpit Agarwal)
 
   OPTIMIZATIONS
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/86c95cb3/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolServerSideTranslatorPB.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolServerSideTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolServerSideTranslatorPB.java
index 76ad820..0feecc1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolServerSideTranslatorPB.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolServerSideTranslatorPB.java
@@ -18,12 +18,8 @@
 package org.apache.hadoop.hdfs.protocolPB;
 
 import java.io.IOException;
-import java.util.Map;
 
-import com.google.common.base.Optional;
 import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.conf.ReconfigurationTaskStatus;
-import org.apache.hadoop.conf.ReconfigurationUtil.PropertyChange;
 import org.apache.hadoop.hdfs.client.BlockReportOptions;
 import org.apache.hadoop.hdfs.protocol.BlockLocalPathInfo;
 import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
@@ -35,19 +31,18 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlo
 import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoResponseProto;
-import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReconfigurationStatusConfigChangeProto;
-import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReconfigurationStatusRequestProto;
-import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReconfigurationStatusResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.GetReconfigurationStatusRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.GetReconfigurationStatusResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto;
-import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ListReconfigurablePropertiesRequestProto;
-import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ListReconfigurablePropertiesResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.ListReconfigurablePropertiesRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.ListReconfigurablePropertiesResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeResponseProto;
-import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.StartReconfigurationRequestProto;
-import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.StartReconfigurationResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.StartReconfigurationRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.StartReconfigurationResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportResponseProto;
 
@@ -162,7 +157,7 @@ public class ClientDatanodeProtocolServerSideTranslatorPB implements
   @Override
   public StartReconfigurationResponseProto startReconfiguration(
       RpcController unused, StartReconfigurationRequestProto request)
-    throws ServiceException {
+      throws ServiceException {
     try {
       impl.startReconfiguration();
     } catch (IOException e) {
@@ -173,54 +168,27 @@ public class ClientDatanodeProtocolServerSideTranslatorPB implements
 
   @Override
   public ListReconfigurablePropertiesResponseProto listReconfigurableProperties(
-        RpcController controller,
-        ListReconfigurablePropertiesRequestProto request)
+      RpcController controller,
+      ListReconfigurablePropertiesRequestProto request)
       throws ServiceException {
-    ListReconfigurablePropertiesResponseProto.Builder builder =
-        ListReconfigurablePropertiesResponseProto.newBuilder();
     try {
-      for (String name : impl.listReconfigurableProperties()) {
-        builder.addName(name);
-      }
+      return ReconfigurationProtocolServerSideUtils
+          .listReconfigurableProperties(impl.listReconfigurableProperties());
     } catch (IOException e) {
       throw new ServiceException(e);
     }
-    return builder.build();
   }
 
   @Override
   public GetReconfigurationStatusResponseProto getReconfigurationStatus(
       RpcController unused, GetReconfigurationStatusRequestProto request)
       throws ServiceException {
-    GetReconfigurationStatusResponseProto.Builder builder =
-        GetReconfigurationStatusResponseProto.newBuilder();
     try {
-      ReconfigurationTaskStatus status = impl.getReconfigurationStatus();
-      builder.setStartTime(status.getStartTime());
-      if (status.stopped()) {
-        builder.setEndTime(status.getEndTime());
-        assert status.getStatus() != null;
-        for (Map.Entry<PropertyChange, Optional<String>> result :
-            status.getStatus().entrySet()) {
-          GetReconfigurationStatusConfigChangeProto.Builder changeBuilder =
-              GetReconfigurationStatusConfigChangeProto.newBuilder();
-          PropertyChange change = result.getKey();
-          changeBuilder.setName(change.prop);
-          changeBuilder.setOldValue(change.oldVal != null ? change.oldVal : "");
-          if (change.newVal != null) {
-            changeBuilder.setNewValue(change.newVal);
-          }
-          if (result.getValue().isPresent()) {
-            // Get full stack trace.
-            changeBuilder.setErrorMessage(result.getValue().get());
-          }
-          builder.addChanges(changeBuilder);
-        }
-      }
+      return ReconfigurationProtocolServerSideUtils
+          .getReconfigurationStatus(impl.getReconfigurationStatus());
     } catch (IOException e) {
       throw new ServiceException(e);
     }
-    return builder.build();
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hadoop/blob/86c95cb3/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ReconfigurationProtocolServerSideTranslatorPB.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ReconfigurationProtocolServerSideTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ReconfigurationProtocolServerSideTranslatorPB.java
new file mode 100644
index 0000000..7caed68
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ReconfigurationProtocolServerSideTranslatorPB.java
@@ -0,0 +1,89 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.protocolPB;
+
+import java.io.IOException;
+
+import org.apache.hadoop.hdfs.protocol.ReconfigurationProtocol;
+import org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.GetReconfigurationStatusRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.GetReconfigurationStatusResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.ListReconfigurablePropertiesRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.ListReconfigurablePropertiesResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.StartReconfigurationRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.StartReconfigurationResponseProto;
+
+import com.google.protobuf.RpcController;
+import com.google.protobuf.ServiceException;
+
+/**
+ * This class is used on the server side. Calls come across the wire for the
+ * protocol {@link ReconfigurationProtocolPB}.
+ * This class translates the PB data types
+ * to the native data types used inside the NN/DN as specified in the generic
+ * ReconfigurationProtocol.
+ */
+public class ReconfigurationProtocolServerSideTranslatorPB implements
+    ReconfigurationProtocolPB {
+
+  private final ReconfigurationProtocol impl;
+
+  private static final StartReconfigurationResponseProto START_RECONFIG_RESP =
+      StartReconfigurationResponseProto.newBuilder().build();
+
+  public ReconfigurationProtocolServerSideTranslatorPB(
+      ReconfigurationProtocol impl) {
+    this.impl = impl;
+  }
+
+  @Override
+  public StartReconfigurationResponseProto startReconfiguration(
+      RpcController controller, StartReconfigurationRequestProto request)
+      throws ServiceException {
+    try {
+      impl.startReconfiguration();
+    } catch (IOException e) {
+      throw new ServiceException(e);
+    }
+    return START_RECONFIG_RESP;
+  }
+
+  @Override
+  public ListReconfigurablePropertiesResponseProto listReconfigurableProperties(
+      RpcController controller,
+      ListReconfigurablePropertiesRequestProto request)
+      throws ServiceException {
+    try {
+      return ReconfigurationProtocolServerSideUtils
+          .listReconfigurableProperties(impl.listReconfigurableProperties());
+    } catch (IOException e) {
+      throw new ServiceException(e);
+    }
+  }
+
+  @Override
+  public GetReconfigurationStatusResponseProto getReconfigurationStatus(
+      RpcController unused, GetReconfigurationStatusRequestProto request)
+      throws ServiceException {
+    try {
+      return ReconfigurationProtocolServerSideUtils
+          .getReconfigurationStatus(impl.getReconfigurationStatus());
+    } catch (IOException e) {
+      throw new ServiceException(e);
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/86c95cb3/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ReconfigurationProtocolServerSideUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ReconfigurationProtocolServerSideUtils.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ReconfigurationProtocolServerSideUtils.java
new file mode 100644
index 0000000..29c7326
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ReconfigurationProtocolServerSideUtils.java
@@ -0,0 +1,78 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.protocolPB;
+
+import java.util.List;
+import java.util.Map;
+
+import org.apache.hadoop.conf.ReconfigurationTaskStatus;
+import org.apache.hadoop.conf.ReconfigurationUtil.PropertyChange;
+import org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.GetReconfigurationStatusConfigChangeProto;
+import org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.GetReconfigurationStatusResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.ListReconfigurablePropertiesResponseProto;
+
+import com.google.common.base.Optional;
+
+/**
+ * This is a server side utility class that handles
+ * common logic related to parameter reconfiguration.
+ */
+public class ReconfigurationProtocolServerSideUtils {
+  private ReconfigurationProtocolServerSideUtils() {
+  }
+
+  public static ListReconfigurablePropertiesResponseProto
+      listReconfigurableProperties(
+          List<String> reconfigurableProperties) {
+    ListReconfigurablePropertiesResponseProto.Builder builder =
+        ListReconfigurablePropertiesResponseProto.newBuilder();
+    for (String name : reconfigurableProperties) {
+      builder.addName(name);
+    }
+    return builder.build();
+  }
+
+  public static GetReconfigurationStatusResponseProto getReconfigurationStatus(
+      ReconfigurationTaskStatus status) {
+    GetReconfigurationStatusResponseProto.Builder builder =
+        GetReconfigurationStatusResponseProto.newBuilder();
+
+    builder.setStartTime(status.getStartTime());
+    if (status.stopped()) {
+      builder.setEndTime(status.getEndTime());
+      assert status.getStatus() != null;
+      for (Map.Entry<PropertyChange, Optional<String>> result : status
+          .getStatus().entrySet()) {
+        GetReconfigurationStatusConfigChangeProto.Builder changeBuilder =
+            GetReconfigurationStatusConfigChangeProto.newBuilder();
+        PropertyChange change = result.getKey();
+        changeBuilder.setName(change.prop);
+        changeBuilder.setOldValue(change.oldVal != null ? change.oldVal : "");
+        if (change.newVal != null) {
+          changeBuilder.setNewValue(change.newVal);
+        }
+        if (result.getValue().isPresent()) {
+          // Get full stack trace.
+          changeBuilder.setErrorMessage(result.getValue().get());
+        }
+        builder.addChanges(changeBuilder);
+      }
+    }
+    return builder.build();
+  }
+}
\ No newline at end of file
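The helper above now owns the protobuf response construction that used to be inlined in ClientDatanodeProtocolServerSideTranslatorPB, so both translators can share it. A quick usage sketch (the property names are purely illustrative; imports follow the file above plus java.util.Arrays):

  // Build the ListReconfigurableProperties response from plain strings.
  List<String> props = Arrays.asList(
      "dfs.datanode.data.dir",
      "dfs.datanode.balance.max.concurrent.moves");
  ListReconfigurablePropertiesResponseProto resp =
      ReconfigurationProtocolServerSideUtils.listReconfigurableProperties(props);
  // resp.getNameList() mirrors 'props' and can be returned directly from a
  // translator's listReconfigurableProperties() RPC method.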

http://git-wip-us.apache.org/repos/asf/hadoop/blob/86c95cb3/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index 150ce6b..15e556b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -49,6 +49,7 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_MAX_NUM_BLOCKS_TO_LOG_KEY
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_METRICS_LOGGER_PERIOD_SECONDS_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_METRICS_LOGGER_PERIOD_SECONDS_KEY;
 import static org.apache.hadoop.util.ExitUtil.terminate;
+import org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.ReconfigurationProtocolService;
 
 import java.io.BufferedOutputStream;
 import java.io.ByteArrayInputStream;
@@ -124,6 +125,7 @@ import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.DatanodeLocalInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.protocol.ReconfigurationProtocol;
 import org.apache.hadoop.hdfs.protocol.datatransfer.BlockConstructionStage;
 import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtocol;
 import org.apache.hadoop.hdfs.protocol.datatransfer.IOStreamPair;
@@ -143,6 +145,8 @@ import org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolPB;
 import org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolServerSideTranslatorPB;
 import org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB;
 import org.apache.hadoop.hdfs.protocolPB.PBHelperClient;
+import org.apache.hadoop.hdfs.protocolPB.ReconfigurationProtocolPB;
+import org.apache.hadoop.hdfs.protocolPB.ReconfigurationProtocolServerSideTranslatorPB;
 import org.apache.hadoop.hdfs.security.token.block.BlockPoolTokenSecretManager;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier.AccessMode;
@@ -251,7 +255,7 @@ import org.slf4j.LoggerFactory;
 @InterfaceAudience.Private
 public class DataNode extends ReconfigurableBase
     implements InterDatanodeProtocol, ClientDatanodeProtocol,
-        TraceAdminProtocol, DataNodeMXBean {
+        TraceAdminProtocol, DataNodeMXBean, ReconfigurationProtocol {
   public static final Logger LOG = LoggerFactory.getLogger(DataNode.class);
   
   static{
@@ -914,7 +918,14 @@ public class DataNode extends ReconfigurableBase
             conf.getInt(DFS_DATANODE_HANDLER_COUNT_KEY,
                 DFS_DATANODE_HANDLER_COUNT_DEFAULT)).setVerbose(false)
         .setSecretManager(blockPoolTokenSecretManager).build();
-    
+
+    ReconfigurationProtocolServerSideTranslatorPB reconfigurationProtocolXlator
+      = new ReconfigurationProtocolServerSideTranslatorPB(this);
+    service = ReconfigurationProtocolService
+        .newReflectiveBlockingService(reconfigurationProtocolXlator);
+    DFSUtil.addPBProtocol(conf, ReconfigurationProtocolPB.class, service,
+        ipcServer);
+
     InterDatanodeProtocolServerSideTranslatorPB interDatanodeProtocolXlator = 
         new InterDatanodeProtocolServerSideTranslatorPB(this);
     service = InterDatanodeProtocolService
@@ -2918,19 +2929,19 @@ public class DataNode extends ReconfigurableBase
         confVersion, uptime);
   }
 
-  @Override // ClientDatanodeProtocol
+  @Override // ClientDatanodeProtocol & ReconfigurationProtocol
   public void startReconfiguration() throws IOException {
     checkSuperuserPrivilege();
     startReconfigurationTask();
   }
 
-  @Override // ClientDatanodeProtocol
+  @Override // ClientDatanodeProtocol & ReconfigurationProtocol
   public ReconfigurationTaskStatus getReconfigurationStatus() throws IOException {
     checkSuperuserPrivilege();
     return getReconfigurationTaskStatus();
   }
 
-  @Override // ClientDatanodeProtocol
+  @Override // ClientDatanodeProtocol & ReconfigurationProtocol
   public List<String> listReconfigurableProperties()
       throws IOException {
     return RECONFIGURABLE_PROPERTIES;


[10/38] hadoop git commit: HADOOP-12609. Fix intermittent failure of TestDecayRpcScheduler. (Contributed by Masatake Iwasaki)

Posted by as...@apache.org.
HADOOP-12609. Fix intermittent failure of TestDecayRpcScheduler. (Contributed by Masatake Iwasaki)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/485c3468
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/485c3468
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/485c3468

Branch: refs/heads/yarn-2877
Commit: 485c3468a8520fcde14800af3e4a075231c946de
Parents: 0288942
Author: Arpit Agarwal <ar...@apache.org>
Authored: Tue Dec 1 09:27:23 2015 -0800
Committer: Arpit Agarwal <ar...@apache.org>
Committed: Tue Dec 1 09:27:23 2015 -0800

----------------------------------------------------------------------
 hadoop-common-project/hadoop-common/CHANGES.txt                   | 3 +++
 .../src/main/java/org/apache/hadoop/ipc/DecayRpcScheduler.java    | 2 +-
 2 files changed, 4 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/485c3468/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 32f6905..b19b703 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -1491,6 +1491,9 @@ Release 2.8.0 - UNRELEASED
     HADOOP-12313. NPE in JvmPauseMonitor when calling stop() before start().
     (Gabor Liptak via wheat9)
 
+    HADOOP-12609. Fix intermittent failure of TestDecayRpcScheduler.
+    (Masatake Iwasaki via Arpit Agarwal)
+
 Release 2.7.3 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/485c3468/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/DecayRpcScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/DecayRpcScheduler.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/DecayRpcScheduler.java
index d06b25c..a6a14d0 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/DecayRpcScheduler.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/DecayRpcScheduler.java
@@ -151,7 +151,7 @@ public class DecayRpcScheduler implements RpcScheduler, DecayRpcSchedulerMXBean
     // Setup delay timer
     Timer timer = new Timer();
     DecayTask task = new DecayTask(this, timer);
-    timer.scheduleAtFixedRate(task, 0, this.decayPeriodMillis);
+    timer.scheduleAtFixedRate(task, decayPeriodMillis, decayPeriodMillis);
 
     MetricsProxy prox = MetricsProxy.getInstance(ns);
     prox.setDelegate(this);
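The one-line fix gives the decay task an initial delay of one full period instead of firing immediately; presumably the immediate first run could decay the call counts before the test observed them, which explains the intermittent failure. A small sketch of the java.util.Timer semantics involved ('decayPeriodMillis' stands in for the scheduler's configured period):

  Timer timer = new Timer();
  TimerTask task = new TimerTask() {
    @Override
    public void run() {
      // decay accumulated call counts here
    }
  };
  // Old behaviour: first execution at delay 0, i.e. right away.
  // timer.scheduleAtFixedRate(task, 0, decayPeriodMillis);
  // New behaviour: first execution deferred by one full period.
  timer.scheduleAtFixedRate(task, decayPeriodMillis, decayPeriodMillis);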


[34/38] hadoop git commit: HDFS-9214. Add missing license header

Posted by as...@apache.org.
HDFS-9214. Add missing license header


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bc6d8d98
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bc6d8d98
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bc6d8d98

Branch: refs/heads/yarn-2877
Commit: bc6d8d981477c1d8d6cff19f2b6fc01676b4d332
Parents: 9d817fa
Author: Arpit Agarwal <ar...@apache.org>
Authored: Fri Dec 4 15:33:09 2015 -0800
Committer: Arpit Agarwal <ar...@apache.org>
Committed: Fri Dec 4 15:33:09 2015 -0800

----------------------------------------------------------------------
 .../datanode/TestDataNodeReconfiguration.java     | 18 ++++++++++++++++++
 1 file changed, 18 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bc6d8d98/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeReconfiguration.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeReconfiguration.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeReconfiguration.java
index edaf7ab..249b5c6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeReconfiguration.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeReconfiguration.java
@@ -1,3 +1,21 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
 package org.apache.hadoop.hdfs.server.datanode;
 
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_KEY;


[14/38] hadoop git commit: HDFS-9129. Move the safemode block count into BlockManager. Contributed by Mingliang Liu.

Posted by as...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a49cc74b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerTestUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerTestUtil.java
index 64d80bd..a828e64 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerTestUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerTestUtil.java
@@ -27,6 +27,7 @@ import java.util.concurrent.ExecutionException;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerSafeMode.BMSafeModeStatus;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
@@ -35,6 +36,7 @@ import org.apache.hadoop.util.Daemon;
 import org.junit.Assert;
 
 import com.google.common.base.Preconditions;
+import org.mockito.internal.util.reflection.Whitebox;
 
 public class BlockManagerTestUtil {
   public static void setNodeReplicationLimit(final BlockManager blockManager,
@@ -314,4 +316,11 @@ public class BlockManagerTestUtil {
       Block block, DatanodeStorageInfo[] targets) {
     node.addBlockToBeReplicated(block, targets);
   }
+
+  public static void setStartupSafeModeForTest(BlockManager bm) {
+    BlockManagerSafeMode bmSafeMode = (BlockManagerSafeMode)Whitebox
+        .getInternalState(bm, "bmSafeMode");
+    Whitebox.setInternalState(bmSafeMode, "extension", Integer.MAX_VALUE);
+    Whitebox.setInternalState(bmSafeMode, "status", BMSafeModeStatus.EXTENSION);
+  }
 }
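The new helper above drops the block manager into the EXTENSION state of startup safe mode via reflection, replacing the FSNamesystem.SafeModeInfo manipulation this commit removes. Typical use, taken from the TestHASafeMode hunk further down:

  // Force an active NameNode under test into startup safe mode.
  NameNodeAdapter.enterSafeMode(nn0, false);
  Whitebox.setInternalState(nn0.getNamesystem(), "manualSafeMode", false);
  BlockManagerTestUtil.setStartupSafeModeForTest(
      nn0.getNamesystem().getBlockManager());
  assertTrue(nn0.getNamesystem().isInStartupSafeMode());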

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a49cc74b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManagerSafeMode.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManagerSafeMode.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManagerSafeMode.java
new file mode 100644
index 0000000..606b282
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManagerSafeMode.java
@@ -0,0 +1,420 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.blockmanagement;
+
+import com.google.common.base.Supplier;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerSafeMode.BMSafeModeStatus;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
+import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
+import org.apache.hadoop.hdfs.server.namenode.NameNode;
+import org.apache.hadoop.test.GenericTestUtils;
+
+import org.junit.Before;
+import org.junit.Test;
+
+import org.mockito.Mockito;
+import org.mockito.internal.util.reflection.Whitebox;
+
+import java.io.IOException;
+import java.io.InterruptedIOException;
+import java.util.concurrent.TimeoutException;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+import static org.junit.Assert.fail;
+import static org.mockito.Matchers.any;
+import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.spy;
+import static org.mockito.Mockito.when;
+
+/**
+ * This test is for testing {@link BlockManagerSafeMode} package local APIs.
+ *
+ * They use heavily mocked objects, treating the {@link BlockManagerSafeMode}
+ * as white-box. Tests are light-weight thus no multi-thread scenario or real
+ * mini-cluster is tested.
+ *
+ * @see org.apache.hadoop.hdfs.TestSafeMode
+ * @see org.apache.hadoop.hdfs.server.namenode.ha.TestHASafeMode
+ * @see org.apache.hadoop.hdfs.TestSafeModeWithStripedFile
+ */
+public class TestBlockManagerSafeMode {
+  private static final int DATANODE_NUM = 3;
+  private static final long BLOCK_TOTAL = 10;
+  private static final double THRESHOLD = 0.99;
+  private static final long BLOCK_THRESHOLD = (long)(BLOCK_TOTAL * THRESHOLD);
+  private static final int EXTENSION = 1000; // 1 second
+
+  private BlockManager bm;
+  private DatanodeManager dn;
+  private BlockManagerSafeMode bmSafeMode;
+
+  /**
+   * Set up the mock context.
+   *
+   * - extension is always needed (default period is {@link #EXTENSION} ms
+   * - datanode threshold is always reached via mock
+   * - safe block is 0 and it needs {@link #BLOCK_THRESHOLD} to reach threshold
+   * - write/read lock is always held by current thread
+   *
+   * @throws IOException
+   */
+  @Before
+  public void setupMockCluster() throws IOException {
+    Configuration conf = new HdfsConfiguration();
+    conf.setDouble(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_THRESHOLD_PCT_KEY,
+        THRESHOLD);
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_EXTENSION_KEY,
+        EXTENSION);
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_MIN_DATANODES_KEY,
+        DATANODE_NUM);
+
+    FSNamesystem fsn = mock(FSNamesystem.class);
+    Mockito.doReturn(true).when(fsn).hasWriteLock();
+    Mockito.doReturn(true).when(fsn).hasReadLock();
+    Mockito.doReturn(true).when(fsn).isRunning();
+    NameNode.initMetrics(conf, NamenodeRole.NAMENODE);
+
+    bm = spy(new BlockManager(fsn, conf));
+    dn = spy(bm.getDatanodeManager());
+    Whitebox.setInternalState(bm, "datanodeManager", dn);
+    // the datanode threshold is always met
+    when(dn.getNumLiveDataNodes()).thenReturn(DATANODE_NUM);
+
+    bmSafeMode = new BlockManagerSafeMode(bm, fsn, conf);
+  }
+
+  /**
+   * Test set block total.
+   *
+   * The block total is set which will call checkSafeMode for the first time
+   * and bmSafeMode transfers from INITIALIZED to PENDING_THRESHOLD status
+   */
+  @Test(timeout = 30000)
+  public void testInitialize() {
+    assertFalse("Block manager should not be in safe mode at beginning.",
+        bmSafeMode.isInSafeMode());
+    bmSafeMode.activate(BLOCK_TOTAL);
+    assertEquals(BMSafeModeStatus.PENDING_THRESHOLD, getSafeModeStatus());
+    assertTrue(bmSafeMode.isInSafeMode());
+  }
+
+  /**
+   * Test the state machine transition.
+   */
+  @Test(timeout = 30000)
+  public void testCheckSafeMode() {
+    bmSafeMode.activate(BLOCK_TOTAL);
+
+    // stays in PENDING_THRESHOLD: pending block threshold
+    setSafeModeStatus(BMSafeModeStatus.PENDING_THRESHOLD);
+    for (long i = 0; i < BLOCK_THRESHOLD; i++) {
+      setBlockSafe(i);
+      bmSafeMode.checkSafeMode();
+      assertEquals(BMSafeModeStatus.PENDING_THRESHOLD, getSafeModeStatus());
+    }
+
+    // PENDING_THRESHOLD -> EXTENSION
+    setSafeModeStatus(BMSafeModeStatus.PENDING_THRESHOLD);
+    setBlockSafe(BLOCK_THRESHOLD);
+    bmSafeMode.checkSafeMode();
+    assertEquals(BMSafeModeStatus.EXTENSION, getSafeModeStatus());
+    Whitebox.setInternalState(bmSafeMode, "smmthread", null);
+
+    // PENDING_THRESHOLD -> OFF
+    setSafeModeStatus(BMSafeModeStatus.PENDING_THRESHOLD);
+    setBlockSafe(BLOCK_THRESHOLD);
+    Whitebox.setInternalState(bmSafeMode, "extension", 0);
+    bmSafeMode.checkSafeMode();
+    assertEquals(BMSafeModeStatus.OFF, getSafeModeStatus());
+
+    // stays in EXTENSION
+    setSafeModeStatus(BMSafeModeStatus.EXTENSION);
+    setBlockSafe(0);
+    Whitebox.setInternalState(bmSafeMode, "extension", 0);
+    bmSafeMode.checkSafeMode();
+    assertEquals(BMSafeModeStatus.EXTENSION, getSafeModeStatus());
+
+    // stays in EXTENSION: pending extension period
+    setSafeModeStatus(BMSafeModeStatus.EXTENSION);
+    setBlockSafe(BLOCK_THRESHOLD);
+    Whitebox.setInternalState(bmSafeMode, "extension", Integer.MAX_VALUE);
+    bmSafeMode.checkSafeMode();
+    assertEquals(BMSafeModeStatus.EXTENSION, getSafeModeStatus());
+  }
+
+  /**
+   * Test that the block safe increases up to block threshold.
+   *
+   * Once the block threshold is reached, the block manger leaves safe mode and
+   * increment will be a no-op.
+   * The safe mode status lifecycle: INITIALIZED -> PENDING_THRESHOLD -> OFF
+   */
+  @Test(timeout = 30000)
+  public void testIncrementSafeBlockCount() {
+    bmSafeMode.activate(BLOCK_TOTAL);
+    Whitebox.setInternalState(bmSafeMode, "extension", 0);
+
+    for (long i = 1; i <= BLOCK_TOTAL; i++) {
+      BlockInfo blockInfo = mock(BlockInfo.class);
+      doReturn(false).when(blockInfo).isStriped();
+      bmSafeMode.incrementSafeBlockCount(1, blockInfo);
+      if (i < BLOCK_THRESHOLD) {
+        assertEquals(i, getblockSafe());
+        assertTrue(bmSafeMode.isInSafeMode());
+      } else {
+        // block manager leaves safe mode if block threshold is met
+        assertFalse(bmSafeMode.isInSafeMode());
+        // the increment will be a no-op if safe mode is OFF
+        assertEquals(BLOCK_THRESHOLD, getblockSafe());
+      }
+    }
+  }
+
+  /**
+   * Test that the block safe increases up to block threshold.
+   *
+   * Once the block threshold is reached, the block manger leaves safe mode and
+   * increment will be a no-op.
+   * The safe mode status lifecycle: INITIALIZED -> PENDING_THRESHOLD -> EXTENSION-> OFF
+   */
+  @Test(timeout = 30000)
+  public void testIncrementSafeBlockCountWithExtension() throws Exception {
+    bmSafeMode.activate(BLOCK_TOTAL);
+
+    for (long i = 1; i <= BLOCK_TOTAL; i++) {
+      BlockInfo blockInfo = mock(BlockInfo.class);
+      doReturn(false).when(blockInfo).isStriped();
+      bmSafeMode.incrementSafeBlockCount(1, blockInfo);
+      if (i < BLOCK_THRESHOLD) {
+        assertTrue(bmSafeMode.isInSafeMode());
+      }
+    }
+    waitForExtensionPeriod();
+    assertFalse(bmSafeMode.isInSafeMode());
+  }
+
+  /**
+   * Test that the block safe decreases the block safe.
+   *
+   * The block manager stays in safe mode.
+   * The safe mode status lifecycle: INITIALIZED -> PENDING_THRESHOLD
+   */
+  @Test(timeout = 30000)
+  public void testDecrementSafeBlockCount() {
+    bmSafeMode.activate(BLOCK_TOTAL);
+    Whitebox.setInternalState(bmSafeMode, "extension", 0);
+
+    mockBlockManagerForBlockSafeDecrement();
+    setBlockSafe(BLOCK_THRESHOLD);
+    for (long i = BLOCK_THRESHOLD; i > 0; i--) {
+      BlockInfo blockInfo = mock(BlockInfo.class);
+      bmSafeMode.decrementSafeBlockCount(blockInfo);
+
+      assertEquals(i - 1, getblockSafe());
+      assertTrue(bmSafeMode.isInSafeMode());
+    }
+  }
+
+  /**
+   * Test when the block safe increment and decrement interleave.
+   *
+   * Both the increment and decrement will be a no-op if the safe mode is OFF.
+   * The safe mode status lifecycle: INITIALIZED -> PENDING_THRESHOLD -> OFF
+   */
+  @Test(timeout = 30000)
+  public void testIncrementAndDecrementSafeBlockCount() {
+    bmSafeMode.activate(BLOCK_TOTAL);
+    Whitebox.setInternalState(bmSafeMode, "extension", 0);
+
+    mockBlockManagerForBlockSafeDecrement();
+    for (long i = 1; i <= BLOCK_TOTAL; i++) {
+      BlockInfo blockInfo = mock(BlockInfo.class);
+      doReturn(false).when(blockInfo).isStriped();
+
+      bmSafeMode.incrementSafeBlockCount(1, blockInfo);
+      bmSafeMode.decrementSafeBlockCount(blockInfo);
+      bmSafeMode.incrementSafeBlockCount(1, blockInfo);
+
+      if (i < BLOCK_THRESHOLD) {
+        assertEquals(i, getblockSafe());
+        assertTrue(bmSafeMode.isInSafeMode());
+      } else {
+        // block manager leaves safe mode if block threshold is met
+        assertEquals(BLOCK_THRESHOLD, getblockSafe());
+        assertFalse(bmSafeMode.isInSafeMode());
+      }
+    }
+  }
+
+  /**
+   * Test the safe mode monitor.
+   *
+   * The monitor will make block manager leave the safe mode after  extension
+   * period.
+   */
+  @Test(timeout = 30000)
+  public void testSafeModeMonitor() throws Exception {
+    bmSafeMode.activate(BLOCK_TOTAL);
+
+    setBlockSafe(BLOCK_THRESHOLD);
+    // PENDING_THRESHOLD -> EXTENSION
+    bmSafeMode.checkSafeMode();
+
+    assertTrue(bmSafeMode.isInSafeMode());
+    waitForExtensionPeriod();
+    assertFalse(bmSafeMode.isInSafeMode());
+  }
+
+  /**
+   * Test block manager won't leave safe mode if datanode threshold is not met.
+   */
+  @Test(timeout = 30000)
+  public void testDatanodeThreshodShouldBeMet() throws Exception {
+    bmSafeMode.activate(BLOCK_TOTAL);
+
+    // All datanode have not registered yet.
+    when(dn.getNumLiveDataNodes()).thenReturn(1);
+    setBlockSafe(BLOCK_THRESHOLD);
+    bmSafeMode.checkSafeMode();
+    assertTrue(bmSafeMode.isInSafeMode());
+
+    // The datanode number reaches threshold after all data nodes register
+    when(dn.getNumLiveDataNodes()).thenReturn(DATANODE_NUM);
+    bmSafeMode.checkSafeMode();
+    waitForExtensionPeriod();
+    assertFalse(bmSafeMode.isInSafeMode());
+  }
+
+  /**
+   * Test block manager won't leave safe mode if there are orphan blocks.
+   */
+  @Test(timeout = 30000)
+  public void testStayInSafeModeWhenBytesInFuture() throws Exception {
+    bmSafeMode.activate(BLOCK_TOTAL);
+
+    when(bm.getBytesInFuture()).thenReturn(1L);
+    // safe blocks are enough
+   setBlockSafe(BLOCK_THRESHOLD);
+
+    // PENDING_THRESHOLD -> EXTENSION
+    bmSafeMode.checkSafeMode();
+    try {
+      waitForExtensionPeriod();
+      fail("Safe mode should not leave extension period with orphan blocks!");
+    } catch (TimeoutException e) {
+      assertEquals(BMSafeModeStatus.EXTENSION, getSafeModeStatus());
+    }
+  }
+
+  /**
+   * Test get safe mode tip.
+   */
+  @Test(timeout = 30000)
+  public void testGetSafeModeTip() throws Exception {
+    bmSafeMode.activate(BLOCK_TOTAL);
+    String tip = bmSafeMode.getSafeModeTip();
+    assertTrue(tip.contains(
+        String.format(
+            "The reported blocks %d needs additional %d blocks to reach the " +
+                "threshold %.4f of total blocks %d.%n",
+            0, BLOCK_THRESHOLD, THRESHOLD, BLOCK_TOTAL)));
+    assertTrue(tip.contains(
+        String.format("The number of live datanodes %d has reached the " +
+            "minimum number %d. ", dn.getNumLiveDataNodes(), DATANODE_NUM)));
+    assertTrue(tip.contains("Safe mode will be turned off automatically once " +
+        "the thresholds have been reached."));
+
+    // safe blocks are enough
+    setBlockSafe(BLOCK_THRESHOLD);
+    bmSafeMode.checkSafeMode();
+    tip = bmSafeMode.getSafeModeTip();
+    assertTrue(tip.contains(
+        String.format("The reported blocks %d has reached the threshold"
+            + " %.4f of total blocks %d. ",
+            getblockSafe(), THRESHOLD, BLOCK_TOTAL)));
+    assertTrue(tip.contains(
+        String.format("The number of live datanodes %d has reached the " +
+            "minimum number %d. ", dn.getNumLiveDataNodes(), DATANODE_NUM)));
+    assertTrue(tip.contains("In safe mode extension. Safe mode will be turned" +
+        " off automatically in"));
+
+    waitForExtensionPeriod();
+    tip = bmSafeMode.getSafeModeTip();
+    System.out.println(tip);
+    assertTrue(tip.contains(
+        String.format("The reported blocks %d has reached the threshold"
+                + " %.4f of total blocks %d. ",
+            getblockSafe(), THRESHOLD, BLOCK_TOTAL)));
+    assertTrue(tip.contains(
+        String.format("The number of live datanodes %d has reached the " +
+            "minimum number %d. ", dn.getNumLiveDataNodes(), DATANODE_NUM)));
+    assertTrue(tip.contains("Safe mode will be turned off automatically soon"));
+  }
+
+  /**
+   * Mock block manager internal state for decrement safe block
+   */
+  private void mockBlockManagerForBlockSafeDecrement() {
+    BlockInfo storedBlock = mock(BlockInfo.class);
+    when(storedBlock.isComplete()).thenReturn(true);
+    doReturn(storedBlock).when(bm).getStoredBlock(any(Block.class));
+    NumberReplicas numberReplicas = mock(NumberReplicas.class);
+    when(numberReplicas.liveReplicas()).thenReturn(0);
+    doReturn(numberReplicas).when(bm).countNodes(any(Block.class));
+  }
+
+  /**
+   * Wait the bmSafeMode monitor for the extension period.
+   * @throws InterruptedIOException
+   * @throws TimeoutException
+   */
+  private void waitForExtensionPeriod() throws Exception{
+    assertEquals(BMSafeModeStatus.EXTENSION, getSafeModeStatus());
+
+    GenericTestUtils.waitFor(new Supplier<Boolean>() {
+        @Override
+        public Boolean get() {
+          return getSafeModeStatus() != BMSafeModeStatus.EXTENSION;
+        }
+    }, EXTENSION / 10, EXTENSION * 2);
+  }
+
+  private void setSafeModeStatus(BMSafeModeStatus status) {
+    Whitebox.setInternalState(bmSafeMode, "status", status);
+  }
+
+  private BMSafeModeStatus getSafeModeStatus() {
+    return (BMSafeModeStatus)Whitebox.getInternalState(bmSafeMode, "status");
+  }
+
+  private void setBlockSafe(long blockSafe) {
+    Whitebox.setInternalState(bmSafeMode, "blockSafe", blockSafe);
+  }
+
+  private long getblockSafe() {
+    return (long)Whitebox.getInternalState(bmSafeMode, "blockSafe");
+  }
+}
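Worth noting: the helpers above reach package-private fields ("status", "blockSafe", "extension") through Mockito's internal Whitebox class rather than any public API, and the same pattern recurs in NameNodeAdapter and TestFSNamesystem below. The recurring idiom, for reference:

  // Whitebox is org.mockito.internal.util.reflection.Whitebox, a Mockito-internal
  // class, so this is only suitable for white-box style unit tests.
  Whitebox.setInternalState(bmSafeMode, "extension", 0);
  BMSafeModeStatus status =
      (BMSafeModeStatus) Whitebox.getInternalState(bmSafeMode, "status");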

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a49cc74b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
index 22b6697..1a8a088 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
@@ -1359,7 +1359,7 @@ public class TestReplicationPolicy extends BaseReplicationPolicyTest {
       testConvertLastBlockToUnderConstructionDoesNotCauseSkippedReplication()
           throws IOException {
     Namesystem mockNS = mock(Namesystem.class);
-    when(mockNS.hasReadLock()).thenReturn(true);
+    when(mockNS.hasWriteLock()).thenReturn(true);
 
     BlockManager bm = new BlockManager(mockNS, new HdfsConfiguration());
     UnderReplicatedBlocks underReplicatedBlocks = bm.neededReplications;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a49cc74b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java
index fd11cc0..69980db 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java
@@ -35,7 +35,6 @@ import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.MkdirOp;
-import org.apache.hadoop.hdfs.server.namenode.FSNamesystem.SafeModeInfo;
 import org.apache.hadoop.hdfs.server.namenode.LeaseManager.Lease;
 import org.apache.hadoop.hdfs.server.namenode.ha.EditLogTailer;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
@@ -236,12 +235,13 @@ public class NameNodeAdapter {
    * @return the number of blocks marked safe by safemode, or -1
    * if safemode is not running.
    */
-  public static int getSafeModeSafeBlocks(NameNode nn) {
-    SafeModeInfo smi = nn.getNamesystem().getSafeModeInfoForTests();
-    if (smi == null) {
+  public static long getSafeModeSafeBlocks(NameNode nn) {
+    if (!nn.getNamesystem().isInSafeMode()) {
       return -1;
     }
-    return smi.blockSafe;
+    Object bmSafeMode = Whitebox.getInternalState(
+        nn.getNamesystem().getBlockManager(), "bmSafeMode");
+    return (long)Whitebox.getInternalState(bmSafeMode, "blockSafe");
   }
   
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a49cc74b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystem.java
index 15fc40e..be72192 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystem.java
@@ -35,7 +35,6 @@ import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
-import org.apache.hadoop.hdfs.server.namenode.FSNamesystem.SafeModeInfo;
 import org.apache.hadoop.hdfs.server.namenode.ha.HAContext;
 import org.apache.hadoop.hdfs.server.namenode.ha.HAState;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
@@ -332,7 +331,11 @@ public class TestFSNamesystem {
     Mockito.when(fsImage.getEditLog()).thenReturn(fsEditLog);
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MIN_KEY, 2);
     FSNamesystem fsn = new FSNamesystem(conf, fsImage);
-    SafeModeInfo safemodeInfo = fsn.getSafeModeInfoForTests();
-    assertTrue(safemodeInfo.toString().contains("Minimal replication = 2"));
+
+    Object bmSafeMode = Whitebox.getInternalState(fsn.getBlockManager(),
+        "bmSafeMode");
+    int safeReplication = (int)Whitebox.getInternalState(bmSafeMode,
+        "safeReplication");
+    assertEquals(2, safeReplication);
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a49cc74b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMetadataConsistency.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMetadataConsistency.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMetadataConsistency.java
index 2d36bc1..9171194 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMetadataConsistency.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMetadataConsistency.java
@@ -103,8 +103,6 @@ public class TestNameNodeMetadataConsistency {
 
     // we also need to tell block manager that we are in the startup path
     FSNamesystem spyNameSystem = spy(cluster.getNameNode().getNamesystem());
-    spyNameSystem.enableSafeModeForTesting(conf);
-
     Whitebox.setInternalState(cluster.getNameNode()
             .getNamesystem().getBlockManager(),
         "namesystem", spyNameSystem);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a49cc74b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHASafeMode.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHASafeMode.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHASafeMode.java
index 9ded0ed..21de9606 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHASafeMode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHASafeMode.java
@@ -32,7 +32,6 @@ import java.util.Map;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
@@ -56,7 +55,6 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
 import org.apache.hadoop.hdfs.server.namenode.FSImage;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
-import org.apache.hadoop.hdfs.server.namenode.FSNamesystem.SafeModeInfo;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 import org.apache.hadoop.io.IOUtils;
@@ -126,10 +124,13 @@ public class TestHASafeMode {
         .synchronizedMap(new HashMap<Path, Boolean>());
     final Path test = new Path("/test");
     // let nn0 enter safemode
+    cluster.getConfiguration(0).setInt(
+        DFSConfigKeys.DFS_NAMENODE_SAFEMODE_MIN_DATANODES_KEY, 3);
     NameNodeAdapter.enterSafeMode(nn0, false);
-    SafeModeInfo safeMode = (SafeModeInfo) Whitebox.getInternalState(
-        nn0.getNamesystem(), "safeMode");
-    Whitebox.setInternalState(safeMode, "extension", Integer.valueOf(30000));
+    Whitebox.setInternalState(nn0.getNamesystem(), "manualSafeMode", false);
+    BlockManagerTestUtil.setStartupSafeModeForTest(nn0.getNamesystem()
+        .getBlockManager());
+    assertTrue(nn0.getNamesystem().isInStartupSafeMode());
     LOG.info("enter safemode");
     new Thread() {
       @Override


[35/38] hadoop git commit: HDFS-9491. Tests should get the number of pending async deletes via FsDatasetTestUtils. (Tony Wu via lei)

Posted by as...@apache.org.
HDFS-9491. Tests should get the number of pending async deletes via FsDatasetTestUtils. (Tony Wu via lei)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4265a85f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4265a85f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4265a85f

Branch: refs/heads/yarn-2877
Commit: 4265a85f6d9fbf48cdd741bd3d27bfced52f34ca
Parents: bc6d8d9
Author: Lei Xu <le...@apache.org>
Authored: Fri Dec 4 18:03:41 2015 -0800
Committer: Lei Xu <le...@apache.org>
Committed: Fri Dec 4 18:06:16 2015 -0800

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt                     | 3 +++
 .../apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java   | 4 ----
 .../apache/hadoop/hdfs/server/datanode/FsDatasetTestUtils.java  | 5 +++++
 .../server/datanode/fsdataset/impl/FsDatasetImplTestUtils.java  | 5 +++++
 .../hdfs/server/datanode/fsdataset/impl/FsDatasetTestUtil.java  | 4 ----
 .../server/datanode/fsdataset/impl/LazyPersistTestCase.java     | 2 +-
 .../org/apache/hadoop/hdfs/server/namenode/ha/HATestUtil.java   | 2 +-
 7 files changed, 15 insertions(+), 10 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4265a85f/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index e10450d..c7074b5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -886,6 +886,9 @@ Release 2.9.0 - UNRELEASED
       HDFS-9267. TestDiskError should get stored replicas through
       FsDatasetTestUtils. (Lei (Eddy) Xu via Colin P. McCabe)
 
+      HDFS-9491. Tests should get the number of pending async deletes via
+      FsDatasetTestUtils. (Tony Wu via lei)
+
   OPTIMIZATIONS
 
   BUG FIXES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4265a85f/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java
index ec45dac..1d47192 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java
@@ -158,10 +158,6 @@ public class DataNodeTestUtils {
     return dn.getFSDataset();
   }
 
-  public static long getPendingAsyncDeletions(DataNode dn) {
-    return FsDatasetTestUtil.getPendingAsyncDeletions(dn.getFSDataset());
-  }
-
   /**
    * Fetch a copy of ReplicaInfo from a datanode by block id
    * @param dn datanode to retrieve a replicainfo object from

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4265a85f/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/FsDatasetTestUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/FsDatasetTestUtils.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/FsDatasetTestUtils.java
index e89e1f2..2a8119f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/FsDatasetTestUtils.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/FsDatasetTestUtils.java
@@ -255,4 +255,9 @@ public interface FsDatasetTestUtils {
 
   /** Get all stored replicas in the specified block pool. */
   Iterator<Replica> getStoredReplicas(String bpid) throws IOException;
+
+  /**
+   * Get the number of pending async deletions.
+   */
+  long getPendingAsyncDeletions();
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4265a85f/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImplTestUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImplTestUtils.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImplTestUtils.java
index f67eeb8..c85ca2b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImplTestUtils.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImplTestUtils.java
@@ -400,4 +400,9 @@ public class FsDatasetImplTestUtils implements FsDatasetTestUtils {
     }
     return ret.iterator();
   }
+
+  @Override
+  public long getPendingAsyncDeletions() {
+    return dataset.asyncDiskService.countPendingDeletions();
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4265a85f/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetTestUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetTestUtil.java
index 164385e..f4480a1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetTestUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetTestUtil.java
@@ -59,10 +59,6 @@ public class FsDatasetTestUtil {
       final String bpid, final long blockId) {
     return ((FsDatasetImpl)fsd).fetchReplicaInfo(bpid, blockId);
   }
-
-  public static long getPendingAsyncDeletions(FsDatasetSpi<?> fsd) {
-    return ((FsDatasetImpl)fsd).asyncDiskService.countPendingDeletions();
-  }
   
   public static Collection<ReplicaInfo> getReplicas(FsDatasetSpi<?> fsd,
       String bpid) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4265a85f/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/LazyPersistTestCase.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/LazyPersistTestCase.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/LazyPersistTestCase.java
index 9155344..3593e2d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/LazyPersistTestCase.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/LazyPersistTestCase.java
@@ -452,7 +452,7 @@ public abstract class LazyPersistTestCase {
     triggerBlockReport();
 
     while(
-      DataNodeTestUtils.getPendingAsyncDeletions(cluster.getDataNodes().get(0))
+        cluster.getFsDatasetTestUtils(0).getPendingAsyncDeletions()
         > 0L){
       Thread.sleep(1000);
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4265a85f/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/HATestUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/HATestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/HATestUtil.java
index 5543a2a..42cf3d4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/HATestUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/HATestUtil.java
@@ -101,7 +101,7 @@ public abstract class HATestUtil {
       @Override
       public Boolean get() {
         for (DataNode dn : cluster.getDataNodes()) {
-          if (DataNodeTestUtils.getPendingAsyncDeletions(dn) > 0) {
+          if (cluster.getFsDatasetTestUtils(dn).getPendingAsyncDeletions() > 0) {
             return false;
           }
         }

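For context, the hunks above replace the removed static helpers with calls through MiniDFSCluster#getFsDatasetTestUtils, which resolves the per-datanode FsDatasetTestUtils implementation. A minimal sketch of the new polling pattern, assuming a test class that already holds a MiniDFSCluster and the usual DataNode imports (the helper method name is hypothetical):

  private void waitForPendingAsyncDeletions(MiniDFSCluster cluster)
      throws InterruptedException {
    // Illustrative only: drain the async-deletion queue on every datanode before
    // asserting on block files, mirroring LazyPersistTestCase and HATestUtil above.
    for (DataNode dn : cluster.getDataNodes()) {
      while (cluster.getFsDatasetTestUtils(dn).getPendingAsyncDeletions() > 0L) {
        Thread.sleep(1000);
      }
    }
  }
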

[22/38] hadoop git commit: YARN-3840. Resource Manager web ui issue when sorting application by id (with application having id > 9999). Contributed by Mohammad Shahid Khan and Varun Saxena

Posted by as...@apache.org.
YARN-3840. Resource Manager web ui issue when sorting application by id (with application having id > 9999). Contributed by Mohammad Shahid Khan and Varun Saxena


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9f77ccad
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9f77ccad
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9f77ccad

Branch: refs/heads/yarn-2877
Commit: 9f77ccad735f4843ce2c38355de9f434838d4507
Parents: 62e9348
Author: Jian He <ji...@apache.org>
Authored: Thu Dec 3 12:42:53 2015 -0800
Committer: Jian He <ji...@apache.org>
Committed: Thu Dec 3 12:48:50 2015 -0800

----------------------------------------------------------------------
 .../mapreduce/v2/app/webapp/TaskPage.java       |  2 +-
 .../mapreduce/v2/app/webapp/TasksPage.java      |  2 +-
 hadoop-yarn-project/CHANGES.txt                 |  4 ++
 .../hadoop/yarn/webapp/view/JQueryUI.java       | 21 ++++----
 .../webapps/static/dt-sorting/natural.js        | 54 ++++++++++++++++++++
 .../webapp/AppAttemptPage.java                  |  2 +-
 .../webapp/AppPage.java                         |  2 +-
 .../webapp/TestAHSWebApp.java                   | 50 +++++++++++++++++-
 .../hadoop/yarn/server/webapp/WebPageUtils.java |  6 +--
 .../nodemanager/webapp/AllApplicationsPage.java |  9 +++-
 .../nodemanager/webapp/AllContainersPage.java   |  8 ++-
 .../resourcemanager/webapp/TestRMWebApp.java    |  5 ++
 12 files changed, 143 insertions(+), 22 deletions(-)
----------------------------------------------------------------------

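For reference, the misordering fixed here comes from comparing application ids as plain strings: with 'sType':'string' DataTables sorts lexicographically, so a five-digit sequence number orders before a four-digit one. A tiny, self-contained illustration (the id values are made up):

  public class LexicographicIdDemo {
    public static void main(String[] args) {
      String a = "application_1449736504428_9999";
      String b = "application_1449736504428_10000";
      // '9' > '1' at the first differing character, so the four-digit id compares
      // as greater and an ascending string sort lists 10000 before 9999.
      System.out.println(a.compareTo(b) > 0);   // prints true
    }
  }

The patch registers a 'natural' DataTables sort type (natural.js below) and switches the id columns to it.
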

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9f77ccad/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/TaskPage.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/TaskPage.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/TaskPage.java
index 19b0d7c..f8637e7 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/TaskPage.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/TaskPage.java
@@ -221,7 +221,7 @@ public class TaskPage extends AppView {
     .append("\n{'aTargets': [ 5 ]")
     .append(", 'bSearchable': false }")
 
-    .append("\n, {'sType':'string', 'aTargets': [ 0 ]")
+    .append("\n, {'sType':'natural', 'aTargets': [ 0 ]")
     .append(", 'mRender': parseHadoopID }")
 
     .append("\n, {'sType':'numeric', 'aTargets': [ 6, 7")

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9f77ccad/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/TasksPage.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/TasksPage.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/TasksPage.java
index 9648527..e2f12dc 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/TasksPage.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/TasksPage.java
@@ -43,7 +43,7 @@ public class TasksPage extends AppView {
       .append(", bProcessing: true")
 
       .append("\n, aoColumnDefs: [\n")
-      .append("{'sType':'string', 'aTargets': [0]")
+      .append("{'sType':'natural', 'aTargets': [0]")
       .append(", 'mRender': parseHadoopID }")
 
       .append("\n, {'sType':'numeric', bSearchable:false, 'aTargets': [1]")

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9f77ccad/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 7258b36..cd19488 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -1126,6 +1126,10 @@ Release 2.7.3 - UNRELEASED
 
     YARN-4398. Remove unnecessary synchronization in RMStateStore. (Ning Ding via jianhe)
 
+    YARN-3840. Resource Manager web ui issue when sorting application by id
+    (with application having id > 9999) (Mohammad Shahid Khan & Varun Saxena
+    via jianhe)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9f77ccad/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/JQueryUI.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/JQueryUI.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/JQueryUI.java
index 9abfdac..06372e3 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/JQueryUI.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/JQueryUI.java
@@ -65,14 +65,14 @@ public class JQueryUI extends HtmlBlock {
 
   @Override
   protected void render(Block html) {
-    html.
-      link(root_url("static/jquery/themes-1.9.1/base/jquery-ui.css")).
-      link(root_url("static/dt-1.9.4/css/jui-dt.css")).
-      script(root_url("static/jquery/jquery-1.8.2.min.js")).
-      script(root_url("static/jquery/jquery-ui-1.9.1.custom.min.js")).
-      script(root_url("static/dt-1.9.4/js/jquery.dataTables.min.js")).
-      script(root_url("static/yarn.dt.plugins.js")).
-      style("#jsnotice { padding: 0.2em; text-align: center; }",
+    html.link(root_url("static/jquery/themes-1.9.1/base/jquery-ui.css"))
+        .link(root_url("static/dt-1.9.4/css/jui-dt.css"))
+        .script(root_url("static/jquery/jquery-1.8.2.min.js"))
+        .script(root_url("static/jquery/jquery-ui-1.9.1.custom.min.js"))
+        .script(root_url("static/dt-1.9.4/js/jquery.dataTables.min.js"))
+        .script(root_url("static/yarn.dt.plugins.js"))
+        .script(root_url("static/dt-sorting/natural.js"))
+        .style("#jsnotice { padding: 0.2em; text-align: center; }",
             ".ui-progressbar { height: 1em; min-width: 5em }"); // required
 
     List<String> list = Lists.newArrayList();
@@ -82,9 +82,8 @@ public class JQueryUI extends HtmlBlock {
     initProgressBars(list);
 
     if (!list.isEmpty()) {
-      html.
-        script().$type("text/javascript").
-          _("$(function() {")._(list.toArray())._("});")._();
+      html.script().$type("text/javascript")._("$(function() {")
+          ._(list.toArray())._("});")._();
     }
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9f77ccad/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-sorting/natural.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-sorting/natural.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-sorting/natural.js
new file mode 100644
index 0000000..3475edd
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-sorting/natural.js
@@ -0,0 +1,54 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+(function() {
+  function naturalSort (a, b) {
+    var diff = a.length - b.length;
+    if (diff != 0) {
+      var splitA = a.split("_");
+      var splitB = b.split("_");
+      if (splitA.length != splitB.length) {
+        return a.localeCompare(b);
+      }
+      for (var i=1; i < splitA.length; i++) {
+        var splitdiff = splitA[i].length - splitB[i].length;
+        if (splitdiff != 0) {
+          return splitdiff;
+        }
+        var splitCompare = splitA[i].localeCompare(splitB[i]);
+        if (splitCompare != 0) {
+          return splitCompare;
+        }
+      }
+      return diff;
+    }
+    return a.localeCompare(b);
+  }
+
+jQuery.extend( jQuery.fn.dataTableExt.oSort, {
+  "natural-asc": function ( a, b ) {
+    return naturalSort(a,b);
+  },
+
+  "natural-desc": function ( a, b ) {
+    return naturalSort(a,b) * -1;
+  }
+} );
+
+}());
+

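The comparator above first compares overall length, then walks the underscore-separated segments, ordering a shorter segment before a longer one (valid because Hadoop ids carry no leading zeros) and falling back to localeCompare otherwise. A rough Java analogue of the same ordering, shown purely as an illustration and not part of the patch:

  import java.util.Comparator;

  public final class NaturalIdComparator implements Comparator<String> {
    @Override
    public int compare(String a, String b) {
      if (a.length() == b.length()) {
        return a.compareTo(b);            // equal length: lexicographic order is already numeric order
      }
      String[] sa = a.split("_");
      String[] sb = b.split("_");
      if (sa.length != sb.length) {
        return a.compareTo(b);            // structurally different ids: plain string order
      }
      for (int i = 1; i < sa.length; i++) {
        int lenDiff = sa[i].length() - sb[i].length();
        if (lenDiff != 0) {
          return lenDiff;                 // shorter numeric segment means smaller number
        }
        int cmp = sa[i].compareTo(sb[i]);
        if (cmp != 0) {
          return cmp;
        }
      }
      return a.length() - b.length();
    }
  }

With this ordering, application_1449736504428_9999 sorts before application_1449736504428_10000, matching the expected numeric order.
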
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9f77ccad/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AppAttemptPage.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AppAttemptPage.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AppAttemptPage.java
index c7fe318..a08297d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AppAttemptPage.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AppAttemptPage.java
@@ -53,7 +53,7 @@ public class AppAttemptPage extends AHSView {
 
   protected String getContainersTableColumnDefs() {
     StringBuilder sb = new StringBuilder();
-    return sb.append("[\n").append("{'sType':'string', 'aTargets': [0]")
+    return sb.append("[\n").append("{'sType':'natural', 'aTargets': [0]")
       .append(", 'mRender': parseHadoopID }]").toString();
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9f77ccad/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AppPage.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AppPage.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AppPage.java
index 50dcd96..c0e1394 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AppPage.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AppPage.java
@@ -55,7 +55,7 @@ public class AppPage extends AHSView {
 
   protected String getAttemptsTableColumnDefs() {
     StringBuilder sb = new StringBuilder();
-    return sb.append("[\n").append("{'sType':'string', 'aTargets': [0]")
+    return sb.append("[\n").append("{'sType':'natural', 'aTargets': [0]")
       .append(", 'mRender': parseHadoopID }")
 
       .append("\n, {'sType':'numeric', 'aTargets': [1]")

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9f77ccad/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/TestAHSWebApp.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/TestAHSWebApp.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/TestAHSWebApp.java
index 1e0886f..49fcc58 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/TestAHSWebApp.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/TestAHSWebApp.java
@@ -21,7 +21,7 @@ package org.apache.hadoop.yarn.server.applicationhistoryservice.webapp;
 import static org.apache.hadoop.yarn.webapp.Params.TITLE;
 import static org.mockito.Mockito.mock;
 
-import org.junit.Assert;
+import java.util.Map;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.yarn.api.ApplicationBaseProtocol;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
@@ -38,6 +38,7 @@ import org.apache.hadoop.yarn.server.applicationhistoryservice.MemoryApplication
 import org.apache.hadoop.yarn.util.StringHelper;
 import org.apache.hadoop.yarn.webapp.YarnWebParams;
 import org.apache.hadoop.yarn.webapp.test.WebAppTests;
+import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
 
@@ -88,6 +89,21 @@ public class TestAHSWebApp extends ApplicationHistoryStoreTestUtils {
   }
 
   @Test
+  public void testAPPViewNaturalSortType() throws Exception {
+    Injector injector =
+        WebAppTests.createMockInjector(ApplicationBaseProtocol.class,
+            mockApplicationHistoryClientService(5, 1, 1));
+    AHSView ahsViewInstance = injector.getInstance(AHSView.class);
+
+    ahsViewInstance.render();
+    WebAppTests.flushOutput(injector);
+    Map<String, String> moreParams =
+        ahsViewInstance.context().requestContext().moreParams();
+    String appTableColumnsMeta = moreParams.get("ui.dataTables.apps.init");
+    Assert.assertTrue(appTableColumnsMeta.indexOf("natural") != -1);
+  }
+
+  @Test
   public void testAboutPage() throws Exception {
     Injector injector =
         WebAppTests.createMockInjector(ApplicationBaseProtocol.class,
@@ -118,6 +134,22 @@ public class TestAHSWebApp extends ApplicationHistoryStoreTestUtils {
   }
 
   @Test
+  public void testAppPageNaturalSortType() throws Exception {
+    Injector injector =
+        WebAppTests.createMockInjector(ApplicationBaseProtocol.class,
+            mockApplicationHistoryClientService(1, 5, 1));
+    AppPage appPageInstance = injector.getInstance(AppPage.class);
+
+    appPageInstance.render();
+    WebAppTests.flushOutput(injector);
+    Map<String, String> moreParams =
+        appPageInstance.context().requestContext().moreParams();
+    String attemptsTableColumnsMeta =
+        moreParams.get("ui.dataTables.attempts.init");
+    Assert.assertTrue(attemptsTableColumnsMeta.indexOf("natural") != -1);
+  }
+
+  @Test
   public void testAppAttemptPage() throws Exception {
     Injector injector =
         WebAppTests.createMockInjector(ApplicationBaseProtocol.class,
@@ -136,6 +168,21 @@ public class TestAHSWebApp extends ApplicationHistoryStoreTestUtils {
   }
 
   @Test
+  public void testAppAttemptPageNaturalSortType() throws Exception {
+    Injector injector =
+        WebAppTests.createMockInjector(ApplicationBaseProtocol.class,
+            mockApplicationHistoryClientService(1, 1, 5));
+    AppAttemptPage appAttemptPageInstance =
+        injector.getInstance(AppAttemptPage.class);
+    appAttemptPageInstance.render();
+    WebAppTests.flushOutput(injector);
+    Map<String, String> moreParams =
+        appAttemptPageInstance.context().requestContext().moreParams();
+    String tableColumnsMeta = moreParams.get("ui.dataTables.containers.init");
+    Assert.assertTrue(tableColumnsMeta.indexOf("natural") != -1);
+  }
+
+  @Test
   public void testContainerPage() throws Exception {
     Injector injector =
         WebAppTests.createMockInjector(ApplicationBaseProtocol.class,
@@ -195,5 +242,4 @@ public class TestAHSWebApp extends ApplicationHistoryStoreTestUtils {
       return store;
     }
   };
-
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9f77ccad/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/WebPageUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/WebPageUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/WebPageUtils.java
index d51c632..a07baa2 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/WebPageUtils.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/WebPageUtils.java
@@ -49,7 +49,7 @@ public class WebPageUtils {
       boolean isFairSchedulerPage, boolean isResourceManager) {
     StringBuilder sb = new StringBuilder();
     sb.append("[\n")
-      .append("{'sType':'string', 'aTargets': [0]")
+      .append("{'sType':'natural', 'aTargets': [0]")
       .append(", 'mRender': parseHadoopID }")
       .append("\n, {'sType':'numeric', 'aTargets': [6, 7]")
       .append(", 'mRender': renderHadoopDate }")
@@ -75,7 +75,7 @@ public class WebPageUtils {
 
   private static String getAttemptsTableColumnDefs() {
     StringBuilder sb = new StringBuilder();
-    return sb.append("[\n").append("{'sType':'string', 'aTargets': [0]")
+    return sb.append("[\n").append("{'sType':'natural', 'aTargets': [0]")
       .append(", 'mRender': parseHadoopID }")
       .append("\n, {'sType':'numeric', 'aTargets': [1]")
       .append(", 'mRender': renderHadoopDate }]").toString();
@@ -91,7 +91,7 @@ public class WebPageUtils {
 
   private static String getContainersTableColumnDefs() {
     StringBuilder sb = new StringBuilder();
-    return sb.append("[\n").append("{'sType':'string', 'aTargets': [0]")
+    return sb.append("[\n").append("{'sType':'natural', 'aTargets': [0]")
       .append(", 'mRender': parseHadoopID }]").toString();
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9f77ccad/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/AllApplicationsPage.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/AllApplicationsPage.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/AllApplicationsPage.java
index 9a22743..d32b271 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/AllApplicationsPage.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/AllApplicationsPage.java
@@ -54,7 +54,14 @@ public class AllApplicationsPage extends NMView {
         // Sort by id upon page load
         append(", aaSorting: [[0, 'asc']]").
         // applicationid, applicationstate
-        append(", aoColumns:[null, null]} ").toString();
+        append(", aoColumns:[").append(getApplicationsIdColumnDefs())
+        .append(", null]} ").toString();
+  }
+
+  private String getApplicationsIdColumnDefs() {
+    StringBuilder sb = new StringBuilder();
+    return sb.append("{'sType':'natural', 'aTargets': [0]")
+        .append(", 'mRender': parseHadoopID }").toString();
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9f77ccad/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/AllContainersPage.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/AllContainersPage.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/AllContainersPage.java
index 1bbb945..24b8575 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/AllContainersPage.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/AllContainersPage.java
@@ -52,9 +52,15 @@ public class AllContainersPage extends NMView {
   private String containersTableInit() {
     return tableInit().
         // containerid, containerid, log-url
-        append(", aoColumns:[null, null, {bSearchable:false}]} ").toString();
+        append(", aoColumns:[").append(getContainersIdColumnDefs())
+        .append(", null, {bSearchable:false}]} ").toString();
   }
 
+  private String getContainersIdColumnDefs() {
+    StringBuilder sb = new StringBuilder();
+    return sb.append("{'sType':'natural', 'aTargets': [0]")
+        .append(", 'mRender': parseHadoopID }").toString();
+  }
   @Override
   protected Class<? extends SubView> content() {
     return AllContainersBlock.class;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9f77ccad/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebApp.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebApp.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebApp.java
index 0290421..edf3b3f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebApp.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebApp.java
@@ -28,6 +28,7 @@ import static org.mockito.Mockito.when;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
+import java.util.Map;
 import java.util.concurrent.ConcurrentMap;
 
 import org.apache.hadoop.conf.Configuration;
@@ -119,6 +120,10 @@ public class TestRMWebApp {
         YarnApplicationState.RUNNING.toString()));
     rmViewInstance.render();
     WebAppTests.flushOutput(injector);
+    Map<String, String> moreParams =
+        rmViewInstance.context().requestContext().moreParams();
+    String appsTableColumnsMeta = moreParams.get("ui.dataTables.apps.init");
+    Assert.assertTrue(appsTableColumnsMeta.indexOf("natural") != -1);
   }
 
   @Test public void testNodesPage() {


[38/38] hadoop git commit: YARN-4358. Reservation System: Improve relationship between SharingPolicy and ReservationAgent. (Carlo Curino via asuresh)

Posted by as...@apache.org.
YARN-4358. Reservation System: Improve relationship between SharingPolicy and ReservationAgent. (Carlo Curino via asuresh)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/742632e3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/742632e3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/742632e3

Branch: refs/heads/yarn-2877
Commit: 742632e346604fd2b263bd42367165638fcf2416
Parents: 42d4901
Author: Arun Suresh <as...@apache.org>
Authored: Sat Dec 5 21:26:16 2015 -0800
Committer: Arun Suresh <as...@apache.org>
Committed: Sat Dec 5 21:26:16 2015 -0800

----------------------------------------------------------------------
 hadoop-yarn-project/CHANGES.txt                 |   3 +
 .../reservation/CapacityOverTimePolicy.java     |  52 +++++++-
 .../reservation/InMemoryPlan.java               | 123 ++++++++++++++++++-
 .../InMemoryReservationAllocation.java          |  13 +-
 .../reservation/NoOverCommitPolicy.java         |   8 ++
 .../resourcemanager/reservation/PlanView.java   |  65 ++++++++--
 .../reservation/ReservationAllocation.java      |  12 +-
 .../reservation/SharingPolicy.java              |  24 +++-
 .../reservation/planning/IterativePlanner.java  |  16 +--
 .../reservation/planning/PlanningAlgorithm.java |  34 ++---
 .../reservation/planning/StageAllocator.java    |   6 +-
 .../planning/StageAllocatorGreedy.java          |  23 ++--
 .../planning/StageAllocatorLowCostAligned.java  |  14 ++-
 .../reservation/TestInMemoryPlan.java           |  72 ++++++-----
 .../planning/TestGreedyReservationAgent.java    |  94 +++++++++++++-
 15 files changed, 464 insertions(+), 95 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/742632e3/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 90ada4b..1fed6a6 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -592,6 +592,9 @@ Release 2.8.0 - UNRELEASED
     YARN-4405. Support node label store in non-appendable file system. (Wangda
     Tan via jianhe)
 
+    YARN-4358. Reservation System: Improve relationship between SharingPolicy
+    and ReservationAgent. (Carlo Curino via asuresh)
+
   OPTIMIZATIONS
 
     YARN-3339. TestDockerContainerExecutor should pull a single image and not

http://git-wip-us.apache.org/repos/asf/hadoop/blob/742632e3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/CapacityOverTimePolicy.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/CapacityOverTimePolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/CapacityOverTimePolicy.java
index afba7ea..424b543 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/CapacityOverTimePolicy.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/CapacityOverTimePolicy.java
@@ -18,10 +18,14 @@
 package org.apache.hadoop.yarn.server.resourcemanager.reservation;
 
 import java.util.Date;
+import java.util.NavigableMap;
+import java.util.TreeMap;
 
 import org.apache.hadoop.classification.InterfaceAudience.LimitedPrivate;
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
+import org.apache.hadoop.yarn.api.records.ReservationId;
 import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.server.resourcemanager.reservation.RLESparseResourceAllocation.RLEOperator;
 import org.apache.hadoop.yarn.server.resourcemanager.reservation.exceptions.MismatchedUserException;
 import org.apache.hadoop.yarn.server.resourcemanager.reservation.exceptions.PlanningException;
 import org.apache.hadoop.yarn.server.resourcemanager.reservation.exceptions.PlanningQuotaException;
@@ -104,14 +108,17 @@ public class CapacityOverTimePolicy implements SharingPolicy {
     IntegralResource maxAllowed = new IntegralResource(maxAvgRes);
     maxAllowed.multiplyBy(validWindow / step);
 
+    RLESparseResourceAllocation userCons =
+        plan.getConsumptionForUserOverTime(reservation.getUser(), startTime
+            - validWindow, endTime + validWindow);
+
     // check that the resources offered to the user during any window of length
     // "validWindow" overlapping this allocation are within maxAllowed
     // also enforce instantaneous and physical constraints during this pass
     for (long t = startTime - validWindow; t < endTime + validWindow; t += step) {
 
       Resource currExistingAllocTot = plan.getTotalCommittedResources(t);
-      Resource currExistingAllocForUser =
-          plan.getConsumptionForUser(reservation.getUser(), t);
+      Resource currExistingAllocForUser = userCons.getCapacityAtTime(t);
       Resource currNewAlloc = reservation.getResourcesAtTime(t);
       Resource currOldAlloc = Resources.none();
       if (oldReservation != null) {
@@ -163,8 +170,7 @@ public class CapacityOverTimePolicy implements SharingPolicy {
 
       // expire contributions from instant in time before (t - validWindow)
       if (t > startTime) {
-        Resource pastOldAlloc =
-            plan.getConsumptionForUser(reservation.getUser(), t - validWindow);
+        Resource pastOldAlloc = userCons.getCapacityAtTime(t - validWindow);
         Resource pastNewAlloc = reservation.getResourcesAtTime(t - validWindow);
 
         // runningTot = runningTot - pastExistingAlloc - pastNewAlloc;
@@ -189,6 +195,39 @@ public class CapacityOverTimePolicy implements SharingPolicy {
   }
 
   @Override
+  public RLESparseResourceAllocation availableResources(
+      RLESparseResourceAllocation available, Plan plan, String user,
+      ReservationId oldId, long start, long end) throws PlanningException {
+
+    // this only propagates the instantaneous maxInst properties, while
+    // the time-varying one depends on the current allocation as well
+    // and are not easily captured here
+    Resource planTotalCapacity = plan.getTotalCapacity();
+    Resource maxInsRes = Resources.multiply(planTotalCapacity, maxInst);
+    NavigableMap<Long, Resource> instQuota = new TreeMap<Long, Resource>();
+    instQuota.put(start, maxInsRes);
+
+    RLESparseResourceAllocation instRLEQuota =
+        new RLESparseResourceAllocation(instQuota,
+            plan.getResourceCalculator());
+
+    RLESparseResourceAllocation used =
+        plan.getConsumptionForUserOverTime(user, start, end);
+
+    instRLEQuota =
+        RLESparseResourceAllocation.merge(plan.getResourceCalculator(),
+            planTotalCapacity, instRLEQuota, used, RLEOperator.subtract, start,
+            end);
+
+    instRLEQuota =
+        RLESparseResourceAllocation.merge(plan.getResourceCalculator(),
+            planTotalCapacity, available, instRLEQuota, RLEOperator.min, start,
+            end);
+
+    return instRLEQuota;
+  }
+
+  @Override
   public long getValidWindow() {
     return validWindow;
   }
@@ -198,7 +237,7 @@ public class CapacityOverTimePolicy implements SharingPolicy {
    * long(s), as using Resource to store the "integral" of the allocation over
    * time leads to integer overflows for large allocations/clusters. (Evolving
    * Resource to use long is too disruptive at this point.)
-   * 
+   *
    * The comparison/multiplication behaviors of IntegralResource are consistent
    * with the DefaultResourceCalculator.
    */
@@ -244,4 +283,7 @@ public class CapacityOverTimePolicy implements SharingPolicy {
       return "<memory:" + memory + ", vCores:" + vcores + ">";
     }
   }
+
+
+
 }

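The availableResources override above clamps the headroom handed to the agent by the instantaneous quota: roughly min(available, maxInst * planCapacity - userConsumption) at every instant, computed with two RLESparseResourceAllocation.merge calls (subtract, then min). A simplified sketch of that computation over explicit time steps, using plain memory values and hypothetical names instead of Resource and ResourceCalculator:

  import java.util.Map;
  import java.util.NavigableMap;
  import java.util.TreeMap;

  public final class InstantaneousQuotaSketch {

    /**
     * Cap the already-computed available memory by the per-user instantaneous
     * quota (maxInst * plan capacity) minus what the user already consumes at
     * each step. Assumes both step functions are sampled at the same time
     * points; negative headroom is clamped to zero for simplicity.
     */
    static NavigableMap<Long, Long> capByInstantaneousQuota(
        NavigableMap<Long, Long> availableMb,
        NavigableMap<Long, Long> userUsedMb,
        long planCapacityMb, double maxInst) {
      long instQuotaMb = (long) (planCapacityMb * maxInst);
      NavigableMap<Long, Long> out = new TreeMap<Long, Long>();
      for (Map.Entry<Long, Long> e : availableMb.entrySet()) {
        Long used = userUsedMb.get(e.getKey());
        long headroom = Math.max(0L, instQuotaMb - (used == null ? 0L : used));
        out.put(e.getKey(), Math.min(e.getValue(), headroom));
      }
      return out;
    }
  }
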
http://git-wip-us.apache.org/repos/asf/hadoop/blob/742632e3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/InMemoryPlan.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/InMemoryPlan.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/InMemoryPlan.java
index af42df9..c51c3ba 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/InMemoryPlan.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/InMemoryPlan.java
@@ -27,11 +27,13 @@ import java.util.Map;
 import java.util.Set;
 import java.util.SortedMap;
 import java.util.TreeMap;
+import java.util.TreeSet;
 import java.util.concurrent.locks.Lock;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
 
 import org.apache.hadoop.yarn.api.records.ReservationId;
 import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.server.resourcemanager.reservation.RLESparseResourceAllocation.RLEOperator;
 import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
 import org.apache.hadoop.yarn.server.resourcemanager.recovery.RMStateStore;
 import org.apache.hadoop.yarn.server.resourcemanager.reservation.exceptions.PlanningException;
@@ -65,6 +67,9 @@ public class InMemoryPlan implements Plan {
   private Map<String, RLESparseResourceAllocation> userResourceAlloc =
       new HashMap<String, RLESparseResourceAllocation>();
 
+  private Map<String, RLESparseResourceAllocation> userActiveReservationCount =
+      new HashMap<String, RLESparseResourceAllocation>();
+
   private Map<ReservationId, InMemoryReservationAllocation> reservationTable =
       new HashMap<ReservationId, InMemoryReservationAllocation>();
 
@@ -121,6 +126,7 @@ public class InMemoryPlan implements Plan {
     return queueMetrics;
   }
 
+
   private void incrementAllocation(ReservationAllocation reservation) {
     assert (readWriteLock.isWriteLockedByCurrentThread());
     Map<ReservationInterval, Resource> allocationRequests =
@@ -132,11 +138,27 @@ public class InMemoryPlan implements Plan {
       resAlloc = new RLESparseResourceAllocation(resCalc);
       userResourceAlloc.put(user, resAlloc);
     }
+    RLESparseResourceAllocation resCount = userActiveReservationCount.get(user);
+    if (resCount == null) {
+      resCount = new RLESparseResourceAllocation(resCalc);
+      userActiveReservationCount.put(user, resCount);
+    }
+
+    long earliestActive = Long.MAX_VALUE;
+    long latestActive = Long.MIN_VALUE;
+
     for (Map.Entry<ReservationInterval, Resource> r : allocationRequests
         .entrySet()) {
       resAlloc.addInterval(r.getKey(), r.getValue());
       rleSparseVector.addInterval(r.getKey(), r.getValue());
+      if (Resources.greaterThan(resCalc, totalCapacity, r.getValue(),
+          ZERO_RESOURCE)) {
+        earliestActive = Math.min(earliestActive, r.getKey().getStartTime());
+        latestActive = Math.max(latestActive, r.getKey().getEndTime());
+      }
     }
+    resCount.addInterval(new ReservationInterval(earliestActive, latestActive),
+        Resource.newInstance(1, 1));
   }
 
   private void decrementAllocation(ReservationAllocation reservation) {
@@ -145,14 +167,29 @@ public class InMemoryPlan implements Plan {
         reservation.getAllocationRequests();
     String user = reservation.getUser();
     RLESparseResourceAllocation resAlloc = userResourceAlloc.get(user);
+
+    long earliestActive = Long.MAX_VALUE;
+    long latestActive = Long.MIN_VALUE;
     for (Map.Entry<ReservationInterval, Resource> r : allocationRequests
         .entrySet()) {
       resAlloc.removeInterval(r.getKey(), r.getValue());
       rleSparseVector.removeInterval(r.getKey(), r.getValue());
+      if (Resources.greaterThan(resCalc, totalCapacity, r.getValue(),
+          ZERO_RESOURCE)) {
+        earliestActive = Math.min(earliestActive, r.getKey().getStartTime());
+        latestActive = Math.max(latestActive, r.getKey().getEndTime());
+      }
     }
     if (resAlloc.isEmpty()) {
       userResourceAlloc.remove(user);
     }
+
+    RLESparseResourceAllocation resCount = userActiveReservationCount.get(user);
+    resCount.removeInterval(new ReservationInterval(earliestActive,
+        latestActive), Resource.newInstance(1, 1));
+    if (resCount.isEmpty()) {
+      userActiveReservationCount.remove(user);
+    }
   }
 
   public Set<ReservationAllocation> getAllReservations() {
@@ -160,9 +197,9 @@ public class InMemoryPlan implements Plan {
     try {
       if (currentReservations != null) {
         Set<ReservationAllocation> flattenedReservations =
-            new HashSet<ReservationAllocation>();
-        for (Set<InMemoryReservationAllocation> reservationEntries : currentReservations
-            .values()) {
+            new TreeSet<ReservationAllocation>();
+        for (Set<InMemoryReservationAllocation> reservationEntries :
+            currentReservations.values()) {
           flattenedReservations.addAll(reservationEntries);
         }
         return flattenedReservations;
@@ -417,14 +454,34 @@ public class InMemoryPlan implements Plan {
   }
 
   @Override
-  public Resource getConsumptionForUser(String user, long t) {
+  public RLESparseResourceAllocation getReservationCountForUserOverTime(
+      String user, long start, long end) {
+    readLock.lock();
+    try {
+      RLESparseResourceAllocation userResAlloc =
+          userActiveReservationCount.get(user);
+
+      if (userResAlloc != null) {
+        return userResAlloc.getRangeOverlapping(start, end);
+      } else {
+        return new RLESparseResourceAllocation(resCalc);
+      }
+    } finally {
+      readLock.unlock();
+    }
+  }
+
+  @Override
+  public RLESparseResourceAllocation getConsumptionForUserOverTime(String user,
+      long start, long end) {
     readLock.lock();
     try {
       RLESparseResourceAllocation userResAlloc = userResourceAlloc.get(user);
+
       if (userResAlloc != null) {
-        return userResAlloc.getCapacityAtTime(t);
+        return userResAlloc.getRangeOverlapping(start, end);
       } else {
-        return Resources.clone(ZERO_RESOURCE);
+        return new RLESparseResourceAllocation(resCalc);
       }
     } finally {
       readLock.unlock();
@@ -465,6 +522,43 @@ public class InMemoryPlan implements Plan {
   }
 
   @Override
+  public RLESparseResourceAllocation getAvailableResourceOverTime(String user,
+      ReservationId oldId, long start, long end) throws PlanningException {
+    readLock.lock();
+    try {
+      // create RLE of totCapacity
+      TreeMap<Long, Resource> totAvailable = new TreeMap<Long, Resource>();
+      totAvailable.put(start, Resources.clone(totalCapacity));
+      RLESparseResourceAllocation totRLEAvail =
+          new RLESparseResourceAllocation(totAvailable, resCalc);
+
+      // subtract used from available
+      RLESparseResourceAllocation netAvailable;
+
+      netAvailable =
+          RLESparseResourceAllocation.merge(resCalc,
+              Resources.clone(totalCapacity), totRLEAvail, rleSparseVector,
+              RLEOperator.subtractTestNonNegative, start, end);
+
+      // add back in old reservation used resources if any
+      ReservationAllocation old = reservationTable.get(oldId);
+      if (old != null) {
+        netAvailable =
+            RLESparseResourceAllocation.merge(resCalc,
+                Resources.clone(totalCapacity), netAvailable,
+                old.getResourcesOverTime(), RLEOperator.add, start, end);
+      }
+      // lower it if this is needed by the sharing policy
+      netAvailable =
+          getSharingPolicy().availableResources(netAvailable, this, user,
+              oldId, start, end);
+      return netAvailable;
+    } finally {
+      readLock.unlock();
+    }
+  }
+
+  @Override
   public Resource getMinimumAllocation() {
     return Resources.clone(minAlloc);
   }
@@ -549,4 +643,21 @@ public class InMemoryPlan implements Plan {
     }
   }
 
+  @Override
+  public Set<ReservationAllocation> getReservationByUserAtTime(String user,
+      long t) {
+    readLock.lock();
+    try {
+      Set<ReservationAllocation> resSet = new HashSet<ReservationAllocation>();
+      for (ReservationAllocation ra : getReservationsAtTime(t)) {
+        String resUser = ra.getUser();
+        if (resUser != null && resUser.equals(user)) {
+          resSet.add(ra);
+        }
+      }
+      return resSet;
+    } finally {
+      readLock.unlock();
+    }
+  }
 }

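Reading getAvailableResourceOverTime as a whole: it starts from the full plan capacity, subtracts everything already committed (using the subtractTestNonNegative operator), adds back the allocation of the reservation being updated so it does not count against itself, and finally lets the SharingPolicy tighten the result. A hedged sketch of how a planning agent might consume it; the surrounding method and variable names are hypothetical:

  // Ask the plan what this user could still get in [start, end), ignoring the
  // reservation that is about to be replaced, then probe the step function.
  RLESparseResourceAllocation headroom =
      plan.getAvailableResourceOverTime(user, oldReservationId, start, end);
  Resource availableAtT = headroom.getCapacityAtTime(t);

(getAvailableResourceOverTime declares PlanningException, so the caller has to propagate or handle it.)
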
http://git-wip-us.apache.org/repos/asf/hadoop/blob/742632e3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/InMemoryReservationAllocation.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/InMemoryReservationAllocation.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/InMemoryReservationAllocation.java
index 55ab066..69fd43f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/InMemoryReservationAllocation.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/InMemoryReservationAllocation.java
@@ -133,11 +133,16 @@ public class InMemoryReservationAllocation implements ReservationAllocation {
   }
 
   @Override
+  public RLESparseResourceAllocation getResourcesOverTime(){
+    return resourcesOverTime;
+  }
+
+  @Override
   public String toString() {
     StringBuilder sBuf = new StringBuilder();
     sBuf.append(getReservationId()).append(" user:").append(getUser())
         .append(" startTime: ").append(getStartTime()).append(" endTime: ")
-        .append(getEndTime()).append(" alloc:[")
+        .append(getEndTime()).append(" alloc:\n[")
         .append(resourcesOverTime.toString()).append("] ");
     return sBuf.toString();
   }
@@ -151,6 +156,12 @@ public class InMemoryReservationAllocation implements ReservationAllocation {
     if (this.getAcceptanceTime() < other.getAcceptanceTime()) {
       return 1;
     }
+    if (this.getReservationId().getId() > other.getReservationId().getId()) {
+      return -1;
+    }
+    if (this.getReservationId().getId() < other.getReservationId().getId()) {
+      return 1;
+    }
     return 0;
   }
 

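The added tie-breaker makes the ordering total across distinct reservations: newest acceptance time first, and among equal acceptance times the larger reservation id first. That matters now that getAllReservations collects allocations into a TreeSet. The same order written as a standalone java.util.Comparator, purely as an illustration (not part of the patch):

  Comparator<InMemoryReservationAllocation> order =
      new Comparator<InMemoryReservationAllocation>() {
        @Override
        public int compare(InMemoryReservationAllocation x,
            InMemoryReservationAllocation y) {
          // Descending by acceptance time, then descending by reservation id.
          int byTime = Long.compare(y.getAcceptanceTime(), x.getAcceptanceTime());
          if (byTime != 0) {
            return byTime;
          }
          return Long.compare(y.getReservationId().getId(),
              x.getReservationId().getId());
        }
      };
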
http://git-wip-us.apache.org/repos/asf/hadoop/blob/742632e3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/NoOverCommitPolicy.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/NoOverCommitPolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/NoOverCommitPolicy.java
index f87e9dc..119520b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/NoOverCommitPolicy.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/NoOverCommitPolicy.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.yarn.server.resourcemanager.reservation;
 
 import org.apache.hadoop.classification.InterfaceAudience.LimitedPrivate;
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
+import org.apache.hadoop.yarn.api.records.ReservationId;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.server.resourcemanager.reservation.exceptions.MismatchedUserException;
 import org.apache.hadoop.yarn.server.resourcemanager.reservation.exceptions.PlanningException;
@@ -89,4 +90,11 @@ public class NoOverCommitPolicy implements SharingPolicy {
     // nothing to do for this policy
   }
 
+  @Override
+  public RLESparseResourceAllocation availableResources(
+      RLESparseResourceAllocation available, Plan plan, String user,
+      ReservationId oldId, long start, long end) throws PlanningException {
+    return available;
+  }
+
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/742632e3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/PlanView.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/PlanView.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/PlanView.java
index 66c66ca..f57c2e0 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/PlanView.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/PlanView.java
@@ -19,6 +19,7 @@ package org.apache.hadoop.yarn.server.resourcemanager.reservation;
 
 import org.apache.hadoop.yarn.api.records.ReservationId;
 import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.server.resourcemanager.reservation.exceptions.PlanningException;
 
 import java.util.Set;
 
@@ -41,6 +42,17 @@ public interface PlanView extends PlanContext {
   public ReservationAllocation getReservationById(ReservationId reservationID);
 
   /**
+   * Return the set of {@link ReservationAllocation}s that belong to a certain
+   * user and overlap time t.
+   *
+   * @param user the user being considered
+   * @param t the instant in time being considered
+   * @return {@link Set<ReservationAllocation>} for this user at this time
+   */
+  public Set<ReservationAllocation> getReservationByUserAtTime(String user,
+      long t);
+
+  /**
    * Gets all the active reservations at the specified point of time
    * 
    * @param tick the time (UTC in ms) for which the active reservations are
@@ -68,18 +80,6 @@ public interface PlanView extends PlanContext {
   Resource getTotalCommittedResources(long tick);
 
   /**
-   * Returns the total {@link Resource} reserved for a given user at the
-   * specified time
-   * 
-   * @param user the user who made the reservation(s)
-   * @param tick the time (UTC in ms) for which the reserved resources are
-   *          requested
-   * @return the total {@link Resource} reserved for a given user at the
-   *         specified time
-   */
-  public Resource getConsumptionForUser(String user, long tick);
-
-  /**
    * Returns the overall capacity in terms of {@link Resource} assigned to this
    * plan (typically will correspond to the absolute capacity of the
    * corresponding queue).
@@ -98,9 +98,48 @@ public interface PlanView extends PlanContext {
 
   /**
    * Returns the time (UTC in ms) at which the last reservation terminates
-   * 
+   *
    * @return the time (UTC in ms) at which the last reservation terminates
    */
   public long getLastEndTime();
 
+  /**
+   * This method returns the amount of resources available to a given user
+   * (optionally if removing a certain reservation) over the start-end time
+   * range.
+   *
+   * @param user
+   * @param oldId
+   * @param start
+   * @param end
+   * @return a view of the plan as it is available to this user
+   * @throws PlanningException
+   */
+  public RLESparseResourceAllocation getAvailableResourceOverTime(String user,
+      ReservationId oldId, long start, long end) throws PlanningException;
+
+  /**
+   * This method returns a RLE encoded view of the user reservation count
+   * utilization between start and end time.
+   *
+   * @param user
+   * @param start
+   * @param end
+   * @return RLE encoded view of reservation used over time
+   */
+  public RLESparseResourceAllocation getReservationCountForUserOverTime(
+      String user, long start, long end);
+
+  /**
+   * This method returns a RLE encoded view of the user reservation utilization
+   * between start and end time.
+   *
+   * @param user
+   * @param start
+   * @param end
+   * @return RLE encoded view of resources used over time
+   */
+  public RLESparseResourceAllocation getConsumptionForUserOverTime(String user,
+      long start, long end);
+
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/742632e3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/ReservationAllocation.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/ReservationAllocation.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/ReservationAllocation.java
index 0d3c692..0da95ac 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/ReservationAllocation.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/ReservationAllocation.java
@@ -50,14 +50,14 @@ public interface ReservationAllocation extends
   public ReservationDefinition getReservationDefinition();
 
   /**
-   * Returns the time at which the reservation is activated
+   * Returns the time at which the reservation is activated.
    * 
    * @return the time at which the reservation is activated
    */
   public long getStartTime();
 
   /**
-   * Returns the time at which the reservation terminates
+   * Returns the time at which the reservation terminates.
    * 
    * @return the time at which the reservation terminates
    */
@@ -65,7 +65,7 @@ public interface ReservationAllocation extends
 
   /**
    * Returns the map of resources requested against the time interval for which
-   * they were
+   * they were.
    * 
    * @return the allocationRequests the map of resources requested against the
    *         time interval for which they were
@@ -118,4 +118,10 @@ public interface ReservationAllocation extends
    */
   public Resource getResourcesAtTime(long tick);
 
+  /**
+   * Return a RLE representation of used resources.
+   * @return a RLE encoding of resources allocated over time.
+   */
+  public RLESparseResourceAllocation getResourcesOverTime();
+
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/742632e3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/SharingPolicy.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/SharingPolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/SharingPolicy.java
index 8f8d24c..e458055 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/SharingPolicy.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/SharingPolicy.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.yarn.server.resourcemanager.reservation;
 
 import org.apache.hadoop.classification.InterfaceAudience.LimitedPrivate;
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
+import org.apache.hadoop.yarn.api.records.ReservationId;
 import org.apache.hadoop.yarn.server.resourcemanager.reservation.exceptions.PlanningException;
 
 /**
@@ -32,7 +33,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.reservation.exceptions.Plan
 public interface SharingPolicy {
 
   /**
-   * Initialize this policy
+   * Initialize this policy.
    * 
    * @param planQueuePath the name of the queue for this plan
    * @param conf the system configuration
@@ -54,6 +55,26 @@ public interface SharingPolicy {
       throws PlanningException;
 
   /**
+   * This method provides a (partial) instantaneous validation by applying
+   * business rules (such as the max number of parallel containers allowed for
+   * a user). To give the agent more feedback, the returned value is expressed
+   * as the number of containers that can fit in this time range according to
+   * the business rules.
+   *
+   * @param available the amount of resources that would be offered if not
+   *          constrained by the policy
+   * @param plan a reference to the current Plan
+   * @param user the username
+   * @param start the start time for the range we are querying
+   * @param end the end time for the range we are querying
+   * @param oldId (optional) the id of a reservation being updated
+   * @throws PlanningException if the request is not valid
+   */
+  public RLESparseResourceAllocation availableResources(
+      RLESparseResourceAllocation available, Plan plan, String user,
+      ReservationId oldId, long start, long end) throws PlanningException;
+
+  /**
    * Returns the time range before and after the current reservation considered
    * by this policy. In particular, this informs the archival process for the
    * {@link Plan}, i.e., reservations regarding times before (now - validWindow)
@@ -63,4 +84,5 @@ public interface SharingPolicy {
    */
   public long getValidWindow();
 
+
 }
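
One possible composition of the new hook with the Plan query above, shown purely
for illustration (the helper is invented, and a concrete Plan implementation may
already apply its SharingPolicy internally):

  // Hypothetical: obtain raw headroom and let the policy cap it per user.
  RLESparseResourceAllocation policyCappedHeadroom(Plan plan, String user,
      ReservationId oldId, long start, long end) throws PlanningException {
    RLESparseResourceAllocation raw =
        plan.getAvailableResourceOverTime(user, oldId, start, end);
    return plan.getSharingPolicy().availableResources(raw, plan, user, oldId,
        start, end);
  }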

http://git-wip-us.apache.org/repos/asf/hadoop/blob/742632e3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/IterativePlanner.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/IterativePlanner.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/IterativePlanner.java
index d05b0ef..77362d5 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/IterativePlanner.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/IterativePlanner.java
@@ -33,6 +33,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.reservation.RLESparseResour
 import org.apache.hadoop.yarn.server.resourcemanager.reservation.ReservationAllocation;
 import org.apache.hadoop.yarn.server.resourcemanager.reservation.ReservationInterval;
 import org.apache.hadoop.yarn.server.resourcemanager.reservation.exceptions.ContractValidationException;
+import org.apache.hadoop.yarn.server.resourcemanager.reservation.exceptions.PlanningException;
 import org.apache.hadoop.yarn.util.resource.Resources;
 
 /**
@@ -80,8 +81,8 @@ public class IterativePlanner extends PlanningAlgorithm {
 
   @Override
   public RLESparseResourceAllocation computeJobAllocation(Plan plan,
-      ReservationId reservationId, ReservationDefinition reservation)
-      throws ContractValidationException {
+      ReservationId reservationId, ReservationDefinition reservation,
+      String user) throws PlanningException {
 
     // Initialize
     initialize(plan, reservation);
@@ -142,7 +143,7 @@ public class IterativePlanner extends PlanningAlgorithm {
       // Compute the allocation of a single stage
       Map<ReservationInterval, Resource> curAlloc =
           computeStageAllocation(plan, currentReservationStage,
-              stageArrivalTime, stageDeadline);
+              stageArrivalTime, stageDeadline, user, reservationId);
 
       // If we did not find an allocation, return NULL
       // (unless it's an ANY job, then we simply continue).
@@ -159,8 +160,8 @@ public class IterativePlanner extends PlanningAlgorithm {
       }
 
       // Get the start & end time of the current allocation
-      Long stageStartTime = findEarliestTime(curAlloc.keySet());
-      Long stageEndTime = findLatestTime(curAlloc.keySet());
+      Long stageStartTime = findEarliestTime(curAlloc);
+      Long stageEndTime = findLatestTime(curAlloc);
 
       // If we did find an allocation for the stage, add it
       for (Entry<ReservationInterval, Resource> entry : curAlloc.entrySet()) {
@@ -310,10 +311,11 @@ public class IterativePlanner extends PlanningAlgorithm {
   // Call algStageAllocator
   protected Map<ReservationInterval, Resource> computeStageAllocation(
       Plan plan, ReservationRequest rr, long stageArrivalTime,
-      long stageDeadline) {
+      long stageDeadline, String user, ReservationId oldId)
+      throws PlanningException {
 
     return algStageAllocator.computeStageAllocation(plan, planLoads,
-        planModifications, rr, stageArrivalTime, stageDeadline);
+        planModifications, rr, stageArrivalTime, stageDeadline, user, oldId);
 
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/742632e3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/PlanningAlgorithm.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/PlanningAlgorithm.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/PlanningAlgorithm.java
index 8b72b9f..e1b508d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/PlanningAlgorithm.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/PlanningAlgorithm.java
@@ -19,7 +19,7 @@
 package org.apache.hadoop.yarn.server.resourcemanager.reservation.planning;
 
 import java.util.Map;
-import java.util.Set;
+import java.util.Map.Entry;
 
 import org.apache.hadoop.yarn.api.records.ReservationDefinition;
 import org.apache.hadoop.yarn.api.records.ReservationId;
@@ -62,7 +62,7 @@ public abstract class PlanningAlgorithm implements ReservationAgent {
 
     // Compute the job allocation
     RLESparseResourceAllocation allocation =
-        computeJobAllocation(plan, reservationId, adjustedContract);
+        computeJobAllocation(plan, reservationId, adjustedContract, user);
 
     // If no job allocation was found, fail
     if (allocation == null) {
@@ -84,8 +84,8 @@ public abstract class PlanningAlgorithm implements ReservationAgent {
             adjustedContract, // Contract
             user, // User name
             plan.getQueueName(), // Queue name
-            findEarliestTime(mapAllocations.keySet()), // Earliest start time
-            findLatestTime(mapAllocations.keySet()), // Latest end time
+            findEarliestTime(mapAllocations), // Earliest start time
+            findLatestTime(mapAllocations), // Latest end time
             mapAllocations, // Allocations
             plan.getResourceCalculator(), // Resource calculator
             plan.getMinimumAllocation()); // Minimum allocation
@@ -111,14 +111,14 @@ public abstract class PlanningAlgorithm implements ReservationAgent {
     Resource zeroResource = Resource.newInstance(0, 0);
 
     // Pad at the beginning
-    long earliestStart = findEarliestTime(mapAllocations.keySet());
+    long earliestStart = findEarliestTime(mapAllocations);
     if (jobArrival < earliestStart) {
       mapAllocations.put(new ReservationInterval(jobArrival, earliestStart),
           zeroResource);
     }
 
     // Pad at the end
-    long latestEnd = findLatestTime(mapAllocations.keySet());
+    long latestEnd = findLatestTime(mapAllocations);
     if (latestEnd < jobDeadline) {
       mapAllocations.put(new ReservationInterval(latestEnd, jobDeadline),
           zeroResource);
@@ -129,8 +129,8 @@ public abstract class PlanningAlgorithm implements ReservationAgent {
   }
 
   public abstract RLESparseResourceAllocation computeJobAllocation(Plan plan,
-      ReservationId reservationId, ReservationDefinition reservation)
-      throws PlanningException, ContractValidationException;
+      ReservationId reservationId, ReservationDefinition reservation,
+      String user) throws PlanningException, ContractValidationException;
 
   @Override
   public boolean createReservation(ReservationId reservationId, String user,
@@ -162,24 +162,26 @@ public abstract class PlanningAlgorithm implements ReservationAgent {
 
   }
 
-  protected static long findEarliestTime(Set<ReservationInterval> sesInt) {
+  protected static long findEarliestTime(
+      Map<ReservationInterval, Resource> sesInt) {
 
     long ret = Long.MAX_VALUE;
-    for (ReservationInterval s : sesInt) {
-      if (s.getStartTime() < ret) {
-        ret = s.getStartTime();
+    for (Entry<ReservationInterval, Resource> s : sesInt.entrySet()) {
+      if (s.getKey().getStartTime() < ret && s.getValue() != null) {
+        ret = s.getKey().getStartTime();
       }
     }
     return ret;
 
   }
 
-  protected static long findLatestTime(Set<ReservationInterval> sesInt) {
+  protected static long findLatestTime(Map<ReservationInterval,
+      Resource> sesInt) {
 
     long ret = Long.MIN_VALUE;
-    for (ReservationInterval s : sesInt) {
-      if (s.getEndTime() > ret) {
-        ret = s.getEndTime();
+    for (Entry<ReservationInterval, Resource> s : sesInt.entrySet()) {
+      if (s.getKey().getEndTime() > ret && s.getValue() != null) {
+        ret = s.getKey().getEndTime();
       }
     }
     return ret;
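
A toy example (values invented, not in the patch) of the behaviour the reworked
helpers implement: entries whose value is null are skipped, so they cannot widen
the computed bounds.

  Map<ReservationInterval, Resource> m =
      new HashMap<ReservationInterval, Resource>();
  m.put(new ReservationInterval(10L, 20L), Resource.newInstance(1024, 1));
  m.put(new ReservationInterval(0L, 10L), null);   // null value: ignored
  // With the new signatures: findEarliestTime(m) == 10, findLatestTime(m) == 20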

http://git-wip-us.apache.org/repos/asf/hadoop/blob/742632e3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/StageAllocator.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/StageAllocator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/StageAllocator.java
index 9df6b74..b95f8d4 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/StageAllocator.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/StageAllocator.java
@@ -20,11 +20,13 @@ package org.apache.hadoop.yarn.server.resourcemanager.reservation.planning;
 
 import java.util.Map;
 
+import org.apache.hadoop.yarn.api.records.ReservationId;
 import org.apache.hadoop.yarn.api.records.ReservationRequest;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.server.resourcemanager.reservation.Plan;
 import org.apache.hadoop.yarn.server.resourcemanager.reservation.RLESparseResourceAllocation;
 import org.apache.hadoop.yarn.server.resourcemanager.reservation.ReservationInterval;
+import org.apache.hadoop.yarn.server.resourcemanager.reservation.exceptions.PlanningException;
 
 /**
  * Interface for allocating a single stage in IterativePlanner.
@@ -46,10 +48,12 @@ public interface StageAllocator {
    *
    * @return The computed allocation (or null if the stage could not be
    *         allocated)
+   * @throws PlanningException
    */
   Map<ReservationInterval, Resource> computeStageAllocation(Plan plan,
       Map<Long, Resource> planLoads,
       RLESparseResourceAllocation planModifications, ReservationRequest rr,
-      long stageEarliestStart, long stageDeadline);
+      long stageEarliestStart, long stageDeadline, String user,
+      ReservationId oldId) throws PlanningException;
 
 }
\ No newline at end of file
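
For reference, a skeletal implementation under the widened signature; the class
and its do-nothing behaviour are illustrative only and not part of the commit:

  // Returning null follows the documented "stage could not be allocated" case.
  public class NullStageAllocator implements StageAllocator {
    @Override
    public Map<ReservationInterval, Resource> computeStageAllocation(Plan plan,
        Map<Long, Resource> planLoads,
        RLESparseResourceAllocation planModifications, ReservationRequest rr,
        long stageEarliestStart, long stageDeadline, String user,
        ReservationId oldId) throws PlanningException {
      return null;
    }
  }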

http://git-wip-us.apache.org/repos/asf/hadoop/blob/742632e3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/StageAllocatorGreedy.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/StageAllocatorGreedy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/StageAllocatorGreedy.java
index 773fbdf..c836970 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/StageAllocatorGreedy.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/StageAllocatorGreedy.java
@@ -21,11 +21,14 @@ package org.apache.hadoop.yarn.server.resourcemanager.reservation.planning;
 import java.util.HashMap;
 import java.util.Map;
 
+import org.apache.hadoop.yarn.api.records.ReservationId;
 import org.apache.hadoop.yarn.api.records.ReservationRequest;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.server.resourcemanager.reservation.Plan;
 import org.apache.hadoop.yarn.server.resourcemanager.reservation.RLESparseResourceAllocation;
 import org.apache.hadoop.yarn.server.resourcemanager.reservation.ReservationInterval;
+import org.apache.hadoop.yarn.server.resourcemanager.reservation.RLESparseResourceAllocation.RLEOperator;
+import org.apache.hadoop.yarn.server.resourcemanager.reservation.exceptions.PlanningException;
 import org.apache.hadoop.yarn.util.resource.Resources;
 
 /**
@@ -40,7 +43,8 @@ public class StageAllocatorGreedy implements StageAllocator {
   public Map<ReservationInterval, Resource> computeStageAllocation(Plan plan,
       Map<Long, Resource> planLoads,
       RLESparseResourceAllocation planModifications, ReservationRequest rr,
-      long stageEarliestStart, long stageDeadline) {
+      long stageEarliestStart, long stageDeadline, String user,
+      ReservationId oldId) throws PlanningException {
 
     Resource totalCapacity = plan.getTotalCapacity();
 
@@ -63,6 +67,15 @@ public class StageAllocatorGreedy implements StageAllocator {
 
     int maxGang = 0;
 
+    RLESparseResourceAllocation netAvailable =
+        plan.getAvailableResourceOverTime(user, oldId, stageEarliestStart,
+            stageDeadline);
+
+    netAvailable =
+        RLESparseResourceAllocation.merge(plan.getResourceCalculator(),
+            plan.getTotalCapacity(), netAvailable, planModifications,
+            RLEOperator.subtract, stageEarliestStart, stageDeadline);
+
     // loop trying to place until we are done, or we are considering
     // an invalid range of times
     while (gangsToPlace > 0 && stageDeadline - dur >= stageEarliestStart) {
@@ -79,13 +92,7 @@ public class StageAllocatorGreedy implements StageAllocator {
       for (long t = stageDeadline - plan.getStep(); t >= stageDeadline - dur
           && maxGang > 0; t = t - plan.getStep()) {
 
-        // compute net available resources
-        Resource netAvailableRes = Resources.clone(totalCapacity);
-        // Resources.addTo(netAvailableRes, oldResCap);
-        Resources.subtractFrom(netAvailableRes,
-            plan.getTotalCommittedResources(t));
-        Resources.subtractFrom(netAvailableRes,
-            planModifications.getCapacityAtTime(t));
+        Resource netAvailableRes = netAvailable.getCapacityAtTime(t);
 
         // compute maximum number of gangs we could fit
         curMaxGang =
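
A small illustrative loop (reusing the variable names of the hunk above; the loop
itself is not in the patch) showing what the one-shot merge buys: inside the
placement window a single RLE lookup per tick replaces the old pair of
Resources.subtractFrom calls.

  for (long t = stageEarliestStart; t < stageDeadline; t += plan.getStep()) {
    Resource headroom = netAvailable.getCapacityAtTime(t);
    System.out.println("t=" + t + " net available=" + headroom);
  }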

http://git-wip-us.apache.org/repos/asf/hadoop/blob/742632e3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/StageAllocatorLowCostAligned.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/StageAllocatorLowCostAligned.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/StageAllocatorLowCostAligned.java
index 04cce7b..b9fd8e1 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/StageAllocatorLowCostAligned.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/StageAllocatorLowCostAligned.java
@@ -22,6 +22,7 @@ import java.util.Comparator;
 import java.util.Map;
 import java.util.TreeSet;
 
+import org.apache.hadoop.yarn.api.records.ReservationId;
 import org.apache.hadoop.yarn.api.records.ReservationRequest;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.server.resourcemanager.reservation.Plan;
@@ -60,7 +61,8 @@ public class StageAllocatorLowCostAligned implements StageAllocator {
   public Map<ReservationInterval, Resource> computeStageAllocation(
       Plan plan, Map<Long, Resource> planLoads,
       RLESparseResourceAllocation planModifications, ReservationRequest rr,
-      long stageEarliestStart, long stageDeadline) {
+      long stageEarliestStart, long stageDeadline, String user,
+      ReservationId oldId) {
 
     // Initialize
     ResourceCalculator resCalc = plan.getResourceCalculator();
@@ -136,7 +138,9 @@ public class StageAllocatorLowCostAligned implements StageAllocator {
       DurationInterval bestDurationInterval =
           durationIntervalsSortedByCost.first();
       int numGangsToAllocate = Math.min(maxGangsPerUnit, remainingGangs);
-
+      numGangsToAllocate =
+          Math.min(numGangsToAllocate,
+              bestDurationInterval.numCanFit(gang, capacity, resCalc));
       // Add it
       remainingGangs -= numGangsToAllocate;
 
@@ -355,5 +359,11 @@ public class StageAllocatorLowCostAligned implements StageAllocator {
       this.cost = value;
     }
 
+    public String toString() {
+      StringBuilder sb = new StringBuilder();
+      sb.append(" start: " + startTime).append(" end: " + endTime)
+          .append(" cost: " + cost).append(" maxLoad: " + maxLoad);
+      return sb.toString();
+    }
   }
 }
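
Toy arithmetic (all numbers invented) for the new cap: besides maxGangsPerUnit
and the remaining gangs, the gangs placed in an interval are now also bounded by
how many actually fit in that interval's spare capacity.

  int remainingGangs = 5;
  int maxGangsPerUnit = 4;
  // spare capacity <10240 MB, 10 vcores>, each gang needs <4096 MB, 4 vcores>
  int numCanFit = Math.min(10240 / 4096, 10 / 4);                   // == 2
  int numGangsToAllocate = Math.min(maxGangsPerUnit, remainingGangs);
  numGangsToAllocate = Math.min(numGangsToAllocate, numCanFit);     // == 2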

http://git-wip-us.apache.org/repos/asf/hadoop/blob/742632e3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/TestInMemoryPlan.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/TestInMemoryPlan.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/TestInMemoryPlan.java
index 2e262a0..1756e86 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/TestInMemoryPlan.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/TestInMemoryPlan.java
@@ -118,11 +118,18 @@ public class TestInMemoryPlan {
       Assert.fail(e.getMessage());
     }
     doAssertions(plan, rAllocation);
+    checkAllocation(plan, alloc, start);
+  }
+
+  private void checkAllocation(Plan plan, int[] alloc, int start) {
+    RLESparseResourceAllocation userCons =
+        plan.getConsumptionForUserOverTime(user, start, start + alloc.length);
+
     for (int i = 0; i < alloc.length; i++) {
       Assert.assertEquals(Resource.newInstance(1024 * (alloc[i]), (alloc[i])),
           plan.getTotalCommittedResources(start + i));
       Assert.assertEquals(Resource.newInstance(1024 * (alloc[i]), (alloc[i])),
-          plan.getConsumptionForUser(user, start + i));
+          userCons.getCapacityAtTime(start + i));
     }
   }
 
@@ -180,12 +187,7 @@ public class TestInMemoryPlan {
       Assert.fail(e.getMessage());
     }
     doAssertions(plan, rAllocation);
-    for (int i = 0; i < alloc.length; i++) {
-      Assert.assertEquals(Resource.newInstance(1024 * (alloc[i]), (alloc[i])),
-          plan.getTotalCommittedResources(start + i));
-      Assert.assertEquals(Resource.newInstance(1024 * (alloc[i]), (alloc[i])),
-          plan.getConsumptionForUser(user, start + i));
-    }
+    checkAllocation(plan, alloc, start);
 
     // Try to add it again
     try {
@@ -226,11 +228,14 @@ public class TestInMemoryPlan {
       Assert.fail(e.getMessage());
     }
     doAssertions(plan, rAllocation);
+
+    RLESparseResourceAllocation userCons =
+        plan.getConsumptionForUserOverTime(user, start, start + alloc.length);
     for (int i = 0; i < alloc.length; i++) {
       Assert.assertEquals(Resource.newInstance(1024 * (alloc[i]), (alloc[i])),
           plan.getTotalCommittedResources(start + i));
       Assert.assertEquals(Resource.newInstance(1024 * (alloc[i]), (alloc[i])),
-          plan.getConsumptionForUser(user, start + i));
+          userCons.getCapacityAtTime(start + i));
     }
 
     // Now update it
@@ -252,13 +257,18 @@ public class TestInMemoryPlan {
       Assert.fail(e.getMessage());
     }
     doAssertions(plan, rAllocation);
+
+    userCons =
+        plan.getConsumptionForUserOverTime(user, start, start
+            + updatedAlloc.length);
+
     for (int i = 0; i < updatedAlloc.length; i++) {
       Assert.assertEquals(
-          Resource.newInstance(1024 * (updatedAlloc[i] + i), updatedAlloc[i]
+     Resource.newInstance(1024 * (updatedAlloc[i] + i), updatedAlloc[i]
               + i), plan.getTotalCommittedResources(start + i));
       Assert.assertEquals(
           Resource.newInstance(1024 * (updatedAlloc[i] + i), updatedAlloc[i]
-              + i), plan.getConsumptionForUser(user, start + i));
+              + i), userCons.getCapacityAtTime(start + i));
     }
   }
 
@@ -321,13 +331,17 @@ public class TestInMemoryPlan {
       Assert.fail(e.getMessage());
     }
     doAssertions(plan, rAllocation);
+
+    RLESparseResourceAllocation userCons =
+        plan.getConsumptionForUserOverTime(user, start, start + alloc.length);
+
     for (int i = 0; i < alloc.length; i++) {
       Assert.assertEquals(
           Resource.newInstance(1024 * (alloc[i] + i), (alloc[i] + i)),
           plan.getTotalCommittedResources(start + i));
       Assert.assertEquals(
           Resource.newInstance(1024 * (alloc[i] + i), (alloc[i] + i)),
-          plan.getConsumptionForUser(user, start + i));
+          userCons.getCapacityAtTime(start + i));
     }
 
     // Now delete it
@@ -337,11 +351,13 @@ public class TestInMemoryPlan {
       Assert.fail(e.getMessage());
     }
     Assert.assertNull(plan.getReservationById(reservationID));
+    userCons =
+        plan.getConsumptionForUserOverTime(user, start, start + alloc.length);
     for (int i = 0; i < alloc.length; i++) {
       Assert.assertEquals(Resource.newInstance(0, 0),
           plan.getTotalCommittedResources(start + i));
       Assert.assertEquals(Resource.newInstance(0, 0),
-          plan.getConsumptionForUser(user, start + i));
+          userCons.getCapacityAtTime(start + i));
     }
   }
 
@@ -393,14 +409,8 @@ public class TestInMemoryPlan {
       Assert.fail(e.getMessage());
     }
     doAssertions(plan, rAllocation);
-    for (int i = 0; i < alloc1.length; i++) {
-      Assert.assertEquals(
-          Resource.newInstance(1024 * (alloc1[i]), (alloc1[i])),
-          plan.getTotalCommittedResources(start + i));
-      Assert.assertEquals(
-          Resource.newInstance(1024 * (alloc1[i]), (alloc1[i])),
-          plan.getConsumptionForUser(user, start + i));
-    }
+    checkAllocation(plan, alloc1, start);
+
 
     // Now add another one
     ReservationId reservationID2 =
@@ -424,13 +434,17 @@ public class TestInMemoryPlan {
       Assert.fail(e.getMessage());
     }
     Assert.assertNotNull(plan.getReservationById(reservationID2));
+
+    RLESparseResourceAllocation userCons =
+        plan.getConsumptionForUserOverTime(user, start, start + alloc2.length);
+
     for (int i = 0; i < alloc2.length; i++) {
       Assert.assertEquals(
           Resource.newInstance(1024 * (alloc1[i] + alloc2[i] + i), alloc1[i]
               + alloc2[i] + i), plan.getTotalCommittedResources(start + i));
       Assert.assertEquals(
           Resource.newInstance(1024 * (alloc1[i] + alloc2[i] + i), alloc1[i]
-              + alloc2[i] + i), plan.getConsumptionForUser(user, start + i));
+              + alloc2[i] + i), userCons.getCapacityAtTime(start + i));
     }
 
     // Now archive completed reservations
@@ -445,14 +459,8 @@ public class TestInMemoryPlan {
     }
     Assert.assertNotNull(plan.getReservationById(reservationID1));
     Assert.assertNull(plan.getReservationById(reservationID2));
-    for (int i = 0; i < alloc1.length; i++) {
-      Assert.assertEquals(
-          Resource.newInstance(1024 * (alloc1[i]), (alloc1[i])),
-          plan.getTotalCommittedResources(start + i));
-      Assert.assertEquals(
-          Resource.newInstance(1024 * (alloc1[i]), (alloc1[i])),
-          plan.getConsumptionForUser(user, start + i));
-    }
+    checkAllocation(plan, alloc1, start);
+
     when(clock.getTime()).thenReturn(107L);
     try {
       // will remove 1st reservation also as it has fallen out of the archival
@@ -461,12 +469,16 @@ public class TestInMemoryPlan {
     } catch (PlanningException e) {
       Assert.fail(e.getMessage());
     }
+
+    userCons =
+        plan.getConsumptionForUserOverTime(user, start, start + alloc1.length);
+
     Assert.assertNull(plan.getReservationById(reservationID1));
     for (int i = 0; i < alloc1.length; i++) {
       Assert.assertEquals(Resource.newInstance(0, 0),
           plan.getTotalCommittedResources(start + i));
       Assert.assertEquals(Resource.newInstance(0, 0),
-          plan.getConsumptionForUser(user, start + i));
+          userCons.getCapacityAtTime(start + i));
     }
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/742632e3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/TestGreedyReservationAgent.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/TestGreedyReservationAgent.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/TestGreedyReservationAgent.java
index cb4eaeb..f81e7ec 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/TestGreedyReservationAgent.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/TestGreedyReservationAgent.java
@@ -18,9 +18,12 @@
 package org.apache.hadoop.yarn.server.resourcemanager.reservation.planning;
 
 import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 import static org.mockito.Mockito.mock;
+
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collections;
@@ -86,6 +89,7 @@ public class TestGreedyReservationAgent {
             instConstraint, avgConstraint);
     CapacityOverTimePolicy policy = new CapacityOverTimePolicy();
     policy.init(reservationQ, conf);
+
     agent = new GreedyReservationAgent();
 
     QueueMetrics queueMetrics = mock(QueueMetrics.class);
@@ -135,6 +139,94 @@ public class TestGreedyReservationAgent {
 
   }
 
+  @SuppressWarnings("javadoc")
+  @Test
+  public void testSharingPolicyFeedback() throws PlanningException {
+
+    prepareBasicPlan();
+
+    // let's constrain the instantaneous allocation and see the
+    // policy kicking in during planning
+    float instConstraint = 40;
+    float avgConstraint = 40;
+
+    ReservationSchedulerConfiguration conf =
+        ReservationSystemTestUtil.createConf(plan.getQueueName(), 100000,
+            instConstraint, avgConstraint);
+
+    plan.getSharingPolicy().init(plan.getQueueName(), conf);
+
+    // create a request with a single atomic ask
+    ReservationDefinition rr = new ReservationDefinitionPBImpl();
+    rr.setArrival(5 * step);
+    rr.setDeadline(100 * step);
+    ReservationRequest r =
+        ReservationRequest.newInstance(Resource.newInstance(2048, 2), 20, 20,
+            10 * step);
+    ReservationRequests reqs = new ReservationRequestsPBImpl();
+    reqs.setReservationResources(Collections.singletonList(r));
+    rr.setReservationRequests(reqs);
+
+    ReservationId reservationID =
+        ReservationSystemTestUtil.getNewReservationId();
+    agent.createReservation(reservationID, "u3", plan, rr);
+
+    ReservationId reservationID2 =
+        ReservationSystemTestUtil.getNewReservationId();
+    agent.createReservation(reservationID2, "u3", plan, rr);
+
+    ReservationDefinition rr3 = new ReservationDefinitionPBImpl();
+    rr3.setArrival(5 * step);
+    rr3.setDeadline(100 * step);
+    ReservationRequest r3 =
+        ReservationRequest.newInstance(Resource.newInstance(2048, 2), 45, 45,
+            10 * step);
+    ReservationRequests reqs3 = new ReservationRequestsPBImpl();
+    reqs3.setReservationResources(Collections.singletonList(r3));
+    rr3.setReservationRequests(reqs3);
+
+    ReservationId reservationID3 =
+        ReservationSystemTestUtil.getNewReservationId();
+    try {
+      // RR3 is simply too big to fit
+      agent.createReservation(reservationID3, "u3", plan, rr3);
+      fail();
+    } catch (PlanningException pe) {
+      // expected
+    }
+
+    assertTrue("Agent-based allocation failed", reservationID != null);
+    assertTrue("Agent-based allocation failed", plan.getAllReservations()
+        .size() == 4);
+
+    ReservationAllocation cs = plan.getReservationById(reservationID);
+    ReservationAllocation cs2 = plan.getReservationById(reservationID2);
+    ReservationAllocation cs3 = plan.getReservationById(reservationID3);
+
+    assertNotNull(cs);
+    assertNotNull(cs2);
+    assertNull(cs3);
+
+    System.out.println("--------AFTER SIMPLE ALLOCATION (queue: "
+        + reservationID + ")----------");
+    System.out.println(plan.toString());
+    System.out.println(plan.toCumulativeString());
+
+    for (long i = 90 * step; i < 100 * step; i++) {
+      assertTrue(
+          "Agent-based allocation unexpected",
+          Resources.equals(cs.getResourcesAtTime(i),
+              Resource.newInstance(2048 * 20, 2 * 20)));
+    }
+    // RR2 is pushed out by the presence of RR
+    for (long i = 80 * step; i < 90 * step; i++) {
+      assertTrue(
+          "Agent-based allocation unexpected",
+          Resources.equals(cs2.getResourcesAtTime(i),
+              Resource.newInstance(2048 * 20, 2 * 20)));
+    }
+  }
+
   @Test
   public void testOrder() throws PlanningException {
     prepareBasicPlan();
@@ -186,7 +278,6 @@ public class TestGreedyReservationAgent {
     assertTrue(cs.toString(), check(cs, 10 * step, 30 * step, 10, 1024, 1));
     assertTrue(cs.toString(), check(cs, 40 * step, 50 * step, 20, 1024, 1));
     assertTrue(cs.toString(), check(cs, 50 * step, 70 * step, 10, 1024, 1));
-
     System.out.println("--------AFTER ORDER ALLOCATION (queue: "
         + reservationID + ")----------");
     System.out.println(plan.toString());
@@ -376,7 +467,6 @@ public class TestGreedyReservationAgent {
     ReservationAllocation cs = plan.getReservationById(reservationID);
 
     assertTrue(cs.toString(), check(cs, 110 * step, 120 * step, 20, 1024, 1));
-
     System.out.println("--------AFTER ANY ALLOCATION (queue: " + reservationID
         + ")----------");
     System.out.println(plan.toString());


[25/38] hadoop git commit: HDFS-9484. NNThroughputBenchmark$BlockReportStats should not send empty block reports. Contributed by Mingliang Liu.

Posted by as...@apache.org.
HDFS-9484. NNThroughputBenchmark$BlockReportStats should not send empty block reports. Contributed by Mingliang Liu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/924a33d0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/924a33d0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/924a33d0

Branch: refs/heads/yarn-2877
Commit: 924a33d02d82adef11a56bf97ff4100c567d9e5d
Parents: e71aa71
Author: Konstantin V Shvachko <sh...@apache.org>
Authored: Thu Dec 3 14:51:06 2015 -0800
Committer: Konstantin V Shvachko <sh...@apache.org>
Committed: Thu Dec 3 15:03:34 2015 -0800

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt                    | 3 +++
 .../hadoop/hdfs/server/namenode/NNThroughputBenchmark.java     | 6 +++---
 2 files changed, 6 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/924a33d0/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index ef2efc7..40fdc58 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -2436,6 +2436,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-9429. Tests in TestDFSAdminWithHA intermittently fail with
     EOFException (Xiao Chen via Colin P. McCabe)
 
+    HDFS-9484. NNThroughputBenchmark$BlockReportStats should not send empty
+    block reports. (Mingliang Liu via shv)
+
 Release 2.7.3 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/924a33d0/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
index 91f9793..8a594ed 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
@@ -925,7 +925,7 @@ public class NNThroughputBenchmark implements Tool {
     NamespaceInfo nsInfo;
     DatanodeRegistration dnRegistration;
     DatanodeStorage storage; //only one storage 
-    final ArrayList<BlockReportReplica> blocks;
+    final List<BlockReportReplica> blocks;
     int nrBlocks; // actual number of blocks
     BlockListAsLongs blockReportList;
     final int dnIdx;
@@ -938,7 +938,7 @@ public class NNThroughputBenchmark implements Tool {
 
     TinyDatanode(int dnIdx, int blockCapacity) throws IOException {
       this.dnIdx = dnIdx;
-      this.blocks = new ArrayList<BlockReportReplica>(blockCapacity);
+      this.blocks = Arrays.asList(new BlockReportReplica[blockCapacity]);
       this.nrBlocks = 0;
     }
 
@@ -1013,7 +1013,7 @@ public class NNThroughputBenchmark implements Tool {
         Block block = new Block(blocks.size() - idx, 0, 0);
         blocks.set(idx, new BlockReportReplica(block));
       }
-      blockReportList = BlockListAsLongs.EMPTY;
+      blockReportList = BlockListAsLongs.encode(blocks);
     }
 
     BlockListAsLongs getBlockReportList() {
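
A minimal standalone sketch (not part of the patch) of the two behaviours the fix
relies on: Arrays.asList over a pre-sized array yields a fixed-size list whose
slots can be overwritten in place, and encoding that list produces a report with
the generated replicas instead of the always-empty BlockListAsLongs.EMPTY.

  // Illustrative only; the benchmark fills every slot before encoding.
  BlockReportReplica[] backing = new BlockReportReplica[3];
  List<BlockReportReplica> blocks = Arrays.asList(backing);
  for (int idx = 0; idx < blocks.size(); idx++) {
    blocks.set(idx, new BlockReportReplica(new Block(blocks.size() - idx, 0, 0)));
  }
  // blocks.add(...) would throw UnsupportedOperationException on this list
  BlockListAsLongs blockReportList = BlockListAsLongs.encode(blocks);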


[15/38] hadoop git commit: HDFS-9129. Move the safemode block count into BlockManager. Contributed by Mingliang Liu.

Posted by as...@apache.org.
HDFS-9129. Move the safemode block count into BlockManager. Contributed by Mingliang Liu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a49cc74b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a49cc74b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a49cc74b

Branch: refs/heads/yarn-2877
Commit: a49cc74b4c72195dee1dfb6f9548e5e411dff553
Parents: 58f6f54
Author: Jing Zhao <ji...@apache.org>
Authored: Tue Dec 1 16:09:19 2015 -0800
Committer: Jing Zhao <ji...@apache.org>
Committed: Tue Dec 1 16:09:19 2015 -0800

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |   3 +
 .../server/blockmanagement/BlockManager.java    |  95 ++-
 .../blockmanagement/BlockManagerSafeMode.java   | 573 +++++++++++++
 .../server/blockmanagement/DatanodeManager.java |   6 +-
 .../hdfs/server/namenode/Checkpointer.java      |   3 +-
 .../hdfs/server/namenode/FSDirDeleteOp.java     |   2 +-
 .../hdfs/server/namenode/FSDirRenameOp.java     |   3 +-
 .../hdfs/server/namenode/FSDirTruncateOp.java   |   2 +-
 .../hdfs/server/namenode/FSEditLogLoader.java   |   3 +-
 .../hdfs/server/namenode/FSNamesystem.java      | 855 ++-----------------
 .../hadoop/hdfs/server/namenode/NameNode.java   |  10 +-
 .../hadoop/hdfs/server/namenode/Namesystem.java |  15 +-
 .../hadoop/hdfs/server/namenode/SafeMode.java   |  18 -
 .../org/apache/hadoop/hdfs/TestSafeMode.java    |   2 +-
 .../blockmanagement/BlockManagerTestUtil.java   |   9 +
 .../TestBlockManagerSafeMode.java               | 420 +++++++++
 .../blockmanagement/TestReplicationPolicy.java  |   2 +-
 .../hdfs/server/namenode/NameNodeAdapter.java   |  10 +-
 .../hdfs/server/namenode/TestFSNamesystem.java  |   9 +-
 .../TestNameNodeMetadataConsistency.java        |   2 -
 .../hdfs/server/namenode/ha/TestHASafeMode.java |  11 +-
 21 files changed, 1208 insertions(+), 845 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a49cc74b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 3e1718d..ee6d38f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -879,6 +879,9 @@ Release 2.9.0 - UNRELEASED
 
   IMPROVEMENTS
 
+      HDFS-9129. Move the safemode block count into BlockManager. (Mingliang Liu
+      via jing9)
+
   OPTIMIZATIONS
 
   BUG FIXES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a49cc74b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 3033eaa..8c94c03 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -72,6 +72,7 @@ import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
 import org.apache.hadoop.hdfs.server.namenode.CachedBlock;
+import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.NameNode.OperationCategory;
 import org.apache.hadoop.hdfs.server.namenode.Namesystem;
@@ -125,6 +126,8 @@ public class BlockManager implements BlockStatsMXBean {
 
   private final Namesystem namesystem;
 
+  private final BlockManagerSafeMode bmSafeMode;
+
   private final DatanodeManager datanodeManager;
   private final HeartbeatManager heartbeatManager;
   private final BlockTokenSecretManager blockTokenSecretManager;
@@ -380,6 +383,8 @@ public class BlockManager implements BlockStatsMXBean {
     this.numberOfBytesInFutureBlocks = new AtomicLong();
     this.inRollBack = isInRollBackMode(NameNode.getStartupOption(conf));
 
+    bmSafeMode = new BlockManagerSafeMode(this, namesystem, conf);
+
     LOG.info("defaultReplication         = " + defaultReplication);
     LOG.info("maxReplication             = " + maxReplication);
     LOG.info("minReplication             = " + minReplication);
@@ -488,15 +493,17 @@ public class BlockManager implements BlockStatsMXBean {
         : false;
   }
 
-  public void activate(Configuration conf) {
+  public void activate(Configuration conf, long blockTotal) {
     pendingReplications.start();
     datanodeManager.activate(conf);
     this.replicationThread.setName("ReplicationMonitor");
     this.replicationThread.start();
     mxBeanName = MBeans.register("NameNode", "BlockStats", this);
+    bmSafeMode.activate(blockTotal);
   }
 
   public void close() {
+    bmSafeMode.close();
     try {
       replicationThread.interrupt();
       replicationThread.join(3000);
@@ -741,11 +748,11 @@ public class BlockManager implements BlockStatsMXBean {
     // count. (We may not have the minimum replica count yet if this is
     // a "forced" completion when a file is getting closed by an
     // OP_CLOSE edit on the standby).
-    namesystem.adjustSafeModeBlockTotals(0, 1);
+    bmSafeMode.adjustBlockTotals(0, 1);
     final int minStorage = curBlock.isStriped() ?
         ((BlockInfoStriped) curBlock).getRealDataBlockNum() : minReplication;
-    namesystem.incrementSafeBlockCount(
-        Math.min(numNodes, minStorage), curBlock);
+    bmSafeMode.incrementSafeBlockCount(Math.min(numNodes, minStorage),
+        curBlock);
   }
 
   /**
@@ -805,7 +812,7 @@ public class BlockManager implements BlockStatsMXBean {
     
     // Adjust safe-mode totals, since under-construction blocks don't
     // count in safe-mode.
-    namesystem.adjustSafeModeBlockTotals(
+    bmSafeMode.adjustBlockTotals(
         // decrement safe if we had enough
         hasMinStorage(lastBlock, targets.length) ? -1 : 0,
         // always decrement total blocks
@@ -1188,7 +1195,7 @@ public class BlockManager implements BlockStatsMXBean {
         invalidateBlocks.remove(node, b);
       }
     }
-    namesystem.checkSafeMode();
+    checkSafeMode();
   }
 
   /**
@@ -1933,6 +1940,74 @@ public class BlockManager implements BlockStatsMXBean {
     return leaseId;
   }
 
+  public void registerDatanode(DatanodeRegistration nodeReg)
+      throws IOException {
+    assert namesystem.hasWriteLock();
+    datanodeManager.registerDatanode(nodeReg);
+    bmSafeMode.checkSafeMode();
+  }
+
+  /**
+   * Set the total number of blocks in the system.
+   * If safe mode is not currently on, this is a no-op.
+   */
+  public void setBlockTotal(long total) {
+    if (bmSafeMode.isInSafeMode()) {
+      bmSafeMode.setBlockTotal(total);
+      bmSafeMode.checkSafeMode();
+    }
+  }
+
+  public boolean isInSafeMode() {
+    return bmSafeMode.isInSafeMode();
+  }
+
+  public String getSafeModeTip() {
+    return bmSafeMode.getSafeModeTip();
+  }
+
+  public void leaveSafeMode(boolean force) {
+    bmSafeMode.leaveSafeMode(force);
+  }
+
+  void checkSafeMode() {
+    bmSafeMode.checkSafeMode();
+  }
+
+  /**
+   * Removes the blocks from blocksmap and updates the safemode blocks total.
+   * @param blocks An instance of {@link BlocksMapUpdateInfo} which contains a
+   *               list of blocks that need to be removed from blocksMap
+   */
+  public void removeBlocksAndUpdateSafemodeTotal(BlocksMapUpdateInfo blocks) {
+    assert namesystem.hasWriteLock();
+    // In the case that we are a Standby tailing edits from the
+    // active while in safe-mode, we need to track the total number
+    // of blocks and safe blocks in the system.
+    boolean trackBlockCounts = bmSafeMode.isSafeModeTrackingBlocks();
+    int numRemovedComplete = 0, numRemovedSafe = 0;
+
+    for (BlockInfo b : blocks.getToDeleteList()) {
+      if (trackBlockCounts) {
+        if (b.isComplete()) {
+          numRemovedComplete++;
+          if (hasMinStorage(b, b.numNodes())) {
+            numRemovedSafe++;
+          }
+        }
+      }
+      removeBlock(b);
+    }
+    if (trackBlockCounts) {
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Adjusting safe-mode totals for deletion."
+            + "decreasing safeBlocks by " + numRemovedSafe
+            + ", totalBlocks by " + numRemovedComplete);
+      }
+      bmSafeMode.adjustBlockTotals(-numRemovedSafe, -numRemovedComplete);
+    }
+  }
+
   /**
    * StatefulBlockInfo is used to build the "toUC" list, which is a list of
    * updates to the information about under-construction blocks.
@@ -2333,7 +2408,7 @@ public class BlockManager implements BlockStatsMXBean {
         if (namesystem.isInSnapshot(storedBlock)) {
           int numOfReplicas = storedBlock.getUnderConstructionFeature()
               .getNumExpectedLocations();
-          namesystem.incrementSafeBlockCount(numOfReplicas, storedBlock);
+          bmSafeMode.incrementSafeBlockCount(numOfReplicas, storedBlock);
         }
         //and fall through to next clause
       }      
@@ -2732,7 +2807,7 @@ public class BlockManager implements BlockStatsMXBean {
       // only complete blocks are counted towards that.
       // In the case that the block just became complete above, completeBlock()
       // handles the safe block count maintenance.
-      namesystem.incrementSafeBlockCount(numCurrentReplica, storedBlock);
+      bmSafeMode.incrementSafeBlockCount(numCurrentReplica, storedBlock);
     }
   }
 
@@ -2808,7 +2883,7 @@ public class BlockManager implements BlockStatsMXBean {
       // Is no-op if not in safe mode.
       // In the case that the block just became complete above, completeBlock()
       // handles the safe block count maintenance.
-      namesystem.incrementSafeBlockCount(numCurrentReplica, storedBlock);
+      bmSafeMode.incrementSafeBlockCount(numCurrentReplica, storedBlock);
     }
     
     // if file is under construction, then done for now
@@ -3352,7 +3427,7 @@ public class BlockManager implements BlockStatsMXBean {
       //
       BlockCollection bc = getBlockCollection(storedBlock);
       if (bc != null) {
-        namesystem.decrementSafeBlockCount(storedBlock);
+        bmSafeMode.decrementSafeBlockCount(storedBlock);
         updateNeededReplications(storedBlock, -1, 0);
       }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a49cc74b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerSafeMode.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerSafeMode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerSafeMode.java
new file mode 100644
index 0000000..297532e
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerSafeMode.java
@@ -0,0 +1,573 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.blockmanagement;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.server.namenode.NameNode;
+import org.apache.hadoop.hdfs.server.namenode.Namesystem;
+import org.apache.hadoop.hdfs.server.namenode.startupprogress.Phase;
+import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress;
+import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress.Counter;
+import org.apache.hadoop.hdfs.server.namenode.startupprogress.Status;
+import org.apache.hadoop.hdfs.server.namenode.startupprogress.Step;
+import org.apache.hadoop.hdfs.server.namenode.startupprogress.StepType;
+import org.apache.hadoop.net.NetworkTopology;
+import org.apache.hadoop.util.Daemon;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.concurrent.atomic.AtomicLong;
+
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_REPL_QUEUE_THRESHOLD_PCT_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SAFEMODE_EXTENSION_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SAFEMODE_MIN_DATANODES_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SAFEMODE_MIN_DATANODES_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SAFEMODE_THRESHOLD_PCT_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SAFEMODE_THRESHOLD_PCT_KEY;
+import static org.apache.hadoop.util.Time.monotonicNow;
+
+/**
+ * Block manager safe mode info.
+ *
+ * During name node startup, counts the number of <em>safe blocks</em>, those
+ * that have at least the minimal number of replicas, and calculates the ratio
+ * of safe blocks to the total number of blocks in the system, i.e. the size of
+ * the blocks map. When the ratio reaches the {@link #threshold} and enough live
+ * data nodes have registered, it waits for the safe mode {@link #extension}
+ * interval. After the extension period has passed, it will not leave safe mode
+ * until the safe blocks ratio again reaches the {@link #threshold} and enough
+ * live data nodes have registered.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+class BlockManagerSafeMode {
+  enum BMSafeModeStatus {
+    PENDING_THRESHOLD, /** Pending on more safe blocks or live datanode. */
+    EXTENSION,         /** In extension period. */
+    OFF                /** Safe mode is off. */
+  }
+
+  static final Logger LOG = LoggerFactory.getLogger(BlockManagerSafeMode.class);
+  static final Step STEP_AWAITING_REPORTED_BLOCKS =
+      new Step(StepType.AWAITING_REPORTED_BLOCKS);
+
+  private final BlockManager blockManager;
+  private final Namesystem namesystem;
+  private final boolean haEnabled;
+  private volatile BMSafeModeStatus status = BMSafeModeStatus.OFF;
+
+  /** Safe mode threshold condition %.*/
+  private final double threshold;
+  /** Number of blocks needed to satisfy safe mode threshold condition. */
+  private long blockThreshold;
+  /** Total number of blocks. */
+  private long blockTotal;
+  /** Number of safe blocks. */
+  private long blockSafe;
+  /** Safe mode minimum number of datanodes alive. */
+  private final int datanodeThreshold;
+  /** Min replication required by safe mode. */
+  private final int safeReplication;
+  /** Threshold for populating needed replication queues. */
+  private final double replQueueThreshold;
+  /** Number of blocks needed before populating replication queues. */
+  private long blockReplQueueThreshold;
+
+  /** How long (in ms) is the extension period. */
+  private final int extension;
+  /** Timestamp of the first time when thresholds are met. */
+  private final AtomicLong reachedTime = new AtomicLong();
+  /** Timestamp of the safe mode initialized. */
+  private long startTime;
+  /** the safe mode monitor thread. */
+  private final Daemon smmthread = new Daemon(new SafeModeMonitor());
+
+  /** time of the last status printout */
+  private long lastStatusReport;
+  /** Counter for tracking startup progress of reported blocks. */
+  private Counter awaitingReportedBlocksCounter;
+
+  BlockManagerSafeMode(BlockManager blockManager, Namesystem namesystem,
+      Configuration conf) {
+    this.blockManager = blockManager;
+    this.namesystem = namesystem;
+    this.haEnabled = namesystem.isHaEnabled();
+    this.threshold = conf.getFloat(DFS_NAMENODE_SAFEMODE_THRESHOLD_PCT_KEY,
+        DFS_NAMENODE_SAFEMODE_THRESHOLD_PCT_DEFAULT);
+    if (this.threshold > 1.0) {
+      LOG.warn("The threshold value shouldn't be greater than 1, threshold: {}",
+          threshold);
+    }
+    this.datanodeThreshold = conf.getInt(
+        DFS_NAMENODE_SAFEMODE_MIN_DATANODES_KEY,
+        DFS_NAMENODE_SAFEMODE_MIN_DATANODES_DEFAULT);
+    int minReplication =
+        conf.getInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MIN_KEY,
+            DFSConfigKeys.DFS_NAMENODE_REPLICATION_MIN_DEFAULT);
+    // DFS_NAMENODE_SAFEMODE_REPLICATION_MIN_KEY is an expert level setting,
+    // setting this lower than the min replication is not recommended
+    // and/or dangerous for production setups.
+    // When it's unset, safeReplication will use dfs.namenode.replication.min
+    this.safeReplication =
+        conf.getInt(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_REPLICATION_MIN_KEY,
+            minReplication);
+    // default to safe mode threshold (i.e., don't populate queues before
+    // leaving safe mode)
+    this.replQueueThreshold =
+        conf.getFloat(DFS_NAMENODE_REPL_QUEUE_THRESHOLD_PCT_KEY,
+            (float) threshold);
+
+    this.extension = conf.getInt(DFS_NAMENODE_SAFEMODE_EXTENSION_KEY, 0);
+
+    LOG.info("{} = {}", DFS_NAMENODE_SAFEMODE_THRESHOLD_PCT_KEY, threshold);
+    LOG.info("{} = {}", DFS_NAMENODE_SAFEMODE_MIN_DATANODES_KEY,
+        datanodeThreshold);
+    LOG.info("{} = {}", DFS_NAMENODE_SAFEMODE_EXTENSION_KEY, extension);
+  }
+
+  /**
+   * Initialize the safe mode information.
+   * @param total initial total blocks
+   */
+  void activate(long total) {
+    assert namesystem.hasWriteLock();
+    assert status == BMSafeModeStatus.OFF;
+
+    startTime = monotonicNow();
+    setBlockTotal(total);
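+    // If the thresholds are already satisfied (for example, an empty
+    // namespace), leave immediately; the force flag bypasses the
+    // bytes-in-future check in leaveSafeMode().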
+    if (areThresholdsMet()) {
+      leaveSafeMode(true);
+    } else {
+      // enter safe mode
+      status = BMSafeModeStatus.PENDING_THRESHOLD;
+      initializeReplQueuesIfNecessary();
+      reportStatus("STATE* Safe mode ON.", true);
+      lastStatusReport = monotonicNow();
+    }
+  }
+
+  /**
+   * @return true if the name node is still in startup safe mode, else false.
+   */
+  boolean isInSafeMode() {
+    if (status != BMSafeModeStatus.OFF) {
+      doConsistencyCheck();
+      return true;
+    } else {
+      return false;
+    }
+  }
+
+  /**
+   * The transition of the safe mode state machine.
+   * If safe mode is not currently on, this is a no-op.
+   */
+  void checkSafeMode() {
+    assert namesystem.hasWriteLock();
+    if (namesystem.inTransitionToActive()) {
+      return;
+    }
+
+    switch (status) {
+    case PENDING_THRESHOLD:
+      if (areThresholdsMet()) {
+        if (extension > 0) {
+          // PENDING_THRESHOLD -> EXTENSION
+          status = BMSafeModeStatus.EXTENSION;
+          reachedTime.set(monotonicNow());
+          smmthread.start();
+          initializeReplQueuesIfNecessary();
+          reportStatus("STATE* Safe mode extension entered.", true);
+        } else {
+          // PENDING_THRESHOLD -> OFF
+          leaveSafeMode(false);
+        }
+      } else {
+        initializeReplQueuesIfNecessary();
+        reportStatus("STATE* Safe mode ON.", false);
+      }
+      break;
+    case EXTENSION:
+      reportStatus("STATE* Safe mode ON.", false);
+      break;
+    case OFF:
+      break;
+    default:
+      assert false : "Non-recognized block manager safe mode status: " + status;
+    }
+  }
+
+  /**
+   * Adjust the total number of blocks safe and expected during safe mode.
+   * If safe mode is not currently on, this is a no-op.
+   * @param deltaSafe  the change in number of safe blocks
+   * @param deltaTotal the change in number of total blocks expected
+   */
+  void adjustBlockTotals(int deltaSafe, int deltaTotal) {
+    assert namesystem.hasWriteLock();
+    if (!isSafeModeTrackingBlocks()) {
+      return;
+    }
+
+    long newBlockTotal;
+    synchronized (this) {
+      LOG.debug("Adjusting block totals from {}/{} to {}/{}",  blockSafe,
+          blockTotal, blockSafe + deltaSafe, blockTotal + deltaTotal);
+      assert blockSafe + deltaSafe >= 0 : "Can't reduce blockSafe " +
+          blockSafe + " by " + deltaSafe + ": would be negative";
+      assert blockTotal + deltaTotal >= 0 : "Can't reduce blockTotal " +
+          blockTotal + " by " + deltaTotal + ": would be negative";
+
+      blockSafe += deltaSafe;
+      newBlockTotal = blockTotal + deltaTotal;
+    }
+    setBlockTotal(newBlockTotal);
+    checkSafeMode();
+  }
+
+  /**
+   * Should we track blocks in safe mode.
+   * <p/>
+   * Never track blocks incrementally in non-HA code.
+   * <p/>
+   * In the HA case, the StandbyNode can be in safemode while the namespace
+   * is modified by the edit log tailer. In this case, the number of total
+   * blocks changes as edits are processed (eg blocks are added and deleted).
+   * However, we don't want to do the incremental tracking during the
+   * startup-time loading process -- only once the initial total has been
+   * set after the image has been loaded.
+   */
+  boolean isSafeModeTrackingBlocks() {
+    assert namesystem.hasWriteLock();
+    return haEnabled && status != BMSafeModeStatus.OFF;
+  }
+
+  /**
+   * Set total number of blocks.
+   */
+  void setBlockTotal(long total) {
+    assert namesystem.hasWriteLock();
+    synchronized (this) {
+      this.blockTotal = total;
+      this.blockThreshold = (long) (total * threshold);
+    }
+    this.blockReplQueueThreshold = (long) (total * replQueueThreshold);
+  }
+
+  String getSafeModeTip() {
+    String msg = "";
+
+    synchronized (this) {
+      if (blockSafe < blockThreshold) {
+        msg += String.format(
+            "The reported blocks %d needs additional %d"
+                + " blocks to reach the threshold %.4f of total blocks %d.%n",
+            blockSafe, (blockThreshold - blockSafe), threshold, blockTotal);
+      } else {
+        msg += String.format("The reported blocks %d has reached the threshold"
+            + " %.4f of total blocks %d. ", blockSafe, threshold, blockTotal);
+      }
+    }
+
+    int numLive = blockManager.getDatanodeManager().getNumLiveDataNodes();
+    if (numLive < datanodeThreshold) {
+      msg += String.format(
+          "The number of live datanodes %d needs an additional %d live "
+              + "datanodes to reach the minimum number %d.%n",
+          numLive, (datanodeThreshold - numLive), datanodeThreshold);
+    } else {
+      msg += String.format("The number of live datanodes %d has reached "
+              + "the minimum number %d. ",
+          numLive, datanodeThreshold);
+    }
+
+    if (blockManager.getBytesInFuture() > 0) {
+      msg += "Name node detected blocks with generation stamps " +
+          "in future. This means that Name node metadata is inconsistent." +
+          "This can happen if Name node metadata files have been manually " +
+          "replaced. Exiting safe mode will cause loss of " + blockManager
+          .getBytesInFuture() + " byte(s). Please restart name node with " +
+          "right metadata or use \"hdfs dfsadmin -safemode forceExit" +
+          "if you are certain that the NameNode was started with the" +
+          "correct FsImage and edit logs. If you encountered this during" +
+          "a rollback, it is safe to exit with -safemode forceExit.";
+      return msg;
+    }
+
+    final String turnOffTip = "Safe mode will be turned off automatically ";
+    switch(status) {
+    case PENDING_THRESHOLD:
+      msg += turnOffTip + "once the thresholds have been reached.";
+      break;
+    case EXTENSION:
+      msg += "In safe mode extension. "+ turnOffTip + "in " +
+          timeToLeaveExtension() / 1000 + " seconds.";
+      break;
+    case OFF:
+      msg += turnOffTip + "soon.";
+      break;
+    default:
+      assert false : "Non-recognized block manager safe mode status: " + status;
+    }
+    return msg;
+  }
+
+  /**
+   * Leave start up safe mode.
+   * @param force - true to force exit
+   */
+  void leaveSafeMode(boolean force) {
+    assert namesystem.hasWriteLock() : "Leaving safe mode needs write lock!";
+
+    // if not done yet, initialize replication queues.
+    // In the standby, do not populate repl queues
+    if (!blockManager.isPopulatingReplQueues() &&
+        blockManager.shouldPopulateReplQueues()) {
+      blockManager.initializeReplQueues();
+    }
+
+    if (!force && blockManager.getBytesInFuture() > 0) {
+      LOG.error("Refusing to leave safe mode without a force flag. " +
+          "Exiting safe mode will cause a deletion of {} byte(s). Please use " +
+          "-forceExit flag to exit safe mode forcefully if data loss is " +
+          "acceptable.", blockManager.getBytesInFuture());
+      return;
+    }
+
+    if (status != BMSafeModeStatus.OFF) {
+      NameNode.stateChangeLog.info("STATE* Safe mode is OFF");
+    }
+    status = BMSafeModeStatus.OFF;
+
+    final long timeInSafemode = monotonicNow() - startTime;
+    NameNode.stateChangeLog.info("STATE* Leaving safe mode after {} secs",
+        timeInSafemode / 1000);
+    NameNode.getNameNodeMetrics().setSafeModeTime(timeInSafemode);
+
+    final NetworkTopology nt = blockManager.getDatanodeManager()
+        .getNetworkTopology();
+    NameNode.stateChangeLog.info("STATE* Network topology has {} racks and {}" +
+        " datanodes", nt.getNumOfRacks(), nt.getNumOfLeaves());
+    NameNode.stateChangeLog.info("STATE* UnderReplicatedBlocks has {} blocks",
+        blockManager.numOfUnderReplicatedBlocks());
+
+    namesystem.startSecretManagerIfNecessary();
+
+    // If startup has not yet completed, end safemode phase.
+    StartupProgress prog = NameNode.getStartupProgress();
+    if (prog.getStatus(Phase.SAFEMODE) != Status.COMPLETE) {
+      prog.endStep(Phase.SAFEMODE,
+          BlockManagerSafeMode.STEP_AWAITING_REPORTED_BLOCKS);
+      prog.endPhase(Phase.SAFEMODE);
+    }
+  }
+
+  /**
+   * Increment number of safe blocks if current block has reached minimal
+   * replication.
+   * If safe mode is not currently on, this is a no-op.
+   * @param storageNum  current number of replicas or number of internal blocks
+   *                    of a striped block group
+   * @param storedBlock current storedBlock which is either a
+   *                    BlockInfoContiguous or a BlockInfoStriped
+   */
+  synchronized void incrementSafeBlockCount(int storageNum,
+      BlockInfo storedBlock) {
+    assert namesystem.hasWriteLock();
+    if (status == BMSafeModeStatus.OFF) {
+      return;
+    }
+
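+    // A striped block group is considered safe once all of its real data
+    // blocks have been reported; a contiguous block once it reaches the
+    // configured safe replication.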
+    final int safe = storedBlock.isStriped() ?
+        ((BlockInfoStriped)storedBlock).getRealDataBlockNum() : safeReplication;
+    if (storageNum == safe) {
+      this.blockSafe++;
+
+      // Report startup progress only if we haven't completed startup yet.
+      StartupProgress prog = NameNode.getStartupProgress();
+      if (prog.getStatus(Phase.SAFEMODE) != Status.COMPLETE) {
+        if (this.awaitingReportedBlocksCounter == null) {
+          this.awaitingReportedBlocksCounter = prog.getCounter(Phase.SAFEMODE,
+              STEP_AWAITING_REPORTED_BLOCKS);
+        }
+        this.awaitingReportedBlocksCounter.increment();
+      }
+
+      checkSafeMode();
+    }
+  }
+
+  /**
+   * Decrement number of safe blocks if current block has fallen below minimal
+   * replication.
+   * If safe mode is not currently on, this is a no-op.
+   */
+  synchronized void decrementSafeBlockCount(BlockInfo b) {
+    assert namesystem.hasWriteLock();
+    if (status == BMSafeModeStatus.OFF) {
+      return;
+    }
+
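+    // Only count the transition across the safe replication boundary, i.e.
+    // when live replicas drop from safeReplication to safeReplication - 1.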
+    BlockInfo storedBlock = blockManager.getStoredBlock(b);
+    if (storedBlock.isComplete() &&
+        blockManager.countNodes(b).liveReplicas() == safeReplication - 1) {
+      this.blockSafe--;
+      assert blockSafe >= 0;
+      checkSafeMode();
+    }
+  }
+
+  void close() {
+    assert namesystem.hasWriteLock() : "Closing bmSafeMode needs write lock!";
+    try {
+      smmthread.interrupt();
+      smmthread.join(3000);
+    } catch (InterruptedException ignored) {
+    }
+  }
+
+  /**
+   * Get time (counting in milliseconds) left to leave extension period.
+   *
+   * Negative value indicates the extension period has passed.
+   */
+  private long timeToLeaveExtension() {
+    return reachedTime.get() + extension - monotonicNow();
+  }
+
+  /** Check if we are ready to initialize replication queues. */
+  private void initializeReplQueuesIfNecessary() {
+    assert namesystem.hasWriteLock();
+    // Whether it has reached the threshold for initializing replication queues.
+    boolean canInitializeReplQueues = blockManager.shouldPopulateReplQueues() &&
+        blockSafe >= blockReplQueueThreshold;
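+    // In the HA case (e.g. on the standby) repl queues are not populated
+    // here; they are initialized when the NN transitions to active.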
+    if (canInitializeReplQueues &&
+        !blockManager.isPopulatingReplQueues() &&
+        !haEnabled) {
+      blockManager.initializeReplQueues();
+    }
+  }
+
+  /**
+   * @return true if both the block and datanode thresholds are met, else false.
+   */
+  private boolean areThresholdsMet() {
+    assert namesystem.hasWriteLock();
+    int datanodeNum = blockManager.getDatanodeManager().getNumLiveDataNodes();
+    synchronized (this) {
+      return blockSafe >= blockThreshold && datanodeNum >= datanodeThreshold;
+    }
+  }
+
+  /**
+   * Checks consistency of the class state.
+   * This is costly so only runs if asserts are enabled.
+   */
+  private void doConsistencyCheck() {
+    boolean assertsOn = false;
+    assert assertsOn = true; // set to true if asserts are on
+    if (!assertsOn) {
+      return;
+    }
+
+    int activeBlocks = blockManager.getActiveBlockCount();
+    synchronized (this) {
+      if (blockTotal != activeBlocks &&
+          !(blockSafe >= 0 && blockSafe <= blockTotal)) {
+        LOG.warn("SafeMode is in inconsistent filesystem state. " +
+            "BlockManagerSafeMode data: blockTotal={}, blockSafe={}; " +
+            "BlockManager data: activeBlocks={}",
+            blockTotal, blockSafe, activeBlocks);
+      }
+    }
+  }
+
+  /**
+   * Print status every 20 seconds.
+   */
+  private void reportStatus(String msg, boolean rightNow) {
+    assert namesystem.hasWriteLock();
+    long curTime = monotonicNow();
+    if(!rightNow && (curTime - lastStatusReport < 20 * 1000)) {
+      return;
+    }
+    NameNode.stateChangeLog.info(msg + " \n" + getSafeModeTip());
+    lastStatusReport = curTime;
+  }
+
+  /**
+   * Periodically check whether it is time to leave safe mode.
+   * This thread starts when the threshold level is reached.
+   */
+  private class SafeModeMonitor implements Runnable {
+    /** Interval in msec for checking safe mode. */
+    private static final long RECHECK_INTERVAL = 1000;
+
+    @Override
+    public void run() {
+      while (namesystem.isRunning()) {
+        try {
+          namesystem.writeLock();
+          if (status == BMSafeModeStatus.OFF) { // Not in safe mode.
+            break;
+          }
+          if (canLeave()) {
+            // EXTENSION -> OFF
+            leaveSafeMode(false);
+            break;
+          }
+        } finally {
+          namesystem.writeUnlock();
+        }
+
+        try {
+          Thread.sleep(RECHECK_INTERVAL);
+        } catch (InterruptedException ignored) {
+        }
+      }
+
+      if (!namesystem.isRunning()) {
+        LOG.info("NameNode is being shutdown, exit SafeModeMonitor thread");
+      }
+    }
+
+    /**
+     * Check whether the safe mode can be turned off by this monitor.
+     *
+     * Safe mode can be turned off iff
+     * the threshold is reached, and
+     * the extension time has passed.
+     */
+    private boolean canLeave() {
+      if (timeToLeaveExtension() > 0) {
+        reportStatus("STATE* Safe mode ON, in safe mode extension.", false);
+        return false;
+      } else if (!areThresholdsMet()) {
+        reportStatus("STATE* Safe mode ON, thresholds not met.", false);
+        return false;
+      } else {
+        return true;
+      }
+    }
+  }
+
+}
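
For illustration, a minimal sketch (not part of this patch) of how the new
BlockManagerSafeMode state machine is driven during startup. The names
blockManager, namesystem, conf, totalBlocks and blk are hypothetical
placeholders, and every call is assumed to run under the namesystem write
lock with the default safe replication of 1:

    // Hypothetical usage sketch only -- not included in this change.
    BlockManagerSafeMode bmSafeMode =
        new BlockManagerSafeMode(blockManager, namesystem, conf);
    bmSafeMode.activate(totalBlocks);           // OFF -> PENDING_THRESHOLD (or stays OFF)
    bmSafeMode.incrementSafeBlockCount(1, blk); // a fully reported block becomes safe
    // incrementSafeBlockCount() calls checkSafeMode(): once the block and
    // datanode thresholds are met it moves PENDING_THRESHOLD -> EXTENSION
    // (when extension > 0), and the SafeModeMonitor later moves it to OFF.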

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a49cc74b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
index d35b237..f758454 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
@@ -88,7 +88,7 @@ public class DatanodeManager {
   private final Map<String, DatanodeDescriptor> datanodeMap
       = new HashMap<>();
 
-  /** Cluster network topology */
+  /** Cluster network topology. */
   private final NetworkTopology networktopology;
 
   /** Host names to datanode descriptors mapping. */
@@ -105,7 +105,7 @@ public class DatanodeManager {
 
   private final int defaultIpcPort;
 
-  /** Read include/exclude files*/
+  /** Read include/exclude files. */
   private final HostFileManager hostFileManager = new HostFileManager();
 
   /** The period to wait for datanode heartbeat.*/
@@ -560,7 +560,7 @@ public class DatanodeManager {
     if (LOG.isDebugEnabled()) {
       LOG.debug("remove datanode " + nodeInfo);
     }
-    namesystem.checkSafeMode();
+    blockManager.checkSafeMode();
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a49cc74b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Checkpointer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Checkpointer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Checkpointer.java
index 83d835ac..a782049 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Checkpointer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Checkpointer.java
@@ -256,7 +256,8 @@ class Checkpointer extends Daemon {
       if(backupNode.namesystem.getBlocksTotal() > 0) {
         long completeBlocksTotal =
             backupNode.namesystem.getCompleteBlocksTotal();
-        backupNode.namesystem.setBlockTotal(completeBlocksTotal);
+        backupNode.namesystem.getBlockManager().setBlockTotal(
+            completeBlocksTotal);
       }
       bnImage.saveFSImageInAllDirs(backupNode.getNamesystem(), txid);
       if (!backupNode.namesystem.isRollingUpgrade()) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a49cc74b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirDeleteOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirDeleteOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirDeleteOp.java
index 006fbc2..6db2ce8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirDeleteOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirDeleteOp.java
@@ -150,7 +150,7 @@ class FSDirDeleteOp {
 
     if (filesRemoved) {
       fsn.removeLeasesAndINodes(removedUCFiles, removedINodes, false);
-      fsn.removeBlocksAndUpdateSafemodeTotal(collectedBlocks);
+      fsn.getBlockManager().removeBlocksAndUpdateSafemodeTotal(collectedBlocks);
     }
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a49cc74b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
index 210a060..c64dfea 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
@@ -312,7 +312,8 @@ class FSDirRenameOp {
     unprotectedRenameTo(fsd, src, dst, srcIIP, dstIIP, timestamp,
         collectedBlocks, options);
     if (!collectedBlocks.getToDeleteList().isEmpty()) {
-      fsd.getFSNamesystem().removeBlocksAndUpdateSafemodeTotal(collectedBlocks);
+      fsd.getFSNamesystem().getBlockManager()
+          .removeBlocksAndUpdateSafemodeTotal(collectedBlocks);
     }
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a49cc74b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirTruncateOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirTruncateOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirTruncateOp.java
index b46a195..03eb96d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirTruncateOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirTruncateOp.java
@@ -191,7 +191,7 @@ final class FSDirTruncateOp {
     }
     assert onBlockBoundary == (truncateBlock == null) :
       "truncateBlock is null iff on block boundary: " + truncateBlock;
-    fsn.removeBlocksAndUpdateSafemodeTotal(collectedBlocks);
+    fsn.getBlockManager().removeBlocksAndUpdateSafemodeTotal(collectedBlocks);
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a49cc74b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
index 6819d8d..23683d3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
@@ -747,7 +747,8 @@ public class FSEditLogLoader {
           deleteSnapshotOp.snapshotName,
           new INode.ReclaimContext(fsNamesys.dir.getBlockStoragePolicySuite(),
               collectedBlocks, removedINodes, null));
-      fsNamesys.removeBlocksAndUpdateSafemodeTotal(collectedBlocks);
+      fsNamesys.getBlockManager().removeBlocksAndUpdateSafemodeTotal(
+          collectedBlocks);
       collectedBlocks.clear();
       fsNamesys.dir.removeFromInodeMap(removedINodes);
       removedINodes.clear();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a49cc74b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 89df008..6af7265 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -69,18 +69,12 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_LAZY_PERSIST_FIL
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_MAX_OBJECTS_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_MAX_OBJECTS_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_REPL_QUEUE_THRESHOLD_PCT_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RESOURCE_CHECK_INTERVAL_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RESOURCE_CHECK_INTERVAL_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RETRY_CACHE_EXPIRYTIME_MILLIS_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RETRY_CACHE_EXPIRYTIME_MILLIS_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RETRY_CACHE_HEAP_PERCENT_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RETRY_CACHE_HEAP_PERCENT_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SAFEMODE_EXTENSION_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SAFEMODE_MIN_DATANODES_DEFAULT;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SAFEMODE_MIN_DATANODES_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SAFEMODE_THRESHOLD_PCT_DEFAULT;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SAFEMODE_THRESHOLD_PCT_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_PERMISSIONS_ENABLED_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY;
@@ -236,8 +230,6 @@ import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotManager;
 import org.apache.hadoop.hdfs.server.namenode.startupprogress.Phase;
 import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress;
-import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress.Counter;
-import org.apache.hadoop.hdfs.server.namenode.startupprogress.Status;
 import org.apache.hadoop.hdfs.server.namenode.startupprogress.Step;
 import org.apache.hadoop.hdfs.server.namenode.startupprogress.StepType;
 import org.apache.hadoop.hdfs.server.namenode.top.TopAuditLogger;
@@ -267,7 +259,6 @@ import org.apache.hadoop.metrics2.annotation.Metric;
 import org.apache.hadoop.metrics2.annotation.Metrics;
 import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
 import org.apache.hadoop.metrics2.util.MBeans;
-import org.apache.hadoop.net.NetworkTopology;
 import org.apache.hadoop.net.Node;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -440,8 +431,6 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
 
   final LeaseManager leaseManager = new LeaseManager(this); 
 
-  volatile Daemon smmthread = null;  // SafeModeMonitor thread
-  
   Daemon nnrmthread = null; // NamenodeResourceMonitor thread
 
   Daemon nnEditLogRoller = null; // NameNodeEditLogRoller thread
@@ -479,8 +468,6 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
   private final FsServerDefaults serverDefaults;
   private final ReplaceDatanodeOnFailure dtpReplaceDatanodeOnFailure;
 
-  private volatile SafeModeInfo safeMode;  // safe mode information
-
   private final long maxFsObjects;          // maximum number of fs objects
 
   private final long minBlockSize;         // minimum block size
@@ -536,6 +523,15 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
   private INodeAttributeProvider inodeAttributeProvider;
 
   /**
+   * If the NN is in safemode, and not due to manual / low resources, we
+   * assume it must be because of startup. If the NN had low resources during
+   * startup, we assume it came out of startup safemode and it is now in low
+   * resources safemode.
+   */
+  private boolean manualSafeMode = false;
+  private boolean resourceLowSafeMode = false;
+
+  /**
    * Notify that loading of this FSDirectory is complete, and
    * it is imageLoaded for use
    */
@@ -606,7 +602,8 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
     return leaseManager;
   }
   
-  boolean isHaEnabled() {
+  @Override
+  public boolean isHaEnabled() {
     return haEnabled;
   }
 
@@ -742,10 +739,6 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
           DFS_NAMENODE_RESOURCE_CHECK_INTERVAL_KEY,
           DFS_NAMENODE_RESOURCE_CHECK_INTERVAL_DEFAULT);
 
-      this.blockManager = new BlockManager(this, conf);
-      this.datanodeStatistics = blockManager.getDatanodeManager().getDatanodeStatistics();
-      this.blockIdManager = new BlockIdManager(blockManager);
-
       this.fsOwner = UserGroupInformation.getCurrentUser();
       this.supergroup = conf.get(DFS_PERMISSIONS_SUPERUSERGROUP_KEY, 
                                  DFS_PERMISSIONS_SUPERUSERGROUP_DEFAULT);
@@ -771,8 +764,14 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
             "must not be specified if HA is not enabled.");
       }
 
+      // The block manager needs haEnabled to be initialized first.
+      this.blockManager = new BlockManager(this, conf);
+      this.datanodeStatistics = blockManager.getDatanodeManager().getDatanodeStatistics();
+      this.blockIdManager = new BlockIdManager(blockManager);
+
       // Get the checksum type from config
-      String checksumTypeStr = conf.get(DFS_CHECKSUM_TYPE_KEY, DFS_CHECKSUM_TYPE_DEFAULT);
+      String checksumTypeStr = conf.get(DFS_CHECKSUM_TYPE_KEY,
+          DFS_CHECKSUM_TYPE_DEFAULT);
       DataChecksum.Type checksumType;
       try {
          checksumType = DataChecksum.Type.valueOf(checksumTypeStr);
@@ -836,7 +835,6 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
       this.snapshotManager = new SnapshotManager(dir);
       this.cacheManager = new CacheManager(this, conf, blockManager);
       this.ecPolicyManager = new ErasureCodingPolicyManager();
-      this.safeMode = new SafeModeInfo(conf);
       this.topConf = new TopConf(conf);
       this.auditLoggers = initAuditLoggers(conf);
       this.isDefaultAuditLogger = auditLoggers.size() == 1 &&
@@ -1023,7 +1021,8 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
     }
   }
   
-  private void startSecretManagerIfNecessary() {
+  @Override
+  public void startSecretManagerIfNecessary() {
     boolean shouldRun = shouldUseDelegationTokens() &&
       !isInSafeMode() && getEditLog().isOpenForWrite();
     boolean running = dtSecretManager.isRunning();
@@ -1048,14 +1047,13 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
     try {
       nnResourceChecker = new NameNodeResourceChecker(conf);
       checkAvailableResources();
-      assert safeMode != null && !blockManager.isPopulatingReplQueues();
+      assert !blockManager.isPopulatingReplQueues();
       StartupProgress prog = NameNode.getStartupProgress();
       prog.beginPhase(Phase.SAFEMODE);
       long completeBlocksTotal = getCompleteBlocksTotal();
       prog.setTotal(Phase.SAFEMODE, STEP_AWAITING_REPORTED_BLOCKS,
           completeBlocksTotal);
-      setBlockTotal(completeBlocksTotal);
-      blockManager.activate(conf);
+      blockManager.activate(conf, completeBlocksTotal);
     } finally {
       writeUnlock();
     }
@@ -1123,7 +1121,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
               "replication and invalidation queues during failover:\n" +
               metaSaveAsString());
         }
-        
+
         long nextTxId = getFSImage().getLastAppliedTxId() + 1;
         LOG.info("Will take over writing edit logs at txnid " + 
             nextTxId);
@@ -1167,7 +1165,6 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
       blockManager.getDatanodeManager().setShouldSendCachingCommands(true);
     } finally {
       startingActiveService = false;
-      checkSafeMode();
       writeUnlock();
     }
   }
@@ -1177,10 +1174,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
         haContext.getState().getServiceState() == HAServiceState.ACTIVE;
   }
 
-  /**
-   * @return Whether the namenode is transitioning to active state and is in the
-   *         middle of the {@link #startActiveServices()}
-   */
+  @Override
   public boolean inTransitionToActive() {
     return haEnabled && inActiveState() && startingActiveService;
   }
@@ -1317,7 +1311,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
       SafeModeException se = newSafemodeException(errorMsg);
       if (haEnabled && haContext != null
           && haContext.getState().getServiceState() == HAServiceState.ACTIVE
-          && shouldRetrySafeMode(this.safeMode)) {
+          && isInStartupSafeMode()) {
         throw new RetriableException(se);
       } else {
         throw se;
@@ -1327,25 +1321,13 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
 
   private SafeModeException newSafemodeException(String errorMsg) {
     return new SafeModeException(errorMsg + ". Name node is in safe " +
-        "mode.\n" + safeMode.getTurnOffTip());
+        "mode.\n" + getSafeModeTip());
   }
 
   boolean isPermissionEnabled() {
     return isPermissionEnabled;
   }
 
-  /**
-   * We already know that the safemode is on. We will throw a RetriableException
-   * if the safemode is not manual or caused by low resource.
-   */
-  private boolean shouldRetrySafeMode(SafeModeInfo safeMode) {
-    if (safeMode == null) {
-      return false;
-    } else {
-      return !safeMode.isManual() && !safeMode.areResourcesLow();
-    }
-  }
-  
   public static Collection<URI> getNamespaceDirs(Configuration conf) {
     return getStorageDirs(conf, DFS_NAMENODE_NAME_DIR_KEY);
   }
@@ -1579,7 +1561,6 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
     fsRunning = false;
     try {
       stopCommonServices();
-      if (smmthread != null) smmthread.interrupt();
     } finally {
       // using finally to ensure we also wait for lease daemon
       try {
@@ -2813,54 +2794,6 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
   }
 
   /**
-   * Removes the blocks from blocksmap and updates the safemode blocks total
-   * 
-   * @param blocks
-   *          An instance of {@link BlocksMapUpdateInfo} which contains a list
-   *          of blocks that need to be removed from blocksMap
-   */
-  void removeBlocksAndUpdateSafemodeTotal(BlocksMapUpdateInfo blocks) {
-    assert hasWriteLock();
-    // In the case that we are a Standby tailing edits from the
-    // active while in safe-mode, we need to track the total number
-    // of blocks and safe blocks in the system.
-    boolean trackBlockCounts = isSafeModeTrackingBlocks();
-    int numRemovedComplete = 0, numRemovedSafe = 0;
-
-    for (BlockInfo b : blocks.getToDeleteList()) {
-      if (trackBlockCounts) {
-        if (b.isComplete()) {
-          numRemovedComplete++;
-          if (blockManager.hasMinStorage(b, b.numNodes())) {
-            numRemovedSafe++;
-          }
-        }
-      }
-      blockManager.removeBlock(b);
-    }
-    if (trackBlockCounts) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Adjusting safe-mode totals for deletion."
-            + "decreasing safeBlocks by " + numRemovedSafe
-            + ", totalBlocks by " + numRemovedComplete);
-      }
-      adjustSafeModeBlockTotals(-numRemovedSafe, -numRemovedComplete);
-    }
-  }
-
-  /**
-   * @see SafeModeInfo#shouldIncrementallyTrackBlocks
-   */
-  private boolean isSafeModeTrackingBlocks() {
-    if (!haEnabled) {
-      // Never track blocks incrementally in non-HA code.
-      return false;
-    }
-    SafeModeInfo sm = this.safeMode;
-    return sm != null && sm.shouldIncrementallyTrackBlocks();
-  }
-
-  /**
    * Get the file info for a specific file.
    *
    * @param src The string representation of the path to the file
@@ -3587,8 +3520,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
   void registerDatanode(DatanodeRegistration nodeReg) throws IOException {
     writeLock();
     try {
-      getBlockManager().getDatanodeManager().registerDatanode(nodeReg);
-      checkSafeMode();
+      blockManager.registerDatanode(nodeReg);
     } finally {
       writeUnlock();
     }
@@ -4142,575 +4074,6 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
     getBlockManager().getDatanodeManager().setBalancerBandwidth(bandwidth);
   }
 
-  /**
-   * SafeModeInfo contains information related to the safe mode.
-   * <p>
-   * An instance of {@link SafeModeInfo} is created when the name node
-   * enters safe mode.
-   * <p>
-   * During name node startup {@link SafeModeInfo} counts the number of
-   * <em>safe blocks</em>, those that have at least the minimal number of
-   * replicas, and calculates the ratio of safe blocks to the total number
-   * of blocks in the system, which is the size of blocks in
-   * {@link FSNamesystem#blockManager}. When the ratio reaches the
-   * {@link #threshold} it starts the SafeModeMonitor daemon in order
-   * to monitor whether the safe mode {@link #extension} is passed.
-   * Then it leaves safe mode and destroys itself.
-   * <p>
-   * If safe mode is turned on manually then the number of safe blocks is
-   * not tracked because the name node is not intended to leave safe mode
-   * automatically in the case.
-   *
-   * @see ClientProtocol#setSafeMode(HdfsConstants.SafeModeAction, boolean)
-   */
-  public class SafeModeInfo {
-    // configuration fields
-    /** Safe mode threshold condition %.*/
-    private final double threshold;
-    /** Safe mode minimum number of datanodes alive */
-    private final int datanodeThreshold;
-    /**
-     * Safe mode extension after the threshold.
-     * Make it volatile so that getSafeModeTip can read the latest value
-     * without taking a lock.
-     */
-    private volatile int extension;
-    /** Min replication required by safe mode. */
-    private final int safeReplication;
-    /** threshold for populating needed replication queues */
-    private final double replQueueThreshold;
-    // internal fields
-    /** Time when threshold was reached.
-     * <br> -1 safe mode is off
-     * <br> 0 safe mode is on, and threshold is not reached yet
-     * <br> >0 safe mode is on, but we are in extension period 
-     */
-    private long reached = -1;  
-    private long reachedTimestamp = -1;
-    /** Total number of blocks. */
-    int blockTotal; 
-    /** Number of safe blocks. */
-    int blockSafe;
-    /** Number of blocks needed to satisfy safe mode threshold condition */
-    private int blockThreshold;
-    /** Number of blocks needed before populating replication queues */
-    private int blockReplQueueThreshold;
-    /** time of the last status printout */
-    private long lastStatusReport = 0;
-    /**
-     * Was safemode entered automatically because available resources were low.
-     * Make it volatile so that getSafeModeTip can read the latest value
-     * without taking a lock.
-     */
-    private volatile boolean resourcesLow = false;
-    /** Should safemode adjust its block totals as blocks come in */
-    private boolean shouldIncrementallyTrackBlocks = false;
-    /** counter for tracking startup progress of reported blocks */
-    private Counter awaitingReportedBlocksCounter;
-    
-    /**
-     * Creates SafeModeInfo when the name node enters
-     * automatic safe mode at startup.
-     *  
-     * @param conf configuration
-     */
-    private SafeModeInfo(Configuration conf) {
-      this.threshold = conf.getFloat(DFS_NAMENODE_SAFEMODE_THRESHOLD_PCT_KEY,
-          DFS_NAMENODE_SAFEMODE_THRESHOLD_PCT_DEFAULT);
-      if(threshold > 1.0) {
-        LOG.warn("The threshold value should't be greater than 1, threshold: " + threshold);
-      }
-      this.datanodeThreshold = conf.getInt(
-        DFS_NAMENODE_SAFEMODE_MIN_DATANODES_KEY,
-        DFS_NAMENODE_SAFEMODE_MIN_DATANODES_DEFAULT);
-      this.extension = conf.getInt(DFS_NAMENODE_SAFEMODE_EXTENSION_KEY, 0);
-      int minReplication =
-          conf.getInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MIN_KEY,
-              DFSConfigKeys.DFS_NAMENODE_REPLICATION_MIN_DEFAULT);
-      // DFS_NAMENODE_SAFEMODE_REPLICATION_MIN_KEY is an expert level setting,
-      // setting this lower than the min replication is not recommended
-      // and/or dangerous for production setups.
-      // When it's unset, safeReplication will use dfs.namenode.replication.min
-      this.safeReplication =
-          conf.getInt(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_REPLICATION_MIN_KEY,
-              minReplication);
-
-      LOG.info(DFS_NAMENODE_SAFEMODE_THRESHOLD_PCT_KEY + " = " + threshold);
-      LOG.info(DFS_NAMENODE_SAFEMODE_MIN_DATANODES_KEY + " = " + datanodeThreshold);
-      LOG.info(DFS_NAMENODE_SAFEMODE_EXTENSION_KEY + "     = " + extension);
-
-      // default to safe mode threshold (i.e., don't populate queues before leaving safe mode)
-      this.replQueueThreshold = 
-        conf.getFloat(DFS_NAMENODE_REPL_QUEUE_THRESHOLD_PCT_KEY,
-                      (float) threshold);
-      this.blockTotal = 0; 
-      this.blockSafe = 0;
-    }
-
-    /**
-     * In the HA case, the StandbyNode can be in safemode while the namespace
-     * is modified by the edit log tailer. In this case, the number of total
-     * blocks changes as edits are processed (eg blocks are added and deleted).
-     * However, we don't want to do the incremental tracking during the
-     * startup-time loading process -- only once the initial total has been
-     * set after the image has been loaded.
-     */
-    private boolean shouldIncrementallyTrackBlocks() {
-      return shouldIncrementallyTrackBlocks;
-    }
-
-    /**
-     * Creates SafeModeInfo when safe mode is entered manually, or because
-     * available resources are low.
-     *
-     * The {@link #threshold} is set to 1.5 so that it could never be reached.
-     * {@link #blockTotal} is set to -1 to indicate that safe mode is manual.
-     * 
-     * @see SafeModeInfo
-     */
-    private SafeModeInfo(boolean resourcesLow) {
-      this.threshold = 1.5f;  // this threshold can never be reached
-      this.datanodeThreshold = Integer.MAX_VALUE;
-      this.extension = Integer.MAX_VALUE;
-      this.safeReplication = Short.MAX_VALUE + 1; // more than maxReplication
-      this.replQueueThreshold = 1.5f; // can never be reached
-      this.blockTotal = -1;
-      this.blockSafe = -1;
-      this.resourcesLow = resourcesLow;
-      enter();
-      reportStatus("STATE* Safe mode is ON.", true);
-    }
-      
-    /**
-     * Check if safe mode is on.
-     * @return true if in safe mode
-     */
-    private synchronized boolean isOn() {
-      doConsistencyCheck();
-      return this.reached >= 0;
-    }
-      
-    /**
-     * Enter safe mode.
-     */
-    private void enter() {
-      this.reached = 0;
-      this.reachedTimestamp = 0;
-    }
-      
-    /**
-     * Leave safe mode.
-     * <p>
-     * Check for invalid, under- & over-replicated blocks in the end of startup.
-     * @param force - true to force exit
-     */
-    private synchronized void leave(boolean force) {
-      // if not done yet, initialize replication queues.
-      // In the standby, do not populate repl queues
-      if (!blockManager.isPopulatingReplQueues() && blockManager.shouldPopulateReplQueues()) {
-        blockManager.initializeReplQueues();
-      }
-
-
-      if (!force && (blockManager.getBytesInFuture() > 0)) {
-        LOG.error("Refusing to leave safe mode without a force flag. " +
-            "Exiting safe mode will cause a deletion of " + blockManager
-            .getBytesInFuture() + " byte(s). Please use " +
-            "-forceExit flag to exit safe mode forcefully if data loss is " +
-            "acceptable.");
-        return;
-      }
-
-      long timeInSafemode = now() - startTime;
-      NameNode.stateChangeLog.info("STATE* Leaving safe mode after " 
-                                    + timeInSafemode/1000 + " secs");
-      NameNode.getNameNodeMetrics().setSafeModeTime((int) timeInSafemode);
-
-      //Log the following only once (when transitioning from ON -> OFF)
-      if (reached >= 0) {
-        NameNode.stateChangeLog.info("STATE* Safe mode is OFF"); 
-      }
-      reached = -1;
-      reachedTimestamp = -1;
-      safeMode = null;
-      final NetworkTopology nt = blockManager.getDatanodeManager().getNetworkTopology();
-      NameNode.stateChangeLog.info("STATE* Network topology has "
-          + nt.getNumOfRacks() + " racks and "
-          + nt.getNumOfLeaves() + " datanodes");
-      NameNode.stateChangeLog.info("STATE* UnderReplicatedBlocks has "
-          + blockManager.numOfUnderReplicatedBlocks() + " blocks");
-
-      startSecretManagerIfNecessary();
-
-      // If startup has not yet completed, end safemode phase.
-      StartupProgress prog = NameNode.getStartupProgress();
-      if (prog.getStatus(Phase.SAFEMODE) != Status.COMPLETE) {
-        prog.endStep(Phase.SAFEMODE, STEP_AWAITING_REPORTED_BLOCKS);
-        prog.endPhase(Phase.SAFEMODE);
-      }
-    }
-
-    /**
-     * Check whether we have reached the threshold for 
-     * initializing replication queues.
-     */
-    private synchronized boolean canInitializeReplQueues() {
-      return blockManager.shouldPopulateReplQueues()
-          && blockSafe >= blockReplQueueThreshold;
-    }
-      
-    /** 
-     * Safe mode can be turned off iff 
-     * the threshold is reached and 
-     * the extension time have passed.
-     * @return true if can leave or false otherwise.
-     */
-    private synchronized boolean canLeave() {
-      if (reached == 0) {
-        return false;
-      }
-
-      if (monotonicNow() - reached < extension) {
-        reportStatus("STATE* Safe mode ON, in safe mode extension.", false);
-        return false;
-      }
-
-      if (needEnter()) {
-        reportStatus("STATE* Safe mode ON, thresholds not met.", false);
-        return false;
-      }
-
-      return true;
-    }
-      
-    /** 
-     * There is no need to enter safe mode 
-     * if DFS is empty or {@link #threshold} == 0
-     */
-    private boolean needEnter() {
-      return (threshold != 0 && blockSafe < blockThreshold) ||
-        (datanodeThreshold != 0 && getNumLiveDataNodes() < datanodeThreshold) ||
-        (!nameNodeHasResourcesAvailable());
-    }
-      
-    /**
-     * Check and trigger safe mode if needed. 
-     */
-    private void checkMode() {
-      // Have to have write-lock since leaving safemode initializes
-      // repl queues, which requires write lock
-      assert hasWriteLock();
-      if (inTransitionToActive()) {
-        return;
-      }
-      // if smmthread is already running, the block threshold must have been 
-      // reached before, there is no need to enter the safe mode again
-      if (smmthread == null && needEnter()) {
-        enter();
-        // check if we are ready to initialize replication queues
-        if (canInitializeReplQueues() && !blockManager.isPopulatingReplQueues()
-            && !haEnabled) {
-          blockManager.initializeReplQueues();
-        }
-        reportStatus("STATE* Safe mode ON.", false);
-        return;
-      }
-      // the threshold is reached or was reached before
-      if (!isOn() ||                           // safe mode is off
-          extension <= 0 || threshold <= 0) {  // don't need to wait
-        this.leave(false); // leave safe mode
-        return;
-      }
-      if (reached > 0) {  // threshold has already been reached before
-        reportStatus("STATE* Safe mode ON.", false);
-        return;
-      }
-      // start monitor
-      reached = monotonicNow();
-      reachedTimestamp = now();
-      if (smmthread == null) {
-        smmthread = new Daemon(new SafeModeMonitor());
-        smmthread.start();
-        reportStatus("STATE* Safe mode extension entered.", true);
-      }
-
-      // check if we are ready to initialize replication queues
-      if (canInitializeReplQueues() && !blockManager.isPopulatingReplQueues() && !haEnabled) {
-        blockManager.initializeReplQueues();
-      }
-    }
-      
-    /**
-     * Set total number of blocks.
-     */
-    private synchronized void setBlockTotal(int total) {
-      this.blockTotal = total;
-      this.blockThreshold = (int) (blockTotal * threshold);
-      this.blockReplQueueThreshold = 
-        (int) (blockTotal * replQueueThreshold);
-      if (haEnabled) {
-        // After we initialize the block count, any further namespace
-        // modifications done while in safe mode need to keep track
-        // of the number of total blocks in the system.
-        this.shouldIncrementallyTrackBlocks = true;
-      }
-      if(blockSafe < 0)
-        this.blockSafe = 0;
-      checkMode();
-    }
-      
-    /**
-     * Increment number of safe blocks if current block has 
-     * reached minimal replication.
-     * @param storageNum current number of replicas or number of internal blocks
-     *                   of a striped block group
-     * @param storedBlock current storedBlock which is either a
-     *                    BlockInfoContiguous or a BlockInfoStriped
-     */
-    private synchronized void incrementSafeBlockCount(short storageNum,
-        BlockInfo storedBlock) {
-      final int safe = storedBlock.isStriped() ?
-          ((BlockInfoStriped) storedBlock).getRealDataBlockNum() : safeReplication;
-      if (storageNum == safe) {
-        this.blockSafe++;
-
-        // Report startup progress only if we haven't completed startup yet.
-        StartupProgress prog = NameNode.getStartupProgress();
-        if (prog.getStatus(Phase.SAFEMODE) != Status.COMPLETE) {
-          if (this.awaitingReportedBlocksCounter == null) {
-            this.awaitingReportedBlocksCounter = prog.getCounter(Phase.SAFEMODE,
-              STEP_AWAITING_REPORTED_BLOCKS);
-          }
-          this.awaitingReportedBlocksCounter.increment();
-        }
-
-        checkMode();
-      }
-    }
-      
-    /**
-     * Decrement number of safe blocks if current block has 
-     * fallen below minimal replication.
-     * @param replication current replication 
-     */
-    private synchronized void decrementSafeBlockCount(short replication) {
-      if (replication == safeReplication-1) {
-        this.blockSafe--;
-        //blockSafe is set to -1 in manual / low resources safemode
-        assert blockSafe >= 0 || isManual() || areResourcesLow();
-        checkMode();
-      }
-    }
-
-    /**
-     * Check if safe mode was entered manually
-     */
-    private boolean isManual() {
-      return extension == Integer.MAX_VALUE;
-    }
-
-    /**
-     * Set manual safe mode.
-     */
-    private synchronized void setManual() {
-      extension = Integer.MAX_VALUE;
-    }
-
-    /**
-     * Check if safe mode was entered due to resources being low.
-     */
-    private boolean areResourcesLow() {
-      return resourcesLow;
-    }
-
-    /**
-     * Set that resources are low for this instance of safe mode.
-     */
-    private void setResourcesLow() {
-      resourcesLow = true;
-    }
-
-    /**
-     * A tip on how safe mode is to be turned off: manually or automatically.
-     */
-    String getTurnOffTip() {
-      if(!isOn()) {
-        return "Safe mode is OFF.";
-      }
-
-      //Manual OR low-resource safemode. (Admin intervention required)
-      String adminMsg = "It was turned on manually. ";
-      if (areResourcesLow()) {
-        adminMsg = "Resources are low on NN. Please add or free up more "
-          + "resources then turn off safe mode manually. NOTE:  If you turn off"
-          + " safe mode before adding resources, "
-          + "the NN will immediately return to safe mode. ";
-      }
-      if (isManual() || areResourcesLow()) {
-        return adminMsg
-          + "Use \"hdfs dfsadmin -safemode leave\" to turn safe mode off.";
-      }
-
-      boolean thresholdsMet = true;
-      int numLive = getNumLiveDataNodes();
-      String msg = "";
-      if (blockSafe < blockThreshold) {
-        msg += String.format(
-          "The reported blocks %d needs additional %d"
-          + " blocks to reach the threshold %.4f of total blocks %d.%n",
-                blockSafe, (blockThreshold - blockSafe), threshold, blockTotal);
-        thresholdsMet = false;
-      } else {
-        msg += String.format("The reported blocks %d has reached the threshold"
-            + " %.4f of total blocks %d. ", blockSafe, threshold, blockTotal);
-      }
-      if (numLive < datanodeThreshold) {
-        msg += String.format(
-          "The number of live datanodes %d needs an additional %d live "
-          + "datanodes to reach the minimum number %d.%n",
-          numLive, (datanodeThreshold - numLive), datanodeThreshold);
-        thresholdsMet = false;
-      } else {
-        msg += String.format("The number of live datanodes %d has reached "
-            + "the minimum number %d. ",
-            numLive, datanodeThreshold);
-      }
-
-      if(blockManager.getBytesInFuture() > 0) {
-        msg += "Name node detected blocks with generation stamps " +
-            "in future. This means that Name node metadata is inconsistent." +
-            "This can happen if Name node metadata files have been manually " +
-            "replaced. Exiting safe mode will cause loss of " + blockManager
-            .getBytesInFuture() + " byte(s). Please restart name node with " +
-            "right metadata or use \"hdfs dfsadmin -safemode forceExit" +
-            "if you are certain that the NameNode was started with the" +
-            "correct FsImage and edit logs. If you encountered this during" +
-            "a rollback, it is safe to exit with -safemode forceExit.";
-        return msg;
-      }
-
-
-      msg += (reached > 0) ? "In safe mode extension. " : "";
-      msg += "Safe mode will be turned off automatically ";
-
-      if (!thresholdsMet) {
-        msg += "once the thresholds have been reached.";
-      } else if (reached + extension - monotonicNow() > 0) {
-        msg += ("in " + (reached + extension - monotonicNow()) / 1000 + " seconds.");
-      } else {
-        msg += "soon.";
-      }
-
-      return msg;
-    }
-
-    /**
-     * Print status every 20 seconds.
-     */
-    private void reportStatus(String msg, boolean rightNow) {
-      long curTime = now();
-      if(!rightNow && (curTime - lastStatusReport < 20 * 1000))
-        return;
-      NameNode.stateChangeLog.info(msg + " \n" + getTurnOffTip());
-      lastStatusReport = curTime;
-    }
-
-    @Override
-    public String toString() {
-      String resText = "Current safe blocks = " 
-        + blockSafe 
-        + ". Target blocks = " + blockThreshold + " for threshold = %" + threshold
-        + ". Minimal replication = " + safeReplication + ".";
-      if (reached > 0) 
-        resText += " Threshold was reached " + new Date(reachedTimestamp) + ".";
-      return resText;
-    }
-      
-    /**
-     * Checks consistency of the class state.
-     * This is costly so only runs if asserts are enabled.
-     */
-    private void doConsistencyCheck() {
-      boolean assertsOn = false;
-      assert assertsOn = true; // set to true if asserts are on
-      if (!assertsOn) return;
-      
-      if (blockTotal == -1 && blockSafe == -1) {
-        return; // manual safe mode
-      }
-      int activeBlocks = blockManager.getActiveBlockCount();
-      if ((blockTotal != activeBlocks) &&
-          !(blockSafe >= 0 && blockSafe <= blockTotal)) {
-        throw new AssertionError(
-            " SafeMode: Inconsistent filesystem state: "
-        + "SafeMode data: blockTotal=" + blockTotal
-        + " blockSafe=" + blockSafe + "; "
-        + "BlockManager data: active="  + activeBlocks);
-      }
-    }
-
-    private synchronized void adjustBlockTotals(int deltaSafe, int deltaTotal) {
-      if (!shouldIncrementallyTrackBlocks) {
-        return;
-      }
-      assert haEnabled;
-      
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Adjusting block totals from " +
-            blockSafe + "/" + blockTotal + " to " +
-            (blockSafe + deltaSafe) + "/" + (blockTotal + deltaTotal));
-      }
-      assert blockSafe + deltaSafe >= 0 : "Can't reduce blockSafe " +
-        blockSafe + " by " + deltaSafe + ": would be negative";
-      assert blockTotal + deltaTotal >= 0 : "Can't reduce blockTotal " +
-        blockTotal + " by " + deltaTotal + ": would be negative";
-      
-      blockSafe += deltaSafe;
-      setBlockTotal(blockTotal + deltaTotal);
-    }
-  }
-    
-  /**
-   * Periodically check whether it is time to leave safe mode.
-   * This thread starts when the threshold level is reached.
-   *
-   */
-  class SafeModeMonitor implements Runnable {
-    /** interval in msec for checking safe mode: {@value} */
-    private static final long recheckInterval = 1000;
-      
-    /**
-     */
-    @Override
-    public void run() {
-      while (fsRunning) {
-        writeLock();
-        try {
-          if (safeMode == null) { // Not in safe mode.
-            break;
-          }
-          if (safeMode.canLeave()) {
-            // Leave safe mode.
-            safeMode.leave(false);
-            smmthread = null;
-            break;
-          }
-        } finally {
-          writeUnlock();
-        }
-
-        try {
-          Thread.sleep(recheckInterval);
-        } catch (InterruptedException ie) {
-          // Ignored
-        }
-      }
-      if (!fsRunning) {
-        LOG.info("NameNode is being shutdown, exit SafeModeMonitor thread");
-      }
-    }
-  }
-    
   boolean setSafeMode(SafeModeAction action) throws IOException {
     if (action != SafeModeAction.SAFEMODE_GET) {
       checkSuperuserPrivilege();
@@ -4722,9 +4085,9 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
               .getBytesInFuture() + " byte(s). Please use " +
               "-forceExit flag to exit safe mode forcefully and data loss is " +
               "acceptable.");
-          return isInSafeMode();
+        } else {
+          leaveSafeMode();
         }
-        leaveSafeMode();
         break;
       case SAFEMODE_ENTER: // enter safe mode
         enterSafeMode(false);
@@ -4733,7 +4096,6 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
         if (blockManager.getBytesInFuture() > 0) {
           LOG.warn("Leaving safe mode due to forceExit. This will cause a data "
               + "loss of " + blockManager.getBytesInFuture() + " byte(s).");
-          safeMode.leave(true);
           blockManager.clearBytesInFuture();
         } else {
           LOG.warn("forceExit used when normal exist would suffice. Treating " +
@@ -4748,85 +4110,6 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
     return isInSafeMode();
   }
 
-  @Override
-  public void checkSafeMode() {
-    // safeMode is volatile, and may be set to null at any time
-    SafeModeInfo safeMode = this.safeMode;
-    if (safeMode != null) {
-      safeMode.checkMode();
-    }
-  }
-
-  @Override
-  public boolean isInSafeMode() {
-    // safeMode is volatile, and may be set to null at any time
-    SafeModeInfo safeMode = this.safeMode;
-    if (safeMode == null)
-      return false;
-    return safeMode.isOn();
-  }
-
-  @Override
-  public boolean isInStartupSafeMode() {
-    // safeMode is volatile, and may be set to null at any time
-    SafeModeInfo safeMode = this.safeMode;
-    if (safeMode == null)
-      return false;
-    // If the NN is in safemode, and not due to manual / low resources, we
-    // assume it must be because of startup. If the NN had low resources during
-    // startup, we assume it came out of startup safemode and it is now in low
-    // resources safemode
-    return !safeMode.isManual() && !safeMode.areResourcesLow()
-      && safeMode.isOn();
-  }
-
-  @Override
-  public void incrementSafeBlockCount(int storageNum, BlockInfo storedBlock) {
-    // safeMode is volatile, and may be set to null at any time
-    SafeModeInfo safeMode = this.safeMode;
-    if (safeMode == null)
-      return;
-    safeMode.incrementSafeBlockCount((short) storageNum, storedBlock);
-  }
-
-  @Override
-  public void decrementSafeBlockCount(BlockInfo b) {
-    // safeMode is volatile, and may be set to null at any time
-    SafeModeInfo safeMode = this.safeMode;
-    if (safeMode == null) // mostly true
-      return;
-    BlockInfo storedBlock = getStoredBlock(b);
-    if (storedBlock.isComplete()) {
-      safeMode.decrementSafeBlockCount((short)blockManager.countNodes(b).liveReplicas());
-    }
-  }
-  
-  /**
-   * Adjust the total number of blocks safe and expected during safe mode.
-   * If safe mode is not currently on, this is a no-op.
-   * @param deltaSafe the change in number of safe blocks
-   * @param deltaTotal the change i nnumber of total blocks expected
-   */
-  @Override
-  public void adjustSafeModeBlockTotals(int deltaSafe, int deltaTotal) {
-    // safeMode is volatile, and may be set to null at any time
-    SafeModeInfo safeMode = this.safeMode;
-    if (safeMode == null)
-      return;
-    safeMode.adjustBlockTotals(deltaSafe, deltaTotal);
-  }
-
-  /**
-   * Set the total number of blocks in the system. 
-   */
-  public void setBlockTotal(long completeBlocksTotal) {
-    // safeMode is volatile, and may be set to null at any time
-    SafeModeInfo safeMode = this.safeMode;
-    if (safeMode == null)
-      return;
-    safeMode.setBlockTotal((int) completeBlocksTotal);
-  }
-
   /**
    * Get the total number of blocks in the system. 
    */
@@ -4870,6 +4153,17 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
     }
   }
 
+
+  @Override
+  public boolean isInSafeMode() {
+    return isInManualOrResourceLowSafeMode() || blockManager.isInSafeMode();
+  }
+
+  @Override
+  public boolean isInStartupSafeMode() {
+    return !isInManualOrResourceLowSafeMode() && blockManager.isInSafeMode();
+  }
+
   /**
    * Enter safe mode. If resourcesLow is false, then we assume it is manual
    * @throws IOException
@@ -4890,20 +4184,13 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
       if (isEditlogOpenForWrite) {
         getEditLog().logSyncAll();
       }
-      if (!isInSafeMode()) {
-        safeMode = new SafeModeInfo(resourcesLow);
-        return;
-      }
-      if (resourcesLow) {
-        safeMode.setResourcesLow();
-      } else {
-        safeMode.setManual();
-      }
+      setManualAndResourceLowSafeMode(!resourcesLow, resourcesLow);
+      NameNode.stateChangeLog.info("STATE* Safe mode is ON.\n" +
+          getSafeModeTip());
       if (isEditlogOpenForWrite) {
         getEditLog().logSyncAll();
       }
-      NameNode.stateChangeLog.info("STATE* Safe mode is ON"
-          + safeMode.getTurnOffTip());
+      NameNode.stateChangeLog.info("STATE* Safe mode is ON" + getSafeModeTip());
     } finally {
       writeUnlock();
     }
@@ -4919,29 +4206,40 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
         NameNode.stateChangeLog.info("STATE* Safe mode is already OFF"); 
         return;
       }
-      safeMode.leave(false);
+      setManualAndResourceLowSafeMode(false, false);
+      blockManager.leaveSafeMode(true);
     } finally {
       writeUnlock();
     }
   }
-    
+
   String getSafeModeTip() {
-    // There is no need to take readLock.
-    // Don't use isInSafeMode as this.safeMode might be set to null.
-    // after isInSafeMode returns.
-    boolean inSafeMode;
-    SafeModeInfo safeMode = this.safeMode;
-    if (safeMode == null) {
-      inSafeMode = false;
-    } else {
-      inSafeMode = safeMode.isOn();
+    String cmd = "Use \"hdfs dfsadmin -safemode leave\" to turn safe mode off.";
+    synchronized (this) {
+      if (resourceLowSafeMode) {
+        return "Resources are low on NN. Please add or free up more resources"
+            + "then turn off safe mode manually. NOTE:  If you turn off safe "
+            + "mode before adding resources, the NN will immediately return to "
+            + "safe mode. " + cmd;
+      } else if (manualSafeMode) {
+        return "It was turned on manually. " + cmd;
+      }
     }
 
-    if (!inSafeMode) {
-      return "";
-    } else {
-      return safeMode.getTurnOffTip();
-    }
+    return blockManager.getSafeModeTip();
+  }
+
+  /**
+   * @return true iff it is in manual safe mode or resource low safe mode.
+   */
+  private synchronized boolean isInManualOrResourceLowSafeMode() {
+    return manualSafeMode || resourceLowSafeMode;
+  }
+
+  private synchronized void setManualAndResourceLowSafeMode(boolean manual,
+      boolean resourceLow) {
+    this.manualSafeMode = manual;
+    this.resourceLowSafeMode = resourceLow;
   }
 
   CheckpointSignature rollEditLog() throws IOException {
@@ -6428,11 +5726,6 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
   public ReentrantLock getCpLockForTests() {
     return cpLock;
   }
-
-  @VisibleForTesting
-  public SafeModeInfo getSafeModeInfoForTests() {
-    return safeMode;
-  }
   
   @VisibleForTesting
   public void setNNResourceChecker(NameNodeResourceChecker nnResourceChecker) {
@@ -7559,11 +6852,5 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
     return blockManager.getBytesInFuture();
   }
 
-  @VisibleForTesting
-  synchronized void enableSafeModeForTesting(Configuration conf) {
-    SafeModeInfo newSafemode = new SafeModeInfo(conf);
-    newSafemode.enter();
-    this.safeMode = newSafemode;
-  }
 }
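
For orientation, the division of responsibility after this change can be captured in a small sketch, assuming simplified stand-in names rather than the real FSNamesystem and BlockManager signatures: block-threshold safe mode now lives in BlockManager, while FSNamesystem only records whether safe mode was requested manually or because NameNode resources are low.

// Sketch only -- simplified names, not the actual FSNamesystem/BlockManager API.
class SafeModeSplitSketch {
  private boolean manualSafeMode;        // dfsadmin -safemode enter
  private boolean resourceLowSafeMode;   // resource checker tripped

  private final BlockSafeModeStub blockManager = new BlockSafeModeStub();

  private synchronized boolean isInManualOrResourceLowSafeMode() {
    return manualSafeMode || resourceLowSafeMode;
  }

  boolean isInSafeMode() {
    // Either an explicit reason held here, or the block threshold is not met yet.
    return isInManualOrResourceLowSafeMode() || blockManager.isInSafeMode();
  }

  boolean isInStartupSafeMode() {
    // Startup safe mode is the block-threshold case with no manual/resource reason.
    return !isInManualOrResourceLowSafeMode() && blockManager.isInSafeMode();
  }

  /** Stand-in for the block-threshold tracking that moved into BlockManager. */
  static class BlockSafeModeStub {
    boolean isInSafeMode() { return false; }
  }
}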
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a49cc74b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
index 7371d84..c3f3017 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
@@ -383,7 +383,7 @@ public class NameNode implements NameNodeStatusMXBean {
     return rpcServer;
   }
   
-  static void initMetrics(Configuration conf, NamenodeRole role) {
+  public static void initMetrics(Configuration conf, NamenodeRole role) {
     metrics = NameNodeMetrics.create(conf, role);
   }
 
@@ -1682,11 +1682,9 @@ public class NameNode implements NameNodeStatusMXBean {
     HAServiceState retState = state.getServiceState();
     HAServiceStatus ret = new HAServiceStatus(retState);
     if (retState == HAServiceState.STANDBY) {
-      String safemodeTip = namesystem.getSafeModeTip();
-      if (!safemodeTip.isEmpty()) {
-        ret.setNotReadyToBecomeActive(
-            "The NameNode is in safemode. " +
-            safemodeTip);
+      if (namesystem.isInSafeMode()) {
+        ret.setNotReadyToBecomeActive("The NameNode is in safemode. " +
+            namesystem.getSafeModeTip());
       } else {
         ret.setReadyToBecomeActive();
       }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a49cc74b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Namesystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Namesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Namesystem.java
index b1012c2..59ad092 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Namesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Namesystem.java
@@ -48,10 +48,10 @@ public interface Namesystem extends RwLock, SafeMode {
 
   BlockCollection getBlockCollection(long id);
 
-  void adjustSafeModeBlockTotals(int deltaSafe, int deltaTotal);
-
   void checkOperation(OperationCategory read) throws StandbyException;
 
+  void startSecretManagerIfNecessary();
+
   /**
    * Gets the erasure coding policy for the path
    * @param src
@@ -67,4 +67,15 @@ public interface Namesystem extends RwLock, SafeMode {
   CacheManager getCacheManager();
 
   HAContext getHAContext();
+
+  /**
+   * @return true if the HA is enabled else false
+   */
+  boolean isHaEnabled();
+
+  /**
+   * @return Whether the namenode is transitioning to active state and is in the
+   *         middle of the starting active services.
+   */
+  boolean inTransitionToActive();
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a49cc74b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SafeMode.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SafeMode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SafeMode.java
index 98deed2..9eb5796 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SafeMode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SafeMode.java
@@ -18,18 +18,10 @@
 package org.apache.hadoop.hdfs.server.namenode;
 
 import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 
 /** SafeMode related operations. */
 @InterfaceAudience.Private
 public interface SafeMode {
-  /**
-   * Check safe mode conditions.
-   * If the corresponding conditions are satisfied,
-   * trigger the system to enter/leave safe mode.
-   */
-  public void checkSafeMode();
-
   /** Is the system in safe mode? */
   public boolean isInSafeMode();
 
@@ -38,14 +30,4 @@ public interface SafeMode {
    * safe mode turned on automatically?
    */
   public boolean isInStartupSafeMode();
-
-  /**
-   * Increment number of blocks that reached minimal replication.
-   * @param replication current replication
-   * @param storedBlock current stored Block
-   */
-  public void incrementSafeBlockCount(int replication, BlockInfo storedBlock);
-
-  /** Decrement number of blocks that reached minimal replication. */
-  public void decrementSafeBlockCount(BlockInfo b);
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a49cc74b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java
index 6b4e46a..c26fc75 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java
@@ -219,7 +219,7 @@ public class TestSafeMode {
       }
     }, 10, 10000);
 
-    final int safe = NameNodeAdapter.getSafeModeSafeBlocks(nn);
+    final long safe = NameNodeAdapter.getSafeModeSafeBlocks(nn);
     assertTrue("Expected first block report to make some blocks safe.", safe > 0);
     assertTrue("Did not expect first block report to make all blocks safe.", safe < 15);
 


[21/38] hadoop git commit: YARN-4408. Fix issue that NodeManager still reports negative running containers. Contributed by Robert Kanter.

Posted by as...@apache.org.
YARN-4408. Fix issue that NodeManager still reports negative running containers. Contributed by Robert Kanter.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/62e9348b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/62e9348b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/62e9348b

Branch: refs/heads/yarn-2877
Commit: 62e9348bc10bb97a5fcb4281f7996a09d8e69c60
Parents: 3857fed
Author: Junping Du <ju...@apache.org>
Authored: Thu Dec 3 06:36:37 2015 -0800
Committer: Junping Du <ju...@apache.org>
Committed: Thu Dec 3 06:36:37 2015 -0800

----------------------------------------------------------------------
 hadoop-yarn-project/CHANGES.txt                 |  3 +
 .../container/ContainerImpl.java                |  7 ++-
 .../container/TestContainer.java                | 59 +++++++++++++++++++-
 3 files changed, 67 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/62e9348b/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 748a841..7258b36 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -1084,6 +1084,9 @@ Release 2.8.0 - UNRELEASED
     YARN-4384. updateNodeResource CLI should not accept negative values for resource.
     (Junping Du via wangda)
 
+    YARN-4408. Fix issue that NodeManager reports negative running containers. 
+    (Robert Kanter via junping_du)
+
 Release 2.7.3 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/62e9348b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
index eff2188..e16ea93 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
@@ -1041,7 +1041,12 @@ public class ContainerImpl implements Container {
       ContainerDoneTransition {
     @Override
     public void transition(ContainerImpl container, ContainerEvent event) {
-      container.metrics.endRunningContainer();
+      if (container.wasLaunched) {
+        container.metrics.endRunningContainer();
+      } else {
+        LOG.warn("Container exited with success despite being killed and not" +
+            "actually running");
+      }
       container.metrics.completedContainer();
       NMAuditLogger.logSuccess(container.user,
           AuditConstants.FINISH_SUCCESS_CONTAINER, "ContainerImpl",
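
The metric fix is easier to see in isolation. A minimal sketch of the idea, with made-up names rather than the ContainerImpl state machine: the running-containers gauge is decremented only for containers that actually incremented it, so a container killed before launch can no longer push the count negative.

// Sketch only: the gauge must be decremented exactly once per increment.
class RunningGaugeSketch {
  private int running;
  private boolean wasLaunched;

  void onContainerLaunched() {
    wasLaunched = true;
    running++;                 // only launched containers count as running
  }

  void onContainerDone() {
    if (wasLaunched) {
      running--;               // balances the increment above
    }
    // else: killed while still localizing; nothing was ever added to the gauge
  }

  int getRunning() { return running; }
}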

http://git-wip-us.apache.org/repos/asf/hadoop/blob/62e9348b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/TestContainer.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/TestContainer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/TestContainer.java
index 2834e30..2ab9842 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/TestContainer.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/TestContainer.java
@@ -397,7 +397,8 @@ public class TestContainer {
   }
 
   @Test
-  public void testKillOnLocalizedWhenContainerNotLaunched() throws Exception {
+  public void testKillOnLocalizedWhenContainerNotLaunchedContainerKilled()
+      throws Exception {
     WrappedContainer wc = null;
     try {
       wc = new WrappedContainer(17, 314159265358979L, 4344, "yak");
@@ -427,6 +428,62 @@ public class TestContainer {
   }
 
   @Test
+  public void testKillOnLocalizedWhenContainerNotLaunchedContainerSuccess()
+      throws Exception {
+    WrappedContainer wc = null;
+    try {
+      wc = new WrappedContainer(17, 314159265358979L, 4344, "yak");
+      wc.initContainer();
+      wc.localizeResources();
+      assertEquals(ContainerState.LOCALIZED, wc.c.getContainerState());
+      wc.killContainer();
+      assertEquals(ContainerState.KILLING, wc.c.getContainerState());
+      wc.containerSuccessful();
+      wc.drainDispatcherEvents();
+      assertEquals(ContainerState.EXITED_WITH_SUCCESS,
+          wc.c.getContainerState());
+      assertNull(wc.c.getLocalizedResources());
+      verifyCleanupCall(wc);
+      wc.c.handle(new ContainerEvent(wc.c.getContainerId(),
+          ContainerEventType.CONTAINER_RESOURCES_CLEANEDUP));
+      assertEquals(ContainerState.DONE, wc.c.getContainerState());
+      assertEquals(0, metrics.getRunningContainers());
+    } finally {
+      if (wc != null) {
+        wc.finished();
+      }
+    }
+  }
+
+  @Test
+  public void testKillOnLocalizedWhenContainerNotLaunchedContainerFailure()
+      throws Exception {
+    WrappedContainer wc = null;
+    try {
+      wc = new WrappedContainer(17, 314159265358979L, 4344, "yak");
+      wc.initContainer();
+      wc.localizeResources();
+      assertEquals(ContainerState.LOCALIZED, wc.c.getContainerState());
+      wc.killContainer();
+      assertEquals(ContainerState.KILLING, wc.c.getContainerState());
+      wc.containerFailed(ExitCode.FORCE_KILLED.getExitCode());
+      wc.drainDispatcherEvents();
+      assertEquals(ContainerState.EXITED_WITH_FAILURE,
+          wc.c.getContainerState());
+      assertNull(wc.c.getLocalizedResources());
+      verifyCleanupCall(wc);
+      wc.c.handle(new ContainerEvent(wc.c.getContainerId(),
+          ContainerEventType.CONTAINER_RESOURCES_CLEANEDUP));
+      assertEquals(ContainerState.DONE, wc.c.getContainerState());
+      assertEquals(0, metrics.getRunningContainers());
+    } finally {
+      if (wc != null) {
+        wc.finished();
+      }
+    }
+  }
+
+  @Test
   public void testKillOnLocalizedWhenContainerLaunched() throws Exception {
     WrappedContainer wc = null;
     try {


[32/38] hadoop git commit: HDFS-9474. TestPipelinesFailover should not fail when printing debug message. (John Zhuge via Yongjun Zhang)

Posted by as...@apache.org.
HDFS-9474. TestPipelinesFailover should not fail when printing debug message. (John Zhuge via Yongjun Zhang)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/59dbe8b3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/59dbe8b3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/59dbe8b3

Branch: refs/heads/yarn-2877
Commit: 59dbe8b3e96d13c2322cabd87c7f893c5a3812ba
Parents: e02bbeb
Author: Yongjun Zhang <yz...@cloudera.com>
Authored: Fri Dec 4 13:45:01 2015 -0800
Committer: Yongjun Zhang <yz...@cloudera.com>
Committed: Fri Dec 4 13:45:01 2015 -0800

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |  3 ++
 .../namenode/ha/TestPipelinesFailover.java      | 38 ++++++++++----------
 2 files changed, 21 insertions(+), 20 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/59dbe8b3/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 99aa719c..34c3ff2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1717,6 +1717,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-9490. MiniDFSCluster should change block generation stamp via
     FsDatasetTestUtils. (Tony Wu via lei)
 
+    HDFS-9474. TestPipelinesFailover should not fail when printing debug
+    message. (John Zhuge via Yongjun Zhang)
+
   OPTIMIZATIONS
 
     HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/59dbe8b3/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestPipelinesFailover.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestPipelinesFailover.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestPipelinesFailover.java
index f1858a7..9ece121 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestPipelinesFailover.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestPipelinesFailover.java
@@ -56,7 +56,7 @@ import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.GenericTestUtils.DelayAnswer;
 import org.apache.hadoop.test.MultithreadedTestUtil.RepeatingTestThread;
 import org.apache.hadoop.test.MultithreadedTestUtil.TestContext;
-import org.apache.hadoop.util.Shell;
+import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Shell.ShellCommandExecutor;
 import org.apache.log4j.Level;
 import org.junit.Test;
@@ -430,29 +430,27 @@ public class TestPipelinesFailover {
     // The following section of code is to help debug HDFS-6694 about
     // this test that fails from time to time due to "too many open files".
     //
+    LOG.info("HDFS-6694 Debug Data BEGIN");
 
-    // Only collect debug data on these OSes.
-    if (Shell.LINUX || Shell.SOLARIS || Shell.MAC) {
-      System.out.println("HDFS-6694 Debug Data BEGIN===");
-      
-      String[] scmd = new String[] {"/bin/sh", "-c", "ulimit -a"};
-      ShellCommandExecutor sce = new ShellCommandExecutor(scmd);
-      sce.execute();
-      System.out.println("'ulimit -a' output:\n" + sce.getOutput());
-
-      scmd = new String[] {"hostname"};
-      sce = new ShellCommandExecutor(scmd);
-      sce.execute();
-      System.out.println("'hostname' output:\n" + sce.getOutput());
-
-      scmd = new String[] {"ifconfig", "-a"};
-      sce = new ShellCommandExecutor(scmd);
-      sce.execute();
-      System.out.println("'ifconfig' output:\n" + sce.getOutput());
+    String[][] scmds = new String[][] {
+      {"/bin/sh", "-c", "ulimit -a"},
+      {"hostname"},
+      {"ifconfig", "-a"}
+    };
 
-      System.out.println("===HDFS-6694 Debug Data END");
+    for (String[] scmd: scmds) {
+      String scmd_str = StringUtils.join(" ", scmd);
+      try {
+        ShellCommandExecutor sce = new ShellCommandExecutor(scmd);
+        sce.execute();
+        LOG.info("'" + scmd_str + "' output:\n" + sce.getOutput());
+      } catch (IOException e) {
+        LOG.warn("Error when running '" + scmd_str + "'", e);
+      }
     }
 
+    LOG.info("HDFS-6694 Debug Data END");
+
     HAStressTestHarness harness = new HAStressTestHarness();
     // Disable permissions so that another user can recover the lease.
     harness.conf.setBoolean(
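
The guarded-execution pattern the patch introduces looks like this when pulled out of the test. This is a sketch only; it reuses the ShellCommandExecutor and StringUtils calls shown in the diff above, but the command list is arbitrary.

import java.io.IOException;
import org.apache.hadoop.util.Shell.ShellCommandExecutor;
import org.apache.hadoop.util.StringUtils;

// Sketch only: each diagnostic command gets its own try/catch, so a missing
// binary or a failing command on one platform no longer fails the whole test.
class DebugCommandSketch {
  static void dumpDebugData() {
    String[][] cmds = { {"/bin/sh", "-c", "ulimit -a"}, {"hostname"} };
    for (String[] cmd : cmds) {
      String cmdStr = StringUtils.join(" ", cmd);
      try {
        ShellCommandExecutor sce = new ShellCommandExecutor(cmd);
        sce.execute();
        System.out.println("'" + cmdStr + "' output:\n" + sce.getOutput());
      } catch (IOException e) {
        System.err.println("Error when running '" + cmdStr + "': " + e);
      }
    }
  }
}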


[11/38] hadoop git commit: HDFS-9269. Update the documentation and wrapper for fuse-dfs. Contributed by Wei-Chiu Chuang.

Posted by as...@apache.org.
HDFS-9269. Update the documentation and wrapper for fuse-dfs. Contributed by Wei-Chiu Chuang.

Change-Id: Ia9ec512de2464bf94725cc7c15c378c59d0f04c4


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1cc7e614
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1cc7e614
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1cc7e614

Branch: refs/heads/yarn-2877
Commit: 1cc7e614319a527ebc766b53b970852d5113f2f2
Parents: 485c346
Author: Zhe Zhang <zh...@apache.org>
Authored: Tue Dec 1 10:24:31 2015 -0800
Committer: Zhe Zhang <zh...@apache.org>
Committed: Tue Dec 1 10:24:49 2015 -0800

----------------------------------------------------------------------
 .../src/main/native/fuse-dfs/doc/README         | 26 +++++++--------
 .../main/native/fuse-dfs/fuse_dfs_wrapper.sh    | 34 ++++++++++++--------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |  3 ++
 3 files changed, 34 insertions(+), 29 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1cc7e614/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/fuse-dfs/doc/README
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/fuse-dfs/doc/README b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/fuse-dfs/doc/README
index 1744892..672265e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/fuse-dfs/doc/README
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/fuse-dfs/doc/README
@@ -16,45 +16,43 @@
 #
 Fuse-DFS
 
-Supports reads, writes, and directory operations (e.g., cp, ls, more, cat, find, less, rm, mkdir, mv, rmdir).  Things like touch, chmod, chown, and permissions are in the works. Fuse-dfs currently shows all files as owned by nobody.
+Fuse-DFS allows HDFS to be mounted as a local file system.
+It currently supports reads, writes, and directory operations (e.g., cp, ls, more, cat, find, less, rm, mkdir, mv, rmdir, touch, chmod, chown and permissions). Random access writing is not supported.
 
 Contributing
 
-It's pretty straightforward to add functionality to fuse-dfs as fuse makes things relatively simple. Some other tasks require also augmenting libhdfs to expose more hdfs functionality to C. See [http://issues.apache.org/jira/secure/IssueNavigator.jspa?reset=true&mode=hide&pid=12310240&sorter/order=DESC&sorter/field=priority&resolution=-1&component=12312376  contrib/fuse-dfs JIRAs]
+It's pretty straightforward to add functionality to fuse-dfs as fuse makes things relatively simple. Some other tasks require also augmenting libhdfs to expose more hdfs functionality to C. See [https://issues.apache.org/jira/issues/?jql=text%20~%20%22fuse-dfs%22  fuse-dfs JIRAs]
 
 Requirements
 
  * Hadoop with compiled libhdfs.so
  * Linux kernel > 2.6.9 with fuse, which is the default or Fuse 2.7.x, 2.8.x installed. See: [http://fuse.sourceforge.net/]
  * modprobe fuse to load it
- * fuse-dfs executable (see below)
+ * fuse_dfs executable (see below)
  * fuse_dfs_wrapper.sh installed in /bin or other appropriate location (see below)
 
 
 BUILDING
 
-   1. in HADOOP_PREFIX: `ant compile-libhdfs -Dlibhdfs=1
-   2. in HADOOP_PREFIX: `ant package` to deploy libhdfs
-   3. in HADOOP_PREFIX: `ant compile-contrib -Dlibhdfs=1 -Dfusedfs=1`
+   fuse-dfs executable can be built by setting `require.fuse` option to true using Maven. For example:
+   in HADOOP_PREFIX: `mvn package -Pnative -Drequire.fuse=true -DskipTests -Dmaven.javadoc.skip=true`
 
-NOTE: for amd64 architecture, libhdfs will not compile unless you edit
-the Makefile in src/c++/libhdfs/Makefile and set OS_ARCH=amd64
-(probably the same for others too). See [https://issues.apache.org/jira/browse/HADOOP-3344 HADOOP-3344]
+   The executable `fuse_dfs` will be located at HADOOP_PREFIX/hadoop-hdfs-project/hadoop-hdfs-native-client/target/main/native/fuse-dfs/
 
 Common build problems include not finding the libjvm.so in JAVA_HOME/jre/lib/OS_ARCH/server or not finding fuse in FUSE_HOME or /usr/local.
 
 
 CONFIGURING
 
-Look at all the paths in fuse_dfs_wrapper.sh and either correct them or set them in your environment before running. (note for automount and mount as root, you probably cannot control the environment, so best to set them in the wrapper)
+fuse_dfs_wrapper.sh may not work out of box. To use it, look at all the paths in fuse_dfs_wrapper.sh and either correct them or set them in your environment before running. (note for automount and mount as root, you probably cannot control the environment, so best to set them in the wrapper)
 
 INSTALLING
 
 1. `mkdir /export/hdfs` (or wherever you want to mount it)
 
-2. `fuse_dfs_wrapper.sh dfs://hadoop_server1.foo.com:9000 /export/hdfs -d` and from another terminal, try `ls /export/hdfs`
+2. `fuse_dfs_wrapper.sh dfs://hadoop_server1.foo.com:9000 /export/hdfs -odebug` and from another terminal, try `ls /export/hdfs`
 
-If 2 works, try again dropping the debug mode, i.e., -d
+If 2 works, try again dropping the debug mode, i.e., -debug
 
 (note - common problems are that you don't have libhdfs.so or libjvm.so or libfuse.so on your LD_LIBRARY_PATH, and your CLASSPATH does not contain hadoop and other required jars.)
 
@@ -111,7 +109,7 @@ NOTE - you cannot export this with a FUSE module built into the kernel
 
 RECOMMENDATIONS
 
-1. From /bin, `ln -s $HADOOP_PREFIX/contrib/fuse-dfs/fuse_dfs* .`
+1. From /bin, `ln -s HADOOP_PREFIX/hadoop-hdfs-project/hadoop-hdfs-native-client/target/main/native/fuse-dfs/fuse_dfs* .`
 
 2. Always start with debug on so you can see if you are missing a classpath or something like that.
 
@@ -127,5 +125,3 @@ this is very slow. see [https://issues.apache.org/jira/browse/HADOOP-3797 HADOOP
 2. Writes are approximately 33% slower than the DFSClient. TBD how to optimize this. see: [https://issues.apache.org/jira/browse/HADOOP-3805 HADOOP-3805] - try using -obig_writes if on a >2.6.26 kernel, should perform much better since bigger writes implies less context switching.
 
 3. Reads are ~20-30% slower even with the read buffering. 
-
-4. fuse-dfs and underlying libhdfs have no support for permissions. See [https://issues.apache.org/jira/browse/HADOOP-3536 HADOOP-3536] 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1cc7e614/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/fuse-dfs/fuse_dfs_wrapper.sh
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/fuse-dfs/fuse_dfs_wrapper.sh b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/fuse-dfs/fuse_dfs_wrapper.sh
index 97239cc..26dfd19 100755
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/fuse-dfs/fuse_dfs_wrapper.sh
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/fuse-dfs/fuse_dfs_wrapper.sh
@@ -16,7 +16,12 @@
 # limitations under the License.
 #
 
-export HADOOP_PREFIX=${HADOOP_PREFIX:-/usr/local/share/hadoop}
+if [ "$HADOOP_PREFIX" = "" ]; then
+  echo "HADOOP_PREFIX is empty. Set it to the root directory of Hadoop source code"
+  exit 1
+fi
+export FUSEDFS_PATH="$HADOOP_PREFIX/hadoop-hdfs-project/hadoop-hdfs-native-client/target/main/native/fuse-dfs"
+export LIBHDFS_PATH="$HADOOP_PREFIX/hadoop-hdfs-project/hadoop-hdfs-native-client/target/usr/local/lib"
 
 if [ "$OS_ARCH" = "" ]; then
 export OS_ARCH=amd64
@@ -30,17 +35,18 @@ if [ "$LD_LIBRARY_PATH" = "" ]; then
 export LD_LIBRARY_PATH=$JAVA_HOME/jre/lib/$OS_ARCH/server:/usr/local/lib
 fi
 
-# If dev build set paths accordingly
-if [ -d $HADOOP_PREFIX/build ]; then
-  export HADOOP_PREFIX=$HADOOP_PREFIX
-  for f in ${HADOOP_PREFIX}/build/*.jar ; do
-    export CLASSPATH=$CLASSPATH:$f
-  done
-  for f in $HADOOP_PREFIX/build/ivy/lib/hadoop-hdfs/common/*.jar ; do
-    export CLASSPATH=$CLASSPATH:$f
-  done
-  export PATH=$HADOOP_PREFIX/build/contrib/fuse-dfs:$PATH
-  export LD_LIBRARY_PATH=$HADOOP_PREFIX/build/c++/lib:$JAVA_HOME/jre/lib/$OS_ARCH/server
-fi
+while IFS= read -r -d '' file
+do
+  export CLASSPATH=$CLASSPATH:$file
+done < <(find "$HADOOP_PREFIX/hadoop-client" -name "*.jar" -print0)
+
+while IFS= read -r -d '' file
+do
+  export CLASSPATH=$CLASSPATH:$file
+done < <(find "$HADOOP_PREFIX/hhadoop-hdfs-project" -name "*.jar" -print0)
+
+export CLASSPATH=$HADOOP_CONF_DIR:$CLASSPATH
+export PATH=$FUSEDFS_PATH:$PATH
+export LD_LIBRARY_PATH=$LIBHDFS_PATH:$JAVA_HOME/jre/lib/$OS_ARCH/server
 
-fuse_dfs $@
+fuse_dfs "$@"

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1cc7e614/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 9e37b2d..77d5415 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1701,6 +1701,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-8512. WebHDFS : GETFILESTATUS should return LocatedBlock with storage
     type info. (xyao)
 
+    HDFS-9269. Update the documentation and wrapper for fuse-dfs. 
+    (Wei-Chiu Chuang via zhz)
+
   OPTIMIZATIONS
 
     HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than


[17/38] hadoop git commit: HDFS-9429. Tests in TestDFSAdminWithHA intermittently fail with EOFException (Xiao Chen via Colin P. McCabe)

Posted by as...@apache.org.
HDFS-9429. Tests in TestDFSAdminWithHA intermittently fail with EOFException (Xiao Chen via Colin P. McCabe)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/53e3bf7e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/53e3bf7e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/53e3bf7e

Branch: refs/heads/yarn-2877
Commit: 53e3bf7e704c332fb119f55cb92520a51b644bfc
Parents: 3c4a34e
Author: Colin Patrick Mccabe <cm...@cloudera.com>
Authored: Tue Dec 1 23:21:21 2015 -0800
Committer: Colin Patrick Mccabe <cm...@cloudera.com>
Committed: Tue Dec 1 23:21:21 2015 -0800

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |  3 ++
 .../apache/hadoop/hdfs/TestRollingUpgrade.java  |  1 +
 .../hadoop/hdfs/TestRollingUpgradeRollback.java |  1 +
 .../hdfs/qjournal/MiniJournalCluster.java       | 38 ++++++++++++++++++++
 .../hadoop/hdfs/qjournal/MiniQJMHACluster.java  |  1 +
 .../hdfs/qjournal/TestMiniJournalCluster.java   |  1 +
 .../hadoop/hdfs/qjournal/TestNNWithQJM.java     |  1 +
 .../hdfs/qjournal/TestSecureNNWithQJM.java      |  1 +
 .../qjournal/client/TestEpochsAreUnique.java    |  1 +
 .../hdfs/qjournal/client/TestQJMWithFaults.java |  3 ++
 .../client/TestQuorumJournalManager.java        |  1 +
 .../qjournal/server/TestJournalNodeMXBean.java  | 10 +++---
 12 files changed, 58 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/53e3bf7e/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 89aaed4..bb3f148 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -2430,6 +2430,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-6533. TestBPOfferService#testBasicFunctionalitytest fails
     intermittently. (Wei-Chiu Chuang via Arpit Agarwal)
 
+    HDFS-9429. Tests in TestDFSAdminWithHA intermittently fail with
+    EOFException (Xiao Chen via Colin P. McCabe)
+
 Release 2.7.3 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/53e3bf7e/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRollingUpgrade.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRollingUpgrade.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRollingUpgrade.java
index 72e16e4..b3279ed 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRollingUpgrade.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRollingUpgrade.java
@@ -168,6 +168,7 @@ public class TestRollingUpgrade {
 
     final Configuration conf = new HdfsConfiguration();
     final MiniJournalCluster mjc = new MiniJournalCluster.Builder(conf).build();
+    mjc.waitActive();
     setConf(conf, nn1Dir, mjc);
 
     {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/53e3bf7e/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRollingUpgradeRollback.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRollingUpgradeRollback.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRollingUpgradeRollback.java
index 38cfb92..b5ef5ee 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRollingUpgradeRollback.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRollingUpgradeRollback.java
@@ -153,6 +153,7 @@ public class TestRollingUpgradeRollback {
     try {
       mjc = new MiniJournalCluster.Builder(conf).numJournalNodes(
           NUM_JOURNAL_NODES).build();
+      mjc.waitActive();
       conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, mjc
           .getQuorumJournalURI(JOURNAL_ID).toString());
       cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/53e3bf7e/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/MiniJournalCluster.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/MiniJournalCluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/MiniJournalCluster.java
index 202188d..7b974c3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/MiniJournalCluster.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/MiniJournalCluster.java
@@ -17,26 +17,34 @@
  */
 package org.apache.hadoop.hdfs.qjournal;
 
+import static org.apache.hadoop.hdfs.qjournal.QJMTestUtil.FAKE_NSINFO;
+import static org.junit.Assert.fail;
+
 import java.io.File;
 import java.io.IOException;
 import java.net.InetSocketAddress;
 import java.net.URI;
 import java.net.URISyntaxException;
 import java.util.List;
+import java.util.concurrent.TimeoutException;
 
+import com.google.common.base.Supplier;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.qjournal.client.QuorumJournalManager;
 import org.apache.hadoop.hdfs.qjournal.server.JournalNode;
 import org.apache.hadoop.net.NetUtils;
 
 import com.google.common.base.Joiner;
 import com.google.common.collect.Lists;
+import org.apache.hadoop.test.GenericTestUtils;
 
 public class MiniJournalCluster {
+  public static final String CLUSTER_WAITACTIVE_URI = "waitactive";
   public static class Builder {
     private String baseDir;
     private int numJournalNodes = 3;
@@ -217,4 +225,34 @@ public class MiniJournalCluster {
     return nodes.length;
   }
 
+  /**
+   * Wait until all the journalnodes start.
+   */
+  public void waitActive() throws IOException {
+    for (int i = 0; i < nodes.length; i++) {
+      final int index = i;
+      try {
+        GenericTestUtils.waitFor(new Supplier<Boolean>() {
+          // wait until all JN's IPC server is running
+          @Override public Boolean get() {
+            try {
+              QuorumJournalManager qjm =
+                  new QuorumJournalManager(nodes[index].node.getConf(),
+                      getQuorumJournalURI(CLUSTER_WAITACTIVE_URI), FAKE_NSINFO);
+              qjm.hasSomeData();
+              qjm.close();
+            } catch (IOException e) {
+              // Exception from IPC call, likely due to server not ready yet.
+              return false;
+            }
+            return true;
+          }
+        }, 50, 3000);
+      } catch (TimeoutException e) {
+        fail("Time out while waiting for journal node " + index + " to start.");
+      } catch (InterruptedException ite) {
+        LOG.warn("Thread interrupted when waiting for node start", ite);
+      }
+    }
+  }
 }
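
waitActive() is an instance of a general poll-until-ready idiom built on GenericTestUtils.waitFor. A minimal sketch, where probeService() is a hypothetical placeholder for the QuorumJournalManager probe the real helper performs against each journal node:

import java.io.IOException;
import java.util.concurrent.TimeoutException;
import com.google.common.base.Supplier;
import org.apache.hadoop.test.GenericTestUtils;

// Sketch only: poll a cheap probe until it succeeds, checking every 50 ms and
// giving up after 3 s, mirroring the parameters used in waitActive().
class WaitUntilServingSketch {
  static void waitUntilServing() throws TimeoutException, InterruptedException {
    GenericTestUtils.waitFor(new Supplier<Boolean>() {
      @Override
      public Boolean get() {
        try {
          probeService();   // hypothetical probe; throws while the server is down
          return true;
        } catch (IOException e) {
          return false;     // not up yet, keep polling
        }
      }
    }, 50, 3000);
  }

  static void probeService() throws IOException { /* placeholder */ }
}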

http://git-wip-us.apache.org/repos/asf/hadoop/blob/53e3bf7e/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/MiniQJMHACluster.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/MiniQJMHACluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/MiniQJMHACluster.java
index 470a08b..8e838c3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/MiniQJMHACluster.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/MiniQJMHACluster.java
@@ -98,6 +98,7 @@ public class MiniQJMHACluster {
         // start 3 journal nodes
         journalCluster = new MiniJournalCluster.Builder(conf).format(true)
             .build();
+        journalCluster.waitActive();
         URI journalURI = journalCluster.getQuorumJournalURI(NAMESERVICE);
 
         // start cluster with specified NameNodes

http://git-wip-us.apache.org/repos/asf/hadoop/blob/53e3bf7e/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/TestMiniJournalCluster.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/TestMiniJournalCluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/TestMiniJournalCluster.java
index fbb51e1..cace7c9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/TestMiniJournalCluster.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/TestMiniJournalCluster.java
@@ -36,6 +36,7 @@ public class TestMiniJournalCluster {
     Configuration conf = new Configuration();
     MiniJournalCluster c = new MiniJournalCluster.Builder(conf)
       .build();
+    c.waitActive();
     try {
       URI uri = c.getQuorumJournalURI("myjournal");
       String[] addrs = uri.getAuthority().split(";");

http://git-wip-us.apache.org/repos/asf/hadoop/blob/53e3bf7e/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/TestNNWithQJM.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/TestNNWithQJM.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/TestNNWithQJM.java
index 7e81b67..d713bc7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/TestNNWithQJM.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/TestNNWithQJM.java
@@ -52,6 +52,7 @@ public class TestNNWithQJM {
   @Before
   public void startJNs() throws Exception {
     mjc = new MiniJournalCluster.Builder(conf).build();
+    mjc.waitActive();
   }
   
   @After

http://git-wip-us.apache.org/repos/asf/hadoop/blob/53e3bf7e/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/TestSecureNNWithQJM.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/TestSecureNNWithQJM.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/TestSecureNNWithQJM.java
index f95594a..166f18e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/TestSecureNNWithQJM.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/TestSecureNNWithQJM.java
@@ -212,6 +212,7 @@ public class TestSecureNNWithQJM {
   private void startCluster() throws IOException {
     mjc = new MiniJournalCluster.Builder(conf)
       .build();
+    mjc.waitActive();
     conf.set(DFS_NAMENODE_EDITS_DIR_KEY,
       mjc.getQuorumJournalURI("myjournal").toString());
     cluster = new MiniDFSCluster.Builder(conf)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/53e3bf7e/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestEpochsAreUnique.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestEpochsAreUnique.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestEpochsAreUnique.java
index bd9cf6f..d57e089 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestEpochsAreUnique.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestEpochsAreUnique.java
@@ -51,6 +51,7 @@ public class TestEpochsAreUnique {
   public void testSingleThreaded() throws IOException {
     Configuration conf = new Configuration();
     MiniJournalCluster cluster = new MiniJournalCluster.Builder(conf).build();
+    cluster.waitActive();
     URI uri = cluster.getQuorumJournalURI(JID);
     QuorumJournalManager qjm = new QuorumJournalManager(
         conf, uri, FAKE_NSINFO);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/53e3bf7e/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQJMWithFaults.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQJMWithFaults.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQJMWithFaults.java
index aac2f49..b0a9b99 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQJMWithFaults.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQJMWithFaults.java
@@ -98,6 +98,7 @@ public class TestQJMWithFaults {
   private static long determineMaxIpcNumber() throws Exception {
     Configuration conf = new Configuration();
     MiniJournalCluster cluster = new MiniJournalCluster.Builder(conf).build();
+    cluster.waitActive();
     QuorumJournalManager qjm = null;
     long ret;
     try {
@@ -146,6 +147,7 @@ public class TestQJMWithFaults {
         
         MiniJournalCluster cluster = new MiniJournalCluster.Builder(conf)
           .build();
+        cluster.waitActive();
         QuorumJournalManager qjm = null;
         try {
           qjm = createInjectableQJM(cluster);
@@ -218,6 +220,7 @@ public class TestQJMWithFaults {
     
     MiniJournalCluster cluster = new MiniJournalCluster.Builder(conf)
       .build();
+    cluster.waitActive();
     
     // Format the cluster using a non-faulty QJM.
     QuorumJournalManager qjmForInitialFormat =

http://git-wip-us.apache.org/repos/asf/hadoop/blob/53e3bf7e/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManager.java
index ad67deb..b9a0924 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManager.java
@@ -94,6 +94,7 @@ public class TestQuorumJournalManager {
     
     cluster = new MiniJournalCluster.Builder(conf)
       .build();
+    cluster.waitActive();
     
     qjm = createSpyingQJM();
     spies = qjm.getLoggerSetForTests().getLoggersForTests();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/53e3bf7e/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNodeMXBean.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNodeMXBean.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNodeMXBean.java
index 3471848..498ef71 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNodeMXBean.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNodeMXBean.java
@@ -19,6 +19,7 @@ package org.apache.hadoop.hdfs.qjournal.server;
 
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
 
 import java.io.IOException;
 import java.lang.management.ManagementFactory;
@@ -52,6 +53,7 @@ public class TestJournalNodeMXBean {
     // start 1 journal node
     jCluster = new MiniJournalCluster.Builder(new Configuration()).format(true)
         .numJournalNodes(NUM_JN).build();
+    jCluster.waitActive();
     jn = jCluster.getJournalNode(0);
   }
   
@@ -89,19 +91,19 @@ public class TestJournalNodeMXBean {
     Map<String, String> infoMap = new HashMap<String, String>();
     infoMap.put("Formatted", "true");
     jMap.put(NAMESERVICE, infoMap);
+    Map<String, String> infoMap1 = new HashMap<>();
+    infoMap1.put("Formatted", "false");
+    jMap.put(MiniJournalCluster.CLUSTER_WAITACTIVE_URI, infoMap1);
     assertEquals(JSON.toString(jMap), journalStatus);
     
     // restart journal node without formatting
     jCluster = new MiniJournalCluster.Builder(new Configuration()).format(false)
         .numJournalNodes(NUM_JN).build();
+    jCluster.waitActive();
     jn = jCluster.getJournalNode(0);
     // re-check 
     journalStatus = (String) mbs.getAttribute(mxbeanName, "JournalsStatus");
     assertEquals(jn.getJournalsStatus(), journalStatus);
-    jMap = new HashMap<String, Map<String, String>>();
-    infoMap = new HashMap<String, String>();
-    infoMap.put("Formatted", "true");
-    jMap.put(NAMESERVICE, infoMap);
     assertEquals(JSON.toString(jMap), journalStatus);
   }
 }


[33/38] hadoop git commit: HDFS-9214. Support reconfiguring dfs.datanode.balance.max.concurrent.moves without DN restart. (Contributed by Xiaobing Zhou)

Posted by as...@apache.org.
HDFS-9214. Support reconfiguring dfs.datanode.balance.max.concurrent.moves without DN restart. (Contributed by Xiaobing Zhou)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9d817fa1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9d817fa1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9d817fa1

Branch: refs/heads/yarn-2877
Commit: 9d817fa1b14b477e5440ae4edd78de849976d9b5
Parents: 59dbe8b
Author: Arpit Agarwal <ar...@apache.org>
Authored: Fri Dec 4 14:46:46 2015 -0800
Committer: Arpit Agarwal <ar...@apache.org>
Committed: Fri Dec 4 14:46:46 2015 -0800

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |   3 +
 .../hadoop/hdfs/server/datanode/DataNode.java   |  40 ++-
 .../hdfs/server/datanode/DataXceiverServer.java |  58 +++--
 .../datanode/TestDataNodeReconfiguration.java   | 241 +++++++++++++++++++
 .../apache/hadoop/hdfs/tools/TestDFSAdmin.java  |   2 +-
 5 files changed, 319 insertions(+), 25 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9d817fa1/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 34c3ff2..e10450d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1720,6 +1720,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-9474. TestPipelinesFailover should not fail when printing debug
     message. (John Zhuge via Yongjun Zhang)
 
+    HDFS-9214. Support reconfiguring dfs.datanode.balance.max.concurrent.moves
+    without DN restart. (Xiaobing Zhou via Arpit Agarwal)
+
   OPTIMIZATIONS
 
     HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9d817fa1/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index 0a68758..150ce6b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -42,6 +42,8 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_OOB_TIMEOUT_DEFA
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_OOB_TIMEOUT_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_PLUGINS_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_STARTUP_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_MAX_NUM_BLOCKS_TO_LOG_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_MAX_NUM_BLOCKS_TO_LOG_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_METRICS_LOGGER_PERIOD_SECONDS_DEFAULT;
@@ -92,7 +94,6 @@ import java.util.concurrent.atomic.AtomicInteger;
 
 import javax.management.ObjectName;
 
-
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
@@ -212,6 +213,7 @@ import com.google.common.cache.CacheLoader;
 import com.google.common.cache.LoadingCache;
 import com.google.common.collect.Lists;
 import com.google.protobuf.BlockingService;
+
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -284,7 +286,9 @@ public class DataNode extends ReconfigurableBase
   /** A list of property that are reconfigurable at runtime. */
   private static final List<String> RECONFIGURABLE_PROPERTIES =
       Collections.unmodifiableList(
-          Arrays.asList(DFS_DATANODE_DATA_DIR_KEY));
+          Arrays.asList(
+              DFS_DATANODE_DATA_DIR_KEY,
+              DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_KEY));
 
   public static final Log METRICS_LOG = LogFactory.getLog("DataNodeMetricsLog");
 
@@ -522,6 +526,38 @@ public class DataNode extends ReconfigurableBase
           }
         }
       }
+    } else if (property.equals(
+        DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_KEY)) {
+      ReconfigurationException rootException = null;
+      try {
+        LOG.info("Reconfiguring " + property + " to " + newVal);
+        int movers;
+        if (newVal == null) {
+          // set to default
+          movers = DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_DEFAULT;
+        } else {
+          movers = Integer.parseInt(newVal);
+          if (movers <= 0) {
+            rootException = new ReconfigurationException(
+                property,
+                newVal,
+                getConf().get(property),
+                new IllegalArgumentException(
+                    "balancer max concurrent movers must be larger than 0"));
+          }
+        }
+        xserver.updateBalancerMaxConcurrentMovers(movers);
+      } catch(NumberFormatException nfe) {
+        rootException = new ReconfigurationException(
+            property, newVal, getConf().get(property), nfe);
+      } finally {
+        if (rootException != null) {
+          LOG.warn(String.format(
+              "Exception in updating balancer max concurrent movers %s to %s",
+              property, newVal), rootException);
+          throw rootException;
+        }
+      }
     } else {
       throw new ReconfigurationException(
           property, newVal, getConf().get(property));
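
The hunk above follows a parse, validate, then apply shape: a null value means "revert to the default", while a non-numeric or non-positive value is wrapped in a ReconfigurationException that records the property, the rejected value and the current value, and only a valid count reaches the running DataXceiverServer. A minimal standalone sketch of that shape in plain Java (not the Hadoop class itself; the default of 5 is a stand-in, the real value comes from DFSConfigKeys):

public class MaxMoversParser {
  // Stand-in for DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_DEFAULT.
  static final int DEFAULT_MAX_MOVERS = 5;

  static int parse(String newVal) {
    if (newVal == null) {
      return DEFAULT_MAX_MOVERS;           // null means "revert to default"
    }
    int movers = Integer.parseInt(newVal); // NumberFormatException on junk input
    if (movers <= 0) {
      throw new IllegalArgumentException(
          "balancer max concurrent movers must be larger than 0");
    }
    return movers;
  }

  public static void main(String[] args) {
    System.out.println(parse("8"));   // 8
    System.out.println(parse(null));  // 5
  }
}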

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9d817fa1/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiverServer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiverServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiverServer.java
index 36852eb..36cf8a1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiverServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiverServer.java
@@ -21,6 +21,7 @@ import java.io.IOException;
 import java.net.SocketTimeoutException;
 import java.nio.channels.AsynchronousCloseException;
 import java.util.HashMap;
+import java.util.concurrent.atomic.AtomicInteger;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
@@ -31,6 +32,7 @@ import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.util.Daemon;
 
 import com.google.common.annotations.VisibleForTesting;
+
 import org.slf4j.Logger;
 
 /**
@@ -64,36 +66,45 @@ class DataXceiverServer implements Runnable {
    */
   static class BlockBalanceThrottler extends DataTransferThrottler {
    private int numThreads;
-   private int maxThreads;
-   
+   private final AtomicInteger maxThreads = new AtomicInteger(0);
+
    /**Constructor
     * 
     * @param bandwidth Total amount of bandwidth can be used for balancing 
     */
-   private BlockBalanceThrottler(long bandwidth, int maxThreads) {
-     super(bandwidth);
-     this.maxThreads = maxThreads;
-     LOG.info("Balancing bandwith is "+ bandwidth + " bytes/s");
-     LOG.info("Number threads for balancing is "+ maxThreads);
-   }
-   
+    private BlockBalanceThrottler(long bandwidth, int maxThreads) {
+      super(bandwidth);
+      this.maxThreads.set(maxThreads);
+      LOG.info("Balancing bandwith is " + bandwidth + " bytes/s");
+      LOG.info("Number threads for balancing is " + maxThreads);
+    }
+
+    private void setMaxConcurrentMovers(int movers) {
+      this.maxThreads.set(movers);
+    }
+
+    @VisibleForTesting
+    int getMaxConcurrentMovers() {
+      return this.maxThreads.get();
+    }
+
    /** Check if the block move can start. 
     * 
     * Return true if the thread quota is not exceeded and 
     * the counter is incremented; False otherwise.
     */
-   synchronized boolean acquire() {
-     if (numThreads >= maxThreads) {
-       return false;
-     }
-     numThreads++;
-     return true;
-   }
-   
-   /** Mark that the move is completed. The thread counter is decremented. */
-   synchronized void release() {
-     numThreads--;
-   }
+    synchronized boolean acquire() {
+      if (numThreads >= maxThreads.get()) {
+        return false;
+      }
+      numThreads++;
+      return true;
+    }
+
+    /** Mark that the move is completed. The thread counter is decremented. */
+    synchronized void release() {
+      numThreads--;
+    }
   }
 
   final BlockBalanceThrottler balanceThrottler;
@@ -108,7 +119,6 @@ class DataXceiverServer implements Runnable {
   
   DataXceiverServer(PeerServer peerServer, Configuration conf,
       DataNode datanode) {
-    
     this.peerServer = peerServer;
     this.datanode = datanode;
     
@@ -288,4 +298,8 @@ class DataXceiverServer implements Runnable {
     peers.remove(peer);
     peersXceiver.remove(peer);
   }
+
+  public void updateBalancerMaxConcurrentMovers(int movers) {
+    balanceThrottler.setMaxConcurrentMovers(movers);
+  }
 }
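
The change above is what makes the limit adjustable at runtime: maxThreads moves from a plain int to an AtomicInteger, so setMaxConcurrentMovers() can retarget it on the fly while acquire() and release() keep synchronizing only the in-flight counter. A self-contained sketch of the same pattern with the Hadoop-specific pieces stripped out:

import java.util.concurrent.atomic.AtomicInteger;

public class ConcurrencyThrottler {
  private final AtomicInteger maxThreads = new AtomicInteger();
  private int numThreads;                  // guarded by "this"

  ConcurrencyThrottler(int maxThreads) {
    this.maxThreads.set(maxThreads);
  }

  void setMaxConcurrent(int newMax) {
    maxThreads.set(newMax);                // visible to the next acquire()
  }

  synchronized boolean acquire() {
    if (numThreads >= maxThreads.get()) {
      return false;                        // quota exhausted
    }
    numThreads++;
    return true;
  }

  synchronized void release() {
    numThreads--;
  }

  public static void main(String[] args) {
    ConcurrencyThrottler t = new ConcurrencyThrottler(1);
    System.out.println(t.acquire());       // true
    System.out.println(t.acquire());       // false, limit is 1
    t.setMaxConcurrent(2);
    System.out.println(t.acquire());       // true, the raised limit applies immediately
  }
}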

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9d817fa1/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeReconfiguration.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeReconfiguration.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeReconfiguration.java
new file mode 100644
index 0000000..edaf7ab
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeReconfiguration.java
@@ -0,0 +1,241 @@
+package org.apache.hadoop.hdfs.server.datanode;
+
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_DEFAULT;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import java.io.File;
+import java.io.IOException;
+import java.net.InetSocketAddress;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.conf.ReconfigurationException;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.MiniDFSNNTopology;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+
+/**
+ * Test to reconfigure some parameters for DataNode without restart
+ */
+public class TestDataNodeReconfiguration {
+
+  private static final Log LOG = LogFactory.getLog(TestBlockRecovery.class);
+  private static final String DATA_DIR = MiniDFSCluster.getBaseDirectory()
+      + "data";
+  private final static InetSocketAddress NN_ADDR = new InetSocketAddress(
+      "localhost", 5020);
+  private final int NUM_NAME_NODE = 1;
+  private final int NUM_DATA_NODE = 10;
+  private MiniDFSCluster cluster;
+
+  @Before
+  public void Setup() throws IOException {
+    startDFSCluster(NUM_NAME_NODE, NUM_DATA_NODE);
+  }
+
+  @After
+  public void tearDown() throws Exception {
+    if (cluster != null) {
+      cluster.shutdown();
+      cluster = null;
+    }
+
+    File dir = new File(DATA_DIR);
+    if (dir.exists())
+      Assert.assertTrue("Cannot delete data-node dirs",
+          FileUtil.fullyDelete(dir));
+  }
+
+  private void startDFSCluster(int numNameNodes, int numDataNodes)
+      throws IOException {
+    Configuration conf = new Configuration();
+
+    MiniDFSNNTopology nnTopology = MiniDFSNNTopology
+        .simpleFederatedTopology(numNameNodes);
+
+    cluster = new MiniDFSCluster.Builder(conf).nnTopology(nnTopology)
+        .numDataNodes(numDataNodes).build();
+    cluster.waitActive();
+  }
+
+  /**
+   * Starts an instance of DataNode
+   *
+   * @throws IOException
+   */
+  public DataNode[] createDNsForTest(int numDateNode) throws IOException {
+    Configuration conf = new HdfsConfiguration();
+    conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, DATA_DIR);
+    conf.set(DFSConfigKeys.DFS_DATANODE_ADDRESS_KEY, "0.0.0.0:0");
+    conf.set(DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");
+    conf.set(DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_KEY, "0.0.0.0:0");
+    conf.setInt(CommonConfigurationKeys.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY, 0);
+
+    DataNode[] result = new DataNode[numDateNode];
+    for (int i = 0; i < numDateNode; i++) {
+      result[i] = DataNodeTestUtils.startDNWithMockNN(conf, NN_ADDR, DATA_DIR);
+    }
+    return result;
+  }
+
+  @Test
+  public void testMaxConcurrentMoversReconfiguration()
+      throws ReconfigurationException, IOException {
+    int maxConcurrentMovers = 10;
+    for (int i = 0; i < NUM_DATA_NODE; i++) {
+      DataNode dn = cluster.getDataNodes().get(i);
+
+      // try invalid values
+      try {
+        dn.reconfigureProperty(
+            DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_KEY, "text");
+        fail("ReconfigurationException expected");
+      } catch (ReconfigurationException expected) {
+        assertTrue("expecting NumberFormatException",
+            expected.getCause() instanceof NumberFormatException);
+      }
+      try {
+        dn.reconfigureProperty(
+            DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_KEY,
+            String.valueOf(-1));
+        fail("ReconfigurationException expected");
+      } catch (ReconfigurationException expected) {
+        assertTrue("expecting IllegalArgumentException",
+            expected.getCause() instanceof IllegalArgumentException);
+      }
+      try {
+        dn.reconfigureProperty(
+            DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_KEY,
+            String.valueOf(0));
+        fail("ReconfigurationException expected");
+      } catch (ReconfigurationException expected) {
+        assertTrue("expecting IllegalArgumentException",
+            expected.getCause() instanceof IllegalArgumentException);
+      }
+
+      // change properties
+      dn.reconfigureProperty(DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_KEY,
+          String.valueOf(maxConcurrentMovers));
+
+      // verify change
+      assertEquals(String.format("%s has wrong value",
+          DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_KEY),
+          maxConcurrentMovers, dn.xserver.balanceThrottler.getMaxConcurrentMovers());
+
+      assertEquals(String.format("%s has wrong value",
+          DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_KEY),
+          maxConcurrentMovers, Integer.parseInt(dn.getConf().get(
+              DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_KEY)));
+
+      // revert to default
+      dn.reconfigureProperty(DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_KEY,
+          null);
+
+      // verify default
+      assertEquals(String.format("%s has wrong value",
+          DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_KEY),
+          DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_DEFAULT,
+          dn.xserver.balanceThrottler.getMaxConcurrentMovers());
+
+      assertEquals(String.format("expect %s is not configured",
+          DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_KEY), null, dn
+          .getConf().get(DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_KEY));
+    }
+  }
+
+  @Test
+  public void testAcquireWithMaxConcurrentMoversGreaterThanDefault()
+      throws IOException, ReconfigurationException {
+    testAcquireWithMaxConcurrentMoversShared(10);
+  }
+
+  @Test
+  public void testAcquireWithMaxConcurrentMoversLessThanDefault()
+      throws IOException, ReconfigurationException {
+    testAcquireWithMaxConcurrentMoversShared(3);
+  }
+
+  private void testAcquireWithMaxConcurrentMoversShared(
+      int maxConcurrentMovers)
+      throws IOException, ReconfigurationException {
+    DataNode[] dns = null;
+    try {
+      dns = createDNsForTest(1);
+      testAcquireOnMaxConcurrentMoversReconfiguration(dns[0],
+          maxConcurrentMovers);
+    } catch (IOException ioe) {
+      throw ioe;
+    } catch (ReconfigurationException re) {
+      throw re;
+    } finally {
+      shutDownDNs(dns);
+    }
+  }
+
+  private void shutDownDNs(DataNode[] dns) {
+    if (dns == null) {
+      return;
+    }
+
+    for (int i = 0; i < dns.length; i++) {
+      try {
+        if (dns[i] == null) {
+          continue;
+        }
+        dns[i].shutdown();
+      } catch (Exception e) {
+        LOG.error("Cannot close: ", e);
+      }
+    }
+  }
+
+  private void testAcquireOnMaxConcurrentMoversReconfiguration(
+      DataNode dataNode, int maxConcurrentMovers) throws IOException,
+      ReconfigurationException {
+    int defaultMaxThreads = dataNode.getConf().getInt(
+        DFSConfigKeys.DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_KEY,
+        DFSConfigKeys.DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_DEFAULT);
+    for (int i = 0; i < defaultMaxThreads; i++) {
+      assertEquals("should be able to get thread quota", true,
+          dataNode.xserver.balanceThrottler.acquire());
+    }
+
+    assertEquals("should not be able to get thread quota", false,
+        dataNode.xserver.balanceThrottler.acquire());
+
+    // change properties
+    dataNode.reconfigureProperty(
+        DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_KEY,
+        String.valueOf(maxConcurrentMovers));
+
+    assertEquals("thread quota is wrong", maxConcurrentMovers,
+        dataNode.xserver.balanceThrottler.getMaxConcurrentMovers()); // thread quota
+
+    int val = Math.abs(maxConcurrentMovers - defaultMaxThreads);
+    if (defaultMaxThreads < maxConcurrentMovers) {
+      for (int i = 0; i < val; i++) {
+        assertEquals("should be able to get thread quota", true,
+            dataNode.xserver.balanceThrottler.acquire());
+      }
+    } else if (defaultMaxThreads > maxConcurrentMovers) {
+      for (int i = 0; i < val; i++) {
+        assertEquals("should not be able to get thread quota", false,
+            dataNode.xserver.balanceThrottler.acquire());
+      }
+    }
+
+    assertEquals("should not be able to get thread quota", false,
+        dataNode.xserver.balanceThrottler.acquire());
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9d817fa1/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
index a2b5638..3a30ccf 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
@@ -207,7 +207,7 @@ public class TestDFSAdmin {
     final String address = "localhost:" + port;
     List<String> outputs =
         getReconfigurationAllowedProperties("datanode", address);
-    assertEquals(2, outputs.size());
+    assertEquals(3, outputs.size());
     assertEquals(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY,
         outputs.get(1));
   }
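
Taken together, this makes dfs.datanode.balance.max.concurrent.moves the second DataNode property exposed through the generic reconfiguration path alongside the data-dir key, which is why the expected output size in TestDFSAdmin above grows from 2 to 3 lines. Operationally the new value is expected to be picked up with the existing dfsadmin reconfiguration subcommands, roughly "hdfs dfsadmin -reconfig datanode <dn-host:ipc-port> start" followed by "... status", with "... properties" listing what may be changed; that invocation is an operational sketch rather than something taken from this patch, so check the dfsadmin help on the target release.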


[23/38] hadoop git commit: YARN-4292. ResourceUtilization should be a part of NodeInfo REST API. (Sunil G via wangda)

Posted by as...@apache.org.
YARN-4292. ResourceUtilization should be a part of NodeInfo REST API. (Sunil G via wangda)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a2c3bfc8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a2c3bfc8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a2c3bfc8

Branch: refs/heads/yarn-2877
Commit: a2c3bfc8c1349102a7f2bc4ea96b80b429ac227b
Parents: 9f77cca
Author: Wangda Tan <wa...@apache.org>
Authored: Thu Dec 3 14:28:00 2015 -0800
Committer: Wangda Tan <wa...@apache.org>
Committed: Thu Dec 3 14:28:32 2015 -0800

----------------------------------------------------------------------
 hadoop-yarn-project/CHANGES.txt                 |  3 +
 .../resourcemanager/webapp/dao/NodeInfo.java    |  8 ++
 .../webapp/dao/ResourceUtilizationInfo.java     | 89 ++++++++++++++++++++
 .../webapp/TestRMWebServicesNodes.java          | 79 +++++++++++++++--
 4 files changed, 174 insertions(+), 5 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a2c3bfc8/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index cd19488..6043e64 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -586,6 +586,9 @@ Release 2.8.0 - UNRELEASED
     YARN-4132. Separate configs for nodemanager to resourcemanager connection
     timeout and retries (Chang Li via jlowe)
 
+    YARN-4292. ResourceUtilization should be a part of NodeInfo REST API.
+    (Sunil G via wangda)
+
   OPTIMIZATIONS
 
     YARN-3339. TestDockerContainerExecutor should pull a single image and not

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a2c3bfc8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/NodeInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/NodeInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/NodeInfo.java
index 3104117..0f877f8 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/NodeInfo.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/NodeInfo.java
@@ -50,6 +50,7 @@ public class NodeInfo {
   protected long usedVirtualCores;
   protected long availableVirtualCores;
   protected ArrayList<String> nodeLabels = new ArrayList<String>();
+  protected ResourceUtilizationInfo resourceUtilization;
 
   public NodeInfo() {
   } // JAXB needs this
@@ -82,6 +83,9 @@ public class NodeInfo {
       nodeLabels.addAll(labelSet);
       Collections.sort(nodeLabels);
     }
+
+    // update node and containers resource utilization
+    this.resourceUtilization = new ResourceUtilizationInfo(ni);
   }
 
   public String getRack() {
@@ -139,4 +143,8 @@ public class NodeInfo {
   public ArrayList<String> getNodeLabels() {
     return this.nodeLabels;
   }
+
+  public ResourceUtilizationInfo getResourceUtilization() {
+    return this.resourceUtilization;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a2c3bfc8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ResourceUtilizationInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ResourceUtilizationInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ResourceUtilizationInfo.java
new file mode 100644
index 0000000..67d0d64
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ResourceUtilizationInfo.java
@@ -0,0 +1,89 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.webapp.dao;
+
+import javax.xml.bind.annotation.XmlAccessType;
+import javax.xml.bind.annotation.XmlAccessorType;
+import javax.xml.bind.annotation.XmlRootElement;
+
+import org.apache.hadoop.yarn.server.api.records.ResourceUtilization;
+import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
+
+/**
+ * DAO object represents resource utilization of node and containers.
+ */
+@XmlRootElement
+@XmlAccessorType(XmlAccessType.FIELD)
+public class ResourceUtilizationInfo {
+
+  protected int nodePhysicalMemoryMB;
+  protected int nodeVirtualMemoryMB;
+  protected double nodeCPUUsage;
+  protected int aggregatedContainersPhysicalMemoryMB;
+  protected int aggregatedContainersVirtualMemoryMB;
+  protected double containersCPUUsage;
+
+  public ResourceUtilizationInfo() {
+  } // JAXB needs this
+
+  public ResourceUtilizationInfo(RMNode ni) {
+
+    // update node and containers resource utilization
+    ResourceUtilization nodeUtilization = ni.getNodeUtilization();
+    if (nodeUtilization != null) {
+      this.nodePhysicalMemoryMB = nodeUtilization.getPhysicalMemory();
+      this.nodeVirtualMemoryMB = nodeUtilization.getVirtualMemory();
+      this.nodeCPUUsage = nodeUtilization.getCPU();
+    }
+
+    ResourceUtilization containerAggrUtilization = ni
+        .getAggregatedContainersUtilization();
+    if (containerAggrUtilization != null) {
+      this.aggregatedContainersPhysicalMemoryMB = containerAggrUtilization
+          .getPhysicalMemory();
+      this.aggregatedContainersVirtualMemoryMB = containerAggrUtilization
+          .getVirtualMemory();
+      this.containersCPUUsage = containerAggrUtilization.getCPU();
+    }
+  }
+
+  public int getNodePhysicalMemoryMB() {
+    return nodePhysicalMemoryMB;
+  }
+
+  public int getNodeVirtualMemoryMB() {
+    return nodeVirtualMemoryMB;
+  }
+
+  public int getAggregatedContainersPhysicalMemoryMB() {
+    return aggregatedContainersPhysicalMemoryMB;
+  }
+
+  public int getAggregatedContainersVirtualMemoryMB() {
+    return aggregatedContainersVirtualMemoryMB;
+  }
+
+  public double getNodeCPUUsage() {
+    return nodeCPUUsage;
+  }
+
+  public double getContainersCPUUsage() {
+    return containersCPUUsage;
+  }
+}
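
Once NodeInfo carries the DAO above, each node entry returned by the ResourceManager's /ws/v1/cluster/nodes endpoint includes a resourceUtilization object with these six fields. A hedged client-side sketch of reading them; the RM address is a placeholder and the JSON classes are the Jettison ones these webapp tests already use:

import java.io.InputStream;
import java.net.URL;
import java.util.Scanner;

import org.codehaus.jettison.json.JSONArray;
import org.codehaus.jettison.json.JSONObject;

public class NodeUtilizationProbe {
  public static void main(String[] args) throws Exception {
    // Placeholder ResourceManager webapp address; point it at a real cluster.
    URL url = new URL("http://rm-host:8088/ws/v1/cluster/nodes");
    try (InputStream in = url.openStream();
         Scanner s = new Scanner(in, "UTF-8").useDelimiter("\\A")) {
      JSONObject root = new JSONObject(s.next());
      JSONArray nodes = root.getJSONObject("nodes").getJSONArray("node");
      for (int i = 0; i < nodes.length(); i++) {
        JSONObject node = nodes.getJSONObject(i);
        // Field names mirror ResourceUtilizationInfo above.
        JSONObject util = node.getJSONObject("resourceUtilization");
        System.out.println(node.getString("id")
            + " nodePhysicalMemoryMB=" + util.getInt("nodePhysicalMemoryMB")
            + " nodeCPUUsage=" + util.getDouble("nodeCPUUsage")
            + " containersCPUUsage=" + util.getDouble("containersCPUUsage"));
      }
    }
  }
}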

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a2c3bfc8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesNodes.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesNodes.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesNodes.java
index 206edb1..462ce50 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesNodes.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesNodes.java
@@ -36,6 +36,7 @@ import org.apache.hadoop.yarn.api.records.NodeId;
 import org.apache.hadoop.yarn.api.records.NodeState;
 import org.apache.hadoop.yarn.server.api.records.NodeHealthStatus;
 import org.apache.hadoop.yarn.server.api.records.NodeStatus;
+import org.apache.hadoop.yarn.server.api.records.ResourceUtilization;
 import org.apache.hadoop.yarn.server.resourcemanager.MockNM;
 import org.apache.hadoop.yarn.server.resourcemanager.MockRM;
 import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
@@ -639,6 +640,44 @@ public class TestRMWebServicesNodes extends JerseyTestBase {
     assertEquals("incorrect number of elements", 3, nodeArray.length());
   }
 
+  @Test
+  public void testNodesResourceUtilization() throws JSONException, Exception {
+    WebResource r = resource();
+    MockNM nm1 = rm.registerNode("h1:1234", 5120);
+    rm.sendNodeStarted(nm1);
+    rm.NMwaitForState(nm1.getNodeId(), NodeState.RUNNING);
+
+    RMNodeImpl node = (RMNodeImpl) rm.getRMContext().getRMNodes()
+        .get(nm1.getNodeId());
+    NodeHealthStatus nodeHealth = NodeHealthStatus.newInstance(true,
+        "test health report", System.currentTimeMillis());
+    ResourceUtilization nodeResource = ResourceUtilization.newInstance(4096, 0,
+        (float) 10.5);
+    ResourceUtilization containerResource = ResourceUtilization.newInstance(
+        2048, 0, (float) 5.05);
+    NodeStatus nodeStatus = NodeStatus.newInstance(nm1.getNodeId(), 0,
+        new ArrayList<ContainerStatus>(), null, nodeHealth, containerResource,
+        nodeResource, null);
+    node.handle(new RMNodeStatusEvent(nm1.getNodeId(), nodeStatus, null));
+    rm.NMwaitForState(nm1.getNodeId(), NodeState.RUNNING);
+
+    ClientResponse response = r.path("ws").path("v1").path("cluster")
+        .path("nodes").accept(MediaType.APPLICATION_JSON)
+        .get(ClientResponse.class);
+
+    assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
+    JSONObject json = response.getEntity(JSONObject.class);
+    assertEquals("incorrect number of elements", 1, json.length());
+    JSONObject nodes = json.getJSONObject("nodes");
+    assertEquals("incorrect number of elements", 1, nodes.length());
+    JSONArray nodeArray = nodes.getJSONArray("node");
+    assertEquals("incorrect number of elements", 1, nodeArray.length());
+    JSONObject info = nodeArray.getJSONObject(0);
+
+    // verify the resource utilization
+    verifyNodeInfo(info, nm1);
+  }
+
   public void verifyNodesXML(NodeList nodes, MockNM nm) throws JSONException,
       Exception {
     for (int i = 0; i < nodes.getLength(); i++) {
@@ -656,14 +695,23 @@ public class TestRMWebServicesNodes extends JerseyTestBase {
           WebServicesTestUtils.getXmlLong(element, "availMemoryMB"),
           WebServicesTestUtils.getXmlLong(element, "usedVirtualCores"),
           WebServicesTestUtils.getXmlLong(element,  "availableVirtualCores"),
-          WebServicesTestUtils.getXmlString(element, "version"));
+          WebServicesTestUtils.getXmlString(element, "version"),
+          WebServicesTestUtils.getXmlInt(element, "nodePhysicalMemoryMB"),
+          WebServicesTestUtils.getXmlInt(element, "nodeVirtualMemoryMB"),
+          WebServicesTestUtils.getXmlFloat(element, "nodeCPUUsage"),
+          WebServicesTestUtils.getXmlInt(element,
+              "aggregatedContainersPhysicalMemoryMB"),
+          WebServicesTestUtils.getXmlInt(element,
+              "aggregatedContainersVirtualMemoryMB"),
+          WebServicesTestUtils.getXmlFloat(element, "containersCPUUsage"));
     }
   }
 
   public void verifyNodeInfo(JSONObject nodeInfo, MockNM nm)
       throws JSONException, Exception {
-    assertEquals("incorrect number of elements", 13, nodeInfo.length());
+    assertEquals("incorrect number of elements", 14, nodeInfo.length());
 
+    JSONObject resourceInfo = nodeInfo.getJSONObject("resourceUtilization");
     verifyNodeInfoGeneric(nm, nodeInfo.getString("state"),
         nodeInfo.getString("rack"),
         nodeInfo.getString("id"), nodeInfo.getString("nodeHostName"),
@@ -672,15 +720,23 @@ public class TestRMWebServicesNodes extends JerseyTestBase {
         nodeInfo.getString("healthReport"), nodeInfo.getInt("numContainers"),
         nodeInfo.getLong("usedMemoryMB"), nodeInfo.getLong("availMemoryMB"),
         nodeInfo.getLong("usedVirtualCores"), nodeInfo.getLong("availableVirtualCores"),
-        nodeInfo.getString("version"));
-
+        nodeInfo.getString("version"),
+        resourceInfo.getInt("nodePhysicalMemoryMB"),
+        resourceInfo.getInt("nodeVirtualMemoryMB"),
+        resourceInfo.getDouble("nodeCPUUsage"),
+        resourceInfo.getInt("aggregatedContainersPhysicalMemoryMB"),
+        resourceInfo.getInt("aggregatedContainersVirtualMemoryMB"),
+        resourceInfo.getDouble("containersCPUUsage"));
   }
 
   public void verifyNodeInfoGeneric(MockNM nm, String state, String rack,
       String id, String nodeHostName,
       String nodeHTTPAddress, long lastHealthUpdate, String healthReport,
       int numContainers, long usedMemoryMB, long availMemoryMB, long usedVirtualCores, 
-      long availVirtualCores, String version)
+      long availVirtualCores, String version, int nodePhysicalMemoryMB,
+      int nodeVirtualMemoryMB, double nodeCPUUsage,
+      int containersPhysicalMemoryMB, int containersVirtualMemoryMB,
+      double containersCPUUsage)
       throws JSONException, Exception {
 
     RMNode node = rm.getRMContext().getRMNodes().get(nm.getNodeId());
@@ -701,6 +757,19 @@ public class TestRMWebServicesNodes extends JerseyTestBase {
         expectedHttpAddress, nodeHTTPAddress);
     WebServicesTestUtils.checkStringMatch("version",
         node.getNodeManagerVersion(), version);
+    if (node.getNodeUtilization() != null) {
+      ResourceUtilization nodeResource = ResourceUtilization.newInstance(
+          nodePhysicalMemoryMB, nodeVirtualMemoryMB, (float) nodeCPUUsage);
+      assertEquals("nodeResourceUtilization doesn't match",
+          node.getNodeUtilization(), nodeResource);
+    }
+    if (node.getAggregatedContainersUtilization() != null) {
+      ResourceUtilization containerResource = ResourceUtilization.newInstance(
+          containersPhysicalMemoryMB, containersVirtualMemoryMB,
+          (float) containersCPUUsage);
+      assertEquals("containerResourceUtilization doesn't match",
+          node.getAggregatedContainersUtilization(), containerResource);
+    }
 
     long expectedHealthUpdate = node.getLastHealthReportTime();
     assertEquals("lastHealthUpdate doesn't match, got: " + lastHealthUpdate


[24/38] hadoop git commit: HDFS-9436. Make NNThroughputBenchmark$BlockReportStats run with 10 datanodes by default. Contributed by Mingliang.

Posted by as...@apache.org.
HDFS-9436. Make NNThroughputBenchmark$BlockReportStats run with 10 datanodes by default. Contributed by Mingliang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e71aa717
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e71aa717
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e71aa717

Branch: refs/heads/yarn-2877
Commit: e71aa717d42f6472dab8eb98930df26a9025a012
Parents: a2c3bfc
Author: Konstantin V Shvachko <sh...@apache.org>
Authored: Thu Dec 3 14:48:02 2015 -0800
Committer: Konstantin V Shvachko <sh...@apache.org>
Committed: Thu Dec 3 15:03:33 2015 -0800

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt                      | 3 +++
 .../hadoop/hdfs/server/namenode/NNThroughputBenchmark.java       | 4 +++-
 2 files changed, 6 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e71aa717/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index b65c048..ef2efc7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1766,6 +1766,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-8335. FSNamesystem should construct FSPermissionChecker only if
     permission is enabled. (Gabor Liptak via wheat9)
 
+    HDFS-9436. Make NNThroughputBenchmark$BlockReportStats run with 10
+    datanodes by default. (Mingliang Liu via shv)
+
   BUG FIXES
 
     HDFS-7501. TransactionsSinceLastCheckpoint can be negative on SBNs.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e71aa717/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
index affbe2f..91f9793 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
@@ -1098,6 +1098,8 @@ public class NNThroughputBenchmark implements Tool {
 
     BlockReportStats(List<String> args) {
       super();
+      numThreads = 10;
+      numOpsRequired = 30;
       this.blocksPerReport = 100;
       this.blocksPerFile = 10;
       // set heartbeat interval to 3 min, so that expiration were 40 min
@@ -1258,7 +1260,7 @@ public class NNThroughputBenchmark implements Tool {
     ReplicationStats(List<String> args) {
       super();
       numThreads = 1;
-      numDatanodes = 3;
+      numDatanodes = 10;
       nodesToDecommission = 1;
       nodeReplicationLimit = 100;
       totalBlocks = 100;


[08/38] hadoop git commit: HDFS-9336. deleteSnapshot throws NPE when snapshotname is null. Contributed by Brahma Reddy Battula.

Posted by as...@apache.org.
HDFS-9336. deleteSnapshot throws NPE when snapshotname is null. Contributed by Brahma Reddy Battula.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1c05393b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1c05393b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1c05393b

Branch: refs/heads/yarn-2877
Commit: 1c05393b51748033279bff31dbc5c5cae7fc3a86
Parents: 9b8e50b
Author: Akira Ajisaka <aa...@apache.org>
Authored: Tue Dec 1 11:29:09 2015 +0800
Committer: Akira Ajisaka <aa...@apache.org>
Committed: Tue Dec 1 11:29:09 2015 +0800

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |  3 +
 .../hdfs/server/namenode/NameNodeRpcServer.java |  3 +
 .../namenode/TestNameNodeRpcServerMethods.java  | 84 ++++++++++++++++++++
 3 files changed, 90 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1c05393b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 5ee5446..bf77e73 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -2415,6 +2415,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-9467. Fix data race accessing writeLockHeldTimeStamp in FSNamesystem.
     (Mingliang Liu via jing9)
 
+    HDFS-9336. deleteSnapshot throws NPE when snapshotname is null.
+    (Brahma Reddy Battula via aajisaka)
+
 Release 2.7.3 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1c05393b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
index 367fad0..490f3e4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
@@ -1646,6 +1646,9 @@ class NameNodeRpcServer implements NamenodeProtocols {
   public void deleteSnapshot(String snapshotRoot, String snapshotName)
       throws IOException {
     checkNNStartup();
+    if (snapshotName == null || snapshotName.isEmpty()) {
+      throw new IOException("The snapshot name is null or empty.");
+    }
     namesystem.checkOperation(OperationCategory.WRITE);
     metrics.incrDeleteSnapshotOps();
     CacheEntry cacheEntry = RetryCache.waitForCompletion(retryCache);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1c05393b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRpcServerMethods.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRpcServerMethods.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRpcServerMethods.java
new file mode 100644
index 0000000..80d9722
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRpcServerMethods.java
@@ -0,0 +1,84 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode;
+
+import java.io.IOException;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.UnresolvedLinkException;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
+import org.apache.hadoop.security.AccessControlException;
+import org.apache.hadoop.test.GenericTestUtils;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+
+public class TestNameNodeRpcServerMethods {
+  private static NamenodeProtocols nnRpc;
+  private static Configuration conf;
+  private static MiniDFSCluster cluster;
+
+  /** Start a cluster */
+  @Before
+  public void setup() throws Exception {
+    conf = new HdfsConfiguration();
+    cluster = new MiniDFSCluster.Builder(conf).build();
+    cluster.waitActive();
+    nnRpc = cluster.getNameNode().getRpcServer();
+  }
+
+  /**
+   * Cleanup after the test
+   *
+   * @throws IOException
+   * @throws UnresolvedLinkException
+   * @throws SafeModeException
+   * @throws AccessControlException
+   */
+  @After
+  public void cleanup() throws IOException {
+    if (cluster != null)
+      cluster.shutdown();
+  }
+
+  @Test
+  public void testDeleteSnapshotWhenSnapshotNameIsEmpty() throws Exception {
+    String dir = "/testNamenodeRetryCache/testDelete";
+    try {
+      nnRpc.deleteSnapshot(dir, null);
+      Assert.fail("testdeleteSnapshot is not thrown expected exception ");
+    } catch (IOException e) {
+      // expected
+      GenericTestUtils.assertExceptionContains(
+          "The snapshot name is null or empty.", e);
+    }
+    try {
+      nnRpc.deleteSnapshot(dir, "");
+      Assert.fail("testdeleteSnapshot is not thrown expected exception");
+    } catch (IOException e) {
+      // expected
+      GenericTestUtils.assertExceptionContains(
+          "The snapshot name is null or empty.", e);
+    }
+
+  }
+
+}
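
From a client's point of view, the guard above turns what used to surface as a NullPointerException inside the RPC server into an ordinary IOException with a readable message. A hedged illustration against the public FileSystem API; the directory and snapshot name are placeholders, and it assumes fs.defaultFS points at an HDFS cluster with snapshots enabled on /data:

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class DeleteSnapshotExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    try (FileSystem fs = FileSystem.get(conf)) {
      Path dir = new Path("/data");       // assumed snapshottable directory
      try {
        fs.deleteSnapshot(dir, null);     // rejected: snapshot name is null
      } catch (IOException expected) {
        System.out.println("Rejected as expected: " + expected.getMessage());
      }
      fs.deleteSnapshot(dir, "s1");       // fine: an existing named snapshot
    }
  }
}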


[37/38] hadoop git commit: HDFS-9414. Add missing license header

Posted by as...@apache.org.
HDFS-9414. Add missing license header


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/42d49016
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/42d49016
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/42d49016

Branch: refs/heads/yarn-2877
Commit: 42d49016d4128eff71a7d1f8365be9ea9d222070
Parents: 86c95cb
Author: Arpit Agarwal <ar...@apache.org>
Authored: Sat Dec 5 09:24:36 2015 -0800
Committer: Arpit Agarwal <ar...@apache.org>
Committed: Sat Dec 5 09:24:36 2015 -0800

----------------------------------------------------------------------
 .../hdfs/protocol/ReconfigurationProtocol.java    | 18 ++++++++++++++++++
 1 file changed, 18 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/42d49016/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ReconfigurationProtocol.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ReconfigurationProtocol.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ReconfigurationProtocol.java
index 23fd57c..a22192c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ReconfigurationProtocol.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ReconfigurationProtocol.java
@@ -1,3 +1,21 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
 package org.apache.hadoop.hdfs.protocol;
 
 


[05/38] hadoop git commit: Revert "HADOOP-11505. Various native parts use bswap incorrectly and unportably (Alan Burlison via aw)"

Posted by as...@apache.org.
Revert "HADOOP-11505. Various native parts use bswap incorrectly and unportably (Alan Burlison via aw)"

This reverts commit 6725e7f1beb96177b0b59a6082a05869aab2e37b.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c37c3f41
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c37c3f41
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c37c3f41

Branch: refs/heads/yarn-2877
Commit: c37c3f41b3b5c70b0fe892a3bb8aec4246257bf0
Parents: 2577e5b
Author: cnauroth <cn...@apache.org>
Authored: Sat Nov 28 17:21:11 2015 -0800
Committer: cnauroth <cn...@apache.org>
Committed: Sat Nov 28 17:21:11 2015 -0800

----------------------------------------------------------------------
 hadoop-common-project/hadoop-common/CHANGES.txt |   3 -
 .../hadoop-common/src/CMakeLists.txt            | 125 -------------------
 .../src/org/apache/hadoop/util/bulk_crc32.c     |  17 ++-
 .../src/CMakeLists.txt                          |   1 -
 .../mapred/nativetask/INativeComparable.java    |   4 +-
 .../src/main/native/src/NativeTask.h            |   9 ++
 .../src/main/native/src/codec/BlockCodec.cc     |   4 +-
 .../src/main/native/src/codec/Lz4Codec.cc       |   4 +-
 .../src/main/native/src/codec/SnappyCodec.cc    |   4 +-
 .../main/native/src/handler/CombineHandler.cc   |  15 ++-
 .../src/handler/MCollectorOutputHandler.cc      |  10 +-
 .../src/handler/MCollectorOutputHandler.h       |   2 +
 .../src/main/native/src/lib/Buffers.h           |  13 +-
 .../src/main/native/src/lib/IFile.cc            |   4 +-
 .../src/main/native/src/lib/IFile.h             |   4 +-
 .../main/native/src/lib/NativeObjectFactory.cc  |  16 +--
 .../src/main/native/src/lib/SpillInfo.cc        |   4 +-
 .../src/main/native/src/lib/commons.h           |   1 -
 .../src/main/native/src/lib/primitives.h        |  50 ++++++--
 .../src/main/native/src/util/WritableUtils.cc   |  26 ++--
 .../src/main/native/test/TestIFile.cc           |   2 +-
 .../src/main/native/test/TestSort.cc            |   8 +-
 .../src/main/native/test/lib/TestKVBuffer.cc    |   4 +-
 .../native/test/lib/TestMemBlockIterator.cc     |   2 +-
 .../src/main/native/test/lib/TestMemoryBlock.cc |   6 +-
 .../main/native/test/lib/TestPartitionBucket.cc |  26 ++--
 26 files changed, 139 insertions(+), 225 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c37c3f41/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 3c77099..d48479a 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -537,9 +537,6 @@ Trunk (Unreleased)
 
     HADOOP-12553. [JDK8] Fix javadoc error caused by illegal tag. (aajisaka)
 
-    HADOOP-11505. Various native parts use bswap incorrectly and unportably
-    (Alan Burlison via aw)
-
   OPTIMIZATIONS
 
     HADOOP-7761. Improve the performance of raw comparisons. (todd)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c37c3f41/hadoop-common-project/hadoop-common/src/CMakeLists.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/CMakeLists.txt b/hadoop-common-project/hadoop-common/src/CMakeLists.txt
index a8762d5..63bb773 100644
--- a/hadoop-common-project/hadoop-common/src/CMakeLists.txt
+++ b/hadoop-common-project/hadoop-common/src/CMakeLists.txt
@@ -41,131 +41,6 @@ endif()
 # Configure JNI.
 include(HadoopJNI)
 
-#
-# Endian configuration, as per http://austingroupbugs.net/view.php?id=162#c665
-#
-
-# Work out the endianness, set header macro values.
-include(TestBigEndian)
-include(CheckIncludeFile)
-include(CheckSymbolExists)
-test_big_endian(_bigendian)
-if(_bigendian)
-  set(HADOOP_BYTE_ORDER "HADOOP_BIG_ENDIAN")
-else()
-  set(HADOOP_BYTE_ORDER "HADOOP_LITTLE_ENDIAN")
-endif()
-
-# Linux, NetBSD, FreeBSD and OpenBSD all provide htoXXX definitions in endian.h or sys/endian.h.
-check_include_file("endian.h" _endian_h)
-if (_endian_h)
-  set(HADOOP_ENDIAN_H "endian.h")
-else()
-  check_include_file("sys/endian.h" _sys_endian_h)
-  if (_sys_endian_h)
-    set(HADOOP_ENDIAN_H "sys/endian.h")
-  endif()
-endif()
-if(DEFINED HADOOP_ENDIAN_H)
-check_symbol_exists("be64toh" ${HADOOP_ENDIAN_H} _be64toh)
-  if( _be64toh)
-    set(HADOOP_HTOBE16 "htobe16")
-    set(HADOOP_HTOLE16 "htole16")
-    set(HADOOP_BE16TOH "be16toh")
-    set(HADOOP_LE16TOH "le16toh")
-    set(HADOOP_HTOBE32 "htobe32")
-    set(HADOOP_HTOLE32 "htole32")
-    set(HADOOP_BE32TOH "be32toh")
-    set(HADOOP_LE32TOH "le32toh")
-    set(HADOOP_HTOBE64 "htobe64")
-    set(HADOOP_HTOLE64 "htole64")
-    set(HADOOP_BE64TOH "be64toh")
-    set(HADOOP_LE64TOH "le64toh")
-    set(_have_endian TRUE)
-    unset(_be64toh)
-  else()
-    message(FATAL_ERROR "endian.h located but doesn't contain be64toh")
-  endif()
-endif()
-
-# Solaris doesn't provide htoXXX, we have to provide alternatives.
-if(NOT _have_endian)
-  check_include_file("sys/byteorder.h" _sys_byteorder_h)
-  if(_sys_byteorder_h)
-    set(HADOOP_ENDIAN_H "sys/byteorder.h")
-    check_symbol_exists("BSWAP_64" ${HADOOP_ENDIAN_H} _bswap_64)
-  endif()
-  if(_sys_byteorder_h AND _bswap_64)
-    if(_bigendian)
-      set(HADOOP_HTOBE16 "")
-      set(HADOOP_HTOLE16 "BSWAP_16")
-      set(HADOOP_BE16TOH "")
-      set(HADOOP_LE16TOH "BSWAP_16")
-      set(HADOOP_HTOBE32 "")
-      set(HADOOP_HTOLE32 "BSWAP_32")
-      set(HADOOP_BE32TOH "")
-      set(HADOOP_LE32TOH "BSWAP_32")
-      set(HADOOP_HTOBE64 "")
-      set(HADOOP_HTOLE64 "BSWAP_64")
-      set(HADOOP_BE64TOH "")
-      set(HADOOP_LE64TOH "BSWAP_64")
-    else()
-      set(HADOOP_HTOBE16 "BSWAP_16")
-      set(HADOOP_HTOLE16 "")
-      set(HADOOP_BE16TOH "BSWAP_16")
-      set(HADOOP_LE16TOH "")
-      set(HADOOP_HTOBE32 "BSWAP_32")
-      set(HADOOP_HTOLE32 "")
-      set(HADOOP_BE32TOH "BSWAP_32")
-      set(HADOOP_LE32TOH "")
-      set(HADOOP_HTOBE64 "BSWAP_64")
-      set(HADOOP_HTOLE64 "")
-      set(HADOOP_BE64TOH "BSWAP_64")
-      set(HADOOP_LE64TOH "")
-    endif()
-    set(_have_endian TRUE)
-    unset(_sys_byteorder_h)
-    unset(_bswap_64)
-  endif()
-endif()
-
-# OSX uses libkern/OSByteOrder.h and OSSwapXtoY.
-if(NOT _have_endian)
-  check_include_file("libkern/OSByteOrder.h" _libkern_osbyteorder_h)
-  if(_libkern_osbyteorder_h)
-    set(HADOOP_ENDIAN_H "libkern/OSByteOrder.h")
-    check_symbol_exists("OSSwapHostToLittleInt64" ${HADOOP_ENDIAN_H} _osswaphosttolittleint64)
-  endif()
-  if(_libkern_osbyteorder_h AND _osswaphosttolittleint64)
-    set(HADOOP_HTOBE16 "OSSwapHostToBigInt16")
-    set(HADOOP_HTOLE16 "OSSwapHostToLittleInt16")
-    set(HADOOP_BE16TOH "OSSwapBigToHostInt16")
-    set(HADOOP_LE16TOH "OSSwapLittleToHostInt16")
-    set(HADOOP_HTOBE32 "OSSwapHostToBigInt32")
-    set(HADOOP_HTOLE32 "OSSwapHostToLittleInt32")
-    set(HADOOP_BE32TOH "OSSwapBigToHostInt32")
-    set(HADOOP_LE32TOH "OSSwapLittleToHostInt32")
-    set(HADOOP_HTOBE64 "OSSwapHostToBigInt64")
-    set(HADOOP_HTOLE64 "OSSwapHostToLittleInt64")
-    set(HADOOP_BE64TOH "OSSwapBigToHostInt64")
-    set(HADOOP_LE64TOH "OSSwapLittleToHostInt64")
-    set(_have_endian TRUE)
-    unset(_libkern_osbyteorder_h)
-    unset(_osswaphosttolittleint64)
-  endif()
-endif()
-
-# Bail if we don't know the endian definitions for this platform.
-if(NOT _have_endian)
-  message(FATAL_ERROR "Can't provide endianness definitions for this platform")
-endif()
-
-# Configure the hadoop_endian.h header file.
-configure_file(${CMAKE_SOURCE_DIR}/hadoop_endian.h.cmake ${CMAKE_BINARY_DIR}/hadoop_endian.h)
-unset(_bigendian)
-unset(_have_endian)
-unset(HADOOP_ENDIAN_H)
-
 # Require zlib.
 set(STORED_CMAKE_FIND_LIBRARY_SUFFIXES ${CMAKE_FIND_LIBRARY_SUFFIXES})
 hadoop_set_find_shared_library_version("1")

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c37c3f41/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/util/bulk_crc32.c
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/util/bulk_crc32.c b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/util/bulk_crc32.c
index 988ccf2..b3bb699 100644
--- a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/util/bulk_crc32.c
+++ b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/util/bulk_crc32.c
@@ -37,7 +37,6 @@
 #include "crc32c_tables.h"
 #include "bulk_crc32.h"
 #include "gcc_optimizations.h"
-#include "hadoop_endian.h"
 
 #define CRC_INITIAL_VAL 0xffffffff
 
@@ -164,7 +163,7 @@ static uint32_t crc32c_sb8(uint32_t crc, const uint8_t *buf, size_t length) {
   for (li=0; li < running_length/8; li++) {
 	uint32_t term1;
 	uint32_t term2;
-    crc ^= hadoop_htole32(*(uint32_t *)buf);
+    crc ^= *(uint32_t *)buf;
     buf += 4;
     term1 = CRC32C_T8_7[crc & 0x000000FF] ^
         CRC32C_T8_6[(crc >> 8) & 0x000000FF];
@@ -172,10 +171,10 @@ static uint32_t crc32c_sb8(uint32_t crc, const uint8_t *buf, size_t length) {
     crc = term1 ^
         CRC32C_T8_5[term2 & 0x000000FF] ^ 
         CRC32C_T8_4[(term2 >> 8) & 0x000000FF];
-    term1 = CRC32C_T8_3[hadoop_htole32(*(uint32_t *)buf) & 0x000000FF] ^
-        CRC32C_T8_2[(hadoop_htole32(*(uint32_t *)buf) >> 8) & 0x000000FF];
+    term1 = CRC32C_T8_3[(*(uint32_t *)buf) & 0x000000FF] ^
+        CRC32C_T8_2[((*(uint32_t *)buf) >> 8) & 0x000000FF];
     
-    term2 = hadoop_htole32((*(uint32_t *)buf)) >> 16;
+    term2 = (*(uint32_t *)buf) >> 16;
     crc =  crc ^ 
         term1 ^    
         CRC32C_T8_1[term2  & 0x000000FF] ^  
@@ -210,7 +209,7 @@ static uint32_t crc32_zlib_sb8(
   for (li=0; li < running_length/8; li++) {
 	uint32_t term1;
 	uint32_t term2;
-    crc ^= hadoop_htole32(*(uint32_t *)buf);
+    crc ^= *(uint32_t *)buf;
     buf += 4;
     term1 = CRC32_T8_7[crc & 0x000000FF] ^
         CRC32_T8_6[(crc >> 8) & 0x000000FF];
@@ -218,10 +217,10 @@ static uint32_t crc32_zlib_sb8(
     crc = term1 ^
         CRC32_T8_5[term2 & 0x000000FF] ^ 
         CRC32_T8_4[(term2 >> 8) & 0x000000FF];
-    term1 = CRC32_T8_3[hadoop_htole32(*(uint32_t *)buf) & 0x000000FF] ^
-        CRC32_T8_2[(hadoop_htole32(*(uint32_t *)buf) >> 8) & 0x000000FF];
+    term1 = CRC32_T8_3[(*(uint32_t *)buf) & 0x000000FF] ^
+        CRC32_T8_2[((*(uint32_t *)buf) >> 8) & 0x000000FF];
     
-    term2 = hadoop_htole32(*(uint32_t *)buf) >> 16;
+    term2 = (*(uint32_t *)buf) >> 16;
     crc =  crc ^ 
         term1 ^    
         CRC32_T8_1[term2  & 0x000000FF] ^  

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c37c3f41/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/CMakeLists.txt
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/CMakeLists.txt b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/CMakeLists.txt
index 99428b0..f878a94 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/CMakeLists.txt
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/CMakeLists.txt
@@ -90,7 +90,6 @@ include_directories(
     ${SRC}/src/util
     ${SRC}/src/lib
     ${SRC}/test
-    ../../../../hadoop-common-project/hadoop-common/target/native
     ${CMAKE_CURRENT_SOURCE_DIR}
     ${CMAKE_BINARY_DIR}
     ${JNI_INCLUDE_DIRS}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c37c3f41/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/java/org/apache/hadoop/mapred/nativetask/INativeComparable.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/java/org/apache/hadoop/mapred/nativetask/INativeComparable.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/java/org/apache/hadoop/mapred/nativetask/INativeComparable.java
index df6570a..1ec05db 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/java/org/apache/hadoop/mapred/nativetask/INativeComparable.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/java/org/apache/hadoop/mapred/nativetask/INativeComparable.java
@@ -42,8 +42,8 @@ import org.apache.hadoop.classification.InterfaceStability;
  * <code>
  *   int HivePlatform::HiveKeyComparator(const char * src, uint32_t srcLength,
  *   const char * dest, uint32_t destLength) {
- *     uint32_t sl = hadoop_be32toh(*(uint32_t*)src);
- *     uint32_t dl = hadoop_be32toh(*(uint32_t*)dest);
+ *     uint32_t sl = bswap(*(uint32_t*)src);
+ *     uint32_t dl = bswap(*(uint32_t*)dest);
  *     return NativeObjectFactory::BytesComparator(src + 4, sl, dest + 4, dl);
  *   }
  * </code>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c37c3f41/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/src/NativeTask.h
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/src/NativeTask.h b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/src/NativeTask.h
index f1336ef..ba026f5 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/src/NativeTask.h
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/src/NativeTask.h
@@ -40,6 +40,15 @@ enum NativeObjectType {
   BatchHandlerType = 1,
 };
 
+/**
+ * Enduim setting
+ *
+ */
+enum Endium {
+  LITTLE_ENDIUM = 0,
+  LARGE_ENDIUM = 1
+};
+
 #define NATIVE_COMBINER "native.combiner.class"
 #define NATIVE_PARTITIONER "native.partitioner.class"
 #define NATIVE_MAPPER "native.mapper.class"

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c37c3f41/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/src/codec/BlockCodec.cc
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/src/codec/BlockCodec.cc b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/src/codec/BlockCodec.cc
index 7ce26f1..ce36239 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/src/codec/BlockCodec.cc
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/src/codec/BlockCodec.cc
@@ -104,8 +104,8 @@ int32_t BlockDecompressStream::read(void * buff, uint32_t length) {
       THROW_EXCEPTION(IOException, "readFully get incomplete data");
     }
     _compressedBytesRead += rd;
-    sizes[0] = hadoop_be32toh(sizes[0]);
-    sizes[1] = hadoop_be32toh(sizes[1]);
+    sizes[0] = bswap(sizes[0]);
+    sizes[1] = bswap(sizes[1]);
     if (sizes[0] <= length) {
       uint32_t len = decompressOneBlock(sizes[1], buff, sizes[0]);
       if (len != sizes[0]) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c37c3f41/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/src/codec/Lz4Codec.cc
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/src/codec/Lz4Codec.cc b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/src/codec/Lz4Codec.cc
index 23c6c46..48c96b5 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/src/codec/Lz4Codec.cc
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/src/codec/Lz4Codec.cc
@@ -38,8 +38,8 @@ void Lz4CompressStream::compressOneBlock(const void * buff, uint32_t length) {
   int ret = LZ4_compress((char*)buff, _tempBuffer + 8, length);
   if (ret > 0) {
     compressedLength = ret;
-    ((uint32_t*)_tempBuffer)[0] = hadoop_be32toh(length);
-    ((uint32_t*)_tempBuffer)[1] = hadoop_be32toh((uint32_t)compressedLength);
+    ((uint32_t*)_tempBuffer)[0] = bswap(length);
+    ((uint32_t*)_tempBuffer)[1] = bswap((uint32_t)compressedLength);
     _stream->write(_tempBuffer, compressedLength + 8);
     _compressedBytesWritten += (compressedLength + 8);
   } else {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c37c3f41/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/src/codec/SnappyCodec.cc
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/src/codec/SnappyCodec.cc b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/src/codec/SnappyCodec.cc
index 04380ac..a0417e0 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/src/codec/SnappyCodec.cc
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/src/codec/SnappyCodec.cc
@@ -37,8 +37,8 @@ void SnappyCompressStream::compressOneBlock(const void * buff, uint32_t length)
   snappy_status ret = snappy_compress((const char*)buff, length, _tempBuffer + 8,
       &compressedLength);
   if (ret == SNAPPY_OK) {
-    ((uint32_t*)_tempBuffer)[0] = hadoop_be32toh(length);
-    ((uint32_t*)_tempBuffer)[1] = hadoop_be32toh((uint32_t)compressedLength);
+    ((uint32_t*)_tempBuffer)[0] = bswap(length);
+    ((uint32_t*)_tempBuffer)[1] = bswap((uint32_t)compressedLength);
     _stream->write(_tempBuffer, compressedLength + 8);
     _compressedBytesWritten += (compressedLength + 8);
   } else if (ret == SNAPPY_INVALID_INPUT) {
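
Both Lz4CompressStream and SnappyCompressStream above prefix every compressed block with an 8-byte header: two big-endian uint32 values holding the uncompressed length and the compressed length, which BlockDecompressStream reads back and byte-swaps before use. A minimal decoding sketch; the struct and function names are illustrative only, and a little-endian host is assumed:

  #include <stdint.h>

  struct BlockHeader {
    uint32_t uncompressedLength;
    uint32_t compressedLength;
  };

  // 'raw' points at the 8-byte header written by compressOneBlock();
  // bswap() is the helper from lib/primitives.h.
  inline BlockHeader readBlockHeader(const char * raw) {
    BlockHeader h;
    h.uncompressedLength = bswap(*(const uint32_t *)raw);
    h.compressedLength = bswap(*(const uint32_t *)(raw + 4));
    return h;
  }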

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c37c3f41/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/src/handler/CombineHandler.cc
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/src/handler/CombineHandler.cc b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/src/handler/CombineHandler.cc
index b18d057..5f3863e 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/src/handler/CombineHandler.cc
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/src/handler/CombineHandler.cc
@@ -15,7 +15,6 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-
 #include "CombineHandler.h"
 
 namespace NativeTask {
@@ -49,8 +48,8 @@ uint32_t CombineHandler::feedDataToJavaInWritableSerialization() {
 
   if (_kvCached) {
     uint32_t kvLength = _key.outerLength + _value.outerLength + KVBuffer::headerLength();
-    outputInt(hadoop_be32toh(_key.outerLength));
-    outputInt(hadoop_be32toh(_value.outerLength));
+    outputInt(bswap(_key.outerLength));
+    outputInt(bswap(_value.outerLength));
     outputKeyOrValue(_key, _kType);
     outputKeyOrValue(_value, _vType);
 
@@ -74,8 +73,8 @@ uint32_t CombineHandler::feedDataToJavaInWritableSerialization() {
     } else {
       firstKV = false;
       //write final key length and final value length
-      outputInt(hadoop_be32toh(_key.outerLength));
-      outputInt(hadoop_be32toh(_value.outerLength));
+      outputInt(bswap(_key.outerLength));
+      outputInt(bswap(_value.outerLength));
       outputKeyOrValue(_key, _kType);
       outputKeyOrValue(_value, _vType);
 
@@ -102,7 +101,7 @@ void CombineHandler::outputKeyOrValue(SerializeInfo & KV, KeyValueType type) {
     output(KV.buffer.data(), KV.buffer.length());
     break;
   case BytesType:
-    outputInt(hadoop_be32toh(KV.buffer.length()));
+    outputInt(bswap(KV.buffer.length()));
     output(KV.buffer.data(), KV.buffer.length());
     break;
   default:
@@ -203,8 +202,8 @@ void CombineHandler::write(char * buf, uint32_t length) {
   uint32_t outputRecordCount = 0;
   while (remain > 0) {
     kv = (KVBuffer *)pos;
-    kv->keyLength = hadoop_be32toh(kv->keyLength);
-    kv->valueLength = hadoop_be32toh(kv->valueLength);
+    kv->keyLength = bswap(kv->keyLength);
+    kv->valueLength = bswap(kv->valueLength);
     _writer->write(kv->getKey(), kv->keyLength, kv->getValue(), kv->valueLength);
     outputRecordCount++;
     remain -= kv->length();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c37c3f41/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/src/handler/MCollectorOutputHandler.cc
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/src/handler/MCollectorOutputHandler.cc b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/src/handler/MCollectorOutputHandler.cc
index 4921b33..7e4ae44 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/src/handler/MCollectorOutputHandler.cc
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/src/handler/MCollectorOutputHandler.cc
@@ -30,7 +30,7 @@ using std::vector;
 namespace NativeTask {
 
 MCollectorOutputHandler::MCollectorOutputHandler()
-    : _collector(NULL), _dest(NULL) {
+    : _collector(NULL), _dest(NULL), _endium(LARGE_ENDIUM) {
 }
 
 MCollectorOutputHandler::~MCollectorOutputHandler() {
@@ -73,9 +73,11 @@ void MCollectorOutputHandler::handleInput(ByteBuffer & in) {
       THROW_EXCEPTION(IOException, "k/v meta information incomplete");
     }
 
-    kvBuffer->partitionId = hadoop_be32toh(kvBuffer->partitionId);
-    kvBuffer->buffer.keyLength = hadoop_be32toh(kvBuffer->buffer.keyLength);
-    kvBuffer->buffer.valueLength = hadoop_be32toh(kvBuffer->buffer.valueLength);
+    if (_endium == LARGE_ENDIUM) {
+      kvBuffer->partitionId = bswap(kvBuffer->partitionId);
+      kvBuffer->buffer.keyLength = bswap(kvBuffer->buffer.keyLength);
+      kvBuffer->buffer.valueLength = bswap(kvBuffer->buffer.valueLength);
+    }
 
     uint32_t kvLength = kvBuffer->buffer.length();
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c37c3f41/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/src/handler/MCollectorOutputHandler.h
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/src/handler/MCollectorOutputHandler.h b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/src/handler/MCollectorOutputHandler.h
index 2e21806..fe4635f 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/src/handler/MCollectorOutputHandler.h
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/src/handler/MCollectorOutputHandler.h
@@ -35,6 +35,8 @@ private:
   // state info for large KV pairs
   char * _dest;
 
+  Endium _endium;
+
 public:
   MCollectorOutputHandler();
   virtual ~MCollectorOutputHandler();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c37c3f41/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/src/lib/Buffers.h
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/src/lib/Buffers.h b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/src/lib/Buffers.h
index 09606d8..4929426 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/src/lib/Buffers.h
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/src/lib/Buffers.h
@@ -115,7 +115,7 @@ public:
    * read uint32_t big endian
    */
   inline uint32_t read_uint32_be() {
-    return hadoop_be32toh(read_uint32_le());
+    return bswap(read_uint32_le());
   }
 };
 
@@ -198,7 +198,7 @@ public:
   }
 
   inline void write_uint32_be(uint32_t v) {
-    write_uint32_le(hadoop_be32toh(v));
+    write_uint32_le(bswap(v));
   }
 
   inline void write_uint64_le(uint64_t v) {
@@ -211,7 +211,7 @@ public:
   }
 
   inline void write_uint64_be(uint64_t v) {
-    write_uint64_le(hadoop_be64toh(v));
+    write_uint64_le(bswap64(v));
   }
 
   inline void write_vlong(int64_t v) {
@@ -278,11 +278,12 @@ struct KVBuffer {
   }
 
   uint32_t length() {
-    return keyLength + valueLength + SIZE_OF_KV_LENGTH;
+    return keyLength + valueLength + SIZE_OF_KEY_LENGTH + SIZE_OF_VALUE_LENGTH;
   }
 
   uint32_t lengthConvertEndium() {
-    return hadoop_be32toh(keyLength) + hadoop_be32toh(valueLength) + SIZE_OF_KV_LENGTH;
+    long value = bswap64(*((long *)this));
+    return (value >> 32) + value + SIZE_OF_KEY_LENGTH + SIZE_OF_VALUE_LENGTH;
   }
 
   void fill(const void * key, uint32_t keylen, const void * value, uint32_t vallen) {
@@ -298,7 +299,7 @@ struct KVBuffer {
   }
 
   static uint32_t headerLength() {
-    return SIZE_OF_KV_LENGTH;
+    return SIZE_OF_KEY_LENGTH + SIZE_OF_VALUE_LENGTH;
   }
 };
 

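lengthConvertEndium() above folds both length fields into a single bswap64(): reversing the 8-byte header swaps the two 32-bit fields and byte-swaps each of them, so the upper half of the result holds the byte-swapped keyLength and the lower half the byte-swapped valueLength. An equivalent, more explicit form, shown only as a sketch for a little-endian host with the field layout above (both forms discard any carry out of 32 bits):

  inline uint32_t lengthConvertEndiumExplicit(const KVBuffer & kv) {
    return bswap(kv.keyLength) + bswap(kv.valueLength)
        + SIZE_OF_KEY_LENGTH + SIZE_OF_VALUE_LENGTH;
  }
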
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c37c3f41/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/src/lib/IFile.cc
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/src/lib/IFile.cc b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/src/lib/IFile.cc
index cbe1b28..2d3e0b5 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/src/lib/IFile.cc
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/src/lib/IFile.cc
@@ -60,7 +60,7 @@ bool IFileReader::nextPartition() {
     if (4 != _stream->readFully(&chsum, 4)) {
       THROW_EXCEPTION(IOException, "read ifile checksum failed");
     }
-    uint32_t actual = hadoop_be32toh(chsum);
+    uint32_t actual = bswap(chsum);
     uint32_t expect = _source->getChecksum();
     if (actual != expect) {
       THROW_EXCEPTION_EX(IOException, "read ifile checksum not match, actual %x expect %x", actual,
@@ -130,7 +130,7 @@ void IFileWriter::endPartition() {
   }
 
   uint32_t chsum = _dest->getChecksum();
-  chsum = hadoop_be32toh(chsum);
+  chsum = bswap(chsum);
   _stream->write(&chsum, sizeof(chsum));
   _stream->flush();
   IFileSegment * info = &(_spillFileSegments[_spillFileSegments.size() - 1]);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c37c3f41/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/src/lib/IFile.h
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/src/lib/IFile.h b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/src/lib/IFile.h
index 414dc27..e397f90 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/src/lib/IFile.h
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/src/lib/IFile.h
@@ -74,7 +74,7 @@ public:
       keyLen = WritableUtils::ReadVInt(kvbuff, len);
       break;
     case BytesType:
-      keyLen = hadoop_be32toh(*(uint32_t*)kvbuff);
+      keyLen = bswap(*(uint32_t*)kvbuff);
       len = 4;
       break;
     default:
@@ -89,7 +89,7 @@ public:
       _valuePos = vbuff + len;
       break;
     case BytesType:
-      _valueLen = hadoop_be32toh(*(uint32_t*)vbuff);
+      _valueLen = bswap(*(uint32_t*)vbuff);
       _valuePos = vbuff + 4;
       break;
     default:

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c37c3f41/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/src/lib/NativeObjectFactory.cc
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/src/lib/NativeObjectFactory.cc b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/src/lib/NativeObjectFactory.cc
index 5633fcf..2185798 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/src/lib/NativeObjectFactory.cc
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/src/lib/NativeObjectFactory.cc
@@ -317,8 +317,8 @@ int NativeObjectFactory::IntComparator(const char * src, uint32_t srcLength, con
     uint32_t destLength) {
   int result = (*src) - (*dest);
   if (result == 0) {
-    uint32_t from = hadoop_be32toh(*(uint32_t*)src);
-    uint32_t to = hadoop_be32toh(*(uint32_t*)dest);
+    uint32_t from = bswap(*(uint32_t*)src);
+    uint32_t to = bswap(*(uint32_t*)dest);
     if (from > to) {
       return 1;
     } else if (from == to) {
@@ -335,8 +335,8 @@ int NativeObjectFactory::LongComparator(const char * src, uint32_t srcLength, co
   int result = (int)(*src) - (int)(*dest);
   if (result == 0) {
 
-    uint64_t from = hadoop_be64toh(*(uint64_t*)src);
-    uint64_t to = hadoop_be64toh(*(uint64_t*)dest);
+    uint64_t from = bswap64(*(uint64_t*)src);
+    uint64_t to = bswap64(*(uint64_t*)dest);
     if (from > to) {
       return 1;
     } else if (from == to) {
@@ -380,8 +380,8 @@ int NativeObjectFactory::FloatComparator(const char * src, uint32_t srcLength, c
     THROW_EXCEPTION_EX(IOException, "float comparator, while src/dest lengt is not 4");
   }
 
-  uint32_t from = hadoop_be32toh(*(uint32_t*)src);
-  uint32_t to = hadoop_be32toh(*(uint32_t*)dest);
+  uint32_t from = bswap(*(uint32_t*)src);
+  uint32_t to = bswap(*(uint32_t*)dest);
 
   float * srcValue = (float *)(&from);
   float * destValue = (float *)(&to);
@@ -401,8 +401,8 @@ int NativeObjectFactory::DoubleComparator(const char * src, uint32_t srcLength,
     THROW_EXCEPTION_EX(IOException, "double comparator, while src/dest lengt is not 4");
   }
 
-  uint64_t from = hadoop_be64toh(*(uint64_t*)src);
-  uint64_t to = hadoop_be64toh(*(uint64_t*)dest);
+  uint64_t from = bswap64(*(uint64_t*)src);
+  uint64_t to = bswap64(*(uint64_t*)dest);
 
   double * srcValue = (double *)(&from);
   double * destValue = (double *)(&to);
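
These comparators decode the big-endian byte sequences produced by the Java Writable serialization before comparing. A small, hypothetical usage sketch for IntComparator (the wrapper function and the include path are invented for illustration):

  #include <stdint.h>
  #include "lib/NativeObjectFactory.h"   // assumed header location

  void intComparatorExample() {
    uint32_t a = bswap(100u);   // big-endian encoding of 100 on a LE host
    uint32_t b = bswap(200u);   // big-endian encoding of 200
    int r = NativeObjectFactory::IntComparator((const char *)&a, 4,
                                                (const char *)&b, 4);
    // r < 0 here: 100 sorts before 200
    (void)r;
  }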

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c37c3f41/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/src/lib/SpillInfo.cc
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/src/lib/SpillInfo.cc b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/src/lib/SpillInfo.cc
index c1a36ce..9cff529 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/src/lib/SpillInfo.cc
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/src/lib/SpillInfo.cc
@@ -58,10 +58,10 @@ void SingleSpillInfo::writeSpillInfo(const std::string & filepath) {
     appendBuffer.flush();
     uint32_t chsum = dest.getChecksum();
 #ifdef SPILLRECORD_CHECKSUM_UINT
-    chsum = hadoop_be32toh(chsum);
+    chsum = bswap(chsum);
     fout->write(&chsum, sizeof(uint32_t));
 #else
-    uint64_t wtchsum = hadoop_be64toh((uint64_t)chsum);
+    uint64_t wtchsum = bswap64((uint64_t)chsum);
     fout->write(&wtchsum, sizeof(uint64_t));
 #endif
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c37c3f41/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/src/lib/commons.h
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/src/lib/commons.h b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/src/lib/commons.h
index 9c69f42..57500b7 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/src/lib/commons.h
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/src/lib/commons.h
@@ -41,7 +41,6 @@
 #include <map>
 #include <algorithm>
 
-#include "hadoop_endian.h"
 #include "lib/primitives.h"
 #include "lib/Log.h"
 #include "NativeTask.h"

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c37c3f41/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/src/lib/primitives.h
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/src/lib/primitives.h b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/src/lib/primitives.h
index 8a74a63..3bf5f76 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/src/lib/primitives.h
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/src/lib/primitives.h
@@ -28,7 +28,6 @@
 #include <stdint.h>
 #include <assert.h>
 #include <string>
-#include "hadoop_endian.h"
 
 #ifdef __GNUC__
 #define likely(x)       __builtin_expect((x),1)
@@ -95,6 +94,39 @@ inline void simple_memcpy(void * dest, const void * src, size_t len) {
 #endif
 
 /**
+ * little-endian to big-endian or vice versa
+ */
+inline uint32_t bswap(uint32_t val) {
+#ifdef __aarch64__
+  __asm__("rev %w[dst], %w[src]" : [dst]"=r"(val) : [src]"r"(val));
+#else
+  __asm__("bswap %0" : "=r" (val) : "0" (val));
+#endif
+  return val;
+}
+
+inline uint64_t bswap64(uint64_t val) {
+#ifdef __aarch64__
+  __asm__("rev %[dst], %[src]" : [dst]"=r"(val) : [src]"r"(val));
+#else
+#ifdef __X64
+  __asm__("bswapq %0" : "=r" (val) : "0" (val));
+#else
+
+  uint64_t lower = val & 0xffffffffU;
+  uint32_t higher = (val >> 32) & 0xffffffffU;
+
+  lower = bswap(lower);
+  higher = bswap(higher);
+
+  return (lower << 32) + higher;
+
+#endif
+#endif
+  return val;
+}
+
+/**
  * Fast memcmp
  */
 inline int64_t fmemcmp(const char * src, const char * dest, uint32_t len) {
@@ -126,16 +158,16 @@ inline int64_t fmemcmp(const char * src, const char * dest, uint32_t len) {
     return ((int64_t)src8[2] - (int64_t)dest8[2]);
   }
   case 4: {
-    return (int64_t)hadoop_be32toh(*(uint32_t*)src) - (int64_t)hadoop_be32toh(*(uint32_t*)dest);
+    return (int64_t)bswap(*(uint32_t*)src) - (int64_t)bswap(*(uint32_t*)dest);
   }
   }
   if (len < 8) {
-    int64_t ret = ((int64_t)hadoop_be32toh(*(uint32_t*)src) - (int64_t)hadoop_be32toh(*(uint32_t*)dest));
+    int64_t ret = ((int64_t)bswap(*(uint32_t*)src) - (int64_t)bswap(*(uint32_t*)dest));
     if (ret) {
       return ret;
     }
-    return ((int64_t)hadoop_be32toh(*(uint32_t*)(src + len - 4))
-        - (int64_t)hadoop_be32toh(*(uint32_t*)(dest + len - 4)));
+    return ((int64_t)bswap(*(uint32_t*)(src + len - 4))
+        - (int64_t)bswap(*(uint32_t*)(dest + len - 4)));
   }
   uint32_t cur = 0;
   uint32_t end = len & (0xffffffffU << 3);
@@ -143,8 +175,8 @@ inline int64_t fmemcmp(const char * src, const char * dest, uint32_t len) {
     uint64_t l = *(uint64_t*)(src8 + cur);
     uint64_t r = *(uint64_t*)(dest8 + cur);
     if (l != r) {
-      l = hadoop_be64toh(l);
-      r = hadoop_be64toh(r);
+      l = bswap64(l);
+      r = bswap64(r);
       return l > r ? 1 : -1;
     }
     cur += 8;
@@ -152,8 +184,8 @@ inline int64_t fmemcmp(const char * src, const char * dest, uint32_t len) {
   uint64_t l = *(uint64_t*)(src8 + len - 8);
   uint64_t r = *(uint64_t*)(dest8 + len - 8);
   if (l != r) {
-    l = hadoop_be64toh(l);
-    r = hadoop_be64toh(r);
+    l = bswap64(l);
+    r = bswap64(r);
     return l > r ? 1 : -1;
   }
   return 0;
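
bswap() and bswap64(), restored above, always reverse byte order regardless of the host's endianness (x86 bswap/bswapq, AArch64 rev), unlike the hadoop_be*toh() macros they replace, which are no-ops on a big-endian host. A small sanity-check sketch:

  #include <assert.h>
  #include <stdint.h>

  void bswapExamples() {
    assert(bswap(0x01020304u) == 0x04030201u);
    assert(bswap64(0x0102030405060708ULL) == 0x0807060504030201ULL);
  }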

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c37c3f41/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/src/util/WritableUtils.cc
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/src/util/WritableUtils.cc b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/src/util/WritableUtils.cc
index b9f434e..8ed8dd2 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/src/util/WritableUtils.cc
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/src/util/WritableUtils.cc
@@ -120,29 +120,29 @@ void WritableUtils::WriteVLongInner(int64_t v, char * pos, uint32_t & len) {
     len = 4;
   } else if (value < (1ULL << 32)) {
     *(pos++) = base - 3;
-    *(uint32_t*)(pos) = hadoop_be32toh((uint32_t)value);
+    *(uint32_t*)(pos) = bswap((uint32_t)value);
     len = 5;
   } else if (value < (1ULL << 40)) {
     *(pos++) = base - 4;
-    *(uint32_t*)(pos) = hadoop_be32toh((uint32_t)(value >> 8));
+    *(uint32_t*)(pos) = bswap((uint32_t)(value >> 8));
     *(uint8_t*)(pos + 4) = value;
     len = 6;
   } else if (value < (1ULL << 48)) {
     *(pos++) = base - 5;
-    *(uint32_t*)(pos) = hadoop_be32toh((uint32_t)(value >> 16));
+    *(uint32_t*)(pos) = bswap((uint32_t)(value >> 16));
     *(uint8_t*)(pos + 4) = value >> 8;
     *(uint8_t*)(pos + 5) = value;
     len = 7;
   } else if (value < (1ULL << 56)) {
     *(pos++) = base - 6;
-    *(uint32_t*)(pos) = hadoop_be32toh((uint32_t)(value >> 24));
+    *(uint32_t*)(pos) = bswap((uint32_t)(value >> 24));
     *(uint8_t*)(pos + 4) = value >> 16;
     *(uint8_t*)(pos + 5) = value >> 8;
     *(uint8_t*)(pos + 6) = value;
     len = 8;
   } else {
     *(pos++) = base - 7;
-    *(uint64_t*)pos = hadoop_be64toh(value);
+    *(uint64_t*)pos = bswap64(value);
     len = 9;
   }
 }
@@ -168,7 +168,7 @@ int64_t WritableUtils::ReadLong(InputStream * stream) {
   if (stream->readFully(&ret, 8) != 8) {
     THROW_EXCEPTION(IOException, "ReadLong reach EOF");
   }
-  return (int64_t)hadoop_be64toh(ret);
+  return (int64_t)bswap64(ret);
 }
 
 int32_t WritableUtils::ReadInt(InputStream * stream) {
@@ -176,7 +176,7 @@ int32_t WritableUtils::ReadInt(InputStream * stream) {
   if (stream->readFully(&ret, 4) != 4) {
     THROW_EXCEPTION(IOException, "ReadInt reach EOF");
   }
-  return (int32_t)hadoop_be32toh(ret);
+  return (int32_t)bswap(ret);
 }
 
 int16_t WritableUtils::ReadShort(InputStream * stream) {
@@ -192,7 +192,7 @@ float WritableUtils::ReadFloat(InputStream * stream) {
   if (stream->readFully(&ret, 4) != 4) {
     THROW_EXCEPTION(IOException, "ReadFloat reach EOF");
   }
-  ret = hadoop_be32toh(ret);
+  ret = bswap(ret);
   return *(float*)&ret;
 }
 
@@ -232,12 +232,12 @@ void WritableUtils::WriteVLong(OutputStream * stream, int64_t v) {
 }
 
 void WritableUtils::WriteLong(OutputStream * stream, int64_t v) {
-  uint64_t be = hadoop_be64toh((uint64_t)v);
+  uint64_t be = bswap64((uint64_t)v);
   stream->write(&be, 8);
 }
 
 void WritableUtils::WriteInt(OutputStream * stream, int32_t v) {
-  uint32_t be = hadoop_be32toh((uint32_t)v);
+  uint32_t be = bswap((uint32_t)v);
   stream->write(&be, 4);
 }
 
@@ -249,7 +249,7 @@ void WritableUtils::WriteShort(OutputStream * stream, int16_t v) {
 
 void WritableUtils::WriteFloat(OutputStream * stream, float v) {
   uint32_t intv = *(uint32_t*)&v;
-  intv = hadoop_be32toh(intv);
+  intv = bswap(intv);
   stream->write(&intv, 4);
 }
 
@@ -286,10 +286,10 @@ void WritableUtils::toString(string & dest, KeyValueType type, const void * data
     dest.append(*(uint8_t*)data ? "true" : "false");
     break;
   case IntType:
-    dest.append(StringUtil::ToString((int32_t)hadoop_be32toh(*(uint32_t*)data)));
+    dest.append(StringUtil::ToString((int32_t)bswap(*(uint32_t*)data)));
     break;
   case LongType:
-    dest.append(StringUtil::ToString((int64_t)hadoop_be64toh(*(uint64_t*)data)));
+    dest.append(StringUtil::ToString((int64_t)bswap64(*(uint64_t*)data)));
     break;
   case FloatType:
     dest.append(StringUtil::ToString(*(float*)data));
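
WriteInt()/WriteLong() above use bswap()/bswap64() to emit the big-endian byte order that Java's DataOutput.writeInt()/writeLong() uses, and ReadInt()/ReadLong() undo it. A round-trip sketch on a little-endian host (no stream involved, just the byte-order step):

  #include <stdint.h>

  void writableIntRoundTrip() {
    int32_t v = 0x12345678;
    uint32_t onWire = bswap((uint32_t)v);   // memory now holds 12 34 56 78
    int32_t back = (int32_t)bswap(onWire);  // 0x12345678 again
    (void)back;
  }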

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c37c3f41/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/test/TestIFile.cc
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/test/TestIFile.cc b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/test/TestIFile.cc
index 93417b4..e1e32d4 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/test/TestIFile.cc
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/test/TestIFile.cc
@@ -190,7 +190,7 @@ TEST(IFile, TestGlibCBug) {
   reader->nextPartition();
   uint32_t index = 0;
   while (NULL != (key = reader->nextKey(length))) {
-    int32_t realKey = (int32_t)hadoop_be32toh(*(uint32_t *)(key));
+    int32_t realKey = (int32_t)bswap(*(uint32_t *)(key));
     ASSERT_LT(index, 5);
     ASSERT_EQ(expect[index], realKey);
     index++;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c37c3f41/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/test/TestSort.cc
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/test/TestSort.cc b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/test/TestSort.cc
index 6d40dc2..1c391a6 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/test/TestSort.cc
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/test/TestSort.cc
@@ -38,8 +38,8 @@ inline int fmemcmporig(const char * src, const char * dest, uint32_t len) {
     uint64_t l = *src8;
     uint64_t r = *dest8;
     if (l != r) {
-      l = hadoop_be64toh(l);
-      r = hadoop_be64toh(r);
+      l = bswap64(l);
+      r = bswap64(r);
       return l > r ? 1 : -1;
     }
     ++src8;
@@ -59,8 +59,8 @@ inline int fmemcmporig(const char * src, const char * dest, uint32_t len) {
   if (l == r) {
     return 0;
   }
-  l = hadoop_be64toh(l);
-  r = hadoop_be64toh(r);
+  l = bswap64(l);
+  r = bswap64(r);
   return l > r ? 1 : -1;
 }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c37c3f41/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/test/lib/TestKVBuffer.cc
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/test/lib/TestKVBuffer.cc b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/test/lib/TestKVBuffer.cc
index dac79ba..e47e169 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/test/lib/TestKVBuffer.cc
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/test/lib/TestKVBuffer.cc
@@ -43,8 +43,8 @@ TEST(KVBuffer, test) {
   ASSERT_EQ(8, kv1->getKey() - buff);
   ASSERT_EQ(strlen(KEY) + 8, kv1->getValue() - buff);
 
-  kv1->keyLength = hadoop_be32toh(kv1->keyLength);
-  kv1->valueLength = hadoop_be32toh(kv1->valueLength);
+  kv1->keyLength = bswap(kv1->keyLength);
+  kv1->valueLength = bswap(kv1->valueLength);
 
   ASSERT_EQ(8, kv1->headerLength());
   ASSERT_EQ(strlen(KEY) + strlen(VALUE) + 8, kv1->lengthConvertEndium());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c37c3f41/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/test/lib/TestMemBlockIterator.cc
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/test/lib/TestMemBlockIterator.cc b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/test/lib/TestMemBlockIterator.cc
index 4025e3c..8d784fb 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/test/lib/TestMemBlockIterator.cc
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/test/lib/TestMemBlockIterator.cc
@@ -59,7 +59,7 @@ class MemoryBlockFactory {
       kv->keyLength = 4;
       kv->valueLength = 4;
       uint32_t * key = (uint32_t *)kv->getKey();
-      *key = hadoop_be32toh(index);
+      *key = bswap(index);
     }
     return block1;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c37c3f41/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/test/lib/TestMemoryBlock.cc
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/test/lib/TestMemoryBlock.cc b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/test/lib/TestMemoryBlock.cc
index fd9c29b..6af73c5 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/test/lib/TestMemoryBlock.cc
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/test/lib/TestMemoryBlock.cc
@@ -85,17 +85,17 @@ TEST(MemoryBlock, sort) {
   medium->keyLength = 4;
   medium->valueLength = 4;
   uint32_t * mediumKey = (uint32_t *)medium->getKey();
-  *mediumKey = hadoop_be32toh(MEDIUM);
+  *mediumKey = bswap(MEDIUM);
 
   small->keyLength = 4;
   small->valueLength = 4;
   uint32_t * smallKey = (uint32_t *)small->getKey();
-  *smallKey = hadoop_be32toh(SMALL);
+  *smallKey = bswap(SMALL);
 
   big->keyLength = 4;
   big->valueLength = 4;
   uint32_t * bigKey = (uint32_t *)big->getKey();
-  *bigKey = hadoop_be32toh(BIG);
+  *bigKey = bswap(BIG);
 
   ComparatorPtr bytesComparator = NativeTask::get_comparator(BytesType, NULL);
   block.sort(CPPSORT, bytesComparator);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c37c3f41/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/test/lib/TestPartitionBucket.cc
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/test/lib/TestPartitionBucket.cc b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/test/lib/TestPartitionBucket.cc
index d13987a..79e1b5e 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/test/lib/TestPartitionBucket.cc
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/test/lib/TestPartitionBucket.cc
@@ -16,7 +16,7 @@
  * limitations under the License.
  */
 
-#include "hadoop_endian.h"
+#include "lib/commons.h"
 #include "test_commons.h"
 #include "lib/PartitionBucket.h"
 #include "lib/PartitionBucketIterator.h"
@@ -129,15 +129,15 @@ TEST(PartitionBucket, sort) {
   const uint32_t BIG = 1000;
 
   kv1->keyLength = 4;
-  *((uint32_t *)kv1->getKey()) = hadoop_be32toh(BIG);
+  *((uint32_t *)kv1->getKey()) = bswap(BIG);
   kv1->valueLength = KV_SIZE - kv1->headerLength() - kv1->keyLength;
 
   kv2->keyLength = 4;
-  *((uint32_t *)kv2->getKey()) = hadoop_be32toh(SMALL);
+  *((uint32_t *)kv2->getKey()) = bswap(SMALL);
   kv2->valueLength = KV_SIZE - kv2->headerLength() - kv2->keyLength;
 
   kv3->keyLength = 4;
-  *((uint32_t *)kv3->getKey()) = hadoop_be32toh(MEDIUM);
+  *((uint32_t *)kv3->getKey()) = bswap(MEDIUM);
   kv3->valueLength = KV_SIZE - kv3->headerLength() - kv3->keyLength;
 
   bucket->sort(DUALPIVOTSORT);
@@ -148,13 +148,13 @@ TEST(PartitionBucket, sort) {
   Buffer value;
   iter->next(key, value);
 
-  ASSERT_EQ(SMALL, hadoop_be32toh(*(uint32_t * )key.data()));
+  ASSERT_EQ(SMALL, bswap(*(uint32_t * )key.data()));
 
   iter->next(key, value);
-  ASSERT_EQ(MEDIUM, hadoop_be32toh(*(uint32_t * )key.data()));
+  ASSERT_EQ(MEDIUM, bswap(*(uint32_t * )key.data()));
 
   iter->next(key, value);
-  ASSERT_EQ(BIG, hadoop_be32toh(*(uint32_t * )key.data()));
+  ASSERT_EQ(BIG, bswap(*(uint32_t * )key.data()));
 
   delete iter;
   delete bucket;
@@ -181,15 +181,15 @@ TEST(PartitionBucket, spill) {
   const uint32_t BIG = 1000;
 
   kv1->keyLength = 4;
-  *((uint32_t *)kv1->getKey()) = hadoop_be32toh(BIG);
+  *((uint32_t *)kv1->getKey()) = bswap(BIG);
   kv1->valueLength = KV_SIZE - KVBuffer::headerLength() - kv1->keyLength;
 
   kv2->keyLength = 4;
-  *((uint32_t *)kv2->getKey()) = hadoop_be32toh(SMALL);
+  *((uint32_t *)kv2->getKey()) = bswap(SMALL);
   kv2->valueLength = KV_SIZE - KVBuffer::headerLength() - kv2->keyLength;
 
   kv3->keyLength = 4;
-  *((uint32_t *)kv3->getKey()) = hadoop_be32toh(MEDIUM);
+  *((uint32_t *)kv3->getKey()) = bswap(MEDIUM);
   kv3->valueLength = KV_SIZE - KVBuffer::headerLength() - kv3->keyLength;
 
   bucket->sort(DUALPIVOTSORT);
@@ -203,17 +203,17 @@ TEST(PartitionBucket, spill) {
   KVBuffer * first = (KVBuffer *)writer.buff();
   ASSERT_EQ(4, first->keyLength);
   ASSERT_EQ(KV_SIZE - KVBuffer::headerLength() - 4, first->valueLength);
-  ASSERT_EQ(hadoop_be32toh(SMALL), (*(uint32_t * )(first->getKey())));
+  ASSERT_EQ(bswap(SMALL), (*(uint32_t * )(first->getKey())));
 
   KVBuffer * second = first->next();
   ASSERT_EQ(4, second->keyLength);
   ASSERT_EQ(KV_SIZE - KVBuffer::headerLength() - 4, second->valueLength);
-  ASSERT_EQ(hadoop_be32toh(MEDIUM), (*(uint32_t * )(second->getKey())));
+  ASSERT_EQ(bswap(MEDIUM), (*(uint32_t * )(second->getKey())));
 
   KVBuffer * third = second->next();
   ASSERT_EQ(4, third->keyLength);
   ASSERT_EQ(KV_SIZE - KVBuffer::headerLength() - 4, third->valueLength);
-  ASSERT_EQ(hadoop_be32toh(BIG), (*(uint32_t * )(third->getKey())));
+  ASSERT_EQ(bswap(BIG), (*(uint32_t * )(third->getKey())));
 
   delete [] buff;
   delete bucket;


[04/38] hadoop git commit: Revert "HADOOP-11505. Various native parts use bswap incorrectly and unportably (Alan Burlison via aw)"

Posted by as...@apache.org.
Revert "HADOOP-11505. Various native parts use bswap incorrectly and unportably (Alan Burlison via aw)"

This reverts commit 4c8125d60d47e98b1ec84422888975111e0cbcec.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2577e5b2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2577e5b2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2577e5b2

Branch: refs/heads/yarn-2877
Commit: 2577e5b2c10d8cbbe3c549cdbd8e671e8830e660
Parents: b2c7853
Author: cnauroth <cn...@apache.org>
Authored: Sat Nov 28 17:20:49 2015 -0800
Committer: cnauroth <cn...@apache.org>
Committed: Sat Nov 28 17:20:49 2015 -0800

----------------------------------------------------------------------
 .../hadoop-common/src/hadoop_endian.h.cmake     | 43 --------------------
 1 file changed, 43 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2577e5b2/hadoop-common-project/hadoop-common/src/hadoop_endian.h.cmake
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/hadoop_endian.h.cmake b/hadoop-common-project/hadoop-common/src/hadoop_endian.h.cmake
deleted file mode 100644
index b30d9bd..0000000
--- a/hadoop-common-project/hadoop-common/src/hadoop_endian.h.cmake
+++ /dev/null
@@ -1,43 +0,0 @@
-/**
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements.  See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership.  The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License.  You may obtain a copy of the License at
-*
-*     http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-*/
-
-/* Hadoop versions of http://austingroupbugs.net/view.php?id=162#c665 */
-
-#ifndef HADOOP_ENDIAN_H
-#define HADOOP_ENDIAN_H
-
-#include <@HADOOP_ENDIAN_H@>
-
-#define HADOOP_LITTLE_ENDIAN 1234
-#define HADOOP_BIG_ENDIAN    4321
-#cmakedefine HADOOP_BYTE_ORDER @HADOOP_BYTE_ORDER@
-
-#define hadoop_htobe16(X) @HADOOP_HTOBE16@(X)
-#define hadoop_htole16(X) @HADOOP_HTOLE16@(X)
-#define hadoop_be16toh(X) @HADOOP_BE16TOH@(X)
-#define hadoop_le16toh(X) @HADOOP_LE16TOH@(X)
-#define hadoop_htobe32(X) @HADOOP_HTOBE32@(X)
-#define hadoop_htole32(X) @HADOOP_HTOLE32@(X)
-#define hadoop_be32toh(X) @HADOOP_BE32TOH@(X)
-#define hadoop_le32toh(X) @HADOOP_LE32TOH@(X)
-#define hadoop_htobe64(X) @HADOOP_HTOBE64@(X)
-#define hadoop_htole64(X) @HADOOP_HTOLE64@(X)
-#define hadoop_be64toh(X) @HADOOP_BE64TOH@(X)
-#define hadoop_le64toh(X) @HADOOP_LE64TOH@(X)
-
-#endif
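
For reference, the deleted template only forwarded to the platform's native conversion routines; on a glibc-based Linux build the generated hadoop_endian.h would typically have reduced to something like the following sketch (not a file that exists in the tree):

  /* Hypothetical expansion on glibc/Linux. */
  #include <endian.h>

  #define hadoop_htobe32(X) htobe32(X)   /* host to big-endian, 32-bit */
  #define hadoop_be32toh(X) be32toh(X)   /* big-endian to host, 32-bit */
  #define hadoop_htobe64(X) htobe64(X)
  #define hadoop_be64toh(X) be64toh(X)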


[28/38] hadoop git commit: HDFS-9430 Remove waitForLoadingFSImage since checkNNStartup has ensured image loaded and namenode started. (Brahma Reddy Battula via mingma)

Posted by as...@apache.org.
HDFS-9430 Remove waitForLoadingFSImage since checkNNStartup has ensured image loaded and namenode started. (Brahma Reddy Battula via mingma)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3fa33b5c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3fa33b5c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3fa33b5c

Branch: refs/heads/yarn-2877
Commit: 3fa33b5c2c289ceaced30c6c5451f3569110459d
Parents: e84d6ca
Author: Ming Ma <mi...@apache.org>
Authored: Fri Dec 4 09:47:57 2015 -0800
Committer: Ming Ma <mi...@apache.org>
Committed: Fri Dec 4 09:47:57 2015 -0800

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |  3 ++
 .../hdfs/server/namenode/FSNamesystem.java      | 38 --------------------
 2 files changed, 3 insertions(+), 38 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3fa33b5c/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 40fdc58..17cbe29 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -2439,6 +2439,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-9484. NNThroughputBenchmark$BlockReportStats should not send empty
     block reports. (Mingliang Liu via shv)
 
+    HDFS-9430. Remove waitForLoadingFSImage since checkNNStartup has ensured
+    image loaded and namenode started. (Brahma Reddy Battula via mingma)
+
 Release 2.7.3 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3fa33b5c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 6af7265..9c9d9f5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -564,25 +564,6 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
   }
 
   /**
-   * Block until the object is imageLoaded to be used.
-   */
-  void waitForLoadingFSImage() {
-    if (!imageLoaded) {
-      writeLock();
-      try {
-        while (!imageLoaded) {
-          try {
-            cond.await(5000, TimeUnit.MILLISECONDS);
-          } catch (InterruptedException ignored) {
-          }
-        }
-      } finally {
-        writeUnlock();
-      }
-    }
-  }
-
-  /**
    * Clear all loaded data
    */
   void clear() {
@@ -1802,7 +1783,6 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
    */
   void concat(String target, String [] srcs, boolean logRetryCache)
       throws IOException {
-    waitForLoadingFSImage();
     HdfsFileStatus stat = null;
     boolean success = false;
     writeLock();
@@ -1899,7 +1879,6 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
     if (!FileSystem.areSymlinksEnabled()) {
       throw new UnsupportedOperationException("Symlinks not supported");
     }
-    waitForLoadingFSImage();
     HdfsFileStatus auditStat = null;
     writeLock();
     try {
@@ -1933,7 +1912,6 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
   boolean setReplication(final String src, final short replication)
       throws IOException {
     boolean success = false;
-    waitForLoadingFSImage();
     checkOperation(OperationCategory.WRITE);
     writeLock();
     try {
@@ -1961,7 +1939,6 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
    */
   void setStoragePolicy(String src, String policyName) throws IOException {
     HdfsFileStatus auditStat;
-    waitForLoadingFSImage();
     checkOperation(OperationCategory.WRITE);
     writeLock();
     try {
@@ -1988,7 +1965,6 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
    */
   BlockStoragePolicy getStoragePolicy(String src) throws IOException {
     checkOperation(OperationCategory.READ);
-    waitForLoadingFSImage();
     readLock();
     try {
       checkOperation(OperationCategory.READ);
@@ -2003,7 +1979,6 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
    */
   BlockStoragePolicy[] getStoragePolicies() throws IOException {
     checkOperation(OperationCategory.READ);
-    waitForLoadingFSImage();
     readLock();
     try {
       checkOperation(OperationCategory.READ);
@@ -2132,7 +2107,6 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
     }
 
     FSPermissionChecker pc = getPermissionChecker();
-    waitForLoadingFSImage();
 
     /**
      * If the file is in an encryption zone, we optimistically create an
@@ -2414,7 +2388,6 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
     NameNode.stateChangeLog.debug("BLOCK* getAdditionalBlock: {}  inodeId {}" +
         " for {}", src, fileId, clientName);
 
-    waitForLoadingFSImage();
     LocatedBlock[] onRetryBlock = new LocatedBlock[1];
     FSDirWriteFileOp.ValidateAddBlockResult r;
     FSPermissionChecker pc = getPermissionChecker();
@@ -2525,7 +2498,6 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
       throws IOException {
     NameNode.stateChangeLog.debug(
         "BLOCK* NameSystem.abandonBlock: {} of file {}", b, src);
-    waitForLoadingFSImage();
     checkOperation(OperationCategory.WRITE);
     FSPermissionChecker pc = getPermissionChecker();
     writeLock();
@@ -2593,7 +2565,6 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
     throws IOException {
     boolean success = false;
     checkOperation(OperationCategory.WRITE);
-    waitForLoadingFSImage();
     FSPermissionChecker pc = getPermissionChecker();
     writeLock();
     try {
@@ -2651,7 +2622,6 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
   @Deprecated
   boolean renameTo(String src, String dst, boolean logRetryCache)
       throws IOException {
-    waitForLoadingFSImage();
     FSDirRenameOp.RenameOldResult ret = null;
     writeLock();
     try {
@@ -2676,7 +2646,6 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
   void renameTo(final String src, final String dst,
                 boolean logRetryCache, Options.Rename... options)
       throws IOException {
-    waitForLoadingFSImage();
     Map.Entry<BlocksMapUpdateInfo, HdfsFileStatus> res = null;
     writeLock();
     try {
@@ -2712,7 +2681,6 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
    */
   boolean delete(String src, boolean recursive, boolean logRetryCache)
       throws IOException {
-    waitForLoadingFSImage();
     BlocksMapUpdateInfo toRemovedBlocks = null;
     writeLock();
     boolean ret = false;
@@ -2940,7 +2908,6 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
     byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
 
     FSPermissionChecker pc = getPermissionChecker();
-    waitForLoadingFSImage();
     writeLock();
     try {
       checkOperation(OperationCategory.WRITE);
@@ -3193,7 +3160,6 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
     // since we just remove the uc feature from pendingFile
     pendingFile.toCompleteFile(now());
 
-    waitForLoadingFSImage();
     // close file and persist block allocations for this file
     closeFile(src, pendingFile);
 
@@ -3256,7 +3222,6 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
              + ")");
     checkOperation(OperationCategory.WRITE);
     final String src;
-    waitForLoadingFSImage();
     writeLock();
     boolean copyTruncate = false;
     BlockInfo truncatedBlock = null;
@@ -3602,7 +3567,6 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
    */
   private void closeFile(String path, INodeFile file) {
     assert hasWriteLock();
-    waitForLoadingFSImage();
     // file is closed
     getEditLog().logCloseFile(path, file);
     NameNode.stateChangeLog.debug("closeFile: {} with {} blocks is persisted" +
@@ -4748,7 +4712,6 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
              + ", newNodes=" + Arrays.asList(newNodes)
              + ", client=" + clientName
              + ")");
-    waitForLoadingFSImage();
     writeLock();
     try {
       checkOperation(OperationCategory.WRITE);
@@ -6591,7 +6554,6 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
    */
   ErasureCodingPolicy[] getErasureCodingPolicies() throws IOException {
     checkOperation(OperationCategory.READ);
-    waitForLoadingFSImage();
     readLock();
     try {
       checkOperation(OperationCategory.READ);


[26/38] hadoop git commit: YARN-4405. Support node label store in non-appendable file system. Contributed by Wangda Tan

Posted by as...@apache.org.
YARN-4405. Support node label store in non-appendable file system. Contributed by Wangda Tan


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/755dda8d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/755dda8d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/755dda8d

Branch: refs/heads/yarn-2877
Commit: 755dda8dd8bb23864abc752bad506f223fcac010
Parents: 924a33d
Author: Jian He <ji...@apache.org>
Authored: Thu Dec 3 17:45:31 2015 -0800
Committer: Jian He <ji...@apache.org>
Committed: Thu Dec 3 17:45:31 2015 -0800

----------------------------------------------------------------------
 .../conf/TestConfigurationFieldsBase.java       | 14 ++---
 hadoop-yarn-project/CHANGES.txt                 |  3 +
 .../hadoop/yarn/conf/YarnConfiguration.java     |  6 ++
 .../yarn/conf/TestYarnConfigurationFields.java  |  2 +
 .../nodelabels/CommonNodeLabelsManager.java     | 15 ++++-
 .../nodelabels/FileSystemNodeLabelsStore.java   | 65 +++++++++++---------
 .../hadoop/yarn/nodelabels/NodeLabelsStore.java | 16 ++---
 .../src/main/resources/yarn-default.xml         |  8 +++
 .../DummyCommonNodeLabelsManager.java           |  6 +-
 .../TestFileSystemNodeLabelsStore.java          | 28 +++++++--
 .../nodelabels/NullRMNodeLabelsManager.java     |  4 +-
 11 files changed, 106 insertions(+), 61 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/755dda8d/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfigurationFieldsBase.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfigurationFieldsBase.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfigurationFieldsBase.java
index 2e4d8b1..e528602 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfigurationFieldsBase.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfigurationFieldsBase.java
@@ -18,27 +18,22 @@
 
 package org.apache.hadoop.conf;
 
-import java.lang.Class;
+import org.junit.Before;
+import org.junit.Ignore;
+import org.junit.Test;
+
 import java.lang.reflect.Field;
 import java.lang.reflect.Modifier;
-import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Iterator;
-import java.util.List;
 import java.util.Map;
-import java.util.Map.Entry;
 import java.util.Set;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
-import org.junit.Before;
-import org.junit.Ignore;
-import org.junit.Test;
 import static org.junit.Assert.assertTrue;
 
-import org.apache.hadoop.conf.Configuration;
-
 /**
  * Base class for comparing fields in one or more Configuration classes
  * against a corresponding .xml file.  Usage is intended as follows:
@@ -331,6 +326,7 @@ public abstract class TestConfigurationFieldsBase {
   private static Set<String> compareConfigurationToXmlFields(Map<String,String> keyMap1, Map<String,String> keyMap2) {
     Set<String> retVal = new HashSet<String>(keyMap1.keySet());
     retVal.removeAll(keyMap2.keySet());
+
     return retVal;
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/755dda8d/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 6043e64..90ada4b 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -589,6 +589,9 @@ Release 2.8.0 - UNRELEASED
     YARN-4292. ResourceUtilization should be a part of NodeInfo REST API.
     (Sunil G via wangda)
 
+    YARN-4405. Support node label store in non-appendable file system. (Wangda
+    Tan via jianhe)
+
   OPTIMIZATIONS
 
     YARN-3339. TestDockerContainerExecutor should pull a single image and not

http://git-wip-us.apache.org/repos/asf/hadoop/blob/755dda8d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index f493fd3..cbd28ca 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -2072,6 +2072,12 @@ public class YarnConfiguration extends Configuration {
    */
   public static final String NODE_LABELS_PREFIX = YARN_PREFIX + "node-labels.";
   
+  /** Node label store implementation class */
+  public static final String FS_NODE_LABELS_STORE_IMPL_CLASS = NODE_LABELS_PREFIX
+      + "fs-store.impl.class";
+  public static final String DEFAULT_FS_NODE_LABELS_STORE_IMPL_CLASS =
+      "org.apache.hadoop.yarn.nodelabels.FileSystemNodeLabelsStore";
+  
   /** URI for NodeLabelManager */
   public static final String FS_NODE_LABELS_STORE_ROOT_DIR = NODE_LABELS_PREFIX
       + "fs-store.root-dir";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/755dda8d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
index 97fcfa1..dec5cfb 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
@@ -49,6 +49,8 @@ public class TestYarnConfigurationFields extends TestConfigurationFieldsBase {
 
     // Specific properties to skip
     configurationPropsToSkipCompare
+        .add(YarnConfiguration.DEFAULT_FS_NODE_LABELS_STORE_IMPL_CLASS);
+    configurationPropsToSkipCompare
         .add(YarnConfiguration.DEFAULT_RM_CONFIGURATION_PROVIDER_CLASS);
     configurationPropsToSkipCompare
         .add(YarnConfiguration.DEFAULT_CLIENT_FAILOVER_PROXY_PROVIDER);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/755dda8d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/CommonNodeLabelsManager.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/CommonNodeLabelsManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/CommonNodeLabelsManager.java
index a00c49d..8b26cc5 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/CommonNodeLabelsManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/CommonNodeLabelsManager.java
@@ -42,6 +42,7 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.service.AbstractService;
+import org.apache.hadoop.util.ReflectionUtils;
 import org.apache.hadoop.yarn.api.records.NodeId;
 import org.apache.hadoop.yarn.api.records.NodeLabel;
 import org.apache.hadoop.yarn.api.records.Resource;
@@ -224,10 +225,20 @@ public class CommonNodeLabelsManager extends AbstractService {
     labelCollections.put(NO_LABEL, new RMNodeLabel(NO_LABEL));
   }
 
+  boolean isCentralizedConfiguration() {
+    return isCentralizedNodeLabelConfiguration;
+  }
+
   protected void initNodeLabelStore(Configuration conf) throws Exception {
-    this.store = new FileSystemNodeLabelsStore(this);
+    this.store =
+        ReflectionUtils
+            .newInstance(
+                conf.getClass(YarnConfiguration.FS_NODE_LABELS_STORE_IMPL_CLASS,
+                    FileSystemNodeLabelsStore.class, NodeLabelsStore.class),
+                conf);
+    this.store.setNodeLabelsManager(this);
     this.store.init(conf);
-    this.store.recover(!isCentralizedNodeLabelConfiguration);
+    this.store.recover();
   }
 
   // for UT purpose

http://git-wip-us.apache.org/repos/asf/hadoop/blob/755dda8d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/FileSystemNodeLabelsStore.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/FileSystemNodeLabelsStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/FileSystemNodeLabelsStore.java
index c9727a2..a65349b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/FileSystemNodeLabelsStore.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/FileSystemNodeLabelsStore.java
@@ -52,11 +52,6 @@ import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.ReplaceLabelsOn
 import com.google.common.collect.Sets;
 
 public class FileSystemNodeLabelsStore extends NodeLabelsStore {
-
-  public FileSystemNodeLabelsStore(CommonNodeLabelsManager mgr) {
-    super(mgr);
-  }
-
   protected static final Log LOG = LogFactory.getLog(FileSystemNodeLabelsStore.class);
 
   protected static final String DEFAULT_DIR_NAME = "node-labels";
@@ -69,8 +64,8 @@ public class FileSystemNodeLabelsStore extends NodeLabelsStore {
 
   Path fsWorkingPath;
   FileSystem fs;
-  FSDataOutputStream editlogOs;
-  Path editLogPath;
+  private FSDataOutputStream editlogOs;
+  private Path editLogPath;
   
   private String getDefaultFSNodeLabelsRootDir() throws IOException {
     // default is in local: /tmp/hadoop-yarn-${user}/node-labels/
@@ -160,12 +155,40 @@ public class FileSystemNodeLabelsStore extends NodeLabelsStore {
       ensureCloseEditlogFile();
     }
   }
+  
+  protected void loadFromMirror(Path newMirrorPath, Path oldMirrorPath)
+      throws IOException {
+    // If mirror.new exists, read from mirror.new,
+    FSDataInputStream is = null;
+    if (fs.exists(newMirrorPath)) {
+      is = fs.open(newMirrorPath);
+    } else if (fs.exists(oldMirrorPath)) {
+      is = fs.open(oldMirrorPath);
+    }
+
+    if (null != is) {
+      List<NodeLabel> labels = new AddToClusterNodeLabelsRequestPBImpl(
+          AddToClusterNodeLabelsRequestProto.parseDelimitedFrom(is))
+              .getNodeLabels();
+      mgr.addToCluserNodeLabels(labels);
+
+      if (mgr.isCentralizedConfiguration()) {
+        // Only load node to labels mapping while using centralized configuration
+        Map<NodeId, Set<String>> nodeToLabels =
+            new ReplaceLabelsOnNodeRequestPBImpl(
+                ReplaceLabelsOnNodeRequestProto.parseDelimitedFrom(is))
+                  .getNodeToLabels();
+        mgr.replaceLabelsOnNode(nodeToLabels);
+      }
+      is.close();
+    }
+  }
 
   /* (non-Javadoc)
    * @see org.apache.hadoop.yarn.nodelabels.NodeLabelsStore#recover(boolean)
    */
   @Override
-  public void recover(boolean ignoreNodeToLabelsMappings) throws YarnException,
+  public void recover() throws YarnException,
       IOException {
     /*
      * Steps of recover
@@ -181,31 +204,13 @@ public class FileSystemNodeLabelsStore extends NodeLabelsStore {
     // Open mirror from serialized file
     Path mirrorPath = new Path(fsWorkingPath, MIRROR_FILENAME);
     Path oldMirrorPath = new Path(fsWorkingPath, MIRROR_FILENAME + ".old");
-
-    FSDataInputStream is = null;
-    if (fs.exists(mirrorPath)) {
-      is = fs.open(mirrorPath);
-    } else if (fs.exists(oldMirrorPath)) {
-      is = fs.open(oldMirrorPath);
-    }
-
-    if (null != is) {
-      List<NodeLabel> labels =
-          new AddToClusterNodeLabelsRequestPBImpl(
-              AddToClusterNodeLabelsRequestProto.parseDelimitedFrom(is)).getNodeLabels();
-      Map<NodeId, Set<String>> nodeToLabels =
-          new ReplaceLabelsOnNodeRequestPBImpl(
-              ReplaceLabelsOnNodeRequestProto.parseDelimitedFrom(is))
-              .getNodeToLabels();
-      mgr.addToCluserNodeLabels(labels);
-      mgr.replaceLabelsOnNode(nodeToLabels);
-      is.close();
-    }
+    
+    loadFromMirror(mirrorPath, oldMirrorPath);
 
     // Open and process editlog
     editLogPath = new Path(fsWorkingPath, EDITLOG_FILENAME);
     if (fs.exists(editLogPath)) {
-      is = fs.open(editLogPath);
+      FSDataInputStream is = fs.open(editLogPath);
 
       while (true) {
         try {
@@ -233,7 +238,7 @@ public class FileSystemNodeLabelsStore extends NodeLabelsStore {
                 new ReplaceLabelsOnNodeRequestPBImpl(
                     ReplaceLabelsOnNodeRequestProto.parseDelimitedFrom(is))
                     .getNodeToLabels();
-            if (!ignoreNodeToLabelsMappings) {
+            if (mgr.isCentralizedConfiguration()) {
               /*
                * In case of Distributed NodeLabels setup,
                * ignoreNodeToLabelsMappings will be set to true and recover will

http://git-wip-us.apache.org/repos/asf/hadoop/blob/755dda8d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/NodeLabelsStore.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/NodeLabelsStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/NodeLabelsStore.java
index 46b94fd..aacb920 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/NodeLabelsStore.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/NodeLabelsStore.java
@@ -31,11 +31,7 @@ import org.apache.hadoop.yarn.api.records.NodeLabel;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 
 public abstract class NodeLabelsStore implements Closeable {
-  protected final CommonNodeLabelsManager mgr;
-
-  public NodeLabelsStore(CommonNodeLabelsManager mgr) {
-    this.mgr = mgr;
-  }
+  protected CommonNodeLabelsManager mgr;
   
   /**
    * Store node {@literal ->} label
@@ -62,16 +58,14 @@ public abstract class NodeLabelsStore implements Closeable {
    * ignoreNodeToLabelsMappings will be set to true and recover will be invoked
    * as RM will collect the node labels from NM through registration/HB
    *
-   * @param ignoreNodeToLabelsMappings
    * @throws IOException
    * @throws YarnException
    */
-  public abstract void recover(boolean ignoreNodeToLabelsMappings)
-      throws IOException, YarnException;
+  public abstract void recover() throws IOException, YarnException;
   
   public void init(Configuration conf) throws Exception {}
-  
-  public CommonNodeLabelsManager getNodeLabelsManager() {
-    return mgr;
+
+  public void setNodeLabelsManager(CommonNodeLabelsManager mgr) {
+    this.mgr = mgr;
   }
 }
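
Taken together with the CommonNodeLabelsManager change above, the store lifecycle becomes: reflective construction from configuration, manager injection through the new setter, then init and the argument-less recover. A condensed sketch of that sequence ('conf' and 'mgr' assumed to be in scope):

    NodeLabelsStore store = ReflectionUtils.newInstance(
        conf.getClass(YarnConfiguration.FS_NODE_LABELS_STORE_IMPL_CLASS,
            FileSystemNodeLabelsStore.class, NodeLabelsStore.class),
        conf);
    store.setNodeLabelsManager(mgr);  // replaces the old constructor argument
    store.init(conf);
    store.recover();                  // no ignoreNodeToLabelsMappings flag; stores
                                      // now ask mgr.isCentralizedConfiguration()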

http://git-wip-us.apache.org/repos/asf/hadoop/blob/755dda8d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index 9bbdb94..c862ef2 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -2451,4 +2451,12 @@
     <name>yarn.am.blacklisting.disable-failure-threshold</name>
     <value>0.8f</value>
   </property>
+
+  <property>
+    <description>
+    Choose different implementation of node label's storage
+    </description>
+    <name>yarn.node-labels.fs-store.impl.class</name>
+    <value>org.apache.hadoop.yarn.nodelabels.FileSystemNodeLabelsStore</value>
+  </property>
 </configuration>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/755dda8d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/nodelabels/DummyCommonNodeLabelsManager.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/nodelabels/DummyCommonNodeLabelsManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/nodelabels/DummyCommonNodeLabelsManager.java
index fce663a..64c74c2 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/nodelabels/DummyCommonNodeLabelsManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/nodelabels/DummyCommonNodeLabelsManager.java
@@ -36,10 +36,10 @@ public class DummyCommonNodeLabelsManager extends CommonNodeLabelsManager {
 
   @Override
   public void initNodeLabelStore(Configuration conf) {
-    this.store = new NodeLabelsStore(this) {
+    this.store = new NodeLabelsStore() {
 
       @Override
-      public void recover(boolean ignoreNodeToLabelsMappings)
+      public void recover()
           throws IOException {
       }
 
@@ -65,6 +65,8 @@ public class DummyCommonNodeLabelsManager extends CommonNodeLabelsManager {
         // do nothing 
       }
     };
+
+    this.store.setNodeLabelsManager(this);
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hadoop/blob/755dda8d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/nodelabels/TestFileSystemNodeLabelsStore.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/nodelabels/TestFileSystemNodeLabelsStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/nodelabels/TestFileSystemNodeLabelsStore.java
index 4929f95..82e4e11 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/nodelabels/TestFileSystemNodeLabelsStore.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/nodelabels/TestFileSystemNodeLabelsStore.java
@@ -21,6 +21,7 @@ package org.apache.hadoop.yarn.nodelabels;
 import java.io.File;
 import java.io.IOException;
 import java.util.Arrays;
+import java.util.Collection;
 import java.util.Map;
 
 import org.apache.hadoop.conf.Configuration;
@@ -33,13 +34,17 @@ import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+import org.mockito.Mockito;
 
 import com.google.common.collect.ImmutableMap;
-import org.mockito.Mockito;
 
+@RunWith(Parameterized.class)
 public class TestFileSystemNodeLabelsStore extends NodeLabelTestBase {
   MockNodeLabelManager mgr = null;
   Configuration conf = null;
+  String storeClassName = null;
 
   private static class MockNodeLabelManager extends
       CommonNodeLabelsManager {
@@ -59,8 +64,15 @@ public class TestFileSystemNodeLabelsStore extends NodeLabelTestBase {
     }
   }
   
-  private FileSystemNodeLabelsStore getStore() {
-    return (FileSystemNodeLabelsStore) mgr.store;
+  public TestFileSystemNodeLabelsStore(String className) {
+    this.storeClassName = className;
+  }
+  
+  @Parameterized.Parameters
+  public static Collection<String[]> getParameters() {
+    return Arrays.asList(
+        new String[][] { { FileSystemNodeLabelsStore.class.getCanonicalName() },
+            { NonAppendableFSNodeLabelStore.class.getCanonicalName() } });
   }
 
   @Before
@@ -68,6 +80,7 @@ public class TestFileSystemNodeLabelsStore extends NodeLabelTestBase {
     mgr = new MockNodeLabelManager();
     conf = new Configuration();
     conf.setBoolean(YarnConfiguration.NODE_LABELS_ENABLED, true);
+    conf.set(YarnConfiguration.FS_NODE_LABELS_STORE_IMPL_CLASS, storeClassName);
     File tempDir = File.createTempFile("nlb", ".tmp");
     tempDir.delete();
     tempDir.mkdirs();
@@ -80,7 +93,11 @@ public class TestFileSystemNodeLabelsStore extends NodeLabelTestBase {
 
   @After
   public void after() throws IOException {
-    getStore().fs.delete(getStore().fsWorkingPath, true);
+    if (mgr.store instanceof FileSystemNodeLabelsStore) {
+      FileSystemNodeLabelsStore fsStore =
+          ((FileSystemNodeLabelsStore) mgr.store);
+      fsStore.fs.delete(fsStore.fsWorkingPath, true);
+    }
     mgr.stop();
   }
 
@@ -324,11 +341,12 @@ public class TestFileSystemNodeLabelsStore extends NodeLabelTestBase {
   @Test
   public void testRootMkdirOnInitStore() throws Exception {
     final FileSystem mockFs = Mockito.mock(FileSystem.class);
-    FileSystemNodeLabelsStore mockStore = new FileSystemNodeLabelsStore(mgr) {
+    FileSystemNodeLabelsStore mockStore = new FileSystemNodeLabelsStore() {
       void setFileSystem(Configuration conf) throws IOException {
         fs = mockFs;
       }
     };
+    mockStore.setNodeLabelsManager(mgr);
     mockStore.fs = mockFs;
     verifyMkdirsCount(mockStore, true, 0);
     verifyMkdirsCount(mockStore, false, 1);
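
The test now runs once per store implementation via the JUnit Parameterized runner: the class is instantiated once for every entry returned by the @Parameters method, and each entry is passed to the constructor. A minimal, self-contained sketch of that pattern (names are illustrative only):

    import java.util.Arrays;
    import java.util.Collection;
    import org.junit.Test;
    import org.junit.runner.RunWith;
    import org.junit.runners.Parameterized;

    @RunWith(Parameterized.class)
    public class PerStoreClassTest {
      private final String storeClassName;

      public PerStoreClassTest(String storeClassName) {
        this.storeClassName = storeClassName;
      }

      @Parameterized.Parameters
      public static Collection<String[]> getParameters() {
        return Arrays.asList(new String[][] { { "StoreA" }, { "StoreB" } });
      }

      @Test
      public void runsOncePerParameter() {
        // each run sees a different storeClassName
      }
    }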

http://git-wip-us.apache.org/repos/asf/hadoop/blob/755dda8d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/nodelabels/NullRMNodeLabelsManager.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/nodelabels/NullRMNodeLabelsManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/nodelabels/NullRMNodeLabelsManager.java
index 2e21d26..bb0b45f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/nodelabels/NullRMNodeLabelsManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/nodelabels/NullRMNodeLabelsManager.java
@@ -37,10 +37,10 @@ public class NullRMNodeLabelsManager extends RMNodeLabelsManager {
 
   @Override
   public void initNodeLabelStore(Configuration conf) {
-    this.store = new NodeLabelsStore(this) {
+    this.store = new NodeLabelsStore() {
 
       @Override
-      public void recover(boolean ignoreNodeToLabelsMappings)
+      public void recover()
           throws IOException {
         // do nothing
       }


[18/38] hadoop git commit: YARN-4398. Remove unnecessary synchronization in RMStateStore. Contributed by Ning Ding

Posted by as...@apache.org.
YARN-4398. Remove unnecessary synchronization in RMStateStore. Contributed by Ning Ding


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6b9a5beb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6b9a5beb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6b9a5beb

Branch: refs/heads/yarn-2877
Commit: 6b9a5beb2b2f9589ef86670f2d763e8488ee5e90
Parents: 53e3bf7
Author: Jian He <ji...@apache.org>
Authored: Wed Dec 2 11:07:18 2015 -0800
Committer: Jian He <ji...@apache.org>
Committed: Wed Dec 2 11:07:18 2015 -0800

----------------------------------------------------------------------
 hadoop-yarn-project/CHANGES.txt                           |  2 ++
 .../org/apache/hadoop/yarn/event/AsyncDispatcher.java     |  5 +----
 .../server/resourcemanager/recovery/RMStateStore.java     | 10 +++++-----
 3 files changed, 8 insertions(+), 9 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6b9a5beb/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 5992ee8..748a841 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -1121,6 +1121,8 @@ Release 2.7.3 - UNRELEASED
     YARN-4380. TestResourceLocalizationService.testDownloadingResourcesOnContainerKill
     fails intermittently. (Varun Saxena via ozawa)
 
+    YARN-4398. Remove unnecessary synchronization in RMStateStore. (Ning Ding via jianhe)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6b9a5beb/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/AsyncDispatcher.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/AsyncDispatcher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/AsyncDispatcher.java
index 6cdfaad..403381b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/AsyncDispatcher.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/AsyncDispatcher.java
@@ -64,7 +64,7 @@ public class AsyncDispatcher extends AbstractService implements Dispatcher {
   // For drainEventsOnStop enabled only, block newly coming events into the
   // queue while stopping.
   private volatile boolean blockNewEvents = false;
-  private EventHandler handlerInstance = null;
+  private final EventHandler handlerInstance = new GenericEventHandler();
 
   private Thread eventHandlingThread;
   protected final Map<Class<? extends Enum>, EventHandler> eventDispatchers;
@@ -224,9 +224,6 @@ public class AsyncDispatcher extends AbstractService implements Dispatcher {
 
   @Override
   public EventHandler getEventHandler() {
-    if (handlerInstance == null) {
-      handlerInstance = new GenericEventHandler();
-    }
     return handlerInstance;
   }
 

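One reading of the AsyncDispatcher change above: the handler used to be created lazily in getEventHandler() without synchronization, so concurrent callers could race on the null check; eagerly initializing a final field gives safe publication and makes the getter trivially thread-safe. An illustrative sketch of the two shapes (types are stand-ins, not the actual YARN classes):

    interface Handler { void handle(Object event); }

    class LazyHolder {                // old shape: unsynchronized lazy init
      private Handler handler;
      Handler get() {
        if (handler == null) {        // two threads may both observe null here
          handler = event -> { };
        }
        return handler;
      }
    }

    class EagerHolder {               // new shape: final field, safe publication
      private final Handler handler = event -> { };
      Handler get() { return handler; }
    }
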
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6b9a5beb/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStore.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStore.java
index bced5b8..ec42cbe 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStore.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStore.java
@@ -737,7 +737,7 @@ public abstract class RMStateStore extends AbstractService {
    * RMAppStoredEvent will be sent on completion to notify the RMApp
    */
   @SuppressWarnings("unchecked")
-  public synchronized void storeNewApplication(RMApp app) {
+  public void storeNewApplication(RMApp app) {
     ApplicationSubmissionContext context = app
                                             .getApplicationSubmissionContext();
     assert context instanceof ApplicationSubmissionContextPBImpl;
@@ -748,7 +748,7 @@ public abstract class RMStateStore extends AbstractService {
   }
 
   @SuppressWarnings("unchecked")
-  public synchronized void updateApplicationState(
+  public void updateApplicationState(
       ApplicationStateData appState) {
     dispatcher.getEventHandler().handle(new RMStateUpdateAppEvent(appState));
   }
@@ -780,7 +780,7 @@ public abstract class RMStateStore extends AbstractService {
    * This does not block the dispatcher threads
    * RMAppAttemptStoredEvent will be sent on completion to notify the RMAppAttempt
    */
-  public synchronized void storeNewApplicationAttempt(RMAppAttempt appAttempt) {
+  public void storeNewApplicationAttempt(RMAppAttempt appAttempt) {
     Credentials credentials = getCredentialsFromAppAttempt(appAttempt);
 
     AggregateAppResourceUsage resUsage =
@@ -798,7 +798,7 @@ public abstract class RMStateStore extends AbstractService {
   }
 
   @SuppressWarnings("unchecked")
-  public synchronized void updateApplicationAttemptState(
+  public void updateApplicationAttemptState(
       ApplicationAttemptStateData attemptState) {
     dispatcher.getEventHandler().handle(
       new RMStateUpdateAppAttemptEvent(attemptState));
@@ -963,7 +963,7 @@ public abstract class RMStateStore extends AbstractService {
    * There is no notification of completion for this operation.
    */
   @SuppressWarnings("unchecked")
-  public synchronized void removeApplication(RMApp app) {
+  public void removeApplication(RMApp app) {
     ApplicationStateData appState =
         ApplicationStateData.newInstance(app.getSubmitTime(),
             app.getStartTime(), app.getApplicationSubmissionContext(),
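
As the hunks above show, each of these methods only constructs an event and hands it to the dispatcher; the writes themselves are serialized by the dispatcher's single consuming thread, which is presumably why the per-method locking could be dropped. A generic sketch of that enqueue-only shape (illustrative names, not the actual RM classes):

    import java.util.concurrent.BlockingQueue;
    import java.util.concurrent.LinkedBlockingQueue;

    class EnqueueOnlyFacade {
      private final BlockingQueue<Runnable> queue = new LinkedBlockingQueue<>();

      // No locking required: the method only enqueues work.
      void storeNewApplication(String appId) {
        queue.add(() -> writeToStore(appId));
      }

      private void writeToStore(String appId) {
        // state mutation happens on the single consumer thread
      }
    }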


[31/38] hadoop git commit: HDFS-9267. TestDiskError should get stored replicas through FsDatasetTestUtils. (Lei (Eddy) Xu via Colin P. McCabe)

Posted by as...@apache.org.
HDFS-9267. TestDiskError should get stored replicas through FsDatasetTestUtils. (Lei (Eddy) Xu via Colin P. McCabe)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e02bbeb8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e02bbeb8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e02bbeb8

Branch: refs/heads/yarn-2877
Commit: e02bbeb8862ee5bca572a0252e8ff3a3699eff5a
Parents: cbc7b6b
Author: Colin P. McCabe <cm...@cloudera.com>
Authored: Fri Dec 4 12:15:53 2015 -0800
Committer: Colin Patrick Mccabe <cm...@cloudera.com>
Committed: Fri Dec 4 12:51:19 2015 -0800

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |  2 ++
 .../server/datanode/FsDatasetTestUtils.java     |  6 ++++-
 .../hdfs/server/datanode/TestDiskError.java     |  9 +++-----
 .../fsdataset/impl/FsDatasetImplTestUtils.java  | 23 ++++++++++++++++++++
 4 files changed, 33 insertions(+), 7 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e02bbeb8/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 9e8b8a9..99aa719c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -883,6 +883,8 @@ Release 2.9.0 - UNRELEASED
   NEW FEATURES
 
   IMPROVEMENTS
+      HDFS-9267. TestDiskError should get stored replicas through
+      FsDatasetTestUtils. (Lei (Eddy) Xu via Colin P. McCabe)
 
   OPTIMIZATIONS
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e02bbeb8/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/FsDatasetTestUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/FsDatasetTestUtils.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/FsDatasetTestUtils.java
index fd47705..e89e1f2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/FsDatasetTestUtils.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/FsDatasetTestUtils.java
@@ -29,6 +29,7 @@ import org.apache.hadoop.util.ReflectionUtils;
 
 import java.io.FileNotFoundException;
 import java.io.IOException;
+import java.util.Iterator;
 
 /**
  * Provide block access for FsDataset white box tests.
@@ -251,4 +252,7 @@ public interface FsDatasetTestUtils {
    */
   void changeStoredGenerationStamp(ExtendedBlock block, long newGenStamp)
       throws IOException;
-}
+
+  /** Get all stored replicas in the specified block pool. */
+  Iterator<Replica> getStoredReplicas(String bpid) throws IOException;
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e02bbeb8/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java
index cc8566c..55a668b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java
@@ -140,7 +140,8 @@ public class TestDiskError {
     cluster.waitActive();
     final int sndNode = 1;
     DataNode datanode = cluster.getDataNodes().get(sndNode);
-    
+    FsDatasetTestUtils utils = cluster.getFsDatasetTestUtils(datanode);
+
     // replicate the block to the second datanode
     InetSocketAddress target = datanode.getXferAddress();
     Socket s = new Socket(target.getAddress(), target.getPort());
@@ -161,11 +162,7 @@ public class TestDiskError {
 
     // the temporary block & meta files should be deleted
     String bpid = cluster.getNamesystem().getBlockPoolId();
-    File storageDir = cluster.getInstanceStorageDir(sndNode, 0);
-    File dir1 = MiniDFSCluster.getRbwDir(storageDir, bpid);
-    storageDir = cluster.getInstanceStorageDir(sndNode, 1);
-    File dir2 = MiniDFSCluster.getRbwDir(storageDir, bpid);
-    while (dir1.listFiles().length != 0 || dir2.listFiles().length != 0) {
+    while (utils.getStoredReplicas(bpid).hasNext()) {
       Thread.sleep(100);
     }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e02bbeb8/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImplTestUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImplTestUtils.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImplTestUtils.java
index 320ae9f..f67eeb8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImplTestUtils.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImplTestUtils.java
@@ -47,6 +47,9 @@ import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.io.RandomAccessFile;
 import java.nio.channels.FileChannel;
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.List;
 import java.nio.file.Files;
 import java.nio.file.StandardCopyOption;
 import java.util.Random;
@@ -377,4 +380,24 @@ public class FsDatasetImplTestUtils implements FsDatasetTestUtils {
     Files.move(metaFile.toPath(), newMetaFile.toPath(),
         StandardCopyOption.ATOMIC_MOVE);
   }
+
+  @Override
+  public Iterator<Replica> getStoredReplicas(String bpid) throws IOException {
+    // Reload replicas from the disk.
+    ReplicaMap replicaMap = new ReplicaMap(dataset);
+    try (FsVolumeReferences refs = dataset.getFsVolumeReferences()) {
+      for (FsVolumeSpi vol : refs) {
+        FsVolumeImpl volume = (FsVolumeImpl) vol;
+        volume.getVolumeMap(bpid, replicaMap, dataset.ramDiskReplicaTracker);
+      }
+    }
+
+    // Cast ReplicaInfo to Replica, because ReplicaInfo assumes a file-based
+    // FsVolumeSpi implementation.
+    List<Replica> ret = new ArrayList<>();
+    if (replicaMap.replicas(bpid) != null) {
+      ret.addAll(replicaMap.replicas(bpid));
+    }
+    return ret.iterator();
+  }
 }
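
For test authors, the new accessor replaces direct traversal of on-disk rbw directories, as the TestDiskError change above illustrates. A small sketch of counting the stored replicas in a block pool ('cluster' is assumed to be a running MiniDFSCluster, 'dn' one of its DataNodes and 'bpid' the block pool id):

    import java.util.Iterator;

    // Sketch only; mirrors the usage in TestDiskError above.
    FsDatasetTestUtils utils = cluster.getFsDatasetTestUtils(dn);
    Iterator<Replica> it = utils.getStoredReplicas(bpid);
    int stored = 0;
    while (it.hasNext()) {
      it.next();
      stored++;
    }
    // stored == 0 once the temporary replicas have been cleaned up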


[03/38] hadoop git commit: HADOOP-12600. FileContext and AbstractFileSystem should be annotated as a Stable interface. Contributed by Chris Nauroth.

Posted by as...@apache.org.
HADOOP-12600. FileContext and AbstractFileSystem should be annotated as a Stable interface. Contributed by Chris Nauroth.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b2c78536
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b2c78536
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b2c78536

Branch: refs/heads/yarn-2877
Commit: b2c78536cb55c58e4d4a0ea16f648a34c7e2f88c
Parents: 4c8125d
Author: cnauroth <cn...@apache.org>
Authored: Sat Nov 28 15:40:07 2015 -0800
Committer: cnauroth <cn...@apache.org>
Committed: Sat Nov 28 15:40:07 2015 -0800

----------------------------------------------------------------------
 hadoop-common-project/hadoop-common/CHANGES.txt                   | 3 +++
 .../src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java    | 2 +-
 .../src/main/java/org/apache/hadoop/fs/FileContext.java           | 2 +-
 3 files changed, 5 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b2c78536/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 4f35432..3c77099 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -994,6 +994,9 @@ Release 2.8.0 - UNRELEASED
     HADOOP-10465. Fix use of generics within SortedMapWritable.
     (Bertrand Dechoux via wheat9)
 
+    HADOOP-12600. FileContext and AbstractFileSystem should be annotated as a
+    Stable interface. (cnauroth)
+
   OPTIMIZATIONS
 
     HADOOP-11785. Reduce the number of listStatus operation in distcp

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b2c78536/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java
index 2bc3859..1fce04c 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java
@@ -64,7 +64,7 @@ import com.google.common.annotations.VisibleForTesting;
  * to the root of the "this" file system .
  */
 @InterfaceAudience.Public
-@InterfaceStability.Evolving /*Evolving for a release,to be changed to Stable */
+@InterfaceStability.Stable
 public abstract class AbstractFileSystem {
   static final Log LOG = LogFactory.getLog(AbstractFileSystem.class);
 

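The markers come from org.apache.hadoop.classification; under Hadoop's interface classification conventions, a Public/Stable class is one downstream code may depend on, with incompatible changes intended only at major releases. An illustrative sketch of how the pair is applied (class name is made up):

    import org.apache.hadoop.classification.InterfaceAudience;
    import org.apache.hadoop.classification.InterfaceStability;

    @InterfaceAudience.Public
    @InterfaceStability.Stable
    public abstract class ExampleClientFacingApi {
      // public members here are covered by the compatibility policy
    }
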
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b2c78536/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java
index 4dbf9e3..2456154 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java
@@ -165,7 +165,7 @@ import org.apache.htrace.core.Tracer;
  */
 
 @InterfaceAudience.Public
-@InterfaceStability.Evolving /*Evolving for a release,to be changed to Stable */
+@InterfaceStability.Stable
 public class FileContext {
   
   public static final Log LOG = LogFactory.getLog(FileContext.class);