Posted to common-commits@hadoop.apache.org by zj...@apache.org on 2015/03/30 21:37:07 UTC

[01/20] hadoop git commit: HADOOP-11639. Clean up Windows native code compilation warnings related to Windows Secure Container Executor. Contributed by Remus Rusanu.

Repository: hadoop
Updated Branches:
  refs/heads/YARN-2928 ee3526587 -> 471b1d936


HADOOP-11639. Clean up Windows native code compilation warnings related to Windows Secure Container Executor. Contributed by Remus Rusanu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a3d37787
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a3d37787
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a3d37787

Branch: refs/heads/YARN-2928
Commit: a3d37787f3dd42b30e1666047a1d240761544691
Parents: 597feeb
Author: cnauroth <cn...@apache.org>
Authored: Fri Mar 27 15:03:41 2015 -0700
Committer: Zhijie Shen <zj...@apache.org>
Committed: Mon Mar 30 12:10:46 2015 -0700

----------------------------------------------------------------------
 hadoop-common-project/hadoop-common/CHANGES.txt |  3 ++
 .../windows_secure_container_executor.c         |  2 +-
 .../hadoop-common/src/main/winutils/client.c    | 17 ++++------
 .../hadoop-common/src/main/winutils/config.cpp  |  2 +-
 .../src/main/winutils/include/winutils.h        | 24 +++++++++++---
 .../src/main/winutils/libwinutils.c             | 18 +++++------
 .../hadoop-common/src/main/winutils/service.c   | 34 ++++++++++----------
 .../src/main/winutils/systeminfo.c              |  3 ++
 .../hadoop-common/src/main/winutils/task.c      | 28 +++++++++-------
 9 files changed, 76 insertions(+), 55 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a3d37787/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index febbf6b..8643901 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -1172,6 +1172,9 @@ Release 2.7.0 - UNRELEASED
     HADOOP-11691. X86 build of libwinutils is broken.
     (Kiran Kumar M R via cnauroth)
 
+    HADOOP-11639. Clean up Windows native code compilation warnings related to
+    Windows Secure Container Executor. (Remus Rusanu via cnauroth)
+
 Release 2.6.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a3d37787/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/yarn/server/nodemanager/windows_secure_container_executor.c
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/yarn/server/nodemanager/windows_secure_container_executor.c b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/yarn/server/nodemanager/windows_secure_container_executor.c
index 7e65065..b37359d 100644
--- a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/yarn/server/nodemanager/windows_secure_container_executor.c
+++ b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/yarn/server/nodemanager/windows_secure_container_executor.c
@@ -409,7 +409,7 @@ Java_org_apache_hadoop_yarn_server_nodemanager_WindowsSecureContainerExecutor_00
 
 done:
   if (path)     (*env)->ReleaseStringChars(env, jpath, path);
-  return hFile;
+  return (jlong) hFile;
 #endif
 }
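
Several of the warning fixes in this commit are explicit casts that move Win32 HANDLE values across the JNI and RPC boundaries as plain 64-bit integers (jlong on the JNI side, LONG_PTR on the MIDL side). The sketch below is a hypothetical Java-side illustration of that convention, not the real Hadoop binding: a HANDLE is pointer-sized, so long is the only Java primitive wide enough to carry it without truncation.

    public final class NativeHandleSketch {
      // Illustrative native binding only; a C implementation would end with
      // "return (jlong) hFile;", as the patched JNI function above does.
      public static native long openNativeFile(String path);

      // Handles travel through Java as opaque 64-bit values and are only
      // reinterpreted as a HANDLE again on the native side.
      public static String describe(long handle) {
        return String.format("HANDLE carried as jlong: 0x%016X", handle);
      }

      public static void main(String[] args) {
        System.out.println(describe(0x1A4L)); // made-up handle value
      }
    }
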
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a3d37787/hadoop-common-project/hadoop-common/src/main/winutils/client.c
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/winutils/client.c b/hadoop-common-project/hadoop-common/src/main/winutils/client.c
index 047bfb5..e3a2c37 100644
--- a/hadoop-common-project/hadoop-common/src/main/winutils/client.c
+++ b/hadoop-common-project/hadoop-common/src/main/winutils/client.c
@@ -28,8 +28,6 @@ static ACCESS_MASK CLIENT_MASK = 1;
 VOID ReportClientError(LPWSTR lpszLocation, DWORD dwError) {
   LPWSTR      debugMsg = NULL;
   int         len;
-  WCHAR       hexError[32];
-  HRESULT     hr;
 
   if (IsDebuggerPresent()) {
     len = FormatMessageW(
@@ -49,7 +47,6 @@ DWORD PrepareRpcBindingHandle(
   DWORD       dwError = EXIT_FAILURE;
   RPC_STATUS  status;
   LPWSTR      lpszStringBinding    = NULL;
-  ULONG       ulCode;
   RPC_SECURITY_QOS_V3 qos;
   SID_IDENTIFIER_AUTHORITY authNT = SECURITY_NT_AUTHORITY;
   BOOL rpcBindingInit = FALSE;
@@ -104,7 +101,7 @@ DWORD PrepareRpcBindingHandle(
                   RPC_C_AUTHN_WINNT,              // AuthnSvc
                   NULL,                           // AuthnIdentity (self)
                   RPC_C_AUTHZ_NONE,               // AuthzSvc
-                  &qos);
+                  (RPC_SECURITY_QOS*) &qos);
   if (RPC_S_OK != status) {
     ReportClientError(L"RpcBindingSetAuthInfoEx", status);
     dwError = status;
@@ -375,7 +372,7 @@ DWORD RpcCall_WinutilsCreateFile(
   RpcEndExcept;
 
   if (ERROR_SUCCESS == dwError) {
-    *hFile = response->hFile;
+    *hFile = (HANDLE) response->hFile;
   }
 
 done:
@@ -479,11 +476,11 @@ DWORD RpcCall_TaskCreateAsUser(
     RpcEndExcept;
 
     if (ERROR_SUCCESS == dwError) {
-      *phProcess = response->hProcess;
-      *phThread = response->hThread;
-      *phStdIn = response->hStdIn;
-      *phStdOut = response->hStdOut;
-      *phStdErr = response->hStdErr;
+      *phProcess = (HANDLE) response->hProcess;
+      *phThread = (HANDLE) response->hThread;
+      *phStdIn = (HANDLE) response->hStdIn;
+      *phStdOut = (HANDLE) response->hStdOut;
+      *phStdErr = (HANDLE) response->hStdErr;
     }
 
 done:

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a3d37787/hadoop-common-project/hadoop-common/src/main/winutils/config.cpp
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/winutils/config.cpp b/hadoop-common-project/hadoop-common/src/main/winutils/config.cpp
index 1e07b7f..74be689 100644
--- a/hadoop-common-project/hadoop-common/src/main/winutils/config.cpp
+++ b/hadoop-common-project/hadoop-common/src/main/winutils/config.cpp
@@ -18,7 +18,7 @@
 #include "winutils.h"
 #include <string.h>
 #include <stdlib.h>
-#import "msxml6.dll"
+#import "msxml6.dll" exclude("ISequentialStream", "_FILETIME")
 
 #define ERROR_CHECK_HRESULT_DONE(hr, message)                               \
   if (FAILED(hr))  {                                                        \

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a3d37787/hadoop-common-project/hadoop-common/src/main/winutils/include/winutils.h
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/winutils/include/winutils.h b/hadoop-common-project/hadoop-common/src/main/winutils/include/winutils.h
index f72802c..6c33b5a 100644
--- a/hadoop-common-project/hadoop-common/src/main/winutils/include/winutils.h
+++ b/hadoop-common-project/hadoop-common/src/main/winutils/include/winutils.h
@@ -248,8 +248,8 @@ DWORD BuildServiceSecurityDescriptor(
   __out PSECURITY_DESCRIPTOR*         pSD);
 
 DWORD AddNodeManagerAndUserACEsToObject(
-  __in HANDLE hObject,
-  __in LPWSTR user,
+  __in HANDLE hProcess,
+  __in LPCWSTR user,
   __in ACCESS_MASK accessMask);
 
 
@@ -283,15 +283,29 @@ DWORD RpcCall_WinutilsCreateFile(
   __out HANDLE* hFile);
 
 DWORD RpcCall_WinutilsMoveFile(
-  __in LPCWSTR    sourcePath, 
-  __in LPCWSTR    destinationPath,
-  __in BOOL       replaceExisting);
+  __in int operation,
+  __in LPCWSTR sourcePath, 
+  __in LPCWSTR destinationPath,
+  __in BOOL replaceExisting);
+
 
 DWORD RpcCall_WinutilsDeletePath(
   __in LPCWSTR    path,
   __in BOOL       isDir,
   __out BOOL*     pDeleted);
 
+DWORD RpcCall_WinutilsChown(
+  __in LPCWSTR filePath, 
+  __in_opt LPCWSTR ownerName, 
+  __in_opt LPCWSTR groupName);
+
+DWORD RpcCall_WinutilsMkDir(
+  __in LPCWSTR filePath);
+
+DWORD RpcCall_WinutilsChmod(
+  __in LPCWSTR filePath, 
+  __in int mode);
+
 #ifdef __cplusplus
 }
 #endif

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a3d37787/hadoop-common-project/hadoop-common/src/main/winutils/libwinutils.c
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/winutils/libwinutils.c b/hadoop-common-project/hadoop-common/src/main/winutils/libwinutils.c
index 98fe3ab..676f1b2 100644
--- a/hadoop-common-project/hadoop-common/src/main/winutils/libwinutils.c
+++ b/hadoop-common-project/hadoop-common/src/main/winutils/libwinutils.c
@@ -2596,7 +2596,7 @@ LPCWSTR GetSystemTimeString() {
   QueryPerformanceFrequency(&frequency);
 
   qpc = (double) counter.QuadPart / (double) frequency.QuadPart;
-  subSec = ((qpc - (long)qpc) * 1000000);
+  subSec = (int)((qpc - (long)qpc) * 1000000);
 
   hr = StringCbPrintf(buffer, sizeof(buffer), L"%02d:%02d:%02d.%06d", 
     (int)systime.wHour, (int)systime.wMinute, (int)systime.wSecond, (int)subSec);
@@ -2619,7 +2619,7 @@ done:
 //  Native debugger: windbg, ntsd, cdb, visual studio
 //
 VOID LogDebugMessage(LPCWSTR format, ...) {
-  LPWSTR buffer[8192];
+  wchar_t buffer[8192];
   va_list args;
   HRESULT hr;
 
@@ -2657,8 +2657,8 @@ DWORD SplitStringIgnoreSpaceW(
   size_t tokenCount = 0;
   size_t crtSource;
   size_t crtToken = 0;
-  WCHAR* lpwszTokenStart = NULL;
-  WCHAR* lpwszTokenEnd = NULL;
+  const WCHAR* lpwszTokenStart = NULL;
+  const WCHAR* lpwszTokenEnd = NULL;
   WCHAR* lpwszBuffer = NULL;
   size_t tokenLength = 0;
   size_t cchBufferLength = 0;
@@ -2849,7 +2849,7 @@ DWORD BuildServiceSecurityDescriptor(
     }
   }
 
-  pTokenGroup = (PTOKEN_USER) LocalAlloc(LPTR, dwBufferSize);
+  pTokenGroup = (PTOKEN_PRIMARY_GROUP) LocalAlloc(LPTR, dwBufferSize);
   if (NULL == pTokenGroup) {
     dwError = GetLastError();
     LogDebugMessage(L"LocalAlloc:pTokenGroup: %d\n", dwError);
@@ -2870,11 +2870,11 @@ DWORD BuildServiceSecurityDescriptor(
 
   owner.TrusteeForm = TRUSTEE_IS_SID;
   owner.TrusteeType = TRUSTEE_IS_UNKNOWN;
-  owner.ptstrName = (LPCWSTR) pOwner;
+  owner.ptstrName = (LPWSTR) pOwner;
 
   group.TrusteeForm = TRUSTEE_IS_SID;
   group.TrusteeType = TRUSTEE_IS_UNKNOWN;
-  group.ptstrName = (LPCWSTR) pTokenGroup->PrimaryGroup;
+  group.ptstrName = (LPWSTR) pTokenGroup->PrimaryGroup;
 
   eas = (EXPLICIT_ACCESS*) LocalAlloc(LPTR, sizeof(EXPLICIT_ACCESS) * (grantSidCount + denySidCount));
   if (NULL == eas) {
@@ -2890,7 +2890,7 @@ DWORD BuildServiceSecurityDescriptor(
     eas[crt].grfInheritance = NO_INHERITANCE;
     eas[crt].Trustee.TrusteeForm = TRUSTEE_IS_SID;
     eas[crt].Trustee.TrusteeType = TRUSTEE_IS_UNKNOWN;
-    eas[crt].Trustee.ptstrName = (LPCWSTR) pGrantSids[crt];
+    eas[crt].Trustee.ptstrName = (LPWSTR) pGrantSids[crt];
     eas[crt].Trustee.pMultipleTrustee = NULL;
     eas[crt].Trustee.MultipleTrusteeOperation = NO_MULTIPLE_TRUSTEE;
   }
@@ -2902,7 +2902,7 @@ DWORD BuildServiceSecurityDescriptor(
     eas[crt].grfInheritance = NO_INHERITANCE;
     eas[crt].Trustee.TrusteeForm = TRUSTEE_IS_SID;
     eas[crt].Trustee.TrusteeType = TRUSTEE_IS_UNKNOWN;
-    eas[crt].Trustee.ptstrName = (LPCWSTR) pDenySids[crt - grantSidCount];
+    eas[crt].Trustee.ptstrName = (LPWSTR) pDenySids[crt - grantSidCount];
     eas[crt].Trustee.pMultipleTrustee = NULL;
     eas[crt].Trustee.MultipleTrusteeOperation = NO_MULTIPLE_TRUSTEE;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a3d37787/hadoop-common-project/hadoop-common/src/main/winutils/service.c
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/winutils/service.c b/hadoop-common-project/hadoop-common/src/main/winutils/service.c
index ba35003..fca5dbc 100644
--- a/hadoop-common-project/hadoop-common/src/main/winutils/service.c
+++ b/hadoop-common-project/hadoop-common/src/main/winutils/service.c
@@ -206,7 +206,7 @@ DWORD ValidateConfigurationFile() {
   BOOL daclPresent = FALSE;
   BOOL daclDefaulted = FALSE;
   PACL pDacl = NULL;
-  unsigned int crt = 0, crtSid = 0;
+  DWORD crt = 0;
   WELL_KNOWN_SID_TYPE allowedSidTypes[] = {
     WinLocalSystemSid,
     WinBuiltinAdministratorsSid};
@@ -214,7 +214,6 @@ DWORD ValidateConfigurationFile() {
   DWORD cbSid = SECURITY_MAX_SID_SIZE;
   PSID* allowedSids = NULL; 
   int cAllowedSids = 0;
-  BOOL isSidDefaulted;
   PSID sidOwner = NULL;
   PSID sidGroup = NULL;
 
@@ -324,7 +323,7 @@ DWORD InitJobName() {
   int       crt = 0;
 
   // Services can be restarted
-  if (gJobName) LocalFree(gJobName);
+  if (gJobName) LocalFree((HLOCAL)gJobName);
   gJobName = NULL;
     
   dwError = GetConfigValue(
@@ -382,7 +381,7 @@ DWORD InitLocalDirs() {
   }
 
 done:
-  if (value) LocalFree(value);
+  if (value) LocalFree((HLOCAL)value);
   
   return dwError;
 }
@@ -437,7 +436,7 @@ DWORD ValidateLocalPath(LPCWSTR lpszPath) {
       gLocalDirs[crt], gCchLocalDir[crt],
       NULL, // lpVersionInformation
       NULL, // lpReserved
-      NULL); // lParam
+      (LPARAM) NULL); // lParam
     
     if (0 == compareResult) {
       dwError = GetLastError();
@@ -500,7 +499,7 @@ done:
 // Description:
 //  Service main entry point.
 //
-VOID WINAPI SvcMain() {
+VOID WINAPI SvcMain(DWORD dwArg, LPTSTR* lpszArgv) {
   DWORD dwError = ERROR_SUCCESS;
 
   gSvcStatusHandle = RegisterServiceCtrlHandler( 
@@ -693,15 +692,15 @@ done:
 //
 DWORD AuthInit() {
   DWORD       dwError = ERROR_SUCCESS;
-  int         count = 0;
-  int         crt  = 0;
+  size_t      count = 0;
+  size_t      crt  = 0;
   size_t      len = 0;
   LPCWSTR     value = NULL;
   WCHAR**     tokens = NULL;
   LPWSTR      lpszSD = NULL;
   ULONG       cchSD = 0;
   DWORD       dwBufferSize = 0;
-  int         allowedCount = 0;
+  size_t      allowedCount = 0;
   PSID*       allowedSids = NULL;
   
 
@@ -737,7 +736,7 @@ DWORD AuthInit() {
   
 done:
   if (lpszSD) LocalFree(lpszSD);
-  if (value) LocalFree(value);
+  if (value) LocalFree((HLOCAL)value);
   if (tokens) LocalFree(tokens);
   return dwError;
 }
@@ -1167,11 +1166,12 @@ error_status_t WinutilsCreateProcessAsUser(
   // Note that there are no more API calls, only assignments. A failure could occur only if
  // forced (process kill) or hardware error (faulty memory, processor bit flip etc).
 
-  (*response)->hProcess = hDuplicateProcess;
-  (*response)->hThread = hDuplicateThread;
-  (*response)->hStdIn = hDuplicateStdIn;
-  (*response)->hStdOut = hDuplicateStdOut;
-  (*response)->hStdErr = hDuplicateStdErr;
+  // as MIDL has no 'HANDLE' type, the (LONG_PTR) is used instead
+  (*response)->hProcess = (LONG_PTR)hDuplicateProcess;
+  (*response)->hThread = (LONG_PTR)hDuplicateThread;
+  (*response)->hStdIn = (LONG_PTR)hDuplicateStdIn;
+  (*response)->hStdOut = (LONG_PTR)hDuplicateStdOut;
+  (*response)->hStdErr = (LONG_PTR)hDuplicateStdErr;
 
   fMustCleanupProcess = FALSE;
   
@@ -1276,7 +1276,8 @@ error_status_t WinutilsCreateFile(
     goto done;
   }
 
-  (*response)->hFile = hDuplicateFile;
+  // As MIDL has no 'HANDLE' type, (LONG_PTR) is used instead
+  (*response)->hFile = (LONG_PTR)hDuplicateFile;
   hDuplicateFile = INVALID_HANDLE_VALUE;
 
 done:
@@ -1302,7 +1303,6 @@ error_status_t WinutilsKillTask(
     /* [in] */ handle_t IDL_handle,
     /* [in] */ KILLTASK_REQUEST *request) {
   DWORD dwError = ERROR_SUCCESS;
-  HRESULT hr;
   WCHAR bufferName[MAX_PATH];
 
   dwError = GetSecureJobObjectName(request->taskName, MAX_PATH, bufferName);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a3d37787/hadoop-common-project/hadoop-common/src/main/winutils/systeminfo.c
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/winutils/systeminfo.c b/hadoop-common-project/hadoop-common/src/main/winutils/systeminfo.c
index 7fce424..48f03ed 100644
--- a/hadoop-common-project/hadoop-common/src/main/winutils/systeminfo.c
+++ b/hadoop-common-project/hadoop-common/src/main/winutils/systeminfo.c
@@ -19,6 +19,9 @@
 #include <psapi.h>
 #include <PowrProf.h>
 
+#ifdef PSAPI_VERSION
+#undef PSAPI_VERSION
+#endif
 #define PSAPI_VERSION 1
 #pragma comment(lib, "psapi.lib")
 #pragma comment(lib, "Powrprof.lib")

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a3d37787/hadoop-common-project/hadoop-common/src/main/winutils/task.c
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/winutils/task.c b/hadoop-common-project/hadoop-common/src/main/winutils/task.c
index 37c6ca1..057fd8a 100644
--- a/hadoop-common-project/hadoop-common/src/main/winutils/task.c
+++ b/hadoop-common-project/hadoop-common/src/main/winutils/task.c
@@ -22,6 +22,9 @@
 #include <authz.h>
 #include <sddl.h>
 
+#ifdef PSAPI_VERSION
+#undef PSAPI_VERSION
+#endif
 #define PSAPI_VERSION 1
 #pragma comment(lib, "psapi.lib")
 
@@ -231,7 +234,7 @@ DWORD BuildImpersonateSecurityDescriptor(__out PSECURITY_DESCRIPTOR* ppSD) {
   LocalFree(tokens);
   tokens = NULL;
 
-  LocalFree(value);
+  LocalFree((HLOCAL)value);
   value = NULL;
   
   dwError = GetConfigValue(wsceConfigRelativePath, NM_WSCE_IMPERSONATE_DENIED, &len, &value); 
@@ -298,18 +301,18 @@ done:
 //
 DWORD AddNodeManagerAndUserACEsToObject(
   __in HANDLE hObject,
-  __in LPWSTR user,
+  __in LPCWSTR user,
   __in ACCESS_MASK accessMask) {
 
   DWORD dwError = ERROR_SUCCESS;
-  int         countTokens = 0;
+  size_t      countTokens = 0;
   size_t      len = 0;
   LPCWSTR     value = NULL;
   WCHAR**     tokens = NULL;
-  int         crt = 0;
+  DWORD       crt = 0;
   PACL        pDacl = NULL;
   PSECURITY_DESCRIPTOR  psdProcess = NULL;
-  LPSTR       lpszOldDacl = NULL, lpszNewDacl = NULL;
+  LPWSTR      lpszOldDacl = NULL, lpszNewDacl = NULL;
   ULONG       daclLen = 0;
   PACL        pNewDacl = NULL;
   ACL_SIZE_INFORMATION si;
@@ -381,8 +384,8 @@ DWORD AddNodeManagerAndUserACEsToObject(
   // ACCESS_ALLOWED_ACE struct contains the first DWORD of the SID 
   //
   dwNewAclSize = si.AclBytesInUse + 
-    (countTokens + 1 + sizeof(forcesSidTypes)/sizeof(forcesSidTypes[0])) * 
-      (sizeof(ACCESS_ALLOWED_ACE) + SECURITY_MAX_SID_SIZE - sizeof(DWORD));
+      (DWORD)(countTokens + 1 + sizeof(forcesSidTypes)/sizeof(forcesSidTypes[0])) * 
+              (sizeof(ACCESS_ALLOWED_ACE) + SECURITY_MAX_SID_SIZE - sizeof(DWORD));
 
   pNewDacl = (PSID) LocalAlloc(LPTR, dwNewAclSize);
   if (!pNewDacl) {
@@ -511,7 +514,7 @@ DWORD AddNodeManagerAndUserACEsToObject(
       goto done;
     }
 
-    LogDebugMessage(L"Old DACL: %s\nNew DACL: %s\n", lpszOldDacl, lpszNewDacl);
+    LogDebugMessage(L"Old DACL: %ls\nNew DACL: %ls\n", lpszOldDacl, lpszNewDacl);
   }
   
 done:
@@ -634,7 +637,7 @@ done:
 // Returns:
 // ERROR_SUCCESS: On success
 // GetLastError: otherwise
-DWORD CreateTaskImpl(__in_opt HANDLE logonHandle, __in PCWSTR jobObjName,__in PCWSTR cmdLine, 
+DWORD CreateTaskImpl(__in_opt HANDLE logonHandle, __in PCWSTR jobObjName,__in PWSTR cmdLine, 
   __in LPCWSTR userName, __in long memory, __in long cpuRate)
 {
   DWORD dwErrorCode = ERROR_SUCCESS;
@@ -912,7 +915,7 @@ DWORD CreateTask(__in PCWSTR jobObjName,__in PWSTR cmdLine, __in long memory, __
 // ERROR_SUCCESS: On success
 // GetLastError: otherwise
 DWORD CreateTaskAsUser(__in PCWSTR jobObjName,
-  __in PCWSTR user, __in PCWSTR pidFilePath, __in PCWSTR cmdLine)
+  __in PCWSTR user, __in PCWSTR pidFilePath, __in PWSTR cmdLine)
 {
   DWORD err = ERROR_SUCCESS;
   DWORD exitCode = EXIT_FAILURE;
@@ -923,6 +926,7 @@ DWORD CreateTaskAsUser(__in PCWSTR jobObjName,
   FILE* pidFile = NULL;
   DWORD retLen = 0;
   HANDLE logonHandle = NULL;
+  errno_t pidErrNo = 0;
 
   err = EnableImpersonatePrivileges();
   if( err != ERROR_SUCCESS ) {
@@ -961,8 +965,8 @@ DWORD CreateTaskAsUser(__in PCWSTR jobObjName,
   profileIsLoaded = TRUE; 
 
   // Create the PID file
-
-  if (!(pidFile = _wfopen(pidFilePath, "w"))) {
+  pidErrNo = _wfopen_s(&pidFile, pidFilePath, L"w");
+  if (pidErrNo) {
       err = GetLastError();
       ReportErrorCode(L"_wfopen:pidFilePath", err);
       goto done;


[15/20] hadoop git commit: HDFS-8002. Website refers to /trash directory. Contributed by Brahma Reddy Battula.

Posted by zj...@apache.org.
HDFS-8002. Website refers to /trash directory. Contributed by Brahma Reddy Battula.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6baa8fd2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6baa8fd2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6baa8fd2

Branch: refs/heads/YARN-2928
Commit: 6baa8fd21cbd070a0652983f252fbb30ae90c2b5
Parents: 040fd16
Author: Akira Ajisaka <aa...@apache.org>
Authored: Tue Mar 31 00:27:50 2015 +0900
Committer: Zhijie Shen <zj...@apache.org>
Committed: Mon Mar 30 12:10:48 2015 -0700

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt                     | 3 +++
 hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsDesign.md | 4 ++--
 2 files changed, 5 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6baa8fd2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 811ee75..efba80e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -376,6 +376,9 @@ Release 2.8.0 - UNRELEASED
     greater or equal to 1 there is mismatch in the UI report
     (J.Andreina via vinayakumarb)
 
+    HDFS-8002. Website refers to /trash directory. (Brahma Reddy Battula via
+    aajisaka)
+
 Release 2.7.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6baa8fd2/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsDesign.md
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsDesign.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsDesign.md
index 87a9fcd..5a8e366 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsDesign.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsDesign.md
@@ -224,9 +224,9 @@ Space Reclamation
 
 ### File Deletes and Undeletes
 
-When a file is deleted by a user or an application, it is not immediately removed from HDFS. Instead, HDFS first renames it to a file in the `/trash` directory. The file can be restored quickly as long as it remains in `/trash`. A file remains in `/trash` for a configurable amount of time. After the expiry of its life in `/trash`, the NameNode deletes the file from the HDFS namespace. The deletion of a file causes the blocks associated with the file to be freed. Note that there could be an appreciable time delay between the time a file is deleted by a user and the time of the corresponding increase in free space in HDFS.
+When a file is deleted by a user or an application, it is not immediately removed from HDFS. Instead, HDFS first renames it to a file in the trash directory(`/user/<username>/.Trash`). The file can be restored quickly as long as it remains in trash. A file remains in trash for a configurable amount of time. After the expiry of its life in trash, the NameNode deletes the file from the HDFS namespace. The deletion of a file causes the blocks associated with the file to be freed. Note that there could be an appreciable time delay between the time a file is deleted by a user and the time of the corresponding increase in free space in HDFS.
 
-A user can Undelete a file after deleting it as long as it remains in the `/trash` directory. If a user wants to undelete a file that he/she has deleted, he/she can navigate the `/trash` directory and retrieve the file. The `/trash` directory contains only the latest copy of the file that was deleted. The `/trash` directory is just like any other directory with one special feature: HDFS applies specified policies to automatically delete files from this directory. Current default trash interval is set to 0 (Deletes file without storing in trash). This value is configurable parameter stored as `fs.trash.interval` stored in core-site.xml.
+A user can Undelete a file after deleting it as long as it remains in the trash directory. If a user wants to undelete a file that he/she has deleted, he/she can navigate the trash directory and retrieve the file. The trash directory contains only the latest copy of the file that was deleted. The trash directory is just like any other directory with one special feature: HDFS applies specified policies to automatically delete files from this directory. Current default trash interval is set to 0 (Deletes file without storing in trash). This value is configurable parameter stored as `fs.trash.interval` stored in core-site.xml.
 
 ### Decrease Replication Factor
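
The reworded design doc above points readers at the per-user trash location and the fs.trash.interval setting in core-site.xml. As a hedged illustration (the path and interval below are placeholders), the same behavior can be exercised programmatically:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.Trash;

    public class TrashSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Keep deleted files in /user/<username>/.Trash for 24 hours (value is in minutes).
        conf.set("fs.trash.interval", "1440");
        FileSystem fs = FileSystem.get(conf);
        Path victim = new Path("/tmp/obsolete-data"); // placeholder path
        // Moves the path into the trash directory instead of deleting it outright,
        // mirroring what "hdfs dfs -rm" does when trash is enabled.
        boolean moved = Trash.moveToAppropriateTrash(fs, victim, conf);
        System.out.println("moved to trash: " + moved);
      }
    }
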
 


[10/20] hadoop git commit: HDFS-8004. Use KeyProviderCryptoExtension#warmUpEncryptedKeys when creating an encryption zone. (awang via asuresh)

Posted by zj...@apache.org.
HDFS-8004. Use KeyProviderCryptoExtension#warmUpEncryptedKeys when creating an encryption zone. (awang via asuresh)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8f63bd79
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8f63bd79
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8f63bd79

Branch: refs/heads/YARN-2928
Commit: 8f63bd795da85602a1e21c8951fd978cc7e76e77
Parents: a3d3778
Author: Arun Suresh <as...@apache.org>
Authored: Fri Mar 27 19:23:45 2015 -0700
Committer: Zhijie Shen <zj...@apache.org>
Committed: Mon Mar 30 12:10:47 2015 -0700

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt                       | 3 +++
 .../java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java | 2 +-
 2 files changed, 4 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8f63bd79/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 72ea4fb..af1dd60 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -344,6 +344,9 @@ Release 2.8.0 - UNRELEASED
 
     HDFS-7990. IBR delete ack should not be delayed. (daryn via kihwal)
 
+    HDFS-8004. Use KeyProviderCryptoExtension#warmUpEncryptedKeys when creating
+    an encryption zone. (awang via asuresh)
+
   OPTIMIZATIONS
 
   BUG FIXES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8f63bd79/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 1226a26..d0999b8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -7957,7 +7957,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
         throw new IOException("Key " + keyName + " doesn't exist.");
       }
       // If the provider supports pool for EDEKs, this will fill in the pool
-      generateEncryptedDataEncryptionKey(keyName);
+      provider.warmUpEncryptedKeys(keyName);
       createEncryptionZoneInt(src, metadata.getCipher(),
           keyName, logRetryCache);
     } catch (AccessControlException e) {
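
The change above swaps a local helper for the provider's own warm-up call. A minimal client-side sketch of that API follows, under the assumption of a configured KMS; the provider URI and key name are placeholders:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.crypto.key.KeyProvider;
    import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension;
    import org.apache.hadoop.crypto.key.KeyProviderFactory;

    public class WarmUpSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Placeholder KMS URI; any configured KeyProvider would do.
        conf.set("hadoop.security.key.provider.path", "kms://http@kms-host:9600/kms");
        KeyProvider provider = KeyProviderFactory.getProviders(conf).get(0);
        KeyProviderCryptoExtension extension =
            KeyProviderCryptoExtension.createKeyProviderCryptoExtension(provider);
        // Pre-fills the provider's cache of encrypted data encryption keys (EDEKs)
        // for the zone key, so the first file created in the zone does not pay
        // the key-generation latency.
        extension.warmUpEncryptedKeys("myZoneKey");
      }
    }
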


[20/20] hadoop git commit: MAPREDUCE-6288. Changed permissions on JobHistory server's done directory so that user's client can load the conf files directly. Contributed by Robert Kanter.

Posted by zj...@apache.org.
MAPREDUCE-6288. Changed permissions on JobHistory server's done directory so that user's client can load the conf files directly. Contributed by Robert Kanter.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5c42a674
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5c42a674
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5c42a674

Branch: refs/heads/YARN-2928
Commit: 5c42a674f8a497159a9cf76b834625f3e2d98122
Parents: 4e4f1b8
Author: Vinod Kumar Vavilapalli <vi...@apache.org>
Authored: Mon Mar 30 10:27:19 2015 -0700
Committer: Zhijie Shen <zj...@apache.org>
Committed: Mon Mar 30 12:10:49 2015 -0700

----------------------------------------------------------------------
 hadoop-mapreduce-project/CHANGES.txt            |  4 ++
 .../v2/jobhistory/JobHistoryUtils.java          |  4 +-
 .../mapreduce/v2/hs/HistoryFileManager.java     | 31 ++++++++-
 .../mapreduce/v2/hs/TestHistoryFileManager.java | 73 ++++++++++++++++++++
 4 files changed, 108 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5c42a674/hadoop-mapreduce-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt
index b0367a7..69ff96b 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -510,6 +510,10 @@ Release 2.7.0 - UNRELEASED
     MAPREDUCE-6285. ClientServiceDelegate should not retry upon
     AuthenticationException. (Jonathan Eagles via ozawa)
 
+    MAPREDUCE-6288. Changed permissions on JobHistory server's done directory
+    so that user's client can load the conf files directly. (Robert Kanter via
+    vinodkv)
+
 Release 2.6.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5c42a674/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/jobhistory/JobHistoryUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/jobhistory/JobHistoryUtils.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/jobhistory/JobHistoryUtils.java
index e279c03..8966e4e 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/jobhistory/JobHistoryUtils.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/jobhistory/JobHistoryUtils.java
@@ -72,7 +72,7 @@ public class JobHistoryUtils {
    * Permissions for the history done dir and derivatives.
    */
   public static final FsPermission HISTORY_DONE_DIR_PERMISSION =
-    FsPermission.createImmutable((short) 0770); 
+    FsPermission.createImmutable((short) 0771);
 
   public static final FsPermission HISTORY_DONE_FILE_PERMISSION =
     FsPermission.createImmutable((short) 0770); // rwx------
@@ -81,7 +81,7 @@ public class JobHistoryUtils {
    * Umask for the done dir and derivatives.
    */
   public static final FsPermission HISTORY_DONE_DIR_UMASK = FsPermission
-      .createImmutable((short) (0770 ^ 0777));
+      .createImmutable((short) (0771 ^ 0777));
 
   
   /**
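
The one-bit change above (0770 to 0771) is what lets an arbitrary user's client reach conf files under the done directory: execute permission on a directory allows traversal even without read. A small hedged sketch of the distinction using Hadoop's FsPermission:

    import org.apache.hadoop.fs.permission.FsAction;
    import org.apache.hadoop.fs.permission.FsPermission;

    public class DonePermissionSketch {
      public static void main(String[] args) {
        FsPermission oldPerm = FsPermission.createImmutable((short) 0770);
        FsPermission newPerm = FsPermission.createImmutable((short) 0771);
        // 0770 grants "other" users nothing; 0771 adds execute, which on a
        // directory permits traversal (though still not listing), enough to
        // open a readable history or conf file deeper in the tree.
        System.out.println("0770 other can traverse: "
            + oldPerm.getOtherAction().implies(FsAction.EXECUTE)); // false
        System.out.println("0771 other can traverse: "
            + newPerm.getOtherAction().implies(FsAction.EXECUTE)); // true
      }
    }

The matching umask change (0771 ^ 0777) simply keeps directories created later consistent with the same value.
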

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5c42a674/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryFileManager.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryFileManager.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryFileManager.java
index 65f8a4f..5377075 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryFileManager.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryFileManager.java
@@ -571,8 +571,10 @@ public class HistoryFileManager extends AbstractService {
           new Path(doneDirPrefix));
       doneDirFc = FileContext.getFileContext(doneDirPrefixPath.toUri(), conf);
       doneDirFc.setUMask(JobHistoryUtils.HISTORY_DONE_DIR_UMASK);
-      mkdir(doneDirFc, doneDirPrefixPath, new FsPermission(
-          JobHistoryUtils.HISTORY_DONE_DIR_PERMISSION));
+      FsPermission doneDirPerm = new FsPermission(
+          JobHistoryUtils.HISTORY_DONE_DIR_PERMISSION);
+      mkdir(doneDirFc, doneDirPrefixPath, doneDirPerm);
+      checkHistoryDirsPermissions(doneDirFc, doneDirPrefixPath, doneDirPerm);
     } catch (ConnectException ex) {
       if (logWait) {
         LOG.info("Waiting for FileSystem at " +
@@ -659,6 +661,31 @@ public class HistoryFileManager extends AbstractService {
     }
   }
 
+  private void checkHistoryDirsPermissions(FileContext fc, Path donePath,
+      FsPermission fsp) throws IOException {
+    FileStatus fsStatus = fc.getFileStatus(donePath);
+    if (fsStatus.getPermission().toShort() != fsp.toShort()) {
+      fc.setPermission(donePath, fsp);
+    }
+    List<FileStatus> dirs = findTimestampedDirectories();
+    for (FileStatus dir : dirs) {
+      setPermission(fc, donePath, fsp, dir);
+    }
+  }
+
+  private void setPermission(FileContext fc, Path donePath, FsPermission fsp,
+       FileStatus dir) throws IOException {
+    FsPermission fsPerm = dir.getPermission();
+    if (fsPerm.toShort() != fsp.toShort()) {
+      fc.setPermission(dir.getPath(), fsp);
+      Path parentPath = dir.getPath().getParent();
+      if (!parentPath.equals(donePath)) {
+        FileStatus parentDir = fc.getFileStatus(parentPath);
+        setPermission(fc, donePath, fsp, parentDir);
+      }
+    }
+  }
+
   /**
    * Populates index data structures. Should only be called at initialization
    * times.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5c42a674/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestHistoryFileManager.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestHistoryFileManager.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestHistoryFileManager.java
index e2e943a..032bb72 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestHistoryFileManager.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestHistoryFileManager.java
@@ -23,6 +23,9 @@ import java.io.File;
 import java.io.FileOutputStream;
 import java.util.UUID;
 
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.mapreduce.v2.jobhistory.JobHistoryUtils;
 import org.junit.Assert;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
@@ -109,6 +112,76 @@ public class TestHistoryFileManager {
   }
 
   @Test
+  public void testUpdateDirPermissions() throws Exception {
+    DistributedFileSystem fs = dfsCluster.getFileSystem();
+    fs.setSafeMode( HdfsConstants.SafeModeAction.SAFEMODE_LEAVE);
+    Assert.assertFalse(dfsCluster.getFileSystem().isInSafeMode());
+    Configuration conf = dfsCluster.getConfiguration(0);
+    conf.set(JHAdminConfig.MR_HISTORY_DONE_DIR, getDoneDirNameForTest());
+    conf.set(JHAdminConfig.MR_HISTORY_INTERMEDIATE_DONE_DIR, getIntermediateDoneDirNameForTest());
+    Path p1a = new Path(getDoneDirNameForTest(), "2013");
+    Path p1b = new Path(p1a, "02");
+    Path p1c = new Path(p1b, "15");
+    Path p1d = new Path(p1c, "000000");
+    Path p2a = new Path(getDoneDirNameForTest(), "2013");
+    Path p2b = new Path(p2a, "03");
+    Path p2c = new Path(p2b, "14");
+    Path p2d = new Path(p2c, "000001");
+    FsPermission oldPerms = new FsPermission((short) 0770);
+    fs.mkdirs(p1d);
+    fs.mkdirs(p2d);
+    fs.setPermission(p1a, oldPerms);
+    fs.setPermission(p1b, oldPerms);
+    fs.setPermission(p1c, oldPerms);
+    fs.setPermission(p1d, oldPerms);
+    fs.setPermission(p2a, oldPerms);
+    fs.setPermission(p2b, oldPerms);
+    fs.setPermission(p2c, oldPerms);
+    fs.setPermission(p2d, oldPerms);
+    Path p1File = new Path(p1d, "foo.jhist");
+    Assert.assertTrue(fs.createNewFile(p1File));
+    fs.setPermission(p1File, JobHistoryUtils.HISTORY_DONE_FILE_PERMISSION);
+    Path p2File = new Path(p2d, "bar.jhist");
+    Assert.assertTrue(fs.createNewFile(p2File));
+    fs.setPermission(p2File, JobHistoryUtils.HISTORY_DONE_FILE_PERMISSION);
+    Assert.assertEquals(oldPerms, fs.getFileStatus(p1a).getPermission());
+    Assert.assertEquals(oldPerms, fs.getFileStatus(p1b).getPermission());
+    Assert.assertEquals(oldPerms, fs.getFileStatus(p1c).getPermission());
+    Assert.assertEquals(oldPerms, fs.getFileStatus(p1d).getPermission());
+    Assert.assertEquals(JobHistoryUtils.HISTORY_DONE_FILE_PERMISSION,
+        fs.getFileStatus(p1File).getPermission());
+    Assert.assertEquals(oldPerms, fs.getFileStatus(p2a).getPermission());
+    Assert.assertEquals(oldPerms, fs.getFileStatus(p2b).getPermission());
+    Assert.assertEquals(oldPerms, fs.getFileStatus(p2c).getPermission());
+    Assert.assertEquals(oldPerms, fs.getFileStatus(p2d).getPermission());
+    Assert.assertEquals(JobHistoryUtils.HISTORY_DONE_FILE_PERMISSION,
+        fs.getFileStatus(p2File).getPermission());
+    HistoryFileManager hfm = new HistoryFileManager();
+    hfm.conf = conf;
+    Assert.assertEquals(true, hfm.tryCreatingHistoryDirs(false));
+    Assert.assertEquals(JobHistoryUtils.HISTORY_DONE_DIR_PERMISSION,
+        fs.getFileStatus(p1a).getPermission());
+    Assert.assertEquals(JobHistoryUtils.HISTORY_DONE_DIR_PERMISSION,
+        fs.getFileStatus(p1b).getPermission());
+    Assert.assertEquals(JobHistoryUtils.HISTORY_DONE_DIR_PERMISSION,
+        fs.getFileStatus(p1c).getPermission());
+    Assert.assertEquals(JobHistoryUtils.HISTORY_DONE_DIR_PERMISSION,
+        fs.getFileStatus(p1d).getPermission());
+    Assert.assertEquals(JobHistoryUtils.HISTORY_DONE_FILE_PERMISSION,
+        fs.getFileStatus(p2File).getPermission());
+    Assert.assertEquals(JobHistoryUtils.HISTORY_DONE_DIR_PERMISSION,
+        fs.getFileStatus(p2a).getPermission());
+    Assert.assertEquals(JobHistoryUtils.HISTORY_DONE_DIR_PERMISSION,
+        fs.getFileStatus(p2b).getPermission());
+    Assert.assertEquals(JobHistoryUtils.HISTORY_DONE_DIR_PERMISSION,
+        fs.getFileStatus(p2c).getPermission());
+    Assert.assertEquals(JobHistoryUtils.HISTORY_DONE_DIR_PERMISSION,
+        fs.getFileStatus(p2d).getPermission());
+    Assert.assertEquals(JobHistoryUtils.HISTORY_DONE_FILE_PERMISSION,
+        fs.getFileStatus(p2File).getPermission());
+  }
+
+  @Test
   public void testCreateDirsWithAdditionalFileSystem() throws Exception {
     dfsCluster.getFileSystem().setSafeMode(
         HdfsConstants.SafeModeAction.SAFEMODE_LEAVE);


[19/20] hadoop git commit: HADOOP-11761. Fix findbugs warnings in org.apache.hadoop.security.authentication. Contributed by Li Lu.

Posted by zj...@apache.org.
HADOOP-11761. Fix findbugs warnings in org.apache.hadoop.security.authentication. Contributed by Li Lu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6e598f8b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6e598f8b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6e598f8b

Branch: refs/heads/YARN-2928
Commit: 6e598f8b670e477a69d7a28cb47e1b73d7e8f5f0
Parents: afb05c8
Author: Haohui Mai <wh...@apache.org>
Authored: Mon Mar 30 11:08:54 2015 -0700
Committer: Zhijie Shen <zj...@apache.org>
Committed: Mon Mar 30 12:10:49 2015 -0700

----------------------------------------------------------------------
 .../hadoop-auth/dev-support/findbugsExcludeFile.xml       | 10 ++++++++++
 hadoop-common-project/hadoop-common/CHANGES.txt           |  3 +++
 2 files changed, 13 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6e598f8b/hadoop-common-project/hadoop-auth/dev-support/findbugsExcludeFile.xml
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-auth/dev-support/findbugsExcludeFile.xml b/hadoop-common-project/hadoop-auth/dev-support/findbugsExcludeFile.xml
index 1ecf37a..ddda63c 100644
--- a/hadoop-common-project/hadoop-auth/dev-support/findbugsExcludeFile.xml
+++ b/hadoop-common-project/hadoop-auth/dev-support/findbugsExcludeFile.xml
@@ -34,5 +34,15 @@
     <Method name="getCurrentSecret" />
     <Bug pattern="EI_EXPOSE_REP" />
   </Match>
+  <Match>
+    <Class name="org.apache.hadoop.security.authentication.util.FileSignerSecretProvider" />
+    <Method name="getAllSecrets" />
+    <Bug pattern="EI_EXPOSE_REP" />
+  </Match>
+  <Match>
+    <Class name="org.apache.hadoop.security.authentication.util.FileSignerSecretProvider" />
+    <Method name="getCurrentSecret" />
+    <Bug pattern="EI_EXPOSE_REP" />
+  </Match>
 
 </FindBugsFilter>
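
For context on the excluded pattern: EI_EXPOSE_REP fires when a getter returns a reference to an internal mutable array, so a caller could mutate the object's state behind its back. The Hadoop getters listed above expose their secrets intentionally, hence the suppression rather than a code change. A minimal hypothetical illustration of the warning and the usual defensive-copy remedy:

    public class SecretHolderSketch {
      private final byte[] secret = {1, 2, 3};

      // The shape findbugs flags as EI_EXPOSE_REP: the internal array escapes.
      public byte[] getSecretExposed() {
        return secret;
      }

      // The usual remedy when sharing is not intended: return a defensive copy.
      public byte[] getSecretCopy() {
        return secret.clone();
      }

      public static void main(String[] args) {
        SecretHolderSketch holder = new SecretHolderSketch();
        holder.getSecretExposed()[0] = 42; // mutates the holder's internal state
        holder.getSecretCopy()[1] = 99;    // mutates only the copy
        System.out.println(holder.secret[0] + "," + holder.secret[1]); // prints 42,2
      }
    }
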

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6e598f8b/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 8643901..8b59972 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -1175,6 +1175,9 @@ Release 2.7.0 - UNRELEASED
     HADOOP-11639. Clean up Windows native code compilation warnings related to
     Windows Secure Container Executor. (Remus Rusanu via cnauroth)
 
+    HADOOP-11761. Fix findbugs warnings in org.apache.hadoop.security
+    .authentication. (Li Lu via wheat9)
+
 Release 2.6.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES


[13/20] hadoop git commit: HDFS-6408. Remove redundant definitions in log4j.properties. Contributed by Abhiraj Butala.

Posted by zj...@apache.org.
HDFS-6408. Remove redundant definitions in log4j.properties. Contributed by Abhiraj Butala.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/53e3d8c0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/53e3d8c0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/53e3d8c0

Branch: refs/heads/YARN-2928
Commit: 53e3d8c0025bb2ffafd4896e5dcb3e4abad21921
Parents: e700a4b
Author: Akira Ajisaka <aa...@apache.org>
Authored: Mon Mar 30 11:25:35 2015 +0900
Committer: Zhijie Shen <zj...@apache.org>
Committed: Mon Mar 30 12:10:48 2015 -0700

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt                     | 3 +++
 .../src/contrib/bkjournal/src/test/resources/log4j.properties   | 5 -----
 2 files changed, 3 insertions(+), 5 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/53e3d8c0/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index e026f85..f4991da 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -350,6 +350,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-6263. Remove DRFA.MaxBackupIndex config from log4j.properties.
     (Abhiraj Butala via aajisaka)
 
+    HDFS-6408. Remove redundant definitions in log4j.properties.
+    (Abhiraj Butala via aajisaka)
+
   OPTIMIZATIONS
 
   BUG FIXES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/53e3d8c0/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/resources/log4j.properties
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/resources/log4j.properties b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/resources/log4j.properties
index f66c84b..93c22f7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/resources/log4j.properties
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/resources/log4j.properties
@@ -53,8 +53,3 @@ log4j.appender.ROLLINGFILE.layout.ConversionPattern=%d{ISO8601} - %-5p - [%t:%C{
 
 # Max log file size of 10MB
 log4j.appender.ROLLINGFILE.MaxFileSize=10MB
-
-log4j.appender.ROLLINGFILE.layout=org.apache.log4j.PatternLayout
-log4j.appender.ROLLINGFILE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L] - %m%n
-
-


[17/20] hadoop git commit: YARN-3304. Cleaning up ResourceCalculatorProcessTree APIs for public use and removing inconsistencies in the default values. Contributed by Junping Du and Karthik Kambatla.

Posted by zj...@apache.org.
YARN-3304. Cleaning up ResourceCalculatorProcessTree APIs for public use and removing inconsistencies in the default values. Contributed by Junping Du and Karthik Kambatla.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4e4f1b88
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4e4f1b88
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4e4f1b88

Branch: refs/heads/YARN-2928
Commit: 4e4f1b88dd29de11b9535194cc05f9db2a5570b1
Parents: 6baa8fd
Author: Vinod Kumar Vavilapalli <vi...@apache.org>
Authored: Mon Mar 30 10:09:40 2015 -0700
Committer: Zhijie Shen <zj...@apache.org>
Committed: Mon Mar 30 12:10:49 2015 -0700

----------------------------------------------------------------------
 .../java/org/apache/hadoop/mapred/Task.java     | 26 ++++--
 hadoop-yarn-project/CHANGES.txt                 |  4 +
 .../apache/hadoop/yarn/util/CpuTimeTracker.java |  3 +-
 .../yarn/util/ProcfsBasedProcessTree.java       | 80 +++++++++---------
 .../util/ResourceCalculatorProcessTree.java     | 66 ++++++++-------
 .../yarn/util/WindowsBasedProcessTree.java      | 21 +++--
 .../yarn/util/TestProcfsBasedProcessTree.java   | 85 ++++++++++----------
 .../util/TestResourceCalculatorProcessTree.java |  4 +-
 .../yarn/util/TestWindowsBasedProcessTree.java  | 28 +++----
 .../monitor/ContainerMetrics.java               | 12 ++-
 .../monitor/ContainersMonitorImpl.java          | 12 +--
 11 files changed, 187 insertions(+), 154 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4e4f1b88/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Task.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Task.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Task.java
index bf5ca22..80881bc 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Task.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Task.java
@@ -171,7 +171,7 @@ abstract public class Task implements Writable, Configurable {
     skipRanges.skipRangeIterator();
 
   private ResourceCalculatorProcessTree pTree;
-  private long initCpuCumulativeTime = 0;
+  private long initCpuCumulativeTime = ResourceCalculatorProcessTree.UNAVAILABLE;
 
   protected JobConf conf;
   protected MapOutputFile mapOutputFile;
@@ -866,13 +866,25 @@ abstract public class Task implements Writable, Configurable {
     }
     pTree.updateProcessTree();
     long cpuTime = pTree.getCumulativeCpuTime();
-    long pMem = pTree.getCumulativeRssmem();
-    long vMem = pTree.getCumulativeVmem();
+    long pMem = pTree.getRssMemorySize();
+    long vMem = pTree.getVirtualMemorySize();
     // Remove the CPU time consumed previously by JVM reuse
-    cpuTime -= initCpuCumulativeTime;
-    counters.findCounter(TaskCounter.CPU_MILLISECONDS).setValue(cpuTime);
-    counters.findCounter(TaskCounter.PHYSICAL_MEMORY_BYTES).setValue(pMem);
-    counters.findCounter(TaskCounter.VIRTUAL_MEMORY_BYTES).setValue(vMem);
+    if (cpuTime != ResourceCalculatorProcessTree.UNAVAILABLE &&
+        initCpuCumulativeTime != ResourceCalculatorProcessTree.UNAVAILABLE) {
+      cpuTime -= initCpuCumulativeTime;
+    }
+    
+    if (cpuTime != ResourceCalculatorProcessTree.UNAVAILABLE) {
+      counters.findCounter(TaskCounter.CPU_MILLISECONDS).setValue(cpuTime);
+    }
+    
+    if (pMem != ResourceCalculatorProcessTree.UNAVAILABLE) {
+      counters.findCounter(TaskCounter.PHYSICAL_MEMORY_BYTES).setValue(pMem);
+    }
+
+    if (vMem != ResourceCalculatorProcessTree.UNAVAILABLE) {
+      counters.findCounter(TaskCounter.VIRTUAL_MEMORY_BYTES).setValue(vMem);
+    }
   }
 
   /**
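
The Task.java hunk above shows the caller-side contract this patch introduces: the process-tree getters now return a shared UNAVAILABLE sentinel instead of 0 when a metric cannot be computed, and callers must check for it. A hedged standalone sketch of the same pattern (the pid handling is simplified and the fallback value is made up):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.yarn.util.ResourceCalculatorProcessTree;

    public class ProcessTreeSketch {
      public static void main(String[] args) {
        String pid = args.length > 0 ? args[0] : "1"; // placeholder pid
        Configuration conf = new Configuration();
        ResourceCalculatorProcessTree tree = ResourceCalculatorProcessTree
            .getResourceCalculatorProcessTree(pid, null, conf);
        if (tree == null) {
          System.out.println("no process-tree implementation for this platform");
          return;
        }
        tree.updateProcessTree();
        long rss = tree.getRssMemorySize();
        // Only report the counter if the platform could actually measure it.
        if (rss != ResourceCalculatorProcessTree.UNAVAILABLE) {
          System.out.println("RSS bytes: " + rss);
        } else {
          System.out.println("RSS not available");
        }
      }
    }
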

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4e4f1b88/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 0d07032..3c16f24 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -897,6 +897,10 @@ Release 2.7.0 - UNRELEASED
     YARN-2213. Change proxy-user cookie log in AmIpFilter to DEBUG.
     (Varun Saxena via xgong)
 
+    YARN-3304. Cleaning up ResourceCalculatorProcessTree APIs for public use and
+    removing inconsistencies in the default values. (Junping Du and Karthik
+    Kambatla via vinodkv)
+
 Release 2.6.0 - 2014-11-18
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4e4f1b88/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/CpuTimeTracker.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/CpuTimeTracker.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/CpuTimeTracker.java
index d36848e..b09a4b6 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/CpuTimeTracker.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/CpuTimeTracker.java
@@ -26,7 +26,8 @@ import java.math.BigInteger;
 @InterfaceAudience.Private
 @InterfaceStability.Unstable
 public class CpuTimeTracker {
-  public static final int UNAVAILABLE = -1;
+  public static final int UNAVAILABLE =
+      ResourceCalculatorProcessTree.UNAVAILABLE;
   final long MINIMUM_UPDATE_INTERVAL;
 
   // CPU used time since system is on (ms)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4e4f1b88/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ProcfsBasedProcessTree.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ProcfsBasedProcessTree.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ProcfsBasedProcessTree.java
index 134cec2..9996a79 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ProcfsBasedProcessTree.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ProcfsBasedProcessTree.java
@@ -140,7 +140,7 @@ public class ProcfsBasedProcessTree extends ResourceCalculatorProcessTree {
   static private String deadPid = "-1";
   private String pid = deadPid;
   static private Pattern numberPattern = Pattern.compile("[1-9][0-9]*");
-  private Long cpuTime = 0L;
+  private long cpuTime = UNAVAILABLE;
 
   protected Map<String, ProcessInfo> processTree =
     new HashMap<String, ProcessInfo>();
@@ -340,66 +340,53 @@ public class ProcfsBasedProcessTree extends ResourceCalculatorProcessTree {
     return ret.toString();
   }
 
-  /**
-   * Get the cumulative virtual memory used by all the processes in the
-   * process-tree that are older than the passed in age.
-   *
-   * @param olderThanAge processes above this age are included in the
-   *                      memory addition
-   * @return cumulative virtual memory used by the process-tree in bytes,
-   *          for processes older than this age.
-   */
   @Override
-  public long getCumulativeVmem(int olderThanAge) {
-    long total = 0;
+  public long getVirtualMemorySize(int olderThanAge) {
+    long total = UNAVAILABLE;
     for (ProcessInfo p : processTree.values()) {
       if ((p != null) && (p.getAge() > olderThanAge)) {
+        if (total == UNAVAILABLE ) {
+          total = 0;
+        }
         total += p.getVmem();
       }
     }
     return total;
   }
 
-  /**
-   * Get the cumulative resident set size (rss) memory used by all the processes
-   * in the process-tree that are older than the passed in age.
-   *
-   * @param olderThanAge processes above this age are included in the
-   *                      memory addition
-   * @return cumulative rss memory used by the process-tree in bytes,
-   *          for processes older than this age. return 0 if it cannot be
-   *          calculated
-   */
   @Override
-  public long getCumulativeRssmem(int olderThanAge) {
+  public long getRssMemorySize(int olderThanAge) {
     if (PAGE_SIZE < 0) {
-      return 0;
+      return UNAVAILABLE;
     }
     if (smapsEnabled) {
-      return getSmapBasedCumulativeRssmem(olderThanAge);
+      return getSmapBasedRssMemorySize(olderThanAge);
     }
+    boolean isAvailable = false;
     long totalPages = 0;
     for (ProcessInfo p : processTree.values()) {
       if ((p != null) && (p.getAge() > olderThanAge)) {
         totalPages += p.getRssmemPage();
+        isAvailable = true;
       }
     }
-    return totalPages * PAGE_SIZE; // convert # pages to byte
+    return isAvailable ? totalPages * PAGE_SIZE : UNAVAILABLE; // convert # pages to byte
   }
 
   /**
-   * Get the cumulative resident set size (RSS) memory used by all the processes
+   * Get the resident set size (RSS) memory used by all the processes
    * in the process-tree that are older than the passed in age. RSS is
    * calculated based on SMAP information. Skip mappings with "r--s", "r-xs"
    * permissions to get real RSS usage of the process.
    *
    * @param olderThanAge
    *          processes above this age are included in the memory addition
-   * @return cumulative rss memory used by the process-tree in bytes, for
-   *         processes older than this age. return 0 if it cannot be calculated
+   * @return rss memory used by the process-tree in bytes, for
+   * processes older than this age. return {@link #UNAVAILABLE} if it cannot
+   * be calculated.
    */
-  private long getSmapBasedCumulativeRssmem(int olderThanAge) {
-    long total = 0;
+  private long getSmapBasedRssMemorySize(int olderThanAge) {
+    long total = UNAVAILABLE;
     for (ProcessInfo p : processTree.values()) {
       if ((p != null) && (p.getAge() > olderThanAge)) {
         ProcessTreeSmapMemInfo procMemInfo = processSMAPTree.get(p.getPid());
@@ -412,6 +399,9 @@ public class ProcfsBasedProcessTree extends ResourceCalculatorProcessTree {
                   .equalsIgnoreCase(READ_EXECUTE_WITH_SHARED_PERMISSION)) {
               continue;
             }
+            if (total == UNAVAILABLE) {
+              total = 0;
+            }
             total +=
                 Math.min(info.sharedDirty, info.pss) + info.privateDirty
                     + info.privateClean;
@@ -429,30 +419,34 @@ public class ProcfsBasedProcessTree extends ResourceCalculatorProcessTree {
         }
       }
     }
-    total = (total * KB_TO_BYTES); // convert to bytes
+    if (total > 0) {
+      total *= KB_TO_BYTES; // convert to bytes
+    }
     LOG.info("SmapBasedCumulativeRssmem (bytes) : " + total);
     return total; // size
   }
 
-  /**
-   * Get the CPU time in millisecond used by all the processes in the
-   * process-tree since the process-tree created
-   *
-   * @return cumulative CPU time in millisecond since the process-tree created
-   *         return 0 if it cannot be calculated
-   */
   @Override
   public long getCumulativeCpuTime() {
     if (JIFFY_LENGTH_IN_MILLIS < 0) {
-      return 0;
+      return UNAVAILABLE;
     }
     long incJiffies = 0;
+    boolean isAvailable = false;
     for (ProcessInfo p : processTree.values()) {
       if (p != null) {
         incJiffies += p.getDtime();
+        // data is available
+        isAvailable = true;
+      }
+    }
+    if (isAvailable) {
+      // reset cpuTime to 0 instead of UNAVAILABLE
+      if (cpuTime == UNAVAILABLE) {
+        cpuTime = 0L;
       }
+      cpuTime += incJiffies * JIFFY_LENGTH_IN_MILLIS;
     }
-    cpuTime += incJiffies * JIFFY_LENGTH_IN_MILLIS;
     return cpuTime;
   }
 
@@ -1031,8 +1025,8 @@ public class ProcfsBasedProcessTree extends ResourceCalculatorProcessTree {
     System.out.println("Cpu usage  " + procfsBasedProcessTree
         .getCpuUsagePercent());
     System.out.println("Vmem usage in bytes " + procfsBasedProcessTree
-        .getCumulativeVmem());
+        .getVirtualMemorySize());
     System.out.println("Rss mem usage in bytes " + procfsBasedProcessTree
-        .getCumulativeRssmem());
+        .getRssMemorySize());
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4e4f1b88/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ResourceCalculatorProcessTree.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ResourceCalculatorProcessTree.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ResourceCalculatorProcessTree.java
index 6ee8834..3c4bf52 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ResourceCalculatorProcessTree.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ResourceCalculatorProcessTree.java
@@ -23,19 +23,23 @@ import java.lang.reflect.Constructor;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience.Public;
+import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.classification.InterfaceStability.Evolving;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
 
 /**
  * Interface class to obtain process resource usage
- *
+ * NOTE: This class should not be used by external users, but only by external
+ * developers to extend and include their own process-tree implementation, 
+ * especially for platforms other than Linux and Windows.
  */
 @Public
 @Evolving
 public abstract class ResourceCalculatorProcessTree extends Configured {
   static final Log LOG = LogFactory
       .getLog(ResourceCalculatorProcessTree.class);
+  public static final int UNAVAILABLE = -1;
 
   /**
    * Create process-tree instance with specified root process.
@@ -65,63 +69,64 @@ public abstract class ResourceCalculatorProcessTree extends Configured {
   public abstract String getProcessTreeDump();
 
   /**
-   * Get the cumulative virtual memory used by all the processes in the
+   * Get the virtual memory used by all the processes in the
    * process-tree.
    *
-   * @return cumulative virtual memory used by the process-tree in bytes.
+   * @return virtual memory used by the process-tree in bytes,
+   * {@link #UNAVAILABLE} if it cannot be calculated.
    */
-  public long getCumulativeVmem() {
-    return getCumulativeVmem(0);
+  public long getVirtualMemorySize() {
+    return getVirtualMemorySize(0);
   }
 
   /**
-   * Get the cumulative resident set size (rss) memory used by all the processes
+   * Get the resident set size (rss) memory used by all the processes
    * in the process-tree.
    *
-   * @return cumulative rss memory used by the process-tree in bytes. return 0
-   *         if it cannot be calculated
+   * @return rss memory used by the process-tree in bytes,
+   * {@link #UNAVAILABLE} if it cannot be calculated.
    */
-  public long getCumulativeRssmem() {
-    return getCumulativeRssmem(0);
+  public long getRssMemorySize() {
+    return getRssMemorySize(0);
   }
 
   /**
-   * Get the cumulative virtual memory used by all the processes in the
+   * Get the virtual memory used by all the processes in the
    * process-tree that are older than the passed in age.
    *
    * @param olderThanAge processes above this age are included in the
-   *                      memory addition
-   * @return cumulative virtual memory used by the process-tree in bytes,
-   *          for processes older than this age. return 0 if it cannot be
-   *          calculated
+   *                     memory addition
+   * @return virtual memory used by the process-tree in bytes for
+   * processes older than the specified age, {@link #UNAVAILABLE} if it
+   * cannot be calculated.
    */
-  public long getCumulativeVmem(int olderThanAge) {
-    return 0;
+  public long getVirtualMemorySize(int olderThanAge) {
+    return UNAVAILABLE;
   }
 
   /**
-   * Get the cumulative resident set size (rss) memory used by all the processes
+   * Get the resident set size (rss) memory used by all the processes
    * in the process-tree that are older than the passed in age.
    *
    * @param olderThanAge processes above this age are included in the
-   *                      memory addition
-   * @return cumulative rss memory used by the process-tree in bytes,
-   *          for processes older than this age. return 0 if it cannot be
-   *          calculated
+   *                     memory addition
+   * @return rss memory used by the process-tree in bytes for
+   * processes older than specified age, {@link #UNAVAILABLE} if it cannot be
+   * calculated.
    */
-  public long getCumulativeRssmem(int olderThanAge) {
-    return 0;
+  public long getRssMemorySize(int olderThanAge) {
+    return UNAVAILABLE;
   }
 
   /**
    * Get the CPU time in millisecond used by all the processes in the
    * process-tree since the process-tree was created
    *
-   * @return cumulative CPU time in millisecond since the process-tree created
-   *         return 0 if it cannot be calculated
+   * @return cumulative CPU time in millisecond since the process-tree
+   * created, {@link #UNAVAILABLE} if it cannot be calculated.
    */
   public long getCumulativeCpuTime() {
-    return 0;
+    return UNAVAILABLE;
   }
 
   /**
@@ -129,11 +134,11 @@ public abstract class ResourceCalculatorProcessTree extends Configured {
    * average between samples as a ratio of overall CPU cycles similar to top.
    * Thus, if 2 out of 4 cores are used this should return 200.0.
    *
-   * @return percentage CPU usage since the process-tree was created
-   *         return {@link CpuTimeTracker#UNAVAILABLE} if it cannot be calculated
+   * @return percentage CPU usage since the process-tree was created,
+   * {@link #UNAVAILABLE} if it cannot be calculated.
    */
   public float getCpuUsagePercent() {
-    return -1;
+    return UNAVAILABLE;
   }
 
   /** Verify that the tree process id is same as its process group id.
@@ -153,6 +158,7 @@ public abstract class ResourceCalculatorProcessTree extends Configured {
    * @return ResourceCalculatorProcessTree or null if ResourceCalculatorPluginTree
    *         is not available for this system.
    */
+  @Private
   public static ResourceCalculatorProcessTree getResourceCalculatorProcessTree(
     String pid, Class<? extends ResourceCalculatorProcessTree> clazz, Configuration conf) {
 

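For third-party implementations (the NOTE added above points external developers at extending this class for other platforms), the practical change is that a value which cannot be computed should now be reported as UNAVAILABLE (-1) instead of 0. A minimal sketch of such a subclass, assuming the single-argument String constructor and that updateProcessTree(), getProcessTreeDump() and checkPidPgrpidForMatch() are the abstract methods to implement; the class name and the readPlatformRss() helper are hypothetical:

import org.apache.hadoop.yarn.util.ResourceCalculatorProcessTree;

public class CustomProcessTree extends ResourceCalculatorProcessTree {

  public CustomProcessTree(String pid) {
    super(pid);
  }

  @Override
  public void updateProcessTree() {
    // platform-specific refresh of the process snapshot would go here
  }

  @Override
  public String getProcessTreeDump() {
    return "";
  }

  @Override
  public boolean checkPidPgrpidForMatch() {
    return true;
  }

  @Override
  public long getRssMemorySize(int olderThanAge) {
    long rssBytes = readPlatformRss(olderThanAge); // hypothetical platform call
    // Report UNAVAILABLE (-1) rather than 0 when the value cannot be computed.
    return rssBytes < 0 ? UNAVAILABLE : rssBytes;
  }

  // Hypothetical helper; returns a negative value when the platform cannot
  // provide an RSS reading.
  private long readPlatformRss(int olderThanAge) {
    return -1L;
  }
}

Callers that previously treated 0 as "no data" need the same adjustment, as the test updates later in this patch show.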
http://git-wip-us.apache.org/repos/asf/hadoop/blob/4e4f1b88/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/WindowsBasedProcessTree.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/WindowsBasedProcessTree.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/WindowsBasedProcessTree.java
index 5c3251f..90426be 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/WindowsBasedProcessTree.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/WindowsBasedProcessTree.java
@@ -45,8 +45,8 @@ public class WindowsBasedProcessTree extends ResourceCalculatorProcessTree {
   }
   
   private String taskProcessId = null;
-  private long cpuTimeMs = 0;
-  private Map<String, ProcessInfo> processTree = 
+  private long cpuTimeMs = UNAVAILABLE;
+  private Map<String, ProcessInfo> processTree =
       new HashMap<String, ProcessInfo>();
     
   public static boolean isAvailable() {
@@ -173,10 +173,13 @@ public class WindowsBasedProcessTree extends ResourceCalculatorProcessTree {
   }
 
   @Override
-  public long getCumulativeVmem(int olderThanAge) {
-    long total = 0;
+  public long getVirtualMemorySize(int olderThanAge) {
+    long total = UNAVAILABLE;
     for (ProcessInfo p : processTree.values()) {
       if ((p != null) && (p.age > olderThanAge)) {
+        if (total == UNAVAILABLE) {
+          total = 0;
+        }
         total += p.vmem;
       }
     }
@@ -184,10 +187,13 @@ public class WindowsBasedProcessTree extends ResourceCalculatorProcessTree {
   }
 
   @Override
-  public long getCumulativeRssmem(int olderThanAge) {
-    long total = 0;
+  public long getRssMemorySize(int olderThanAge) {
+    long total = UNAVAILABLE;
     for (ProcessInfo p : processTree.values()) {
       if ((p != null) && (p.age > olderThanAge)) {
+        if (total == UNAVAILABLE) {
+          total = 0;
+        }
         total += p.workingSet;
       }
     }
@@ -197,6 +203,9 @@ public class WindowsBasedProcessTree extends ResourceCalculatorProcessTree {
   @Override
   public long getCumulativeCpuTime() {
     for (ProcessInfo p : processTree.values()) {
+      if (cpuTimeMs == UNAVAILABLE) {
+        cpuTimeMs = 0;
+      }
       cpuTimeMs += p.cpuTimeMsDelta;
     }
     return cpuTimeMs;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4e4f1b88/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestProcfsBasedProcessTree.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestProcfsBasedProcessTree.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestProcfsBasedProcessTree.java
index d62e21d..eeeeb52 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestProcfsBasedProcessTree.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestProcfsBasedProcessTree.java
@@ -19,6 +19,7 @@
 package org.apache.hadoop.yarn.util;
 
 import static org.apache.hadoop.yarn.util.ProcfsBasedProcessTree.KB_TO_BYTES;
+import static org.apache.hadoop.yarn.util.ResourceCalculatorProcessTree.UNAVAILABLE;
 import static org.junit.Assert.fail;
 import static org.junit.Assume.assumeTrue;
 
@@ -226,8 +227,8 @@ public class TestProcfsBasedProcessTree {
     p.updateProcessTree();
     Assert.assertFalse("ProcessTree must have been gone", isAlive(pid));
     Assert.assertTrue(
-      "Cumulative vmem for the gone-process is " + p.getCumulativeVmem()
-          + " . It should be zero.", p.getCumulativeVmem() == 0);
+      "vmem for the gone-process is " + p.getVirtualMemorySize()
+          + " . It should be zero.", p.getVirtualMemorySize() == 0);
     Assert.assertTrue(p.toString().equals("[ ]"));
   }
 
@@ -429,16 +430,16 @@ public class TestProcfsBasedProcessTree {
       // build the process tree.
       processTree.updateProcessTree();
 
-      // verify cumulative memory
-      Assert.assertEquals("Cumulative virtual memory does not match", 600000L,
-        processTree.getCumulativeVmem());
+      // verify virtual memory
+      Assert.assertEquals("Virtual memory does not match", 600000L,
+        processTree.getVirtualMemorySize());
 
       // verify rss memory
       long cumuRssMem =
           ProcfsBasedProcessTree.PAGE_SIZE > 0
               ? 600L * ProcfsBasedProcessTree.PAGE_SIZE : 0L;
-      Assert.assertEquals("Cumulative rss memory does not match", cumuRssMem,
-        processTree.getCumulativeRssmem());
+      Assert.assertEquals("rss memory does not match", cumuRssMem,
+        processTree.getRssMemorySize());
 
       // verify cumulative cpu time
       long cumuCpuTime =
@@ -456,8 +457,8 @@ public class TestProcfsBasedProcessTree {
       setSmapsInProceTree(processTree, true);
       // RSS=Min(shared_dirty,PSS)+PrivateClean+PrivateDirty (exclude r-xs,
       // r--s)
-      Assert.assertEquals("Cumulative rss memory does not match",
-        (100 * KB_TO_BYTES * 3), processTree.getCumulativeRssmem());
+      Assert.assertEquals("rss memory does not match",
+        (100 * KB_TO_BYTES * 3), processTree.getRssMemorySize());
 
       // test the cpu time again to see if it cumulates
       procInfos[0] =
@@ -563,9 +564,9 @@ public class TestProcfsBasedProcessTree {
               new SystemClock());
       setSmapsInProceTree(processTree, smapEnabled);
 
-      // verify cumulative memory
+      // verify virtual memory
       Assert.assertEquals("Cumulative memory does not match", 700000L,
-        processTree.getCumulativeVmem());
+        processTree.getVirtualMemorySize());
       // write one more process as child of 100.
       String[] newPids = { "500" };
       setupPidDirs(procfsRootDir, newPids);
@@ -581,34 +582,34 @@ public class TestProcfsBasedProcessTree {
 
       // check memory includes the new process.
       processTree.updateProcessTree();
-      Assert.assertEquals("Cumulative vmem does not include new process",
-        1200000L, processTree.getCumulativeVmem());
+      Assert.assertEquals("vmem does not include new process",
+        1200000L, processTree.getVirtualMemorySize());
       if (!smapEnabled) {
         long cumuRssMem =
             ProcfsBasedProcessTree.PAGE_SIZE > 0
                 ? 1200L * ProcfsBasedProcessTree.PAGE_SIZE : 0L;
-        Assert.assertEquals("Cumulative rssmem does not include new process",
-          cumuRssMem, processTree.getCumulativeRssmem());
+        Assert.assertEquals("rssmem does not include new process",
+          cumuRssMem, processTree.getRssMemorySize());
       } else {
-        Assert.assertEquals("Cumulative rssmem does not include new process",
-          100 * KB_TO_BYTES * 4, processTree.getCumulativeRssmem());
+        Assert.assertEquals("rssmem does not include new process",
+          100 * KB_TO_BYTES * 4, processTree.getRssMemorySize());
       }
 
       // however processes older than 1 iteration will retain the older value
       Assert.assertEquals(
-        "Cumulative vmem shouldn't have included new process", 700000L,
-        processTree.getCumulativeVmem(1));
+        "vmem shouldn't have included new process", 700000L,
+        processTree.getVirtualMemorySize(1));
       if (!smapEnabled) {
         long cumuRssMem =
             ProcfsBasedProcessTree.PAGE_SIZE > 0
                 ? 700L * ProcfsBasedProcessTree.PAGE_SIZE : 0L;
         Assert.assertEquals(
-          "Cumulative rssmem shouldn't have included new process", cumuRssMem,
-          processTree.getCumulativeRssmem(1));
+          "rssmem shouldn't have included new process", cumuRssMem,
+          processTree.getRssMemorySize(1));
       } else {
         Assert.assertEquals(
-          "Cumulative rssmem shouldn't have included new process",
-          100 * KB_TO_BYTES * 3, processTree.getCumulativeRssmem(1));
+          "rssmem shouldn't have included new process",
+          100 * KB_TO_BYTES * 3, processTree.getRssMemorySize(1));
       }
 
       // one more process
@@ -629,49 +630,49 @@ public class TestProcfsBasedProcessTree {
 
       // processes older than 2 iterations should be same as before.
       Assert.assertEquals(
-        "Cumulative vmem shouldn't have included new processes", 700000L,
-        processTree.getCumulativeVmem(2));
+        "vmem shouldn't have included new processes", 700000L,
+        processTree.getVirtualMemorySize(2));
       if (!smapEnabled) {
         long cumuRssMem =
             ProcfsBasedProcessTree.PAGE_SIZE > 0
                 ? 700L * ProcfsBasedProcessTree.PAGE_SIZE : 0L;
         Assert.assertEquals(
-          "Cumulative rssmem shouldn't have included new processes",
-          cumuRssMem, processTree.getCumulativeRssmem(2));
+          "rssmem shouldn't have included new processes",
+          cumuRssMem, processTree.getRssMemorySize(2));
       } else {
         Assert.assertEquals(
-          "Cumulative rssmem shouldn't have included new processes",
-          100 * KB_TO_BYTES * 3, processTree.getCumulativeRssmem(2));
+          "rssmem shouldn't have included new processes",
+          100 * KB_TO_BYTES * 3, processTree.getRssMemorySize(2));
       }
 
       // processes older than 1 iteration should not include new process,
       // but include process 500
       Assert.assertEquals(
-        "Cumulative vmem shouldn't have included new processes", 1200000L,
-        processTree.getCumulativeVmem(1));
+        "vmem shouldn't have included new processes", 1200000L,
+        processTree.getVirtualMemorySize(1));
       if (!smapEnabled) {
         long cumuRssMem =
             ProcfsBasedProcessTree.PAGE_SIZE > 0
                 ? 1200L * ProcfsBasedProcessTree.PAGE_SIZE : 0L;
         Assert.assertEquals(
-          "Cumulative rssmem shouldn't have included new processes",
-          cumuRssMem, processTree.getCumulativeRssmem(1));
+          "rssmem shouldn't have included new processes",
+          cumuRssMem, processTree.getRssMemorySize(1));
       } else {
         Assert.assertEquals(
-          "Cumulative rssmem shouldn't have included new processes",
-          100 * KB_TO_BYTES * 4, processTree.getCumulativeRssmem(1));
+          "rssmem shouldn't have included new processes",
+          100 * KB_TO_BYTES * 4, processTree.getRssMemorySize(1));
       }
 
-      // no processes older than 3 iterations, this should be 0
+      // no processes older than 3 iterations
       Assert.assertEquals(
-        "Getting non-zero vmem for processes older than 3 iterations", 0L,
-        processTree.getCumulativeVmem(3));
+          "Getting non-zero vmem for processes older than 3 iterations",
+          UNAVAILABLE, processTree.getVirtualMemorySize(3));
       Assert.assertEquals(
-        "Getting non-zero rssmem for processes older than 3 iterations", 0L,
-        processTree.getCumulativeRssmem(3));
+          "Getting non-zero rssmem for processes older than 3 iterations",
+          UNAVAILABLE, processTree.getRssMemorySize(3));
       Assert.assertEquals(
-        "Getting non-zero rssmem for processes older than 3 iterations", 0L,
-        processTree.getCumulativeRssmem(3));
+          "Getting non-zero rssmem for processes older than 3 iterations",
+          UNAVAILABLE, processTree.getRssMemorySize(3));
     } finally {
       FileUtil.fullyDelete(procfsRootDir);
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4e4f1b88/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestResourceCalculatorProcessTree.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestResourceCalculatorProcessTree.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestResourceCalculatorProcessTree.java
index eaf7e8e..9bf525c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestResourceCalculatorProcessTree.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestResourceCalculatorProcessTree.java
@@ -41,11 +41,11 @@ public class TestResourceCalculatorProcessTree {
       return "Empty tree for testing";
     }
 
-    public long getCumulativeRssmem(int age) {
+    public long getRssMemorySize(int age) {
       return 0;
     }
 
-    public long getCumulativeVmem(int age) {
+    public long getVirtualMemorySize(int age) {
       return 0;
     }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4e4f1b88/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestWindowsBasedProcessTree.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestWindowsBasedProcessTree.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestWindowsBasedProcessTree.java
index d5b5c37..2a208a1 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestWindowsBasedProcessTree.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestWindowsBasedProcessTree.java
@@ -53,26 +53,26 @@ public class TestWindowsBasedProcessTree {
     WindowsBasedProcessTreeTester pTree = new WindowsBasedProcessTreeTester("-1");
     pTree.infoStr = "3524,1024,1024,500\r\n2844,1024,1024,500\r\n";
     pTree.updateProcessTree();
-    assertTrue(pTree.getCumulativeVmem() == 2048);
-    assertTrue(pTree.getCumulativeVmem(0) == 2048);
-    assertTrue(pTree.getCumulativeRssmem() == 2048);
-    assertTrue(pTree.getCumulativeRssmem(0) == 2048);
+    assertTrue(pTree.getVirtualMemorySize() == 2048);
+    assertTrue(pTree.getVirtualMemorySize(0) == 2048);
+    assertTrue(pTree.getRssMemorySize() == 2048);
+    assertTrue(pTree.getRssMemorySize(0) == 2048);
     assertTrue(pTree.getCumulativeCpuTime() == 1000);
 
     pTree.infoStr = "3524,1024,1024,1000\r\n2844,1024,1024,1000\r\n1234,1024,1024,1000\r\n";
     pTree.updateProcessTree();
-    assertTrue(pTree.getCumulativeVmem() == 3072);
-    assertTrue(pTree.getCumulativeVmem(1) == 2048);
-    assertTrue(pTree.getCumulativeRssmem() == 3072);
-    assertTrue(pTree.getCumulativeRssmem(1) == 2048);
-    assertTrue(pTree.getCumulativeCpuTime() == 3000);    
+    assertTrue(pTree.getVirtualMemorySize() == 3072);
+    assertTrue(pTree.getVirtualMemorySize(1) == 2048);
+    assertTrue(pTree.getRssMemorySize() == 3072);
+    assertTrue(pTree.getRssMemorySize(1) == 2048);
+    assertTrue(pTree.getCumulativeCpuTime() == 3000);
 
     pTree.infoStr = "3524,1024,1024,1500\r\n2844,1024,1024,1500\r\n";
     pTree.updateProcessTree();
-    assertTrue(pTree.getCumulativeVmem() == 2048);
-    assertTrue(pTree.getCumulativeVmem(2) == 2048);
-    assertTrue(pTree.getCumulativeRssmem() == 2048);
-    assertTrue(pTree.getCumulativeRssmem(2) == 2048);
-    assertTrue(pTree.getCumulativeCpuTime() == 4000);    
+    assertTrue(pTree.getVirtualMemorySize() == 2048);
+    assertTrue(pTree.getVirtualMemorySize(2) == 2048);
+    assertTrue(pTree.getRssMemorySize() == 2048);
+    assertTrue(pTree.getRssMemorySize(2) == 2048);
+    assertTrue(pTree.getCumulativeCpuTime() == 4000);
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4e4f1b88/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainerMetrics.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainerMetrics.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainerMetrics.java
index 1375da8..ffa72a4 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainerMetrics.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainerMetrics.java
@@ -188,13 +188,19 @@ public class ContainerMetrics implements MetricsSource {
   }
 
   public void recordMemoryUsage(int memoryMBs) {
-    this.pMemMBsStat.add(memoryMBs);
+    if (memoryMBs >= 0) {
+      this.pMemMBsStat.add(memoryMBs);
+    }
   }
 
   public void recordCpuUsage(
       int totalPhysicalCpuPercent, int milliVcoresUsed) {
-    this.cpuCoreUsagePercent.add(totalPhysicalCpuPercent);
-    this.milliVcoresUsed.add(milliVcoresUsed);
+    if (totalPhysicalCpuPercent >= 0) {
+      this.cpuCoreUsagePercent.add(totalPhysicalCpuPercent);
+    }
+    if (milliVcoresUsed >= 0) {
+      this.milliVcoresUsed.add(milliVcoresUsed);
+    }
   }
 
   public void recordProcessId(String processId) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4e4f1b88/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainersMonitorImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainersMonitorImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainersMonitorImpl.java
index b587e46..5153051 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainersMonitorImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainersMonitorImpl.java
@@ -333,10 +333,10 @@ public class ContainersMonitorImpl extends AbstractService implements
   // method provided just for easy testing purposes
   boolean isProcessTreeOverLimit(ResourceCalculatorProcessTree pTree,
       String containerId, long limit) {
-    long currentMemUsage = pTree.getCumulativeVmem();
+    long currentMemUsage = pTree.getVirtualMemorySize();
     // as processes begin with an age 1, we want to see if there are processes
     // more than 1 iteration old.
-    long curMemUsageOfAgedProcesses = pTree.getCumulativeVmem(1);
+    long curMemUsageOfAgedProcesses = pTree.getVirtualMemorySize(1);
     return isProcessTreeOverLimit(containerId, currentMemUsage,
                                   curMemUsageOfAgedProcesses, limit);
   }
@@ -437,8 +437,8 @@ public class ContainersMonitorImpl extends AbstractService implements
                 + " ContainerId = " + containerId);
             ResourceCalculatorProcessTree pTree = ptInfo.getProcessTree();
             pTree.updateProcessTree();    // update process-tree
-            long currentVmemUsage = pTree.getCumulativeVmem();
-            long currentPmemUsage = pTree.getCumulativeRssmem();
+            long currentVmemUsage = pTree.getVirtualMemorySize();
+            long currentPmemUsage = pTree.getRssMemorySize();
             // if machine has 6 cores and 3 are used,
             // cpuUsagePercentPerCore should be 300% and
             // cpuUsageTotalCoresPercentage should be 50%
@@ -451,8 +451,8 @@ public class ContainersMonitorImpl extends AbstractService implements
                 * maxVCoresAllottedForContainers /nodeCpuPercentageForYARN);
             // as processes begin with an age 1, we want to see if there
             // are processes more than 1 iteration old.
-            long curMemUsageOfAgedProcesses = pTree.getCumulativeVmem(1);
-            long curRssMemUsageOfAgedProcesses = pTree.getCumulativeRssmem(1);
+            long curMemUsageOfAgedProcesses = pTree.getVirtualMemorySize(1);
+            long curRssMemUsageOfAgedProcesses = pTree.getRssMemorySize(1);
             long vmemLimit = ptInfo.getVmemLimit();
             long pmemLimit = ptInfo.getPmemLimit();
             LOG.info(String.format(

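Together with the ContainerMetrics guards above, the intent is that monitoring code treats an UNAVAILABLE reading as "no sample", not as zero usage. A standalone sketch of that caller-side handling; the method, variable names and limit are illustrative and not taken from ContainersMonitorImpl:

import org.apache.hadoop.yarn.util.ResourceCalculatorProcessTree;

public class MemorySampleCheck {

  // Returns true only when a real, non-UNAVAILABLE reading exceeds the limit.
  static boolean overPhysicalLimit(ResourceCalculatorProcessTree pTree,
      long pmemLimitBytes) {
    long rssBytes = pTree.getRssMemorySize();
    if (rssBytes == ResourceCalculatorProcessTree.UNAVAILABLE) {
      // No data on this platform or in this interval: skip the sample rather
      // than recording it as zero or comparing it against the limit.
      return false;
    }
    return rssBytes > pmemLimitBytes;
  }
}

This is also why recordMemoryUsage() and recordCpuUsage() above now ignore negative inputs instead of adding them to the metrics.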

[11/20] hadoop git commit: HDFS-7890. Improve information on Top users for metrics in RollingWindowsManager and lower log level (Contributed by J.Andreina)

Posted by zj...@apache.org.
HDFS-7890. Improve information on Top users for metrics in RollingWindowsManager and lower log level (Contributed by J.Andreina)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/dc441418
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/dc441418
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/dc441418

Branch: refs/heads/YARN-2928
Commit: dc441418edd626bf67e8019cb6c8ee7bd5a29a62
Parents: 53e3d8c
Author: Vinayakumar B <vi...@apache.org>
Authored: Mon Mar 30 10:02:48 2015 +0530
Committer: Zhijie Shen <zj...@apache.org>
Committed: Mon Mar 30 12:10:48 2015 -0700

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt                       | 3 +++
 .../hdfs/server/namenode/top/window/RollingWindowManager.java     | 2 +-
 2 files changed, 4 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/dc441418/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index f4991da..9b1cc3e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -353,6 +353,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-6408. Remove redundant definitions in log4j.properties.
     (Abhiraj Butala via aajisaka)
 
+    HDFS-7890. Improve information on Top users for metrics in
+    RollingWindowsManager and lower log level (J.Andreina via vinayakumarb)
+
   OPTIMIZATIONS
 
   BUG FIXES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dc441418/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/window/RollingWindowManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/window/RollingWindowManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/window/RollingWindowManager.java
index 00e7087..4759cc8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/window/RollingWindowManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/window/RollingWindowManager.java
@@ -245,7 +245,7 @@ public class RollingWindowManager {
           metricName, userName, windowSum);
       topN.offer(new NameValuePair(userName, windowSum));
     }
-    LOG.info("topN size for command {} is: {}", metricName, topN.size());
+    LOG.debug("topN users size for command {} is: {}", metricName, topN.size());
     return topN;
   }
 

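The message now goes out at debug level with parameterized placeholders (the {} syntax suggests an SLF4J-style logger, which is an assumption here). A small sketch of that logging style using a hypothetical class:

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class TopNLogSketch {
  private static final Logger LOG = LoggerFactory.getLogger(TopNLogSketch.class);

  void report(String metricName, int topNSize) {
    // The string is only formatted when debug logging is enabled, so lowering
    // the level removes the per-operation log noise at default settings.
    LOG.debug("topN users size for command {} is: {}", metricName, topNSize);
  }
}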

[14/20] hadoop git commit: HDFS-4396. Add START_MSG/SHUTDOWN_MSG for ZKFC. Contributed by Liang Xie.

Posted by zj...@apache.org.
HDFS-4396. Add START_MSG/SHUTDOWN_MSG for ZKFC. Contributed by Liang Xie.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1bfe248d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1bfe248d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1bfe248d

Branch: refs/heads/YARN-2928
Commit: 1bfe248dae743b7b045c9b5363f4ebe6757b7db7
Parents: dc44141
Author: Harsh J <ha...@cloudera.com>
Authored: Mon Mar 30 15:21:18 2015 +0530
Committer: Zhijie Shen <zj...@apache.org>
Committed: Mon Mar 30 12:10:48 2015 -0700

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt                       | 3 +++
 .../org/apache/hadoop/hdfs/tools/DFSZKFailoverController.java     | 2 ++
 2 files changed, 5 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1bfe248d/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 9b1cc3e..f437ad8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -323,6 +323,9 @@ Release 2.8.0 - UNRELEASED
 
   IMPROVEMENTS
 
+    HDFS-4396. Add START_MSG/SHUTDOWN_MSG for ZKFC
+    (Liang Xie via harsh)
+
     HDFS-7875. Improve log message when wrong value configured for
     dfs.datanode.failed.volumes.tolerated.
     (nijel via harsh)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1bfe248d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSZKFailoverController.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSZKFailoverController.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSZKFailoverController.java
index 85f77f1..4e256a2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSZKFailoverController.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSZKFailoverController.java
@@ -167,6 +167,8 @@ public class DFSZKFailoverController extends ZKFailoverController {
 
   public static void main(String args[])
       throws Exception {
+    StringUtils.startupShutdownMessage(DFSZKFailoverController.class,
+        args, LOG);
     if (DFSUtil.parseHelpArgument(args, 
         ZKFailoverController.USAGE, System.out, true)) {
       System.exit(0);

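StringUtils.startupShutdownMessage() is the standard Hadoop daemon banner helper: it logs a START_MSG block at launch and arranges for a SHUTDOWN_MSG block to be logged on exit. A hedged sketch of the same pattern applied to a hypothetical daemon class (MyDaemon is not a real Hadoop class):

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.util.StringUtils;

public class MyDaemon {
  private static final Log LOG = LogFactory.getLog(MyDaemon.class);

  public static void main(String[] args) {
    // Prints the startup banner (host, args, version, build info) and registers
    // the shutdown banner to be logged when the JVM exits.
    StringUtils.startupShutdownMessage(MyDaemon.class, args, LOG);
    // ... daemon initialization would follow here ...
  }
}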

[04/20] hadoop git commit: HADOOP-11760. Fix typo of javadoc in DistCp. Contributed by Brahma Reddy Battula.

Posted by zj...@apache.org.
HADOOP-11760. Fix typo of javadoc in DistCp. Contributed by Brahma Reddy Battula.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a639fdd4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a639fdd4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a639fdd4

Branch: refs/heads/YARN-2928
Commit: a639fdd43b6ade9637e18c30e3e2dfb5d940ceb2
Parents: f402f6d
Author: Tsuyoshi Ozawa <oz...@apache.org>
Authored: Fri Mar 27 23:15:51 2015 +0900
Committer: Zhijie Shen <zj...@apache.org>
Committed: Mon Mar 30 12:10:46 2015 -0700

----------------------------------------------------------------------
 hadoop-common-project/hadoop-common/CHANGES.txt                   | 3 +++
 .../src/main/java/org/apache/hadoop/tools/DistCp.java             | 2 +-
 2 files changed, 4 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a639fdd4/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index a7d4adc..febbf6b 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -481,6 +481,9 @@ Release 2.8.0 - UNRELEASED
     HADOOP-11724. DistCp throws NPE when the target directory is root.
     (Lei Eddy Xu via Yongjun Zhang) 
 
+    HADOOP-11760. Fix typo of javadoc in DistCp. (Brahma Reddy Battula via
+    ozawa).
+
 Release 2.7.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a639fdd4/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCp.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCp.java b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCp.java
index ada4b25..6921a1e 100644
--- a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCp.java
+++ b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCp.java
@@ -401,7 +401,7 @@ public class DistCp extends Configured implements Tool {
    * job staging directory
    *
    * @return Returns the working folder information
-   * @throws Exception - EXception if any
+   * @throws Exception - Exception if any
    */
   private Path createMetaFolderPath() throws Exception {
     Configuration configuration = getConf();


[02/20] hadoop git commit: MAPREDUCE-6294. Remove an extra parameter described in Javadoc of TockenCache. Contributed by Brahma Reddy Battula.

Posted by zj...@apache.org.
MAPREDUCE-6294. Remove an extra parameter described in Javadoc of TockenCache. Contributed by Brahma Reddy Battula.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/597feebe
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/597feebe
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/597feebe

Branch: refs/heads/YARN-2928
Commit: 597feebeb81a715874fcd4c62dc84ad37a5e49d2
Parents: a639fdd
Author: Tsuyoshi Ozawa <oz...@apache.org>
Authored: Sat Mar 28 00:08:35 2015 +0900
Committer: Zhijie Shen <zj...@apache.org>
Committed: Mon Mar 30 12:10:46 2015 -0700

----------------------------------------------------------------------
 hadoop-mapreduce-project/CHANGES.txt                              | 3 +++
 .../java/org/apache/hadoop/mapreduce/security/TokenCache.java     | 1 -
 2 files changed, 3 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/597feebe/hadoop-mapreduce-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt
index 9d6f1d4..ce16510 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -305,6 +305,9 @@ Release 2.8.0 - UNRELEASED
     MAPREDUCE-6242. Progress report log is incredibly excessive in 
     application master. (Varun Saxena via devaraj)
 
+    MAPREDUCE-6294. Remove an extra parameter described in Javadoc of
+    TockenCache. (Brahma Reddy Battula via ozawa)
+
 Release 2.7.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/597feebe/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/security/TokenCache.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/security/TokenCache.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/security/TokenCache.java
index 7b1f657..6c0de1b 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/security/TokenCache.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/security/TokenCache.java
@@ -105,7 +105,6 @@ public class TokenCache {
    * get delegation token for a specific FS
    * @param fs
    * @param credentials
-   * @param p
    * @param conf
    * @throws IOException
    */


[05/20] hadoop git commit: HDFS-7501. TransactionsSinceLastCheckpoint can be negative on SBNs. Contributed by Gautam Gopalakrishnan.

Posted by zj...@apache.org.
HDFS-7501. TransactionsSinceLastCheckpoint can be negative on SBNs. Contributed by Gautam Gopalakrishnan.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7d4d6150
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7d4d6150
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7d4d6150

Branch: refs/heads/YARN-2928
Commit: 7d4d6150f8c81a242f7676e27d65db9f31136007
Parents: 74e941d
Author: Harsh J <ha...@cloudera.com>
Authored: Sun Mar 29 00:45:01 2015 +0530
Committer: Zhijie Shen <zj...@apache.org>
Committed: Mon Mar 30 12:10:47 2015 -0700

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |  3 +
 .../hdfs/server/namenode/FSNamesystem.java      |  2 +-
 .../namenode/metrics/TestNameNodeMetrics.java   | 84 ++++++++++++++++++++
 3 files changed, 88 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7d4d6150/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index f7cc2bc..496db06 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -351,6 +351,9 @@ Release 2.8.0 - UNRELEASED
 
   BUG FIXES
 
+    HDFS-7501. TransactionsSinceLastCheckpoint can be negative on SBNs.
+    (Gautam Gopalakrishnan via harsh)
+
     HDFS-5356. MiniDFSCluster should close all open FileSystems when shutdown()
     (Rakesh R via vinayakumarb)
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7d4d6150/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index d0999b8..0e0f484 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -4784,7 +4784,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
   @Metric({"TransactionsSinceLastCheckpoint",
       "Number of transactions since last checkpoint"})
   public long getTransactionsSinceLastCheckpoint() {
-    return getEditLog().getLastWrittenTxId() -
+    return getFSImage().getLastAppliedOrWrittenTxId() -
         getFSImage().getStorage().getMostRecentCheckpointTxId();
   }
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7d4d6150/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
index 011db3c..64ea1e4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
@@ -22,12 +22,16 @@ import static org.apache.hadoop.test.MetricsAsserts.assertCounter;
 import static org.apache.hadoop.test.MetricsAsserts.assertGauge;
 import static org.apache.hadoop.test.MetricsAsserts.assertQuantileGauges;
 import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
+import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 
 import java.io.DataInputStream;
 import java.io.IOException;
 import java.util.Random;
+import com.google.common.collect.ImmutableList;
+import com.google.common.io.Files;
 
+import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
@@ -39,6 +43,7 @@ import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.MiniDFSNNTopology;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
@@ -47,7 +52,9 @@ import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
+import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
+import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;
 import org.apache.hadoop.hdfs.server.namenode.top.TopAuditLogger;
 import org.apache.hadoop.metrics2.MetricsRecordBuilder;
 import org.apache.hadoop.metrics2.MetricsSource;
@@ -69,6 +76,7 @@ public class TestNameNodeMetrics {
     new Path("/testNameNodeMetrics");
   private static final String NN_METRICS = "NameNodeActivity";
   private static final String NS_METRICS = "FSNamesystem";
+  public static final Log LOG = LogFactory.getLog(TestNameNodeMetrics.class);
   
   // Number of datanodes in the cluster
   private static final int DATANODE_COUNT = 3; 
@@ -400,6 +408,82 @@ public class TestNameNodeMetrics {
   }
   
   /**
+   * Testing TransactionsSinceLastCheckpoint. Need a new cluster as
+   * the other tests in here don't use HA. See HDFS-7501.
+   */
+  @Test(timeout = 300000)
+  public void testTransactionSinceLastCheckpointMetrics() throws Exception {
+    Random random = new Random();
+    int retryCount = 0;
+    while (retryCount < 5) {
+      try {
+        int basePort = 10060 + random.nextInt(100) * 2;
+        MiniDFSNNTopology topology = new MiniDFSNNTopology()
+            .addNameservice(new MiniDFSNNTopology.NSConf("ns1")
+            .addNN(new MiniDFSNNTopology.NNConf("nn1").setHttpPort(basePort))
+            .addNN(new MiniDFSNNTopology.NNConf("nn2").setHttpPort(basePort + 1)));
+
+        HdfsConfiguration conf2 = new HdfsConfiguration();
+        // Lower the checkpoint condition for purpose of testing.
+        conf2.setInt(
+            DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_TXNS_KEY,
+            100);
+        // Check for checkpoint condition very often, for purpose of testing.
+        conf2.setInt(
+            DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_CHECK_PERIOD_KEY,
+            1);
+        // Poll and follow ANN txns very often, for purpose of testing.
+        conf2.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
+        MiniDFSCluster cluster2 = new MiniDFSCluster.Builder(conf2)
+            .nnTopology(topology).numDataNodes(1).build();
+        cluster2.waitActive();
+        DistributedFileSystem fs2 = cluster2.getFileSystem(0);
+        NameNode nn0 = cluster2.getNameNode(0);
+        NameNode nn1 = cluster2.getNameNode(1);
+        cluster2.transitionToActive(0);
+        fs2.mkdirs(new Path("/tmp-t1"));
+        fs2.mkdirs(new Path("/tmp-t2"));
+        HATestUtil.waitForStandbyToCatchUp(nn0, nn1);
+        // Test to ensure tracking works before the first-ever
+        // checkpoint.
+        assertEquals("SBN failed to track 2 transactions pre-checkpoint.",
+            4L, // 2 txns added further when catch-up is called.
+            cluster2.getNameNode(1).getNamesystem()
+              .getTransactionsSinceLastCheckpoint());
+        // Complete up to the boundary required for
+        // an auto-checkpoint. Using 94 to expect fsimage
+        // rounded at 100, as 4 + 94 + 2 (catch-up call) = 100.
+        for (int i = 1; i <= 94; i++) {
+          fs2.mkdirs(new Path("/tmp-" + i));
+        }
+        HATestUtil.waitForStandbyToCatchUp(nn0, nn1);
+        // Assert 100 transactions in checkpoint.
+        HATestUtil.waitForCheckpoint(cluster2, 1, ImmutableList.of(100));
+        // Test to ensure number tracks the right state of
+        // uncheckpointed edits, and does not go negative
+        // (as fixed in HDFS-7501).
+        assertEquals("Should be zero right after the checkpoint.",
+            0L,
+            cluster2.getNameNode(1).getNamesystem()
+              .getTransactionsSinceLastCheckpoint());
+        fs2.mkdirs(new Path("/tmp-t3"));
+        fs2.mkdirs(new Path("/tmp-t4"));
+        HATestUtil.waitForStandbyToCatchUp(nn0, nn1);
+        // Test to ensure we track the right numbers after
+        // the checkpoint resets it to zero again.
+        assertEquals("SBN failed to track 2 added txns after the ckpt.",
+            4L,
+            cluster2.getNameNode(1).getNamesystem()
+              .getTransactionsSinceLastCheckpoint());
+        cluster2.shutdown();
+        break;
+      } catch (Exception e) {
+        LOG.warn("Unable to set up HA cluster, exception thrown: " + e);
+        retryCount++;
+      }
+    }
+  }
+  /**
    * Test NN checkpoint and transaction-related metrics.
    */
   @Test


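The underlying issue is that a standby NameNode applies tailed edits but does not write them, so the edit log's last written txid can lag the most recent checkpoint txid and the old subtraction goes negative. A small sketch with illustrative txid values (not taken from a real cluster), assuming getLastAppliedOrWrittenTxId() effectively yields the larger of the last applied and last written txids:

public class SbnCheckpointMetricSketch {
  public static void main(String[] args) {
    // Illustrative values for a standby NameNode (SBN).
    long lastWrittenTxId = 90;       // stale on an SBN, which does not write edits
    long lastAppliedTxId = 150;      // edits tailed and applied from the active NN
    long lastCheckpointTxId = 100;   // txid of the most recent checkpoint

    // Old formula: written - checkpoint, negative on the SBN.
    long oldMetric = lastWrittenTxId - lastCheckpointTxId;               // -10

    // Patched formula: applied-or-written - checkpoint.
    long appliedOrWritten = Math.max(lastAppliedTxId, lastWrittenTxId);  // 150
    long newMetric = appliedOrWritten - lastCheckpointTxId;              // 50

    System.out.println("old=" + oldMetric + ", new=" + newMetric);
  }
}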
[08/20] hadoop git commit: YARN-3288. Document and fix indentation in the DockerContainerExecutor code

Posted by zj...@apache.org.
YARN-3288. Document and fix indentation in the DockerContainerExecutor code


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/74e941da
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/74e941da
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/74e941da

Branch: refs/heads/YARN-2928
Commit: 74e941daeb7c8d2d60e4949364a0fcdf2983fe04
Parents: fa7cc99
Author: Ravi Prakash <ra...@altiscale.com>
Authored: Sat Mar 28 08:00:41 2015 -0700
Committer: Zhijie Shen <zj...@apache.org>
Committed: Mon Mar 30 12:10:47 2015 -0700

----------------------------------------------------------------------
 hadoop-yarn-project/CHANGES.txt                 |   2 +
 .../server/nodemanager/ContainerExecutor.java   |  18 +-
 .../nodemanager/DockerContainerExecutor.java    | 229 +++++++++++--------
 .../launcher/ContainerLaunch.java               |   8 +-
 .../TestDockerContainerExecutor.java            |  98 ++++----
 .../TestDockerContainerExecutorWithMocks.java   | 110 +++++----
 6 files changed, 277 insertions(+), 188 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/74e941da/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index cd39b1a..0d07032 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -131,6 +131,8 @@ Release 2.8.0 - UNRELEASED
 
     YARN-3397. yarn rmadmin should skip -failover. (J.Andreina via kasha)
 
+    YARN-3288. Document and fix indentation in the DockerContainerExecutor code
+
   OPTIMIZATIONS
 
     YARN-3339. TestDockerContainerExecutor should pull a single image and not

http://git-wip-us.apache.org/repos/asf/hadoop/blob/74e941da/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java
index 377fd1d..1c670a1 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java
@@ -210,8 +210,22 @@ public abstract class ContainerExecutor implements Configurable {
     }
   }
 
-  public void writeLaunchEnv(OutputStream out, Map<String, String> environment, Map<Path, List<String>> resources, List<String> command) throws IOException{
-    ContainerLaunch.ShellScriptBuilder sb = ContainerLaunch.ShellScriptBuilder.create();
+  /**
+   * This method writes out the launch environment of a container. This can be
+   * overridden by extending ContainerExecutors to provide different behaviors
+   * @param out the output stream to which the environment is written (usually
+   * a script file which will be executed by the Launcher)
+   * @param environment The environment variables and their values
+   * @param resources The resources which have been localized for this container
+   * Symlinks will be created to these localized resources
+   * @param command The command that will be run.
+   * @throws IOException if any errors happened writing to the OutputStream,
+   * while creating symlinks
+   */
+  public void writeLaunchEnv(OutputStream out, Map<String, String> environment,
+    Map<Path, List<String>> resources, List<String> command) throws IOException{
+    ContainerLaunch.ShellScriptBuilder sb =
+      ContainerLaunch.ShellScriptBuilder.create();
     if (environment != null) {
       for (Map.Entry<String,String> env : environment.entrySet()) {
         sb.env(env.getKey().toString(), env.getValue().toString());

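The javadoc added above documents writeLaunchEnv() for extending executors. A hedged usage sketch against that signature; the executor instance, file paths and values below are illustrative only:

import java.io.FileOutputStream;
import java.io.IOException;
import java.io.OutputStream;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.yarn.server.nodemanager.ContainerExecutor;

public class LaunchScriptSketch {

  // Writes a launch script for a trivial command; all concrete values are made up.
  static void writeScript(ContainerExecutor exec) throws IOException {
    Map<String, String> env = new HashMap<String, String>();
    env.put("JAVA_HOME", "/usr/lib/jvm/default");               // illustrative

    Map<Path, List<String>> resources = new HashMap<Path, List<String>>();
    resources.put(new Path("/tmp/nm-local/job.jar"),            // localized file
        Collections.singletonList("job.jar"));                  // symlink name

    List<String> command = Arrays.asList("bash", "-c", "echo hello");

    try (OutputStream out = new FileOutputStream("/tmp/launch_container.sh")) {
      exec.writeLaunchEnv(out, env, resources, command);
    }
  }
}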
http://git-wip-us.apache.org/repos/asf/hadoop/blob/74e941da/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DockerContainerExecutor.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DockerContainerExecutor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DockerContainerExecutor.java
index c854173..71eaa04 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DockerContainerExecutor.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DockerContainerExecutor.java
@@ -18,10 +18,24 @@
 
 package org.apache.hadoop.yarn.server.nodemanager;
 
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Joiner;
-import com.google.common.base.Preconditions;
-import com.google.common.base.Strings;
+import static org.apache.hadoop.fs.CreateFlag.CREATE;
+import static org.apache.hadoop.fs.CreateFlag.OVERWRITE;
+
+import java.io.ByteArrayOutputStream;
+import java.io.DataOutputStream;
+import java.io.File;
+import java.io.IOException;
+import java.io.OutputStream;
+import java.io.PrintStream;
+import java.net.InetSocketAddress;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.EnumSet;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.regex.Pattern;
 
 import org.apache.commons.lang.math.RandomUtils;
 import org.apache.commons.logging.Log;
@@ -45,38 +59,35 @@ import org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.Conta
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.ContainerLocalizer;
 import org.apache.hadoop.yarn.util.ConverterUtils;
 
-import java.io.ByteArrayOutputStream;
-import java.io.DataOutputStream;
-import java.io.File;
-import java.io.IOException;
-import java.io.OutputStream;
-import java.io.PrintStream;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.EnumSet;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Random;
-import java.util.Set;
-import java.util.regex.Pattern;
-import java.net.InetSocketAddress;
-import static org.apache.hadoop.fs.CreateFlag.CREATE;
-import static org.apache.hadoop.fs.CreateFlag.OVERWRITE;
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Joiner;
+import com.google.common.base.Preconditions;
+import com.google.common.base.Strings;
 
 /**
- * This executor will launch a docker container and run the task inside the container.
+ * This executor will launch and run tasks inside Docker containers. It
+ * currently only supports simple authentication mode. It shares a lot of code
+ * with the DefaultContainerExecutor (and it may make sense to pull out those
+ * common pieces later).
  */
 public class DockerContainerExecutor extends ContainerExecutor {
-
   private static final Log LOG = LogFactory
-      .getLog(DockerContainerExecutor.class);
-  public static final String DOCKER_CONTAINER_EXECUTOR_SCRIPT = "docker_container_executor";
-  public static final String DOCKER_CONTAINER_EXECUTOR_SESSION_SCRIPT = "docker_container_executor_session";
-
-  // This validates that the image is a proper docker image and would not crash docker.
-  public static final String DOCKER_IMAGE_PATTERN = "^(([\\w\\.-]+)(:\\d+)*\\/)?[\\w\\.:-]+$";
-
+    .getLog(DockerContainerExecutor.class);
+  //The name of the script file that will launch the Docker containers
+  public static final String DOCKER_CONTAINER_EXECUTOR_SCRIPT =
+    "docker_container_executor";
+  //The name of the session script that the DOCKER_CONTAINER_EXECUTOR_SCRIPT
+  //launches in turn
+  public static final String DOCKER_CONTAINER_EXECUTOR_SESSION_SCRIPT =
+    "docker_container_executor_session";
+
+  //This validates that the image is a proper docker image and would not crash
+  //docker. The image name is not allowed to contain spaces. e.g.
+  //registry.somecompany.com:9999/containername:0.1 or
+  //containername:0.1 or
+  //containername
+  public static final String DOCKER_IMAGE_PATTERN =
+    "^(([\\w\\.-]+)(:\\d+)*\\/)?[\\w\\.:-]+$";
 
   private final FileContext lfs;
   private final Pattern dockerImagePattern;
@@ -96,23 +107,26 @@ public class DockerContainerExecutor extends ContainerExecutor {
 
   @Override
   public void init() throws IOException {
-    String auth = getConf().get(CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION);
+    String auth =
+      getConf().get(CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION);
     if (auth != null && !auth.equals("simple")) {
-      throw new IllegalStateException("DockerContainerExecutor only works with simple authentication mode");
+      throw new IllegalStateException(
+        "DockerContainerExecutor only works with simple authentication mode");
     }
-    String dockerExecutor = getConf().get(YarnConfiguration.NM_DOCKER_CONTAINER_EXECUTOR_EXEC_NAME,
+    String dockerExecutor = getConf().get(
+      YarnConfiguration.NM_DOCKER_CONTAINER_EXECUTOR_EXEC_NAME,
       YarnConfiguration.NM_DEFAULT_DOCKER_CONTAINER_EXECUTOR_EXEC_NAME);
     if (!new File(dockerExecutor).exists()) {
-      throw new IllegalStateException("Invalid docker exec path: " + dockerExecutor);
+      throw new IllegalStateException(
+        "Invalid docker exec path: " + dockerExecutor);
     }
   }
 
   @Override
   public synchronized void startLocalizer(Path nmPrivateContainerTokensPath,
-                                          InetSocketAddress nmAddr, String user, String appId, String locId,
-                                          LocalDirsHandlerService dirsHandler)
+    InetSocketAddress nmAddr, String user, String appId, String locId,
+    LocalDirsHandlerService dirsHandler)
     throws IOException, InterruptedException {
-
     List<String> localDirs = dirsHandler.getLocalDirs();
     List<String> logDirs = dirsHandler.getLogDirs();
 
@@ -128,7 +142,8 @@ public class DockerContainerExecutor extends ContainerExecutor {
     // randomly choose the local directory
     Path appStorageDir = getWorkingDir(localDirs, user, appId);
 
-    String tokenFn = String.format(ContainerLocalizer.TOKEN_FILE_NAME_FMT, locId);
+    String tokenFn =
+      String.format(ContainerLocalizer.TOKEN_FILE_NAME_FMT, locId);
     Path tokenDst = new Path(appStorageDir, tokenFn);
     copyFile(nmPrivateContainerTokensPath, tokenDst, user);
     LOG.info("Copying from " + nmPrivateContainerTokensPath + " to " + tokenDst);
@@ -140,31 +155,34 @@ public class DockerContainerExecutor extends ContainerExecutor {
 
 
   @Override
-  public int launchContainer(Container container,
-                             Path nmPrivateContainerScriptPath, Path nmPrivateTokensPath,
-                             String userName, String appId, Path containerWorkDir,
-                             List<String> localDirs, List<String> logDirs) throws IOException {
+  public int launchContainer(Container container, Path
+    nmPrivateContainerScriptPath, Path nmPrivateTokensPath, String userName,
+    String appId, Path containerWorkDir, List<String> localDirs, List<String>
+    logDirs) throws IOException {
+    //Variables for the launch environment can be injected from the command-line
+    //while submitting the application
     String containerImageName = container.getLaunchContext().getEnvironment()
-        .get(YarnConfiguration.NM_DOCKER_CONTAINER_EXECUTOR_IMAGE_NAME);
+      .get(YarnConfiguration.NM_DOCKER_CONTAINER_EXECUTOR_IMAGE_NAME);
     if (LOG.isDebugEnabled()) {
       LOG.debug("containerImageName from launchContext: " + containerImageName);
     }
-    Preconditions.checkArgument(!Strings.isNullOrEmpty(containerImageName), "Container image must not be null");
+    Preconditions.checkArgument(!Strings.isNullOrEmpty(containerImageName),
+      "Container image must not be null");
     containerImageName = containerImageName.replaceAll("['\"]", "");
 
-    Preconditions.checkArgument(saneDockerImage(containerImageName), "Image: " + containerImageName + " is not a proper docker image");
-    String dockerExecutor = getConf().get(YarnConfiguration.NM_DOCKER_CONTAINER_EXECUTOR_EXEC_NAME,
-        YarnConfiguration.NM_DEFAULT_DOCKER_CONTAINER_EXECUTOR_EXEC_NAME);
+    Preconditions.checkArgument(saneDockerImage(containerImageName), "Image: "
+      + containerImageName + " is not a proper docker image");
+    String dockerExecutor = getConf().get(
+      YarnConfiguration.NM_DOCKER_CONTAINER_EXECUTOR_EXEC_NAME,
+      YarnConfiguration.NM_DEFAULT_DOCKER_CONTAINER_EXECUTOR_EXEC_NAME);
 
     FsPermission dirPerm = new FsPermission(APPDIR_PERM);
     ContainerId containerId = container.getContainerId();
 
     // create container dirs on all disks
     String containerIdStr = ConverterUtils.toString(containerId);
-    String appIdStr =
-        ConverterUtils.toString(
-            containerId.getApplicationAttemptId().
-                getApplicationId());
+    String appIdStr = ConverterUtils.toString(
+      containerId.getApplicationAttemptId().getApplicationId());
     for (String sLocalDir : localDirs) {
       Path usersdir = new Path(sLocalDir, ContainerLocalizer.USERCACHE);
       Path userdir = new Path(usersdir, userName);
@@ -178,46 +196,57 @@ public class DockerContainerExecutor extends ContainerExecutor {
     createContainerLogDirs(appIdStr, containerIdStr, logDirs, userName);
 
     Path tmpDir = new Path(containerWorkDir,
-        YarnConfiguration.DEFAULT_CONTAINER_TEMP_DIR);
+      YarnConfiguration.DEFAULT_CONTAINER_TEMP_DIR);
     createDir(tmpDir, dirPerm, false, userName);
 
     // copy launch script to work dir
     Path launchDst =
-        new Path(containerWorkDir, ContainerLaunch.CONTAINER_SCRIPT);
+      new Path(containerWorkDir, ContainerLaunch.CONTAINER_SCRIPT);
     lfs.util().copy(nmPrivateContainerScriptPath, launchDst);
 
     // copy container tokens to work dir
     Path tokenDst =
-        new Path(containerWorkDir, ContainerLaunch.FINAL_CONTAINER_TOKENS_FILE);
+      new Path(containerWorkDir, ContainerLaunch.FINAL_CONTAINER_TOKENS_FILE);
     lfs.util().copy(nmPrivateTokensPath, tokenDst);
 
-
-
     String localDirMount = toMount(localDirs);
     String logDirMount = toMount(logDirs);
-    String containerWorkDirMount = toMount(Collections.singletonList(containerWorkDir.toUri().getPath()));
+    String containerWorkDirMount = toMount(Collections.singletonList(
+      containerWorkDir.toUri().getPath()));
     StringBuilder commands = new StringBuilder();
+    //Use docker run to launch the docker container. See man pages for
+    //docker-run
+    //--rm removes the container automatically once the container finishes
+    //--net=host allows the container to take on the host's network stack
+    //--name sets the Docker Container name to the YARN containerId string
+    //-v is used to bind mount volumes for local, log and work dirs.
     String commandStr = commands.append(dockerExecutor)
-        .append(" ")
-        .append("run")
-        .append(" ")
-        .append("--rm --net=host")
-        .append(" ")
-        .append(" --name " + containerIdStr)
-        .append(localDirMount)
-        .append(logDirMount)
-        .append(containerWorkDirMount)
-        .append(" ")
-        .append(containerImageName)
-        .toString();
-    String dockerPidScript = "`" + dockerExecutor + " inspect --format {{.State.Pid}} " + containerIdStr + "`";
+      .append(" ")
+      .append("run")
+      .append(" ")
+      .append("--rm --net=host")
+      .append(" ")
+      .append(" --name " + containerIdStr)
+      .append(localDirMount)
+      .append(logDirMount)
+      .append(containerWorkDirMount)
+      .append(" ")
+      .append(containerImageName)
+      .toString();
+    //Get the pid of the process which has been launched as a docker container
+    //using docker inspect
+    String dockerPidScript = "`" + dockerExecutor +
+      " inspect --format {{.State.Pid}} " + containerIdStr + "`";
+
     // Create new local launch wrapper script
-    LocalWrapperScriptBuilder sb =
-      new UnixLocalWrapperScriptBuilder(containerWorkDir, commandStr, dockerPidScript);
+    LocalWrapperScriptBuilder sb = new UnixLocalWrapperScriptBuilder(
+      containerWorkDir, commandStr, dockerPidScript);
     Path pidFile = getPidFilePath(containerId);
     if (pidFile != null) {
       sb.writeLocalWrapperScript(launchDst, pidFile);
     } else {
+      //The container was activated by ContainerLaunch before exec() was
+      //called, but deactivateContainer() has been called since then.
       LOG.info("Container " + containerIdStr
           + " was marked as inactive. Returning terminated error");
       return ExitCode.TERMINATED.getExitCode();
@@ -234,12 +263,13 @@ public class DockerContainerExecutor extends ContainerExecutor {
       String[] command = getRunCommand(sb.getWrapperScriptPath().toString(),
         containerIdStr, userName, pidFile, this.getConf());
       if (LOG.isDebugEnabled()) {
-        LOG.debug("launchContainer: " + commandStr + " " + Joiner.on(" ").join(command));
+        LOG.debug("launchContainer: " + commandStr + " " +
+          Joiner.on(" ").join(command));
       }
       shExec = new ShellCommandExecutor(
-          command,
-          new File(containerWorkDir.toUri().getPath()),
-          container.getLaunchContext().getEnvironment());      // sanitized env
+        command,
+        new File(containerWorkDir.toUri().getPath()),
+        container.getLaunchContext().getEnvironment());      // sanitized env
       if (isContainerActive(containerId)) {
         shExec.execute();
       } else {
@@ -279,9 +309,17 @@ public class DockerContainerExecutor extends ContainerExecutor {
   }
 
   @Override
-  public void writeLaunchEnv(OutputStream out, Map<String, String> environment, Map<Path, List<String>> resources, List<String> command) throws IOException {
-    ContainerLaunch.ShellScriptBuilder sb = ContainerLaunch.ShellScriptBuilder.create();
+  /**
+   * Filter the environment variables that may conflict with the ones set in
+   * the docker image and write them out to an OutputStream.
+   */
+  public void writeLaunchEnv(OutputStream out, Map<String, String> environment,
+    Map<Path, List<String>> resources, List<String> command)
+    throws IOException {
+    ContainerLaunch.ShellScriptBuilder sb =
+      ContainerLaunch.ShellScriptBuilder.create();
 
+    //Remove environment variables that may conflict with those set in the
     Set<String> exclusionSet = new HashSet<String>();
     exclusionSet.add(YarnConfiguration.NM_DOCKER_CONTAINER_EXECUTOR_IMAGE_NAME);
     exclusionSet.add(ApplicationConstants.Environment.HADOOP_YARN_HOME.name());
@@ -427,6 +465,9 @@ public class DockerContainerExecutor extends ContainerExecutor {
     return builder.toString();
   }
 
+  //This class facilitates (only) the creation of platform-specific scripts that
+  //will be used to launch the containers
+  //TODO: This should be re-used from the DefaultContainerExecutor.
   private abstract class LocalWrapperScriptBuilder {
 
     private final Path wrapperScriptPath;
@@ -435,7 +476,8 @@ public class DockerContainerExecutor extends ContainerExecutor {
       return wrapperScriptPath;
     }
 
-    public void writeLocalWrapperScript(Path launchDst, Path pidFile) throws IOException {
+    public void writeLocalWrapperScript(Path launchDst, Path pidFile)
+      throws IOException {
       DataOutputStream out = null;
       PrintStream pout = null;
 
@@ -448,8 +490,8 @@ public class DockerContainerExecutor extends ContainerExecutor {
       }
     }
 
-    protected abstract void writeLocalWrapperScript(Path launchDst, Path pidFile,
-                                                    PrintStream pout);
+    protected abstract void writeLocalWrapperScript(Path launchDst,
+      Path pidFile, PrintStream pout);
 
     protected LocalWrapperScriptBuilder(Path containerWorkDir) {
       this.wrapperScriptPath = new Path(containerWorkDir,
@@ -457,13 +499,15 @@ public class DockerContainerExecutor extends ContainerExecutor {
     }
   }
 
+  //TODO: This class too should be used from DefaultContainerExecutor.
   private final class UnixLocalWrapperScriptBuilder
-      extends LocalWrapperScriptBuilder {
+    extends LocalWrapperScriptBuilder {
     private final Path sessionScriptPath;
     private final String dockerCommand;
     private final String dockerPidScript;
 
-    public UnixLocalWrapperScriptBuilder(Path containerWorkDir, String dockerCommand, String dockerPidScript) {
+    public UnixLocalWrapperScriptBuilder(Path containerWorkDir,
+      String dockerCommand, String dockerPidScript) {
       super(containerWorkDir);
       this.dockerCommand = dockerCommand;
       this.dockerPidScript = dockerPidScript;
@@ -480,8 +524,7 @@ public class DockerContainerExecutor extends ContainerExecutor {
 
     @Override
     public void writeLocalWrapperScript(Path launchDst, Path pidFile,
-                                        PrintStream pout) {
-
+      PrintStream pout) {
       String exitCodeFile = ContainerLaunch.getExitCodeFile(
         pidFile.toString());
       String tmpFile = exitCodeFile + ".tmp";
@@ -505,7 +548,8 @@ public class DockerContainerExecutor extends ContainerExecutor {
         // hence write pid to tmp file first followed by a mv
         pout.println("#!/usr/bin/env bash");
         pout.println();
-        pout.println("echo "+ dockerPidScript +" > " + pidFile.toString() + ".tmp");
+        pout.println("echo "+ dockerPidScript +" > " + pidFile.toString()
+          + ".tmp");
         pout.println("/bin/mv -f " + pidFile.toString() + ".tmp " + pidFile);
         pout.println(dockerCommand + " bash \"" +
           launchDst.toUri().getPath().toString() + "\"");
@@ -518,7 +562,7 @@ public class DockerContainerExecutor extends ContainerExecutor {
   }
 
   protected void createDir(Path dirPath, FsPermission perms,
-                           boolean createParent, String user) throws IOException {
+    boolean createParent, String user) throws IOException {
     lfs.mkdir(dirPath, perms, createParent);
     if (!perms.equals(perms.applyUMask(lfs.getUMask()))) {
       lfs.setPermission(dirPath, perms);
@@ -532,13 +576,14 @@ public class DockerContainerExecutor extends ContainerExecutor {
    * </ul>
    */
   void createUserLocalDirs(List<String> localDirs, String user)
-      throws IOException {
+    throws IOException {
     boolean userDirStatus = false;
     FsPermission userperms = new FsPermission(USER_PERM);
     for (String localDir : localDirs) {
       // create $local.dir/usercache/$user and its immediate parent
       try {
-        createDir(getUserCacheDir(new Path(localDir), user), userperms, true, user);
+        createDir(getUserCacheDir(new Path(localDir), user), userperms, true,
+          user);
       } catch (IOException e) {
         LOG.warn("Unable to create the user directory : " + localDir, e);
         continue;
@@ -633,7 +678,7 @@ public class DockerContainerExecutor extends ContainerExecutor {
    * Create application log directories on all disks.
    */
   void createContainerLogDirs(String appId, String containerId,
-                              List<String> logDirs, String user) throws IOException {
+    List<String> logDirs, String user) throws IOException {
 
     boolean containerLogDirStatus = false;
     FsPermission containerLogDirPerms = new FsPermission(LOGDIR_PERM);
@@ -707,7 +752,7 @@ public class DockerContainerExecutor extends ContainerExecutor {
   }
 
   protected Path getWorkingDir(List<String> localDirs, String user,
-                               String appId) throws IOException {
+    String appId) throws IOException {
     Path appStorageDir = null;
     long totalAvailable = 0L;
     long[] availableOnDisk = new long[localDirs.size()];
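
For illustration, a standalone sketch (not part of this patch) that exercises the same regular expression as DOCKER_IMAGE_PATTERN above, using the sample image names given in its comment; a name containing a space is rejected.

    import java.util.regex.Pattern;

    public class DockerImagePatternCheck {
      // Same expression as DockerContainerExecutor.DOCKER_IMAGE_PATTERN.
      private static final Pattern DOCKER_IMAGE_PATTERN =
          Pattern.compile("^(([\\w\\.-]+)(:\\d+)*\\/)?[\\w\\.:-]+$");

      public static void main(String[] args) {
        String[] names = {
            "registry.somecompany.com:9999/containername:0.1",  // matches
            "containername:0.1",                                 // matches
            "containername",                                     // matches
            "container name"                                     // rejected: spaces
        };
        for (String name : names) {
          System.out.println(name + " -> "
              + DOCKER_IMAGE_PATTERN.matcher(name).matches());
        }
      }
    }

A validated name is then spliced into the command assembled in launchContainer above, which has the shape "docker run --rm --net=host --name <containerId> <-v mounts for local, log and work dirs> <image>", wrapped by a session script that records the container PID via docker inspect --format {{.State.Pid}}.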

http://git-wip-us.apache.org/repos/asf/hadoop/blob/74e941da/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java
index a87238d..5a9229b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java
@@ -267,11 +267,11 @@ public class ContainerLaunch implements Callable<Integer> {
         // Sanitize the container's environment
         sanitizeEnv(environment, containerWorkDir, appDirs, containerLogDirs,
           localResources, nmPrivateClasspathJarDir);
-        
+
         // Write out the environment
-        exec.writeLaunchEnv(containerScriptOutStream, environment, localResources,
-            launchContext.getCommands());
-        
+        exec.writeLaunchEnv(containerScriptOutStream, environment,
+          localResources, launchContext.getCommands());
+
         // /////////// End of writing out container-script
 
         // /////////// Write out the container-tokens in the nmPrivate space.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/74e941da/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestDockerContainerExecutor.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestDockerContainerExecutor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestDockerContainerExecutor.java
index ac02542..65e381c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestDockerContainerExecutor.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestDockerContainerExecutor.java
@@ -18,7 +18,18 @@
 
 package org.apache.hadoop.yarn.server.nodemanager;
 
-import com.google.common.base.Strings;
+import static org.junit.Assert.assertEquals;
+import static org.mockito.Mockito.RETURNS_DEEP_STUBS;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+import java.io.File;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.io.PrintWriter;
+import java.util.HashMap;
+import java.util.Map;
+
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -26,48 +37,29 @@ import org.apache.hadoop.fs.FileContext;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.util.Shell;
-import org.apache.hadoop.yarn.api.ApplicationConstants;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;
-import org.apache.hadoop.yarn.util.ConverterUtils;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
 
-import java.io.File;
-import java.io.FileOutputStream;
-import java.io.FileReader;
-import java.io.IOException;
-import java.io.LineNumberReader;
-import java.io.PrintWriter;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assume.assumeTrue;
-import static org.mockito.Mockito.RETURNS_DEEP_STUBS;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.when;
+import com.google.common.base.Strings;
 
 /**
- * This is intended to test the DockerContainerExecutor code, but it requires docker
- * to be installed.
+ * This is intended to test the DockerContainerExecutor code, but it requires
+ * docker to be installed.
  * <br><ol>
- * <li>Install docker, and Compile the code with docker-service-url set to the host and port
- * where docker service is running.
+ * <li>Install docker, and Compile the code with docker-service-url set to the
+ * host and port where docker service is running.
  * <br><pre><code>
- * > mvn clean install -Ddocker-service-url=tcp://0.0.0.0:4243
- *                          -DskipTests
+ * > mvn clean install -Ddocker-service-url=tcp://0.0.0.0:4243 -DskipTests
  * </code></pre>
  */
 public class TestDockerContainerExecutor {
   private static final Log LOG = LogFactory
-      .getLog(TestDockerContainerExecutor.class);
+    .getLog(TestDockerContainerExecutor.class);
   private static File workSpace = null;
   private DockerContainerExecutor exec = null;
   private LocalDirsHandlerService dirsHandler;
@@ -75,14 +67,10 @@ public class TestDockerContainerExecutor {
   private FileContext lfs;
   private String yarnImage;
 
-  private int id = 0;
   private String appSubmitter;
   private String dockerUrl;
   private String testImage = "centos:latest";
   private String dockerExec;
-  private String containerIdStr;
-
-
   private ContainerId getNextContainerId() {
     ContainerId cId = mock(ContainerId.class, RETURNS_DEEP_STUBS);
     String id = "CONTAINER_" + System.currentTimeMillis();
@@ -91,6 +79,8 @@ public class TestDockerContainerExecutor {
   }
 
   @Before
+  //Initialize a new DockerContainerExecutor that will be used to launch mocked
+  //containers.
   public void setup() {
     try {
       lfs = FileContext.getLocalFSFileContext();
@@ -113,8 +103,10 @@ public class TestDockerContainerExecutor {
     }
     dockerUrl = " -H " + dockerUrl;
     dockerExec = "docker " + dockerUrl;
-    conf.set(YarnConfiguration.NM_DOCKER_CONTAINER_EXECUTOR_IMAGE_NAME, yarnImage);
-    conf.set(YarnConfiguration.NM_DOCKER_CONTAINER_EXECUTOR_EXEC_NAME, dockerExec);
+    conf.set(
+      YarnConfiguration.NM_DOCKER_CONTAINER_EXECUTOR_IMAGE_NAME, yarnImage);
+    conf.set(
+      YarnConfiguration.NM_DOCKER_CONTAINER_EXECUTOR_EXEC_NAME, dockerExec);
     exec = new DockerContainerExecutor();
     dirsHandler = new LocalDirsHandlerService();
     dirsHandler.init(conf);
@@ -129,11 +121,10 @@ public class TestDockerContainerExecutor {
 
   private Shell.ShellCommandExecutor shellExec(String command) {
     try {
-
       Shell.ShellCommandExecutor shExec = new Shell.ShellCommandExecutor(
-          command.split("\\s+"),
-          new File(workDir.toUri().getPath()),
-          System.getenv());
+        command.split("\\s+"),
+        new File(workDir.toUri().getPath()),
+        System.getenv());
       shExec.execute();
       return shExec;
     } catch (IOException e) {
@@ -145,14 +136,24 @@ public class TestDockerContainerExecutor {
     return exec != null;
   }
 
-  private int runAndBlock(ContainerId cId, Map<String, String> launchCtxEnv, String... cmd) throws IOException {
+  /**
+   * Launch a docker container to run a command and block until it completes
+   * @param cId a fake ContainerID
+   * @param launchCtxEnv the environment for the container's launch context
+   * @param cmd the command to launch inside the docker container
+   * @return the exit code of the process used to launch the docker container
+   * @throws IOException
+   */
+  private int runAndBlock(ContainerId cId, Map<String, String> launchCtxEnv,
+    String... cmd) throws IOException {
     String appId = "APP_" + System.currentTimeMillis();
     Container container = mock(Container.class);
     ContainerLaunchContext context = mock(ContainerLaunchContext.class);
 
     when(container.getContainerId()).thenReturn(cId);
     when(container.getLaunchContext()).thenReturn(context);
-    when(cId.getApplicationAttemptId().getApplicationId().toString()).thenReturn(appId);
+    when(cId.getApplicationAttemptId().getApplicationId().toString())
+      .thenReturn(appId);
     when(context.getEnvironment()).thenReturn(launchCtxEnv);
 
     String script = writeScriptFile(launchCtxEnv, cmd);
@@ -164,11 +165,13 @@ public class TestDockerContainerExecutor {
 
     exec.activateContainer(cId, pidFile);
     return exec.launchContainer(container, scriptPath, tokensPath,
-        appSubmitter, appId, workDir, dirsHandler.getLocalDirs(),
-        dirsHandler.getLogDirs());
+      appSubmitter, appId, workDir, dirsHandler.getLocalDirs(),
+      dirsHandler.getLogDirs());
   }
 
-  private String writeScriptFile(Map<String, String> launchCtxEnv, String... cmd) throws IOException {
+  // Write the script used to launch the docker container in a temp file
+  private String writeScriptFile(Map<String, String> launchCtxEnv,
+    String... cmd) throws IOException {
     File f = File.createTempFile("TestDockerContainerExecutor", ".sh");
     f.deleteOnExit();
     PrintWriter p = new PrintWriter(new FileOutputStream(f));
@@ -193,6 +196,10 @@ public class TestDockerContainerExecutor {
     }
   }
 
+  /**
+   * Test that a touch command can be launched successfully in a docker
+   * container
+   */
   @Test
   public void testLaunchContainer() throws IOException {
     if (!shouldRun()) {
@@ -201,12 +208,13 @@ public class TestDockerContainerExecutor {
     }
 
     Map<String, String> env = new HashMap<String, String>();
-    env.put(YarnConfiguration.NM_DOCKER_CONTAINER_EXECUTOR_IMAGE_NAME, testImage);
+    env.put(YarnConfiguration.NM_DOCKER_CONTAINER_EXECUTOR_IMAGE_NAME,
+      testImage);
     String touchFileName = "touch-file-" + System.currentTimeMillis();
     File touchFile = new File(dirsHandler.getLocalDirs().get(0), touchFileName);
     ContainerId cId = getNextContainerId();
-    int ret = runAndBlock(
-        cId, env, "touch", touchFile.getAbsolutePath(), "&&", "cp", touchFile.getAbsolutePath(), "/");
+    int ret = runAndBlock(cId, env, "touch", touchFile.getAbsolutePath(), "&&",
+      "cp", touchFile.getAbsolutePath(), "/");
 
     assertEquals(0, ret);
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/74e941da/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestDockerContainerExecutorWithMocks.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestDockerContainerExecutorWithMocks.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestDockerContainerExecutorWithMocks.java
index 3584fed..8acd9ca 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestDockerContainerExecutorWithMocks.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestDockerContainerExecutorWithMocks.java
@@ -18,7 +18,23 @@
 
 package org.apache.hadoop.yarn.server.nodemanager;
 
-import com.google.common.base.Strings;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assume.assumeTrue;
+import static org.mockito.Mockito.RETURNS_DEEP_STUBS;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+import java.io.File;
+import java.io.FileReader;
+import java.io.IOException;
+import java.io.LineNumberReader;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -36,30 +52,13 @@ import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
 
-import java.io.File;
-import java.io.FileReader;
-import java.io.IOException;
-import java.io.LineNumberReader;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.List;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assume.assumeTrue;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.when;
-import static org.mockito.Mockito.RETURNS_DEEP_STUBS;
-
 /**
  * Mock tests for docker container executor
  */
 public class TestDockerContainerExecutorWithMocks {
 
   private static final Log LOG = LogFactory
-      .getLog(TestDockerContainerExecutorWithMocks.class);
+    .getLog(TestDockerContainerExecutorWithMocks.class);
   public static final String DOCKER_LAUNCH_COMMAND = "/bin/true";
   private DockerContainerExecutor dockerContainerExecutor = null;
   private LocalDirsHandlerService dirsHandler;
@@ -81,8 +80,10 @@ public class TestDockerContainerExecutorWithMocks {
     conf.set(YarnConfiguration.NM_LINUX_CONTAINER_EXECUTOR_PATH, executorPath);
     conf.set(YarnConfiguration.NM_LOCAL_DIRS, "/tmp/nm-local-dir" + time);
     conf.set(YarnConfiguration.NM_LOG_DIRS, "/tmp/userlogs" + time);
-    conf.set(YarnConfiguration.NM_DOCKER_CONTAINER_EXECUTOR_IMAGE_NAME, yarnImage);
-    conf.set(YarnConfiguration.NM_DOCKER_CONTAINER_EXECUTOR_EXEC_NAME , DOCKER_LAUNCH_COMMAND);
+    conf.set(YarnConfiguration.NM_DOCKER_CONTAINER_EXECUTOR_IMAGE_NAME,
+      yarnImage);
+    conf.set(YarnConfiguration.NM_DOCKER_CONTAINER_EXECUTOR_EXEC_NAME,
+      DOCKER_LAUNCH_COMMAND);
     dockerContainerExecutor = new DockerContainerExecutor();
     dirsHandler = new LocalDirsHandlerService();
     dirsHandler.init(conf);
@@ -95,7 +96,6 @@ public class TestDockerContainerExecutorWithMocks {
     } catch (IOException e) {
       throw new RuntimeException(e);
     }
-
   }
 
   @After
@@ -110,13 +110,17 @@ public class TestDockerContainerExecutorWithMocks {
   }
 
   @Test(expected = IllegalStateException.class)
+  //Test that DockerContainerExecutor doesn't successfully init on a secure
+  //cluster
   public void testContainerInitSecure() throws IOException {
     dockerContainerExecutor.getConf().set(
-        CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
+      CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
     dockerContainerExecutor.init();
   }
 
   @Test(expected = IllegalArgumentException.class)
+  //Test that when the image name is null, the container launch throws an
+  //IllegalArgumentException
   public void testContainerLaunchNullImage() throws IOException {
     String appSubmitter = "nobody";
     String appId = "APP_ID";
@@ -126,17 +130,19 @@ public class TestDockerContainerExecutorWithMocks {
     Container container = mock(Container.class, RETURNS_DEEP_STUBS);
     ContainerId cId = mock(ContainerId.class, RETURNS_DEEP_STUBS);
     ContainerLaunchContext context = mock(ContainerLaunchContext.class);
-    HashMap<String, String> env = new HashMap<String,String>();
 
+    HashMap<String, String> env = new HashMap<String,String>();
     when(container.getContainerId()).thenReturn(cId);
     when(container.getLaunchContext()).thenReturn(context);
-    when(cId.getApplicationAttemptId().getApplicationId().toString()).thenReturn(appId);
+    when(cId.getApplicationAttemptId().getApplicationId().toString())
+      .thenReturn(appId);
     when(cId.toString()).thenReturn(containerId);
 
     when(context.getEnvironment()).thenReturn(env);
-    env.put(YarnConfiguration.NM_DOCKER_CONTAINER_EXECUTOR_IMAGE_NAME, testImage);
-    dockerContainerExecutor.getConf()
-        .set(YarnConfiguration.NM_DOCKER_CONTAINER_EXECUTOR_IMAGE_NAME, testImage);
+    env.put(
+      YarnConfiguration.NM_DOCKER_CONTAINER_EXECUTOR_IMAGE_NAME, testImage);
+    dockerContainerExecutor.getConf().set(
+      YarnConfiguration.NM_DOCKER_CONTAINER_EXECUTOR_IMAGE_NAME, testImage);
     Path scriptPath = new Path("file:///bin/echo");
     Path tokensPath = new Path("file:///dev/null");
 
@@ -149,6 +155,8 @@ public class TestDockerContainerExecutorWithMocks {
   }
 
   @Test(expected = IllegalArgumentException.class)
+  //Test that when the image name is invalid, the container launch throws an
+  //IllegalArgumentException
   public void testContainerLaunchInvalidImage() throws IOException {
     String appSubmitter = "nobody";
     String appId = "APP_ID";
@@ -162,13 +170,15 @@ public class TestDockerContainerExecutorWithMocks {
 
     when(container.getContainerId()).thenReturn(cId);
     when(container.getLaunchContext()).thenReturn(context);
-    when(cId.getApplicationAttemptId().getApplicationId().toString()).thenReturn(appId);
+    when(cId.getApplicationAttemptId().getApplicationId().toString())
+      .thenReturn(appId);
     when(cId.toString()).thenReturn(containerId);
 
     when(context.getEnvironment()).thenReturn(env);
-    env.put(YarnConfiguration.NM_DOCKER_CONTAINER_EXECUTOR_IMAGE_NAME, testImage);
-    dockerContainerExecutor.getConf()
-      .set(YarnConfiguration.NM_DOCKER_CONTAINER_EXECUTOR_IMAGE_NAME, testImage);
+    env.put(
+      YarnConfiguration.NM_DOCKER_CONTAINER_EXECUTOR_IMAGE_NAME, testImage);
+    dockerContainerExecutor.getConf().set(
+      YarnConfiguration.NM_DOCKER_CONTAINER_EXECUTOR_IMAGE_NAME, testImage);
     Path scriptPath = new Path("file:///bin/echo");
     Path tokensPath = new Path("file:///dev/null");
 
@@ -181,6 +191,8 @@ public class TestDockerContainerExecutorWithMocks {
   }
 
   @Test
+  //Test that a container launch correctly wrote the session script with the
+  //commands we expected
   public void testContainerLaunch() throws IOException {
     String appSubmitter = "nobody";
     String appId = "APP_ID";
@@ -194,40 +206,48 @@ public class TestDockerContainerExecutorWithMocks {
 
     when(container.getContainerId()).thenReturn(cId);
     when(container.getLaunchContext()).thenReturn(context);
-    when(cId.getApplicationAttemptId().getApplicationId().toString()).thenReturn(appId);
+    when(cId.getApplicationAttemptId().getApplicationId().toString())
+      .thenReturn(appId);
     when(cId.toString()).thenReturn(containerId);
 
     when(context.getEnvironment()).thenReturn(env);
-    env.put(YarnConfiguration.NM_DOCKER_CONTAINER_EXECUTOR_IMAGE_NAME, testImage);
+    env.put(
+      YarnConfiguration.NM_DOCKER_CONTAINER_EXECUTOR_IMAGE_NAME, testImage);
     Path scriptPath = new Path("file:///bin/echo");
     Path tokensPath = new Path("file:///dev/null");
 
     Path pidFile = new Path(workDir, "pid");
 
     dockerContainerExecutor.activateContainer(cId, pidFile);
-    int ret = dockerContainerExecutor.launchContainer(container, scriptPath, tokensPath,
-        appSubmitter, appId, workDir, dirsHandler.getLocalDirs(),
-        dirsHandler.getLogDirs());
+    int ret = dockerContainerExecutor.launchContainer(container, scriptPath,
+      tokensPath, appSubmitter, appId, workDir, dirsHandler.getLocalDirs(),
+      dirsHandler.getLogDirs());
     assertEquals(0, ret);
     //get the script
     Path sessionScriptPath = new Path(workDir,
-        Shell.appendScriptExtension(
-            DockerContainerExecutor.DOCKER_CONTAINER_EXECUTOR_SESSION_SCRIPT));
-    LineNumberReader lnr = new LineNumberReader(new FileReader(sessionScriptPath.toString()));
+      Shell.appendScriptExtension(
+        DockerContainerExecutor.DOCKER_CONTAINER_EXECUTOR_SESSION_SCRIPT));
+    LineNumberReader lnr = new LineNumberReader(new FileReader(
+      sessionScriptPath.toString()));
     boolean cmdFound = false;
     List<String> localDirs = dirsToMount(dirsHandler.getLocalDirs());
     List<String> logDirs = dirsToMount(dirsHandler.getLogDirs());
-    List<String> workDirMount = dirsToMount(Collections.singletonList(workDir.toUri().getPath()));
-    List<String> expectedCommands =  new ArrayList<String>(
-        Arrays.asList(DOCKER_LAUNCH_COMMAND, "run", "--rm", "--net=host",  "--name", containerId));
+    List<String> workDirMount = dirsToMount(Collections.singletonList(
+      workDir.toUri().getPath()));
+    List<String> expectedCommands =  new ArrayList<String>(Arrays.asList(
+      DOCKER_LAUNCH_COMMAND, "run", "--rm", "--net=host",  "--name",
+      containerId));
     expectedCommands.addAll(localDirs);
     expectedCommands.addAll(logDirs);
     expectedCommands.addAll(workDirMount);
     String shellScript =  workDir + "/launch_container.sh";
 
-    expectedCommands.addAll(Arrays.asList(testImage.replaceAll("['\"]", ""), "bash","\"" + shellScript + "\""));
+    expectedCommands.addAll(Arrays.asList(testImage.replaceAll("['\"]", ""),
+      "bash","\"" + shellScript + "\""));
 
-    String expectedPidString = "echo `/bin/true inspect --format {{.State.Pid}} " + containerId+"` > "+ pidFile.toString() + ".tmp";
+    String expectedPidString =
+      "echo `/bin/true inspect --format {{.State.Pid}} " + containerId+"` > "+
+      pidFile.toString() + ".tmp";
     boolean pidSetterFound = false;
     while(lnr.ready()){
       String line = lnr.readLine();


[03/20] hadoop git commit: HDFS-7990. IBR delete ack should not be delayed. Contributed by Daryn Sharp.

Posted by zj...@apache.org.
HDFS-7990. IBR delete ack should not be delayed. Contributed by Daryn Sharp.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f402f6d5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f402f6d5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f402f6d5

Branch: refs/heads/YARN-2928
Commit: f402f6d592569601efee5682316aad0a403447b3
Parents: ee35265
Author: Kihwal Lee <ki...@apache.org>
Authored: Fri Mar 27 09:05:17 2015 -0500
Committer: Zhijie Shen <zj...@apache.org>
Committed: Mon Mar 30 12:10:46 2015 -0700

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt        |  2 ++
 .../hdfs/server/datanode/BPServiceActor.java       | 17 +++++++----------
 .../apache/hadoop/hdfs/server/datanode/DNConf.java |  2 --
 .../hdfs/server/datanode/SimulatedFSDataset.java   | 13 ++++++++++++-
 .../datanode/TestIncrementalBlockReports.java      |  4 ++--
 5 files changed, 23 insertions(+), 15 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f402f6d5/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index dff8bd2..72ea4fb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -342,6 +342,8 @@ Release 2.8.0 - UNRELEASED
     HDFS-7928. Scanning blocks from disk during rolling upgrade startup takes
     a lot of time if disks are busy (Rushabh S Shah via kihwal)
 
+    HDFS-7990. IBR delete ack should not be delayed. (daryn via kihwal)
+
   OPTIMIZATIONS
 
   BUG FIXES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f402f6d5/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
index 10cce45..3b4756c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
@@ -82,12 +82,11 @@ class BPServiceActor implements Runnable {
 
   final BPOfferService bpos;
   
-  // lastBlockReport, lastDeletedReport and lastHeartbeat may be assigned/read
+  // lastBlockReport and lastHeartbeat may be assigned/read
   // by testing threads (through BPServiceActor#triggerXXX), while also 
   // assigned/read by the actor thread. Thus they should be declared as volatile
   // to make sure the "happens-before" consistency.
   volatile long lastBlockReport = 0;
-  volatile long lastDeletedReport = 0;
 
   boolean resetBlockReportTime = true;
 
@@ -417,10 +416,10 @@ class BPServiceActor implements Runnable {
   @VisibleForTesting
   void triggerDeletionReportForTests() {
     synchronized (pendingIncrementalBRperStorage) {
-      lastDeletedReport = 0;
+      sendImmediateIBR = true;
       pendingIncrementalBRperStorage.notifyAll();
 
-      while (lastDeletedReport == 0) {
+      while (sendImmediateIBR) {
         try {
           pendingIncrementalBRperStorage.wait(100);
         } catch (InterruptedException e) {
@@ -465,7 +464,6 @@ class BPServiceActor implements Runnable {
     // or we will report an RBW replica after the BlockReport already reports
     // a FINALIZED one.
     reportReceivedDeletedBlocks();
-    lastDeletedReport = startTime;
 
     long brCreateStartTime = monotonicNow();
     Map<DatanodeStorage, BlockListAsLongs> perVolumeBlockLists =
@@ -674,7 +672,6 @@ class BPServiceActor implements Runnable {
    */
   private void offerService() throws Exception {
     LOG.info("For namenode " + nnAddr + " using"
-        + " DELETEREPORT_INTERVAL of " + dnConf.deleteReportInterval + " msec "
         + " BLOCKREPORT_INTERVAL of " + dnConf.blockReportInterval + "msec"
         + " CACHEREPORT_INTERVAL of " + dnConf.cacheReportInterval + "msec"
         + " Initial delay: " + dnConf.initialBlockReportDelay + "msec"
@@ -690,7 +687,9 @@ class BPServiceActor implements Runnable {
         //
         // Every so often, send heartbeat or block-report
         //
-        if (startTime - lastHeartbeat >= dnConf.heartBeatInterval) {
+        boolean sendHeartbeat =
+            startTime - lastHeartbeat >= dnConf.heartBeatInterval;
+        if (sendHeartbeat) {
           //
           // All heartbeat messages include following info:
           // -- Datanode name
@@ -729,10 +728,8 @@ class BPServiceActor implements Runnable {
             }
           }
         }
-        if (sendImmediateIBR ||
-            (startTime - lastDeletedReport > dnConf.deleteReportInterval)) {
+        if (sendImmediateIBR || sendHeartbeat) {
           reportReceivedDeletedBlocks();
-          lastDeletedReport = startTime;
         }
 
         List<DatanodeCommand> cmds = blockReport();
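
For illustration, a small standalone sketch (not part of this patch) of the worst-case delay this change removes, assuming the default dfs.heartbeat.interval of 3 seconds: the old code gated deletion IBRs on deleteReportInterval, which DNConf set to 100 * heartBeatInterval, while the new code sends them with the next heartbeat or as soon as sendImmediateIBR is set.

    public class IbrDelayBound {
      public static void main(String[] args) {
        // Assumes the default dfs.heartbeat.interval of 3 seconds.
        long heartBeatIntervalMs = 3 * 1000L;
        // Before HDFS-7990: deletion acks could wait for deleteReportInterval,
        // which DNConf set to 100 * heartBeatInterval.
        long oldWorstCaseMs = 100 * heartBeatIntervalMs;
        // After HDFS-7990: deletion acks ride along with the next heartbeat.
        long newWorstCaseMs = heartBeatIntervalMs;
        System.out.println("old worst-case delete-IBR delay: " + oldWorstCaseMs
            + " ms");  // 300000 ms, i.e. 5 minutes
        System.out.println("new worst-case delete-IBR delay: " + newWorstCaseMs
            + " ms");  // 3000 ms
      }
    }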

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f402f6d5/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DNConf.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DNConf.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DNConf.java
index 67cd1ce..3406f29 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DNConf.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DNConf.java
@@ -82,7 +82,6 @@ public class DNConf {
   final long heartBeatInterval;
   final long blockReportInterval;
   final long blockReportSplitThreshold;
-  final long deleteReportInterval;
   final long initialBlockReportDelay;
   final long cacheReportInterval;
   final long dfsclientSlowIoWarningThresholdMs;
@@ -164,7 +163,6 @@ public class DNConf {
     heartBeatInterval = conf.getLong(DFS_HEARTBEAT_INTERVAL_KEY,
         DFS_HEARTBEAT_INTERVAL_DEFAULT) * 1000L;
     
-    this.deleteReportInterval = 100 * heartBeatInterval;
     // do we need to sync block file contents to disk when blockfile is closed?
     this.syncOnClose = conf.getBoolean(DFS_DATANODE_SYNCONCLOSE_KEY, 
         DFS_DATANODE_SYNCONCLOSE_DEFAULT);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f402f6d5/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
index 5c7b4ac..23fc95b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
@@ -84,7 +84,7 @@ public class SimulatedFSDataset implements FsDatasetSpi<FsVolumeSpi> {
     @Override
     public SimulatedFSDataset newInstance(DataNode datanode,
         DataStorage storage, Configuration conf) throws IOException {
-      return new SimulatedFSDataset(storage, conf);
+      return new SimulatedFSDataset(datanode, storage, conf);
     }
 
     @Override
@@ -509,8 +509,15 @@ public class SimulatedFSDataset implements FsDatasetSpi<FsVolumeSpi> {
   private final SimulatedStorage storage;
   private final SimulatedVolume volume;
   private final String datanodeUuid;
+  private final DataNode datanode;
   
+
   public SimulatedFSDataset(DataStorage storage, Configuration conf) {
+    this(null, storage, conf);
+  }
+
+  public SimulatedFSDataset(DataNode datanode, DataStorage storage, Configuration conf) {
+    this.datanode = datanode;
     if (storage != null) {
       for (int i = 0; i < storage.getNumStorageDirs(); ++i) {
         storage.createStorageID(storage.getStorageDir(i), false);
@@ -737,6 +744,10 @@ public class SimulatedFSDataset implements FsDatasetSpi<FsVolumeSpi> {
       }
       storage.free(bpid, binfo.getNumBytes());
       map.remove(b);
+      if (datanode != null) {
+        datanode.notifyNamenodeDeletedBlock(new ExtendedBlock(bpid, b),
+            binfo.getStorageUuid());
+      }
     }
     if (error) {
       throw new IOException("Invalidate: Missing blocks.");

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f402f6d5/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestIncrementalBlockReports.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestIncrementalBlockReports.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestIncrementalBlockReports.java
index b5aa93f..cd2c125 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestIncrementalBlockReports.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestIncrementalBlockReports.java
@@ -159,8 +159,8 @@ public class TestIncrementalBlockReports {
           anyString(),
           any(StorageReceivedDeletedBlocks[].class));
 
-      // Trigger a block report, this also triggers an IBR.
-      DataNodeTestUtils.triggerBlockReport(singletonDn);
+      // Trigger a heartbeat, this also triggers an IBR.
+      DataNodeTestUtils.triggerHeartbeat(singletonDn);
       Thread.sleep(2000);
 
       // Ensure that the deleted block is reported.


[18/20] hadoop git commit: HDFS-7261. storageMap is accessed without synchronization in DatanodeDescriptor#updateHeartbeatState() (Brahma Reddy Battula via Colin P. McCabe)

Posted by zj...@apache.org.
HDFS-7261. storageMap is accessed without synchronization in DatanodeDescriptor#updateHeartbeatState() (Brahma Reddy Battula via Colin P. McCabe)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/afb05c84
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/afb05c84
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/afb05c84

Branch: refs/heads/YARN-2928
Commit: afb05c84e625d85fd12287968ee6124470016ad7
Parents: 5c42a67
Author: Colin Patrick Mccabe <cm...@cloudera.com>
Authored: Mon Mar 30 10:46:21 2015 -0700
Committer: Zhijie Shen <zj...@apache.org>
Committed: Mon Mar 30 12:10:49 2015 -0700

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |  4 +++
 .../blockmanagement/DatanodeDescriptor.java     | 29 ++++++++++++--------
 2 files changed, 21 insertions(+), 12 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/afb05c84/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index efba80e..79a81c6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -379,6 +379,10 @@ Release 2.8.0 - UNRELEASED
     HDFS-8002. Website refers to /trash directory. (Brahma Reddy Battula via
     aajisaka)
 
+    HDFS-7261. storageMap is accessed without synchronization in
+    DatanodeDescriptor#updateHeartbeatState() (Brahma Reddy Battula via Colin
+    P. McCabe)
+
 Release 2.7.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/afb05c84/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
index d0d7a72..4731ad4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
@@ -447,8 +447,10 @@ public class DatanodeDescriptor extends DatanodeInfo {
     if (checkFailedStorages) {
       LOG.info("Number of failed storage changes from "
           + this.volumeFailures + " to " + volFailures);
-      failedStorageInfos = new HashSet<DatanodeStorageInfo>(
-          storageMap.values());
+      synchronized (storageMap) {
+        failedStorageInfos =
+            new HashSet<DatanodeStorageInfo>(storageMap.values());
+      }
     }
 
     setCacheCapacity(cacheCapacity);
@@ -480,8 +482,11 @@ public class DatanodeDescriptor extends DatanodeInfo {
     if (checkFailedStorages) {
       updateFailedStorage(failedStorageInfos);
     }
-
-    if (storageMap.size() != reports.length) {
+    long storageMapSize;
+    synchronized (storageMap) {
+      storageMapSize = storageMap.size();
+    }
+    if (storageMapSize != reports.length) {
       pruneStorageMap(reports);
     }
   }
@@ -491,14 +496,14 @@ public class DatanodeDescriptor extends DatanodeInfo {
    * as long as they have associated block replicas.
    */
   private void pruneStorageMap(final StorageReport[] reports) {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Number of storages reported in heartbeat=" + reports.length +
-                    "; Number of storages in storageMap=" + storageMap.size());
-    }
+    synchronized (storageMap) {
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Number of storages reported in heartbeat=" + reports.length
+            + "; Number of storages in storageMap=" + storageMap.size());
+      }
 
-    HashMap<String, DatanodeStorageInfo> excessStorages;
+      HashMap<String, DatanodeStorageInfo> excessStorages;
 
-    synchronized (storageMap) {
       // Init excessStorages with all known storages.
       excessStorages = new HashMap<String, DatanodeStorageInfo>(storageMap);
 
@@ -515,8 +520,8 @@ public class DatanodeDescriptor extends DatanodeInfo {
           LOG.info("Removed storage " + storageInfo + " from DataNode" + this);
         } else if (LOG.isDebugEnabled()) {
           // This can occur until all block reports are received.
-          LOG.debug("Deferring removal of stale storage " + storageInfo +
-                        " with " + storageInfo.numBlocks() + " blocks");
+          LOG.debug("Deferring removal of stale storage " + storageInfo
+              + " with " + storageInfo.numBlocks() + " blocks");
         }
       }
     }
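
For illustration, a minimal standalone sketch (not part of this patch) of the copy-under-lock idiom the updateHeartbeatState() hunk above now applies to storageMap: hold the monitor only long enough to snapshot the map, then work on the snapshot outside the lock.

    import java.util.ArrayList;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    public class CopyUnderLock {
      // Stand-in for DatanodeDescriptor.storageMap (hypothetical value type).
      private final Map<String, String> storageMap = new HashMap<String, String>();

      public List<String> snapshotStorages() {
        List<String> snapshot;
        synchronized (storageMap) {
          // Keep the critical section short: just copy the current values.
          snapshot = new ArrayList<String>(storageMap.values());
        }
        // Any heavier work (logging, failure checks, pruning decisions)
        // happens on the snapshot, outside the lock.
        return snapshot;
      }
    }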


[06/20] hadoop git commit: MAPREDUCE-6291. Correct mapred queue usage command. Contributed by Brahma Reddy Battula.

Posted by zj...@apache.org.
MAPREDUCE-6291. Correct mapred queue usage command. Contributed by Brahma Reddy Battula.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fa7cc99c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fa7cc99c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fa7cc99c

Branch: refs/heads/YARN-2928
Commit: fa7cc99cd158168b8c7ff32428c3e2409315d7cb
Parents: 7fa9e0e
Author: Harsh J <ha...@cloudera.com>
Authored: Sat Mar 28 11:57:21 2015 +0530
Committer: Zhijie Shen <zj...@apache.org>
Committed: Mon Mar 30 12:10:47 2015 -0700

----------------------------------------------------------------------
 hadoop-mapreduce-project/CHANGES.txt                              | 3 +++
 .../src/main/java/org/apache/hadoop/mapred/JobQueueClient.java    | 2 +-
 .../src/main/java/org/apache/hadoop/mapred/pipes/Submitter.java   | 2 +-
 .../src/main/java/org/apache/hadoop/mapreduce/tools/CLI.java      | 2 +-
 .../src/main/java/org/apache/hadoop/tools/HadoopArchives.java     | 2 +-
 5 files changed, 7 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fa7cc99c/hadoop-mapreduce-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt
index ce16510..b0367a7 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -256,6 +256,9 @@ Release 2.8.0 - UNRELEASED
 
   IMPROVEMENTS
 
+    MAPREDUCE-6291. Correct mapred queue usage command.
+    (Brahma Reddy Battula via harsh)
+
     MAPREDUCE-579. Streaming "slowmatch" documentation. (harsh)
 
     MAPREDUCE-6287. Deprecated methods in org.apache.hadoop.examples.Sort

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fa7cc99c/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobQueueClient.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobQueueClient.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobQueueClient.java
index 097e338..81f6140 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobQueueClient.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobQueueClient.java
@@ -224,7 +224,7 @@ class JobQueueClient extends Configured implements Tool {
   }
 
   private void displayUsage(String cmd) {
-    String prefix = "Usage: JobQueueClient ";
+    String prefix = "Usage: queue ";
     if ("-queueinfo".equals(cmd)) {
       System.err.println(prefix + "[" + cmd + "<job-queue-name> [-showJobs]]");
     } else {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fa7cc99c/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/pipes/Submitter.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/pipes/Submitter.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/pipes/Submitter.java
index 8f4259e..4f5b6a1 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/pipes/Submitter.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/pipes/Submitter.java
@@ -363,7 +363,7 @@ public class Submitter extends Configured implements Tool {
     void printUsage() {
       // The CLI package should do this for us, but I can't figure out how
       // to make it print something reasonable.
-      System.out.println("bin/hadoop pipes");
+      System.out.println("Usage: pipes ");
       System.out.println("  [-input <path>] // Input directory");
       System.out.println("  [-output <path>] // Output directory");
       System.out.println("  [-jar <jar file> // jar filename");

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fa7cc99c/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/tools/CLI.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/tools/CLI.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/tools/CLI.java
index c3f1564..a350829 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/tools/CLI.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/tools/CLI.java
@@ -421,7 +421,7 @@ public class CLI extends Configured implements Tool {
    * Display usage of the command-line tool and terminate execution.
    */
   private void displayUsage(String cmd) {
-    String prefix = "Usage: CLI ";
+    String prefix = "Usage: job ";
     String jobPriorityValues = getJobPriorityNames();
     String taskStates = "running, completed";
     

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fa7cc99c/hadoop-tools/hadoop-archives/src/main/java/org/apache/hadoop/tools/HadoopArchives.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-archives/src/main/java/org/apache/hadoop/tools/HadoopArchives.java b/hadoop-tools/hadoop-archives/src/main/java/org/apache/hadoop/tools/HadoopArchives.java
index 18cd972..c5c42b1 100644
--- a/hadoop-tools/hadoop-archives/src/main/java/org/apache/hadoop/tools/HadoopArchives.java
+++ b/hadoop-tools/hadoop-archives/src/main/java/org/apache/hadoop/tools/HadoopArchives.java
@@ -101,7 +101,7 @@ public class HadoopArchives implements Tool {
   /** the desired replication degree; default is 10 **/
   short repl = 10;
 
-  private static final String usage = "archive"
+  private static final String usage = "Usage: archive"
   + " -archiveName <NAME>.har -p <parent path> [-r <replication factor>]" +
       "<src>* <dest>" +
   "\n";

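The corrected prefixes match the shell commands through which these tools are actually invoked, rather than their internal class names. Illustrative invocations, using the same placeholder style as the usage strings themselves:

    mapred queue -list
    mapred job -status <job-id>
    mapred pipes -input <path> -output <path> -program <executable>
    hadoop archive -archiveName <NAME>.har -p <parent path> <src> <dest>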

[16/20] hadoop git commit: HADOOP-11754. RM fails to start in non-secure mode due to authentication filter failure. Contributed by Haohui Mai.

Posted by zj...@apache.org.
HADOOP-11754. RM fails to start in non-secure mode due to authentication filter failure. Contributed by Haohui Mai.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/471b1d93
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/471b1d93
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/471b1d93

Branch: refs/heads/YARN-2928
Commit: 471b1d9362b2fdcc3514720176210ab363ea8bfa
Parents: 6e598f8
Author: Haohui Mai <wh...@apache.org>
Authored: Mon Mar 30 11:44:22 2015 -0700
Committer: Zhijie Shen <zj...@apache.org>
Committed: Mon Mar 30 12:10:49 2015 -0700

----------------------------------------------------------------------
 .../server/AuthenticationFilter.java            | 108 +++++++++----------
 .../server/TestAuthenticationFilter.java        |  20 ++--
 hadoop-common-project/hadoop-common/CHANGES.txt |   3 +
 .../org/apache/hadoop/http/HttpServer2.java     |  53 ++++++++-
 .../AuthenticationFilterInitializer.java        |  18 ++--
 5 files changed, 128 insertions(+), 74 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/471b1d93/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationFilter.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationFilter.java b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationFilter.java
index 5c22fce..684e91c 100644
--- a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationFilter.java
+++ b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationFilter.java
@@ -25,6 +25,7 @@ import org.slf4j.LoggerFactory;
 import javax.servlet.Filter;
 import javax.servlet.FilterChain;
 import javax.servlet.FilterConfig;
+import javax.servlet.ServletContext;
 import javax.servlet.ServletException;
 import javax.servlet.ServletRequest;
 import javax.servlet.ServletResponse;
@@ -183,8 +184,6 @@ public class AuthenticationFilter implements Filter {
   private Signer signer;
   private SignerSecretProvider secretProvider;
   private AuthenticationHandler authHandler;
-  private boolean randomSecret;
-  private boolean customSecretProvider;
   private long validity;
   private String cookieDomain;
   private String cookiePath;
@@ -226,7 +225,6 @@ public class AuthenticationFilter implements Filter {
 
     initializeAuthHandler(authHandlerClassName, filterConfig);
 
-
     cookieDomain = config.getProperty(COOKIE_DOMAIN, null);
     cookiePath = config.getProperty(COOKIE_PATH, null);
   }
@@ -237,11 +235,8 @@ public class AuthenticationFilter implements Filter {
       Class<?> klass = Thread.currentThread().getContextClassLoader().loadClass(authHandlerClassName);
       authHandler = (AuthenticationHandler) klass.newInstance();
       authHandler.init(config);
-    } catch (ClassNotFoundException ex) {
-      throw new ServletException(ex);
-    } catch (InstantiationException ex) {
-      throw new ServletException(ex);
-    } catch (IllegalAccessException ex) {
+    } catch (ClassNotFoundException | InstantiationException |
+        IllegalAccessException ex) {
       throw new ServletException(ex);
     }
   }
@@ -251,62 +246,59 @@ public class AuthenticationFilter implements Filter {
     secretProvider = (SignerSecretProvider) filterConfig.getServletContext().
         getAttribute(SIGNER_SECRET_PROVIDER_ATTRIBUTE);
     if (secretProvider == null) {
-      Class<? extends SignerSecretProvider> providerClass
-              = getProviderClass(config);
-      try {
-        secretProvider = providerClass.newInstance();
-      } catch (InstantiationException ex) {
-        throw new ServletException(ex);
-      } catch (IllegalAccessException ex) {
-        throw new ServletException(ex);
-      }
+      // Tomcat cannot pass the provider object through the configuration,
+      // so execution falls into this path
       try {
-        secretProvider.init(config, filterConfig.getServletContext(), validity);
+        secretProvider = constructSecretProvider(
+            filterConfig.getServletContext(),
+            config, false);
       } catch (Exception ex) {
         throw new ServletException(ex);
       }
-    } else {
-      customSecretProvider = true;
     }
     signer = new Signer(secretProvider);
   }
 
-  @SuppressWarnings("unchecked")
-  private Class<? extends SignerSecretProvider> getProviderClass(Properties config)
-          throws ServletException {
-    String providerClassName;
-    String signerSecretProviderName
-            = config.getProperty(SIGNER_SECRET_PROVIDER, null);
-    // fallback to old behavior
-    if (signerSecretProviderName == null) {
-      String signatureSecretFile = config.getProperty(
-          SIGNATURE_SECRET_FILE, null);
-      // The precedence from high to low : file, random
-      if (signatureSecretFile != null) {
-        providerClassName = FileSignerSecretProvider.class.getName();
-      } else {
-        providerClassName = RandomSignerSecretProvider.class.getName();
-        randomSecret = true;
+  public static SignerSecretProvider constructSecretProvider(
+      ServletContext ctx, Properties config,
+      boolean disallowFallbackToRandomSecretProvider) throws Exception {
+    String name = config.getProperty(SIGNER_SECRET_PROVIDER, "file");
+    long validity = Long.parseLong(config.getProperty(AUTH_TOKEN_VALIDITY,
+                                                      "36000")) * 1000;
+
+    if (!disallowFallbackToRandomSecretProvider
+        && "file".equals(name)
+        && config.getProperty(SIGNATURE_SECRET_FILE) == null) {
+      name = "random";
+    }
+
+    SignerSecretProvider provider;
+    if ("file".equals(name)) {
+      provider = new FileSignerSecretProvider();
+      try {
+        provider.init(config, ctx, validity);
+      } catch (Exception e) {
+        if (!disallowFallbackToRandomSecretProvider) {
+          LOG.info("Unable to initialize FileSignerSecretProvider, " +
+                       "falling back to use random secrets.");
+          provider = new RandomSignerSecretProvider();
+          provider.init(config, ctx, validity);
+        } else {
+          throw e;
+        }
       }
+    } else if ("random".equals(name)) {
+      provider = new RandomSignerSecretProvider();
+      provider.init(config, ctx, validity);
+    } else if ("zookeeper".equals(name)) {
+      provider = new ZKSignerSecretProvider();
+      provider.init(config, ctx, validity);
     } else {
-      if ("random".equals(signerSecretProviderName)) {
-        providerClassName = RandomSignerSecretProvider.class.getName();
-        randomSecret = true;
-      } else if ("file".equals(signerSecretProviderName)) {
-        providerClassName = FileSignerSecretProvider.class.getName();
-      } else if ("zookeeper".equals(signerSecretProviderName)) {
-        providerClassName = ZKSignerSecretProvider.class.getName();
-      } else {
-        providerClassName = signerSecretProviderName;
-        customSecretProvider = true;
-      }
-    }
-    try {
-      return (Class<? extends SignerSecretProvider>) Thread.currentThread().
-              getContextClassLoader().loadClass(providerClassName);
-    } catch (ClassNotFoundException ex) {
-      throw new ServletException(ex);
+      provider = (SignerSecretProvider) Thread.currentThread().
+          getContextClassLoader().loadClass(name).newInstance();
+      provider.init(config, ctx, validity);
     }
+    return provider;
   }
 
   /**
@@ -335,7 +327,7 @@ public class AuthenticationFilter implements Filter {
    * @return if a random secret is being used.
    */
   protected boolean isRandomSecret() {
-    return randomSecret;
+    return secretProvider.getClass() == RandomSignerSecretProvider.class;
   }
 
   /**
@@ -344,7 +336,10 @@ public class AuthenticationFilter implements Filter {
    * @return if a custom implementation of a SignerSecretProvider is being used.
    */
   protected boolean isCustomSignerSecretProvider() {
-    return customSecretProvider;
+    Class<?> clazz = secretProvider.getClass();
+    return clazz != FileSignerSecretProvider.class && clazz !=
+        RandomSignerSecretProvider.class && clazz != ZKSignerSecretProvider
+        .class;
   }
 
   /**
@@ -385,9 +380,6 @@ public class AuthenticationFilter implements Filter {
       authHandler.destroy();
       authHandler = null;
     }
-    if (secretProvider != null) {
-      secretProvider.destroy();
-    }
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/471b1d93/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestAuthenticationFilter.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestAuthenticationFilter.java b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestAuthenticationFilter.java
index 26c10a9..63b812d 100644
--- a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestAuthenticationFilter.java
+++ b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestAuthenticationFilter.java
@@ -18,7 +18,9 @@ import java.io.FileWriter;
 import java.io.IOException;
 import java.io.Writer;
 import java.net.HttpCookie;
+import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.Enumeration;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Properties;
@@ -151,8 +153,7 @@ public class TestAuthenticationFilter {
   }
 
   @Test
-  public void testInit() throws Exception {
-
+  public void testFallbackToRandomSecretProvider() throws Exception {
     // minimal configuration & simple auth handler (Pseudo)
     AuthenticationFilter filter = new AuthenticationFilter();
     try {
@@ -162,8 +163,8 @@ public class TestAuthenticationFilter {
           AuthenticationFilter.AUTH_TOKEN_VALIDITY)).thenReturn(
           (new Long(TOKEN_VALIDITY_SEC)).toString());
       Mockito.when(config.getInitParameterNames()).thenReturn(
-          new Vector<String>(Arrays.asList(AuthenticationFilter.AUTH_TYPE,
-                                           AuthenticationFilter.AUTH_TOKEN_VALIDITY)).elements());
+          new Vector<>(Arrays.asList(AuthenticationFilter.AUTH_TYPE,
+                                     AuthenticationFilter.AUTH_TOKEN_VALIDITY)).elements());
       ServletContext context = Mockito.mock(ServletContext.class);
       Mockito.when(context.getAttribute(AuthenticationFilter.SIGNER_SECRET_PROVIDER_ATTRIBUTE))
           .thenReturn(null);
@@ -178,16 +179,17 @@ public class TestAuthenticationFilter {
     } finally {
       filter.destroy();
     }
-
+  }
+  @Test
+  public void testInit() throws Exception {
     // custom secret as inline
-    filter = new AuthenticationFilter();
+    AuthenticationFilter filter = new AuthenticationFilter();
     try {
       FilterConfig config = Mockito.mock(FilterConfig.class);
       Mockito.when(config.getInitParameter(AuthenticationFilter.AUTH_TYPE)).thenReturn("simple");
-      Mockito.when(config.getInitParameter(AuthenticationFilter.SIGNATURE_SECRET)).thenReturn("secret");
       Mockito.when(config.getInitParameterNames()).thenReturn(
-        new Vector<String>(Arrays.asList(AuthenticationFilter.AUTH_TYPE,
-                                 AuthenticationFilter.SIGNATURE_SECRET)).elements());
+          new Vector<>(Arrays.asList(AuthenticationFilter.AUTH_TYPE))
+              .elements());
       ServletContext context = Mockito.mock(ServletContext.class);
       Mockito.when(context.getAttribute(
           AuthenticationFilter.SIGNER_SECRET_PROVIDER_ATTRIBUTE)).thenReturn(

http://git-wip-us.apache.org/repos/asf/hadoop/blob/471b1d93/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 8b59972..b5d2303 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -1178,6 +1178,9 @@ Release 2.7.0 - UNRELEASED
     HADOOP-11761. Fix findbugs warnings in org.apache.hadoop.security
     .authentication. (Li Lu via wheat9)
 
+    HADOOP-11754. RM fails to start in non-secure mode due to authentication
+    filter failure. (wheat9)
+
 Release 2.6.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/471b1d93/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
index 566861e..0f1c222 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
@@ -31,6 +31,7 @@ import java.util.Enumeration;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
+import java.util.Properties;
 
 import javax.servlet.Filter;
 import javax.servlet.FilterChain;
@@ -53,6 +54,11 @@ import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.ConfServlet;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.security.AuthenticationFilterInitializer;
+import org.apache.hadoop.security.authentication.util.FileSignerSecretProvider;
+import org.apache.hadoop.security.authentication.util.RandomSignerSecretProvider;
+import org.apache.hadoop.security.authentication.util.SignerSecretProvider;
+import org.apache.hadoop.security.authentication.util.ZKSignerSecretProvider;
 import org.apache.hadoop.security.ssl.SslSocketConnectorSecure;
 import org.apache.hadoop.jmx.JMXJsonServlet;
 import org.apache.hadoop.log.LogLevel;
@@ -91,6 +97,8 @@ import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;
 import com.sun.jersey.spi.container.servlet.ServletContainer;
 
+import static org.apache.hadoop.security.authentication.server
+    .AuthenticationFilter.*;
 /**
  * Create a Jetty embedded server to answer http requests. The primary goal is
  * to serve up status information for the server. There are three contexts:
@@ -160,6 +168,8 @@ public final class HttpServer2 implements FilterContainer {
     private boolean findPort;
 
     private String hostName;
+    private boolean disallowFallbackToRandomSignerSecretProvider;
+    private String authFilterConfigurationPrefix = "hadoop.http.authentication.";
 
     public Builder setName(String name){
       this.name = name;
@@ -254,6 +264,16 @@ public final class HttpServer2 implements FilterContainer {
       return this;
     }
 
+    public Builder disallowFallbackToRandomSingerSecretProvider(boolean value) {
+      this.disallowFallbackToRandomSignerSecretProvider = value;
+      return this;
+    }
+
+    public Builder authFilterConfigurationPrefix(String value) {
+      this.authFilterConfigurationPrefix = value;
+      return this;
+    }
+
     public HttpServer2 build() throws IOException {
       Preconditions.checkNotNull(name, "name is not set");
       Preconditions.checkState(!endpoints.isEmpty(), "No endpoints specified");
@@ -314,6 +334,18 @@ public final class HttpServer2 implements FilterContainer {
     this.webServer = new Server();
     this.adminsAcl = b.adminsAcl;
     this.webAppContext = createWebAppContext(b.name, b.conf, adminsAcl, appDir);
+    try {
+      SignerSecretProvider secretProvider =
+          constructSecretProvider(b, webAppContext.getServletContext());
+      this.webAppContext.getServletContext().setAttribute
+          (AuthenticationFilter.SIGNER_SECRET_PROVIDER_ATTRIBUTE,
+           secretProvider);
+    } catch(IOException e) {
+      throw e;
+    } catch (Exception e) {
+      throw new IOException(e);
+    }
+
     this.findPort = b.findPort;
     initializeWebServer(b.name, b.hostName, b.conf, b.pathSpecs);
   }
@@ -405,9 +437,28 @@ public final class HttpServer2 implements FilterContainer {
     return ctx;
   }
 
+  private static SignerSecretProvider constructSecretProvider(final Builder b,
+      ServletContext ctx)
+      throws Exception {
+    final Configuration conf = b.conf;
+    Properties config = getFilterProperties(conf,
+                                            b.authFilterConfigurationPrefix);
+    return AuthenticationFilter.constructSecretProvider(
+        ctx, config, b.disallowFallbackToRandomSignerSecretProvider);
+  }
+
+  private static Properties getFilterProperties(Configuration conf, String
+      prefix) {
+    Properties prop = new Properties();
+    Map<String, String> filterConfig = AuthenticationFilterInitializer
+        .getFilterConfigMap(conf, prefix);
+    prop.putAll(filterConfig);
+    return prop;
+  }
+
   private static void addNoCacheFilter(WebAppContext ctxt) {
     defineFilter(ctxt, NO_CACHE_FILTER, NoCacheFilter.class.getName(),
-        Collections.<String, String> emptyMap(), new String[] { "/*" });
+                 Collections.<String, String> emptyMap(), new String[] { "/*" });
   }
 
   @InterfaceAudience.Private

http://git-wip-us.apache.org/repos/asf/hadoop/blob/471b1d93/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/AuthenticationFilterInitializer.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/AuthenticationFilterInitializer.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/AuthenticationFilterInitializer.java
index cb3830d..ca221f5 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/AuthenticationFilterInitializer.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/AuthenticationFilterInitializer.java
@@ -56,6 +56,15 @@ public class AuthenticationFilterInitializer extends FilterInitializer {
    */
   @Override
   public void initFilter(FilterContainer container, Configuration conf) {
+    Map<String, String> filterConfig = getFilterConfigMap(conf, PREFIX);
+
+    container.addFilter("authentication",
+                        AuthenticationFilter.class.getName(),
+                        filterConfig);
+  }
+
+  public static Map<String, String> getFilterConfigMap(Configuration conf,
+      String prefix) {
     Map<String, String> filterConfig = new HashMap<String, String>();
 
     //setting the cookie path to root '/' so it is used for all resources.
@@ -63,9 +72,9 @@ public class AuthenticationFilterInitializer extends FilterInitializer {
 
     for (Map.Entry<String, String> entry : conf) {
       String name = entry.getKey();
-      if (name.startsWith(PREFIX)) {
+      if (name.startsWith(prefix)) {
         String value = conf.get(name);
-        name = name.substring(PREFIX.length());
+        name = name.substring(prefix.length());
         filterConfig.put(name, value);
       }
     }
@@ -82,10 +91,7 @@ public class AuthenticationFilterInitializer extends FilterInitializer {
       }
       filterConfig.put(KerberosAuthenticationHandler.PRINCIPAL, principal);
     }
-
-    container.addFilter("authentication",
-                        AuthenticationFilter.class.getName(),
-                        filterConfig);
+    return filterConfig;
   }
 
 }
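
Taken together, the change moves secret-provider construction into the static AuthenticationFilter.constructSecretProvider(...) (selecting "file", "random", "zookeeper", or a custom class by name, with an optional fallback to random secrets) and has HttpServer2 pre-populate the servlet context attribute, controlled by the two new Builder knobs. A rough sketch of how a caller might wire those knobs, assuming the usual Builder setters addEndpoint and setConf (not shown in this diff) and keeping the method names exactly as introduced above; this is an illustration, not code from the patch:

    import java.io.IOException;
    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.http.HttpServer2;

    public class SecureWebServerSketch {
      public static HttpServer2 build(Configuration conf) throws IOException {
        return new HttpServer2.Builder()
            .setName("example")
            .addEndpoint(URI.create("http://localhost:0"))
            .setConf(conf)
            // Fail instead of silently falling back to random secrets.
            .disallowFallbackToRandomSingerSecretProvider(true)
            // Default prefix from the diff; repeated here only for illustration.
            .authFilterConfigurationPrefix("hadoop.http.authentication.")
            .build();
      }
    }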


[07/20] hadoop git commit: HDFS-7700. Document quota support for storage types. (Contributed by Xiaoyu Yao)

Posted by zj...@apache.org.
HDFS-7700. Document quota support for storage types. (Contributed by Xiaoyu Yao)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7fa9e0e6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7fa9e0e6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7fa9e0e6

Branch: refs/heads/YARN-2928
Commit: 7fa9e0e610669eea0f65ed513dcb6832aa0993ba
Parents: 8f63bd7
Author: Arpit Agarwal <ar...@apache.org>
Authored: Fri Mar 27 19:49:26 2015 -0700
Committer: Zhijie Shen <zj...@apache.org>
Committed: Mon Mar 30 12:10:47 2015 -0700

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |  3 ++
 .../src/site/markdown/HDFSCommands.md           |  8 ++--
 .../src/site/markdown/HdfsQuotaAdminGuide.md    | 41 ++++++++++++++++++--
 3 files changed, 45 insertions(+), 7 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7fa9e0e6/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index af1dd60..f7cc2bc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1311,6 +1311,9 @@ Release 2.7.0 - UNRELEASED
       HDFS-7824. GetContentSummary API and its namenode implementation for
       Storage Type Quota/Usage. (Xiaoyu Yao via Arpit Agarwal)
 
+      HDFS-7700. Document quota support for storage types. (Xiaoyu Yao via
+      Arpit Agarwal)
+
 Release 2.6.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7fa9e0e6/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
index 191b5bc..bdb051b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
@@ -307,8 +307,8 @@ Usage:
               [-refreshNodes]
               [-setQuota <quota> <dirname>...<dirname>]
               [-clrQuota <dirname>...<dirname>]
-              [-setSpaceQuota <quota> <dirname>...<dirname>]
-              [-clrSpaceQuota <dirname>...<dirname>]
+              [-setSpaceQuota <quota> [-storageType <storagetype>] <dirname>...<dirname>]
+              [-clrSpaceQuota [-storageType <storagetype>] <dirname>...<dirname>]
               [-setStoragePolicy <path> <policyName>]
               [-getStoragePolicy <path>]
               [-finalizeUpgrade]
@@ -342,8 +342,8 @@ Usage:
 | `-refreshNodes` | Re-read the hosts and exclude files to update the set of Datanodes that are allowed to connect to the Namenode and those that should be decommissioned or recommissioned. |
 | `-setQuota` \<quota\> \<dirname\>...\<dirname\> | See [HDFS Quotas Guide](../hadoop-hdfs/HdfsQuotaAdminGuide.html#Administrative_Commands) for the detail. |
 | `-clrQuota` \<dirname\>...\<dirname\> | See [HDFS Quotas Guide](../hadoop-hdfs/HdfsQuotaAdminGuide.html#Administrative_Commands) for the detail. |
-| `-setSpaceQuota` \<quota\> \<dirname\>...\<dirname\> | See [HDFS Quotas Guide](../hadoop-hdfs/HdfsQuotaAdminGuide.html#Administrative_Commands) for the detail. |
-| `-clrSpaceQuota` \<dirname\>...\<dirname\> | See [HDFS Quotas Guide](../hadoop-hdfs/HdfsQuotaAdminGuide.html#Administrative_Commands) for the detail. |
+| `-setSpaceQuota` \<quota\> `[-storageType <storagetype>]` \<dirname\>...\<dirname\> | See [HDFS Quotas Guide](../hadoop-hdfs/HdfsQuotaAdminGuide.html#Administrative_Commands) for the detail. |
+| `-clrSpaceQuota` `[-storageType <storagetype>]` \<dirname\>...\<dirname\> | See [HDFS Quotas Guide](../hadoop-hdfs/HdfsQuotaAdminGuide.html#Administrative_Commands) for the detail. |
 | `-setStoragePolicy` \<path\> \<policyName\> | Set a storage policy to a file or a directory. |
 | `-getStoragePolicy` \<path\> | Get the storage policy of a file or a directory. |
 | `-finalizeUpgrade` | Finalize upgrade of HDFS. Datanodes delete their previous version working directories, followed by Namenode doing the same. This completes the upgrade process. |

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7fa9e0e6/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsQuotaAdminGuide.md
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsQuotaAdminGuide.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsQuotaAdminGuide.md
index a1bcd78..7c15bb1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsQuotaAdminGuide.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsQuotaAdminGuide.md
@@ -19,6 +19,7 @@ HDFS Quotas Guide
     * [Overview](#Overview)
     * [Name Quotas](#Name_Quotas)
     * [Space Quotas](#Space_Quotas)
+    * [Storage Type Quotas](#Storage_Type_Quotas)
     * [Administrative Commands](#Administrative_Commands)
     * [Reporting Command](#Reporting_Command)
 
@@ -41,6 +42,17 @@ The space quota is a hard limit on the number of bytes used by files in the tree
 
 Quotas are persistent with the fsimage. When starting, if the fsimage is immediately in violation of a quota (perhaps the fsimage was surreptitiously modified), a warning is printed for each of such violations. Setting or removing a quota creates a journal entry.
 
+Storage Type Quotas
+------------------
+
+The storage type quota is a hard limit on the usage of a specific storage type (SSD, DISK, ARCHIVE) by files in the tree rooted at the directory. It works similarly to the storage space quota in many respects but offers fine-grained control over cluster storage space usage. To set a storage type quota on a directory, storage policies must be configured on the directory in order to allow files to be stored in different storage types according to the storage policy. See the [HDFS Storage Policy Documentation](./ArchivalStorage.html) for more information.
+
+The storage type quota can be combined with the space quotas and name quotas to efficiently manage the cluster storage usage. For example,
+
+1. For directories with a storage policy configured, administrators should set storage type quotas for resource-constrained storage types such as SSD, and leave quotas for other storage types and the overall space quota either at less restrictive values or at the default (unlimited). HDFS deducts usage from both the quota of the target storage type, as determined by the storage policy, and the overall space quota.
+2. For directories without a storage policy configured, administrators should not configure storage type quotas. A storage type quota can be set even when the specific storage type is unavailable (or available but not configured properly with storage type information). However, the overall space quota is recommended in this case, as the storage type information is either unavailable or inaccurate for storage type quota enforcement.
+3. Storage type quotas on DISK are of limited use except when DISK is not the dominant storage medium (e.g. a cluster with predominantly ARCHIVE storage).
+
 Administrative Commands
 -----------------------
 
@@ -77,17 +89,40 @@ Quotas are managed by a set of commands available only to the administrator.
     directory, with faults reported if the directory does not exist or
     it is a file. It is not a fault if the directory has no quota.
 
+
+*   `hdfs dfsadmin -setSpaceQuota <N> -storageType <storagetype> <directory>...<directory>`
+
+    Set the storage type quota to be N bytes of storage type specified for each directory.
+    This is a hard limit on total storage type usage for all the files under the directory tree.
+    The storage type quota usage reflects the intended usage based on the storage policy. For example,
+    one GB of data with a replication factor of 3 and the ALL_SSD storage policy consumes 3 GB of SSD quota. N
+    can also be specified with a binary prefix for convenience, e.g. 50g for 50
+    gigabytes, 2t for 2 terabytes, and so on. Best effort for each
+    directory, with faults reported if N is neither zero nor a positive
+    integer, the directory does not exist or it is a file, or the
+    directory would immediately exceed the new quota.
+
+*   `hdfs dfsadmin -clrSpaceQuota -storageType <storagetype> <directory>...<directory>`
+
+    Remove storage type quota specified for each directory. Best effort
+    for each directory, with faults reported if the directory does not exist or
+    it is a file. It is not a fault if the directory has no storage type quota
+    for the specified storage type.
+
 Reporting Command
 -----------------
 
 An extension to the count command of the HDFS shell reports quota values and the current count of names and bytes in use.
 
-*   `hadoop fs -count -q [-h] [-v] <directory>...<directory>`
+*   `hadoop fs -count -q [-h] [-v] [-t [comma-separated list of storagetypes]] <directory>...<directory>`
 
     With the -q option, also report the name quota value set for each
     directory, the available name quota remaining, the space quota
     value set, and the available space quota remaining. If the
     directory does not have a quota set, the reported values are `none`
     and `inf`. The -h option shows sizes in human readable format.
-    The -v option displays a header line.
-
+    The -v option displays a header line. The -t option displays the per
+    storage type quota set and the available quota remaining for each directory.
+    If specific storage types are given after the -t option, only the quota and
+    remaining quota of the specified types will be displayed. Otherwise, the quota and
+    remaining quota of all storage types that support quotas will be displayed.
\ No newline at end of file
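
The administrative and reporting commands documented above compose as follows; the directory and quota sizes are placeholders chosen only to illustrate the documented syntax:

    # Overall space quota plus a tighter quota on the SSD storage type
    hdfs dfsadmin -setSpaceQuota 100g /user/alice/data
    hdfs dfsadmin -setSpaceQuota 50g -storageType SSD /user/alice/data

    # Report quota and remaining quota, restricted to the SSD storage type
    hadoop fs -count -q -h -v -t SSD /user/alice/data

    # Remove only the SSD storage type quota
    hdfs dfsadmin -clrSpaceQuota -storageType SSD /user/alice/data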


[12/20] hadoop git commit: HDFS-7742. Favoring decommissioning node for replication can cause a block to stay underreplicated for long periods. Contributed by Nathan Roberts.

Posted by zj...@apache.org.
HDFS-7742. Favoring decommissioning node for replication can cause a block to stay
underreplicated for long periods. Contributed by Nathan Roberts.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/040fd169
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/040fd169
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/040fd169

Branch: refs/heads/YARN-2928
Commit: 040fd169007acb6c310f317e63a50306b8b4cb49
Parents: 1bfe248
Author: Kihwal Lee <ki...@apache.org>
Authored: Mon Mar 30 10:10:11 2015 -0500
Committer: Zhijie Shen <zj...@apache.org>
Committed: Mon Mar 30 12:10:48 2015 -0700

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |  3 ++
 .../server/blockmanagement/BlockManager.java    | 10 ++---
 .../blockmanagement/TestBlockManager.java       | 42 ++++++++++++++++++++
 3 files changed, 50 insertions(+), 5 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/040fd169/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index f437ad8..811ee75 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -829,6 +829,9 @@ Release 2.7.0 - UNRELEASED
     HDFS-7410. Support CreateFlags with append() to support hsync() for
     appending streams (Vinayakumar B via Colin P. McCabe)
 
+    HDFS-7742. Favoring decommissioning node for replication can cause a block 
+    to stay underreplicated for long periods (Nathan Roberts via kihwal)
+
   OPTIMIZATIONS
 
     HDFS-7454. Reduce memory footprint for AclEntries in NameNode.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/040fd169/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index ad40782..f6e15a3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -1637,7 +1637,8 @@ public class BlockManager {
       // If so, do not select the node as src node
       if ((nodesCorrupt != null) && nodesCorrupt.contains(node))
         continue;
-      if(priority != UnderReplicatedBlocks.QUEUE_HIGHEST_PRIORITY
+      if(priority != UnderReplicatedBlocks.QUEUE_HIGHEST_PRIORITY 
+          && !node.isDecommissionInProgress() 
           && node.getNumberOfBlocksToBeReplicated() >= maxReplicationStreams)
       {
         continue; // already reached replication limit
@@ -1652,13 +1653,12 @@ public class BlockManager {
       // never use already decommissioned nodes
       if(node.isDecommissioned())
         continue;
-      // we prefer nodes that are in DECOMMISSION_INPROGRESS state
-      if(node.isDecommissionInProgress() || srcNode == null) {
+
+      // We got this far, current node is a reasonable choice
+      if (srcNode == null) {
         srcNode = node;
         continue;
       }
-      if(srcNode.isDecommissionInProgress())
-        continue;
       // switch to a different node randomly
       // this to prevent from deterministically selecting the same node even
       // if the node failed to replicate the block on previous iterations

http://git-wip-us.apache.org/repos/asf/hadoop/blob/040fd169/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
index 707c780..91abb2a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
@@ -535,6 +535,48 @@ public class TestBlockManager {
   }
 
   @Test
+  public void testFavorDecomUntilHardLimit() throws Exception {
+    bm.maxReplicationStreams = 0;
+    bm.replicationStreamsHardLimit = 1;
+
+    long blockId = 42;         // arbitrary
+    Block aBlock = new Block(blockId, 0, 0);
+    List<DatanodeDescriptor> origNodes = getNodes(0, 1);
+    // Add the block to the first node.
+    addBlockOnNodes(blockId,origNodes.subList(0,1));
+    origNodes.get(0).startDecommission();
+
+    List<DatanodeDescriptor> cntNodes = new LinkedList<DatanodeDescriptor>();
+    List<DatanodeStorageInfo> liveNodes = new LinkedList<DatanodeStorageInfo>();
+
+    assertNotNull("Chooses decommissioning source node for a normal replication"
+        + " if all available source nodes have reached their replication"
+        + " limits below the hard limit.",
+        bm.chooseSourceDatanode(
+            aBlock,
+            cntNodes,
+            liveNodes,
+            new NumberReplicas(),
+            UnderReplicatedBlocks.QUEUE_UNDER_REPLICATED));
+
+
+    // Increase the replication count to test replication count > hard limit
+    DatanodeStorageInfo targets[] = { origNodes.get(1).getStorageInfos()[0] };
+    origNodes.get(0).addBlockToBeReplicated(aBlock, targets);
+
+    assertNull("Does not choose a source decommissioning node for a normal"
+        + " replication when all available nodes exceed the hard limit.",
+        bm.chooseSourceDatanode(
+            aBlock,
+            cntNodes,
+            liveNodes,
+            new NumberReplicas(),
+            UnderReplicatedBlocks.QUEUE_UNDER_REPLICATED));
+  }
+
+
+
+  @Test
   public void testSafeModeIBR() throws Exception {
     DatanodeDescriptor node = spy(nodes.get(0));
     DatanodeStorageInfo ds = node.getStorageInfos()[0];
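
The behavioural change is easiest to read as the skip condition for candidate source nodes: a decommissioning node is no longer preferred outright, but it is exempt from the soft maxReplicationStreams limit, so it can still be chosen until the separate hard limit (enforced elsewhere in chooseSourceDatanode) is reached, which is what the new test exercises. A simplified, self-contained restatement with stand-in types, not the actual BlockManager code:

    class SourceSelectionSketch {
      static final int QUEUE_HIGHEST_PRIORITY = 0;
      int maxReplicationStreams = 2;

      static class Node {
        boolean decommissionInProgress;
        int blocksToBeReplicated;
      }

      // Mirrors the revised condition in the diff: only non-decommissioning
      // nodes are throttled by the soft replication-stream limit.
      boolean skipAsSource(Node node, int priority) {
        return priority != QUEUE_HIGHEST_PRIORITY
            && !node.decommissionInProgress
            && node.blocksToBeReplicated >= maxReplicationStreams;
      }
    }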


[09/20] hadoop git commit: HDFS-6263. Remove DRFA.MaxBackupIndex config from log4j.properties. Contributed by Abhiraj Butala.

Posted by zj...@apache.org.
HDFS-6263. Remove DRFA.MaxBackupIndex config from log4j.properties. Contributed by Abhiraj Butala.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e700a4b9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e700a4b9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e700a4b9

Branch: refs/heads/YARN-2928
Commit: e700a4b9d0d008643241496200efd3746609350c
Parents: 7d4d615
Author: Akira Ajisaka <aa...@apache.org>
Authored: Mon Mar 30 10:52:15 2015 +0900
Committer: Zhijie Shen <zj...@apache.org>
Committed: Mon Mar 30 12:10:47 2015 -0700

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt                       | 3 +++
 .../src/contrib/bkjournal/src/test/resources/log4j.properties     | 2 --
 2 files changed, 3 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e700a4b9/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 496db06..e026f85 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -347,6 +347,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-8004. Use KeyProviderCryptoExtension#warmUpEncryptedKeys when creating
     an encryption zone. (awang via asuresh)
 
+    HDFS-6263. Remove DRFA.MaxBackupIndex config from log4j.properties.
+    (Abhiraj Butala via aajisaka)
+
   OPTIMIZATIONS
 
   BUG FIXES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e700a4b9/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/resources/log4j.properties
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/resources/log4j.properties b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/resources/log4j.properties
index 8a6b217..f66c84b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/resources/log4j.properties
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/resources/log4j.properties
@@ -53,8 +53,6 @@ log4j.appender.ROLLINGFILE.layout.ConversionPattern=%d{ISO8601} - %-5p - [%t:%C{
 
 # Max log file size of 10MB
 log4j.appender.ROLLINGFILE.MaxFileSize=10MB
-# uncomment the next line to limit number of backup files
-#log4j.appender.ROLLINGFILE.MaxBackupIndex=10
 
 log4j.appender.ROLLINGFILE.layout=org.apache.log4j.PatternLayout
 log4j.appender.ROLLINGFILE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L] - %m%n