Posted to common-commits@hadoop.apache.org by aw...@apache.org on 2015/07/28 19:46:41 UTC
[01/10] hadoop git commit: YARN-3852. Add docker container support to
container-executor. Contributed by Abin Shahab.
Repository: hadoop
Updated Branches:
refs/heads/HADOOP-12111 4d4f288d3 -> 03335bb4d
YARN-3852. Add docker container support to container-executor. Contributed by Abin Shahab.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f36835ff
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f36835ff
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f36835ff
Branch: refs/heads/HADOOP-12111
Commit: f36835ff9b878fa20fe58a30f9d1e8c47702d6d2
Parents: 2196e39
Author: Varun Vasudev <vv...@apache.org>
Authored: Mon Jul 27 10:12:30 2015 -0700
Committer: Varun Vasudev <vv...@apache.org>
Committed: Mon Jul 27 10:14:51 2015 -0700
----------------------------------------------------------------------
hadoop-yarn-project/CHANGES.txt | 3 +
.../container-executor/impl/configuration.c | 17 +-
.../container-executor/impl/configuration.h | 2 +
.../impl/container-executor.c | 417 ++++++++++++++++---
.../impl/container-executor.h | 25 +-
.../main/native/container-executor/impl/main.c | 97 ++++-
6 files changed, 480 insertions(+), 81 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f36835ff/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 3b7d8a8..4e54aea 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -150,6 +150,9 @@ Release 2.8.0 - UNRELEASED
YARN-3656. LowCost: A Cost-Based Placement Agent for YARN Reservations.
(Jonathan Yaniv and Ishai Menache via curino)
+ YARN-3852. Add docker container support to container-executor
+ (Abin Shahab via vvasudev)
+
IMPROVEMENTS
YARN-644. Basic null check is not performed on passed in arguments before
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f36835ff/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/configuration.c
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/configuration.c b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/configuration.c
index eaa1f19..2825367 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/configuration.c
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/configuration.c
@@ -291,27 +291,23 @@ char ** get_values(const char * key) {
return extract_values(value);
}
-/**
- * Extracts array of values from the '%' separated list of values.
- */
-char ** extract_values(char *value) {
+char ** extract_values_delim(char *value, const char *delim) {
char ** toPass = NULL;
char *tempTok = NULL;
char *tempstr = NULL;
int size = 0;
int toPassSize = MAX_SIZE;
-
//first allocate an array of 10
if(value != NULL) {
toPass = (char **) malloc(sizeof(char *) * toPassSize);
- tempTok = strtok_r((char *)value, "%", &tempstr);
+ tempTok = strtok_r((char *)value, delim, &tempstr);
while (tempTok != NULL) {
toPass[size++] = tempTok;
if(size == toPassSize) {
toPassSize += MAX_SIZE;
toPass = (char **) realloc(toPass,(sizeof(char *) * toPassSize));
}
- tempTok = strtok_r(NULL, "%", &tempstr);
+ tempTok = strtok_r(NULL, delim, &tempstr);
}
}
if (toPass != NULL) {
@@ -320,6 +316,13 @@ char ** extract_values(char *value) {
return toPass;
}
+/**
+ * Extracts array of values from the '%' separated list of values.
+ */
+char ** extract_values(char *value) {
+ return extract_values_delim(value, "%");
+}
+
// free an entry set of values
void free_values(char** values) {
if (*values != NULL) {
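A note on the refactored helper above: extract_values_delim tokenizes the value buffer in place with strtok_r, so the caller must pass writable memory, and the returned pointers alias the original buffer rather than copies. A minimal standalone sketch of the same splitting pattern; the buffer contents are hypothetical, not taken from the patch:

#include <stdio.h>
#include <string.h>

int main(void) {
  /* must be a writable array: strtok_r rewrites the delimiters in place */
  char value[] = "/grid/0/tmp%/grid/1/tmp";
  char *saveptr = NULL;
  char *tok = strtok_r(value, "%", &saveptr);
  while (tok != NULL) {
    printf("token: %s\n", tok);
    tok = strtok_r(NULL, "%", &saveptr);
  }
  return 0;
}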
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f36835ff/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/configuration.h
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/configuration.h b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/configuration.h
index 133e67b..390a5b5 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/configuration.h
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/configuration.h
@@ -46,6 +46,8 @@ char ** get_values(const char* key);
// Extracts array of values from the comma separated list of values.
char ** extract_values(char *value);
+char ** extract_values_delim(char *value, const char *delim);
+
// free the memory returned by get_values
void free_values(char** values);
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f36835ff/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
index 0663166..ffd7a2f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
@@ -207,16 +207,20 @@ static int write_pid_to_file_as_nm(const char* pid_file, pid_t pid) {
uid_t user = geteuid();
gid_t group = getegid();
if (change_effective_user(nm_uid, nm_gid) != 0) {
+ fprintf(ERRORFILE, "Could not change to effective users %d, %d\n", nm_uid, nm_gid);
+ fflush(ERRORFILE);
return -1;
}
char *temp_pid_file = concatenate("%s.tmp", "pid_file_path", 1, pid_file);
-
+ fprintf(LOGFILE, "Writing to tmp file %s\n", temp_pid_file);
+ fflush(LOGFILE);
// create with 700
int pid_fd = open(temp_pid_file, O_WRONLY|O_CREAT|O_EXCL, S_IRWXU);
if (pid_fd == -1) {
fprintf(LOGFILE, "Can't open file %s as node manager - %s\n", temp_pid_file,
strerror(errno));
+ fflush(LOGFILE);
free(temp_pid_file);
return -1;
}
@@ -229,6 +233,7 @@ static int write_pid_to_file_as_nm(const char* pid_file, pid_t pid) {
if (written == -1) {
fprintf(LOGFILE, "Failed to write pid to file %s as node manager - %s\n",
temp_pid_file, strerror(errno));
+ fflush(LOGFILE);
free(temp_pid_file);
return -1;
}
@@ -238,6 +243,7 @@ static int write_pid_to_file_as_nm(const char* pid_file, pid_t pid) {
if (rename(temp_pid_file, pid_file)) {
fprintf(LOGFILE, "Can't move pid file from %s to %s as node manager - %s\n",
temp_pid_file, pid_file, strerror(errno));
+ fflush(LOGFILE);
unlink(temp_pid_file);
free(temp_pid_file);
return -1;
@@ -848,12 +854,15 @@ static int copy_file(int input, const char* in_filename,
const char* out_filename, mode_t perm) {
const int buffer_size = 128*1024;
char buffer[buffer_size];
+
int out_fd = open(out_filename, O_WRONLY|O_CREAT|O_EXCL|O_NOFOLLOW, perm);
if (out_fd == -1) {
fprintf(LOGFILE, "Can't open %s for output - %s\n", out_filename,
strerror(errno));
+ fflush(LOGFILE);
return -1;
}
+
ssize_t len = read(input, buffer, buffer_size);
while (len > 0) {
ssize_t pos = 0;
@@ -1026,42 +1035,350 @@ int initialize_app(const char *user, const char *app_id,
return -1;
}
-int launch_container_as_user(const char *user, const char *app_id,
- const char *container_id, const char *work_dir,
- const char *script_name, const char *cred_file,
- const char* pid_file, char* const* local_dirs,
- char* const* log_dirs, const char *resources_key,
- char* const* resources_values) {
+char* parse_docker_command_file(const char* command_file) {
+ int i = 0;
+ size_t len = 0;
+ char *line = NULL;
+ ssize_t read;
+ FILE *stream;
+ stream = fopen(command_file, "r");
+ if (stream == NULL) {
+ fprintf(ERRORFILE, "Cannot open file %s - %s",
+ command_file, strerror(errno));
+ fflush(ERRORFILE);
+ exit(ERROR_OPENING_FILE);
+ }
+ if ((read = getline(&line, &len, stream)) == -1) {
+ fprintf(ERRORFILE, "Error reading command_file %s\n", command_file);
+ fflush(ERRORFILE);
+ exit(ERROR_READING_FILE);
+ }
+ fclose(stream);
+
+ return line;
+}
+
+int run_docker(const char *command_file) {
+ char* docker_command = parse_docker_command_file(command_file);
+ char* docker_binary = get_value(DOCKER_BINARY_KEY);
+ char* docker_command_with_binary = calloc(sizeof(char), PATH_MAX);
+ sprintf(docker_command_with_binary, "%s %s", docker_binary, docker_command);
+ char **args = extract_values_delim(docker_command_with_binary, " ");
+
int exit_code = -1;
- char *script_file_dest = NULL;
- char *cred_file_dest = NULL;
- char *exit_code_file = NULL;
+ if (execvp(docker_binary, args) != 0) {
+ fprintf(ERRORFILE, "Couldn't execute the container launch with args %s - %s",
+ docker_binary, strerror(errno));
+ fflush(LOGFILE);
+ fflush(ERRORFILE);
+ free(docker_binary);
+ free(args);
+ free(docker_command_with_binary);
+ free(docker_command);
+ exit_code = DOCKER_RUN_FAILED;
+ }
+ // execvp replaces the process image on success, so reaching this
+ // point means the launch failed
+ return exit_code;
+}
- script_file_dest = get_container_launcher_file(work_dir);
+int create_script_paths(const char *work_dir,
+ const char *script_name, const char *cred_file,
+ char** script_file_dest, char** cred_file_dest,
+ int* container_file_source, int* cred_file_source ) {
+ int exit_code = -1;
+
+ *script_file_dest = get_container_launcher_file(work_dir);
if (*script_file_dest == NULL) {
exit_code = OUT_OF_MEMORY;
- goto cleanup;
+ fprintf(ERRORFILE, "Could not create script_file_dest");
+ fflush(ERRORFILE);
+ return exit_code;
}
- cred_file_dest = get_container_credentials_file(work_dir);
+
+ *cred_file_dest = get_container_credentials_file(work_dir);
if (NULL == *cred_file_dest) {
exit_code = OUT_OF_MEMORY;
+ fprintf(ERRORFILE, "Could not create cred_file_dest");
+ fflush(ERRORFILE);
+ return exit_code;
+ }
+ // open launch script
+ *container_file_source = open_file_as_nm(script_name);
+ if (*container_file_source == -1) {
+ exit_code = INVALID_NM_ROOT_DIRS;
+ fprintf(ERRORFILE, "Could not open container file");
+ fflush(ERRORFILE);
+ return exit_code;
+ }
+ // open credentials
+ *cred_file_source = open_file_as_nm(cred_file);
+ if (*cred_file_source == -1) {
+ exit_code = INVALID_ARGUMENT_NUMBER;
+ fprintf(ERRORFILE, "Could not open cred file");
+ fflush(ERRORFILE);
+ return exit_code;
+ }
+
+ exit_code = 0;
+ return exit_code;
+}
+
+int create_local_dirs(const char * user, const char *app_id,
+ const char *container_id, const char *work_dir,
+ const char *script_name, const char *cred_file,
+ char* const* local_dirs,
+ char* const* log_dirs, int effective_user,
+ char* script_file_dest, char* cred_file_dest,
+ int container_file_source, int cred_file_source) {
+ int exit_code = -1;
+ // create the user directory on all disks
+ int result = initialize_user(user, local_dirs);
+ if (result != 0) {
+ fprintf(ERRORFILE, "Could not create user dir");
+ fflush(ERRORFILE);
+ return result;
+ }
+
+ // initializing log dirs
+ int log_create_result = create_log_dirs(app_id, log_dirs);
+ if (log_create_result != 0) {
+ fprintf(ERRORFILE, "Could not create log dirs");
+ fflush(ERRORFILE);
+ return log_create_result;
+ }
+ if (effective_user == 1) {
+ if (change_effective_user(user_detail->pw_uid, user_detail->pw_gid) != 0) {
+ fprintf(ERRORFILE, "Could not change to effective users %d, %d\n", user_detail->pw_uid, user_detail->pw_gid);
+ fflush(ERRORFILE);
+ goto cleanup;
+ }
+ } else {
+ // give up root privs
+ if (change_user(user_detail->pw_uid, user_detail->pw_gid) != 0) {
+ exit_code = SETUID_OPER_FAILED;
+ goto cleanup;
+ }
+ }
+ // Create container specific directories as user. If there are no resources
+ // to localize for this container, app-directories and log-directories are
+ // also created automatically as part of this call.
+ if (create_container_directories(user, app_id, container_id, local_dirs,
+ log_dirs, work_dir) != 0) {
+ fprintf(ERRORFILE, "Could not create container dirs");
+ fflush(ERRORFILE);
+ goto cleanup;
+ }
+
+ // 700
+ if (copy_file(container_file_source, script_name, script_file_dest,S_IRWXU) != 0) {
+ fprintf(ERRORFILE, "Could not create copy file %d %s\n", container_file_source, script_file_dest);
+ fflush(ERRORFILE);
+ exit_code = INVALID_COMMAND_PROVIDED;
+ goto cleanup;
+ }
+
+ // 600
+ if (copy_file(cred_file_source, cred_file, cred_file_dest,
+ S_IRUSR | S_IWUSR) != 0) {
+ exit_code = UNABLE_TO_EXECUTE_CONTAINER_SCRIPT;
+ fprintf(ERRORFILE, "Could not copy file");
+ fflush(ERRORFILE);
+ goto cleanup;
+ }
+
+ if (chdir(work_dir) != 0) {
+ fprintf(ERRORFILE, "Can't change directory to %s -%s\n", work_dir,
+ strerror(errno));
+ fflush(ERRORFILE);
+ goto cleanup;
+ }
+ exit_code = 0;
+ cleanup:
+ return exit_code;
+}
+
+int launch_docker_container_as_user(const char * user, const char *app_id,
+ const char *container_id, const char *work_dir,
+ const char *script_name, const char *cred_file,
+ const char *pid_file, char* const* local_dirs,
+ char* const* log_dirs, const char *command_file,
+ const char *resources_key,
+ char* const* resources_values) {
+ int exit_code = -1;
+ char *script_file_dest = NULL;
+ char *cred_file_dest = NULL;
+ char *exit_code_file = NULL;
+ char docker_command_with_binary[PATH_MAX];
+ char docker_wait_command[PATH_MAX];
+ char docker_inspect_command[PATH_MAX];
+ char docker_rm_command[PATH_MAX];
+ int container_file_source =-1;
+ int cred_file_source = -1;
+
+ char *docker_command = parse_docker_command_file(command_file);
+ char *docker_binary = get_value(DOCKER_BINARY_KEY);
+ if (docker_binary == NULL) {
+ docker_binary = "docker";
+ }
+ exit_code = create_script_paths(
+ work_dir, script_name, cred_file, &script_file_dest, &cred_file_dest,
+ &container_file_source, &cred_file_source);
+ if (exit_code != 0) {
+ fprintf(ERRORFILE, "Could not create script path\n");
+ fflush(ERRORFILE);
+ goto cleanup;
+ }
+ uid_t user_uid = geteuid();
+ gid_t user_gid = getegid();
+
+ exit_code = create_local_dirs(user, app_id, container_id,
+ work_dir, script_name, cred_file, local_dirs, log_dirs,
+ 1, script_file_dest, cred_file_dest,
+ container_file_source, cred_file_source);
+ if (exit_code != 0) {
+ fprintf(ERRORFILE, "Could not create local files and directories %d %d\n", container_file_source, cred_file_source);
+ fflush(ERRORFILE);
goto cleanup;
}
+
exit_code_file = get_exit_code_file(pid_file);
if (NULL == exit_code_file) {
exit_code = OUT_OF_MEMORY;
+ fprintf(ERRORFILE, "Container out of memory");
+ fflush(ERRORFILE);
goto cleanup;
}
- // open launch script
- int container_file_source = open_file_as_nm(script_name);
- if (container_file_source == -1) {
+ if (change_effective_user(0, user_gid) != 0) {
+ fprintf(ERRORFILE, "Could not change to effective users %d, %d\n", 0, user_gid);
+ fflush(ERRORFILE);
goto cleanup;
}
- // open credentials
- int cred_file_source = open_file_as_nm(cred_file);
- if (cred_file_source == -1) {
+ sprintf(docker_command_with_binary, "%s %s", docker_binary, docker_command);
+
+ FILE* start_docker = popen(docker_command_with_binary, "r");
+ if (pclose (start_docker) != 0)
+ {
+ fprintf (ERRORFILE,
+ "Could not invoke docker %s.\n", docker_command_with_binary);
+ fflush(ERRORFILE);
+ exit_code = UNABLE_TO_EXECUTE_CONTAINER_SCRIPT;
+ goto cleanup;
+ }
+
+ sprintf(docker_inspect_command,
+ "%s inspect --format {{.State.Pid}} %s",
+ docker_binary, container_id);
+
+ FILE* inspect_docker = popen(docker_inspect_command, "r");
+ int pid = 0;
+ fscanf (inspect_docker, "%d", &pid);
+ if (pclose (inspect_docker) != 0)
+ {
+ fprintf (ERRORFILE,
+ "Could not inspect docker %s.\n", docker_inspect_command);
+ fflush(ERRORFILE);
+ exit_code = UNABLE_TO_EXECUTE_CONTAINER_SCRIPT;
+ goto cleanup;
+ }
+
+ if (pid != 0) {
+ // cgroups-based resource enforcement
+ if (resources_key != NULL && ! strcmp(resources_key, "cgroups")) {
+ // write pid to cgroups
+ char* const* cgroup_ptr;
+ for (cgroup_ptr = resources_values; cgroup_ptr != NULL &&
+ *cgroup_ptr != NULL; ++cgroup_ptr) {
+ if (strcmp(*cgroup_ptr, "none") != 0 &&
+ write_pid_to_cgroup_as_root(*cgroup_ptr, pid) != 0) {
+ exit_code = WRITE_CGROUP_FAILED;
+ goto cleanup;
+ }
+ }
+ }
+ // write pid to pidfile
+ if (pid_file == NULL
+ || write_pid_to_file_as_nm(pid_file, (pid_t)pid) != 0) {
+ exit_code = WRITE_PIDFILE_FAILED;
+ fprintf(ERRORFILE, "Could not write pid to %s", pid_file);
+ fflush(ERRORFILE);
+ goto cleanup;
+ }
+
+ sprintf(docker_wait_command,
+ "%s wait %s", docker_binary, container_id);
+
+ FILE* wait_docker = popen(docker_wait_command, "r");
+ fscanf (wait_docker, "%d", &exit_code);
+ if (pclose (wait_docker) != 0) {
+ fprintf (ERRORFILE,
+ "Could not attach to docker is container dead? %s.\n", docker_wait_command);
+ fflush(ERRORFILE);
+ }
+ }
+
+ sprintf(docker_rm_command,
+ "%s rm %s", docker_binary, container_id);
+ FILE* rm_docker = popen(docker_rm_command, "w");
+ if (pclose (rm_docker) != 0)
+ {
+ fprintf (ERRORFILE,
+ "Could not remove container %s.\n", docker_rm_command);
+ fflush(ERRORFILE);
+ exit_code = UNABLE_TO_EXECUTE_CONTAINER_SCRIPT;
+ goto cleanup;
+ }
+
+cleanup:
+ if (exit_code_file != NULL && write_exit_code_file(exit_code_file, exit_code) < 0) {
+ fprintf (ERRORFILE,
+ "Could not write exit code to file %s.\n", exit_code_file);
+ fflush(ERRORFILE);
+ }
+#if HAVE_FCLOSEALL
+ fcloseall();
+#else
+ // only these fds are open, assuming no bugs elsewhere
+ fclose(LOGFILE);
+ fclose(ERRORFILE);
+ fclose(stdin);
+ fclose(stdout);
+ fclose(stderr);
+#endif
+ free(exit_code_file);
+ free(script_file_dest);
+ free(cred_file_dest);
+ return exit_code;
+}
+
+
+int launch_container_as_user(const char *user, const char *app_id,
+ const char *container_id, const char *work_dir,
+ const char *script_name, const char *cred_file,
+ const char* pid_file, char* const* local_dirs,
+ char* const* log_dirs, const char *resources_key,
+ char* const* resources_values) {
+ int exit_code = -1;
+ char *script_file_dest = NULL;
+ char *cred_file_dest = NULL;
+ char *exit_code_file = NULL;
+
+
+ exit_code_file = get_exit_code_file(pid_file);
+ if (NULL == exit_code_file) {
+ exit_code = OUT_OF_MEMORY;
+ goto cleanup;
+ }
+
+ int container_file_source =-1;
+ int cred_file_source = -1;
+ exit_code = create_script_paths(
+ work_dir, script_name, cred_file, &script_file_dest, &cred_file_dest,
+ &container_file_source, &cred_file_source);
+ if (exit_code != 0) {
+ fprintf(ERRORFILE, "Could not create local files and directories");
+ fflush(ERRORFILE);
goto cleanup;
}
@@ -1088,7 +1405,6 @@ int launch_container_as_user(const char *user, const char *app_id,
// cgroups-based resource enforcement
if (resources_key != NULL && ! strcmp(resources_key, "cgroups")) {
-
// write pid to cgroups
char* const* cgroup_ptr;
for (cgroup_ptr = resources_values; cgroup_ptr != NULL &&
@@ -1101,42 +1417,13 @@ int launch_container_as_user(const char *user, const char *app_id,
}
}
- // create the user directory on all disks
- int result = initialize_user(user, local_dirs);
- if (result != 0) {
- return result;
- }
-
- // initializing log dirs
- int log_create_result = create_log_dirs(app_id, log_dirs);
- if (log_create_result != 0) {
- return log_create_result;
- }
-
- // give up root privs
- if (change_user(user_detail->pw_uid, user_detail->pw_gid) != 0) {
- exit_code = SETUID_OPER_FAILED;
- goto cleanup;
- }
-
- // Create container specific directories as user. If there are no resources
- // to localize for this container, app-directories and log-directories are
- // also created automatically as part of this call.
- if (create_container_directories(user, app_id, container_id, local_dirs,
- log_dirs, work_dir) != 0) {
- fprintf(LOGFILE, "Could not create container dirs");
- goto cleanup;
- }
-
-
- // 700
- if (copy_file(container_file_source, script_name, script_file_dest,S_IRWXU) != 0) {
- goto cleanup;
- }
-
- // 600
- if (copy_file(cred_file_source, cred_file, cred_file_dest,
- S_IRUSR | S_IWUSR) != 0) {
+ exit_code = create_local_dirs(user, app_id, container_id,
+ work_dir, script_name, cred_file, local_dirs, log_dirs,
+ 0, script_file_dest, cred_file_dest,
+ container_file_source, cred_file_source);
+ if (exit_code != 0) {
+ fprintf(ERRORFILE, "Could not create local files and directories");
+ fflush(ERRORFILE);
goto cleanup;
}
@@ -1151,24 +1438,20 @@ int launch_container_as_user(const char *user, const char *app_id,
fclose(stderr);
#endif
umask(0027);
- if (chdir(work_dir) != 0) {
- fprintf(LOGFILE, "Can't change directory to %s -%s\n", work_dir,
- strerror(errno));
- goto cleanup;
- }
+
if (execlp(script_file_dest, script_file_dest, NULL) != 0) {
- fprintf(LOGFILE, "Couldn't execute the container launch file %s - %s",
+ fprintf(LOGFILE, "Couldn't execute the container launch file %s - %s",
script_file_dest, strerror(errno));
exit_code = UNABLE_TO_EXECUTE_CONTAINER_SCRIPT;
goto cleanup;
}
exit_code = 0;
- cleanup:
- free(exit_code_file);
- free(script_file_dest);
- free(cred_file_dest);
- return exit_code;
+ cleanup:
+ free(exit_code_file);
+ free(script_file_dest);
+ free(cred_file_dest);
+ return exit_code;
}
int signal_container_as_user(const char *user, int pid, int sig) {
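The docker launch path above drives the docker client through popen and parses the container pid out of "docker inspect". A standalone sketch of that read-a-pid pattern, adding the popen NULL check and fscanf result check that the hot path above omits; the binary name and container id are placeholders:

#include <stdio.h>

int main(void) {
  char cmd[1024];
  /* same inspect invocation the patch composes via sprintf */
  snprintf(cmd, sizeof(cmd), "%s inspect --format {{.State.Pid}} %s",
           "docker", "container_e01_000001");
  FILE *inspect = popen(cmd, "r");
  if (inspect == NULL) {
    perror("popen");
    return 1;
  }
  int pid = 0;
  if (fscanf(inspect, "%d", &pid) != 1) {
    fprintf(stderr, "could not parse a pid from docker inspect\n");
  }
  if (pclose(inspect) != 0) {
    fprintf(stderr, "docker inspect exited abnormally\n");
    return 1;
  }
  printf("container pid: %d\n", pid);
  return 0;
}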
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f36835ff/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.h
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.h b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.h
index b530f15..57327f0 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.h
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.h
@@ -25,6 +25,7 @@ enum command {
LAUNCH_CONTAINER = 1,
SIGNAL_CONTAINER = 2,
DELETE_AS_USER = 3,
+ LAUNCH_DOCKER_CONTAINER = 4
};
enum errorcodes {
@@ -55,7 +56,10 @@ enum errorcodes {
SETSID_OPER_FAILED = 25,
WRITE_PIDFILE_FAILED = 26,
WRITE_CGROUP_FAILED = 27,
- TRAFFIC_CONTROL_EXECUTION_FAILED = 28
+ TRAFFIC_CONTROL_EXECUTION_FAILED = 28,
+ DOCKER_RUN_FAILED=29,
+ ERROR_OPENING_FILE = 30,
+ ERROR_READING_FILE = 31
};
enum operations {
@@ -67,7 +71,9 @@ enum operations {
RUN_AS_USER_INITIALIZE_CONTAINER = 6,
RUN_AS_USER_LAUNCH_CONTAINER = 7,
RUN_AS_USER_SIGNAL_CONTAINER = 8,
- RUN_AS_USER_DELETE = 9
+ RUN_AS_USER_DELETE = 9,
+ RUN_AS_USER_LAUNCH_DOCKER_CONTAINER = 10,
+ RUN_DOCKER = 11
};
#define NM_GROUP_KEY "yarn.nodemanager.linux-container-executor.group"
@@ -79,6 +85,7 @@ enum operations {
#define MIN_USERID_KEY "min.user.id"
#define BANNED_USERS_KEY "banned.users"
#define ALLOWED_SYSTEM_USERS_KEY "allowed.system.users"
+#define DOCKER_BINARY_KEY "docker.binary"
#define TMP_DIR "tmp"
extern struct passwd *user_detail;
@@ -109,6 +116,14 @@ int initialize_app(const char *user, const char *app_id,
const char *credentials, char* const* local_dirs,
char* const* log_dirs, char* const* args);
+int launch_docker_container_as_user(const char * user, const char *app_id,
+ const char *container_id, const char *work_dir,
+ const char *script_name, const char *cred_file,
+ const char *pid_file, char* const* local_dirs,
+ char* const* log_dirs,
+ const char *command_file,const char *resources_key,
+ char* const* resources_values);
+
/*
* Function used to launch a container as the provided user. It does the following :
* 1) Creates container work dir and log dir to be accessible by the child
@@ -241,3 +256,9 @@ int traffic_control_read_state(char *command_file);
* calling process.
*/
int traffic_control_read_stats(char *command_file);
+
+
+/**
+ * Run a docker command passing the command file as an argument
+ */
+int run_docker(const char *command_file);
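On the command file itself: parse_docker_command_file reads exactly one line with getline, and both run_docker and launch_docker_container_as_user prepend the docker binary configured under DOCKER_BINARY_KEY before executing the result. A hypothetical example of a command file's single line — the image, mount and script path are illustrative only:

run --rm -v /nm-local/usercache:/nm-local/usercache library/busybox sh /nm-local/usercache/launch_container.sh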
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f36835ff/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/main.c
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/main.c b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/main.c
index 63fbfe4..ab45c7e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/main.c
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/main.c
@@ -49,16 +49,19 @@ static void display_usage(FILE *stream) {
" container-executor --tc-modify-state <command-file>\n" \
" container-executor --tc-read-state <command-file>\n" \
" container-executor --tc-read-stats <command-file>\n" \
+ " container-executor --run-docker <command-file>\n" \
" container-executor <user> <yarn-user> <command> <command-args>\n" \
" where command and command-args: \n" \
" initialize container: %2d appid tokens nm-local-dirs nm-log-dirs cmd app...\n" \
" launch container: %2d appid containerid workdir container-script " \
"tokens pidfile nm-local-dirs nm-log-dirs resources optional-tc-command-file\n" \
+ " launch docker container: %2d appid containerid workdir container-script " \
+ "tokens pidfile nm-local-dirs nm-log-dirs docker-command-file resources optional-tc-command-file\n" \
" signal container: %2d container-pid signal\n" \
" delete as user: %2d relative-path\n" ;
- fprintf(stream, usage_template, INITIALIZE_CONTAINER, LAUNCH_CONTAINER,
+ fprintf(stream, usage_template, INITIALIZE_CONTAINER, LAUNCH_CONTAINER, LAUNCH_DOCKER_CONTAINER,
SIGNAL_CONTAINER, DELETE_AS_USER);
}
@@ -160,6 +163,7 @@ static struct {
const char *dir_to_be_deleted;
int container_pid;
int signal;
+ const char *docker_command_file;
} cmd_input;
static int validate_run_as_user_commands(int argc, char **argv, int *operation);
@@ -227,6 +231,16 @@ static int validate_arguments(int argc, char **argv , int *operation) {
return 0;
}
+ if (strcmp("--run-docker", argv[1]) == 0) {
+ if (argc != 3) {
+ display_usage(stdout);
+ return INVALID_ARGUMENT_NUMBER;
+ }
+ optind++;
+ cmd_input.docker_command_file = argv[optind++];
+ *operation = RUN_DOCKER;
+ return 0;
+ }
/* Now we have to validate 'run as user' operations that don't use
a 'long option' - we should fix this at some point. The validation/argument
parsing here is extensive enough that it done in a separate function */
@@ -252,7 +266,9 @@ static int validate_run_as_user_commands(int argc, char **argv, int *operation)
fprintf(LOGFILE, "main : run as user is %s\n", cmd_input.run_as_user_name);
fprintf(LOGFILE, "main : requested yarn user is %s\n", cmd_input.yarn_user_name);
fflush(LOGFILE);
-
+ char * resources = NULL;// key,value pair describing resources
+ char * resources_key = NULL;
+ char * resources_value = NULL;
switch (command) {
case INITIALIZE_CONTAINER:
if (argc < 9) {
@@ -268,6 +284,46 @@ static int validate_run_as_user_commands(int argc, char **argv, int *operation)
*operation = RUN_AS_USER_INITIALIZE_CONTAINER;
return 0;
+ case LAUNCH_DOCKER_CONTAINER:
+ //kill me now.
+ if (!(argc == 14 || argc == 15)) {
+ fprintf(ERRORFILE, "Wrong number of arguments (%d vs 14 or 15) for launch docker container\n",
+ argc);
+ fflush(ERRORFILE);
+ return INVALID_ARGUMENT_NUMBER;
+ }
+
+ cmd_input.app_id = argv[optind++];
+ cmd_input.container_id = argv[optind++];
+ cmd_input.current_dir = argv[optind++];
+ cmd_input.script_file = argv[optind++];
+ cmd_input.cred_file = argv[optind++];
+ cmd_input.pid_file = argv[optind++];
+ cmd_input.local_dirs = argv[optind++];// good local dirs as a comma separated list
+ cmd_input.log_dirs = argv[optind++];// good log dirs as a comma separated list
+ cmd_input.docker_command_file = argv[optind++];
+ resources = argv[optind++];// key,value pair describing resources
+ resources_key = malloc(strlen(resources));
+ resources_value = malloc(strlen(resources));
+ if (get_kv_key(resources, resources_key, strlen(resources)) < 0 ||
+ get_kv_value(resources, resources_value, strlen(resources)) < 0) {
+ fprintf(ERRORFILE, "Invalid arguments for cgroups resources: %s",
+ resources);
+ fflush(ERRORFILE);
+ free(resources_key);
+ free(resources_value);
+ return INVALID_ARGUMENT_NUMBER;
+ }
+ //network isolation through tc
+ if (argc == 15) {
+ cmd_input.traffic_control_command_file = argv[optind++];
+ }
+
+ cmd_input.resources_key = resources_key;
+ cmd_input.resources_value = resources_value;
+ cmd_input.resources_values = extract_values(resources_value);
+ *operation = RUN_AS_USER_LAUNCH_DOCKER_CONTAINER;
+ return 0;
case LAUNCH_CONTAINER:
//kill me now.
@@ -286,9 +342,9 @@ static int validate_run_as_user_commands(int argc, char **argv, int *operation)
cmd_input.pid_file = argv[optind++];
cmd_input.local_dirs = argv[optind++];// good local dirs as a comma separated list
cmd_input.log_dirs = argv[optind++];// good log dirs as a comma separated list
- char * resources = argv[optind++];// key,value pair describing resources
- char * resources_key = malloc(strlen(resources));
- char * resources_value = malloc(strlen(resources));
+ resources = argv[optind++];// key,value pair describing resources
+ resources_key = malloc(strlen(resources));
+ resources_value = malloc(strlen(resources));
if (get_kv_key(resources, resources_key, strlen(resources)) < 0 ||
get_kv_value(resources, resources_value, strlen(resources)) < 0) {
@@ -385,6 +441,9 @@ int main(int argc, char **argv) {
case TRAFFIC_CONTROL_READ_STATS:
exit_code = traffic_control_read_stats(cmd_input.traffic_control_command_file);
break;
+ case RUN_DOCKER:
+ exit_code = run_docker(cmd_input.docker_command_file);
+ break;
case RUN_AS_USER_INITIALIZE_CONTAINER:
exit_code = set_user(cmd_input.run_as_user_name);
if (exit_code != 0) {
@@ -398,6 +457,34 @@ int main(int argc, char **argv) {
extract_values(cmd_input.log_dirs),
argv + optind);
break;
+ case RUN_AS_USER_LAUNCH_DOCKER_CONTAINER:
+ if (cmd_input.traffic_control_command_file != NULL) {
+ //apply tc rules before switching users and launching the container
+ exit_code = traffic_control_modify_state(cmd_input.traffic_control_command_file);
+ if( exit_code != 0) {
+ //failed to apply tc rules - break out before launching the container
+ break;
+ }
+ }
+
+ exit_code = set_user(cmd_input.run_as_user_name);
+ if (exit_code != 0) {
+ break;
+ }
+
+ exit_code = launch_docker_container_as_user(cmd_input.yarn_user_name,
+ cmd_input.app_id,
+ cmd_input.container_id,
+ cmd_input.current_dir,
+ cmd_input.script_file,
+ cmd_input.cred_file,
+ cmd_input.pid_file,
+ extract_values(cmd_input.local_dirs),
+ extract_values(cmd_input.log_dirs),
+ cmd_input.docker_command_file,
+ cmd_input.resources_key,
+ cmd_input.resources_values);
+ break;
case RUN_AS_USER_LAUNCH_CONTAINER:
if (cmd_input.traffic_control_command_file != NULL) {
//apply tc rules before switching users and launching the container
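Putting the main.c changes together, the two new invocations look roughly like this on the command line; every path and id below is hypothetical, and the launch form follows the usage template above with command 4 (LAUNCH_DOCKER_CONTAINER) and a key=value resources pair mirroring the existing launch-container form:

container-executor --run-docker /nm-private/docker.cmd
container-executor nobody hadoopuser 4 app_1 container_1 /nm-local/workdir launch_container.sh container_tokens pidfile /nm-local /nm-log /nm-private/docker.cmd cgroups=none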
[07/10] hadoop git commit: YARN-3982. container-executor parsing of
container-executor.cfg broken in trunk and branch-2. Contributed by Varun
Vasudev
Posted by aw...@apache.org.
YARN-3982. container-executor parsing of container-executor.cfg broken
in trunk and branch-2. Contributed by Varun Vasudev
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f1709342
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f1709342
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f1709342
Branch: refs/heads/HADOOP-12111
Commit: f17093421521efcbdc813f6f2b8411e45ecc7863
Parents: 030fcfa
Author: Xuan <xg...@apache.org>
Authored: Mon Jul 27 23:45:58 2015 -0700
Committer: Xuan <xg...@apache.org>
Committed: Mon Jul 27 23:45:58 2015 -0700
----------------------------------------------------------------------
hadoop-yarn-project/CHANGES.txt | 3 +++
.../container-executor/impl/configuration.c | 4 ++--
.../test/test-container-executor.c | 22 +++++++++++++-------
3 files changed, 20 insertions(+), 9 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f1709342/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 4f8484a..b4666e8 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -698,6 +698,9 @@ Release 2.8.0 - UNRELEASED
YARN-3846. RM Web UI queue filter is not working for sub queue.
(Mohammad Shahid Khan via jianhe)
+ YARN-3982. container-executor parsing of container-executor.cfg broken in
+ trunk and branch-2. (Varun Vasudev via xgong)
+
Release 2.7.2 - UNRELEASED
INCOMPATIBLE CHANGES
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f1709342/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/configuration.c
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/configuration.c b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/configuration.c
index 2825367..373dbfd 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/configuration.c
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/configuration.c
@@ -284,11 +284,11 @@ char * get_value(const char* key) {
/**
* Function to return an array of values for a key.
- * Value delimiter is assumed to be a '%'.
+ * Value delimiter is assumed to be a ','.
*/
char ** get_values(const char * key) {
char *value = get_value(key);
- return extract_values(value);
+ return extract_values_delim(value, ",");
}
char ** extract_values_delim(char *value, const char *delim) {
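The practical effect of the fix: keys read through get_values — for example banned.users or allowed.system.users in container-executor.cfg — are split on ',' as the header comment in configuration.h states, while extract_values keeps '%' for the resource values passed on the launch command line. A hypothetical config line in the comma-separated form:

banned.users=bin,sys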
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f1709342/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test-container-executor.c
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test-container-executor.c b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test-container-executor.c
index 99bcf34..001a37d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test-container-executor.c
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test-container-executor.c
@@ -145,7 +145,7 @@ void check_pid_file(const char* pid_file, pid_t mypid) {
}
char myPidBuf[33];
- snprintf(myPidBuf, 33, "%" PRId64, (int64_t)mypid);
+ snprintf(myPidBuf, 33, "%" PRId64, (int64_t)(mypid + 1));
if (strncmp(pidBuf, myPidBuf, strlen(myPidBuf)) != 0) {
printf("FAIL: failed to find matching pid in pid file\n");
printf("FAIL: Expected pid %" PRId64 " : Got %.*s", (int64_t)mypid,
@@ -212,15 +212,15 @@ void test_get_app_log_dir() {
free(logdir);
}
-void test_check_user() {
+void test_check_user(int expectedFailure) {
printf("\nTesting test_check_user\n");
struct passwd *user = check_user(username);
- if (user == NULL) {
+ if (user == NULL && !expectedFailure) {
printf("FAIL: failed check for user %s\n", username);
exit(1);
}
free(user);
- if (check_user("lp") != NULL) {
+ if (check_user("lp") != NULL && !expectedFailure) {
printf("FAIL: failed check for system user lp\n");
exit(1);
}
@@ -228,7 +228,7 @@ void test_check_user() {
printf("FAIL: failed check for system user root\n");
exit(1);
}
- if (check_user("daemon") == NULL) {
+ if (check_user("daemon") == NULL && !expectedFailure) {
printf("FAIL: failed check for whitelisted system user daemon\n");
exit(1);
}
@@ -467,6 +467,7 @@ void test_signal_container() {
printf("FAIL: fork failed\n");
exit(1);
} else if (child == 0) {
+ printf("\nSwitching to user %d\n", user_detail->pw_uid);
if (change_user(user_detail->pw_uid, user_detail->pw_gid) != 0) {
exit(1);
}
@@ -474,6 +475,10 @@ void test_signal_container() {
exit(0);
} else {
printf("Child container launched as %" PRId64 "\n", (int64_t)child);
+ printf("Signaling container as user %s\n", yarn_username);
+ // there's a race between the child calling change_user and us
+ // calling signal_container_as_user, hence the sleep
+ sleep(3);
if (signal_container_as_user(yarn_username, child, SIGQUIT) != 0) {
exit(1);
}
@@ -805,7 +810,7 @@ int main(int argc, char **argv) {
printf("\nTesting delete_app()\n");
test_delete_app();
- test_check_user();
+ test_check_user(0);
// the tests that change user need to be run in a subshell, so that
// when they change user they don't give up our privs
@@ -832,7 +837,10 @@ int main(int argc, char **argv) {
read_config(TEST_ROOT "/test.cfg");
username = "bin";
- test_check_user();
+ test_check_user(1);
+
+ username = "sys";
+ test_check_user(1);
run("rm -fr " TEST_ROOT);
printf("\nFinished tests\n");
[06/10] hadoop git commit: HDFS-7858. Improve HA Namenode Failover
detection on the client. (asuresh)
Posted by aw...@apache.org.
HDFS-7858. Improve HA Namenode Failover detection on the client. (asuresh)
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/030fcfa9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/030fcfa9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/030fcfa9
Branch: refs/heads/HADOOP-12111
Commit: 030fcfa99c345ad57625486eeabedebf2fd4411f
Parents: e21dde5
Author: Arun Suresh <as...@apache.org>
Authored: Mon Jul 27 23:02:03 2015 -0700
Committer: Arun Suresh <as...@apache.org>
Committed: Mon Jul 27 23:02:03 2015 -0700
----------------------------------------------------------------------
.../apache/hadoop/io/retry/MultiException.java | 49 +++
.../hadoop/io/retry/RetryInvocationHandler.java | 99 +++++-
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 2 +
.../ha/ConfiguredFailoverProxyProvider.java | 52 ++-
.../ha/RequestHedgingProxyProvider.java | 186 ++++++++++
.../markdown/HDFSHighAvailabilityWithNFS.md | 9 +-
.../markdown/HDFSHighAvailabilityWithQJM.md | 10 +-
.../ha/TestRequestHedgingProxyProvider.java | 350 +++++++++++++++++++
8 files changed, 724 insertions(+), 33 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/030fcfa9/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/MultiException.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/MultiException.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/MultiException.java
new file mode 100644
index 0000000..4963a2d
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/MultiException.java
@@ -0,0 +1,49 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package org.apache.hadoop.io.retry;
+
+import java.io.IOException;
+import java.util.Map;
+
+/**
+ * Holder class that clients can use to return multiple exceptions.
+ */
+public class MultiException extends IOException {
+
+ private final Map<String, Exception> exes;
+
+ public MultiException(Map<String, Exception> exes) {
+ this.exes = exes;
+ }
+
+ public Map<String, Exception> getExceptions() {
+ return exes;
+ }
+
+ @Override
+ public String toString() {
+ StringBuilder sb = new StringBuilder("{");
+ for (Exception e : exes.values()) {
+ sb.append(e.toString()).append(", ");
+ }
+ sb.append("}");
+ return "MultiException[" + sb.toString() + "]";
+ }
+}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/030fcfa9/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryInvocationHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryInvocationHandler.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryInvocationHandler.java
index 543567e..9256356 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryInvocationHandler.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryInvocationHandler.java
@@ -23,6 +23,8 @@ import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.lang.reflect.Proxy;
import java.util.Collections;
+import java.util.LinkedList;
+import java.util.List;
import java.util.Map;
import org.apache.commons.logging.Log;
@@ -101,7 +103,7 @@ public class RetryInvocationHandler<T> implements RpcInvocationHandler {
Object ret = invokeMethod(method, args);
hasMadeASuccessfulCall = true;
return ret;
- } catch (Exception e) {
+ } catch (Exception ex) {
boolean isIdempotentOrAtMostOnce = proxyProvider.getInterface()
.getMethod(method.getName(), method.getParameterTypes())
.isAnnotationPresent(Idempotent.class);
@@ -110,15 +112,16 @@ public class RetryInvocationHandler<T> implements RpcInvocationHandler {
.getMethod(method.getName(), method.getParameterTypes())
.isAnnotationPresent(AtMostOnce.class);
}
- RetryAction action = policy.shouldRetry(e, retries++,
- invocationFailoverCount, isIdempotentOrAtMostOnce);
- if (action.action == RetryAction.RetryDecision.FAIL) {
- if (action.reason != null) {
+ List<RetryAction> actions = extractActions(policy, ex, retries++,
+ invocationFailoverCount, isIdempotentOrAtMostOnce);
+ RetryAction failAction = getFailAction(actions);
+ if (failAction != null) {
+ if (failAction.reason != null) {
LOG.warn("Exception while invoking " + currentProxy.proxy.getClass()
+ "." + method.getName() + " over " + currentProxy.proxyInfo
- + ". Not retrying because " + action.reason, e);
+ + ". Not retrying because " + failAction.reason, ex);
}
- throw e;
+ throw ex;
} else { // retry or failover
// avoid logging the failover if this is the first call on this
// proxy object, and we successfully achieve the failover without
@@ -126,8 +129,9 @@ public class RetryInvocationHandler<T> implements RpcInvocationHandler {
boolean worthLogging =
!(invocationFailoverCount == 0 && !hasMadeASuccessfulCall);
worthLogging |= LOG.isDebugEnabled();
- if (action.action == RetryAction.RetryDecision.FAILOVER_AND_RETRY &&
- worthLogging) {
+ RetryAction failOverAction = getFailOverAction(actions);
+ long delay = getDelayMillis(actions);
+ if (failOverAction != null && worthLogging) {
String msg = "Exception while invoking " + method.getName()
+ " of class " + currentProxy.proxy.getClass().getSimpleName()
+ " over " + currentProxy.proxyInfo;
@@ -135,22 +139,22 @@ public class RetryInvocationHandler<T> implements RpcInvocationHandler {
if (invocationFailoverCount > 0) {
msg += " after " + invocationFailoverCount + " fail over attempts";
}
- msg += ". Trying to fail over " + formatSleepMessage(action.delayMillis);
- LOG.info(msg, e);
+ msg += ". Trying to fail over " + formatSleepMessage(delay);
+ LOG.info(msg, ex);
} else {
if(LOG.isDebugEnabled()) {
LOG.debug("Exception while invoking " + method.getName()
+ " of class " + currentProxy.proxy.getClass().getSimpleName()
+ " over " + currentProxy.proxyInfo + ". Retrying "
- + formatSleepMessage(action.delayMillis), e);
+ + formatSleepMessage(delay), ex);
}
}
-
- if (action.delayMillis > 0) {
- Thread.sleep(action.delayMillis);
+
+ if (delay > 0) {
+ Thread.sleep(delay);
}
- if (action.action == RetryAction.RetryDecision.FAILOVER_AND_RETRY) {
+ if (failOverAction != null) {
// Make sure that concurrent failed method invocations only cause a
// single actual fail over.
synchronized (proxyProvider) {
@@ -169,7 +173,68 @@ public class RetryInvocationHandler<T> implements RpcInvocationHandler {
}
}
}
-
+
+ /**
+ * Obtain a retry delay from a list of RetryActions.
+ */
+ private long getDelayMillis(List<RetryAction> actions) {
+ long retVal = 0;
+ for (RetryAction action : actions) {
+ if (action.action == RetryAction.RetryDecision.FAILOVER_AND_RETRY ||
+ action.action == RetryAction.RetryDecision.RETRY) {
+ if (action.delayMillis > retVal) {
+ retVal = action.delayMillis;
+ }
+ }
+ }
+ return retVal;
+ }
+
+ /**
+ * Return the first FAILOVER_AND_RETRY action.
+ */
+ private RetryAction getFailOverAction(List<RetryAction> actions) {
+ for (RetryAction action : actions) {
+ if (action.action == RetryAction.RetryDecision.FAILOVER_AND_RETRY) {
+ return action;
+ }
+ }
+ return null;
+ }
+
+ /**
+ * Return the last FAIL action, but only if there are no RETRY actions.
+ */
+ private RetryAction getFailAction(List<RetryAction> actions) {
+ RetryAction fAction = null;
+ for (RetryAction action : actions) {
+ if (action.action == RetryAction.RetryDecision.FAIL) {
+ fAction = action;
+ } else {
+ // At least 1 RETRY
+ return null;
+ }
+ }
+ return fAction;
+ }
+
+ private List<RetryAction> extractActions(RetryPolicy policy, Exception ex,
+ int i, int invocationFailoverCount,
+ boolean isIdempotentOrAtMostOnce)
+ throws Exception {
+ List<RetryAction> actions = new LinkedList<>();
+ if (ex instanceof MultiException) {
+ for (Exception th : ((MultiException) ex).getExceptions().values()) {
+ actions.add(policy.shouldRetry(th, i, invocationFailoverCount,
+ isIdempotentOrAtMostOnce));
+ }
+ } else {
+ actions.add(policy.shouldRetry(ex, i,
+ invocationFailoverCount, isIdempotentOrAtMostOnce));
+ }
+ return actions;
+ }
+
private static String formatSleepMessage(long millis) {
if (millis > 0) {
return "after sleeping for " + millis + "ms.";
http://git-wip-us.apache.org/repos/asf/hadoop/blob/030fcfa9/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index cc2a833..9b2de81 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -753,6 +753,8 @@ Release 2.8.0 - UNRELEASED
HDFS-8735. Inotify: All events classes should implement toString() API.
(Surendra Singh Lilhore via aajisaka)
+ HDFS-7858. Improve HA Namenode Failover detection on the client. (asuresh)
+
OPTIMIZATIONS
HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than
http://git-wip-us.apache.org/repos/asf/hadoop/blob/030fcfa9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ConfiguredFailoverProxyProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ConfiguredFailoverProxyProvider.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ConfiguredFailoverProxyProvider.java
index 235c886..ccce736 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ConfiguredFailoverProxyProvider.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ConfiguredFailoverProxyProvider.java
@@ -25,6 +25,7 @@ import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.Map;
+import java.util.concurrent.atomic.AtomicBoolean;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
@@ -38,6 +39,7 @@ import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.security.UserGroupInformation;
+import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
/**
@@ -51,16 +53,40 @@ public class ConfiguredFailoverProxyProvider<T> extends
private static final Log LOG =
LogFactory.getLog(ConfiguredFailoverProxyProvider.class);
- private final Configuration conf;
- private final List<AddressRpcProxyPair<T>> proxies =
+ interface ProxyFactory<T> {
+ T createProxy(Configuration conf, InetSocketAddress nnAddr, Class<T> xface,
+ UserGroupInformation ugi, boolean withRetries,
+ AtomicBoolean fallbackToSimpleAuth) throws IOException;
+ }
+
+ static class DefaultProxyFactory<T> implements ProxyFactory<T> {
+ @Override
+ public T createProxy(Configuration conf, InetSocketAddress nnAddr,
+ Class<T> xface, UserGroupInformation ugi, boolean withRetries,
+ AtomicBoolean fallbackToSimpleAuth) throws IOException {
+ return NameNodeProxies.createNonHAProxy(conf,
+ nnAddr, xface, ugi, false, fallbackToSimpleAuth).getProxy();
+ }
+ }
+
+ protected final Configuration conf;
+ protected final List<AddressRpcProxyPair<T>> proxies =
new ArrayList<AddressRpcProxyPair<T>>();
private final UserGroupInformation ugi;
- private final Class<T> xface;
-
+ protected final Class<T> xface;
+
private int currentProxyIndex = 0;
+ private final ProxyFactory<T> factory;
public ConfiguredFailoverProxyProvider(Configuration conf, URI uri,
Class<T> xface) {
+ this(conf, uri, xface, new DefaultProxyFactory<T>());
+ }
+
+ @VisibleForTesting
+ ConfiguredFailoverProxyProvider(Configuration conf, URI uri,
+ Class<T> xface, ProxyFactory<T> factory) {
+
Preconditions.checkArgument(
xface.isAssignableFrom(NamenodeProtocols.class),
"Interface class %s is not a valid NameNode protocol!");
@@ -78,9 +104,10 @@ public class ConfiguredFailoverProxyProvider<T> extends
HdfsClientConfigKeys.Failover.CONNECTION_RETRIES_ON_SOCKET_TIMEOUTS_KEY,
HdfsClientConfigKeys.Failover.CONNECTION_RETRIES_ON_SOCKET_TIMEOUTS_DEFAULT);
this.conf.setInt(
- CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SOCKET_TIMEOUTS_KEY,
- maxRetriesOnSocketTimeouts);
-
+ CommonConfigurationKeysPublic
+ .IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SOCKET_TIMEOUTS_KEY,
+ maxRetriesOnSocketTimeouts);
+
try {
ugi = UserGroupInformation.getCurrentUser();
@@ -102,6 +129,7 @@ public class ConfiguredFailoverProxyProvider<T> extends
// URI of the cluster. Clone this token to apply to each of the
// underlying IPC addresses so that the IPC code can find it.
HAUtil.cloneDelegationTokenForLogicalUri(ugi, uri, addressesOfNns);
+ this.factory = factory;
} catch (IOException e) {
throw new RuntimeException(e);
}
@@ -120,8 +148,8 @@ public class ConfiguredFailoverProxyProvider<T> extends
AddressRpcProxyPair<T> current = proxies.get(currentProxyIndex);
if (current.namenode == null) {
try {
- current.namenode = NameNodeProxies.createNonHAProxy(conf,
- current.address, xface, ugi, false, fallbackToSimpleAuth).getProxy();
+ current.namenode = factory.createProxy(conf,
+ current.address, xface, ugi, false, fallbackToSimpleAuth);
} catch (IOException e) {
LOG.error("Failed to create RPC proxy to NameNode", e);
throw new RuntimeException(e);
@@ -131,7 +159,11 @@ public class ConfiguredFailoverProxyProvider<T> extends
}
@Override
- public synchronized void performFailover(T currentProxy) {
+ public void performFailover(T currentProxy) {
+ incrementProxyIndex();
+ }
+
+ synchronized void incrementProxyIndex() {
currentProxyIndex = (currentProxyIndex + 1) % proxies.size();
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/030fcfa9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/RequestHedgingProxyProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/RequestHedgingProxyProvider.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/RequestHedgingProxyProvider.java
new file mode 100644
index 0000000..b7216b0
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/RequestHedgingProxyProvider.java
@@ -0,0 +1,186 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode.ha;
+
+import java.lang.reflect.InvocationHandler;
+import java.lang.reflect.Method;
+import java.lang.reflect.Proxy;
+import java.net.URI;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.concurrent.Callable;
+import java.util.concurrent.CompletionService;
+import java.util.concurrent.ExecutorCompletionService;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+
+import com.google.common.annotations.VisibleForTesting;
+import org.apache.hadoop.io.retry.MultiException;
+
+/**
+ * A FailoverProxyProvider implementation that technically does not "failover"
+ * per se. It constructs a wrapper proxy that sends the request to ALL
+ * underlying proxies simultaneously. It assumes that in an HA setup, there
+ * will be only one Active, and the active should respond faster than any
+ * configured standbys. Once it receives a response from any one of the
+ * configured proxies, outstanding requests to other proxies are immediately
+ * cancelled.
+ */
+public class RequestHedgingProxyProvider<T> extends
+ ConfiguredFailoverProxyProvider<T> {
+
+ private static final Log LOG =
+ LogFactory.getLog(RequestHedgingProxyProvider.class);
+
+ class RequestHedgingInvocationHandler implements InvocationHandler {
+
+ final Map<String, ProxyInfo<T>> targetProxies;
+
+ public RequestHedgingInvocationHandler(
+ Map<String, ProxyInfo<T>> targetProxies) {
+ this.targetProxies = new HashMap<>(targetProxies);
+ }
+
+ /**
+ * Creates an Executor and invokes all proxies concurrently. This
+ * implementation assumes that clients have configured proper socket
+ * timeouts; otherwise the call can block forever.
+ *
+ * @param proxy
+ * @param method
+ * @param args
+ * @return
+ * @throws Throwable
+ */
+ @Override
+ public Object
+ invoke(Object proxy, final Method method, final Object[] args)
+ throws Throwable {
+ Map<Future<Object>, ProxyInfo<T>> proxyMap = new HashMap<>();
+ int numAttempts = 0;
+
+ ExecutorService executor = null;
+ CompletionService<Object> completionService;
+ try {
+ // Optimization: if only 2 proxies are configured and one has failed
+ // over, then we don't need to create a thread pool etc.
+ targetProxies.remove(toIgnore);
+ if (targetProxies.size() == 1) {
+ ProxyInfo<T> proxyInfo = targetProxies.values().iterator().next();
+ Object retVal = method.invoke(proxyInfo.proxy, args);
+ successfulProxy = proxyInfo;
+ return retVal;
+ }
+ executor = Executors.newFixedThreadPool(proxies.size());
+ completionService = new ExecutorCompletionService<>(executor);
+ for (final Map.Entry<String, ProxyInfo<T>> pEntry :
+ targetProxies.entrySet()) {
+ Callable<Object> c = new Callable<Object>() {
+ @Override
+ public Object call() throws Exception {
+ return method.invoke(pEntry.getValue().proxy, args);
+ }
+ };
+ proxyMap.put(completionService.submit(c), pEntry.getValue());
+ numAttempts++;
+ }
+
+ Map<String, Exception> badResults = new HashMap<>();
+ while (numAttempts > 0) {
+ Future<Object> callResultFuture = completionService.take();
+ Object retVal;
+ try {
+ retVal = callResultFuture.get();
+ successfulProxy = proxyMap.get(callResultFuture);
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Invocation successful on ["
+ + successfulProxy.proxyInfo + "]");
+ }
+ return retVal;
+ } catch (Exception ex) {
+ ProxyInfo<T> tProxyInfo = proxyMap.get(callResultFuture);
+ LOG.warn("Invocation returned exception on "
+ + "[" + tProxyInfo.proxyInfo + "]");
+ badResults.put(tProxyInfo.proxyInfo, ex);
+ numAttempts--;
+ }
+ }
+
+ // At this point we should have all bad results (exceptions);
+ // otherwise we would have returned a successful result above.
+ if (badResults.size() == 1) {
+ throw badResults.values().iterator().next();
+ } else {
+ throw new MultiException(badResults);
+ }
+ } finally {
+ if (executor != null) {
+ executor.shutdownNow();
+ }
+ }
+ }
+ }
+
+
+ private volatile ProxyInfo<T> successfulProxy = null;
+ private volatile String toIgnore = null;
+
+ public RequestHedgingProxyProvider(
+ Configuration conf, URI uri, Class<T> xface) {
+ this(conf, uri, xface, new DefaultProxyFactory<T>());
+ }
+
+ @VisibleForTesting
+ RequestHedgingProxyProvider(Configuration conf, URI uri,
+ Class<T> xface, ProxyFactory<T> factory) {
+ super(conf, uri, xface, factory);
+ }
+
+ @SuppressWarnings("unchecked")
+ @Override
+ public synchronized ProxyInfo<T> getProxy() {
+ if (successfulProxy != null) {
+ return successfulProxy;
+ }
+ Map<String, ProxyInfo<T>> targetProxyInfos = new HashMap<>();
+ StringBuilder combinedInfo = new StringBuilder("["); // must be a String: a char here would be taken as an int capacity
+ for (int i = 0; i < proxies.size(); i++) {
+ ProxyInfo<T> pInfo = super.getProxy();
+ incrementProxyIndex();
+ targetProxyInfos.put(pInfo.proxyInfo, pInfo);
+ combinedInfo.append(pInfo.proxyInfo).append(',');
+ }
+ combinedInfo.append(']');
+ T wrappedProxy = (T) Proxy.newProxyInstance(
+ RequestHedgingInvocationHandler.class.getClassLoader(),
+ new Class<?>[]{xface},
+ new RequestHedgingInvocationHandler(targetProxyInfos));
+ return new ProxyInfo<T>(wrappedProxy, combinedInfo.toString());
+ }
+
+ @Override
+ public synchronized void performFailover(T currentProxy) {
+ toIgnore = successfulProxy.proxyInfo;
+ successfulProxy = null;
+ }
+
+}
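For context, a minimal sketch of how a client could opt into the new provider programmatically. This is illustrative only, not part of the commit: the nameservice name, NameNode IDs, and addresses mirror the placeholder values used in the test added later in this commit, and the provider key follows the documentation change below.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSConfigKeys;
    import org.apache.hadoop.hdfs.server.namenode.ha.RequestHedgingProxyProvider;

    public class RequestHedgingClientConf {
      // Builds a Configuration that points an HA nameservice at the
      // RequestHedgingProxyProvider. All hostnames/ports are placeholders.
      public static Configuration create() {
        Configuration conf = new Configuration();
        String ns = "mycluster";
        conf.set(DFSConfigKeys.DFS_NAMESERVICES, ns);
        conf.set(DFSConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX + "." + ns,
            "nn1,nn2");
        conf.set(DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY + "." + ns + ".nn1",
            "machine1.foo.bar:8020");
        conf.set(DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY + "." + ns + ".nn2",
            "machine2.foo.bar:8020");
        // Hedge the first call across all NameNodes; stick with the winner.
        conf.set("dfs.client.failover.proxy.provider." + ns,
            RequestHedgingProxyProvider.class.getName());
        return conf;
      }
    }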
http://git-wip-us.apache.org/repos/asf/hadoop/blob/030fcfa9/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSHighAvailabilityWithNFS.md
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSHighAvailabilityWithNFS.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSHighAvailabilityWithNFS.md
index cc53a38..51a88c9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSHighAvailabilityWithNFS.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSHighAvailabilityWithNFS.md
@@ -195,9 +195,12 @@ The order in which you set these configurations is unimportant, but the values y
Configure the name of the Java class which will be used by the DFS Client to
determine which NameNode is the current Active, and therefore which NameNode is
- currently serving client requests. The only implementation which currently
- ships with Hadoop is the **ConfiguredFailoverProxyProvider**, so use this
- unless you are using a custom one. For example:
+ currently serving client requests. The two implementations which currently
+ ship with Hadoop are the **ConfiguredFailoverProxyProvider** and the
+ **RequestHedgingProxyProvider** (which, for the first call, concurrently invokes all
+ namenodes to determine the active one, and on subsequent requests, invokes the active
+ namenode until a fail-over happens), so use one of these unless you are using a custom
+ proxy provider.
<property>
<name>dfs.client.failover.proxy.provider.mycluster</name>
http://git-wip-us.apache.org/repos/asf/hadoop/blob/030fcfa9/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSHighAvailabilityWithQJM.md
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSHighAvailabilityWithQJM.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSHighAvailabilityWithQJM.md
index d9d9a67..8b42386 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSHighAvailabilityWithQJM.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSHighAvailabilityWithQJM.md
@@ -216,9 +216,13 @@ The order in which you set these configurations is unimportant, but the values y
Configure the name of the Java class which will be used by the DFS Client to
determine which NameNode is the current Active, and therefore which NameNode is
- currently serving client requests. The only implementation which currently
- ships with Hadoop is the **ConfiguredFailoverProxyProvider**, so use this
- unless you are using a custom one. For example:
+ currently serving client requests. The two implementations which currently
+ ship with Hadoop are the **ConfiguredFailoverProxyProvider** and the
+ **RequestHedgingProxyProvider** (which, for the first call, concurrently invokes all
+ namenodes to determine the active one, and on subsequent requests, invokes the active
+ namenode until a fail-over happens), so use one of these unless you are using a custom
+ proxy provider.
+ For example:
<property>
<name>dfs.client.failover.proxy.provider.mycluster</name>
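The example property is cut off by the diff context in both documents. Rendered in the documentation's own example style, a configuration selecting the new provider might read as follows; the value is illustrative, using the fully qualified name of the class added above:

    <property>
      <name>dfs.client.failover.proxy.provider.mycluster</name>
      <value>org.apache.hadoop.hdfs.server.namenode.ha.RequestHedgingProxyProvider</value>
    </property>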
http://git-wip-us.apache.org/repos/asf/hadoop/blob/030fcfa9/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRequestHedgingProxyProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRequestHedgingProxyProvider.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRequestHedgingProxyProvider.java
new file mode 100644
index 0000000..32f807a
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRequestHedgingProxyProvider.java
@@ -0,0 +1,350 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode.ha;
+
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.util.Iterator;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider.ProxyFactory;
+import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
+import org.apache.hadoop.io.retry.MultiException;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.util.Time;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+import org.mockito.Mockito;
+import org.mockito.invocation.InvocationOnMock;
+import org.mockito.stubbing.Answer;
+
+import com.google.common.collect.Lists;
+
+public class TestRequestHedgingProxyProvider {
+
+ private Configuration conf;
+ private URI nnUri;
+ private String ns;
+
+ @Before
+ public void setup() throws URISyntaxException {
+ ns = "mycluster-" + Time.monotonicNow();
+ nnUri = new URI("hdfs://" + ns);
+ conf = new Configuration();
+ conf.set(DFSConfigKeys.DFS_NAMESERVICES, ns);
+ conf.set(
+ DFSConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX + "." + ns, "nn1,nn2");
+ conf.set(
+ DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY + "." + ns + ".nn1",
+ "machine1.foo.bar:8020");
+ conf.set(
+ DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY + "." + ns + ".nn2",
+ "machine2.foo.bar:8020");
+ }
+
+ @Test
+ public void testHedgingWhenOneFails() throws Exception {
+ final NamenodeProtocols goodMock = Mockito.mock(NamenodeProtocols.class);
+ Mockito.when(goodMock.getStats()).thenReturn(new long[] {1});
+ final NamenodeProtocols badMock = Mockito.mock(NamenodeProtocols.class);
+ Mockito.when(badMock.getStats()).thenThrow(new IOException("Bad mock !!"));
+
+ RequestHedgingProxyProvider<NamenodeProtocols> provider =
+ new RequestHedgingProxyProvider<>(conf, nnUri, NamenodeProtocols.class,
+ createFactory(goodMock, badMock));
+ long[] stats = provider.getProxy().proxy.getStats();
+ Assert.assertTrue(stats.length == 1);
+ Mockito.verify(badMock).getStats();
+ Mockito.verify(goodMock).getStats();
+ }
+
+ @Test
+ public void testHedgingWhenOneIsSlow() throws Exception {
+ final NamenodeProtocols goodMock = Mockito.mock(NamenodeProtocols.class);
+ Mockito.when(goodMock.getStats()).thenAnswer(new Answer<long[]>() {
+ @Override
+ public long[] answer(InvocationOnMock invocation) throws Throwable {
+ Thread.sleep(1000);
+ return new long[]{1};
+ }
+ });
+ final NamenodeProtocols badMock = Mockito.mock(NamenodeProtocols.class);
+ Mockito.when(badMock.getStats()).thenThrow(new IOException("Bad mock !!"));
+
+ RequestHedgingProxyProvider<NamenodeProtocols> provider =
+ new RequestHedgingProxyProvider<>(conf, nnUri, NamenodeProtocols.class,
+ createFactory(goodMock, badMock));
+ long[] stats = provider.getProxy().proxy.getStats();
+ Assert.assertTrue(stats.length == 1);
+ Assert.assertEquals(1, stats[0]);
+ Mockito.verify(badMock).getStats();
+ Mockito.verify(goodMock).getStats();
+ }
+
+ @Test
+ public void testHedgingWhenBothFail() throws Exception {
+ NamenodeProtocols badMock = Mockito.mock(NamenodeProtocols.class);
+ Mockito.when(badMock.getStats()).thenThrow(new IOException("Bad mock !!"));
+ NamenodeProtocols worseMock = Mockito.mock(NamenodeProtocols.class);
+ Mockito.when(worseMock.getStats()).thenThrow(
+ new IOException("Worse mock !!"));
+
+ RequestHedgingProxyProvider<NamenodeProtocols> provider =
+ new RequestHedgingProxyProvider<>(conf, nnUri, NamenodeProtocols.class,
+ createFactory(badMock, worseMock));
+ try {
+ provider.getProxy().proxy.getStats();
+ Assert.fail("Should fail since both namenodes throw IOException !!");
+ } catch (Exception e) {
+ Assert.assertTrue(e instanceof MultiException);
+ }
+ Mockito.verify(badMock).getStats();
+ Mockito.verify(worseMock).getStats();
+ }
+
+ @Test
+ public void testPerformFailover() throws Exception {
+ final AtomicInteger counter = new AtomicInteger(0);
+ final int[] isGood = {1};
+ final NamenodeProtocols goodMock = Mockito.mock(NamenodeProtocols.class);
+ Mockito.when(goodMock.getStats()).thenAnswer(new Answer<long[]>() {
+ @Override
+ public long[] answer(InvocationOnMock invocation) throws Throwable {
+ counter.incrementAndGet();
+ if (isGood[0] == 1) {
+ Thread.sleep(1000);
+ return new long[]{1};
+ }
+ throw new IOException("Was Good mock !!");
+ }
+ });
+ final NamenodeProtocols badMock = Mockito.mock(NamenodeProtocols.class);
+ Mockito.when(badMock.getStats()).thenAnswer(new Answer<long[]>() {
+ @Override
+ public long[] answer(InvocationOnMock invocation) throws Throwable {
+ counter.incrementAndGet();
+ if (isGood[0] == 2) {
+ Thread.sleep(1000);
+ return new long[]{2};
+ }
+ throw new IOException("Bad mock !!");
+ }
+ });
+ RequestHedgingProxyProvider<NamenodeProtocols> provider =
+ new RequestHedgingProxyProvider<>(conf, nnUri, NamenodeProtocols.class,
+ createFactory(goodMock, badMock));
+ long[] stats = provider.getProxy().proxy.getStats();
+ Assert.assertTrue(stats.length == 1);
+ Assert.assertEquals(1, stats[0]);
+ Assert.assertEquals(2, counter.get());
+ Mockito.verify(badMock).getStats();
+ Mockito.verify(goodMock).getStats();
+
+ stats = provider.getProxy().proxy.getStats();
+ Assert.assertTrue(stats.length == 1);
+ Assert.assertEquals(1, stats[0]);
+ // Ensure only the previous successful one is invoked
+ Mockito.verifyNoMoreInteractions(badMock);
+ Assert.assertEquals(3, counter.get());
+
+ // Flip to standby, so now this should fail
+ isGood[0] = 2;
+ try {
+ provider.getProxy().proxy.getStats();
+ Assert.fail("Should fail since previously successful proxy now fails ");
+ } catch (Exception ex) {
+ Assert.assertTrue(ex instanceof IOException);
+ }
+
+ Assert.assertEquals(4, counter.get());
+
+ provider.performFailover(provider.getProxy().proxy);
+ stats = provider.getProxy().proxy.getStats();
+ Assert.assertTrue(stats.length == 1);
+ Assert.assertEquals(2, stats[0]);
+
+ // Counter should update only once
+ Assert.assertEquals(5, counter.get());
+
+ stats = provider.getProxy().proxy.getStats();
+ Assert.assertTrue(stats.length == 1);
+ Assert.assertEquals(2, stats[0]);
+
+ // Counter updates only once now
+ Assert.assertEquals(6, counter.get());
+
+ // Flip back to the old active, so now this should fail
+ isGood[0] = 1;
+ try {
+ provider.getProxy().proxy.getStats();
+ Assert.fail("Should fail since previously successful proxy now fails ");
+ } catch (Exception ex) {
+ Assert.assertTrue(ex instanceof IOException);
+ }
+
+ Assert.assertEquals(7, counter.get());
+
+ provider.performFailover(provider.getProxy().proxy);
+ stats = provider.getProxy().proxy.getStats();
+ Assert.assertTrue(stats.length == 1);
+ // Ensure correct proxy was called
+ Assert.assertEquals(1, stats[0]);
+ }
+
+ @Test
+ public void testPerformFailoverWith3Proxies() throws Exception {
+ conf.set(DFSConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX + "." + ns,
+ "nn1,nn2,nn3");
+ conf.set(DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY + "." + ns + ".nn3",
+ "machine3.foo.bar:8020");
+
+ final AtomicInteger counter = new AtomicInteger(0);
+ final int[] isGood = {1};
+ final NamenodeProtocols goodMock = Mockito.mock(NamenodeProtocols.class);
+ Mockito.when(goodMock.getStats()).thenAnswer(new Answer<long[]>() {
+ @Override
+ public long[] answer(InvocationOnMock invocation) throws Throwable {
+ counter.incrementAndGet();
+ if (isGood[0] == 1) {
+ Thread.sleep(1000);
+ return new long[]{1};
+ }
+ throw new IOException("Was Good mock !!");
+ }
+ });
+ final NamenodeProtocols badMock = Mockito.mock(NamenodeProtocols.class);
+ Mockito.when(badMock.getStats()).thenAnswer(new Answer<long[]>() {
+ @Override
+ public long[] answer(InvocationOnMock invocation) throws Throwable {
+ counter.incrementAndGet();
+ if (isGood[0] == 2) {
+ Thread.sleep(1000);
+ return new long[]{2};
+ }
+ throw new IOException("Bad mock !!");
+ }
+ });
+ final NamenodeProtocols worseMock = Mockito.mock(NamenodeProtocols.class);
+ Mockito.when(worseMock.getStats()).thenAnswer(new Answer<long[]>() {
+ @Override
+ public long[] answer(InvocationOnMock invocation) throws Throwable {
+ counter.incrementAndGet();
+ if (isGood[0] == 3) {
+ Thread.sleep(1000);
+ return new long[]{3};
+ }
+ throw new IOException("Worse mock !!");
+ }
+ });
+
+ RequestHedgingProxyProvider<NamenodeProtocols> provider =
+ new RequestHedgingProxyProvider<>(conf, nnUri, NamenodeProtocols.class,
+ createFactory(goodMock, badMock, worseMock));
+ long[] stats = provider.getProxy().proxy.getStats();
+ Assert.assertTrue(stats.length == 1);
+ Assert.assertEquals(1, stats[0]);
+ Assert.assertEquals(3, counter.get());
+ Mockito.verify(badMock).getStats();
+ Mockito.verify(goodMock).getStats();
+ Mockito.verify(worseMock).getStats();
+
+ stats = provider.getProxy().proxy.getStats();
+ Assert.assertTrue(stats.length == 1);
+ Assert.assertEquals(1, stats[0]);
+ // Ensure only the previous successful one is invoked
+ Mockito.verifyNoMoreInteractions(badMock);
+ Mockito.verifyNoMoreInteractions(worseMock);
+ Assert.assertEquals(4, counter.get());
+
+ // Flip to standby, so now this should fail
+ isGood[0] = 2;
+ try {
+ provider.getProxy().proxy.getStats();
+ Assert.fail("Should fail since previously successful proxy now fails ");
+ } catch (Exception ex) {
+ Assert.assertTrue(ex instanceof IOException);
+ }
+
+ Assert.assertEquals(5, counter.get());
+
+ provider.performFailover(provider.getProxy().proxy);
+ stats = provider.getProxy().proxy.getStats();
+ Assert.assertTrue(stats.length == 1);
+ Assert.assertEquals(2, stats[0]);
+
+ // Counter updates twice since both proxies are tried on failure
+ Assert.assertEquals(7, counter.get());
+
+ stats = provider.getProxy().proxy.getStats();
+ Assert.assertTrue(stats.length == 1);
+ Assert.assertEquals(2, stats[0]);
+
+ // Counter updates only once now
+ Assert.assertEquals(8, counter.get());
+
+ // Flip to the other standby, so now this should fail
+ isGood[0] = 3;
+ try {
+ provider.getProxy().proxy.getStats();
+ Assert.fail("Should fail since previously successful proxy now fails ");
+ } catch (Exception ex) {
+ Assert.assertTrue(ex instanceof IOException);
+ }
+
+ // Counter should update only once
+ Assert.assertEquals(9, counter.get());
+
+ provider.performFailover(provider.getProxy().proxy);
+ stats = provider.getProxy().proxy.getStats();
+ Assert.assertTrue(stats.length == 1);
+
+ // Ensure correct proxy was called
+ Assert.assertEquals(3, stats[0]);
+
+ // Counter updates twice since both proxies are tried on failure
+ Assert.assertEquals(11, counter.get());
+
+ stats = provider.getProxy().proxy.getStats();
+ Assert.assertTrue(stats.length == 1);
+ Assert.assertEquals(3, stats[0]);
+
+ // Counter updates only once now
+ Assert.assertEquals(12, counter.get());
+ }
+
+ private ProxyFactory<NamenodeProtocols> createFactory(
+ NamenodeProtocols... protos) {
+ final Iterator<NamenodeProtocols> iterator =
+ Lists.newArrayList(protos).iterator();
+ return new ProxyFactory<NamenodeProtocols>() {
+ @Override
+ public NamenodeProtocols createProxy(Configuration conf,
+ InetSocketAddress nnAddr, Class<NamenodeProtocols> xface,
+ UserGroupInformation ugi, boolean withRetries,
+ AtomicBoolean fallbackToSimpleAuth) throws IOException {
+ return iterator.next();
+ }
+ };
+ }
+}
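Stripped of the HDFS types, the hedging loop these tests exercise is the standard ExecutorCompletionService pattern. A self-contained sketch of that pattern, not the provider's exact code; all names are invented, and a non-empty list of callables is assumed:

    import java.util.List;
    import java.util.concurrent.Callable;
    import java.util.concurrent.CompletionService;
    import java.util.concurrent.ExecutionException;
    import java.util.concurrent.ExecutorCompletionService;
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;

    public class HedgeSketch {
      // Returns the first successful result; throws the last failure if all fail.
      static <T> T firstSuccessful(List<Callable<T>> calls) throws Exception {
        ExecutorService pool = Executors.newFixedThreadPool(calls.size());
        try {
          CompletionService<T> cs = new ExecutorCompletionService<>(pool);
          for (Callable<T> c : calls) {
            cs.submit(c);
          }
          Exception last = null;
          for (int pending = calls.size(); pending > 0; pending--) {
            try {
              return cs.take().get();   // first completed result wins
            } catch (ExecutionException ex) {
              last = ex;                // record the failure, keep waiting
            }
          }
          throw last;                   // every call failed
        } finally {
          pool.shutdownNow();           // cancel the slower calls
        }
      }
    }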
[08/10] hadoop git commit: Merge branch 'trunk' into HADOOP-12111
Posted by aw...@apache.org.
Merge branch 'trunk' into HADOOP-12111
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0b6953a9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0b6953a9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0b6953a9
Branch: refs/heads/HADOOP-12111
Commit: 0b6953a9405a1fff7360da8a22174987c2ad9936
Parents: 4d4f288 f170934
Author: Allen Wittenauer <aw...@apache.org>
Authored: Tue Jul 28 10:42:31 2015 -0700
Committer: Allen Wittenauer <aw...@apache.org>
Committed: Tue Jul 28 10:42:31 2015 -0700
----------------------------------------------------------------------
hadoop-common-project/hadoop-common/CHANGES.txt | 3 +
.../apache/hadoop/io/retry/MultiException.java | 49 +++
.../hadoop/io/retry/RetryInvocationHandler.java | 99 ++++-
.../src/site/markdown/FileSystemShell.md | 2 +-
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 2 +
.../ha/ConfiguredFailoverProxyProvider.java | 52 ++-
.../ha/RequestHedgingProxyProvider.java | 186 +++++++++
.../markdown/HDFSHighAvailabilityWithNFS.md | 9 +-
.../markdown/HDFSHighAvailabilityWithQJM.md | 10 +-
.../ha/TestRequestHedgingProxyProvider.java | 350 ++++++++++++++++
hadoop-yarn-project/CHANGES.txt | 13 +
.../server/nodemanager/ContainerExecutor.java | 23 +-
.../nodemanager/DefaultContainerExecutor.java | 2 +-
.../nodemanager/DockerContainerExecutor.java | 2 +-
.../nodemanager/LinuxContainerExecutor.java | 222 +++++-----
.../launcher/ContainerLaunch.java | 15 +
.../linux/privileged/PrivilegedOperation.java | 46 +-
.../PrivilegedOperationException.java | 30 +-
.../privileged/PrivilegedOperationExecutor.java | 30 +-
.../linux/resources/CGroupsHandler.java | 8 +
.../linux/resources/CGroupsHandlerImpl.java | 12 +-
.../runtime/DefaultLinuxContainerRuntime.java | 148 +++++++
.../DelegatingLinuxContainerRuntime.java | 110 +++++
.../runtime/DockerLinuxContainerRuntime.java | 273 ++++++++++++
.../linux/runtime/LinuxContainerRuntime.java | 38 ++
.../runtime/LinuxContainerRuntimeConstants.java | 69 +++
.../linux/runtime/docker/DockerClient.java | 82 ++++
.../linux/runtime/docker/DockerCommand.java | 66 +++
.../linux/runtime/docker/DockerLoadCommand.java | 30 ++
.../linux/runtime/docker/DockerRunCommand.java | 107 +++++
.../runtime/ContainerExecutionException.java | 85 ++++
.../runtime/ContainerRuntime.java | 50 +++
.../runtime/ContainerRuntimeConstants.java | 33 ++
.../runtime/ContainerRuntimeContext.java | 105 +++++
.../executor/ContainerLivenessContext.java | 13 +
.../executor/ContainerReacquisitionContext.java | 13 +
.../executor/ContainerSignalContext.java | 13 +
.../executor/ContainerStartContext.java | 23 +-
.../container-executor/impl/configuration.c | 21 +-
.../container-executor/impl/configuration.h | 2 +
.../impl/container-executor.c | 417 ++++++++++++++++---
.../impl/container-executor.h | 25 +-
.../main/native/container-executor/impl/main.c | 97 ++++-
.../test/test-container-executor.c | 22 +-
.../TestLinuxContainerExecutorWithMocks.java | 118 ++++--
.../TestPrivilegedOperationExecutor.java | 8 +-
.../runtime/TestDockerContainerRuntime.java | 219 ++++++++++
.../webapp/CapacitySchedulerPage.java | 5 +-
48 files changed, 3050 insertions(+), 307 deletions(-)
----------------------------------------------------------------------
[03/10] hadoop git commit: YARN-3853. Add docker container runtime
support to LinuxContainterExecutor. Contributed by Sidharta Seethana.
Posted by aw...@apache.org.
YARN-3853. Add docker container runtime support to LinuxContainterExecutor. Contributed by Sidharta Seethana.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3e6fce91
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3e6fce91
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3e6fce91
Branch: refs/heads/HADOOP-12111
Commit: 3e6fce91a471b4a5099de109582e7c6417e8a822
Parents: f36835f
Author: Varun Vasudev <vv...@apache.org>
Authored: Mon Jul 27 11:57:40 2015 -0700
Committer: Varun Vasudev <vv...@apache.org>
Committed: Mon Jul 27 11:57:40 2015 -0700
----------------------------------------------------------------------
hadoop-yarn-project/CHANGES.txt | 4 +
.../server/nodemanager/ContainerExecutor.java | 23 +-
.../nodemanager/DefaultContainerExecutor.java | 2 +-
.../nodemanager/DockerContainerExecutor.java | 2 +-
.../nodemanager/LinuxContainerExecutor.java | 222 +++++++--------
.../launcher/ContainerLaunch.java | 15 +
.../linux/privileged/PrivilegedOperation.java | 46 +++-
.../PrivilegedOperationException.java | 30 +-
.../privileged/PrivilegedOperationExecutor.java | 30 +-
.../linux/resources/CGroupsHandler.java | 8 +
.../linux/resources/CGroupsHandlerImpl.java | 12 +-
.../runtime/DefaultLinuxContainerRuntime.java | 148 ++++++++++
.../DelegatingLinuxContainerRuntime.java | 110 ++++++++
.../runtime/DockerLinuxContainerRuntime.java | 273 +++++++++++++++++++
.../linux/runtime/LinuxContainerRuntime.java | 38 +++
.../runtime/LinuxContainerRuntimeConstants.java | 69 +++++
.../linux/runtime/docker/DockerClient.java | 82 ++++++
.../linux/runtime/docker/DockerCommand.java | 66 +++++
.../linux/runtime/docker/DockerLoadCommand.java | 30 ++
.../linux/runtime/docker/DockerRunCommand.java | 107 ++++++++
.../runtime/ContainerExecutionException.java | 85 ++++++
.../runtime/ContainerRuntime.java | 50 ++++
.../runtime/ContainerRuntimeConstants.java | 33 +++
.../runtime/ContainerRuntimeContext.java | 105 +++++++
.../executor/ContainerLivenessContext.java | 13 +
.../executor/ContainerReacquisitionContext.java | 13 +
.../executor/ContainerSignalContext.java | 13 +
.../executor/ContainerStartContext.java | 23 +-
.../TestLinuxContainerExecutorWithMocks.java | 118 +++++---
.../TestPrivilegedOperationExecutor.java | 8 +-
.../runtime/TestDockerContainerRuntime.java | 219 +++++++++++++++
31 files changed, 1815 insertions(+), 182 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/3e6fce91/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 4e54aea..534c55a 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -153,6 +153,10 @@ Release 2.8.0 - UNRELEASED
YARN-3852. Add docker container support to container-executor
(Abin Shahab via vvasudev)
+ YARN-3853. Add docker container runtime support to LinuxContainterExecutor.
+ (Sidharta Seethana via vvasudev)
+
+
IMPROVEMENTS
YARN-644. Basic null check is not performed on passed in arguments before
http://git-wip-us.apache.org/repos/asf/hadoop/blob/3e6fce91/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java
index 79f9b0d..68bfbbf 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java
@@ -24,8 +24,10 @@ import java.io.OutputStream;
import java.io.PrintStream;
import java.util.ArrayList;
import java.util.Arrays;
+import java.util.HashSet;
import java.util.List;
import java.util.Map;
+import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.locks.ReentrantReadWriteLock;
@@ -39,6 +41,7 @@ import org.apache.hadoop.conf.Configurable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.yarn.api.ApplicationConstants;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
@@ -160,7 +163,7 @@ public abstract class ContainerExecutor implements Configurable {
* @return true if container is still alive
* @throws IOException
*/
- public abstract boolean isContainerProcessAlive(ContainerLivenessContext ctx)
+ public abstract boolean isContainerAlive(ContainerLivenessContext ctx)
throws IOException;
/**
@@ -174,6 +177,7 @@ public abstract class ContainerExecutor implements Configurable {
*/
public int reacquireContainer(ContainerReacquisitionContext ctx)
throws IOException, InterruptedException {
+ Container container = ctx.getContainer();
String user = ctx.getUser();
ContainerId containerId = ctx.getContainerId();
@@ -193,10 +197,11 @@ public abstract class ContainerExecutor implements Configurable {
LOG.info("Reacquiring " + containerId + " with pid " + pid);
ContainerLivenessContext livenessContext = new ContainerLivenessContext
.Builder()
+ .setContainer(container)
.setUser(user)
.setPid(pid)
.build();
- while(isContainerProcessAlive(livenessContext)) {
+ while(isContainerAlive(livenessContext)) {
Thread.sleep(1000);
}
@@ -243,9 +248,20 @@ public abstract class ContainerExecutor implements Configurable {
Map<Path, List<String>> resources, List<String> command) throws IOException{
ContainerLaunch.ShellScriptBuilder sb =
ContainerLaunch.ShellScriptBuilder.create();
+ Set<String> whitelist = new HashSet<String>();
+ whitelist.add(YarnConfiguration.NM_DOCKER_CONTAINER_EXECUTOR_IMAGE_NAME);
+ whitelist.add(ApplicationConstants.Environment.HADOOP_YARN_HOME.name());
+ whitelist.add(ApplicationConstants.Environment.HADOOP_COMMON_HOME.name());
+ whitelist.add(ApplicationConstants.Environment.HADOOP_HDFS_HOME.name());
+ whitelist.add(ApplicationConstants.Environment.HADOOP_CONF_DIR.name());
+ whitelist.add(ApplicationConstants.Environment.JAVA_HOME.name());
if (environment != null) {
for (Map.Entry<String,String> env : environment.entrySet()) {
- sb.env(env.getKey().toString(), env.getValue().toString());
+ if (!whitelist.contains(env.getKey())) {
+ sb.env(env.getKey().toString(), env.getValue().toString());
+ } else {
+ sb.whitelistedEnv(env.getKey().toString(), env.getValue().toString());
+ }
}
}
if (resources != null) {
@@ -492,6 +508,7 @@ public abstract class ContainerExecutor implements Configurable {
try {
Thread.sleep(delay);
containerExecutor.signalContainer(new ContainerSignalContext.Builder()
+ .setContainer(container)
.setUser(user)
.setPid(pid)
.setSignal(signal)
http://git-wip-us.apache.org/repos/asf/hadoop/blob/3e6fce91/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DefaultContainerExecutor.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DefaultContainerExecutor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DefaultContainerExecutor.java
index b9be2b1..5819f23 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DefaultContainerExecutor.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DefaultContainerExecutor.java
@@ -430,7 +430,7 @@ public class DefaultContainerExecutor extends ContainerExecutor {
}
@Override
- public boolean isContainerProcessAlive(ContainerLivenessContext ctx)
+ public boolean isContainerAlive(ContainerLivenessContext ctx)
throws IOException {
String pid = ctx.getPid();
http://git-wip-us.apache.org/repos/asf/hadoop/blob/3e6fce91/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DockerContainerExecutor.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DockerContainerExecutor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DockerContainerExecutor.java
index d3b5d0a..9dffff3 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DockerContainerExecutor.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DockerContainerExecutor.java
@@ -413,7 +413,7 @@ public class DockerContainerExecutor extends ContainerExecutor {
}
@Override
- public boolean isContainerProcessAlive(ContainerLivenessContext ctx)
+ public boolean isContainerAlive(ContainerLivenessContext ctx)
throws IOException {
String pid = ctx.getPid();
http://git-wip-us.apache.org/repos/asf/hadoop/blob/3e6fce91/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
index b936969..0670d95 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
@@ -20,15 +20,6 @@ package org.apache.hadoop.yarn.server.nodemanager;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Optional;
-
-import java.io.File;
-import java.io.IOException;
-import java.net.InetSocketAddress;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.List;
-import java.util.regex.Pattern;
-
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
@@ -46,10 +37,14 @@ import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Cont
import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperation;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperationException;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperationExecutor;
-import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.ContainerLocalizer;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources.ResourceHandler;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources.ResourceHandlerException;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources.ResourceHandlerModule;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.DelegatingLinuxContainerRuntime;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.LinuxContainerRuntime;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.ContainerLocalizer;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.runtime.ContainerExecutionException;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.runtime.ContainerRuntimeContext;
import org.apache.hadoop.yarn.server.nodemanager.executor.ContainerLivenessContext;
import org.apache.hadoop.yarn.server.nodemanager.executor.ContainerReacquisitionContext;
import org.apache.hadoop.yarn.server.nodemanager.executor.ContainerSignalContext;
@@ -60,6 +55,22 @@ import org.apache.hadoop.yarn.server.nodemanager.util.DefaultLCEResourcesHandler
import org.apache.hadoop.yarn.server.nodemanager.util.LCEResourcesHandler;
import org.apache.hadoop.yarn.util.ConverterUtils;
+import java.io.File;
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Map;
+import java.util.regex.Pattern;
+
+import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.LinuxContainerRuntimeConstants.*;
+
+/** Container executor for Linux. Provides Linux-specific localization
+ * mechanisms and resource management via cgroups, and can switch between
+ * multiple container runtimes, e.g. the standard "process tree" runtime or Docker.
+ */
+
public class LinuxContainerExecutor extends ContainerExecutor {
private static final Log LOG = LogFactory
@@ -73,6 +84,15 @@ public class LinuxContainerExecutor extends ContainerExecutor {
private int containerSchedPriorityAdjustment = 0;
private boolean containerLimitUsers;
private ResourceHandler resourceHandlerChain;
+ private LinuxContainerRuntime linuxContainerRuntime;
+
+ public LinuxContainerExecutor() {
+ }
+
+ // created primarily for testing
+ public LinuxContainerExecutor(LinuxContainerRuntime linuxContainerRuntime) {
+ this.linuxContainerRuntime = linuxContainerRuntime;
+ }
@Override
public void setConf(Configuration conf) {
@@ -85,10 +105,10 @@ public class LinuxContainerExecutor extends ContainerExecutor {
resourcesHandler.setConf(conf);
if (conf.get(YarnConfiguration.NM_CONTAINER_EXECUTOR_SCHED_PRIORITY) != null) {
- containerSchedPriorityIsSet = true;
- containerSchedPriorityAdjustment = conf
- .getInt(YarnConfiguration.NM_CONTAINER_EXECUTOR_SCHED_PRIORITY,
- YarnConfiguration.DEFAULT_NM_CONTAINER_EXECUTOR_SCHED_PRIORITY);
+ containerSchedPriorityIsSet = true;
+ containerSchedPriorityAdjustment = conf
+ .getInt(YarnConfiguration.NM_CONTAINER_EXECUTOR_SCHED_PRIORITY,
+ YarnConfiguration.DEFAULT_NM_CONTAINER_EXECUTOR_SCHED_PRIORITY);
}
nonsecureLocalUser = conf.get(
YarnConfiguration.NM_NONSECURE_MODE_LOCAL_USER_KEY,
@@ -122,46 +142,6 @@ public class LinuxContainerExecutor extends ContainerExecutor {
}
}
- /**
- * List of commands that the setuid script will execute.
- */
- enum Commands {
- INITIALIZE_CONTAINER(0),
- LAUNCH_CONTAINER(1),
- SIGNAL_CONTAINER(2),
- DELETE_AS_USER(3);
-
- private int value;
- Commands(int value) {
- this.value = value;
- }
- int getValue() {
- return value;
- }
- }
-
- /**
- * Result codes returned from the C container-executor.
- * These must match the values in container-executor.h.
- */
- enum ResultCode {
- OK(0),
- INVALID_USER_NAME(2),
- UNABLE_TO_EXECUTE_CONTAINER_SCRIPT(7),
- INVALID_CONTAINER_PID(9),
- INVALID_CONTAINER_EXEC_PERMISSIONS(22),
- INVALID_CONFIG_FILE(24),
- WRITE_CGROUP_FAILED(27);
-
- private final int value;
- ResultCode(int value) {
- this.value = value;
- }
- int getValue() {
- return value;
- }
- }
-
protected String getContainerExecutorExecutablePath(Configuration conf) {
String yarnHomeEnvVar =
System.getenv(ApplicationConstants.Environment.HADOOP_YARN_HOME.key());
@@ -203,9 +183,9 @@ public class LinuxContainerExecutor extends ContainerExecutor {
+ " (error=" + exitCode + ")", e);
}
- try {
- Configuration conf = super.getConf();
+ Configuration conf = super.getConf();
+ try {
resourceHandlerChain = ResourceHandlerModule
.getConfiguredResourceHandlerChain(conf);
if (resourceHandlerChain != null) {
@@ -216,9 +196,20 @@ public class LinuxContainerExecutor extends ContainerExecutor {
throw new IOException("Failed to bootstrap configured resource subsystems!");
}
+ try {
+ if (linuxContainerRuntime == null) {
+ LinuxContainerRuntime runtime = new DelegatingLinuxContainerRuntime();
+
+ runtime.initialize(conf);
+ this.linuxContainerRuntime = runtime;
+ }
+ } catch (ContainerExecutionException e) {
+ throw new IOException("Failed to initialize linux container runtime(s)!");
+ }
+
resourcesHandler.init(this);
}
-
+
@Override
public void startLocalizer(LocalizerStartContext ctx)
throws IOException, InterruptedException {
@@ -238,7 +229,7 @@ public class LinuxContainerExecutor extends ContainerExecutor {
command.addAll(Arrays.asList(containerExecutorExe,
runAsUser,
user,
- Integer.toString(Commands.INITIALIZE_CONTAINER.getValue()),
+ Integer.toString(PrivilegedOperation.RunAsUserCommand.INITIALIZE_CONTAINER.getValue()),
appId,
nmPrivateContainerTokensPath.toUri().getPath().toString(),
StringUtils.join(PrivilegedOperation.LINUX_FILE_PATH_SEPARATOR,
@@ -294,6 +285,7 @@ public class LinuxContainerExecutor extends ContainerExecutor {
Path containerWorkDir = ctx.getContainerWorkDir();
List<String> localDirs = ctx.getLocalDirs();
List<String> logDirs = ctx.getLogDirs();
+ Map<Path, List<String>> localizedResources = ctx.getLocalizedResources();
verifyUsernamePattern(user);
String runAsUser = getRunAsUser(user);
@@ -351,50 +343,48 @@ public class LinuxContainerExecutor extends ContainerExecutor {
throw new IOException("ResourceHandlerChain.preStart() failed!");
}
- ShellCommandExecutor shExec = null;
-
try {
Path pidFilePath = getPidFilePath(containerId);
if (pidFilePath != null) {
- List<String> command = new ArrayList<String>();
- addSchedPriorityCommand(command);
- command.addAll(Arrays.asList(
- containerExecutorExe, runAsUser, user, Integer
- .toString(Commands.LAUNCH_CONTAINER.getValue()), appId,
- containerIdStr, containerWorkDir.toString(),
- nmPrivateContainerScriptPath.toUri().getPath().toString(),
- nmPrivateTokensPath.toUri().getPath().toString(),
- pidFilePath.toString(),
- StringUtils.join(PrivilegedOperation.LINUX_FILE_PATH_SEPARATOR,
- localDirs),
- StringUtils.join(PrivilegedOperation.LINUX_FILE_PATH_SEPARATOR,
- logDirs),
- resourcesOptions));
+ List<String> prefixCommands = new ArrayList<>();
+ ContainerRuntimeContext.Builder builder = new ContainerRuntimeContext
+ .Builder(container);
+
+ addSchedPriorityCommand(prefixCommands);
+ if (prefixCommands.size() > 0) {
+ builder.setExecutionAttribute(CONTAINER_LAUNCH_PREFIX_COMMANDS,
+ prefixCommands);
+ }
+
+ builder.setExecutionAttribute(LOCALIZED_RESOURCES, localizedResources)
+ .setExecutionAttribute(RUN_AS_USER, runAsUser)
+ .setExecutionAttribute(USER, user)
+ .setExecutionAttribute(APPID, appId)
+ .setExecutionAttribute(CONTAINER_ID_STR, containerIdStr)
+ .setExecutionAttribute(CONTAINER_WORK_DIR, containerWorkDir)
+ .setExecutionAttribute(NM_PRIVATE_CONTAINER_SCRIPT_PATH,
+ nmPrivateContainerScriptPath)
+ .setExecutionAttribute(NM_PRIVATE_TOKENS_PATH, nmPrivateTokensPath)
+ .setExecutionAttribute(PID_FILE_PATH, pidFilePath)
+ .setExecutionAttribute(LOCAL_DIRS, localDirs)
+ .setExecutionAttribute(LOG_DIRS, logDirs)
+ .setExecutionAttribute(RESOURCES_OPTIONS, resourcesOptions);
if (tcCommandFile != null) {
- command.add(tcCommandFile);
+ builder.setExecutionAttribute(TC_COMMAND_FILE, tcCommandFile);
}
- String[] commandArray = command.toArray(new String[command.size()]);
- shExec = new ShellCommandExecutor(commandArray, null, // NM's cwd
- container.getLaunchContext().getEnvironment()); // sanitized env
- if (LOG.isDebugEnabled()) {
- LOG.debug("launchContainer: " + Arrays.toString(commandArray));
- }
- shExec.execute();
- if (LOG.isDebugEnabled()) {
- logOutput(shExec.getOutput());
- }
+ linuxContainerRuntime.launchContainer(builder.build());
} else {
LOG.info("Container was marked as inactive. Returning terminated error");
return ExitCode.TERMINATED.getExitCode();
}
- } catch (ExitCodeException e) {
- int exitCode = shExec.getExitCode();
+ } catch (ContainerExecutionException e) {
+ int exitCode = e.getExitCode();
LOG.warn("Exit code from container " + containerId + " is : " + exitCode);
// 143 (SIGTERM) and 137 (SIGKILL) exit codes means the container was
// terminated/killed forcefully. In all other cases, log the
- // container-executor's output
+ // output
if (exitCode != ExitCode.FORCE_KILLED.getExitCode()
&& exitCode != ExitCode.TERMINATED.getExitCode()) {
LOG.warn("Exception from container-launch with container ID: "
@@ -404,13 +394,13 @@ public class LinuxContainerExecutor extends ContainerExecutor {
builder.append("Exception from container-launch.\n");
builder.append("Container id: " + containerId + "\n");
builder.append("Exit code: " + exitCode + "\n");
- if (!Optional.fromNullable(e.getMessage()).or("").isEmpty()) {
- builder.append("Exception message: " + e.getMessage() + "\n");
+ if (!Optional.fromNullable(e.getErrorOutput()).or("").isEmpty()) {
+ builder.append("Exception message: " + e.getErrorOutput() + "\n");
}
builder.append("Stack trace: "
+ StringUtils.stringifyException(e) + "\n");
- if (!shExec.getOutput().isEmpty()) {
- builder.append("Shell output: " + shExec.getOutput() + "\n");
+ if (!e.getOutput().isEmpty()) {
+ builder.append("Shell output: " + e.getOutput() + "\n");
}
String diagnostics = builder.toString();
logOutput(diagnostics);
@@ -433,10 +423,7 @@ public class LinuxContainerExecutor extends ContainerExecutor {
"containerId: " + containerId + ". Exception: " + e);
}
}
- if (LOG.isDebugEnabled()) {
- LOG.debug("Output from LinuxContainerExecutor's launchContainer follows:");
- logOutput(shExec.getOutput());
- }
+
return 0;
}
@@ -474,6 +461,7 @@ public class LinuxContainerExecutor extends ContainerExecutor {
@Override
public boolean signalContainer(ContainerSignalContext ctx)
throws IOException {
+ Container container = ctx.getContainer();
String user = ctx.getUser();
String pid = ctx.getPid();
Signal signal = ctx.getSignal();
@@ -481,30 +469,27 @@ public class LinuxContainerExecutor extends ContainerExecutor {
verifyUsernamePattern(user);
String runAsUser = getRunAsUser(user);
- String[] command =
- new String[] { containerExecutorExe,
- runAsUser,
- user,
- Integer.toString(Commands.SIGNAL_CONTAINER.getValue()),
- pid,
- Integer.toString(signal.getValue()) };
- ShellCommandExecutor shExec = new ShellCommandExecutor(command);
- if (LOG.isDebugEnabled()) {
- LOG.debug("signalContainer: " + Arrays.toString(command));
- }
+ ContainerRuntimeContext runtimeContext = new ContainerRuntimeContext
+ .Builder(container)
+ .setExecutionAttribute(RUN_AS_USER, runAsUser)
+ .setExecutionAttribute(USER, user)
+ .setExecutionAttribute(PID, pid)
+ .setExecutionAttribute(SIGNAL, signal)
+ .build();
+
try {
- shExec.execute();
- } catch (ExitCodeException e) {
- int ret_code = shExec.getExitCode();
- if (ret_code == ResultCode.INVALID_CONTAINER_PID.getValue()) {
+ linuxContainerRuntime.signalContainer(runtimeContext);
+ } catch (ContainerExecutionException e) {
+ int retCode = e.getExitCode();
+ if (retCode == PrivilegedOperation.ResultCode.INVALID_CONTAINER_PID.getValue()) {
return false;
}
LOG.warn("Error in signalling container " + pid + " with " + signal
- + "; exit = " + ret_code, e);
- logOutput(shExec.getOutput());
+ + "; exit = " + retCode, e);
+ logOutput(e.getOutput());
throw new IOException("Problem signalling container " + pid + " with "
- + signal + "; output: " + shExec.getOutput() + " and exitCode: "
- + ret_code, e);
+ + signal + "; output: " + e.getOutput() + " and exitCode: "
+ + retCode, e);
}
return true;
}
@@ -524,7 +509,8 @@ public class LinuxContainerExecutor extends ContainerExecutor {
Arrays.asList(containerExecutorExe,
runAsUser,
user,
- Integer.toString(Commands.DELETE_AS_USER.getValue()),
+ Integer.toString(PrivilegedOperation.
+ RunAsUserCommand.DELETE_AS_USER.getValue()),
dirString));
List<String> pathsToDelete = new ArrayList<String>();
if (baseDirs == null || baseDirs.size() == 0) {
@@ -558,13 +544,15 @@ public class LinuxContainerExecutor extends ContainerExecutor {
}
@Override
- public boolean isContainerProcessAlive(ContainerLivenessContext ctx)
+ public boolean isContainerAlive(ContainerLivenessContext ctx)
throws IOException {
String user = ctx.getUser();
String pid = ctx.getPid();
+ Container container = ctx.getContainer();
// Send a test signal to the process as the user to see if it's alive
return signalContainer(new ContainerSignalContext.Builder()
+ .setContainer(container)
.setUser(user)
.setPid(pid)
.setSignal(Signal.NULL)
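Given the runtime-injecting constructor added above ("created primarily for testing"), a test can swap in a mock runtime so that init() keeps the injected instance instead of bootstrapping a DelegatingLinuxContainerRuntime. A hedged sketch, assuming Mockito, which the test changes in this commit already use:

    import org.mockito.Mockito;

    // Inside a test method:
    LinuxContainerRuntime runtime = Mockito.mock(LinuxContainerRuntime.class);
    LinuxContainerExecutor lce = new LinuxContainerExecutor(runtime);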
http://git-wip-us.apache.org/repos/asf/hadoop/blob/3e6fce91/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java
index af168c5..bf00d74 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java
@@ -303,6 +303,7 @@ public class ContainerLaunch implements Callable<Integer> {
exec.activateContainer(containerID, pidFilePath);
ret = exec.launchContainer(new ContainerStartContext.Builder()
.setContainer(container)
+ .setLocalizedResources(localResources)
.setNmPrivateContainerScriptPath(nmPrivateContainerScriptPath)
.setNmPrivateTokensPath(nmPrivateTokensPath)
.setUser(user)
@@ -427,6 +428,7 @@ public class ContainerLaunch implements Callable<Integer> {
boolean result = exec.signalContainer(
new ContainerSignalContext.Builder()
+ .setContainer(container)
.setUser(user)
.setPid(processId)
.setSignal(signal)
@@ -528,6 +530,8 @@ public class ContainerLaunch implements Callable<Integer> {
public abstract void command(List<String> command) throws IOException;
+ public abstract void whitelistedEnv(String key, String value) throws IOException;
+
public abstract void env(String key, String value) throws IOException;
public final void symlink(Path src, Path dst) throws IOException {
@@ -586,6 +590,11 @@ public class ContainerLaunch implements Callable<Integer> {
}
@Override
+ public void whitelistedEnv(String key, String value) {
+ line("export ", key, "=${", key, ":-", "\"", value, "\"}");
+ }
+
+ @Override
public void env(String key, String value) {
line("export ", key, "=\"", value, "\"");
}
@@ -627,6 +636,12 @@ public class ContainerLaunch implements Callable<Integer> {
}
@Override
+ public void whitelistedEnv(String key, String value) throws IOException {
+ lineWithLenCheck("@set ", key, "=", value);
+ errorCheck();
+ }
+
+ @Override
public void env(String key, String value) throws IOException {
lineWithLenCheck("@set ", key, "=", value);
errorCheck();
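To make the whitelist behavior concrete: for a whitelisted key (JAVA_HOME is in the whitelist added to ContainerExecutor above), the Unix script builder lets a value already present in the NodeManager's environment win and uses the container's value only as a fallback, while an ordinary key is exported unconditionally. A small sketch with invented values, inside a method that throws IOException:

    ContainerLaunch.ShellScriptBuilder sb =
        ContainerLaunch.ShellScriptBuilder.create();
    sb.whitelistedEnv("JAVA_HOME", "/usr/java/default");
    // Unix output: export JAVA_HOME=${JAVA_HOME:-"/usr/java/default"}
    sb.env("APP_COLOR", "blue");
    // Unix output: export APP_COLOR="blue"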
http://git-wip-us.apache.org/repos/asf/hadoop/blob/3e6fce91/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/privileged/PrivilegedOperation.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/privileged/PrivilegedOperation.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/privileged/PrivilegedOperation.java
index f220cbd..cbbf7a8 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/privileged/PrivilegedOperation.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/privileged/PrivilegedOperation.java
@@ -45,10 +45,12 @@ public class PrivilegedOperation {
LAUNCH_CONTAINER(""), //no CLI switch supported yet
SIGNAL_CONTAINER(""), //no CLI switch supported yet
DELETE_AS_USER(""), //no CLI switch supported yet
+ LAUNCH_DOCKER_CONTAINER(""), //no CLI switch supported yet
TC_MODIFY_STATE("--tc-modify-state"),
TC_READ_STATE("--tc-read-state"),
TC_READ_STATS("--tc-read-stats"),
- ADD_PID_TO_CGROUP(""); //no CLI switch supported yet.
+ ADD_PID_TO_CGROUP(""), //no CLI switch supported yet.
+ RUN_DOCKER_CMD("--run-docker");
private final String option;
@@ -62,6 +64,7 @@ public class PrivilegedOperation {
}
public static final String CGROUP_ARG_PREFIX = "cgroups=";
+ public static final String CGROUP_ARG_NO_TASKS = "none";
private final OperationType opType;
private final List<String> args;
@@ -117,4 +120,45 @@ public class PrivilegedOperation {
public int hashCode() {
return opType.hashCode() + 97 * args.hashCode();
}
+
+ /**
+ * List of commands that the container-executor will execute.
+ */
+ public enum RunAsUserCommand {
+ INITIALIZE_CONTAINER(0),
+ LAUNCH_CONTAINER(1),
+ SIGNAL_CONTAINER(2),
+ DELETE_AS_USER(3),
+ LAUNCH_DOCKER_CONTAINER(4);
+
+ private int value;
+ RunAsUserCommand(int value) {
+ this.value = value;
+ }
+ public int getValue() {
+ return value;
+ }
+ }
+
+ /**
+ * Result codes returned from the C container-executor.
+ * These must match the values in container-executor.h.
+ */
+ public enum ResultCode {
+ OK(0),
+ INVALID_USER_NAME(2),
+ UNABLE_TO_EXECUTE_CONTAINER_SCRIPT(7),
+ INVALID_CONTAINER_PID(9),
+ INVALID_CONTAINER_EXEC_PERMISSIONS(22),
+ INVALID_CONFIG_FILE(24),
+ WRITE_CGROUP_FAILED(27);
+
+ private final int value;
+ ResultCode(int value) {
+ this.value = value;
+ }
+ public int getValue() {
+ return value;
+ }
+ }
}
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/hadoop/blob/3e6fce91/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/privileged/PrivilegedOperationException.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/privileged/PrivilegedOperationException.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/privileged/PrivilegedOperationException.java
index 20c234d..3622489 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/privileged/PrivilegedOperationException.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/privileged/PrivilegedOperationException.java
@@ -24,6 +24,9 @@ import org.apache.hadoop.yarn.exceptions.YarnException;
public class PrivilegedOperationException extends YarnException {
private static final long serialVersionUID = 1L;
+ private Integer exitCode;
+ private String output;
+ private String errorOutput;
public PrivilegedOperationException() {
super();
@@ -33,11 +36,36 @@ public class PrivilegedOperationException extends YarnException {
super(message);
}
+ public PrivilegedOperationException(String message, Integer exitCode,
+ String output, String errorOutput) {
+ super(message);
+ this.exitCode = exitCode;
+ this.output = output;
+ this.errorOutput = errorOutput;
+ }
+
public PrivilegedOperationException(Throwable cause) {
super(cause);
}
+ public PrivilegedOperationException(Throwable cause, Integer exitCode, String
+ output, String errorOutput) {
+ super(cause);
+ this.exitCode = exitCode;
+ this.output = output;
+ this.errorOutput = errorOutput;
+ }
public PrivilegedOperationException(String message, Throwable cause) {
super(message, cause);
}
-}
+
+ public Integer getExitCode() {
+ return exitCode;
+ }
+
+ public String getOutput() {
+ return output;
+ }
+
+ public String getErrorOutput() { return errorOutput; }
+}
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/hadoop/blob/3e6fce91/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/privileged/PrivilegedOperationExecutor.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/privileged/PrivilegedOperationExecutor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/privileged/PrivilegedOperationExecutor.java
index 6fe0f5c..1d71938 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/privileged/PrivilegedOperationExecutor.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/privileged/PrivilegedOperationExecutor.java
@@ -20,6 +20,7 @@
package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged;
+import com.google.common.annotations.VisibleForTesting;
import org.apache.commons.lang.StringUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
@@ -101,7 +102,13 @@ public class PrivilegedOperationExecutor {
}
fullCommand.add(containerExecutorExe);
- fullCommand.add(operation.getOperationType().getOption());
+
+ String cliSwitch = operation.getOperationType().getOption();
+
+ if (!cliSwitch.isEmpty()) {
+ fullCommand.add(cliSwitch);
+ }
+
fullCommand.addAll(operation.getArguments());
String[] fullCommandArray =
@@ -142,6 +149,8 @@ public class PrivilegedOperationExecutor {
try {
exec.execute();
if (LOG.isDebugEnabled()) {
+ LOG.debug("command array:");
+ LOG.debug(Arrays.toString(fullCommandArray));
LOG.debug("Privileged Execution Operation Output:");
LOG.debug(exec.getOutput());
}
@@ -152,7 +161,11 @@ public class PrivilegedOperationExecutor {
.append(System.lineSeparator()).append(exec.getOutput()).toString();
LOG.warn(logLine);
- throw new PrivilegedOperationException(e);
+
+ //stderr from the shell executor seems to be stuffed into the exception
+ //message - so we extract it and surface it as the error output
+ throw new PrivilegedOperationException(e, e.getExitCode(),
+ exec.getOutput(), e.getMessage());
} catch (IOException e) {
LOG.warn("IOException executing command: ", e);
throw new PrivilegedOperationException(e);
@@ -202,7 +215,7 @@ public class PrivilegedOperationExecutor {
StringBuffer finalOpArg = new StringBuffer(PrivilegedOperation
.CGROUP_ARG_PREFIX);
- boolean noneArgsOnly = true;
+ boolean noTasks = true;
for (PrivilegedOperation op : ops) {
if (!op.getOperationType()
@@ -227,23 +240,24 @@ public class PrivilegedOperationExecutor {
throw new PrivilegedOperationException("Invalid argument: " + arg);
}
- if (tasksFile.equals("none")) {
+ if (tasksFile.equals(PrivilegedOperation.CGROUP_ARG_NO_TASKS)) {
//Don't append to finalOpArg
continue;
}
- if (noneArgsOnly == false) {
+ if (!noTasks) {
//We have already appended at least one tasks file.
finalOpArg.append(PrivilegedOperation.LINUX_FILE_PATH_SEPARATOR);
finalOpArg.append(tasksFile);
} else {
finalOpArg.append(tasksFile);
- noneArgsOnly = false;
+ noTasks = false;
}
}
- if (noneArgsOnly) {
- finalOpArg.append("none"); //there were no tasks file to append
+ if (noTasks) {
+ //there were no tasks files to append
+ finalOpArg.append(PrivilegedOperation.CGROUP_ARG_NO_TASKS);
}
PrivilegedOperation finalOp = new PrivilegedOperation(
http://git-wip-us.apache.org/repos/asf/hadoop/blob/3e6fce91/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsHandler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsHandler.java
index 70dc818..6020bc1 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsHandler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsHandler.java
@@ -79,6 +79,14 @@ public interface CGroupsHandler {
ResourceHandlerException;
/**
+ * Gets the relative path for the cgroup, independent of a controller, for a
+ * given cgroup id.
+ * @param cGroupId - id of the cgroup
+ * @return path for the cgroup relative to the root of (any) controller.
+ */
+ public String getRelativePathForCGroup(String cGroupId);
+
+ /**
* Gets the full path for the cgroup, given a controller and a cgroup id
* @param controller - controller type for the cgroup
* @param cGroupId - id of the cgroup
http://git-wip-us.apache.org/repos/asf/hadoop/blob/3e6fce91/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsHandlerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsHandlerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsHandlerImpl.java
index ff56121..0d71a9d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsHandlerImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsHandlerImpl.java
@@ -147,9 +147,9 @@ class CGroupsHandlerImpl implements CGroupsHandler {
} else {
String error =
new StringBuffer("Mount point Based on mtab file: ")
- .append(mtab)
- .append(". Controller mount point not writable for: ")
- .append(name).toString();
+ .append(mtab)
+ .append(". Controller mount point not writable for: ")
+ .append(name).toString();
LOG.error(error);
throw new ResourceHandlerException(error);
@@ -272,6 +272,12 @@ class CGroupsHandlerImpl implements CGroupsHandler {
}
@Override
+ public String getRelativePathForCGroup(String cGroupId) {
+ return new StringBuffer(cGroupPrefix).append("/")
+ .append(cGroupId).toString();
+ }
+
+ @Override
public String getPathForCGroup(CGroupController controller, String cGroupId) {
return new StringBuffer(getControllerPath(controller))
.append('/').append(cGroupPrefix).append("/")
http://git-wip-us.apache.org/repos/asf/hadoop/blob/3e6fce91/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DefaultLinuxContainerRuntime.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DefaultLinuxContainerRuntime.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DefaultLinuxContainerRuntime.java
new file mode 100644
index 0000000..633fa66
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DefaultLinuxContainerRuntime.java
@@ -0,0 +1,148 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperation;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperationException;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperationExecutor;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.runtime.ContainerExecutionException;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.runtime.ContainerRuntimeContext;
+
+import java.util.List;
+
+import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.LinuxContainerRuntimeConstants.*;
+
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+public class DefaultLinuxContainerRuntime implements LinuxContainerRuntime {
+ private static final Log LOG = LogFactory
+ .getLog(DefaultLinuxContainerRuntime.class);
+ private Configuration conf;
+ private final PrivilegedOperationExecutor privilegedOperationExecutor;
+
+ public DefaultLinuxContainerRuntime(PrivilegedOperationExecutor
+ privilegedOperationExecutor) {
+ this.privilegedOperationExecutor = privilegedOperationExecutor;
+ }
+
+ @Override
+ public void initialize(Configuration conf)
+ throws ContainerExecutionException {
+ this.conf = conf;
+ }
+
+ @Override
+ public void prepareContainer(ContainerRuntimeContext ctx)
+ throws ContainerExecutionException {
+ //nothing to do here at the moment.
+ }
+
+ @Override
+ public void launchContainer(ContainerRuntimeContext ctx)
+ throws ContainerExecutionException {
+ Container container = ctx.getContainer();
+ PrivilegedOperation launchOp = new PrivilegedOperation(
+ PrivilegedOperation.OperationType.LAUNCH_CONTAINER, (String) null);
+
+ //All of these arguments are expected to be available in the runtime context
+ launchOp.appendArgs(ctx.getExecutionAttribute(RUN_AS_USER),
+ ctx.getExecutionAttribute(USER),
+ Integer.toString(PrivilegedOperation.
+ RunAsUserCommand.LAUNCH_CONTAINER.getValue()),
+ ctx.getExecutionAttribute(APPID),
+ ctx.getExecutionAttribute(CONTAINER_ID_STR),
+ ctx.getExecutionAttribute(CONTAINER_WORK_DIR).toString(),
+ ctx.getExecutionAttribute(NM_PRIVATE_CONTAINER_SCRIPT_PATH).toUri()
+ .getPath(),
+ ctx.getExecutionAttribute(NM_PRIVATE_TOKENS_PATH).toUri().getPath(),
+ ctx.getExecutionAttribute(PID_FILE_PATH).toString(),
+ StringUtils.join(PrivilegedOperation.LINUX_FILE_PATH_SEPARATOR,
+ ctx.getExecutionAttribute(LOCAL_DIRS)),
+ StringUtils.join(PrivilegedOperation.LINUX_FILE_PATH_SEPARATOR,
+ ctx.getExecutionAttribute(LOG_DIRS)),
+ ctx.getExecutionAttribute(RESOURCES_OPTIONS));
+
+ String tcCommandFile = ctx.getExecutionAttribute(TC_COMMAND_FILE);
+
+ if (tcCommandFile != null) {
+ launchOp.appendArgs(tcCommandFile);
+ }
+
+ //List<String> -> stored as List -> fetched/converted to List<String>
+ //we can't do better here thanks to type-erasure
+ @SuppressWarnings("unchecked")
+ List<String> prefixCommands = (List<String>) ctx.getExecutionAttribute(
+ CONTAINER_LAUNCH_PREFIX_COMMANDS);
+
+ try {
+ privilegedOperationExecutor.executePrivilegedOperation(prefixCommands,
+ launchOp, null, container.getLaunchContext().getEnvironment(),
+ false);
+ } catch (PrivilegedOperationException e) {
+ LOG.warn("Launch container failed. Exception: ", e);
+
+ throw new ContainerExecutionException("Launch container failed", e
+ .getExitCode(), e.getOutput(), e.getErrorOutput());
+ }
+ }
+
+ @Override
+ public void signalContainer(ContainerRuntimeContext ctx)
+ throws ContainerExecutionException {
+ Container container = ctx.getContainer();
+ PrivilegedOperation signalOp = new PrivilegedOperation(
+ PrivilegedOperation.OperationType.SIGNAL_CONTAINER, (String) null);
+
+ signalOp.appendArgs(ctx.getExecutionAttribute(RUN_AS_USER),
+ ctx.getExecutionAttribute(USER),
+ Integer.toString(PrivilegedOperation.RunAsUserCommand
+ .SIGNAL_CONTAINER.getValue()),
+ ctx.getExecutionAttribute(PID),
+ Integer.toString(ctx.getExecutionAttribute(SIGNAL).getValue()));
+
+ try {
+ PrivilegedOperationExecutor executor = PrivilegedOperationExecutor
+ .getInstance(conf);
+
+ executor.executePrivilegedOperation(null,
+ signalOp, null, container.getLaunchContext().getEnvironment(),
+ false);
+ } catch (PrivilegedOperationException e) {
+ LOG.warn("Signal container failed. Exception: ", e);
+
+ throw new ContainerExecutionException("Signal container failed", e
+ .getExitCode(), e.getOutput(), e.getErrorOutput());
+ }
+ }
+
+ @Override
+ public void reapContainer(ContainerRuntimeContext ctx)
+ throws ContainerExecutionException {
+
+ }
+}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/3e6fce91/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DelegatingLinuxContainerRuntime.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DelegatingLinuxContainerRuntime.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DelegatingLinuxContainerRuntime.java
new file mode 100644
index 0000000..a59415f
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DelegatingLinuxContainerRuntime.java
@@ -0,0 +1,110 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperationExecutor;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.runtime.ContainerExecutionException;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.runtime.ContainerRuntimeContext;
+
+import java.util.Map;
+
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+public class DelegatingLinuxContainerRuntime implements LinuxContainerRuntime {
+ private static final Log LOG = LogFactory
+ .getLog(DelegatingLinuxContainerRuntime.class);
+ private DefaultLinuxContainerRuntime defaultLinuxContainerRuntime;
+ private DockerLinuxContainerRuntime dockerLinuxContainerRuntime;
+
+ @Override
+ public void initialize(Configuration conf)
+ throws ContainerExecutionException {
+ PrivilegedOperationExecutor privilegedOperationExecutor =
+ PrivilegedOperationExecutor.getInstance(conf);
+
+ defaultLinuxContainerRuntime = new DefaultLinuxContainerRuntime(
+ privilegedOperationExecutor);
+ defaultLinuxContainerRuntime.initialize(conf);
+ dockerLinuxContainerRuntime = new DockerLinuxContainerRuntime(
+ privilegedOperationExecutor);
+ dockerLinuxContainerRuntime.initialize(conf);
+ }
+
+ private LinuxContainerRuntime pickContainerRuntime(Container container) {
+ Map<String, String> env = container.getLaunchContext().getEnvironment();
+ LinuxContainerRuntime runtime;
+
+ if (DockerLinuxContainerRuntime.isDockerContainerRequested(env)){
+ runtime = dockerLinuxContainerRuntime;
+ } else {
+ runtime = defaultLinuxContainerRuntime;
+ }
+
+ if (LOG.isInfoEnabled()) {
+ LOG.info("Using container runtime: " + runtime.getClass()
+ .getSimpleName());
+ }
+
+ return runtime;
+ }
+
+ @Override
+ public void prepareContainer(ContainerRuntimeContext ctx)
+ throws ContainerExecutionException {
+ Container container = ctx.getContainer();
+ LinuxContainerRuntime runtime = pickContainerRuntime(container);
+
+ runtime.prepareContainer(ctx);
+ }
+
+ @Override
+ public void launchContainer(ContainerRuntimeContext ctx)
+ throws ContainerExecutionException {
+ Container container = ctx.getContainer();
+ LinuxContainerRuntime runtime = pickContainerRuntime(container);
+
+ runtime.launchContainer(ctx);
+ }
+
+ @Override
+ public void signalContainer(ContainerRuntimeContext ctx)
+ throws ContainerExecutionException {
+ Container container = ctx.getContainer();
+ LinuxContainerRuntime runtime = pickContainerRuntime(container);
+
+ runtime.signalContainer(ctx);
+ }
+
+ @Override
+ public void reapContainer(ContainerRuntimeContext ctx)
+ throws ContainerExecutionException {
+ Container container = ctx.getContainer();
+ LinuxContainerRuntime runtime = pickContainerRuntime(container);
+
+ runtime.reapContainer(ctx);
+ }
+}
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/hadoop/blob/3e6fce91/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
new file mode 100644
index 0000000..2430a78
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
@@ -0,0 +1,273 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperation;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperationException;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperationExecutor;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources.CGroupsHandler;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources.ResourceHandlerException;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources.ResourceHandlerModule;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.docker.DockerClient;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.docker.DockerRunCommand;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.runtime.ContainerExecutionException;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.runtime.ContainerRuntimeConstants;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.runtime.ContainerRuntimeContext;
+
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+
+import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.LinuxContainerRuntimeConstants.*;
+
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+public class DockerLinuxContainerRuntime implements LinuxContainerRuntime {
+ private static final Log LOG = LogFactory.getLog(
+ DockerLinuxContainerRuntime.class);
+
+ @InterfaceAudience.Private
+ public static final String ENV_DOCKER_CONTAINER_IMAGE =
+ "YARN_CONTAINER_RUNTIME_DOCKER_IMAGE";
+ @InterfaceAudience.Private
+ public static final String ENV_DOCKER_CONTAINER_IMAGE_FILE =
+ "YARN_CONTAINER_RUNTIME_DOCKER_IMAGE_FILE";
+ @InterfaceAudience.Private
+ public static final String ENV_DOCKER_CONTAINER_RUN_OVERRIDE_DISABLE =
+ "YARN_CONTAINER_RUNTIME_DOCKER_RUN_OVERRIDE_DISABLE";
+
+
+ private Configuration conf;
+ private DockerClient dockerClient;
+ private PrivilegedOperationExecutor privilegedOperationExecutor;
+
+ public static boolean isDockerContainerRequested(
+ Map<String, String> env) {
+ if (env == null) {
+ return false;
+ }
+
+ String type = env.get(ContainerRuntimeConstants.ENV_CONTAINER_TYPE);
+
+ return type != null && type.equals("docker");
+ }
+
+ public DockerLinuxContainerRuntime(PrivilegedOperationExecutor
+ privilegedOperationExecutor) {
+ this.privilegedOperationExecutor = privilegedOperationExecutor;
+ }
+
+ @Override
+ public void initialize(Configuration conf)
+ throws ContainerExecutionException {
+ this.conf = conf;
+ dockerClient = new DockerClient(conf);
+ }
+
+ @Override
+ public void prepareContainer(ContainerRuntimeContext ctx)
+ throws ContainerExecutionException {
+
+ }
+
+ public void addCGroupParentIfRequired(String resourcesOptions,
+ String containerIdStr, DockerRunCommand runCommand)
+ throws ContainerExecutionException {
+ if (resourcesOptions.equals(
+ (PrivilegedOperation.CGROUP_ARG_PREFIX + PrivilegedOperation
+ .CGROUP_ARG_NO_TASKS))) {
+ if (LOG.isInfoEnabled()) {
+ LOG.info("no resource restrictions specified. not using docker's "
+ + "cgroup options");
+ }
+ } else {
+ if (LOG.isInfoEnabled()) {
+ LOG.info("using docker's cgroups options");
+ }
+
+ try {
+ CGroupsHandler cGroupsHandler = ResourceHandlerModule
+ .getCGroupsHandler(conf);
+ String cGroupPath = "/" + cGroupsHandler.getRelativePathForCGroup(
+ containerIdStr);
+
+ if (LOG.isInfoEnabled()) {
+ LOG.info("using cgroup parent: " + cGroupPath);
+ }
+
+ runCommand.setCGroupParent(cGroupPath);
+ } catch (ResourceHandlerException e) {
+ LOG.warn("unable to use cgroups handler. Exception: ", e);
+ throw new ContainerExecutionException(e);
+ }
+ }
+ }
+
+
+ @Override
+ public void launchContainer(ContainerRuntimeContext ctx)
+ throws ContainerExecutionException {
+ Container container = ctx.getContainer();
+ Map<String, String> environment = container.getLaunchContext()
+ .getEnvironment();
+ String imageName = environment.get(ENV_DOCKER_CONTAINER_IMAGE);
+
+ if (imageName == null) {
+ throw new ContainerExecutionException(ENV_DOCKER_CONTAINER_IMAGE
+ + " not set!");
+ }
+
+ String containerIdStr = container.getContainerId().toString();
+ String runAsUser = ctx.getExecutionAttribute(RUN_AS_USER);
+ Path containerWorkDir = ctx.getExecutionAttribute(CONTAINER_WORK_DIR);
+ //List<String> -> stored as List -> fetched/converted to List<String>
+ //we can't do better here thanks to type-erasure
+ @SuppressWarnings("unchecked")
+ List<String> localDirs = ctx.getExecutionAttribute(LOCAL_DIRS);
+ @SuppressWarnings("unchecked")
+ List<String> logDirs = ctx.getExecutionAttribute(LOG_DIRS);
+ DockerRunCommand runCommand = new DockerRunCommand(containerIdStr,
+ runAsUser, imageName)
+ .detachOnRun()
+ .setContainerWorkDir(containerWorkDir.toString())
+ .setNetworkType("host")
+ .addMountLocation("/etc/passwd", "/etc/password:ro");
+ List<String> allDirs = new ArrayList<>(localDirs);
+
+ allDirs.add(containerWorkDir.toString());
+ allDirs.addAll(logDirs);
+ for (String dir: allDirs) {
+ runCommand.addMountLocation(dir, dir);
+ }
+
+ String resourcesOpts = ctx.getExecutionAttribute(RESOURCES_OPTIONS);
+
+ /* Disabling docker's cgroup parent support for the time being. Docker
+ * needs to use a more recent libcontainer that supports net_cls. In
+ * addition we also need to revisit current cgroup creation in YARN.
+ */
+ //addCGroupParentIfRequired(resourcesOpts, containerIdStr, runCommand);
+
+ Path nmPrivateContainerScriptPath = ctx.getExecutionAttribute(
+ NM_PRIVATE_CONTAINER_SCRIPT_PATH);
+
+ String disableOverride = environment.get(
+ ENV_DOCKER_CONTAINER_RUN_OVERRIDE_DISABLE);
+
+ if (disableOverride != null && disableOverride.equals("true")) {
+ if (LOG.isInfoEnabled()) {
+ LOG.info("command override disabled");
+ }
+ } else {
+ List<String> overrideCommands = new ArrayList<>();
+ Path launchDst =
+ new Path(containerWorkDir, ContainerLaunch.CONTAINER_SCRIPT);
+
+ overrideCommands.add("bash");
+ overrideCommands.add(launchDst.toUri().getPath());
+ runCommand.setOverrideCommandWithArgs(overrideCommands);
+ }
+
+ String commandFile = dockerClient.writeCommandToTempFile(runCommand,
+ containerIdStr);
+ PrivilegedOperation launchOp = new PrivilegedOperation(
+ PrivilegedOperation.OperationType.LAUNCH_DOCKER_CONTAINER, (String)
+ null);
+
+ launchOp.appendArgs(runAsUser, ctx.getExecutionAttribute(USER),
+ Integer.toString(PrivilegedOperation
+ .RunAsUserCommand.LAUNCH_DOCKER_CONTAINER.getValue()),
+ ctx.getExecutionAttribute(APPID),
+ containerIdStr, containerWorkDir.toString(),
+ nmPrivateContainerScriptPath.toUri().getPath(),
+ ctx.getExecutionAttribute(NM_PRIVATE_TOKENS_PATH).toUri().getPath(),
+ ctx.getExecutionAttribute(PID_FILE_PATH).toString(),
+ StringUtils.join(PrivilegedOperation.LINUX_FILE_PATH_SEPARATOR,
+ localDirs),
+ StringUtils.join(PrivilegedOperation.LINUX_FILE_PATH_SEPARATOR,
+ logDirs),
+ commandFile,
+ resourcesOpts);
+
+ String tcCommandFile = ctx.getExecutionAttribute(TC_COMMAND_FILE);
+
+ if (tcCommandFile != null) {
+ launchOp.appendArgs(tcCommandFile);
+ }
+
+ try {
+ privilegedOperationExecutor.executePrivilegedOperation(null,
+ launchOp, null, container.getLaunchContext().getEnvironment(),
+ false);
+ } catch (PrivilegedOperationException e) {
+ LOG.warn("Launch container failed. Exception: ", e);
+
+ throw new ContainerExecutionException("Launch container failed", e
+ .getExitCode(), e.getOutput(), e.getErrorOutput());
+ }
+ }
+
+ @Override
+ public void signalContainer(ContainerRuntimeContext ctx)
+ throws ContainerExecutionException {
+ Container container = ctx.getContainer();
+ PrivilegedOperation signalOp = new PrivilegedOperation(
+ PrivilegedOperation.OperationType.SIGNAL_CONTAINER, (String) null);
+
+ signalOp.appendArgs(ctx.getExecutionAttribute(RUN_AS_USER),
+ ctx.getExecutionAttribute(USER),
+ Integer.toString(PrivilegedOperation
+ .RunAsUserCommand.SIGNAL_CONTAINER.getValue()),
+ ctx.getExecutionAttribute(PID),
+ Integer.toString(ctx.getExecutionAttribute(SIGNAL).getValue()));
+
+ try {
+ PrivilegedOperationExecutor executor = PrivilegedOperationExecutor
+ .getInstance(conf);
+
+ executor.executePrivilegedOperation(null,
+ signalOp, null, container.getLaunchContext().getEnvironment(),
+ false);
+ } catch (PrivilegedOperationException e) {
+ LOG.warn("Signal container failed. Exception: ", e);
+
+ throw new ContainerExecutionException("Signal container failed", e
+ .getExitCode(), e.getOutput(), e.getErrorOutput());
+ }
+ }
+
+ @Override
+ public void reapContainer(ContainerRuntimeContext ctx)
+ throws ContainerExecutionException {
+
+ }
+}
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/hadoop/blob/3e6fce91/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/LinuxContainerRuntime.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/LinuxContainerRuntime.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/LinuxContainerRuntime.java
new file mode 100644
index 0000000..38aea9d
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/LinuxContainerRuntime.java
@@ -0,0 +1,38 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.runtime.ContainerExecutionException;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.runtime.ContainerRuntime;
+
+/**
+ * An interface that Linux-specific container runtimes must implement.
+ */
+
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+public interface LinuxContainerRuntime extends ContainerRuntime {
+ void initialize(Configuration conf) throws ContainerExecutionException;
+}
+
http://git-wip-us.apache.org/repos/asf/hadoop/blob/3e6fce91/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/LinuxContainerRuntimeConstants.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/LinuxContainerRuntimeConstants.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/LinuxContainerRuntimeConstants.java
new file mode 100644
index 0000000..d2069a9
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/LinuxContainerRuntimeConstants.java
@@ -0,0 +1,69 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime;
+
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.yarn.server.nodemanager.ContainerExecutor;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.runtime.ContainerRuntimeContext.Attribute;
+
+import java.util.List;
+import java.util.Map;
+
+public final class LinuxContainerRuntimeConstants {
+ private LinuxContainerRuntimeConstants() {
+ }
+
+ public static final Attribute<Map> LOCALIZED_RESOURCES = Attribute
+ .attribute(Map.class, "localized_resources");
+ public static final Attribute<List> CONTAINER_LAUNCH_PREFIX_COMMANDS =
+ Attribute.attribute(List.class, "container_launch_prefix_commands");
+ public static final Attribute<String> RUN_AS_USER =
+ Attribute.attribute(String.class, "run_as_user");
+ public static final Attribute<String> USER = Attribute.attribute(String.class,
+ "user");
+ public static final Attribute<String> APPID =
+ Attribute.attribute(String.class, "appid");
+ public static final Attribute<String> CONTAINER_ID_STR = Attribute
+ .attribute(String.class, "container_id_str");
+ public static final Attribute<Path> CONTAINER_WORK_DIR = Attribute
+ .attribute(Path.class, "container_work_dir");
+ public static final Attribute<Path> NM_PRIVATE_CONTAINER_SCRIPT_PATH =
+ Attribute.attribute(Path.class, "nm_private_container_script_path");
+ public static final Attribute<Path> NM_PRIVATE_TOKENS_PATH = Attribute
+ .attribute(Path.class, "nm_private_tokens_path");
+ public static final Attribute<Path> PID_FILE_PATH = Attribute.attribute(
+ Path.class, "pid_file_path");
+ public static final Attribute<List> LOCAL_DIRS = Attribute.attribute(
+ List.class, "local_dirs");
+ public static final Attribute<List> LOG_DIRS = Attribute.attribute(
+ List.class, "log_dirs");
+ public static final Attribute<String> RESOURCES_OPTIONS = Attribute.attribute(
+ String.class, "resources_options");
+ public static final Attribute<String> TC_COMMAND_FILE = Attribute.attribute(
+ String.class, "tc_command_file");
+ public static final Attribute<String> CGROUP_RELATIVE_PATH = Attribute
+ .attribute(String.class, "cgroup_relative_path");
+
+ public static final Attribute<String> PID = Attribute.attribute(
+ String.class, "pid");
+ public static final Attribute<ContainerExecutor.Signal> SIGNAL = Attribute
+ .attribute(ContainerExecutor.Signal.class, "signal");
+}
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/hadoop/blob/3e6fce91/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerClient.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerClient.java
new file mode 100644
index 0000000..faf955f
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerClient.java
@@ -0,0 +1,82 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.docker;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.runtime.ContainerExecutionException;
+
+import java.io.File;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.io.OutputStreamWriter;
+import java.io.PrintWriter;
+import java.io.Writer;
+
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+public final class DockerClient {
+ private static final Log LOG = LogFactory.getLog(DockerClient.class);
+ private static final String TMP_FILE_PREFIX = "docker.";
+ private static final String TMP_FILE_SUFFIX = ".cmd";
+ private final String tmpDirPath;
+
+ public DockerClient(Configuration conf) throws ContainerExecutionException {
+
+ String tmpDirBase = conf.get("hadoop.tmp.dir");
+ if (tmpDirBase == null) {
+ throw new ContainerExecutionException("hadoop.tmp.dir not set!");
+ }
+ tmpDirPath = tmpDirBase + "/nm-docker-cmds";
+
+ File tmpDir = new File(tmpDirPath);
+ if (!(tmpDir.exists() || tmpDir.mkdirs())) {
+ LOG.warn("Unable to create directory: " + tmpDirPath);
+ throw new ContainerExecutionException("Unable to create directory: " +
+ tmpDirPath);
+ }
+ }
+
+ public String writeCommandToTempFile(DockerCommand cmd, String filePrefix)
+ throws ContainerExecutionException {
+ File dockerCommandFile = null;
+ try {
+ dockerCommandFile = File.createTempFile(TMP_FILE_PREFIX + filePrefix,
+ TMP_FILE_SUFFIX, new File(tmpDirPath));
+
+ Writer writer = new OutputStreamWriter(new FileOutputStream(dockerCommandFile),
+ "UTF-8");
+ PrintWriter printWriter = new PrintWriter(writer);
+ printWriter.print(cmd.getCommandWithArguments());
+ printWriter.close();
+
+ return dockerCommandFile.getAbsolutePath();
+ } catch (IOException e) {
+ LOG.warn("Unable to write docker command to temporary file!");
+ throw new ContainerExecutionException(e);
+ }
+ }
+}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/3e6fce91/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerCommand.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerCommand.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerCommand.java
new file mode 100644
index 0000000..3b76a5c
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerCommand.java
@@ -0,0 +1,66 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.docker;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.util.StringUtils;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+
+/**
+ * Represents a docker sub-command, e.g. 'run', 'load' or 'inspect'.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+public abstract class DockerCommand {
+ private final String command;
+ private final List<String> commandWithArguments;
+
+ protected DockerCommand(String command) {
+ this.command = command;
+ this.commandWithArguments = new ArrayList<>();
+ commandWithArguments.add(command);
+ }
+
+ /**
+ * Returns the docker sub-command string being used, e.g. 'run'.
+ */
+ public final String getCommandOption() {
+ return this.command;
+ }
+
+ /**
+ * Adds arguments to the command - this method is only meant for use by
+ * sub-classes.
+ * @param arguments the arguments to be added
+ */
+ protected final void addCommandArguments(String... arguments) {
+ this.commandWithArguments.addAll(Arrays.asList(arguments));
+ }
+
+ public String getCommandWithArguments() {
+ return StringUtils.join(" ", commandWithArguments);
+ }
+}
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/hadoop/blob/3e6fce91/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerLoadCommand.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerLoadCommand.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerLoadCommand.java
new file mode 100644
index 0000000..e4d92e0
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerLoadCommand.java
@@ -0,0 +1,30 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.docker;
+
+public class DockerLoadCommand extends DockerCommand {
+ private static final String LOAD_COMMAND = "load";
+
+ public DockerLoadCommand(String localImageFile) {
+ super(LOAD_COMMAND);
+ super.addCommandArguments("--i=" + localImageFile);
+ }
+}
[10/10] hadoop git commit: HADOOP-12254. test-patch.sh should run
findbugs if only findbugs-exclude.xml has changed (Kengo Seki via aw)
Posted by aw...@apache.org.
HADOOP-12254. test-patch.sh should run findbugs if only findbugs-exclude.xml has changed (Kengo Seki via aw)
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/03335bb4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/03335bb4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/03335bb4
Branch: refs/heads/HADOOP-12111
Commit: 03335bb4d5a047569519ee6775e6edd0c939cf03
Parents: 7c92f0f
Author: Allen Wittenauer <aw...@apache.org>
Authored: Tue Jul 28 10:44:37 2015 -0700
Committer: Allen Wittenauer <aw...@apache.org>
Committed: Tue Jul 28 10:44:37 2015 -0700
----------------------------------------------------------------------
dev-support/test-patch.d/findbugs.sh | 19 ++++++++++---------
1 file changed, 10 insertions(+), 9 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/03335bb4/dev-support/test-patch.d/findbugs.sh
----------------------------------------------------------------------
diff --git a/dev-support/test-patch.d/findbugs.sh b/dev-support/test-patch.d/findbugs.sh
index 1d7118b..4fa5428 100755
--- a/dev-support/test-patch.d/findbugs.sh
+++ b/dev-support/test-patch.d/findbugs.sh
@@ -20,13 +20,14 @@ FINDBUGS_WARNINGS_FAIL_PRECHECK=false
add_plugin findbugs
-function findbugs_file_filter
+function findbugs_filefilter
{
local filename=$1
if [[ ${BUILDTOOL} == maven
|| ${BUILDTOOL} == ant ]]; then
- if [[ ${filename} =~ \.java$ ]]; then
+ if [[ ${filename} =~ \.java$
+ || ${filename} =~ (^|/)findbugs-exclude.xml$ ]]; then
add_test findbugs
fi
fi
@@ -139,7 +140,7 @@ function findbugs_runner
savestop=$(stop_clock)
MODULE_STATUS_TIMER[${i}]=${savestop}
module_status ${i} -1 "" "${name}/${module} cannot run setBugDatabaseInfo from findbugs"
- ((retval = retval + 1))
+ ((result=result+1))
((i=i+1))
continue
fi
@@ -151,7 +152,7 @@ function findbugs_runner
savestop=$(stop_clock)
MODULE_STATUS_TIMER[${i}]=${savestop}
module_status ${i} -1 "" "${name}/${module} cannot run convertXmlToText from findbugs"
- ((result = result + 1))
+ ((result=result+1))
fi
if [[ -z ${FINDBUGS_VERSION}
@@ -182,7 +183,7 @@ function findbugs_preapply
local i=0
local warnings_file
local module_findbugs_warnings
- local results=0
+ local result=0
big_console_header "Pre-patch findbugs detection"
@@ -199,7 +200,7 @@ function findbugs_preapply
fi
findbugs_runner branch
- results=$?
+ result=$?
if [[ "${FINDBUGS_WARNINGS_FAIL_PRECHECK}" == "true" ]]; then
until [[ $i -eq ${#MODULE[@]} ]]; do
@@ -222,7 +223,7 @@ function findbugs_preapply
if [[ ${module_findbugs_warnings} -gt 0 ]] ; then
module_status ${i} -1 "branch-findbugs-${fn}.html" "${module} in ${PATCH_BRANCH} cannot run convertXmlToText from findbugs"
- ((results=results+1))
+ ((result=result+1))
fi
savestop=$(stop_clock)
MODULE_STATUS_TIMER[${i}]=${savestop}
@@ -231,7 +232,7 @@ function findbugs_preapply
modules_messages branch findbugs true
fi
- if [[ ${results} != 0 ]]; then
+ if [[ ${result} != 0 ]]; then
return 1
fi
return 0
@@ -256,7 +257,7 @@ function findbugs_postinstall
local firstpart
local secondpart
local i=0
- local results=0
+ local result=0
local savestop
big_console_header "Patch findbugs detection"
[09/10] hadoop git commit: HADOOP-12273. releasedocmaker.py fails
with stacktrace if --project option is not specified (Kengo Seki via aw)
Posted by aw...@apache.org.
HADOOP-12273. releasedocmaker.py fails with stacktrace if --project option is not specified (Kengo Seki via aw)
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7c92f0fe
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7c92f0fe
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7c92f0fe
Branch: refs/heads/HADOOP-12111
Commit: 7c92f0fe9176c55f7c16bc20211c854cdbfe0141
Parents: 0b6953a
Author: Allen Wittenauer <aw...@apache.org>
Authored: Tue Jul 28 10:43:22 2015 -0700
Committer: Allen Wittenauer <aw...@apache.org>
Committed: Tue Jul 28 10:43:22 2015 -0700
----------------------------------------------------------------------
dev-support/releasedocmaker.py | 8 ++------
1 file changed, 2 insertions(+), 6 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7c92f0fe/dev-support/releasedocmaker.py
----------------------------------------------------------------------
diff --git a/dev-support/releasedocmaker.py b/dev-support/releasedocmaker.py
index d2e5dda..c59ae99 100755
--- a/dev-support/releasedocmaker.py
+++ b/dev-support/releasedocmaker.py
@@ -400,12 +400,6 @@ def main():
(options, args) = parser.parse_args()
if (options.versions is None):
- options.versions = []
-
- if (len(args) > 2):
- options.versions.append(args[2])
-
- if (len(options.versions) <= 0):
parser.error("At least one version needs to be supplied")
proxy = urllib2.ProxyHandler()
@@ -413,6 +407,8 @@ def main():
urllib2.install_opener(opener)
projects = options.projects
+ if projects is None:
+ parser.error("At least one project needs to be supplied")
if (options.range is True):
versions = [ Version(v) for v in GetVersions(options.versions, projects).getlist() ]
[05/10] hadoop git commit: HADOOP-12245. References to misspelled
REMAINING_QUATA in FileSystemShell.md. Contributed by Gabor Liptak.
Posted by aw...@apache.org.
HADOOP-12245. References to misspelled REMAINING_QUATA in FileSystemShell.md. Contributed by Gabor Liptak.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e21dde50
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e21dde50
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e21dde50
Branch: refs/heads/HADOOP-12111
Commit: e21dde501aa9323b7f34b4bc4ba9d282ec4f2707
Parents: 3572ebd
Author: Akira Ajisaka <aa...@apache.org>
Authored: Tue Jul 28 11:33:10 2015 +0900
Committer: Akira Ajisaka <aa...@apache.org>
Committed: Tue Jul 28 11:33:10 2015 +0900
----------------------------------------------------------------------
hadoop-common-project/hadoop-common/CHANGES.txt | 3 +++
.../hadoop-common/src/site/markdown/FileSystemShell.md | 2 +-
2 files changed, 4 insertions(+), 1 deletion(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/e21dde50/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index baf39e3..aeaa5b9 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -1017,6 +1017,9 @@ Release 2.8.0 - UNRELEASED
HADOOP-12239. StorageException complaining " no lease ID" when updating
FolderLastModifiedTime in WASB. (Duo Xu via cnauroth)
+ HADOOP-12245. References to misspelled REMAINING_QUATA in
+ FileSystemShell.md. (Gabor Liptak via aajisaka)
+
Release 2.7.2 - UNRELEASED
INCOMPATIBLE CHANGES
http://git-wip-us.apache.org/repos/asf/hadoop/blob/e21dde50/hadoop-common-project/hadoop-common/src/site/markdown/FileSystemShell.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/FileSystemShell.md b/hadoop-common-project/hadoop-common/src/site/markdown/FileSystemShell.md
index 144cb73..fb89ca1 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/FileSystemShell.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/FileSystemShell.md
@@ -174,7 +174,7 @@ Usage: `hadoop fs -count [-q] [-h] [-v] <paths> `
Count the number of directories, files and bytes under the paths that match the specified file pattern. The output columns with -count are: DIR\_COUNT, FILE\_COUNT, CONTENT\_SIZE, PATHNAME
-The output columns with -count -q are: QUOTA, REMAINING\_QUATA, SPACE\_QUOTA, REMAINING\_SPACE\_QUOTA, DIR\_COUNT, FILE\_COUNT, CONTENT\_SIZE, PATHNAME
+The output columns with -count -q are: QUOTA, REMAINING\_QUOTA, SPACE\_QUOTA, REMAINING\_SPACE\_QUOTA, DIR\_COUNT, FILE\_COUNT, CONTENT\_SIZE, PATHNAME
The -h option shows sizes in human readable format.
[02/10] hadoop git commit: YARN-3853. Add docker container runtime
support to LinuxContainterExecutor. Contributed by Sidharta Seethana.
Posted by aw...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/3e6fce91/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerRunCommand.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerRunCommand.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerRunCommand.java
new file mode 100644
index 0000000..f9a890e
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerRunCommand.java
@@ -0,0 +1,107 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * /
+ */
+
+package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.docker;
+
+import org.apache.hadoop.util.StringUtils;
+
+import java.util.ArrayList;
+import java.util.List;
+
+public class DockerRunCommand extends DockerCommand {
+ private static final String RUN_COMMAND = "run";
+ private final String image;
+  private List<String> overrideCommandWithArgs;
+
+ /** The following are mandatory: */
+ public DockerRunCommand(String containerId, String user, String image) {
+ super(RUN_COMMAND);
+ super.addCommandArguments("--name=" + containerId, "--user=" + user);
+ this.image = image;
+ }
+
+ public DockerRunCommand removeContainerOnExit() {
+ super.addCommandArguments("--rm");
+ return this;
+ }
+
+ public DockerRunCommand detachOnRun() {
+ super.addCommandArguments("-d");
+ return this;
+ }
+
+ public DockerRunCommand setContainerWorkDir(String workdir) {
+ super.addCommandArguments("--workdir=" + workdir);
+ return this;
+ }
+
+ public DockerRunCommand setNetworkType(String type) {
+ super.addCommandArguments("--net=" + type);
+ return this;
+ }
+
+ public DockerRunCommand addMountLocation(String sourcePath, String
+ destinationPath) {
+ super.addCommandArguments("-v", sourcePath + ":" + destinationPath);
+ return this;
+ }
+
+ public DockerRunCommand setCGroupParent(String parentPath) {
+ super.addCommandArguments("--cgroup-parent=" + parentPath);
+ return this;
+ }
+
+ public DockerRunCommand addDevice(String sourceDevice, String
+ destinationDevice) {
+ super.addCommandArguments("--device=" + sourceDevice + ":" +
+ destinationDevice);
+ return this;
+ }
+
+ public DockerRunCommand enableDetach() {
+ super.addCommandArguments("--detach=true");
+ return this;
+ }
+
+ public DockerRunCommand disableDetach() {
+ super.addCommandArguments("--detach=false");
+ return this;
+ }
+
+ public DockerRunCommand setOverrideCommandWithArgs(
+ List<String> overrideCommandWithArgs) {
+    this.overrideCommandWithArgs = overrideCommandWithArgs;
+ return this;
+ }
+
+ @Override
+ public String getCommandWithArguments() {
+ List<String> argList = new ArrayList<>();
+
+ argList.add(super.getCommandWithArguments());
+ argList.add(image);
+
+    if (overrideCommandWithArgs != null) {
+      argList.addAll(overrideCommandWithArgs);
+ }
+
+ return StringUtils.join(" ", argList);
+ }
+}
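For illustration, a minimal sketch of how the fluent DockerRunCommand API
above composes a docker run invocation; the container id, user, image, and
mount below are invented values, and DockerCommand (the superclass, not shown
here) is assumed to collect the arguments joined by spaces:

    DockerRunCommand cmd =
        new DockerRunCommand("container_01", "nobody", "busybox:latest")
            .detachOnRun()                             // appends "-d"
            .setContainerWorkDir("/work")              // "--workdir=/work"
            .setNetworkType("host")                    // "--net=host"
            .addMountLocation("/var/lib", "/var/lib"); // "-v /var/lib:/var/lib"

    // Expected rendering, per getCommandWithArguments() above:
    // run --name=container_01 --user=nobody -d --workdir=/work --net=host
    //     -v /var/lib:/var/lib busybox:latest
    String rendered = cmd.getCommandWithArguments();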
http://git-wip-us.apache.org/repos/asf/hadoop/blob/3e6fce91/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/runtime/ContainerExecutionException.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/runtime/ContainerExecutionException.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/runtime/ContainerExecutionException.java
new file mode 100644
index 0000000..1fbece2
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/runtime/ContainerExecutionException.java
@@ -0,0 +1,85 @@
+/*
+ * *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * /
+ */
+
+package org.apache.hadoop.yarn.server.nodemanager.containermanager.runtime;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.yarn.exceptions.YarnException;
+
+/** Exception raised within a container runtime implementation. 'Runtime' is
+ * not used in the class name to avoid confusion with
+ * java.lang.RuntimeException.
+ */
+
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+public class ContainerExecutionException extends YarnException {
+ private static final long serialVersionUID = 1L;
+ private static final Integer EXIT_CODE_UNSET = -1;
+ private static final String OUTPUT_UNSET = "<unknown>";
+
+ private Integer exitCode;
+ private String output;
+ private String errorOutput;
+
+ public ContainerExecutionException(String message) {
+ super(message);
+ exitCode = EXIT_CODE_UNSET;
+ output = OUTPUT_UNSET;
+ errorOutput = OUTPUT_UNSET;
+ }
+
+ public ContainerExecutionException(Throwable throwable) {
+ super(throwable);
+ exitCode = EXIT_CODE_UNSET;
+ output = OUTPUT_UNSET;
+ errorOutput = OUTPUT_UNSET;
+ }
+
+
+  public ContainerExecutionException(String message, Integer exitCode,
+      String output, String errorOutput) {
+ super(message);
+ this.exitCode = exitCode;
+ this.output = output;
+ this.errorOutput = errorOutput;
+ }
+
+  public ContainerExecutionException(Throwable cause, Integer exitCode,
+      String output, String errorOutput) {
+ super(cause);
+ this.exitCode = exitCode;
+ this.output = output;
+ this.errorOutput = errorOutput;
+ }
+
+ public Integer getExitCode() {
+ return exitCode;
+ }
+
+ public String getOutput() {
+ return output;
+ }
+
+ public String getErrorOutput() {
+ return errorOutput;
+ }
+
+}
\ No newline at end of file
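A short usage sketch for the exception above (not from the commit; the exit
code and captured streams are hypothetical values from a failed shell-out):

    // Raising: preserve the child's exit code and captured streams.
    int exitCode = 125;
    String stdout = "";
    String stderr = "Unable to find image 'busybox:latest' locally";
    throw new ContainerExecutionException(
        "Launch container failed", exitCode, stdout, stderr);

    // Handling: the accessors expose the diagnostics upstream, e.g.
    // catch (ContainerExecutionException e) {
    //   LOG.warn("exit=" + e.getExitCode()
    //       + ", stderr=" + e.getErrorOutput());
    // }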
http://git-wip-us.apache.org/repos/asf/hadoop/blob/3e6fce91/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/runtime/ContainerRuntime.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/runtime/ContainerRuntime.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/runtime/ContainerRuntime.java
new file mode 100644
index 0000000..e05f3fc
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/runtime/ContainerRuntime.java
@@ -0,0 +1,50 @@
+/*
+ * *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * /
+ */
+
+package org.apache.hadoop.yarn.server.nodemanager.containermanager.runtime;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+/** An abstraction for various container runtime implementations. Examples
+ * include Process Tree, Docker, Appc runtimes, etc. These implementations
+ * are meant for low-level OS container support; dependencies on
+ * higher-level nodemanager constructs should be avoided.
+ */
+
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+public interface ContainerRuntime {
+  /** Prepare a container to be ready for launch. */
+ void prepareContainer(ContainerRuntimeContext ctx)
+ throws ContainerExecutionException;
+
+ /** Launch a container. */
+ void launchContainer(ContainerRuntimeContext ctx)
+ throws ContainerExecutionException;
+
+  /** Signal a container: request termination, check status, etc. */
+ void signalContainer(ContainerRuntimeContext ctx)
+ throws ContainerExecutionException;
+
+ /** Any container cleanup that may be required. */
+ void reapContainer(ContainerRuntimeContext ctx)
+ throws ContainerExecutionException;
+}
\ No newline at end of file
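As a sketch of the contract, a trivial (hypothetical) implementation of the
interface; real runtimes would shell out via PrivilegedOperationExecutor
instead of no-ops:

    public class NoOpContainerRuntime implements ContainerRuntime {
      @Override
      public void prepareContainer(ContainerRuntimeContext ctx)
          throws ContainerExecutionException {
        // nothing to prepare for this runtime
      }

      @Override
      public void launchContainer(ContainerRuntimeContext ctx)
          throws ContainerExecutionException {
        throw new ContainerExecutionException("launch not supported");
      }

      @Override
      public void signalContainer(ContainerRuntimeContext ctx)
          throws ContainerExecutionException {
        // would deliver the requested signal to the container's process
      }

      @Override
      public void reapContainer(ContainerRuntimeContext ctx)
          throws ContainerExecutionException {
        // no resources to clean up
      }
    }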
http://git-wip-us.apache.org/repos/asf/hadoop/blob/3e6fce91/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/runtime/ContainerRuntimeConstants.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/runtime/ContainerRuntimeConstants.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/runtime/ContainerRuntimeConstants.java
new file mode 100644
index 0000000..4473856
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/runtime/ContainerRuntimeConstants.java
@@ -0,0 +1,33 @@
+/*
+ * *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * /
+ */
+
+package org.apache.hadoop.yarn.server.nodemanager.containermanager.runtime;
+
+import org.apache.hadoop.classification.InterfaceAudience.Private;
+
+public class ContainerRuntimeConstants {
+
+ /* Switch container runtimes. Work in progress: These
+ * parameters may be changed/removed in the future. */
+
+ @Private
+ public static final String ENV_CONTAINER_TYPE =
+ "YARN_CONTAINER_RUNTIME_TYPE";
+}
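A sketch of how the constant is consumed when choosing a runtime per
container; the delegation shown here is an assumption, though
isDockerContainerRequested() is exercised the same way in the tests further
below:

    Map<String, String> env = container.getLaunchContext().getEnvironment();
    // YARN_CONTAINER_RUNTIME_TYPE=docker requests the Docker runtime.
    if (DockerLinuxContainerRuntime.isDockerContainerRequested(env)) {
      // delegate to DockerLinuxContainerRuntime
    } else {
      // delegate to DefaultLinuxContainerRuntime
    }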
http://git-wip-us.apache.org/repos/asf/hadoop/blob/3e6fce91/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/runtime/ContainerRuntimeContext.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/runtime/ContainerRuntimeContext.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/runtime/ContainerRuntimeContext.java
new file mode 100644
index 0000000..4194b99
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/runtime/ContainerRuntimeContext.java
@@ -0,0 +1,105 @@
+/*
+ * *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * /
+ */
+
+package org.apache.hadoop.yarn.server.nodemanager.containermanager.runtime;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;
+
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Map;
+
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+public final class ContainerRuntimeContext {
+ private final Container container;
+ private final Map<Attribute<?>, Object> executionAttributes;
+
+  /** An attribute class that provides better type safety than a plain
+   * map of string to object.
+   * @param <T> the type of the attribute's value
+   */
+ public static final class Attribute<T> {
+ private final Class<T> valueClass;
+ private final String id;
+
+ private Attribute(Class<T> valueClass, String id) {
+ this.valueClass = valueClass;
+ this.id = id;
+ }
+
+ @Override
+ public int hashCode() {
+ return valueClass.hashCode() + 31 * id.hashCode();
+ }
+
+ @Override
+ public boolean equals(Object obj) {
+      if (!(obj instanceof Attribute)) {
+ return false;
+ }
+
+ Attribute<?> attribute = (Attribute<?>) obj;
+
+ return valueClass.equals(attribute.valueClass) && id.equals(attribute.id);
+    }
+
+    public static <T> Attribute<T> attribute(Class<T> valueClass, String id) {
+ return new Attribute<T>(valueClass, id);
+ }
+ }
+
+ public static final class Builder {
+ private final Container container;
+ private Map<Attribute<?>, Object> executionAttributes;
+
+ public Builder(Container container) {
+ executionAttributes = new HashMap<>();
+ this.container = container;
+ }
+
+ public <E> Builder setExecutionAttribute(Attribute<E> attribute, E value) {
+ this.executionAttributes.put(attribute, attribute.valueClass.cast(value));
+ return this;
+ }
+
+ public ContainerRuntimeContext build() {
+ return new ContainerRuntimeContext(this);
+ }
+ }
+
+ private ContainerRuntimeContext(Builder builder) {
+ this.container = builder.container;
+ this.executionAttributes = builder.executionAttributes;
+ }
+
+ public Container getContainer() {
+ return this.container;
+ }
+
+ public Map<Attribute<?>, Object> getExecutionAttributes() {
+ return Collections.unmodifiableMap(this.executionAttributes);
+ }
+
+ public <E> E getExecutionAttribute(Attribute<E> attribute) {
+ return attribute.valueClass.cast(executionAttributes.get(attribute));
+ }
+}
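A brief sketch (invented keys and values, usual imports assumed) showing how
the typed Attribute keys avoid caller-side casts on both write and read:

    Attribute<String> USER = Attribute.attribute(String.class, "user");
    Attribute<Path> PID_FILE = Attribute.attribute(Path.class, "pidFile");

    ContainerRuntimeContext ctx =
        new ContainerRuntimeContext.Builder(container)
            .setExecutionAttribute(USER, "nobody")
            .setExecutionAttribute(PID_FILE, new Path("/tmp/pid.txt"))
            .build();

    String user = ctx.getExecutionAttribute(USER);      // no explicit cast
    Path pidFile = ctx.getExecutionAttribute(PID_FILE); // typed on read too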
http://git-wip-us.apache.org/repos/asf/hadoop/blob/3e6fce91/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/executor/ContainerLivenessContext.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/executor/ContainerLivenessContext.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/executor/ContainerLivenessContext.java
index acadae9..43113ef 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/executor/ContainerLivenessContext.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/executor/ContainerLivenessContext.java
@@ -22,6 +22,7 @@ package org.apache.hadoop.yarn.server.nodemanager.executor;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;
/**
* Encapsulates information required for container liveness checks.
@@ -30,16 +31,23 @@ import org.apache.hadoop.classification.InterfaceStability;
@InterfaceAudience.Private
@InterfaceStability.Unstable
public final class ContainerLivenessContext {
+ private final Container container;
private final String user;
private final String pid;
public static final class Builder {
+ private Container container;
private String user;
private String pid;
public Builder() {
}
+ public Builder setContainer(Container container) {
+ this.container = container;
+ return this;
+ }
+
public Builder setUser(String user) {
this.user = user;
return this;
@@ -56,10 +64,15 @@ public final class ContainerLivenessContext {
}
private ContainerLivenessContext(Builder builder) {
+ this.container = builder.container;
this.user = builder.user;
this.pid = builder.pid;
}
+ public Container getContainer() {
+ return this.container;
+ }
+
public String getUser() {
return this.user;
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/3e6fce91/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/executor/ContainerReacquisitionContext.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/executor/ContainerReacquisitionContext.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/executor/ContainerReacquisitionContext.java
index 8adcab7..d93cdaf 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/executor/ContainerReacquisitionContext.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/executor/ContainerReacquisitionContext.java
@@ -23,6 +23,7 @@ package org.apache.hadoop.yarn.server.nodemanager.executor;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;
/**
* Encapsulates information required for container reacquisition.
@@ -31,16 +32,23 @@ import org.apache.hadoop.yarn.api.records.ContainerId;
@InterfaceAudience.Private
@InterfaceStability.Unstable
public final class ContainerReacquisitionContext {
+ private final Container container;
private final String user;
private final ContainerId containerId;
public static final class Builder {
+ private Container container;
private String user;
private ContainerId containerId;
public Builder() {
}
+ public Builder setContainer(Container container) {
+ this.container = container;
+ return this;
+ }
+
public Builder setUser(String user) {
this.user = user;
return this;
@@ -57,10 +65,15 @@ public final class ContainerReacquisitionContext {
}
private ContainerReacquisitionContext(Builder builder) {
+ this.container = builder.container;
this.user = builder.user;
this.containerId = builder.containerId;
}
+ public Container getContainer() {
+ return this.container;
+ }
+
public String getUser() {
return this.user;
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/3e6fce91/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/executor/ContainerSignalContext.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/executor/ContainerSignalContext.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/executor/ContainerSignalContext.java
index cc40af5..56b571b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/executor/ContainerSignalContext.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/executor/ContainerSignalContext.java
@@ -23,6 +23,7 @@ package org.apache.hadoop.yarn.server.nodemanager.executor;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.yarn.server.nodemanager.ContainerExecutor.Signal;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;
/**
* Encapsulates information required for container signaling.
@@ -31,11 +32,13 @@ import org.apache.hadoop.yarn.server.nodemanager.ContainerExecutor.Signal;
@InterfaceAudience.Private
@InterfaceStability.Unstable
public final class ContainerSignalContext {
+ private final Container container;
private final String user;
private final String pid;
private final Signal signal;
public static final class Builder {
+ private Container container;
private String user;
private String pid;
private Signal signal;
@@ -43,6 +46,11 @@ public final class ContainerSignalContext {
public Builder() {
}
+ public Builder setContainer(Container container) {
+ this.container = container;
+ return this;
+ }
+
public Builder setUser(String user) {
this.user = user;
return this;
@@ -64,11 +72,16 @@ public final class ContainerSignalContext {
}
private ContainerSignalContext(Builder builder) {
+ this.container = builder.container;
this.user = builder.user;
this.pid = builder.pid;
this.signal = builder.signal;
}
+ public Container getContainer() {
+ return this.container;
+ }
+
public String getUser() {
return this.user;
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/3e6fce91/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/executor/ContainerStartContext.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/executor/ContainerStartContext.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/executor/ContainerStartContext.java
index 7dfff02..ffcc519 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/executor/ContainerStartContext.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/executor/ContainerStartContext.java
@@ -25,7 +25,9 @@ import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;
+import java.util.Collections;
import java.util.List;
+import java.util.Map;
/**
* Encapsulates information required for starting/launching containers.
@@ -35,6 +37,7 @@ import java.util.List;
@InterfaceStability.Unstable
public final class ContainerStartContext {
private final Container container;
+ private final Map<Path, List<String>> localizedResources;
private final Path nmPrivateContainerScriptPath;
private final Path nmPrivateTokensPath;
private final String user;
@@ -45,6 +48,7 @@ public final class ContainerStartContext {
public static final class Builder {
private Container container;
+ private Map<Path, List<String>> localizedResources;
private Path nmPrivateContainerScriptPath;
private Path nmPrivateTokensPath;
private String user;
@@ -61,6 +65,12 @@ public final class ContainerStartContext {
return this;
}
+    public Builder setLocalizedResources(
+        Map<Path, List<String>> localizedResources) {
+ this.localizedResources = localizedResources;
+ return this;
+ }
+
public Builder setNmPrivateContainerScriptPath(
Path nmPrivateContainerScriptPath) {
this.nmPrivateContainerScriptPath = nmPrivateContainerScriptPath;
@@ -104,6 +114,7 @@ public final class ContainerStartContext {
private ContainerStartContext(Builder builder) {
this.container = builder.container;
+ this.localizedResources = builder.localizedResources;
this.nmPrivateContainerScriptPath = builder.nmPrivateContainerScriptPath;
this.nmPrivateTokensPath = builder.nmPrivateTokensPath;
this.user = builder.user;
@@ -117,6 +128,14 @@ public final class ContainerStartContext {
return this.container;
}
+ public Map<Path, List<String>> getLocalizedResources() {
+ if (this.localizedResources != null) {
+ return Collections.unmodifiableMap(this.localizedResources);
+ } else {
+ return null;
+ }
+ }
+
public Path getNmPrivateContainerScriptPath() {
return this.nmPrivateContainerScriptPath;
}
@@ -138,10 +157,10 @@ public final class ContainerStartContext {
}
public List<String> getLocalDirs() {
- return this.localDirs;
+ return Collections.unmodifiableList(this.localDirs);
}
public List<String> getLogDirs() {
- return this.logDirs;
+ return Collections.unmodifiableList(this.logDirs);
}
}
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/hadoop/blob/3e6fce91/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLinuxContainerExecutorWithMocks.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLinuxContainerExecutorWithMocks.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLinuxContainerExecutorWithMocks.java
index 82b7fd9..0ef788b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLinuxContainerExecutorWithMocks.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLinuxContainerExecutorWithMocks.java
@@ -32,6 +32,8 @@ import java.io.FileReader;
import java.io.IOException;
import java.io.LineNumberReader;
import java.net.InetSocketAddress;
+import java.nio.file.Files;
+import java.nio.file.Paths;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
@@ -50,6 +52,10 @@ import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerDiagnosticsUpdateEvent;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperation;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperationExecutor;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.DefaultLinuxContainerRuntime;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.LinuxContainerRuntime;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.runtime.ContainerExecutionException;
import org.apache.hadoop.yarn.server.nodemanager.executor.ContainerSignalContext;
import org.apache.hadoop.yarn.server.nodemanager.executor.ContainerStartContext;
import org.apache.hadoop.yarn.server.nodemanager.executor.DeletionAsUserContext;
@@ -61,11 +67,19 @@ import org.junit.Test;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;
+import static java.nio.file.StandardCopyOption.REPLACE_EXISTING;
+
public class TestLinuxContainerExecutorWithMocks {
private static final Log LOG = LogFactory
.getLog(TestLinuxContainerExecutorWithMocks.class);
+ private static final String MOCK_EXECUTOR =
+ "./src/test/resources/mock-container-executor";
+ private static final String MOCK_EXECUTOR_WITH_ERROR =
+ "./src/test/resources/mock-container-executer-with-error";
+
+ private String tmpMockExecutor;
private LinuxContainerExecutor mockExec = null;
private final File mockParamFile = new File("./params.txt");
private LocalDirsHandlerService dirsHandler;
@@ -88,20 +102,42 @@ public class TestLinuxContainerExecutorWithMocks {
reader.close();
return ret;
}
-
+
+ private void setupMockExecutor(String executorPath, Configuration conf)
+ throws IOException {
+    // We always copy to tmpMockExecutor, since
+    // PrivilegedOperationExecutor can only be initialized once.
+
+ Files.copy(Paths.get(executorPath), Paths.get(tmpMockExecutor),
+ REPLACE_EXISTING);
+
+ File executor = new File(tmpMockExecutor);
+
+ if (!FileUtil.canExecute(executor)) {
+ FileUtil.setExecutable(executor, true);
+ }
+ String executorAbsolutePath = executor.getAbsolutePath();
+ conf.set(YarnConfiguration.NM_LINUX_CONTAINER_EXECUTOR_PATH,
+ executorAbsolutePath);
+ }
+
@Before
- public void setup() {
+ public void setup() throws IOException, ContainerExecutionException {
assumeTrue(!Path.WINDOWS);
- File f = new File("./src/test/resources/mock-container-executor");
- if(!FileUtil.canExecute(f)) {
- FileUtil.setExecutable(f, true);
- }
- String executorPath = f.getAbsolutePath();
+
+ tmpMockExecutor = System.getProperty("test.build.data") +
+ "/tmp-mock-container-executor";
+
Configuration conf = new Configuration();
- conf.set(YarnConfiguration.NM_LINUX_CONTAINER_EXECUTOR_PATH, executorPath);
- mockExec = new LinuxContainerExecutor();
+ LinuxContainerRuntime linuxContainerRuntime;
+
+ setupMockExecutor(MOCK_EXECUTOR, conf);
+ linuxContainerRuntime = new DefaultLinuxContainerRuntime(
+ PrivilegedOperationExecutor.getInstance(conf));
dirsHandler = new LocalDirsHandlerService();
dirsHandler.init(conf);
+ linuxContainerRuntime.initialize(conf);
+ mockExec = new LinuxContainerExecutor(linuxContainerRuntime);
mockExec.setConf(conf);
}
@@ -114,7 +150,7 @@ public class TestLinuxContainerExecutorWithMocks {
public void testContainerLaunch() throws IOException {
String appSubmitter = "nobody";
String cmd = String.valueOf(
- LinuxContainerExecutor.Commands.LAUNCH_CONTAINER.getValue());
+ PrivilegedOperation.RunAsUserCommand.LAUNCH_CONTAINER.getValue());
String appId = "APP_ID";
String containerId = "CONTAINER_ID";
Container container = mock(Container.class);
@@ -161,13 +197,8 @@ public class TestLinuxContainerExecutorWithMocks {
public void testContainerLaunchWithPriority() throws IOException {
// set the scheduler priority to make sure still works with nice -n prio
- File f = new File("./src/test/resources/mock-container-executor");
- if (!FileUtil.canExecute(f)) {
- FileUtil.setExecutable(f, true);
- }
- String executorPath = f.getAbsolutePath();
Configuration conf = new Configuration();
- conf.set(YarnConfiguration.NM_LINUX_CONTAINER_EXECUTOR_PATH, executorPath);
+ setupMockExecutor(MOCK_EXECUTOR, conf);
conf.setInt(YarnConfiguration.NM_CONTAINER_EXECUTOR_SCHED_PRIORITY, 2);
mockExec.setConf(conf);
@@ -175,8 +206,8 @@ public class TestLinuxContainerExecutorWithMocks {
mockExec.addSchedPriorityCommand(command);
assertEquals("first should be nice", "nice", command.get(0));
assertEquals("second should be -n", "-n", command.get(1));
- assertEquals("third should be the priority", Integer.toString(2),
- command.get(2));
+ assertEquals("third should be the priority", Integer.toString(2),
+ command.get(2));
testContainerLaunch();
}
@@ -185,11 +216,10 @@ public class TestLinuxContainerExecutorWithMocks {
public void testLaunchCommandWithoutPriority() throws IOException {
// make sure the command doesn't contain the nice -n since priority
// not specified
- List<String> command = new ArrayList<String>();
+ List<String> command = new ArrayList<String>();
mockExec.addSchedPriorityCommand(command);
assertEquals("addSchedPriority should be empty", 0, command.size());
}
-
@Test (timeout = 5000)
public void testStartLocalizer() throws IOException {
@@ -232,20 +262,25 @@ public class TestLinuxContainerExecutorWithMocks {
@Test
- public void testContainerLaunchError() throws IOException {
+ public void testContainerLaunchError()
+ throws IOException, ContainerExecutionException {
// reinitialize executer
- File f = new File("./src/test/resources/mock-container-executer-with-error");
- if (!FileUtil.canExecute(f)) {
- FileUtil.setExecutable(f, true);
- }
- String executorPath = f.getAbsolutePath();
Configuration conf = new Configuration();
- conf.set(YarnConfiguration.NM_LINUX_CONTAINER_EXECUTOR_PATH, executorPath);
+ setupMockExecutor(MOCK_EXECUTOR_WITH_ERROR, conf);
conf.set(YarnConfiguration.NM_LOCAL_DIRS, "file:///bin/echo");
conf.set(YarnConfiguration.NM_LOG_DIRS, "file:///dev/null");
- mockExec = spy(new LinuxContainerExecutor());
+
+ LinuxContainerExecutor exec;
+    LinuxContainerRuntime linuxContainerRuntime =
+        new DefaultLinuxContainerRuntime(
+            PrivilegedOperationExecutor.getInstance(conf));
+
+ linuxContainerRuntime.initialize(conf);
+ exec = new LinuxContainerExecutor(linuxContainerRuntime);
+
+ mockExec = spy(exec);
doAnswer(
new Answer() {
@Override
@@ -264,7 +299,7 @@ public class TestLinuxContainerExecutorWithMocks {
String appSubmitter = "nobody";
String cmd = String
- .valueOf(LinuxContainerExecutor.Commands.LAUNCH_CONTAINER.getValue());
+ .valueOf(PrivilegedOperation.RunAsUserCommand.LAUNCH_CONTAINER.getValue());
String appId = "APP_ID";
String containerId = "CONTAINER_ID";
Container container = mock(Container.class);
@@ -300,6 +335,7 @@ public class TestLinuxContainerExecutorWithMocks {
Path pidFile = new Path(workDir, "pid.txt");
mockExec.activateContainer(cId, pidFile);
+
int ret = mockExec.launchContainer(new ContainerStartContext.Builder()
.setContainer(container)
.setNmPrivateContainerScriptPath(scriptPath)
@@ -331,16 +367,23 @@ public class TestLinuxContainerExecutorWithMocks {
}
-
@Test
public void testContainerKill() throws IOException {
String appSubmitter = "nobody";
String cmd = String.valueOf(
- LinuxContainerExecutor.Commands.SIGNAL_CONTAINER.getValue());
+ PrivilegedOperation.RunAsUserCommand.SIGNAL_CONTAINER.getValue());
ContainerExecutor.Signal signal = ContainerExecutor.Signal.QUIT;
String sigVal = String.valueOf(signal.getValue());
-
+
+ Container container = mock(Container.class);
+ ContainerId cId = mock(ContainerId.class);
+ ContainerLaunchContext context = mock(ContainerLaunchContext.class);
+
+ when(container.getContainerId()).thenReturn(cId);
+ when(container.getLaunchContext()).thenReturn(context);
+
mockExec.signalContainer(new ContainerSignalContext.Builder()
+ .setContainer(container)
.setUser(appSubmitter)
.setPid("1000")
.setSignal(signal)
@@ -354,7 +397,7 @@ public class TestLinuxContainerExecutorWithMocks {
public void testDeleteAsUser() throws IOException {
String appSubmitter = "nobody";
String cmd = String.valueOf(
- LinuxContainerExecutor.Commands.DELETE_AS_USER.getValue());
+ PrivilegedOperation.RunAsUserCommand.DELETE_AS_USER.getValue());
Path dir = new Path("/tmp/testdir");
Path testFile = new Path("testfile");
Path baseDir0 = new Path("/grid/0/BaseDir");
@@ -396,14 +439,9 @@ public class TestLinuxContainerExecutorWithMocks {
Arrays.asList(YarnConfiguration.DEFAULT_NM_NONSECURE_MODE_LOCAL_USER,
appSubmitter, cmd, "", baseDir0.toString(), baseDir1.toString()),
readMockParams());
-
- File f = new File("./src/test/resources/mock-container-executer-with-error");
- if (!FileUtil.canExecute(f)) {
- FileUtil.setExecutable(f, true);
- }
- String executorPath = f.getAbsolutePath();
Configuration conf = new Configuration();
- conf.set(YarnConfiguration.NM_LINUX_CONTAINER_EXECUTOR_PATH, executorPath);
+ setupMockExecutor(MOCK_EXECUTOR, conf);
mockExec.setConf(conf);
mockExec.deleteAsUser(new DeletionAsUserContext.Builder()
http://git-wip-us.apache.org/repos/asf/hadoop/blob/3e6fce91/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/privileged/TestPrivilegedOperationExecutor.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/privileged/TestPrivilegedOperationExecutor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/privileged/TestPrivilegedOperationExecutor.java
index 8f297ed..849dbab 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/privileged/TestPrivilegedOperationExecutor.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/privileged/TestPrivilegedOperationExecutor.java
@@ -118,7 +118,7 @@ public class TestPrivilegedOperationExecutor {
PrivilegedOperationExecutor exec = PrivilegedOperationExecutor
.getInstance(confWithExecutorPath);
PrivilegedOperation op = new PrivilegedOperation(PrivilegedOperation
- .OperationType.LAUNCH_CONTAINER, (String) null);
+ .OperationType.TC_MODIFY_STATE, (String) null);
String[] cmdArray = exec.getPrivilegedOperationExecutionCommand(null, op);
//No arguments added - so the resulting array should consist of
@@ -127,10 +127,8 @@ public class TestPrivilegedOperationExecutor {
Assert.assertEquals(customExecutorPath, cmdArray[0]);
Assert.assertEquals(op.getOperationType().getOption(), cmdArray[1]);
- //other (dummy) arguments to launch container
- String[] additionalArgs = { "test_user", "yarn", "1", "app_01",
- "container_01", "workdir", "launch_script.sh", "tokens", "pidfile",
- "nm-local-dirs", "nm-log-dirs", "resource-spec" };
+ //other (dummy) arguments to tc modify state
+ String[] additionalArgs = { "cmd_file_1", "cmd_file_2", "cmd_file_3"};
op.appendArgs(additionalArgs);
cmdArray = exec.getPrivilegedOperationExecutionCommand(null, op);
http://git-wip-us.apache.org/repos/asf/hadoop/blob/3e6fce91/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestDockerContainerRuntime.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestDockerContainerRuntime.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestDockerContainerRuntime.java
new file mode 100644
index 0000000..31ed496
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestDockerContainerRuntime.java
@@ -0,0 +1,219 @@
+/*
+ * *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * /
+ */
+
+package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
+import org.apache.hadoop.yarn.server.nodemanager.LocalDirsHandlerService;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperation;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperationException;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperationExecutor;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.runtime.ContainerExecutionException;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.runtime.ContainerRuntimeConstants;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.runtime.ContainerRuntimeContext;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+import org.mockito.ArgumentCaptor;
+import org.mockito.Mockito;
+
+import java.io.File;
+import java.io.IOException;
+import java.nio.charset.Charset;
+import java.nio.file.Files;
+import java.nio.file.Paths;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.LinuxContainerRuntimeConstants.*;
+import static org.mockito.Matchers.eq;
+import static org.mockito.Mockito.*;
+
+public class TestDockerContainerRuntime {
+ private Configuration conf;
+ PrivilegedOperationExecutor mockExecutor;
+ String containerId;
+ Container container;
+ ContainerId cId;
+ ContainerLaunchContext context;
+ HashMap<String, String> env;
+ String image;
+ String runAsUser;
+ String user;
+ String appId;
+  String containerIdStr;
+ Path containerWorkDir;
+ Path nmPrivateContainerScriptPath;
+ Path nmPrivateTokensPath;
+ Path pidFilePath;
+ List<String> localDirs;
+ List<String> logDirs;
+ String resourcesOptions;
+
+ @Before
+ public void setup() {
+    String tmpPath = new StringBuffer(System.getProperty("test.build.data"))
+        .append('/').append("hadoop.tmp.dir").toString();
+
+ conf = new Configuration();
+ conf.set("hadoop.tmp.dir", tmpPath);
+
+ mockExecutor = Mockito
+ .mock(PrivilegedOperationExecutor.class);
+ containerId = "container_id";
+ container = mock(Container.class);
+ cId = mock(ContainerId.class);
+ context = mock(ContainerLaunchContext.class);
+ env = new HashMap<String, String>();
+ image = "busybox:latest";
+
+ env.put(DockerLinuxContainerRuntime.ENV_DOCKER_CONTAINER_IMAGE, image);
+ when(container.getContainerId()).thenReturn(cId);
+ when(cId.toString()).thenReturn(containerId);
+ when(container.getLaunchContext()).thenReturn(context);
+ when(context.getEnvironment()).thenReturn(env);
+
+ runAsUser = "run_as_user";
+ user = "user";
+ appId = "app_id";
+ containerIdStr = containerId;
+ containerWorkDir = new Path("/test_container_work_dir");
+ nmPrivateContainerScriptPath = new Path("/test_script_path");
+ nmPrivateTokensPath = new Path("/test_private_tokens_path");
+ pidFilePath = new Path("/test_pid_file_path");
+ localDirs = new ArrayList<>();
+ logDirs = new ArrayList<>();
+ resourcesOptions = "cgroups:none";
+
+ localDirs.add("/test_local_dir");
+ logDirs.add("/test_log_dir");
+ }
+
+ @Test
+ public void testSelectDockerContainerType() {
+ Map<String, String> envDockerType = new HashMap<>();
+ Map<String, String> envOtherType = new HashMap<>();
+
+ envDockerType.put(ContainerRuntimeConstants.ENV_CONTAINER_TYPE, "docker");
+ envOtherType.put(ContainerRuntimeConstants.ENV_CONTAINER_TYPE, "other");
+
+    Assert.assertFalse(DockerLinuxContainerRuntime
+        .isDockerContainerRequested(null));
+    Assert.assertTrue(DockerLinuxContainerRuntime
+        .isDockerContainerRequested(envDockerType));
+    Assert.assertFalse(DockerLinuxContainerRuntime
+        .isDockerContainerRequested(envOtherType));
+ }
+
+ @Test
+ @SuppressWarnings("unchecked")
+ public void testDockerContainerLaunch()
+ throws ContainerExecutionException, PrivilegedOperationException,
+ IOException {
+ DockerLinuxContainerRuntime runtime = new DockerLinuxContainerRuntime(
+ mockExecutor);
+ runtime.initialize(conf);
+
+ ContainerRuntimeContext.Builder builder = new ContainerRuntimeContext
+ .Builder(container);
+
+ builder.setExecutionAttribute(RUN_AS_USER, runAsUser)
+ .setExecutionAttribute(USER, user)
+ .setExecutionAttribute(APPID, appId)
+ .setExecutionAttribute(CONTAINER_ID_STR, containerIdStr)
+ .setExecutionAttribute(CONTAINER_WORK_DIR, containerWorkDir)
+ .setExecutionAttribute(NM_PRIVATE_CONTAINER_SCRIPT_PATH,
+ nmPrivateContainerScriptPath)
+ .setExecutionAttribute(NM_PRIVATE_TOKENS_PATH, nmPrivateTokensPath)
+ .setExecutionAttribute(PID_FILE_PATH, pidFilePath)
+ .setExecutionAttribute(LOCAL_DIRS, localDirs)
+ .setExecutionAttribute(LOG_DIRS, logDirs)
+ .setExecutionAttribute(RESOURCES_OPTIONS, resourcesOptions);
+
+ runtime.launchContainer(builder.build());
+
+ ArgumentCaptor<PrivilegedOperation> opCaptor = ArgumentCaptor.forClass(
+ PrivilegedOperation.class);
+
+ //single invocation expected
+ //due to type erasure + mocking, this verification requires a suppress
+ // warning annotation on the entire method
+ verify(mockExecutor, times(1))
+ .executePrivilegedOperation(anyList(), opCaptor.capture(), any(
+ File.class), any(Map.class), eq(false));
+
+ PrivilegedOperation op = opCaptor.getValue();
+
+ Assert.assertEquals(PrivilegedOperation.OperationType
+ .LAUNCH_DOCKER_CONTAINER, op.getOperationType());
+
+ List<String> args = op.getArguments();
+
+ //This invocation of container-executor should use 13 arguments in a
+ // specific order (sigh.)
+ Assert.assertEquals(13, args.size());
+
+ //verify arguments
+ Assert.assertEquals(runAsUser, args.get(0));
+ Assert.assertEquals(user, args.get(1));
+ Assert.assertEquals(Integer.toString(PrivilegedOperation.RunAsUserCommand
+ .LAUNCH_DOCKER_CONTAINER.getValue()), args.get(2));
+ Assert.assertEquals(appId, args.get(3));
+ Assert.assertEquals(containerId, args.get(4));
+ Assert.assertEquals(containerWorkDir.toString(), args.get(5));
+ Assert.assertEquals(nmPrivateContainerScriptPath.toUri()
+ .toString(), args.get(6));
+ Assert.assertEquals(nmPrivateTokensPath.toUri().getPath(), args.get(7));
+ Assert.assertEquals(pidFilePath.toString(), args.get(8));
+ Assert.assertEquals(localDirs.get(0), args.get(9));
+ Assert.assertEquals(logDirs.get(0), args.get(10));
+ Assert.assertEquals(resourcesOptions, args.get(12));
+
+ String dockerCommandFile = args.get(11);
+
+ //This is the expected docker invocation for this case
+ StringBuffer expectedCommandTemplate = new StringBuffer("run --name=%1$s ")
+ .append("--user=%2$s -d ")
+ .append("--workdir=%3$s ")
+ .append("--net=host -v /etc/passwd:/etc/password:ro ")
+ .append("-v %4$s:%4$s ")
+ .append("-v %5$s:%5$s ")
+ .append("-v %6$s:%6$s ")
+ .append("%7$s ")
+ .append("bash %8$s/launch_container.sh");
+
+ String expectedCommand = String.format(expectedCommandTemplate.toString(),
+ containerId, runAsUser, containerWorkDir, localDirs.get(0),
+ containerWorkDir, logDirs.get(0), image, containerWorkDir);
+
+    List<String> dockerCommands = Files.readAllLines(
+        Paths.get(dockerCommandFile), Charset.forName("UTF-8"));
+
+ Assert.assertEquals(1, dockerCommands.size());
+ Assert.assertEquals(expectedCommand, dockerCommands.get(0));
+ }
+}
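With the values from setup() substituted, the template expands to the single
line the test expects in the docker command file (wrapped here for
readability):

    run --name=container_id --user=run_as_user -d
      --workdir=/test_container_work_dir --net=host
      -v /etc/passwd:/etc/password:ro
      -v /test_local_dir:/test_local_dir
      -v /test_container_work_dir:/test_container_work_dir
      -v /test_log_dir:/test_log_dir
      busybox:latest bash /test_container_work_dir/launch_container.sh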
[04/10] hadoop git commit: YARN-3846. RM Web UI queue filter is not
working for sub queue. Contributed by Mohammad Shahid Khan
Posted by aw...@apache.org.
YARN-3846. RM Web UI queue filter is not working for sub queue. Contributed by Mohammad Shahid Khan
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3572ebd7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3572ebd7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3572ebd7
Branch: refs/heads/HADOOP-12111
Commit: 3572ebd738aa5fa8b0906d75fb12cc6cbb991573
Parents: 3e6fce9
Author: Jian He <ji...@apache.org>
Authored: Mon Jul 27 16:57:11 2015 -0700
Committer: Jian He <ji...@apache.org>
Committed: Mon Jul 27 17:12:05 2015 -0700
----------------------------------------------------------------------
hadoop-yarn-project/CHANGES.txt | 3 +++
.../server/resourcemanager/webapp/CapacitySchedulerPage.java | 5 ++++-
2 files changed, 7 insertions(+), 1 deletion(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/3572ebd7/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 534c55a..4f8484a 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -695,6 +695,9 @@ Release 2.8.0 - UNRELEASED
YARN-3958. TestYarnConfigurationFields should be moved to hadoop-yarn-api
module. (Varun Saxena via aajisaka)
+ YARN-3846. RM Web UI queue filter is not working for sub queue.
+ (Mohammad Shahid Khan via jianhe)
+
Release 2.7.2 - UNRELEASED
INCOMPATIBLE CHANGES
http://git-wip-us.apache.org/repos/asf/hadoop/blob/3572ebd7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/CapacitySchedulerPage.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/CapacitySchedulerPage.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/CapacitySchedulerPage.java
index 12a3013..d8971b7 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/CapacitySchedulerPage.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/CapacitySchedulerPage.java
@@ -516,7 +516,10 @@ class CapacitySchedulerPage extends RmView {
" $('#cs').bind('select_node.jstree', function(e, data) {",
" var q = $('.q', data.rslt.obj).first().text();",
" if (q == 'Queue: root') q = '';",
- " else q = '^' + q.substr(q.lastIndexOf(':') + 2) + '$';",
+ " else {",
+ " q = q.substr(q.lastIndexOf(':') + 2);",
+ " q = '^' + q.substr(q.lastIndexOf('.') + 1) + '$';",
+ " }",
" $('#apps').dataTable().fnFilter(q, 4, true);",
" });",
" $('#cs').show();",