Posted to commits@ambari.apache.org by nc...@apache.org on 2017/06/12 16:45:10 UTC

[03/50] [abbrv] ambari git commit: AMBARI-21113. hdfs_user_nofile_limit is not picking as expected for datanode process in a secure cluster (aonishuk)

AMBARI-21113. hdfs_user_nofile_limit is not picking as expected for datanode process in a secure cluster (aonishuk)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/4dba161a
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/4dba161a
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/4dba161a

Branch: refs/heads/branch-feature-AMBARI-12556
Commit: 4dba161a6fbeab2ab5507c9ff50f524242b7f450
Parents: b3425c9
Author: Andrew Onishuk <ao...@hortonworks.com>
Authored: Tue Jun 6 13:57:00 2017 +0300
Committer: Andrew Onishuk <ao...@hortonworks.com>
Committed: Tue Jun 6 13:57:00 2017 +0300

----------------------------------------------------------------------
 .../server/upgrade/UpgradeCatalog250.java       | 29 +++++++++++++++-----
 1 file changed, 22 insertions(+), 7 deletions(-)
----------------------------------------------------------------------
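For context: UpgradeCatalog250 already appended a "ulimit -l" stanza to the
hadoop-env.sh template when dfs.datanode.max.locked.memory is set; this patch
reworks that logic so it additionally appends a "ulimit -n" stanza raising the
open-files limit for a DataNode started as root in a secure cluster. Once the
Jinja template is rendered, the appended block ends up in hadoop-env.sh roughly
as follows (a sketch assuming hdfs_user_nofile_limit resolves to its usual
default of 128000; the actual value is taken from the hadoop-env configuration):

  if [ "$command" == "datanode" ] && [ "$EUID" -eq 0 ] && [ -n "$HADOOP_SECURE_DN_USER" ]; then
    ulimit -n 128000
  fi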


http://git-wip-us.apache.org/repos/asf/ambari/blob/4dba161a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog250.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog250.java b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog250.java
index e911a21..1f3a99d 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog250.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog250.java
@@ -801,18 +801,20 @@ public class UpgradeCatalog250 extends AbstractUpgradeCatalog {
       Map<String, Cluster> clusterMap = clusters.getClusters();
       Map<String, String> prop = new HashMap<>();
 
+
       if (clusterMap != null && !clusterMap.isEmpty()) {
         for (final Cluster cluster : clusterMap.values()) {
-          /*
-           * Append "ulimit -l" from hadoop-env.sh
-           */
+
           String content = null;
+          boolean contentUpdated = false;
+
           if (cluster.getDesiredConfigByType(HADOOP_ENV) != null) {
             content = cluster.getDesiredConfigByType(HADOOP_ENV).getProperties().get("content");
           }
 
-          if (content != null && !content.contains("ulimit")) {
-            content += "\n" +
+          if (content != null) {
+            if (!content.contains("ulimit -l")) {  // Append "ulimit -l" to hadoop-env.sh
+              content += "\n" +
                 "{% if is_datanode_max_locked_memory_set %}\n" +
                 "# Fix temporary bug, when ulimit from conf files is not picked up, without full relogin. \n" +
                 "# Makes sense to fix only when runing DN as root \n" +
@@ -821,9 +823,22 @@ public class UpgradeCatalog250 extends AbstractUpgradeCatalog {
                 "fi\n" +
                 "{% endif %}";
 
-            prop.put("content", content);
-            updateConfigurationPropertiesForCluster(cluster, "hadoop-env",
+              contentUpdated = true;
+            }
+
+            if (!content.contains("ulimit -n")) {  // Append "ulimit -n" to hadoop-env.sh
+              content += "\n" +
+                "if [ \"$command\" == \"datanode\" ] && [ \"$EUID\" -eq 0 ] && [ -n \"$HADOOP_SECURE_DN_USER\" ]; then \n" +
+                "  ulimit -n {{hdfs_user_nofile_limit}}\n" +
+                "fi";
+              contentUpdated = true;
+            }
+
+            if (contentUpdated) {
+              prop.put("content", content);
+              updateConfigurationPropertiesForCluster(cluster, "hadoop-env",
                 prop, true, false);
+            }
           }
         }
       }
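To verify the fix on a DataNode host after the upgrade and a process restart,
the effective limit can be read from the running process (a sketch for Linux;
the pgrep pattern matching on the DataNode main class is an assumption about
how the JVM command line appears on your hosts):

  # Find the secure DataNode JVM and inspect the limits it actually runs with
  DN_PID=$(pgrep -f org.apache.hadoop.hdfs.server.datanode.DataNode | head -n 1)
  grep 'open files' /proc/"$DN_PID"/limits

If hadoop-env.sh was updated correctly, "Max open files" reports the value of
hdfs_user_nofile_limit rather than the system default.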