Posted to commits@ambari.apache.org by ao...@apache.org on 2017/06/06 10:57:07 UTC

[2/2] ambari git commit: AMBARI-21113. hdfs_user_nofile_limit is not picking as expected for datanode process in a secure cluster (aonishuk)

AMBARI-21113. hdfs_user_nofile_limit is not picking as expected for datanode process in a secure cluster (aonishuk)
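For context, the upgrade step in the diff below appends two guarded blocks to the hadoop-env.sh "content" property. Once Ambari renders the {{hdfs_user_nofile_limit}} template variable, the newly added second block would look roughly like the sketch that follows; the 128000 value is purely illustrative of whatever hdfs_user_nofile_limit resolves to on a given cluster. The guard restricts the override to a DataNode started as root with HADOOP_SECURE_DN_USER set, i.e. the secure-cluster case named in the commit message.

    # Sketch of the rendered addition to hadoop-env.sh (limit value is illustrative)
    # Raise the open-files limit only for the datanode command, started as root,
    # with a secure DataNode user configured (secure cluster).
    if [ "$command" == "datanode" ] && [ "$EUID" -eq 0 ] && [ -n "$HADOOP_SECURE_DN_USER" ]; then
      ulimit -n 128000    # rendered from {{hdfs_user_nofile_limit}}
    fi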


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/9496c017
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/9496c017
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/9496c017

Branch: refs/heads/branch-2.5
Commit: 9496c0170439633a114332d13e46c7d3a4f4d339
Parents: c51e0b8
Author: Andrew Onishuk <ao...@hortonworks.com>
Authored: Tue Jun 6 13:57:04 2017 +0300
Committer: Andrew Onishuk <ao...@hortonworks.com>
Committed: Tue Jun 6 13:57:04 2017 +0300

----------------------------------------------------------------------
 .../server/upgrade/UpgradeCatalog250.java       | 28 +++++++++++++++-----
 1 file changed, 21 insertions(+), 7 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/9496c017/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog250.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog250.java b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog250.java
index 53abe01..cd7ae45 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog250.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog250.java
@@ -772,16 +772,16 @@ public class UpgradeCatalog250 extends AbstractUpgradeCatalog {
 
       if (clusterMap != null && !clusterMap.isEmpty()) {
         for (final Cluster cluster : clusterMap.values()) {
-          /***
-           * Append "ulimit -l" from hadoop-env.sh
-           */
           String content = null;
+          Boolean contentUpdated = false;
+
           if (cluster.getDesiredConfigByType(HADOOP_ENV) != null) {
             content = cluster.getDesiredConfigByType(HADOOP_ENV).getProperties().get("content");
           }
 
-          if (content != null && !content.contains("ulimit")) {
-            content += "\n" +
+          if (content != null) {
+            if (!content.contains("ulimit -l")) {  // Append "ulimit -l" to hadoop-env.sh
+              content += "\n" +
                 "{% if is_datanode_max_locked_memory_set %}\n" +
                 "# Fix temporary bug, when ulimit from conf files is not picked up, without full relogin. \n" +
                 "# Makes sense to fix only when runing DN as root \n" +
@@ -790,9 +790,22 @@ public class UpgradeCatalog250 extends AbstractUpgradeCatalog {
                 "fi\n" +
                 "{% endif %}";
 
-            prop.put("content", content);
-            updateConfigurationPropertiesForCluster(cluster, "hadoop-env",
+              contentUpdated = true;
+            }
+
+            if (!content.contains("ulimit -n")){  // Append "ulimit -n" to hadoop-env.sh
+              content += "\n" +
+                "if [ \"$command\" == \"datanode\" ] && [ \"$EUID\" -eq 0 ] && [ -n \"$HADOOP_SECURE_DN_USER\" ]; then \n" +
+                "  ulimit -n {{hdfs_user_nofile_limit}}\n" +
+                "fi";
+              contentUpdated = true;
+            }
+
+            if (contentUpdated){
+              prop.put("content", content);
+              updateConfigurationPropertiesForCluster(cluster, "hadoop-env",
                 prop, true, false);
+            }
           }
         }
       }
@@ -836,6 +849,7 @@ public class UpgradeCatalog250 extends AbstractUpgradeCatalog {
     if (clusters != null) {
       Map<String, Cluster> clusterMap = clusters.getClusters();
 
+
       if (clusterMap != null && !clusterMap.isEmpty()) {
         for (final Cluster cluster : clusterMap.values()) {