Posted to commits@ambari.apache.org by ao...@apache.org on 2015/10/15 18:59:23 UTC

ambari git commit: AMBARI-13437. HDFS File caching does not work because of ulimit not being passed into the start command for datanode. (aonishuk)

Repository: ambari
Updated Branches:
  refs/heads/trunk 81280ea39 -> 52083d1ff


AMBARI-13437. HDFS File caching does not work because of ulimit not being passed into the start command for datanode. (aonishuk)
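
For context: HDFS centralized cache management mlock()s cached blocks, so the DataNode process needs a locked-memory ulimit of at least dfs.datanode.max.locked.memory. A quick way to confirm the limit actually reached a running DataNode on Linux is to read /proc/<pid>/limits; a minimal sketch follows (it assumes the Hadoop start scripts tag the JVM command line with -Dproc_datanode -- adjust the pgrep pattern if your install differs):

    import subprocess

    # Find the DataNode JVM; Hadoop start scripts typically add -Dproc_datanode
    # to the command line (an assumption -- adjust the pattern if needed).
    pid = subprocess.check_output(["pgrep", "-f", "proc_datanode"]).split()[0].decode()

    # /proc/<pid>/limits shows the effective ulimits of the running process;
    # "Max locked memory" must be >= dfs.datanode.max.locked.memory for caching.
    with open("/proc/%s/limits" % pid) as limits:
        for line in limits:
            if line.startswith("Max locked memory"):
                print(line.rstrip())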


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/52083d1f
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/52083d1f
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/52083d1f

Branch: refs/heads/trunk
Commit: 52083d1ff8425948485149cd1c862ba9ddcb58db
Parents: 81280ea
Author: Andrew Onishuk <ao...@hortonworks.com>
Authored: Thu Oct 15 19:58:01 2015 +0300
Committer: Andrew Onishuk <ao...@hortonworks.com>
Committed: Thu Oct 15 19:58:27 2015 +0300

----------------------------------------------------------------------
 .../server/upgrade/UpgradeCatalog213.java       | 25 ++++++++++++++++++++
 .../HDFS/2.1.0.2.0/configuration/hadoop-env.xml |  8 +++++++
 .../2.0.6/hooks/before-ANY/scripts/params.py    |  3 +++
 .../services/HDFS/configuration/hadoop-env.xml  |  8 +++++++
 .../services/HDFS/configuration/hadoop-env.xml  |  8 +++++++
 .../server/upgrade/UpgradeCatalog213Test.java   |  4 ++++
 6 files changed, 56 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/52083d1f/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog213.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog213.java b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog213.java
index 90a75be..803e5f4 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog213.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog213.java
@@ -58,7 +58,15 @@ public class UpgradeCatalog213 extends AbstractUpgradeCatalog {
   private static final String AMS_ENV = "ams-env";
   private static final String AMS_HBASE_ENV = "ams-hbase-env";
   private static final String HBASE_ENV_CONFIG = "hbase-env";
+  private static final String HADOOP_ENV_CONFIG = "hadoop-env";
   private static final String CONTENT_PROPERTY = "content";
+  private static final String HADOOP_ENV_CONTENT_TO_APPEND = "\n{% if is_datanode_max_locked_memory_set %}\n" +
+                                    "# Workaround for a temporary bug: the ulimit from conf files is not picked up without a full re-login.\n" +
+                                    "# Only applies when running the DataNode as root.\n" +
+                                    "if [ \"$command\" == \"datanode\" ] && [ \"$EUID\" -eq 0 ] && [ -n \"$HADOOP_SECURE_DN_USER\" ]; then\n" +
+                                    "  ulimit -l {{datanode_max_locked_memory}}\n" +
+                                    "fi\n" +
+                                    "{% endif %}\n";
 
   private static final String KERBEROS_DESCRIPTOR_TABLE = "kerberos_descriptor";
   private static final String KERBEROS_DESCRIPTOR_NAME_COLUMN = "kerberos_descriptor_name";
@@ -141,6 +149,7 @@ public class UpgradeCatalog213 extends AbstractUpgradeCatalog {
     updateAMSConfigs();
     updateHDFSConfigs();
     updateHbaseEnvConfig();
+    updateHadoopEnv();
     updateKafkaConfigs();
   }
 
@@ -212,6 +221,22 @@ public class UpgradeCatalog213 extends AbstractUpgradeCatalog {
 
     return rootJson.toString();
   }
+  
+  protected void updateHadoopEnv() throws AmbariException {
+    AmbariManagementController ambariManagementController = injector.getInstance(AmbariManagementController.class);
+
+    for (final Cluster cluster : getCheckedClusterMap(ambariManagementController.getClusters()).values()) {
+      Config hadoopEnvConfig = cluster.getDesiredConfigByType(HADOOP_ENV_CONFIG);
+      if (hadoopEnvConfig != null) {
+        String content = hadoopEnvConfig.getProperties().get(CONTENT_PROPERTY);
+        if (content != null) {
+          content += HADOOP_ENV_CONTENT_TO_APPEND;
+          Map<String, String> updates = Collections.singletonMap(CONTENT_PROPERTY, content);
+          updateConfigurationPropertiesForCluster(cluster, HADOOP_ENV_CONFIG, updates, true, false);
+        }
+      }
+    }
+  }
 
   protected void updateHDFSConfigs() throws AmbariException {
     AmbariManagementController ambariManagementController = injector.getInstance(

http://git-wip-us.apache.org/repos/asf/ambari/blob/52083d1f/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/configuration/hadoop-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/configuration/hadoop-env.xml b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/configuration/hadoop-env.xml
index a8f7951..5319da9 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/configuration/hadoop-env.xml
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/configuration/hadoop-env.xml
@@ -318,6 +318,14 @@ export HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}
 
 #Mostly required for hadoop 2.0
 export JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}:/usr/lib/hadoop/lib/native/Linux-amd64-64
+
+{% if is_datanode_max_locked_memory_set %}
+# Workaround for a temporary bug: the ulimit from conf files is not picked up without a full re-login.
+# Only applies when running the DataNode as root.
+if [ "$command" == "datanode" ] &amp;&amp; [ "$EUID" -eq 0 ] &amp;&amp; [ -n "$HADOOP_SECURE_DN_USER" ]; then
+  ulimit -l {{datanode_max_locked_memory}}
+fi
+{% endif %}
     </value>
   </property>
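
The added block is a Jinja2 conditional that Ambari renders into hadoop-env.sh, so the ulimit call is emitted only when the flag is set. A minimal standalone sketch of that rendering behavior, using the jinja2 package directly rather than Ambari's own template machinery:

    from jinja2 import Template

    SNIPPET = (
        "{% if is_datanode_max_locked_memory_set %}\n"
        "ulimit -l {{datanode_max_locked_memory}}\n"
        "{% endif %}"
    )

    # Flag set: the rendered hadoop-env.sh raises the locked-memory limit.
    print(Template(SNIPPET).render(is_datanode_max_locked_memory_set=True,
                                   datanode_max_locked_memory=67108864))
    # Flag unset: the conditional renders to nothing and the file is unchanged.
    print(Template(SNIPPET).render(is_datanode_max_locked_memory_set=False))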
   

http://git-wip-us.apache.org/repos/asf/ambari/blob/52083d1f/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/scripts/params.py
index fb70dac..2593ee4 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/scripts/params.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/scripts/params.py
@@ -29,6 +29,7 @@ from resource_management.libraries.functions import format
 from resource_management.libraries.functions import conf_select
 from resource_management.libraries.functions import hdp_select
 from resource_management.libraries.functions import format_jvm_option
+from resource_management.libraries.functions.is_empty import is_empty
 from resource_management.libraries.functions.version import format_hdp_stack_version
 from resource_management.libraries.functions.version import compare_versions
 from ambari_commons.os_check import OSCheck
@@ -98,6 +99,8 @@ hadoop_secure_dn_user = hdfs_user
 hadoop_dir = "/etc/hadoop"
 versioned_hdp_root = '/usr/hdp/current'
 hadoop_java_io_tmpdir = os.path.join(tmp_dir, "hadoop_java_io_tmpdir")
+datanode_max_locked_memory = config['configurations']['hdfs-site']['dfs.datanode.max.locked.memory']
+is_datanode_max_locked_memory_set = not is_empty(config['configurations']['hdfs-site']['dfs.datanode.max.locked.memory'])
 
 # HDP 2.2+ params
 if Script.is_hdp_stack_greater_or_equal("2.2"):
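
The is_empty check above guards against dfs.datanode.max.locked.memory being absent or blank, so the Jinja2 block stays out of hadoop-env.sh unless a real value exists. A rough standalone approximation of that check (the actual helper lives in resource_management.libraries.functions.is_empty and also understands Ambari-internal placeholder values):

    def is_empty_approx(value):
        # Hypothetical simplification: treat None and blank strings as unset.
        # Ambari's real is_empty also recognizes its UnknownConfiguration marker.
        return value is None or (isinstance(value, str) and value.strip() == "")

    assert is_empty_approx(None)            # property missing
    assert is_empty_approx("   ")           # property left blank
    assert not is_empty_approx("67108864")  # 64 MB in bytes -> flag is set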

http://git-wip-us.apache.org/repos/asf/ambari/blob/52083d1f/ambari-server/src/main/resources/stacks/HDP/2.2/services/HDFS/configuration/hadoop-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2/services/HDFS/configuration/hadoop-env.xml b/ambari-server/src/main/resources/stacks/HDP/2.2/services/HDFS/configuration/hadoop-env.xml
index d91ca71..060d651 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.2/services/HDFS/configuration/hadoop-env.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.2/services/HDFS/configuration/hadoop-env.xml
@@ -172,6 +172,14 @@ export HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}
 export JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}
 
 export HADOOP_OPTS="-Dhdp.version=$HDP_VERSION $HADOOP_OPTS"
+
+{% if is_datanode_max_locked_memory_set %}
+# Workaround for a temporary bug: the ulimit from conf files is not picked up without a full re-login.
+# Only applies when running the DataNode as root.
+if [ "$command" == "datanode" ] &amp;&amp; [ "$EUID" -eq 0 ] &amp;&amp; [ -n "$HADOOP_SECURE_DN_USER" ]; then
+  ulimit -l {{datanode_max_locked_memory}}
+fi
+{% endif %}
     </value>
   </property>
   

http://git-wip-us.apache.org/repos/asf/ambari/blob/52083d1f/ambari-server/src/main/resources/stacks/HDP/2.3/services/HDFS/configuration/hadoop-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3/services/HDFS/configuration/hadoop-env.xml b/ambari-server/src/main/resources/stacks/HDP/2.3/services/HDFS/configuration/hadoop-env.xml
index 9222b1d..ec5aa29 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.3/services/HDFS/configuration/hadoop-env.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3/services/HDFS/configuration/hadoop-env.xml
@@ -151,6 +151,14 @@ export HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}
 export JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}
 
 export HADOOP_OPTS="-Dhdp.version=$HDP_VERSION $HADOOP_OPTS"
+
+{% if is_datanode_max_locked_memory_set %}
+# Workaround for a temporary bug: the ulimit from conf files is not picked up without a full re-login.
+# Only applies when running the DataNode as root.
+if [ "$command" == "datanode" ] &amp;&amp; [ "$EUID" -eq 0 ] &amp;&amp; [ -n "$HADOOP_SECURE_DN_USER" ]; then
+  ulimit -l {{datanode_max_locked_memory}}
+fi
+{% endif %}
     </value>
   </property>
   <property>

http://git-wip-us.apache.org/repos/asf/ambari/blob/52083d1f/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog213Test.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog213Test.java b/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog213Test.java
index f8f47c7..7a60b8c 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog213Test.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog213Test.java
@@ -110,6 +110,7 @@ public class UpgradeCatalog213Test {
     Method updateStormConfigs = UpgradeCatalog213.class.getDeclaredMethod("updateStormConfigs");
     Method addNewConfigurationsFromXml = AbstractUpgradeCatalog.class.getDeclaredMethod("addNewConfigurationsFromXml");
     Method updateHbaseEnvConfig = UpgradeCatalog213.class.getDeclaredMethod("updateHbaseEnvConfig");
+    Method updateHadoopEnvConfig = UpgradeCatalog213.class.getDeclaredMethod("updateHadoopEnv");
     Method updateAlertDefinitions = UpgradeCatalog213.class.getDeclaredMethod("updateAlertDefinitions");
 
     UpgradeCatalog213 upgradeCatalog213 = createMockBuilder(UpgradeCatalog213.class)
@@ -120,6 +121,7 @@ public class UpgradeCatalog213Test {
         .addMockedMethod(updateHbaseEnvConfig)
         .addMockedMethod(updateAlertDefinitions)
         .addMockedMethod(updateKafkaConfigs)
+        .addMockedMethod(updateHadoopEnvConfig)
         .createMock();
 
     upgradeCatalog213.updateHbaseEnvConfig();
@@ -128,6 +130,8 @@ public class UpgradeCatalog213Test {
     expectLastCall().once();
     upgradeCatalog213.updateStormConfigs();
     expectLastCall().once();
+    upgradeCatalog213.updateHadoopEnv();
+    expectLastCall().once();
     upgradeCatalog213.updateAMSConfigs();
     expectLastCall().once();
     upgradeCatalog213.updateAlertDefinitions();