Posted to commits@ambari.apache.org by ao...@apache.org on 2015/06/06 15:41:59 UTC

ambari git commit: AMBARI-11750. Cannot start RM and HiveServer2 after upgrade from 1.7.0 to 2.1.0 (it may not be only an upgrade issue) (aonishuk)

Repository: ambari
Updated Branches:
  refs/heads/trunk c0ff2b7e8 -> f8a329484


AMBARI-11750. Cannot start RM and HiveServer2 after upgrade from 1.7.0 to 2.1.0 (it may not be only an upgrade issue) (aonishuk)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/f8a32948
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/f8a32948
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/f8a32948

Branch: refs/heads/trunk
Commit: f8a32948440192a1071dc6a25fbc80684ca7c2f0
Parents: c0ff2b7
Author: Andrew Onishuk <ao...@hortonworks.com>
Authored: Sat Jun 6 16:41:20 2015 +0300
Committer: Andrew Onishuk <ao...@hortonworks.com>
Committed: Sat Jun 6 16:41:20 2015 +0300

----------------------------------------------------------------------
 .../server/upgrade/UpgradeCatalog210.java       | 29 +++++++++--
 .../server/upgrade/UpgradeCatalog210Test.java   | 55 ++++++++++++++++++++
 2 files changed, 81 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/f8a32948/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog210.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog210.java b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog210.java
index a7a8cee..e873fc5 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog210.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog210.java
@@ -66,6 +66,8 @@ import com.google.gson.JsonParser;
 import com.google.inject.Inject;
 import com.google.inject.Injector;
 import com.google.inject.persist.Transactional;
+import java.net.URI;
+import java.net.URISyntaxException;
 
 
 /**
@@ -1044,9 +1046,6 @@ public class UpgradeCatalog210 extends AbstractUpgradeCatalog {
   }
 
   protected void updateHdfsConfigs() throws AmbariException {
-    /***
-     * Append -Dorg.mortbay.jetty.Request.maxFormContentSize=-1 to HADOOP_NAMENODE_OPTS from hadoop-env.sh
-     */
     AmbariManagementController ambariManagementController = injector.getInstance(
         AmbariManagementController.class);
     Clusters clusters = ambariManagementController.getClusters();
@@ -1058,6 +1057,9 @@ public class UpgradeCatalog210 extends AbstractUpgradeCatalog {
 
       if (clusterMap != null && !clusterMap.isEmpty()) {
         for (final Cluster cluster : clusterMap.values()) {
+          /*
+           * Append -Dorg.mortbay.jetty.Request.maxFormContentSize=-1 to HADOOP_NAMENODE_OPTS in hadoop-env.sh
+           */
           content = null;
           if (cluster.getDesiredConfigByType("hadoop-env") != null) {
             content = cluster.getDesiredConfigByType(
@@ -1071,6 +1073,27 @@ public class UpgradeCatalog210 extends AbstractUpgradeCatalog {
             updateConfigurationPropertiesForCluster(cluster, "hadoop-env",
                 prop, true, false);
           }
+          /*
+           * Update dfs.namenode.rpc-address to use the NameNode hostname instead of localhost
+           */
+          if (cluster.getDesiredConfigByType("hdfs-site") != null && !cluster.getHosts("HDFS", "NAMENODE").isEmpty()) {
+            String hostName = cluster.getHosts("HDFS", "NAMENODE").iterator().next();
+            Map<String, String> coreSiteProps = (cluster.getDesiredConfigByType("core-site") == null) ?
+                null : cluster.getDesiredConfigByType("core-site").getProperties();
+            String defaultFs = (coreSiteProps == null) ? null : coreSiteProps.get("fs.defaultFS");
+            // Derive dfs.namenode.rpc-address from the NameNode host and the port of fs.defaultFS
+            if (defaultFs != null) {
+              try {
+                URI nameNodeRpc = new URI(defaultFs);
+                Map<String, String> hdfsProp = new HashMap<String, String>();
+                hdfsProp.put("dfs.namenode.rpc-address", hostName + ":" + nameNodeRpc.getPort());
+                updateConfigurationPropertiesForCluster(cluster, "hdfs-site",
+                    hdfsProp, true, false);
+              } catch (URISyntaxException e) {
+                LOG.warn("Unable to parse fs.defaultFS; skipping dfs.namenode.rpc-address update", e);
+              }
+            }
+          }
         }
       }
     }
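
For reference, the new hdfs-site logic above boils down to parsing
fs.defaultFS as a java.net.URI and combining its port with the registered
NAMENODE host. A standalone sketch of that derivation (the class name and
the hostname/fs.defaultFS values are made up for illustration):

    import java.net.URI;
    import java.net.URISyntaxException;

    public class RpcAddressSketch {
      public static void main(String[] args) throws URISyntaxException {
        // Hypothetical inputs: the registered NAMENODE host and a
        // core-site fs.defaultFS value that still points at localhost.
        String nameNodeHost = "nn1.example.com";
        String defaultFs = "hdfs://localhost:8020";

        // fs.defaultFS is a URI, so its port is the NameNode RPC port.
        URI uri = new URI(defaultFs);
        String rpcAddress = nameNodeHost + ":" + uri.getPort();

        // Prints "nn1.example.com:8020"; the upgrade writes this value
        // into dfs.namenode.rpc-address.
        System.out.println(rpcAddress);
      }
    }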

http://git-wip-us.apache.org/repos/asf/ambari/blob/f8a32948/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog210Test.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog210Test.java b/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog210Test.java
index d8a2565..d967694 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog210Test.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog210Test.java
@@ -36,11 +36,13 @@ import java.sql.ResultSet;
 import java.sql.SQLException;
 import java.util.Collections;
 import java.util.HashMap;
+import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
 
 import javax.persistence.EntityManager;
 
+import com.google.inject.AbstractModule;
 import org.apache.ambari.server.api.services.AmbariMetaInfo;
 import org.apache.ambari.server.configuration.Configuration;
 import org.apache.ambari.server.controller.AmbariManagementController;
@@ -63,10 +65,13 @@ import org.apache.ambari.server.orm.entities.ServiceComponentDesiredStateEntityP
 import org.apache.ambari.server.orm.entities.StackEntity;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
+import org.apache.ambari.server.state.Config;
+import org.apache.ambari.server.state.ConfigHelper;
 import org.apache.ambari.server.state.HostComponentAdminState;
 import org.apache.ambari.server.state.Service;
 import org.apache.ambari.server.state.stack.OsFamily;
 import org.easymock.Capture;
+import org.easymock.EasyMockSupport;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
@@ -306,6 +311,56 @@ public class UpgradeCatalog210Test {
   }
 
 
+  @Test
+  public void testUpdateHdfsConfigs() throws Exception {
+    EasyMockSupport easyMockSupport = new EasyMockSupport();
+    final AmbariManagementController mockAmbariManagementController = easyMockSupport.createNiceMock(AmbariManagementController.class);
+    final ConfigHelper mockConfigHelper = easyMockSupport.createMock(ConfigHelper.class);
+
+    final Clusters mockClusters = easyMockSupport.createStrictMock(Clusters.class);
+    final Cluster mockClusterExpected = easyMockSupport.createNiceMock(Cluster.class);
+
+    final Config mockHdfsSite = easyMockSupport.createNiceMock(Config.class);
+    final Config mockCoreSite = easyMockSupport.createStrictMock(Config.class);
+
+    final Map<String, String> propertiesExpectedHdfs = new HashMap<String, String>();
+    final Map<String, String> propertiesExpectedCoreSite = new HashMap<String, String>();
+    propertiesExpectedCoreSite.put("fs.defaultFS", "hdfs://EXAMPLE.COM:8020");
+    final Injector mockInjector = Guice.createInjector(new AbstractModule() {
+      @Override
+      protected void configure() {
+        bind(AmbariManagementController.class).toInstance(mockAmbariManagementController);
+        bind(ConfigHelper.class).toInstance(mockConfigHelper);
+        bind(Clusters.class).toInstance(mockClusters);
+
+        bind(DBAccessor.class).toInstance(createNiceMock(DBAccessor.class));
+        bind(OsFamily.class).toInstance(createNiceMock(OsFamily.class));
+      }
+    });
+
+    expect(mockAmbariManagementController.getClusters()).andReturn(mockClusters).once();
+    expect(mockClusters.getClusters()).andReturn(new HashMap<String, Cluster>() {{
+      put("normal", mockClusterExpected);
+    }}).once();
+
+    // hadoop-env is absent in this scenario, so updateHdfsConfigs skips the HADOOP_NAMENODE_OPTS change
+    expect(mockClusterExpected.getDesiredConfigByType("hadoop-env")).andReturn(null).once();
+
+    // hdfs-site exists, one NAMENODE host is registered, and core-site supplies fs.defaultFS
+    expect(mockClusterExpected.getDesiredConfigByType("hdfs-site")).andReturn(mockHdfsSite).atLeastOnce();
+    expect(mockClusterExpected.getHosts("HDFS", "NAMENODE")).andReturn(new HashSet<String>() {{
+      add("host1");
+    }}).atLeastOnce();
+    expect(mockHdfsSite.getProperties()).andReturn(propertiesExpectedHdfs).anyTimes();
+
+    expect(mockClusterExpected.getDesiredConfigByType("core-site")).andReturn(mockCoreSite).anyTimes();
+    expect(mockCoreSite.getProperties()).andReturn(propertiesExpectedCoreSite).anyTimes();
+
+    easyMockSupport.replayAll();
+    mockInjector.getInstance(UpgradeCatalog210.class).updateHdfsConfigs();
+    easyMockSupport.verifyAll();
+  }
+
   /**
    * @param dbAccessor
    * @return
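
A note on the mocking style in the test above: core-site is a strict mock
(any call that was not explicitly expected fails the test), while the
cluster and hdfs-site are nice mocks (unexpected calls simply return
defaults). A minimal sketch of that difference, using a hypothetical
KeyValue interface in place of Ambari's Config:

    import static org.easymock.EasyMock.expect;

    import java.util.Collections;
    import java.util.Map;

    import org.easymock.EasyMockSupport;

    public class MockStyleSketch {
      // Hypothetical stand-in for org.apache.ambari.server.state.Config.
      public interface KeyValue {
        Map<String, String> getProperties();
      }

      public static void main(String[] args) {
        EasyMockSupport support = new EasyMockSupport();

        // Nice mock: calls with no recorded expectation return defaults
        // (null for object return types) instead of failing.
        KeyValue nice = support.createNiceMock(KeyValue.class);

        // Strict mock: every call must match a recorded expectation.
        KeyValue strict = support.createStrictMock(KeyValue.class);
        expect(strict.getProperties())
            .andReturn(Collections.singletonMap("fs.defaultFS", "hdfs://EXAMPLE.COM:8020"))
            .anyTimes();

        support.replayAll();
        System.out.println(nice.getProperties());                       // null, no failure
        System.out.println(strict.getProperties().get("fs.defaultFS")); // hdfs://EXAMPLE.COM:8020
        support.verifyAll();
      }
    }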