Posted to commits@ambari.apache.org by jo...@apache.org on 2018/05/24 18:49:54 UTC

[ambari] branch trunk updated: Revert "AMBARI-23943. Hiveserver2 fails to start on viewFS enabled cluster: {hive_server2_zookeeper_namespace} is not ready yet (aonishuk)" (#1373)

This is an automated email from the ASF dual-hosted git repository.

jonathanhurley pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/ambari.git


The following commit(s) were added to refs/heads/trunk by this push:
     new e9374c9  Revert "AMBARI-23943. Hiveserver2 fails to start on viewFS enabled cluster: {hive_server2_zookeeper_namespace} is not ready yet (aonishuk)" (#1373)
e9374c9 is described below

commit e9374c954c38d911c6ec1802c8d8452aa20b9fcd
Author: Jonathan Hurley <jo...@apache.org>
AuthorDate: Thu May 24 14:49:35 2018 -0400

    Revert "AMBARI-23943. Hiveserver2 fails to start on viewFS enabled cluster: {hive_server2_zookeeper_namespace} is not ready yet (aonishuk)" (#1373)
    
    This reverts commit 66badb99b71c0c0d89b6f28d7893cab1c009dd33.
---
 .../dummy_files/alert_definitions.json             |   4 +-
 .../libraries/providers/hdfs_resource.py           |  44 ++-------
 .../common-services/HDFS/2.1.0.2.0/metainfo.xml    |   1 -
 .../4.0.0.2.0/package/scripts/oozie_service.py     |   2 +-
 .../SPARK/1.2.1/package/scripts/livy_server.py     |   2 +-
 .../SPARK2/2.0.0/package/scripts/livy2_server.py   |   2 +-
 .../2.1.0.2.0/package/scripts/resourcemanager.py   |   2 +-
 .../before-START/files/fast-hdfs-resource.jar      | Bin 19286899 -> 28296600 bytes
 .../before-START/scripts/shared_initialization.py  |   2 +-
 .../2.0.6/hooks/before-START/test_before_start.py  |  16 ---
 .../apache/ambari/fast_hdfs_resource/Resource.java |   9 --
 .../apache/ambari/fast_hdfs_resource/Runner.java   | 110 +++++++--------------
 12 files changed, 47 insertions(+), 147 deletions(-)

diff --git a/ambari-agent/src/test/python/ambari_agent/dummy_files/alert_definitions.json b/ambari-agent/src/test/python/ambari_agent/dummy_files/alert_definitions.json
index 341017c..d9a82a7 100644
--- a/ambari-agent/src/test/python/ambari_agent/dummy_files/alert_definitions.json
+++ b/ambari-agent/src/test/python/ambari_agent/dummy_files/alert_definitions.json
@@ -7,9 +7,9 @@
       {
         "name": "namenode_process", 
         "service": "HDFS", 
-        "component": "NAMENODE", 
-        "interval": 6, 
         "enabled": true, 
+        "interval": 6, 
+        "component": "NAMENODE", 
         "label": "NameNode process", 
         "source": {
           "reporting": {
diff --git a/ambari-common/src/main/python/resource_management/libraries/providers/hdfs_resource.py b/ambari-common/src/main/python/resource_management/libraries/providers/hdfs_resource.py
index a7b43c7..23bfbc5 100644
--- a/ambari-common/src/main/python/resource_management/libraries/providers/hdfs_resource.py
+++ b/ambari-common/src/main/python/resource_management/libraries/providers/hdfs_resource.py
@@ -74,32 +74,6 @@ class HdfsResourceJar:
   while execute does all the expensive creating/deleting work executing the jar with the json as parameter.
   """
   def action_delayed(self, action_name, main_resource):
-    dfs_type = main_resource.resource.dfs_type
-
-    if main_resource.resource.nameservices is None: # all nameservices
-      nameservices = namenode_ha_utils.get_nameservices(main_resource.resource.hdfs_site)
-    else:
-      nameservices = main_resource.resource.nameservices
-
-    # non-federated cluster
-    if not nameservices:
-      self.action_delayed_for_nameservice(None, action_name, main_resource)
-    else:
-      for nameservice in nameservices:
-        try:
-          if not dfs_type:
-            raise Fail("<serviceType> for fileSystem service should be set in metainfo.xml")
-          nameservice = dfs_type.lower() + "://" + nameservice
-
-          self.action_delayed_for_nameservice(nameservice, action_name, main_resource)
-        except namenode_ha_utils.NoActiveNamenodeException as ex:
-          # one of ns can be down (during initial start forexample) no need to worry for federated cluster
-          if len(nameservices) > 1:
-            Logger.exception("Cannot run HdfsResource for nameservice {0}. Due to no active namenode present".format(nameservice))
-          else:
-            raise
-
-  def action_delayed_for_nameservice(self, nameservice, action_name, main_resource):
     resource = {}
     env = Environment.get_instance()
     if not 'hdfs_files' in env.config:
@@ -116,8 +90,6 @@ class HdfsResourceJar:
       elif getattr(main_resource.resource, field_name):
         resource[json_field_name] = getattr(main_resource.resource, field_name)
 
-    resource['nameservice'] = nameservice
-
     # Add resource to create
     env.config['hdfs_files'].append(resource)
     
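
(For context on the delayed-action pattern described in the HdfsResourceJar docstring above: each delayed call only records its parameters as a dict in env.config['hdfs_files'], and a single later action_execute runs fast-hdfs-resource.jar once over the accumulated JSON, paying the JVM start-up cost once per batch instead of once per resource. A minimal sketch of that batching idea; queue_resource and run_batch are hypothetical stand-ins, not the real Ambari helpers:

    import json
    import subprocess
    import tempfile

    _pending = []  # accumulates one dict per delayed HDFS action

    def queue_resource(action, target, **attrs):
        # stand-in for HdfsResourceJar.action_delayed: just record
        # what to do and defer the expensive work
        _pending.append(dict(action=action, target=target, **attrs))

    def run_batch(jar="fast-hdfs-resource.jar"):
        # stand-in for action_execute: one JVM start-up for the whole
        # batch, with the JSON file passed as the jar's parameter
        with tempfile.NamedTemporaryFile("w", suffix=".json", delete=False) as f:
            json.dump(_pending, f)
        subprocess.check_call(["hadoop", "jar", jar, f.name])
        del _pending[:]
)
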
@@ -187,9 +159,9 @@ class WebHDFSUtil:
     self.logoutput = logoutput
     
   @staticmethod
-  def is_webhdfs_available(is_webhdfs_enabled, dfs_type):
+  def is_webhdfs_available(is_webhdfs_enabled, default_fs):
     # only hdfs seems to support webHDFS
-    return (is_webhdfs_enabled and dfs_type == 'HDFS')
+    return (is_webhdfs_enabled and default_fs.startswith("hdfs"))
     
   def run_command(self, *args, **kwargs):
     """
@@ -590,17 +562,11 @@ class HdfsResourceWebHDFS:
 class HdfsResourceProvider(Provider):
   def __init__(self, resource):
     super(HdfsResourceProvider,self).__init__(resource)
-
-    self.assert_parameter_is_set('dfs_type')
     self.fsType = getattr(resource, 'dfs_type')
-
     self.ignored_resources_list = HdfsResourceProvider.get_ignored_resources_list(self.resource.hdfs_resource_ignore_file)
-
-    if self.fsType == 'HDFS':
+    if self.fsType != 'HCFS':
       self.assert_parameter_is_set('hdfs_site')
       self.webhdfs_enabled = self.resource.hdfs_site['dfs.webhdfs.enabled']
-    else:
-      self.webhdfs_enabled = False
       
   @staticmethod
   def parse_path(path):
@@ -663,7 +629,9 @@ class HdfsResourceProvider(Provider):
     self.get_hdfs_resource_executor().action_execute(self)
 
   def get_hdfs_resource_executor(self):
-    if WebHDFSUtil.is_webhdfs_available(self.webhdfs_enabled, self.fsType):
+    if self.fsType == 'HCFS':
+      return HdfsResourceJar()
+    elif WebHDFSUtil.is_webhdfs_available(self.webhdfs_enabled, self.resource.default_fs):
       return HdfsResourceWebHDFS()
     else:
       return HdfsResourceJar()
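
(The behavioral core of the revert is visible in the two hunks above: the reverted commit keyed WebHDFS eligibility off the declared dfs_type, while the restored code short-circuits HCFS clusters to the jar executor and decides WebHDFS eligibility from whether fs.defaultFS starts with "hdfs". A simplified Python sketch of the restored dispatch, with placeholder executor classes standing in for the real ones:

    class HdfsResourceJar(object): pass      # placeholder: jar-based executor
    class HdfsResourceWebHDFS(object): pass  # placeholder: REST-based executor

    def is_webhdfs_available(is_webhdfs_enabled, default_fs):
        # restored signature: decide off fs.defaultFS, not the declared dfs_type
        return is_webhdfs_enabled and default_fs.startswith("hdfs")

    def get_executor(fs_type, webhdfs_enabled, default_fs):
        # simplified from HdfsResourceProvider.get_hdfs_resource_executor
        if fs_type == 'HCFS':
            return HdfsResourceJar()        # e.g. viewfs:// clusters go via the jar
        elif is_webhdfs_available(webhdfs_enabled, default_fs):
            return HdfsResourceWebHDFS()    # fast REST path for plain HDFS
        else:
            return HdfsResourceJar()        # fallback when WebHDFS is disabled
)
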
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/metainfo.xml b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/metainfo.xml
index f26eee9..19b378b 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/metainfo.xml
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/metainfo.xml
@@ -21,7 +21,6 @@
     <service>
       <name>HDFS</name>
       <displayName>HDFS</displayName>
-      <serviceType>HDFS</serviceType> <!-- This tag is used only for main fileSystem service. It sets filesystem schema for ambari -->
       <comment>Apache Hadoop Distributed File System</comment>
       <version>2.1.0.2.0</version>
 
diff --git a/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie_service.py b/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie_service.py
index 612bb29..29d6f4b 100644
--- a/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie_service.py
+++ b/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie_service.py
@@ -139,7 +139,7 @@ def oozie_service(action = 'start', upgrade_type=None):
         params.HdfsResource(None, action="execute")
 
         hdfs_share_dir_exists = True # skip time-expensive hadoop fs -ls check
-      elif WebHDFSUtil.is_webhdfs_available(params.is_webhdfs_enabled, params.dfs_type):
+      elif WebHDFSUtil.is_webhdfs_available(params.is_webhdfs_enabled, params.default_fs):
         # check with webhdfs is much faster than executing hadoop fs -ls. 
         util = WebHDFSUtil(params.hdfs_site, nameservice, params.oozie_user, params.security_enabled)
         list_status = util.run_command(params.hdfs_share_dir, 'GETFILESTATUS', method='GET', ignore_status_codes=['404'], assertable_result=False)
diff --git a/ambari-server/src/main/resources/common-services/SPARK/1.2.1/package/scripts/livy_server.py b/ambari-server/src/main/resources/common-services/SPARK/1.2.1/package/scripts/livy_server.py
index 3e45774..7c858c5 100644
--- a/ambari-server/src/main/resources/common-services/SPARK/1.2.1/package/scripts/livy_server.py
+++ b/ambari-server/src/main/resources/common-services/SPARK/1.2.1/package/scripts/livy_server.py
@@ -114,7 +114,7 @@ class LivyServer(Script):
       nameservices = namenode_ha_utils.get_nameservices(params.hdfs_site)
       nameservice = None if not nameservices else nameservices[-1]
 
-      if WebHDFSUtil.is_webhdfs_available(params.is_webhdfs_enabled, params.dfs_type):
+      if WebHDFSUtil.is_webhdfs_available(params.is_webhdfs_enabled, params.default_fs):
         # check with webhdfs is much faster than executing hdfs dfs -test
         util = WebHDFSUtil(params.hdfs_site, nameservice, params.hdfs_user, params.security_enabled)
         list_status = util.run_command(dir_path, 'GETFILESTATUS', method='GET', ignore_status_codes=['404'], assertable_result=False)
diff --git a/ambari-server/src/main/resources/common-services/SPARK2/2.0.0/package/scripts/livy2_server.py b/ambari-server/src/main/resources/common-services/SPARK2/2.0.0/package/scripts/livy2_server.py
index 27b1d25..492fd67 100644
--- a/ambari-server/src/main/resources/common-services/SPARK2/2.0.0/package/scripts/livy2_server.py
+++ b/ambari-server/src/main/resources/common-services/SPARK2/2.0.0/package/scripts/livy2_server.py
@@ -113,7 +113,7 @@ class LivyServer(Script):
       nameservices = namenode_ha_utils.get_nameservices(params.hdfs_site)
       nameservice = None if not nameservices else nameservices[-1]
 
-      if WebHDFSUtil.is_webhdfs_available(params.is_webhdfs_enabled, params.dfs_type):
+      if WebHDFSUtil.is_webhdfs_available(params.is_webhdfs_enabled, params.default_fs):
         # check with webhdfs is much faster than executing hdfs dfs -test
         util = WebHDFSUtil(params.hdfs_site, nameservice, params.hdfs_user, params.security_enabled)
         list_status = util.run_command(dir_path, 'GETFILESTATUS', method='GET', ignore_status_codes=['404'], assertable_result=False)
diff --git a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/resourcemanager.py b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/resourcemanager.py
index 99ad69f..601ced8 100644
--- a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/resourcemanager.py
+++ b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/resourcemanager.py
@@ -230,7 +230,7 @@ class ResourcemanagerDefault(Resourcemanager):
       nameservices = namenode_ha_utils.get_nameservices(params.hdfs_site)
       nameservice = None if not nameservices else nameservices[-1]
       
-      if WebHDFSUtil.is_webhdfs_available(params.is_webhdfs_enabled, params.dfs_type):
+      if WebHDFSUtil.is_webhdfs_available(params.is_webhdfs_enabled, params.default_fs):
         # check with webhdfs is much faster than executing hdfs dfs -test
         util = WebHDFSUtil(params.hdfs_site, nameservice, params.hdfs_user, params.security_enabled)
         list_status = util.run_command(dir_path, 'GETFILESTATUS', method='GET', ignore_status_codes=['404'], assertable_result=False)
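
(The four call-site changes above, in Oozie, Livy, Livy2, and the ResourceManager, all follow the same probe pattern: resolve the nameservice list from hdfs-site, take the last entry, and, when WebHDFS is available, issue a single GETFILESTATUS request instead of shelling out to `hadoop fs -ls` or `hdfs dfs -test`. A condensed sketch of that shared pattern; the wrapper function name and the import paths are my assumption, the call signatures come from the diff:

    # assumes Ambari's resource_management libraries are importable on the agent
    from resource_management.libraries.functions import namenode_ha_utils
    from resource_management.libraries.providers.hdfs_resource import WebHDFSUtil

    def probe_dir_via_webhdfs(params, dir_path, user):
        nameservices = namenode_ha_utils.get_nameservices(params.hdfs_site)
        nameservice = None if not nameservices else nameservices[-1]
        if not WebHDFSUtil.is_webhdfs_available(params.is_webhdfs_enabled,
                                                params.default_fs):
            return None  # caller falls back to the slow hadoop CLI check
        util = WebHDFSUtil(params.hdfs_site, nameservice, user,
                           params.security_enabled)
        # one HTTP round-trip; a 404 just means the directory is absent
        return util.run_command(dir_path, 'GETFILESTATUS', method='GET',
                                ignore_status_codes=['404'],
                                assertable_result=False)
)
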
diff --git a/ambari-server/src/main/resources/stack-hooks/before-START/files/fast-hdfs-resource.jar b/ambari-server/src/main/resources/stack-hooks/before-START/files/fast-hdfs-resource.jar
index b8f633f..6c993bf 100644
Binary files a/ambari-server/src/main/resources/stack-hooks/before-START/files/fast-hdfs-resource.jar and b/ambari-server/src/main/resources/stack-hooks/before-START/files/fast-hdfs-resource.jar differ
diff --git a/ambari-server/src/main/resources/stack-hooks/before-START/scripts/shared_initialization.py b/ambari-server/src/main/resources/stack-hooks/before-START/scripts/shared_initialization.py
index c26265a..541de9c 100644
--- a/ambari-server/src/main/resources/stack-hooks/before-START/scripts/shared_initialization.py
+++ b/ambari-server/src/main/resources/stack-hooks/before-START/scripts/shared_initialization.py
@@ -65,7 +65,7 @@ def setup_hadoop():
     # if WebHDFS is not enabled we need this jar to create hadoop folders and copy tarballs to HDFS.
     if params.sysprep_skip_copy_fast_jar_hdfs:
       print "Skipping copying of fast-hdfs-resource.jar as host is sys prepped"
-    elif params.dfs_type == 'HCFS' or not WebHDFSUtil.is_webhdfs_available(params.is_webhdfs_enabled, params.dfs_type):
+    elif params.dfs_type == 'HCFS' or not WebHDFSUtil.is_webhdfs_available(params.is_webhdfs_enabled, params.default_fs):
       # for source-code of jar goto contrib/fast-hdfs-resource
       File(format("{ambari_libs_dir}/fast-hdfs-resource.jar"),
            mode=0644,
diff --git a/ambari-server/src/test/python/stacks/2.0.6/hooks/before-START/test_before_start.py b/ambari-server/src/test/python/stacks/2.0.6/hooks/before-START/test_before_start.py
index 6329ee4..8e20d17 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/hooks/before-START/test_before_start.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/hooks/before-START/test_before_start.py
@@ -60,10 +60,6 @@ class TestHookBeforeStart(RMFTestCase):
                               create_parents = True,
                               cd_access = 'a',
                               )
-    self.assertResourceCalled('File', '/var/lib/ambari-agent/lib/fast-hdfs-resource.jar',
-        content = StaticFile('fast-hdfs-resource.jar'),
-        mode = 0644,
-    )
     self.assertResourceCalled('File', '/etc/hadoop/conf/commons-logging.properties',
                               content = Template('commons-logging.properties.j2'),
                               owner = 'hdfs',
@@ -141,10 +137,6 @@ class TestHookBeforeStart(RMFTestCase):
                               create_parents = True,
                               cd_access = 'a',
                               )
-    self.assertResourceCalled('File', '/var/lib/ambari-agent/lib/fast-hdfs-resource.jar',
-        content = StaticFile('fast-hdfs-resource.jar'),
-        mode = 0644,
-    )
     self.assertResourceCalled('File', '/etc/hadoop/conf/commons-logging.properties',
                               content = Template('commons-logging.properties.j2'),
                               owner = 'root',
@@ -227,10 +219,6 @@ class TestHookBeforeStart(RMFTestCase):
                               create_parents = True,
                               cd_access = 'a',
                               )
-    self.assertResourceCalled('File', '/var/lib/ambari-agent/lib/fast-hdfs-resource.jar',
-        content = StaticFile('fast-hdfs-resource.jar'),
-        mode = 0644,
-    )
     self.assertResourceCalled('File', '/etc/hadoop/conf/commons-logging.properties',
                               content = Template('commons-logging.properties.j2'),
                               owner = 'hdfs',
@@ -315,10 +303,6 @@ class TestHookBeforeStart(RMFTestCase):
                               create_parents = True,
                               cd_access = 'a',
                               )
-    self.assertResourceCalled('File', '/var/lib/ambari-agent/lib/fast-hdfs-resource.jar',
-        content = StaticFile('fast-hdfs-resource.jar'),
-        mode = 0644,
-    )
     self.assertResourceCalled('File', '/etc/hadoop/conf/commons-logging.properties',
                               content = Template('commons-logging.properties.j2'),
                               owner = 'hdfs',
diff --git a/contrib/fast-hdfs-resource/src/main/java/org/apache/ambari/fast_hdfs_resource/Resource.java b/contrib/fast-hdfs-resource/src/main/java/org/apache/ambari/fast_hdfs_resource/Resource.java
index 5c7cbda..9cbfab2 100644
--- a/contrib/fast-hdfs-resource/src/main/java/org/apache/ambari/fast_hdfs_resource/Resource.java
+++ b/contrib/fast-hdfs-resource/src/main/java/org/apache/ambari/fast_hdfs_resource/Resource.java
@@ -44,7 +44,6 @@ public class Resource {
   private String owner;
   private String group;
   private String mode;
-  private String nameservice;
   private boolean recursiveChown;
   private boolean recursiveChmod;
   private boolean changePermissionforParents;
@@ -106,14 +105,6 @@ public class Resource {
     this.mode = mode;
   }
 
-  public String getNameservice() {
-    return nameservice;
-  }
-
-  public void setNameservice(String nameservice) {
-    this.nameservice = nameservice;
-  }
-
   public boolean isRecursiveChown() {
     return recursiveChown;
   }
diff --git a/contrib/fast-hdfs-resource/src/main/java/org/apache/ambari/fast_hdfs_resource/Runner.java b/contrib/fast-hdfs-resource/src/main/java/org/apache/ambari/fast_hdfs_resource/Runner.java
index 9cf0a73..98119b0 100644
--- a/contrib/fast-hdfs-resource/src/main/java/org/apache/ambari/fast_hdfs_resource/Runner.java
+++ b/contrib/fast-hdfs-resource/src/main/java/org/apache/ambari/fast_hdfs_resource/Runner.java
@@ -22,10 +22,6 @@ import java.io.FileReader;
 import java.io.IOException;
 import java.net.URI;
 import java.net.URISyntaxException;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
@@ -56,88 +52,52 @@ public class Runner {
 
     Gson gson = new Gson();
     Resource[] resources = null;
-    Map<String, FileSystem> fileSystemNameToInstance = new HashMap<String, FileSystem>();
-    Map<String, List<Resource>> fileSystemToResource = new HashMap<String, List<Resource>>();
-
+    FileSystem dfs = null;
 
     try {
+      Configuration conf = new Configuration();
+      dfs = FileSystem.get(conf);
+
       // 3 - Load data from JSON
       resources = (Resource[]) gson.fromJson(new FileReader(jsonFilePath),
           Resource[].class);
-      
-      Configuration conf = new Configuration();
-      FileSystem dfs = null;
 
-      // Creating connections
+      // 4 - Connect to HDFS
+      System.out.println("Using filesystem uri: " + FileSystem.getDefaultUri(conf).toString());
+      dfs.initialize(FileSystem.getDefaultUri(conf), conf);
+      
       for (Resource resource : resources) {
-        String nameservice = resource.getNameservice();
-
-        if(!fileSystemNameToInstance.containsKey(nameservice)) {
-          URI fileSystemUrl;
-          if(nameservice == null) {
-            fileSystemUrl = FileSystem.getDefaultUri(conf);
-          } else {
-            fileSystemUrl = new URI(nameservice);
-          }
+        System.out.println("Creating: " + resource);
 
-          dfs = FileSystem.get(fileSystemUrl, conf);
+        Resource.checkResourceParameters(resource, dfs);
 
-          // 4 - Connect to DFS
-          System.out.println("Initializing filesystem uri: " + fileSystemUrl);
-          dfs.initialize(fileSystemUrl, conf);
+        Path pathHadoop = null;
 
-          fileSystemNameToInstance.put(nameservice, dfs);
+        if (resource.getAction().equals("download")) {
+          pathHadoop = new Path(resource.getSource());
         }
-
-        if(!fileSystemToResource.containsKey(nameservice)) {
-          fileSystemToResource.put(nameservice, new ArrayList<Resource>());
-        }
-        fileSystemToResource.get(nameservice).add(resource);
-      }
-
-      //for (Resource resource : resources) {
-      for (Map.Entry<String, List<Resource>> entry : fileSystemToResource.entrySet()) {
-        String nameservice = entry.getKey();
-        List<Resource> resourcesNameservice = entry.getValue();
-
-        for(Resource resource: resourcesNameservice) {
-          if (nameservice != null) {
-            System.out.println("Creating: " + resource + " in " + nameservice);
-          } else {
-            System.out.println("Creating: " + resource + " in default filesystem");
-          }
-
-          dfs = fileSystemNameToInstance.get(nameservice);
-
-          Resource.checkResourceParameters(resource, dfs);
-
-          Path pathHadoop = null;
-
-          if (resource.getAction().equals("download")) {
-            pathHadoop = new Path(resource.getSource());
-          } else {
-            String path = resource.getTarget();
-            pathHadoop = new Path(path);
-            if (!resource.isManageIfExists() && dfs.exists(pathHadoop)) {
-              System.out.println(
-                  String.format("Skipping the operation for not managed DFS directory %s  since immutable_paths contains it.", path)
-              );
-              continue;
-            }
+        else {
+          String path = resource.getTarget();
+          pathHadoop = new Path(path);
+          if (!resource.isManageIfExists() && dfs.exists(pathHadoop)) {
+            System.out.println(
+                String.format("Skipping the operation for not managed DFS directory %s  since immutable_paths contains it.", path)
+            );
+            continue;
           }
+        }
 
-          if (resource.getAction().equals("create")) {
-            // 5 - Create
-            Resource.createResource(resource, dfs, pathHadoop);
-            Resource.setMode(resource, dfs, pathHadoop);
-            Resource.setOwner(resource, dfs, pathHadoop);
-          } else if (resource.getAction().equals("delete")) {
-            // 6 - Delete
-            dfs.delete(pathHadoop, true);
-          } else if (resource.getAction().equals("download")) {
-            // 7 - Download
-            dfs.copyToLocalFile(pathHadoop, new Path(resource.getTarget()));
-          }
+        if (resource.getAction().equals("create")) {
+          // 5 - Create
+          Resource.createResource(resource, dfs, pathHadoop);
+          Resource.setMode(resource, dfs, pathHadoop);
+          Resource.setOwner(resource, dfs, pathHadoop);
+        } else if (resource.getAction().equals("delete")) {
+          // 6 - Delete
+          dfs.delete(pathHadoop, true);
+        } else if (resource.getAction().equals("download")) {
+          // 7 - Download
+          dfs.copyToLocalFile(pathHadoop, new Path(resource.getTarget()));
         }
       }
     } 
@@ -146,9 +106,7 @@ public class Runner {
        e.printStackTrace();
     }
     finally {
-      for(FileSystem dfs:fileSystemNameToInstance.values()) {
-        dfs.close();
-      }
+      dfs.close();
     }
 
     System.out.println("All resources created.");
