Posted to commits@ambari.apache.org by ao...@apache.org on 2017/04/24 12:01:16 UTC

[01/50] [abbrv] ambari git commit: AMBARI-20737 Able to hide the Use Public Repository option on Register Version page on Ambari Admin View (dili)

Repository: ambari
Updated Branches:
  refs/heads/branch-3.0-perf 04f8c53fe -> 8cc384cae


AMBARI-20737 Able to hide the Use Public Repository option on Register Version page on Ambari Admin View (dili)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/1f4a72fd
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/1f4a72fd
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/1f4a72fd

Branch: refs/heads/branch-3.0-perf
Commit: 1f4a72fd0cca8d822ea8ebb7368452c54d53f8bb
Parents: b64e856
Author: Di Li <di...@apache.org>
Authored: Mon Apr 17 12:17:27 2017 -0400
Committer: Andrew Onishuk <ao...@hortonworks.com>
Committed: Mon Apr 24 14:59:56 2017 +0300

----------------------------------------------------------------------
 .../stackVersions/StackVersionsCreateCtrl.js     | 19 +++++++++++++++++++
 .../ui/admin-web/app/scripts/services/Stack.js   |  3 ++-
 .../views/stackVersions/stackVersionPage.html    |  3 +--
 .../VersionDefinitionResourceProvider.java       |  4 ++++
 .../ambari/server/stack/StackManagerTest.java    | 12 ++++++++++++
 5 files changed, 38 insertions(+), 3 deletions(-)
----------------------------------------------------------------------
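
The server side of this change derives the new stack_repo_update_link_exists flag from whether the stack's repoinfo.xml declares a repo update URL (the value behind the getLatestURI() accessor used in the diffs below). As a rough illustration, a minimal Python sketch of that check, assuming the link is stored as a <latest> element at the root of repoinfo.xml (the element name and layout are assumptions; the diffs only show the accessor):

    import xml.etree.ElementTree as ET

    def stack_repo_update_link_exists(repoinfo_path):
        # True only when repoinfo.xml carries a non-empty <latest> URI,
        # mirroring the Java-side check getLatestURI() != null.
        latest = ET.parse(repoinfo_path).getroot().find('latest')
        return latest is not None and (latest.text or '').strip() != ''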


http://git-wip-us.apache.org/repos/asf/ambari/blob/1f4a72fd/ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/stackVersions/StackVersionsCreateCtrl.js
----------------------------------------------------------------------
diff --git a/ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/stackVersions/StackVersionsCreateCtrl.js b/ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/stackVersions/StackVersionsCreateCtrl.js
index b3c27dc..69c35c0 100644
--- a/ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/stackVersions/StackVersionsCreateCtrl.js
+++ b/ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/stackVersions/StackVersionsCreateCtrl.js
@@ -29,6 +29,7 @@ angular.module('ambariAdminConsole')
   $scope.stackIds = [];
   $scope.allVersions = [];
   $scope.networkLost = false;
+  $scope.stackRepoUpdateLinkExists = true;
   $scope.skipValidation = false;
   $scope.useRedhatSatellite = false;
 
@@ -543,6 +544,23 @@ angular.module('ambariAdminConsole')
     })[0];
   };
 
+  /**
+   * Set $scope.stackRepoUpdateLinkExists to true if at least one stack
+   * has the repo update URL link in its repoinfo.xml
+   * */
+  $scope.setStackRepoUpdateLinkExists = function (versions) {
+    var stackRepoUpdateLinkExists = versions.find(function(_version){
+      return _version.stackRepoUpdateLinkExists;
+    });
+
+    //Found at least one version with the stack repo update link
+    if (stackRepoUpdateLinkExists){
+      $scope.stackRepoUpdateLinkExists = true;
+    } else {
+      $scope.stackRepoUpdateLinkExists = false;
+    }
+  };
+
   $scope.setNetworkIssues = function (versions) {
    $scope.networkLost = !versions.find(function(_version){
      return !_version.stackDefault;
@@ -576,6 +594,7 @@ angular.module('ambariAdminConsole')
         $scope.selectedPublicRepoVersion = $scope.activeStackVersion;
         $scope.setVersionSelected($scope.activeStackVersion);
         $scope.setNetworkIssues(versions);
+        $scope.setStackRepoUpdateLinkExists(versions);
         $scope.validateRepoUrl();
         $scope.availableStackRepoList = versions.length == 1 ? [] : versions;
       }

http://git-wip-us.apache.org/repos/asf/ambari/blob/1f4a72fd/ambari-admin/src/main/resources/ui/admin-web/app/scripts/services/Stack.js
----------------------------------------------------------------------
diff --git a/ambari-admin/src/main/resources/ui/admin-web/app/scripts/services/Stack.js b/ambari-admin/src/main/resources/ui/admin-web/app/scripts/services/Stack.js
index e028906..b496987 100644
--- a/ambari-admin/src/main/resources/ui/admin-web/app/scripts/services/Stack.js
+++ b/ambari-admin/src/main/resources/ui/admin-web/app/scripts/services/Stack.js
@@ -88,7 +88,7 @@ angular.module('ambariAdminConsole')
     },
 
     allPublicStackVersions: function() {
-      var url = '/version_definitions?fields=VersionDefinition/stack_default,operating_systems/repositories/Repositories/*,VersionDefinition/stack_services,VersionDefinition/repository_version' +
+      var url = '/version_definitions?fields=VersionDefinition/stack_default,VersionDefinition/stack_repo_update_link_exists,operating_systems/repositories/Repositories/*,VersionDefinition/stack_services,VersionDefinition/repository_version' +
         '&VersionDefinition/show_available=true';
       var deferred = $q.defer();
       $http.get(Settings.baseUrl + url, {mock: 'version/versions.json'})
@@ -100,6 +100,7 @@ angular.module('ambariAdminConsole')
               stackName: version.VersionDefinition.stack_name,
               stackVersion: version.VersionDefinition.stack_version,
               stackDefault: version.VersionDefinition.stack_default,
+              stackRepoUpdateLinkExists: version.VersionDefinition.stack_repo_update_link_exists,
               stackNameVersion:  version.VersionDefinition.stack_name + '-' + version.VersionDefinition.stack_version,
               displayName: version.VersionDefinition.stack_name + '-' + version.VersionDefinition.repository_version.split('-')[0], //HDP-2.3.4.0
               displayNameFull: version.VersionDefinition.stack_name + '-' + version.VersionDefinition.repository_version, //HDP-2.3.4.0-23
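
The new field travels to the admin UI over the same version_definitions call shown above, so it can also be inspected with a direct REST query. A hedged sketch, assuming a reachable Ambari server and admin credentials (the host and auth are placeholders; the field names come from the diff):

    import requests

    url = ('http://ambari.example.com:8080/api/v1/version_definitions'
           '?fields=VersionDefinition/stack_repo_update_link_exists,'
           'VersionDefinition/repository_version'
           '&VersionDefinition/show_available=true')
    for item in requests.get(url, auth=('admin', 'admin')).json().get('items', []):
        vd = item['VersionDefinition']
        print(vd['repository_version'], vd['stack_repo_update_link_exists'])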

http://git-wip-us.apache.org/repos/asf/ambari/blob/1f4a72fd/ambari-admin/src/main/resources/ui/admin-web/app/views/stackVersions/stackVersionPage.html
----------------------------------------------------------------------
diff --git a/ambari-admin/src/main/resources/ui/admin-web/app/views/stackVersions/stackVersionPage.html b/ambari-admin/src/main/resources/ui/admin-web/app/views/stackVersions/stackVersionPage.html
index 3bee2a1..fe08802 100644
--- a/ambari-admin/src/main/resources/ui/admin-web/app/views/stackVersions/stackVersionPage.html
+++ b/ambari-admin/src/main/resources/ui/admin-web/app/views/stackVersions/stackVersionPage.html
@@ -115,8 +115,7 @@
   </div>
 
   <div id="upload-definition-file-panel" ng-if="createController">
-
-    <div class="col-sm-12 big-radio clearfix" ng-class="{'disabled' : networkLost || useRedhatSatellite}">
+    <div class="col-sm-12 big-radio clearfix hide-soft" ng-class="{'disabled' : networkLost || useRedhatSatellite,'visible':stackRepoUpdateLinkExists}">
       <input type="radio" ng-model="selectedOption.index" value="1" ng-change="togglePublicLocalOptionSelect()" ng-disabled="networkLost || useRedhatSatellite">
       <span>{{'versions.usePublic' | translate}}</span>
       <a id="public-disabled-link" href="javascript:void(0);" ng-if="networkLost" ng-click="showPublicRepoDisabledDialog()">{{'versions.networkIssues.networkLost'| translate}}</a>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1f4a72fd/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/VersionDefinitionResourceProvider.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/VersionDefinitionResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/VersionDefinitionResourceProvider.java
index 352aa2d..7914fd9 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/VersionDefinitionResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/VersionDefinitionResourceProvider.java
@@ -102,6 +102,7 @@ public class VersionDefinitionResourceProvider extends AbstractAuthorizedResourc
   protected static final String VERSION_DEF_AVAILABLE_SERVICES       = "VersionDefinition/services";
   protected static final String VERSION_DEF_STACK_SERVICES           = "VersionDefinition/stack_services";
   protected static final String VERSION_DEF_STACK_DEFAULT            = "VersionDefinition/stack_default";
+  protected static final String VERSION_DEF_STACK_REPO_UPDATE_LINK_EXISTS = "VersionDefinition/stack_repo_update_link_exists";
   protected static final String VERSION_DEF_DISPLAY_NAME             = "VersionDefinition/display_name";
   protected static final String VERSION_DEF_VALIDATION               = "VersionDefinition/validation";
   protected static final String SHOW_AVAILABLE                       = "VersionDefinition/show_available";
@@ -157,6 +158,7 @@ public class VersionDefinitionResourceProvider extends AbstractAuthorizedResourc
       VERSION_DEF_AVAILABLE_SERVICES,
       VERSION_DEF_STACK_SERVICES,
       VERSION_DEF_STACK_DEFAULT,
+      VERSION_DEF_STACK_REPO_UPDATE_LINK_EXISTS,
       VERSION_DEF_DISPLAY_NAME,
       VERSION_DEF_VALIDATION,
       VERSION_DEF_MIN_JDK,
@@ -617,6 +619,7 @@ public class VersionDefinitionResourceProvider extends AbstractAuthorizedResourc
     setResourceProperty(resource, VERSION_DEF_RELEASE_NOTES, xml.release.releaseNotes, requestedIds);
     setResourceProperty(resource, VERSION_DEF_RELEASE_VERSION, xml.release.version, requestedIds);
     setResourceProperty(resource, VERSION_DEF_STACK_DEFAULT, xml.isStackDefault(), requestedIds);
+    setResourceProperty(resource, VERSION_DEF_STACK_REPO_UPDATE_LINK_EXISTS, (stack.getRepositoryXml().getLatestURI() != null), requestedIds);
     setResourceProperty(resource, VERSION_DEF_DISPLAY_NAME, xml.release.display, requestedIds);
 
     if (null != validations) {
@@ -686,6 +689,7 @@ public class VersionDefinitionResourceProvider extends AbstractAuthorizedResourc
       setResourceProperty(resource, VERSION_DEF_STACK_SERVICES, xml.getStackServices(stack), requestedIds);
       setResourceProperty(resource, VERSION_DEF_MIN_JDK, stack.getMinJdk(), requestedIds);
       setResourceProperty(resource, VERSION_DEF_MAX_JDK, stack.getMaxJdk(), requestedIds);
+      setResourceProperty(resource, VERSION_DEF_STACK_REPO_UPDATE_LINK_EXISTS, (stack.getRepositoryXml().getLatestURI() != null), requestedIds);
     }
 
     return resource;

http://git-wip-us.apache.org/repos/asf/ambari/blob/1f4a72fd/ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerTest.java b/ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerTest.java
index 507c560..8c7ab9d 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerTest.java
@@ -1025,4 +1025,16 @@ public class StackManagerTest {
     assertTrue(logsearchLogfeederRoleCommand + " should be dependent of " + infraSolrRoleCommand, logsearchLogfeederBlockers.contains(infraSolrRoleCommand));
     assertTrue(logsearchLogfeederRoleCommand + " should be dependent of " + logsearchServerRoleCommand, logsearchLogfeederBlockers.contains(logsearchServerRoleCommand));
   }
+
+  @Test
+  public void testVersionDefinitionStackRepoUpdateLinkExists(){
+    // HDP 2.1.1 ships a repo update (latest) link in its repoinfo.xml; HDP 2.0.8 does not
+    StackInfo stack = stackManager.getStack("HDP", "2.1.1");
+    String latestUri = stack.getRepositoryXml().getLatestURI();
+    assertTrue(latestUri != null);
+
+    stack = stackManager.getStack("HDP", "2.0.8");
+    latestUri = stack.getRepositoryXml().getLatestURI();
+    assertTrue(latestUri == null);
+  }
 }


[42/50] [abbrv] ambari git commit: AMBARI-20729. Need way to explicitly ask for workflow name (Supreeth Sharma via Venkata Sairam)

Posted by ao...@apache.org.
AMBARI-20729. Need way to explicitly ask for workflow name (Supreeth Sharma via Venkata Sairam)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/c53b68ea
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/c53b68ea
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/c53b68ea

Branch: refs/heads/branch-3.0-perf
Commit: c53b68ea200ca85b3aa3148f84e616981c6c72d2
Parents: abe5e55
Author: Venkata Sairam <ve...@gmail.com>
Authored: Fri Apr 21 17:00:38 2017 +0530
Committer: Andrew Onishuk <ao...@hortonworks.com>
Committed: Mon Apr 24 14:59:56 2017 +0300

----------------------------------------------------------------------
 .../src/main/resources/ui/app/components/bundle-config.js         | 3 ++-
 .../src/main/resources/ui/app/components/coord-config.js          | 3 ++-
 2 files changed, 4 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/c53b68ea/contrib/views/wfmanager/src/main/resources/ui/app/components/bundle-config.js
----------------------------------------------------------------------
diff --git a/contrib/views/wfmanager/src/main/resources/ui/app/components/bundle-config.js b/contrib/views/wfmanager/src/main/resources/ui/app/components/bundle-config.js
index 99f4fa7..41012b2 100644
--- a/contrib/views/wfmanager/src/main/resources/ui/app/components/bundle-config.js
+++ b/contrib/views/wfmanager/src/main/resources/ui/app/components/bundle-config.js
@@ -47,6 +47,7 @@ export default Ember.Component.extend(Ember.Evented, Validations, {
   fileBrowser : Ember.inject.service('file-browser'),
   workspaceManager : Ember.inject.service('workspace-manager'),
   jobConfigProperties: Ember.A([]),
+  isDefaultNameForBundleEnabled : false,
   initialize : function(){
     var self = this;
     this.set('errors', Ember.A([]));
@@ -95,7 +96,7 @@ export default Ember.Component.extend(Ember.Evented, Validations, {
     }else{
       this.set('bundle', this.createBundle());
     }
-    if(Ember.isBlank(this.get('bundle.name'))){
+    if(Ember.isBlank(this.get('bundle.name')) && this.get('isDefaultNameForBundleEnabled')){
       this.set('bundle.name', Ember.copy(this.get('tabInfo.name')));
     }
     this.schedulePersistWorkInProgress();

http://git-wip-us.apache.org/repos/asf/ambari/blob/c53b68ea/contrib/views/wfmanager/src/main/resources/ui/app/components/coord-config.js
----------------------------------------------------------------------
diff --git a/contrib/views/wfmanager/src/main/resources/ui/app/components/coord-config.js b/contrib/views/wfmanager/src/main/resources/ui/app/components/coord-config.js
index fba4db5..1aeca5b 100644
--- a/contrib/views/wfmanager/src/main/resources/ui/app/components/coord-config.js
+++ b/contrib/views/wfmanager/src/main/resources/ui/app/components/coord-config.js
@@ -51,6 +51,7 @@ export default Ember.Component.extend(Validations, Ember.Evented, {
   workspaceManager : Ember.inject.service('workspace-manager'),
   showErrorMessage: Ember.computed.alias('saveAttempted'),
   jobConfigProperties: Ember.A([]),
+  isDefaultNameForCoordinatorEnabled : false,
   datasetsForInputs : Ember.computed('coordinator.datasets.[]','coordinator.dataOutputs.[]',function(){
     var datasetsForInputs = Ember.copy(this.get('coordinator.datasets'));
     this.get('coordinator.dataOutputs').forEach((dataOutput)=>{
@@ -145,7 +146,7 @@ export default Ember.Component.extend(Validations, Ember.Evented, {
       {'name':'throttle', 'displayName':'Throttle', 'value':''}
     ]);
     this.set('timezoneList', Ember.copy(Constants.timezoneList));
-    if(Ember.isBlank(this.get('coordinator.name'))){
+    if(Ember.isBlank(this.get('coordinator.name')) && this.get('isDefaultNameForCoordinatorEnabled')){
       this.set('coordinator.name', Ember.copy(this.get('tabInfo.name')));
     }
     this.schedulePersistWorkInProgress();


[46/50] [abbrv] ambari git commit: AMBARI-20326. HDP 3.0 TP - support for HBase with configs, kerberos, widgets, metrics, quicklinks, and themes (alejandro)

Posted by ao...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/41006ebe/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/files/draining_servers.rb
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/files/draining_servers.rb b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/files/draining_servers.rb
new file mode 100644
index 0000000..5bcb5b6
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/files/draining_servers.rb
@@ -0,0 +1,164 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# Add, remove, or list servers in draining mode via ZooKeeper
+
+require 'optparse'
+include Java
+
+import org.apache.hadoop.hbase.HBaseConfiguration
+import org.apache.hadoop.hbase.client.HBaseAdmin
+import org.apache.hadoop.hbase.zookeeper.ZKUtil
+import org.apache.commons.logging.Log
+import org.apache.commons.logging.LogFactory
+
+# Name of this script
+NAME = "draining_servers"
+
+# Do command-line parsing
+options = {}
+optparse = OptionParser.new do |opts|
+  opts.banner = "Usage: ./hbase org.jruby.Main #{NAME}.rb [options] add|remove|list <hostname>|<host:port>|<servername> ..."
+  opts.separator 'Add, remove, or list servers in draining mode. Can accept either a hostname to drain all region servers ' +
+                 'on that host, a host:port pair, or a host,port,startCode triplet. More than one server can be given, separated by spaces'
+  opts.on('-h', '--help', 'Display usage information') do
+    puts opts
+    exit
+  end
+  options[:debug] = false
+  opts.on('-d', '--debug', 'Display extra debug logging') do
+    options[:debug] = true
+  end
+end
+optparse.parse!
+
+# Return array of servernames where servername is hostname+port+startcode
+# comma-delimited
+def getServers(admin)
+  serverInfos = admin.getClusterStatus().getServerInfo()
+  servers = []
+  for server in serverInfos
+    servers << server.getServerName()
+  end
+  return servers
+end
+
+def getServerNames(hostOrServers, config)
+  ret = []
+  
+  for hostOrServer in hostOrServers
+    # check whether it is already serverName. No need to connect to cluster
+    parts = hostOrServer.split(',')
+    if parts.size() == 3
+      ret << hostOrServer
+    else 
+      admin = HBaseAdmin.new(config) if not admin
+      servers = getServers(admin)
+
+      hostOrServer = hostOrServer.gsub(/:/, ",")
+      for server in servers 
+        ret << server if server.start_with?(hostOrServer)
+      end
+    end
+  end
+  
+  admin.close() if admin
+  return ret
+end
+
+def addServers(options, hostOrServers)
+  config = HBaseConfiguration.create()
+  servers = getServerNames(hostOrServers, config)
+  
+  zkw = org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher.new(config, "draining_servers", nil)
+  parentZnode = zkw.drainingZNode
+  
+  begin
+    for server in servers
+      node = ZKUtil.joinZNode(parentZnode, server)
+      ZKUtil.createAndFailSilent(zkw, node)
+    end
+  ensure
+    zkw.close()
+  end
+end
+
+def removeServers(options, hostOrServers)
+  config = HBaseConfiguration.create()
+  servers = getServerNames(hostOrServers, config)
+  
+  zkw = org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher.new(config, "draining_servers", nil)
+  parentZnode = zkw.drainingZNode
+  
+  begin
+    for server in servers
+      node = ZKUtil.joinZNode(parentZnode, server)
+      ZKUtil.deleteNodeFailSilent(zkw, node)
+    end
+  ensure
+    zkw.close()
+  end
+end
+
+# list servers in draining mode
+def listServers(options)
+  config = HBaseConfiguration.create()
+  
+  zkw = org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher.new(config, "draining_servers", nil)
+  parentZnode = zkw.drainingZNode
+
+  servers = ZKUtil.listChildrenNoWatch(zkw, parentZnode)
+  servers.each {|server| puts server}
+end
+
+hostOrServers = ARGV[1..ARGV.size()]
+
+# Create a logger and quiet the noisy DEBUG-level client logging
+def configureLogging(options)
+  apacheLogger = LogFactory.getLog(NAME)
+  # Configure log4j to not spew so much
+  unless (options[:debug]) 
+    logger = org.apache.log4j.Logger.getLogger("org.apache.hadoop.hbase")
+    logger.setLevel(org.apache.log4j.Level::WARN)
+    logger = org.apache.log4j.Logger.getLogger("org.apache.zookeeper")
+    logger.setLevel(org.apache.log4j.Level::WARN)
+  end
+  return apacheLogger
+end
+
+# Create a logger and save it to ruby global
+$LOG = configureLogging(options)
+case ARGV[0]
+  when 'add'
+    if ARGV.length < 2
+      puts optparse
+      exit 1
+    end
+    addServers(options, hostOrServers)
+  when 'remove'
+    if ARGV.length < 2
+      puts optparse
+      exit 1
+    end
+    removeServers(options, hostOrServers)
+  when 'list'
+    listServers(options)
+  else
+    puts optparse
+    exit 3
+end
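
Per its usage banner, the script is meant to be launched through the hbase wrapper. A sketch of driving it from automation, with the wrapper assumed on PATH and the hostname a placeholder:

    import subprocess

    # Usage per the banner: ./hbase org.jruby.Main draining_servers.rb [options] add|remove|list <server> ...
    subprocess.check_call(['hbase', 'org.jruby.Main', 'draining_servers.rb', 'add', 'rs1.example.com'])
    subprocess.check_call(['hbase', 'org.jruby.Main', 'draining_servers.rb', 'list'])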

http://git-wip-us.apache.org/repos/asf/ambari/blob/41006ebe/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/files/hbase-smoke-cleanup.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/files/hbase-smoke-cleanup.sh b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/files/hbase-smoke-cleanup.sh
new file mode 100644
index 0000000..cde19e4
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/files/hbase-smoke-cleanup.sh
@@ -0,0 +1,23 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+disable 'ambarismoketest'
+drop 'ambarismoketest'
+exit
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/41006ebe/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/files/hbaseSmokeVerify.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/files/hbaseSmokeVerify.sh b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/files/hbaseSmokeVerify.sh
new file mode 100644
index 0000000..8b085e8
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/files/hbaseSmokeVerify.sh
@@ -0,0 +1,34 @@
+#!/usr/bin/env bash
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+conf_dir=$1
+data=$2
+hbase_cmd=$3
+echo "scan 'ambarismoketest'" | $hbase_cmd --config $conf_dir shell > /tmp/hbase_chk_verify
+cat /tmp/hbase_chk_verify
+echo "Looking for $data"
+tr -d '\n|\t| ' < /tmp/hbase_chk_verify | grep -q $data
+if [ "$?" -ne 0 ]
+then
+  exit 1
+fi
+
+grep -q '1 row(s)' /tmp/hbase_chk_verify

http://git-wip-us.apache.org/repos/asf/ambari/blob/41006ebe/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/__init__.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/__init__.py b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/__init__.py
new file mode 100644
index 0000000..5561e10
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/__init__.py
@@ -0,0 +1,19 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""

http://git-wip-us.apache.org/repos/asf/ambari/blob/41006ebe/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/functions.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/functions.py b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/functions.py
new file mode 100644
index 0000000..f98b9b9
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/functions.py
@@ -0,0 +1,54 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import os
+import re
+import math
+import datetime
+
+from resource_management.core.shell import checked_call
+
+def calc_xmn_from_xms(heapsize_str, xmn_percent, xmn_max):
+  """
+  @param heapsize_str: str (e.g '1000m')
+  @param xmn_percent: float (e.g 0.2)
+  @param xmn_max: integer (e.g 512)
+  """
+  heapsize = int(re.search('\d+',heapsize_str).group(0))
+  heapsize_unit = re.search('\D+',heapsize_str).group(0)
+  xmn_val = int(math.floor(heapsize*xmn_percent))
+  xmn_val -= xmn_val % 8
+  
+  result_xmn_val = xmn_max if xmn_val > xmn_max else xmn_val
+  return str(result_xmn_val) + heapsize_unit
+
+def ensure_unit_for_memory(memory_size):
+  memory_size_values = re.findall('\d+', str(memory_size))
+  memory_size_unit = re.findall('\D+', str(memory_size))
+
+  if len(memory_size_values) > 0:
+    unit = 'm'
+    if len(memory_size_unit) > 0:
+      unit = memory_size_unit[0]
+    if unit not in ['b', 'k', 'm', 'g', 't', 'p']:
+      raise Exception("Memory size unit error. %s - wrong unit" % unit)
+    return "%s%s" % (memory_size_values[0], unit)
+  else:
+    raise Exception('Memory size can not be calculated')
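
Worked examples for the two helpers above; the expected values follow directly from the code, and the import assumes the module is importable as functions:

    from functions import calc_xmn_from_xms, ensure_unit_for_memory

    assert calc_xmn_from_xms('1000m', 0.2, 512) == '200m'  # floor(1000*0.2)=200, already a multiple of 8
    assert calc_xmn_from_xms('4096m', 0.2, 512) == '512m'  # floor(4096*0.2)=819 -> 816, capped at xmn_max
    assert ensure_unit_for_memory('2048') == '2048m'       # bare number defaults to 'm'
    assert ensure_unit_for_memory('4g') == '4g'            # recognized unit passes through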

http://git-wip-us.apache.org/repos/asf/ambari/blob/41006ebe/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/hbase.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/hbase.py b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/hbase.py
new file mode 100644
index 0000000..fced4fc
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/hbase.py
@@ -0,0 +1,230 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+import os
+import sys
+from resource_management.libraries.script.script import Script
+from resource_management.libraries.resources.xml_config import XmlConfig
+from resource_management.libraries.resources.template_config import TemplateConfig
+from resource_management.libraries.functions.format import format
+from resource_management.core.source import Template, InlineTemplate
+from resource_management.core.resources import Package
+from resource_management.core.resources.service import ServiceConfig
+from resource_management.core.resources.system import Directory, Execute, File
+from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
+from ambari_commons import OSConst
+from resource_management.libraries.functions.constants import StackFeature
+from resource_management.libraries.functions.stack_features import check_stack_feature
+
+@OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
+def hbase(name=None):
+  import params
+  XmlConfig("hbase-site.xml",
+            conf_dir = params.hbase_conf_dir,
+            configurations = params.config['configurations']['hbase-site'],
+            configuration_attributes=params.config['configuration_attributes']['hbase-site']
+  )
+
+  if params.service_map.has_key(name):
+    # Manually overriding service logon user & password set by the installation package
+    service_name = params.service_map[name]
+    ServiceConfig(service_name,
+                  action="change_user",
+                  username = params.hbase_user,
+                  password = Script.get_password(params.hbase_user))
+
+# name is 'master' or 'regionserver' or 'queryserver' or 'client'
+@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
+def hbase(name=None):
+  import params
+
+  Directory( params.etc_prefix_dir,
+      mode=0755
+  )
+
+  Directory( params.hbase_conf_dir,
+      owner = params.hbase_user,
+      group = params.user_group,
+      create_parents = True
+  )
+   
+  Directory(params.java_io_tmpdir,
+      create_parents = True,
+      mode=0777
+  )
+
+  # If a file location is specified in ioengine parameter,
+  # ensure that directory exists. Otherwise create the
+  # directory with permissions assigned to hbase:hadoop.
+  ioengine_input = params.ioengine_param
+  if ioengine_input != None:
+    if ioengine_input.startswith("file:/"):
+      ioengine_fullpath = ioengine_input[5:]
+      ioengine_dir = os.path.dirname(ioengine_fullpath)
+      Directory(ioengine_dir,
+          owner = params.hbase_user,
+          group = params.user_group,
+          create_parents = True,
+          mode = 0755
+      )
+  
+  parent_dir = os.path.dirname(params.tmp_dir)
+  # In case there are several placeholders in the path
+  while ("${" in parent_dir):
+    parent_dir = os.path.dirname(parent_dir)
+  if parent_dir != os.path.abspath(os.sep) :
+    Directory (parent_dir,
+          create_parents = True,
+          cd_access="a",
+    )
+    Execute(("chmod", "1777", parent_dir), sudo=True)
+
+  XmlConfig( "hbase-site.xml",
+            conf_dir = params.hbase_conf_dir,
+            configurations = params.config['configurations']['hbase-site'],
+            configuration_attributes=params.config['configuration_attributes']['hbase-site'],
+            owner = params.hbase_user,
+            group = params.user_group
+  )
+
+  if check_stack_feature(StackFeature.PHOENIX_CORE_HDFS_SITE_REQUIRED, params.version_for_stack_feature_checks):
+    XmlConfig( "core-site.xml",
+               conf_dir = params.hbase_conf_dir,
+               configurations = params.config['configurations']['core-site'],
+               configuration_attributes=params.config['configuration_attributes']['core-site'],
+               owner = params.hbase_user,
+               group = params.user_group
+    )
+    if 'hdfs-site' in params.config['configurations']:
+      XmlConfig( "hdfs-site.xml",
+              conf_dir = params.hbase_conf_dir,
+              configurations = params.config['configurations']['hdfs-site'],
+              configuration_attributes=params.config['configuration_attributes']['hdfs-site'],
+              owner = params.hbase_user,
+              group = params.user_group
+      )
+  else:
+    File(format("{params.hbase_conf_dir}/hdfs-site.xml"),
+         action="delete"
+    )
+    File(format("{params.hbase_conf_dir}/core-site.xml"),
+         action="delete"
+    )
+
+  if 'hbase-policy' in params.config['configurations']:
+    XmlConfig( "hbase-policy.xml",
+            conf_dir = params.hbase_conf_dir,
+            configurations = params.config['configurations']['hbase-policy'],
+            configuration_attributes=params.config['configuration_attributes']['hbase-policy'],
+            owner = params.hbase_user,
+            group = params.user_group
+    )
+  # Manually overriding ownership of file installed by hadoop package
+  else: 
+    File( format("{params.hbase_conf_dir}/hbase-policy.xml"),
+      owner = params.hbase_user,
+      group = params.user_group
+    )
+
+  File(format("{hbase_conf_dir}/hbase-env.sh"),
+       owner = params.hbase_user,
+       content=InlineTemplate(params.hbase_env_sh_template),
+       group = params.user_group,
+  )
+  
+  # On some OSes this folder may not exist, so create it before pushing files into it
+  Directory(params.limits_conf_dir,
+            create_parents = True,
+            owner='root',
+            group='root'
+            )
+  
+  File(os.path.join(params.limits_conf_dir, 'hbase.conf'),
+       owner='root',
+       group='root',
+       mode=0644,
+       content=Template("hbase.conf.j2")
+       )
+
+  hbase_TemplateConfig( 'regionservers')
+
+  if params.security_enabled:
+    hbase_TemplateConfig( format("hbase_{name}_jaas.conf"))
+  
+  if name != "client":
+    Directory( params.pid_dir,
+      owner = params.hbase_user,
+      create_parents = True,
+      cd_access = "a",
+      mode = 0755,
+    )
+  
+    Directory (params.log_dir,
+      owner = params.hbase_user,
+      create_parents = True,
+      cd_access = "a",
+      mode = 0755,
+    )
+
+  if (params.log4j_props != None):
+    File(format("{params.hbase_conf_dir}/log4j.properties"),
+         mode=0644,
+         group=params.user_group,
+         owner=params.hbase_user,
+         content=InlineTemplate(params.log4j_props)
+    )
+  elif (os.path.exists(format("{params.hbase_conf_dir}/log4j.properties"))):
+    File(format("{params.hbase_conf_dir}/log4j.properties"),
+      mode=0644,
+      group=params.user_group,
+      owner=params.hbase_user
+    )
+  if name == "master":
+    params.HdfsResource(params.hbase_hdfs_root_dir,
+                         type="directory",
+                         action="create_on_execute",
+                         owner=params.hbase_user
+    )
+    params.HdfsResource(params.hbase_staging_dir,
+                         type="directory",
+                         action="create_on_execute",
+                         owner=params.hbase_user,
+                         mode=0711
+    )
+    if params.create_hbase_home_directory:
+      params.HdfsResource(params.hbase_home_directory,
+                          type="directory",
+                          action="create_on_execute",
+                          owner=params.hbase_user,
+                          mode=0755
+      )
+    params.HdfsResource(None, action="execute")
+
+  if params.phoenix_enabled:
+    Package(params.phoenix_package,
+            retry_on_repo_unavailability=params.agent_stack_retry_on_unavailability,
+            retry_count=params.agent_stack_retry_count)
+
+def hbase_TemplateConfig(name, tag=None):
+  import params
+
+  TemplateConfig( format("{hbase_conf_dir}/{name}"),
+      owner = params.hbase_user,
+      template_tag = tag
+  )

http://git-wip-us.apache.org/repos/asf/ambari/blob/41006ebe/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/hbase_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/hbase_client.py b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/hbase_client.py
new file mode 100644
index 0000000..c8128ab
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/hbase_client.py
@@ -0,0 +1,81 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+from resource_management.libraries.script.script import Script
+from resource_management.libraries.functions import conf_select, stack_select
+from resource_management.libraries.functions.constants import StackFeature
+from resource_management.libraries.functions.stack_features import check_stack_feature
+from hbase import hbase
+from ambari_commons import OSCheck, OSConst
+from ambari_commons.os_family_impl import OsFamilyImpl
+from resource_management.core.exceptions import ClientComponentHasNoStatus
+
+class HbaseClient(Script):
+  def install(self, env):
+    import params
+    env.set_params(params)
+    self.install_packages(env)
+    self.configure(env)
+
+  def configure(self, env):
+    import params
+    env.set_params(params)
+    hbase(name='client')
+
+  def status(self, env):
+    raise ClientComponentHasNoStatus()
+
+
+@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
+class HbaseClientWindows(HbaseClient):
+  pass
+
+
+@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
+class HbaseClientDefault(HbaseClient):
+  def get_component_name(self):
+    return "hbase-client"
+
+  def pre_upgrade_restart(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+
+    if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version): 
+      conf_select.select(params.stack_name, "hbase", params.version)
+      stack_select.select("hbase-client", params.version)
+
+      # phoenix may not always be deployed
+      try:
+        stack_select.select("phoenix-client", params.version)
+      except Exception as e:
+        print "Ignoring error due to missing phoenix-client"
+        print str(e)
+
+
+      # set all of the hadoop clients since hbase client is upgraded as part
+      # of the final "CLIENTS" group and we need to ensure that hadoop-client
+      # is also set
+      conf_select.select(params.stack_name, "hadoop", params.version)
+      stack_select.select("hadoop-client", params.version)
+
+
+if __name__ == "__main__":
+  HbaseClient().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/41006ebe/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/hbase_decommission.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/hbase_decommission.py b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/hbase_decommission.py
new file mode 100644
index 0000000..7358674
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/hbase_decommission.py
@@ -0,0 +1,94 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+from resource_management.core.resources.system import Execute, File
+from resource_management.core.source import StaticFile
+from resource_management.libraries.functions.format import format
+from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
+from ambari_commons import OSConst
+
+@OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
+def hbase_decommission(env):
+  import params
+
+  env.set_params(params)
+  File(params.region_drainer, content=StaticFile("draining_servers.rb"), owner=params.hbase_user, mode="f")
+
+  hosts = params.hbase_excluded_hosts.split(",")
+  for host in hosts:
+    if host:
+      if params.hbase_drain_only == True:
+        regiondrainer_cmd = format("cmd /c {hbase_executable} org.jruby.Main {region_drainer} remove {host}")
+        Execute(regiondrainer_cmd, user=params.hbase_user, logoutput=True)
+      else:
+        regiondrainer_cmd = format("cmd /c {hbase_executable} org.jruby.Main {region_drainer} add {host}")
+        regionmover_cmd = format("cmd /c {hbase_executable} org.jruby.Main {region_mover} unload {host}")
+        Execute(regiondrainer_cmd, user=params.hbase_user, logoutput=True)
+        Execute(regionmover_cmd, user=params.hbase_user, logoutput=True)
+
+
+@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
+def hbase_decommission(env):
+  import params
+
+  env.set_params(params)
+  kinit_cmd = params.kinit_cmd_master
+
+  File(params.region_drainer,
+       content=StaticFile("draining_servers.rb"),
+       mode=0755
+  )
+  
+  if params.hbase_excluded_hosts and params.hbase_excluded_hosts.split(","):
+    hosts = params.hbase_excluded_hosts.split(",")
+  elif params.hbase_included_hosts and params.hbase_included_hosts.split(","):
+    hosts = params.hbase_included_hosts.split(",")
+
+  if params.hbase_drain_only:
+    for host in hosts:
+      if host:
+        regiondrainer_cmd = format(
+          "{kinit_cmd} {hbase_cmd} --config {hbase_conf_dir} {master_security_config} org.jruby.Main {region_drainer} remove {host}")
+        Execute(regiondrainer_cmd,
+                user=params.hbase_user,
+                logoutput=True
+        )
+        pass
+    pass
+
+  else:
+    for host in hosts:
+      if host:
+        regiondrainer_cmd = format(
+          "{kinit_cmd} {hbase_cmd} --config {hbase_conf_dir} {master_security_config} org.jruby.Main {region_drainer} add {host}")
+        regionmover_cmd = format(
+          "{kinit_cmd} {hbase_cmd} --config {hbase_conf_dir} {master_security_config} org.jruby.Main {region_mover} unload {host}")
+
+        Execute(regiondrainer_cmd,
+                user=params.hbase_user,
+                logoutput=True
+        )
+
+        Execute(regionmover_cmd,
+                user=params.hbase_user,
+                logoutput=True
+        )
+      pass
+    pass
+  pass
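
For reference, this is roughly what the default (non-Windows) path executes per host on the add side, with the command template expanded using hypothetical parameter values (the paths and host are placeholders; only the template itself comes from the code above):

    # Hypothetical values typical of an HDP layout.
    params = {
        'kinit_cmd': '',
        'hbase_cmd': '/usr/hdp/current/hbase-client/bin/hbase',
        'hbase_conf_dir': '/etc/hbase/conf',
        'master_security_config': '',
        'region_drainer': '/usr/hdp/current/hbase-client/bin/draining_servers.rb',
        'host': 'rs1.example.com',
    }
    print(('{kinit_cmd} {hbase_cmd} --config {hbase_conf_dir} {master_security_config} '
           'org.jruby.Main {region_drainer} add {host}').format(**params))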

http://git-wip-us.apache.org/repos/asf/ambari/blob/41006ebe/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/hbase_master.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/hbase_master.py b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/hbase_master.py
new file mode 100644
index 0000000..d2c8089
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/hbase_master.py
@@ -0,0 +1,163 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+from resource_management.libraries.script.script import Script
+from resource_management.libraries.functions.format import format
+from resource_management.libraries.functions.check_process_status import check_process_status
+from resource_management.libraries.functions.security_commons import build_expectations, \
+  cached_kinit_executor, get_params_from_filesystem, validate_security_config_properties, \
+  FILE_TYPE_XML
+from hbase import hbase
+from hbase_service import hbase_service
+from hbase_decommission import hbase_decommission
+import upgrade
+from setup_ranger_hbase import setup_ranger_hbase
+from ambari_commons import OSCheck, OSConst
+from ambari_commons.os_family_impl import OsFamilyImpl
+
+
+class HbaseMaster(Script):
+  def configure(self, env):
+    import params
+    env.set_params(params)
+    hbase(name='master')
+
+  def install(self, env):
+    import params
+    env.set_params(params)
+    self.install_packages(env)
+
+  def decommission(self, env):
+    import params
+    env.set_params(params)
+    hbase_decommission(env)
+
+
+@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
+class HbaseMasterWindows(HbaseMaster):
+  def start(self, env):
+    import status_params
+    self.configure(env)
+    Service(status_params.hbase_master_win_service_name, action="start")
+
+  def stop(self, env):
+    import status_params
+    env.set_params(status_params)
+    Service(status_params.hbase_master_win_service_name, action="stop")
+
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+    check_windows_service_status(status_params.hbase_master_win_service_name)
+
+
+
+@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
+class HbaseMasterDefault(HbaseMaster):
+  def get_component_name(self):
+    return "hbase-master"
+
+  def pre_upgrade_restart(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+    upgrade.prestart(env, "hbase-master")
+
+  def start(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+    self.configure(env) # for security
+    setup_ranger_hbase(upgrade_type=upgrade_type, service_name="hbase-master")
+    hbase_service('master', action = 'start')
+    
+  def stop(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+    hbase_service('master', action = 'stop')
+
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+
+    check_process_status(status_params.hbase_master_pid_file)
+
+  def security_status(self, env):
+    import status_params
+
+    env.set_params(status_params)
+    if status_params.security_enabled:
+      props_value_check = {"hbase.security.authentication" : "kerberos",
+                           "hbase.security.authorization": "true"}
+      props_empty_check = ['hbase.master.keytab.file',
+                           'hbase.master.kerberos.principal']
+      props_read_check = ['hbase.master.keytab.file']
+      hbase_site_expectations = build_expectations('hbase-site', props_value_check, props_empty_check,
+                                                  props_read_check)
+
+      hbase_expectations = {}
+      hbase_expectations.update(hbase_site_expectations)
+
+      security_params = get_params_from_filesystem(status_params.hbase_conf_dir,
+                                                   {'hbase-site.xml': FILE_TYPE_XML})
+      result_issues = validate_security_config_properties(security_params, hbase_expectations)
+      if not result_issues:  # If all validations passed successfully
+        try:
+          # Double check the dict before calling execute
+          if ( 'hbase-site' not in security_params
+               or 'hbase.master.keytab.file' not in security_params['hbase-site']
+               or 'hbase.master.kerberos.principal' not in security_params['hbase-site']):
+            self.put_structured_out({"securityState": "UNSECURED"})
+            self.put_structured_out(
+              {"securityIssuesFound": "Keytab file or principal are not set property."})
+            return
+
+          cached_kinit_executor(status_params.kinit_path_local,
+                                status_params.hbase_user,
+                                security_params['hbase-site']['hbase.master.keytab.file'],
+                                security_params['hbase-site']['hbase.master.kerberos.principal'],
+                                status_params.hostname,
+                                status_params.tmp_dir)
+          self.put_structured_out({"securityState": "SECURED_KERBEROS"})
+        except Exception as e:
+          self.put_structured_out({"securityState": "ERROR"})
+          self.put_structured_out({"securityStateErrorInfo": str(e)})
+      else:
+        issues = []
+        for cf in result_issues:
+          issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
+        self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
+        self.put_structured_out({"securityState": "UNSECURED"})
+    else:
+      self.put_structured_out({"securityState": "UNSECURED"})
+      
+  def get_log_folder(self):
+    import params
+    return params.log_dir
+  
+  def get_user(self):
+    import params
+    return params.hbase_user
+
+  def get_pid_files(self):
+    import status_params
+    return [status_params.hbase_master_pid_file]
+
+if __name__ == "__main__":
+  HbaseMaster().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/41006ebe/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/hbase_regionserver.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/hbase_regionserver.py b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/hbase_regionserver.py
new file mode 100644
index 0000000..226e7fd5
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/hbase_regionserver.py
@@ -0,0 +1,174 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+
+from resource_management.core import shell
+from resource_management.libraries.script.script import Script
+from resource_management.libraries.functions.format import format
+from resource_management.libraries.functions.check_process_status import check_process_status
+from resource_management.libraries.functions.security_commons import build_expectations, \
+  cached_kinit_executor, get_params_from_filesystem, validate_security_config_properties, \
+  FILE_TYPE_XML
+
+from ambari_commons import OSCheck, OSConst
+from ambari_commons.os_family_impl import OsFamilyImpl
+
+from hbase import hbase
+from hbase_service import hbase_service
+import upgrade
+from setup_ranger_hbase import setup_ranger_hbase
+
+
+class HbaseRegionServer(Script):
+  def install(self, env):
+    import params
+    env.set_params(params)
+    self.install_packages(env)
+
+  def configure(self, env):
+    import params
+    env.set_params(params)
+    hbase(name='regionserver')
+
+  def decommission(self, env):
+    print "Decommission not yet implemented!"
+
+
+
+@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
+class HbaseRegionServerWindows(HbaseRegionServer):
+  def start(self, env):
+    import status_params
+    self.configure(env)
+    Service(status_params.hbase_regionserver_win_service_name, action="start")
+
+  def stop(self, env):
+    import status_params
+    env.set_params(status_params)
+    Service(status_params.hbase_regionserver_win_service_name, action="stop")
+
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+    check_windows_service_status(status_params.hbase_regionserver_win_service_name)
+
+
+
+@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
+class HbaseRegionServerDefault(HbaseRegionServer):
+  def get_component_name(self):
+    return "hbase-regionserver"
+
+  def pre_upgrade_restart(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+    upgrade.prestart(env, "hbase-regionserver")
+
+  def post_upgrade_restart(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+    upgrade.post_regionserver(env)
+
+  def start(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+    self.configure(env) # for security
+    setup_ranger_hbase(upgrade_type=upgrade_type, service_name="hbase-regionserver")
+
+    hbase_service('regionserver', action='start')
+
+  def stop(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+
+    hbase_service( 'regionserver',
+      action = 'stop'
+    )
+
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+
+    check_process_status(status_params.regionserver_pid_file)
+
+  def security_status(self, env):
+    import status_params
+
+    env.set_params(status_params)
+    if status_params.security_enabled:
+      props_value_check = {"hbase.security.authentication" : "kerberos",
+                           "hbase.security.authorization": "true"}
+      props_empty_check = ['hbase.regionserver.keytab.file',
+                           'hbase.regionserver.kerberos.principal']
+      props_read_check = ['hbase.regionserver.keytab.file']
+      hbase_site_expectations = build_expectations('hbase-site', props_value_check, props_empty_check,
+                                                   props_read_check)
+
+      hbase_expectations = {}
+      hbase_expectations.update(hbase_site_expectations)
+
+      security_params = get_params_from_filesystem(status_params.hbase_conf_dir,
+                                                   {'hbase-site.xml': FILE_TYPE_XML})
+      result_issues = validate_security_config_properties(security_params, hbase_expectations)
+      if not result_issues:  # If all validations passed successfully
+        try:
+          # Double check the dict before calling execute
+          if ( 'hbase-site' not in security_params
+               or 'hbase.regionserver.keytab.file' not in security_params['hbase-site']
+               or 'hbase.regionserver.kerberos.principal' not in security_params['hbase-site']):
+            self.put_structured_out({"securityState": "UNSECURED"})
+            self.put_structured_out(
+              {"securityIssuesFound": "Keytab file or principal are not set property."})
+            return
+
+          cached_kinit_executor(status_params.kinit_path_local,
+                                status_params.hbase_user,
+                                security_params['hbase-site']['hbase.regionserver.keytab.file'],
+                                security_params['hbase-site']['hbase.regionserver.kerberos.principal'],
+                                status_params.hostname,
+                                status_params.tmp_dir)
+          self.put_structured_out({"securityState": "SECURED_KERBEROS"})
+        except Exception as e:
+          self.put_structured_out({"securityState": "ERROR"})
+          self.put_structured_out({"securityStateErrorInfo": str(e)})
+      else:
+        issues = []
+        for cf in result_issues:
+          issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
+        self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
+        self.put_structured_out({"securityState": "UNSECURED"})
+    else:
+      self.put_structured_out({"securityState": "UNSECURED"})
+
+  def get_log_folder(self):
+    import params
+    return params.log_dir
+  
+  def get_user(self):
+    import params
+    return params.hbase_user
+
+  def get_pid_files(self):
+    import status_params
+    return [status_params.regionserver_pid_file]
+
+if __name__ == "__main__":
+  HbaseRegionServer().execute()
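
The security_status implementation above follows the usual agent pattern: declare the expected hbase-site properties, read the live configuration from disk, validate, and only then attempt a cached kinit. A minimal standalone sketch of that validate-then-report flow, with hypothetical values and no resource_management dependencies:

expected = {
  'hbase-site': {
    'hbase.security.authentication': 'kerberos',
    'hbase.security.authorization': 'true',
  }
}

actual = {  # in the real script this is read from hbase-site.xml on disk
  'hbase-site': {
    'hbase.security.authentication': 'simple',
    'hbase.security.authorization': 'true',
  }
}

def validate(expected, actual):
  issues = {}
  for config_file, props in expected.items():
    live = actual.get(config_file, {})
    bad = [k for k, v in props.items() if live.get(k) != v]
    if bad:
      issues[config_file] = "wrong or missing values: %s" % ", ".join(bad)
  return issues

result = validate(expected, actual)
print(result or {"securityState": "SECURED_KERBEROS"})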

http://git-wip-us.apache.org/repos/asf/ambari/blob/41006ebe/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/hbase_service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/hbase_service.py b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/hbase_service.py
new file mode 100644
index 0000000..1dbd560
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/hbase_service.py
@@ -0,0 +1,66 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management.libraries.functions.format import format
+from resource_management.libraries.functions.show_logs import show_logs
+from resource_management.core.shell import as_sudo
+from resource_management.core.resources.system import Execute, File
+
+def hbase_service(
+  name,
+  action = 'start'): # 'start' or 'stop' or 'status'
+    
+    import params
+  
+    role = name
+    cmd = format("{daemon_script} --config {hbase_conf_dir}")
+    pid_file = format("{pid_dir}/hbase-{hbase_user}-{role}.pid")
+    pid_expression = as_sudo(["cat", pid_file])
+    no_op_test = as_sudo(["test", "-f", pid_file]) + format(" && ps -p `{pid_expression}` >/dev/null 2>&1")
+    
+    if action == 'start':
+      daemon_cmd = format("{cmd} start {role}")
+      
+      try:
+        Execute ( daemon_cmd,
+          not_if = no_op_test,
+          user = params.hbase_user
+        )
+      except:
+        show_logs(params.log_dir, params.hbase_user)
+        raise
+    elif action == 'stop':
+      daemon_cmd = format("{cmd} stop {role}")
+
+      try:
+        Execute ( daemon_cmd,
+          user = params.hbase_user,
+          only_if = no_op_test,
+          # BUGFIX: hbase regionserver sometimes hangs when nn is in safemode
+          timeout = params.hbase_regionserver_shutdown_timeout,
+          on_timeout = format("! ( {no_op_test} ) || {sudo} -H -E kill -9 `{pid_expression}`"),
+        )
+      except:
+        show_logs(params.log_dir, params.hbase_user)
+        raise
+      
+      File(pid_file,
+           action = "delete",
+      )
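
Worth noting in the diff above: start is guarded with not_if and stop with only_if on the same pid-file liveness test, so both actions are idempotent. A rough standalone sketch of that liveness check, using a hypothetical pid file path and plain subprocess in place of the Execute resource:

import subprocess

pid_file = "/var/run/hbase/hbase-hbase-regionserver.pid"  # hypothetical path

def process_is_running(pid_file):
  # mirrors: test -f <pid_file> && ps -p `cat <pid_file>` >/dev/null 2>&1
  cmd = "test -f %s && ps -p `cat %s` >/dev/null 2>&1" % (pid_file, pid_file)
  return subprocess.call(cmd, shell=True) == 0

# start path: skip when already running (Execute's not_if)
if not process_is_running(pid_file):
  print("would run: hbase-daemon.sh --config ... start regionserver")

# stop path: skip when already stopped (Execute's only_if)
if process_is_running(pid_file):
  print("would run: hbase-daemon.sh --config ... stop regionserver")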

http://git-wip-us.apache.org/repos/asf/ambari/blob/41006ebe/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/hbase_upgrade.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/hbase_upgrade.py b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/hbase_upgrade.py
new file mode 100644
index 0000000..e5bb781
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/hbase_upgrade.py
@@ -0,0 +1,42 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+from resource_management.libraries.script import Script
+from resource_management.libraries.functions.format import format
+from resource_management.core.resources.system import Execute
+
+class HbaseMasterUpgrade(Script):
+
+  def take_snapshot(self, env):
+    import params
+
+    snap_cmd = "echo 'snapshot_all' | {0} shell".format(params.hbase_cmd)
+
+    exec_cmd = "{0} {1}".format(params.kinit_cmd, snap_cmd)
+
+    Execute(exec_cmd, user=params.hbase_user)
+
+  def restore_snapshot(self, env):
+    import params
+    print "TODO AMBARI-12698"
+
+if __name__ == "__main__":
+  HbaseMasterUpgrade().execute()
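
For clarity, the take_snapshot step pipes a shell command into the hbase shell, optionally prefixed by a kinit. The composed string, under assumed values:

hbase_cmd = "/usr/lib/hbase/bin/hbase"  # assumed binary path
kinit_cmd = ""                          # empty when security is disabled

snap_cmd = "echo 'snapshot_all' | {0} shell".format(hbase_cmd)
exec_cmd = "{0} {1}".format(kinit_cmd, snap_cmd)
print(exec_cmd)
# -> " echo 'snapshot_all' | /usr/lib/hbase/bin/hbase shell"
# (the leading space when kinit_cmd is empty is harmless to the shell)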

http://git-wip-us.apache.org/repos/asf/ambari/blob/41006ebe/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/params.py b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/params.py
new file mode 100644
index 0000000..e0607f3
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/params.py
@@ -0,0 +1,28 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+from ambari_commons import OSCheck
+from resource_management.libraries.functions.default import default
+
+if OSCheck.is_windows_family():
+  from params_windows import *
+else:
+  from params_linux import *
+
+retryAble = default("/commandParams/command_retry_enabled", False)
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/41006ebe/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/params_linux.py
new file mode 100644
index 0000000..b7e2b89
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/params_linux.py
@@ -0,0 +1,426 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+import status_params
+import ambari_simplejson as json # simplejson is much faster compared to the Python 2.6 json module and has the same function set.
+
+from functions import calc_xmn_from_xms, ensure_unit_for_memory
+
+from ambari_commons.constants import AMBARI_SUDO_BINARY
+from ambari_commons.os_check import OSCheck
+from ambari_commons.str_utils import string_set_intersection
+
+from resource_management.libraries.resources.hdfs_resource import HdfsResource
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions import format
+from resource_management.libraries.functions import StackFeature
+from resource_management.libraries.functions.stack_features import check_stack_feature
+from resource_management.libraries.functions.stack_features import get_stack_feature_version
+from resource_management.libraries.functions.default import default
+from resource_management.libraries.functions import get_kinit_path
+from resource_management.libraries.functions import is_empty
+from resource_management.libraries.functions import get_unique_id_and_date
+from resource_management.libraries.functions.get_not_managed_resources import get_not_managed_resources
+from resource_management.libraries.script.script import Script
+from resource_management.libraries.functions.expect import expect
+from ambari_commons.ambari_metrics_helper import select_metric_collector_hosts_from_hostnames
+from resource_management.libraries.functions.setup_ranger_plugin_xml import get_audit_configs, generate_ranger_service_config
+
+# server configurations
+config = Script.get_config()
+exec_tmp_dir = Script.get_tmp_dir()
+sudo = AMBARI_SUDO_BINARY
+
+stack_name = status_params.stack_name
+agent_stack_retry_on_unavailability = config['hostLevelParams']['agent_stack_retry_on_unavailability']
+agent_stack_retry_count = expect("/hostLevelParams/agent_stack_retry_count", int)
+version = default("/commandParams/version", None)
+component_directory = status_params.component_directory
+etc_prefix_dir = "/etc/hbase"
+
+stack_version_unformatted = status_params.stack_version_unformatted
+stack_version_formatted = status_params.stack_version_formatted
+stack_root = status_params.stack_root
+
+# get the correct version to use for checking stack features
+version_for_stack_feature_checks = get_stack_feature_version(config)
+
+stack_supports_ranger_kerberos = check_stack_feature(StackFeature.RANGER_KERBEROS_SUPPORT, version_for_stack_feature_checks)
+stack_supports_ranger_audit_db = check_stack_feature(StackFeature.RANGER_AUDIT_DB_SUPPORT, version_for_stack_feature_checks)
+
+# hadoop default parameters
+hadoop_bin_dir = stack_select.get_hadoop_dir("bin")
+hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
+daemon_script = "/usr/lib/hbase/bin/hbase-daemon.sh"
+region_mover = "/usr/lib/hbase/bin/region_mover.rb"
+region_drainer = "/usr/lib/hbase/bin/draining_servers.rb"
+hbase_cmd = "/usr/lib/hbase/bin/hbase"
+hbase_max_direct_memory_size = None
+
+# hadoop parameters for stacks supporting rolling_upgrade
+if stack_version_formatted and check_stack_feature(StackFeature.ROLLING_UPGRADE, stack_version_formatted):
+  daemon_script = format("{stack_root}/current/{component_directory}/bin/hbase-daemon.sh")
+  region_mover = format("{stack_root}/current/{component_directory}/bin/region_mover.rb")
+  region_drainer = format("{stack_root}/current/{component_directory}/bin/draining_servers.rb")
+  hbase_cmd = format("{stack_root}/current/{component_directory}/bin/hbase")
+
+  hbase_max_direct_memory_size = default('/configurations/hbase-env/hbase_max_direct_memory_size', None)
+
+
+hbase_conf_dir = status_params.hbase_conf_dir
+limits_conf_dir = status_params.limits_conf_dir
+
+hbase_user_nofile_limit = default("/configurations/hbase-env/hbase_user_nofile_limit", "32000")
+hbase_user_nproc_limit = default("/configurations/hbase-env/hbase_user_nproc_limit", "16000")
+
+# no symlink for phoenix-server at this point
+phx_daemon_script = format('{stack_root}/current/phoenix-server/bin/queryserver.py')
+
+hbase_excluded_hosts = config['commandParams']['excluded_hosts']
+hbase_drain_only = default("/commandParams/mark_draining_only",False)
+hbase_included_hosts = config['commandParams']['included_hosts']
+
+hbase_user = status_params.hbase_user
+hbase_principal_name = config['configurations']['hbase-env']['hbase_principal_name']
+smokeuser = config['configurations']['cluster-env']['smokeuser']
+_authentication = config['configurations']['core-site']['hadoop.security.authentication']
+security_enabled = config['configurations']['cluster-env']['security_enabled']
+
+# this is "hadoop-metrics.properties" for 1.x stacks
+metric_prop_file_name = "hadoop-metrics2-hbase.properties"
+
+# 32-bit JDK is not supported.
+java64_home = config['hostLevelParams']['java_home']
+java_version = expect("/hostLevelParams/java_version", int)
+
+log_dir = config['configurations']['hbase-env']['hbase_log_dir']
+java_io_tmpdir = default("/configurations/hbase-env/hbase_java_io_tmpdir", "/tmp")
+master_heapsize = ensure_unit_for_memory(config['configurations']['hbase-env']['hbase_master_heapsize'])
+
+regionserver_heapsize = ensure_unit_for_memory(config['configurations']['hbase-env']['hbase_regionserver_heapsize'])
+regionserver_xmn_max = config['configurations']['hbase-env']['hbase_regionserver_xmn_max']
+regionserver_xmn_percent = expect("/configurations/hbase-env/hbase_regionserver_xmn_ratio", float)
+regionserver_xmn_size = calc_xmn_from_xms(regionserver_heapsize, regionserver_xmn_percent, regionserver_xmn_max)
+
+hbase_regionserver_shutdown_timeout = expect('/configurations/hbase-env/hbase_regionserver_shutdown_timeout', int, 30)
+
+phoenix_hosts = default('/clusterHostInfo/phoenix_query_server_hosts', [])
+phoenix_enabled = default('/configurations/hbase-env/phoenix_sql_enabled', False)
+has_phoenix = len(phoenix_hosts) > 0
+
+underscored_version = stack_version_unformatted.replace('.', '_')
+dashed_version = stack_version_unformatted.replace('.', '-')
+if OSCheck.is_redhat_family() or OSCheck.is_suse_family():
+  phoenix_package = format("phoenix_{underscored_version}_*")
+elif OSCheck.is_ubuntu_family():
+  phoenix_package = format("phoenix-{dashed_version}-.*")
+
+pid_dir = status_params.pid_dir
+tmp_dir = config['configurations']['hbase-site']['hbase.tmp.dir']
+local_dir = config['configurations']['hbase-site']['hbase.local.dir']
+ioengine_param = default('/configurations/hbase-site/hbase.bucketcache.ioengine', None)
+
+client_jaas_config_file = format("{hbase_conf_dir}/hbase_client_jaas.conf")
+master_jaas_config_file = format("{hbase_conf_dir}/hbase_master_jaas.conf")
+regionserver_jaas_config_file = format("{hbase_conf_dir}/hbase_regionserver_jaas.conf")
+queryserver_jaas_config_file = format("{hbase_conf_dir}/hbase_queryserver_jaas.conf")
+
+ams_collector_hosts = ",".join(default("/clusterHostInfo/metrics_collector_hosts", []))
+has_metric_collector = len(ams_collector_hosts) > 0
+if has_metric_collector:
+  if 'cluster-env' in config['configurations'] and \
+      'metrics_collector_vip_port' in config['configurations']['cluster-env']:
+    metric_collector_port = config['configurations']['cluster-env']['metrics_collector_vip_port']
+  else:
+    metric_collector_web_address = default("/configurations/ams-site/timeline.metrics.service.webapp.address", "0.0.0.0:6188")
+    if metric_collector_web_address.find(':') != -1:
+      metric_collector_port = metric_collector_web_address.split(':')[1]
+    else:
+      metric_collector_port = '6188'
+  if default("/configurations/ams-site/timeline.metrics.service.http.policy", "HTTP_ONLY") == "HTTPS_ONLY":
+    metric_collector_protocol = 'https'
+  else:
+    metric_collector_protocol = 'http'
+  metric_truststore_path= default("/configurations/ams-ssl-client/ssl.client.truststore.location", "")
+  metric_truststore_type= default("/configurations/ams-ssl-client/ssl.client.truststore.type", "")
+  metric_truststore_password= default("/configurations/ams-ssl-client/ssl.client.truststore.password", "")
+
+metrics_report_interval = default("/configurations/ams-site/timeline.metrics.sink.report.interval", 60)
+metrics_collection_period = default("/configurations/ams-site/timeline.metrics.sink.collection.period", 10)
+
+# if hbase is selected, hbase_rs_hosts should not be empty, but still default just in case.
+# if hbase_rs_hosts is not given, region servers are assumed to run on the same nodes as the slaves.
+if 'slave_hosts' in config['clusterHostInfo']:
+  rs_hosts = default('/clusterHostInfo/hbase_rs_hosts', config['clusterHostInfo']['slave_hosts'])
+else:
+  rs_hosts = default('/clusterHostInfo/hbase_rs_hosts', config['clusterHostInfo']['all_hosts'])
+
+smoke_test_user = config['configurations']['cluster-env']['smokeuser']
+smokeuser_principal =  config['configurations']['cluster-env']['smokeuser_principal_name']
+smokeuser_permissions = "RWXCA"
+service_check_data = get_unique_id_and_date()
+user_group = config['configurations']['cluster-env']["user_group"]
+
+if security_enabled:
+  _hostname_lowercase = config['hostname'].lower()
+  master_jaas_princ = config['configurations']['hbase-site']['hbase.master.kerberos.principal'].replace('_HOST',_hostname_lowercase)
+  master_keytab_path = config['configurations']['hbase-site']['hbase.master.keytab.file']
+  regionserver_jaas_princ = config['configurations']['hbase-site']['hbase.regionserver.kerberos.principal'].replace('_HOST',_hostname_lowercase)
+  _queryserver_jaas_princ = config['configurations']['hbase-site']['phoenix.queryserver.kerberos.principal']
+  if not is_empty(_queryserver_jaas_princ):
+    queryserver_jaas_princ =_queryserver_jaas_princ.replace('_HOST',_hostname_lowercase)
+
+regionserver_keytab_path = config['configurations']['hbase-site']['hbase.regionserver.keytab.file']
+queryserver_keytab_path = config['configurations']['hbase-site']['phoenix.queryserver.keytab.file']
+smoke_user_keytab = config['configurations']['cluster-env']['smokeuser_keytab']
+hbase_user_keytab = config['configurations']['hbase-env']['hbase_user_keytab']
+kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
+if security_enabled:
+  kinit_cmd = format("{kinit_path_local} -kt {hbase_user_keytab} {hbase_principal_name};")
+  kinit_cmd_master = format("{kinit_path_local} -kt {master_keytab_path} {master_jaas_princ};")
+  master_security_config = format("-Djava.security.auth.login.config={hbase_conf_dir}/hbase_master_jaas.conf")
+else:
+  kinit_cmd = ""
+  kinit_cmd_master = ""
+  master_security_config = ""
+
+#log4j.properties
+# HBase log4j settings
+hbase_log_maxfilesize = default('configurations/hbase-log4j/hbase_log_maxfilesize',256)
+hbase_log_maxbackupindex = default('configurations/hbase-log4j/hbase_log_maxbackupindex',20)
+hbase_security_log_maxfilesize = default('configurations/hbase-log4j/hbase_security_log_maxfilesize',256)
+hbase_security_log_maxbackupindex = default('configurations/hbase-log4j/hbase_security_log_maxbackupindex',20)
+
+if (('hbase-log4j' in config['configurations']) and ('content' in config['configurations']['hbase-log4j'])):
+  log4j_props = config['configurations']['hbase-log4j']['content']
+else:
+  log4j_props = None
+  
+hbase_env_sh_template = config['configurations']['hbase-env']['content']
+
+hbase_hdfs_root_dir = config['configurations']['hbase-site']['hbase.rootdir']
+hbase_staging_dir = "/apps/hbase/staging"
+#for create_hdfs_directory
+hostname = config["hostname"]
+hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
+hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
+hdfs_principal_name = config['configurations']['hadoop-env']['hdfs_principal_name']
+
+hdfs_site = config['configurations']['hdfs-site']
+default_fs = config['configurations']['core-site']['fs.defaultFS']
+
+dfs_type = default("/commandParams/dfs_type", "")
+
+import functools
+#create partial functions with common arguments for every HdfsResource call
+#to create/delete hdfs directory/file/copyfromlocal we need to call params.HdfsResource in code
+HdfsResource = functools.partial(
+  HdfsResource,
+  user=hdfs_user,
+  hdfs_resource_ignore_file = "/var/lib/ambari-agent/data/.hdfs_resource_ignore",
+  security_enabled = security_enabled,
+  keytab = hdfs_user_keytab,
+  kinit_path_local = kinit_path_local,
+  hadoop_bin_dir = hadoop_bin_dir,
+  hadoop_conf_dir = hadoop_conf_dir,
+  principal_name = hdfs_principal_name,
+  hdfs_site = hdfs_site,
+  default_fs = default_fs,
+  immutable_paths = get_not_managed_resources(),
+  dfs_type = dfs_type
+)
+
+zookeeper_znode_parent = config['configurations']['hbase-site']['zookeeper.znode.parent']
+hbase_zookeeper_quorum = config['configurations']['hbase-site']['hbase.zookeeper.quorum']
+hbase_zookeeper_property_clientPort = config['configurations']['hbase-site']['hbase.zookeeper.property.clientPort']
+hbase_security_authentication = config['configurations']['hbase-site']['hbase.security.authentication']
+hadoop_security_authentication = config['configurations']['core-site']['hadoop.security.authentication']
+
+# ranger hbase plugin section start
+
+# to get db connector jar
+jdk_location = config['hostLevelParams']['jdk_location']
+
+# ranger host
+ranger_admin_hosts = default("/clusterHostInfo/ranger_admin_hosts", [])
+has_ranger_admin = len(ranger_admin_hosts) > 0
+
+# ranger supports the xml_configuration flag; instead of depending on the ranger-env property xml_configurations_supported, use the stack feature
+xml_configurations_supported = check_stack_feature(StackFeature.RANGER_XML_CONFIGURATION, version_for_stack_feature_checks)
+
+# ambari-server hostname
+ambari_server_hostname = config['clusterHostInfo']['ambari_server_host'][0]
+
+# ranger hbase plugin enabled property
+enable_ranger_hbase = default("/configurations/ranger-hbase-plugin-properties/ranger-hbase-plugin-enabled", "No")
+enable_ranger_hbase = True if enable_ranger_hbase.lower() == 'yes' else False
+
+# ranger hbase properties
+if enable_ranger_hbase:
+  # get ranger policy url
+  policymgr_mgr_url = config['configurations']['admin-properties']['policymgr_external_url']
+  if xml_configurations_supported:
+    policymgr_mgr_url = config['configurations']['ranger-hbase-security']['ranger.plugin.hbase.policy.rest.url']
+
+  if not is_empty(policymgr_mgr_url) and policymgr_mgr_url.endswith('/'):
+    policymgr_mgr_url = policymgr_mgr_url.rstrip('/')
+
+  # ranger audit db user
+  xa_audit_db_user = default('/configurations/admin-properties/audit_db_user', 'rangerlogger')
+
+  # ranger hbase service/repository name
+  repo_name = str(config['clusterName']) + '_hbase'
+  repo_name_value = config['configurations']['ranger-hbase-security']['ranger.plugin.hbase.service.name']
+  if not is_empty(repo_name_value) and repo_name_value != "{{repo_name}}":
+    repo_name = repo_name_value
+
+  common_name_for_certificate = config['configurations']['ranger-hbase-plugin-properties']['common.name.for.certificate']
+  repo_config_username = config['configurations']['ranger-hbase-plugin-properties']['REPOSITORY_CONFIG_USERNAME']
+  ranger_plugin_properties = config['configurations']['ranger-hbase-plugin-properties']
+  policy_user = config['configurations']['ranger-hbase-plugin-properties']['policy_user']
+  repo_config_password = config['configurations']['ranger-hbase-plugin-properties']['REPOSITORY_CONFIG_PASSWORD']
+
+  # ranger-env config
+  ranger_env = config['configurations']['ranger-env']
+
+  # create ranger-env config having external ranger credential properties
+  if not has_ranger_admin and enable_ranger_hbase:
+    external_admin_username = default('/configurations/ranger-hbase-plugin-properties/external_admin_username', 'admin')
+    external_admin_password = default('/configurations/ranger-hbase-plugin-properties/external_admin_password', 'admin')
+    external_ranger_admin_username = default('/configurations/ranger-hbase-plugin-properties/external_ranger_admin_username', 'amb_ranger_admin')
+    external_ranger_admin_password = default('/configurations/ranger-hbase-plugin-properties/external_ranger_admin_password', 'amb_ranger_admin')
+    ranger_env = {}
+    ranger_env['admin_username'] = external_admin_username
+    ranger_env['admin_password'] = external_admin_password
+    ranger_env['ranger_admin_username'] = external_ranger_admin_username
+    ranger_env['ranger_admin_password'] = external_ranger_admin_password
+
+  xa_audit_db_password = ''
+  if not is_empty(config['configurations']['admin-properties']['audit_db_password']) and stack_supports_ranger_audit_db and has_ranger_admin:
+    xa_audit_db_password = config['configurations']['admin-properties']['audit_db_password']
+
+  downloaded_custom_connector = None
+  previous_jdbc_jar_name = None
+  driver_curl_source = None
+  driver_curl_target = None
+  previous_jdbc_jar = None
+
+  if has_ranger_admin and stack_supports_ranger_audit_db:
+    xa_audit_db_flavor = config['configurations']['admin-properties']['DB_FLAVOR']
+    jdbc_jar_name, previous_jdbc_jar_name, audit_jdbc_url, jdbc_driver = get_audit_configs(config)
+
+    downloaded_custom_connector = format("{exec_tmp_dir}/{jdbc_jar_name}") if stack_supports_ranger_audit_db else None
+    driver_curl_source = format("{jdk_location}/{jdbc_jar_name}") if stack_supports_ranger_audit_db else None
+    driver_curl_target = format("{stack_root}/current/{component_directory}/lib/{jdbc_jar_name}") if stack_supports_ranger_audit_db else None
+    previous_jdbc_jar = format("{stack_root}/current/{component_directory}/lib/{previous_jdbc_jar_name}") if stack_supports_ranger_audit_db else None
+    sql_connector_jar = ''
+
+  if security_enabled:
+    master_principal = config['configurations']['hbase-site']['hbase.master.kerberos.principal']
+
+  hbase_ranger_plugin_config = {
+    'username': repo_config_username,
+    'password': repo_config_password,
+    'hadoop.security.authentication': hadoop_security_authentication,
+    'hbase.security.authentication': hbase_security_authentication,
+    'hbase.zookeeper.property.clientPort': hbase_zookeeper_property_clientPort,
+    'hbase.zookeeper.quorum': hbase_zookeeper_quorum,
+    'zookeeper.znode.parent': zookeeper_znode_parent,
+    'commonNameForCertificate': common_name_for_certificate,
+    'hbase.master.kerberos.principal': master_principal if security_enabled else ''
+  }
+
+  hbase_ranger_plugin_repo = {
+    'isActive': 'true',
+    'config': json.dumps(hbase_ranger_plugin_config),
+    'description': 'hbase repo',
+    'name': repo_name,
+    'repositoryType': 'hbase',
+    'assetType': '2'
+  }
+
+  custom_ranger_service_config = generate_ranger_service_config(ranger_plugin_properties)
+  if len(custom_ranger_service_config) > 0:
+    hbase_ranger_plugin_config.update(custom_ranger_service_config)
+
+  if stack_supports_ranger_kerberos and security_enabled:
+    hbase_ranger_plugin_config['policy.download.auth.users'] = hbase_user
+    hbase_ranger_plugin_config['tag.download.auth.users'] = hbase_user
+    hbase_ranger_plugin_config['policy.grantrevoke.auth.users'] = hbase_user
+
+  if stack_supports_ranger_kerberos:
+    hbase_ranger_plugin_config['ambari.service.check.user'] = policy_user
+
+    hbase_ranger_plugin_repo = {
+      'isEnabled': 'true',
+      'configs': hbase_ranger_plugin_config,
+      'description': 'hbase repo',
+      'name': repo_name,
+      'type': 'hbase'
+    }
+
+  ranger_hbase_principal = None
+  ranger_hbase_keytab = None
+  if stack_supports_ranger_kerberos and security_enabled and 'hbase-master' in component_directory.lower():
+    ranger_hbase_principal = master_jaas_princ
+    ranger_hbase_keytab = master_keytab_path
+  elif stack_supports_ranger_kerberos and security_enabled and 'hbase-regionserver' in component_directory.lower():
+    ranger_hbase_principal = regionserver_jaas_princ
+    ranger_hbase_keytab = regionserver_keytab_path
+
+  xa_audit_db_is_enabled = False
+  if xml_configurations_supported and stack_supports_ranger_audit_db:
+    xa_audit_db_is_enabled = config['configurations']['ranger-hbase-audit']['xasecure.audit.destination.db']
+
+  xa_audit_hdfs_is_enabled = config['configurations']['ranger-hbase-audit']['xasecure.audit.destination.hdfs'] if xml_configurations_supported else False
+  ssl_keystore_password = config['configurations']['ranger-hbase-policymgr-ssl']['xasecure.policymgr.clientssl.keystore.password'] if xml_configurations_supported else None
+  ssl_truststore_password = config['configurations']['ranger-hbase-policymgr-ssl']['xasecure.policymgr.clientssl.truststore.password'] if xml_configurations_supported else None
+  credential_file = format('/etc/ranger/{repo_name}/cred.jceks')
+
+  # for SQLA explicitly disable audit to DB for Ranger
+  if has_ranger_admin and stack_supports_ranger_audit_db and xa_audit_db_flavor.lower() == 'sqla':
+    xa_audit_db_is_enabled = False
+
+# need this to capture cluster name from where ranger hbase plugin is enabled
+cluster_name = config['clusterName']
+
+# ranger hbase plugin section end
+
+create_hbase_home_directory = check_stack_feature(StackFeature.HBASE_HOME_DIRECTORY, stack_version_formatted)
+hbase_home_directory = format("/user/{hbase_user}")
+
+atlas_hosts = default('/clusterHostInfo/atlas_server_hosts', [])
+has_atlas = len(atlas_hosts) > 0
+
+metadata_user = default('/configurations/atlas-env/metadata_user', None)
+atlas_graph_storage_hostname = default('/configurations/application-properties/atlas.graph.storage.hostname', None)
+atlas_graph_storage_hbase_table = default('/configurations/application-properties/atlas.graph.storage.hbase.table', None)
+atlas_audit_hbase_tablename = default('/configurations/application-properties/atlas.audit.hbase.tablename', None)
+
+if has_atlas:
+  zk_hosts_matches = string_set_intersection(atlas_graph_storage_hostname, hbase_zookeeper_quorum)
+  atlas_with_managed_hbase = len(zk_hosts_matches) > 0
+else:
+  atlas_with_managed_hbase = False
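
One pattern in the params file above deserves a note: HdfsResource is rebound with functools.partial so that every later call site inherits the common keyword arguments (user, keytab, hdfs-site, and so on) and only passes what varies. A tiny self-contained sketch of the same technique, with a stand-in function:

import functools

def hdfs_resource(path, user=None, security_enabled=False, action=None):
  # stand-in for the real HdfsResource resource
  print("%s user=%s secured=%s action=%s" % (path, user, security_enabled, action))

# pre-bind the arguments shared by every call
HdfsResource = functools.partial(hdfs_resource, user="hdfs", security_enabled=True)

# call sites now pass only what varies
HdfsResource("/apps/hbase/staging", action="create_on_execute")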

http://git-wip-us.apache.org/repos/asf/ambari/blob/41006ebe/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/params_windows.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/params_windows.py b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/params_windows.py
new file mode 100644
index 0000000..ddc9e93
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/params_windows.py
@@ -0,0 +1,43 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import os
+import status_params
+from resource_management.libraries.script.script import Script
+
+# server configurations
+config = Script.get_config()
+hbase_conf_dir = os.environ["HBASE_CONF_DIR"]
+hbase_bin_dir = os.path.join(os.environ["HBASE_HOME"],'bin')
+hbase_executable = os.path.join(hbase_bin_dir,"hbase.cmd")
+stack_root = os.path.abspath(os.path.join(os.environ["HADOOP_HOME"],".."))
+hadoop_user = config["configurations"]["cluster-env"]["hadoop.user.name"]
+hbase_user = hadoop_user
+
+#decomm params
+region_drainer = os.path.join(hbase_bin_dir,"draining_servers.rb")
+region_mover = os.path.join(hbase_bin_dir,"region_mover.rb")
+hbase_excluded_hosts = config['commandParams']['excluded_hosts']
+hbase_drain_only = config['commandParams']['mark_draining_only']
+
+service_map = {
+  'master' : status_params.hbase_master_win_service_name,
+  'regionserver' : status_params.hbase_regionserver_win_service_name
+}
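
The service_map above gives the Windows code one place to resolve a role name to its Windows service. A minimal sketch of how such a map would typically be consumed (the service names here are hypothetical):

service_map = {
  'master': 'hbase_master',               # hypothetical Windows service names
  'regionserver': 'hbase_regionserver',
}

def control_hbase_service(name, action='start'):
  win_service = service_map[name]
  print("would run: sc %s %s" % (action, win_service))

control_hbase_service('regionserver', action='start')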

http://git-wip-us.apache.org/repos/asf/ambari/blob/41006ebe/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/phoenix_queryserver.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/phoenix_queryserver.py b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/phoenix_queryserver.py
new file mode 100644
index 0000000..77820cc
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/phoenix_queryserver.py
@@ -0,0 +1,92 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions import StackFeature
+from resource_management.libraries.functions.stack_features import check_stack_feature
+from resource_management.libraries.script import Script
+from phoenix_service import phoenix_service
+from hbase import hbase
+
+# Note: Phoenix Query Server is only applicable to stack versions that support Phoenix.
+class PhoenixQueryServer(Script):
+
+  def install(self, env):
+    import params
+    env.set_params(params)
+    self.install_packages(env)
+
+
+  def get_component_name(self):
+    return "phoenix-server"
+
+
+  def configure(self, env):
+    import params
+    env.set_params(params)
+    hbase(name='queryserver')
+
+
+  def start(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+    self.configure(env)
+    phoenix_service('start')
+
+
+  def stop(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+    phoenix_service('stop')
+
+
+  def pre_upgrade_restart(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+
+    if params.stack_version_formatted and check_stack_feature(StackFeature.PHOENIX, params.stack_version_formatted):     
+      # phoenix uses hbase configs
+      conf_select.select(params.stack_name, "hbase", params.version)
+      stack_select.select("phoenix-server", params.version)
+
+
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+    phoenix_service('status')
+
+
+  def security_status(self, env):
+    self.put_structured_out({"securityState": "UNSECURED"})
+    
+  def get_log_folder(self):
+    import params
+    return params.log_dir
+  
+  def get_user(self):
+    import params
+    return params.hbase_user
+
+  def get_pid_files(self):
+    import status_params
+    return [status_params.phoenix_pid_file]
+
+if __name__ == "__main__":
+  PhoenixQueryServer().execute()
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/41006ebe/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/phoenix_service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/phoenix_service.py b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/phoenix_service.py
new file mode 100644
index 0000000..42d9cd1
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/phoenix_service.py
@@ -0,0 +1,56 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import errno
+from resource_management.core.logger import Logger
+from resource_management.core.resources.system import Execute
+from resource_management.core.resources.system import File
+from resource_management.libraries.functions import check_process_status, format
+
+# Note: Phoenix Query Server is only applicable to stacks that include Phoenix.
+def phoenix_service(action = 'start'): # 'start', 'stop', 'status'
+    # Note: params should already be imported before calling phoenix_service()
+    import status_params
+    pid_file = status_params.phoenix_pid_file
+    no_op_test = format("ls {pid_file} >/dev/null 2>&1 && ps -p `cat {pid_file}` >/dev/null 2>&1")
+
+    if action == "status":
+      check_process_status(pid_file)
+    else:
+      env = {'JAVA_HOME': format("{java64_home}"), 'HBASE_CONF_DIR': format("{hbase_conf_dir}")}
+      daemon_cmd = format("{phx_daemon_script} {action}")
+      if action == 'start':
+        Execute(daemon_cmd,
+                user=format("{hbase_user}"),
+                environment=env)
+  
+      elif action == 'stop':
+        Execute(daemon_cmd,
+                user=format("{hbase_user}"),
+                environment=env
+        )
+        try:
+          File(pid_file, action = "delete")
+        except OSError as exc:
+          # OSError: [Errno 2] No such file or directory
+          if exc.errno == errno.ENOENT:
+            Logger.info("Did not remove '{0}' as it did not exist".format(pid_file))
+          else:
+            raise
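
The stop branch above tolerates a missing pid file by catching OSError and re-raising anything other than ENOENT, which keeps repeated stops idempotent. The same idiom in plain Python, with a hypothetical path:

import errno
import os

pid_file = "/var/run/hbase/phoenix-queryserver.pid"  # hypothetical path

try:
  os.remove(pid_file)
except OSError as exc:
  if exc.errno == errno.ENOENT:
    print("Did not remove '%s' as it did not exist" % pid_file)
  else:
    raise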

http://git-wip-us.apache.org/repos/asf/ambari/blob/41006ebe/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/service_check.py b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/service_check.py
new file mode 100644
index 0000000..5184ea7
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/service_check.py
@@ -0,0 +1,99 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import os
+
+from resource_management.libraries.script.script import Script
+from resource_management.libraries.functions.format import format
+from resource_management.core.resources.system import Execute, File
+from resource_management.core.source import StaticFile
+from resource_management.core.source import Template
+import functions
+from ambari_commons import OSCheck, OSConst
+from ambari_commons.os_family_impl import OsFamilyImpl
+
+
+class HbaseServiceCheck(Script):
+  pass
+
+
+@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
+class HbaseServiceCheckWindows(HbaseServiceCheck):
+  def service_check(self, env):
+    import params
+    env.set_params(params)
+    smoke_cmd = os.path.join(params.stack_root, "Run-SmokeTests.cmd")
+    service = "HBASE"
+    Execute(format("cmd /C {smoke_cmd} {service}"), user=params.hbase_user, logoutput=True)
+
+
+@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
+class HbaseServiceCheckDefault(HbaseServiceCheck):
+  def service_check(self, env):
+    import params
+    env.set_params(params)
+    
+    output_file = "/apps/hbase/data/ambarismoketest"
+    smokeuser_kinit_cmd = format("{kinit_path_local} -kt {smoke_user_keytab} {smokeuser_principal} &&") if params.security_enabled else ""
+    hbase_servicecheck_file = format("{exec_tmp_dir}/hbase-smoke.sh")
+    hbase_servicecheck_cleanup_file = format("{exec_tmp_dir}/hbase-smoke-cleanup.sh")
+
+    File( format("{exec_tmp_dir}/hbaseSmokeVerify.sh"),
+      content = StaticFile("hbaseSmokeVerify.sh"),
+      mode = 0755
+    )
+
+    File(hbase_servicecheck_cleanup_file,
+      content = StaticFile("hbase-smoke-cleanup.sh"),
+      mode = 0755
+    )
+  
+    File( hbase_servicecheck_file,
+      mode = 0755,
+      content = Template('hbase-smoke.sh.j2')
+    )
+    
+    if params.security_enabled:
+      hbase_grant_permissions_file = format("{exec_tmp_dir}/hbase_grant_permissions.sh")
+      grant_privilege_cmd = format("{kinit_cmd} {hbase_cmd} shell {hbase_grant_permissions_file}")
+
+      File( hbase_grant_permissions_file,
+        owner   = params.hbase_user,
+        group   = params.user_group,
+        mode    = 0644,
+        content = Template('hbase_grant_permissions.j2')
+      )
+
+      Execute( grant_privilege_cmd,
+        user = params.hbase_user,
+        logoutput = True
+      )
+
+    servicecheckcmd = format("{smokeuser_kinit_cmd} {hbase_cmd} --config {hbase_conf_dir} shell {hbase_servicecheck_file}")
+    smokeverifycmd = format("{exec_tmp_dir}/hbaseSmokeVerify.sh {hbase_conf_dir} {service_check_data} {hbase_cmd}")
+    cleanupCmd = format("{smokeuser_kinit_cmd} {hbase_cmd} --config {hbase_conf_dir} shell {hbase_servicecheck_cleanup_file}")
+    Execute(format("{servicecheckcmd} && {smokeverifycmd} && {cleanupCmd}"),
+      tries     = 6,
+      try_sleep = 5,
+      user = params.smoke_test_user,
+      logoutput = True
+    )
+
+if __name__ == "__main__":
+  HbaseServiceCheck().execute()
+  
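
The final Execute above chains the smoke, verify, and cleanup scripts and retries the whole chain (tries = 6, try_sleep = 5). A bare-bones sketch of that retry behavior, assuming a run() callable standing in for the command chain:

import time

def run_with_retries(run, tries=6, try_sleep=5):
  # retry the whole command chain, sleeping between attempts,
  # and re-raise the last failure once attempts are exhausted
  for attempt in range(1, tries + 1):
    try:
      return run()
    except Exception:
      if attempt == tries:
        raise
      time.sleep(try_sleep)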


[43/50] [abbrv] ambari git commit: AMBARI-20816 Create Ranger Hive Service during Hive Metastore restart (mugdha)

Posted by ao...@apache.org.
AMBARI-20816 Create Ranger Hive Service during Hive Metastore restart (mugdha)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/61e363bc
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/61e363bc
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/61e363bc

Branch: refs/heads/branch-3.0-perf
Commit: 61e363bc691f6ffe8ce469abf4a06ae20af1b98a
Parents: 41006eb
Author: Mugdha Varadkar <mu...@apache.org>
Authored: Fri Apr 21 18:04:05 2017 +0530
Committer: Andrew Onishuk <ao...@hortonworks.com>
Committed: Mon Apr 24 14:59:56 2017 +0300

----------------------------------------------------------------------
 .../package/scripts/hive_metastore.py           |  3 +
 .../0.12.0.2.0/package/scripts/params_linux.py  |  7 +++
 .../package/scripts/setup_ranger_hive.py        | 58 ++++++++++++++++++++
 .../2.1.0.3.0/package/scripts/hive_metastore.py |  3 +
 .../2.1.0.3.0/package/scripts/params_linux.py   |  7 +++
 .../package/scripts/setup_ranger_hive.py        | 58 ++++++++++++++++++++
 6 files changed, 136 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/61e363bc/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive_metastore.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive_metastore.py b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive_metastore.py
index 2dc6906..99eb8b5 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive_metastore.py
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive_metastore.py
@@ -35,6 +35,7 @@ from resource_management.libraries.functions.security_commons import get_params_
 from resource_management.libraries.functions.security_commons import validate_security_config_properties
 from resource_management.libraries.functions.security_commons import FILE_TYPE_XML
 from resource_management.core.resources.system import File
+from setup_ranger_hive import setup_ranger_hive_metastore_service
 
 from hive import create_metastore_schema, hive, jdbc_connector
 from hive_service import hive_service
@@ -61,6 +62,8 @@ class HiveMetastore(Script):
 
     hive_service('metastore', action='start', upgrade_type=upgrade_type)
 
+    # the call below is used, for clusters deployed in a cloud environment, to create the ranger hive service in ranger admin.
+    setup_ranger_hive_metastore_service()
 
   def stop(self, env, upgrade_type=None):
     import params

http://git-wip-us.apache.org/repos/asf/ambari/blob/61e363bc/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/params_linux.py
index 84bac38..7615ee7 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/params_linux.py
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/params_linux.py
@@ -824,3 +824,10 @@ if enable_ranger_hive:
     xa_audit_db_is_enabled = False
 
 # ranger hive plugin section end
+
+# the property below is used, for clusters deployed in a cloud environment, to create the ranger hive service in ranger admin;
+# it has to be added as a custom property
+ranger_hive_metastore_lookup = default('/configurations/ranger-hive-plugin-properties/ranger.service.config.param.enable.hive.metastore.lookup', False)
+
+if security_enabled:
+  hive_metastore_principal_with_host = hive_metastore_principal.replace('_HOST', hostname.lower())
\ No newline at end of file
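
The new hive_metastore_principal_with_host line resolves the Kerberos _HOST placeholder against the lowercased local hostname, matching what the daemon itself does at login time. For example, under assumed values:

hive_metastore_principal = "hive/_HOST@EXAMPLE.COM"  # assumed principal
hostname = "C6401.Ambari.Apache.Org"                 # assumed agent hostname

print(hive_metastore_principal.replace('_HOST', hostname.lower()))
# -> hive/c6401.ambari.apache.org@EXAMPLE.COM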

http://git-wip-us.apache.org/repos/asf/ambari/blob/61e363bc/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/setup_ranger_hive.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/setup_ranger_hive.py b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/setup_ranger_hive.py
index 80bd7c8..379b4ac 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/setup_ranger_hive.py
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/setup_ranger_hive.py
@@ -18,6 +18,9 @@ limitations under the License.
 
 """
 from resource_management.core.logger import Logger
+from resource_management.libraries.functions.is_empty import is_empty
+from resource_management.libraries.functions.ranger_functions_v2 import RangeradminV2
+from resource_management.libraries.functions.setup_ranger_plugin_xml import generate_ranger_service_config
 
 def setup_ranger_hive(upgrade_type = None):
   import params
@@ -96,3 +99,58 @@ def setup_ranger_hive(upgrade_type = None):
                         stack_version_override = stack_version, skip_if_rangeradmin_down= not params.retryAble)
   else:
     Logger.info('Ranger Hive plugin is not enabled')
+
+def setup_ranger_hive_metastore_service():
+  """
+  Creates the ranger hive service in the ranger admin installed in the same cluster, for clusters deployed in a cloud environment.
+  """
+  import params
+
+  if params.has_ranger_admin and params.ranger_hive_metastore_lookup:
+
+    repo_name = str(params.config['clusterName']) + '_hive'
+    repo_name_value = params.config['configurations']['ranger-hive-security']['ranger.plugin.hive.service.name']
+    if not is_empty(repo_name_value) and repo_name_value != "{{repo_name}}":
+      repo_name = repo_name_value
+
+    hive_ranger_plugin_config = {
+      'username': params.config['configurations']['ranger-hive-plugin-properties']['REPOSITORY_CONFIG_USERNAME'],
+      'password': params.config['configurations']['ranger-hive-plugin-properties']['REPOSITORY_CONFIG_PASSWORD'],
+      'jdbc.driverClassName': params.config['configurations']['ranger-hive-plugin-properties']['jdbc.driverClassName'],
+      'jdbc.url': 'none',
+      'commonNameForCertificate': params.config['configurations']['ranger-hive-plugin-properties']['common.name.for.certificate'],
+      'ambari.service.check.user': params.config['configurations']['ranger-hive-plugin-properties']['policy_user']
+    }
+
+    if params.security_enabled:
+      hive_ranger_plugin_config['policy.download.auth.users'] = params.hive_user
+      hive_ranger_plugin_config['tag.download.auth.users'] = params.hive_user
+      hive_ranger_plugin_config['policy.grantrevoke.auth.users'] = params.hive_user
+
+    custom_ranger_service_config = generate_ranger_service_config(params.config['configurations']['ranger-hive-plugin-properties'])
+    if len(custom_ranger_service_config) > 0:
+      hive_ranger_plugin_config.update(custom_ranger_service_config)
+
+    hive_ranger_plugin_repo = {
+      'isEnabled': 'true',
+      'configs': hive_ranger_plugin_config,
+      'description': 'Hive service',
+      'name': repo_name,
+      'type': 'hive'
+    }
+
+    ranger_admin_obj = RangeradminV2(url = params.config['configurations']['ranger-hive-security']['ranger.plugin.hive.policy.rest.url'], skip_if_rangeradmin_down = not params.retryAble)
+    ranger_admin_obj.create_ranger_repository(
+      component = 'hive',
+      repo_name = repo_name,
+      repo_properties = hive_ranger_plugin_repo,
+      ambari_ranger_admin = params.config['configurations']['ranger-env']['ranger_admin_username'],
+      ambari_ranger_password = params.config['configurations']['ranger-env']['ranger_admin_password'],
+      admin_uname = params.config['configurations']['ranger-env']['admin_username'],
+      admin_password = params.config['configurations']['ranger-env']['admin_password'],
+      policy_user = params.config['configurations']['ranger-hive-plugin-properties']['policy_user'],
+      is_security_enabled = params.security_enabled,
+      is_stack_supports_ranger_kerberos = params.stack_supports_ranger_kerberos,
+      component_user = params.hive_user,
+      component_user_principal = params.hive_metastore_principal_with_host if params.security_enabled else None,
+      component_user_keytab = params.hive_metastore_keytab_path if params.security_enabled else None)
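
Note that setup_ranger_hive_metastore_service() is a no-op unless Ranger Admin hosts are present and the lookup flag is set. To opt in, the property read in params_linux.py would be added to ranger-hive-plugin-properties as a custom property, e.g.:

  ranger.service.config.param.enable.hive.metastore.lookup=true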

http://git-wip-us.apache.org/repos/asf/ambari/blob/61e363bc/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/scripts/hive_metastore.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/scripts/hive_metastore.py b/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/scripts/hive_metastore.py
index 2dc6906..99eb8b5 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/scripts/hive_metastore.py
+++ b/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/scripts/hive_metastore.py
@@ -35,6 +35,7 @@ from resource_management.libraries.functions.security_commons import get_params_
 from resource_management.libraries.functions.security_commons import validate_security_config_properties
 from resource_management.libraries.functions.security_commons import FILE_TYPE_XML
 from resource_management.core.resources.system import File
+from setup_ranger_hive import setup_ranger_hive_metastore_service
 
 from hive import create_metastore_schema, hive, jdbc_connector
 from hive_service import hive_service
@@ -61,6 +62,8 @@ class HiveMetastore(Script):
 
     hive_service('metastore', action='start', upgrade_type=upgrade_type)
 
+    # The call below creates the Ranger Hive service in Ranger Admin for clusters deployed in a cloud env.
+    setup_ranger_hive_metastore_service()
 
   def stop(self, env, upgrade_type=None):
     import params

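For context on the hook added above: setup_ranger_hive_metastore_service() is effectively a no-op unless Ranger Admin is present and the custom lookup property is enabled (see the setup_ranger_hive.py diff further down). A minimal sketch of that guard, with a stub params object standing in for Ambari's real params module:

    # Sketch only; in Ambari the real params module is generated from the
    # command JSON by resource_management.
    class params(object):
        has_ranger_admin = True                # Ranger Admin runs in this cluster
        ranger_hive_metastore_lookup = False   # custom property, defaults to False

    def setup_ranger_hive_metastore_service():
        # Both flags must hold, so existing clusters are untouched unless the
        # custom lookup property is explicitly enabled.
        if not (params.has_ranger_admin and params.ranger_hive_metastore_lookup):
            return
        # ... build the repo config and call RangeradminV2 (see the
        # setup_ranger_hive.py diff below) ...

    setup_ranger_hive_metastore_service()  # no-op with the defaults above
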
http://git-wip-us.apache.org/repos/asf/ambari/blob/61e363bc/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/scripts/params_linux.py
index 84bac38..7615ee7 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/scripts/params_linux.py
+++ b/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/scripts/params_linux.py
@@ -824,3 +824,10 @@ if enable_ranger_hive:
     xa_audit_db_is_enabled = False
 
 # ranger hive plugin section end
+
+# The property below is used, for clusters deployed in a cloud env, to create the Ranger Hive service in Ranger Admin.
+# It needs to be added as a custom property.
+ranger_hive_metastore_lookup = default('/configurations/ranger-hive-plugin-properties/ranger.service.config.param.enable.hive.metastore.lookup', False)
+
+if security_enabled:
+  hive_metastore_principal_with_host = hive_metastore_principal.replace('_HOST', hostname.lower())
\ No newline at end of file

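Two idioms in the params_linux.py hunk above are worth noting: default() reads a property by its '/'-separated config path and returns a fallback when the property is absent, and a Kerberos principal templated with _HOST is resolved against the lowercased hostname. A minimal self-contained sketch of both (the default() here is a simplified stand-in for resource_management's helper, not its exact implementation):

    def default(path, fallback, config):
        # Walk the '/'-separated path through nested dicts;
        # return the fallback if any segment is missing.
        node = config
        for key in path.strip('/').split('/'):
            if not isinstance(node, dict) or key not in node:
                return fallback
            node = node[key]
        return node

    config = {'configurations': {'ranger-hive-plugin-properties': {}}}
    lookup = default('/configurations/ranger-hive-plugin-properties/'
                     'ranger.service.config.param.enable.hive.metastore.lookup',
                     False, config)
    print(lookup)  # False: the custom property is not set

    # _HOST substitution, as done for hive_metastore_principal_with_host:
    principal = 'hive/_HOST@EXAMPLE.COM'
    hostname = 'C6401.Ambari.Apache.Org'
    print(principal.replace('_HOST', hostname.lower()))
    # hive/c6401.ambari.apache.org@EXAMPLE.COM
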
http://git-wip-us.apache.org/repos/asf/ambari/blob/61e363bc/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/scripts/setup_ranger_hive.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/scripts/setup_ranger_hive.py b/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/scripts/setup_ranger_hive.py
index 80bd7c8..379b4ac 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/scripts/setup_ranger_hive.py
+++ b/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/scripts/setup_ranger_hive.py
@@ -18,6 +18,9 @@ limitations under the License.
 
 """
 from resource_management.core.logger import Logger
+from resource_management.libraries.functions.is_empty import is_empty
+from resource_management.libraries.functions.ranger_functions_v2 import RangeradminV2
+from resource_management.libraries.functions.setup_ranger_plugin_xml import generate_ranger_service_config
 
 def setup_ranger_hive(upgrade_type = None):
   import params
@@ -96,3 +99,58 @@ def setup_ranger_hive(upgrade_type = None):
                         stack_version_override = stack_version, skip_if_rangeradmin_down= not params.retryAble)
   else:
     Logger.info('Ranger Hive plugin is not enabled')
+
+def setup_ranger_hive_metastore_service():
+  """
+  Creates the Ranger Hive service in the Ranger Admin installed in the same cluster, for clusters deployed in a cloud env.
+  """
+  import params
+
+  if params.has_ranger_admin and params.ranger_hive_metastore_lookup:
+
+    repo_name = str(params.config['clusterName']) + '_hive'
+    repo_name_value = params.config['configurations']['ranger-hive-security']['ranger.plugin.hive.service.name']
+    if not is_empty(repo_name_value) and repo_name_value != "{{repo_name}}":
+      repo_name = repo_name_value
+
+    hive_ranger_plugin_config = {
+      'username': params.config['configurations']['ranger-hive-plugin-properties']['REPOSITORY_CONFIG_USERNAME'],
+      'password': params.config['configurations']['ranger-hive-plugin-properties']['REPOSITORY_CONFIG_PASSWORD'],
+      'jdbc.driverClassName': params.config['configurations']['ranger-hive-plugin-properties']['jdbc.driverClassName'],
+      'jdbc.url': 'none',
+      'commonNameForCertificate': params.config['configurations']['ranger-hive-plugin-properties']['common.name.for.certificate'],
+      'ambari.service.check.user': params.config['configurations']['ranger-hive-plugin-properties']['policy_user']
+    }
+
+    if params.security_enabled:
+      hive_ranger_plugin_config['policy.download.auth.users'] = params.hive_user
+      hive_ranger_plugin_config['tag.download.auth.users'] = params.hive_user
+      hive_ranger_plugin_config['policy.grantrevoke.auth.users'] = params.hive_user
+
+    custom_ranger_service_config = generate_ranger_service_config(params.config['configurations']['ranger-hive-plugin-properties'])
+    if len(custom_ranger_service_config) > 0:
+      hive_ranger_plugin_config.update(custom_ranger_service_config)
+
+    hive_ranger_plugin_repo = {
+      'isEnabled': 'true',
+      'configs': hive_ranger_plugin_config,
+      'description': 'Hive service',
+      'name': repo_name,
+      'type': 'hive'
+    }
+
+    ranger_admin_obj = RangeradminV2(url = params.config['configurations']['ranger-hive-security']['ranger.plugin.hive.policy.rest.url'], skip_if_rangeradmin_down = not params.retryAble)
+    ranger_admin_obj.create_ranger_repository(
+      component = 'hive',
+      repo_name = repo_name,
+      repo_properties = hive_ranger_plugin_repo,
+      ambari_ranger_admin = params.config['configurations']['ranger-env']['ranger_admin_username'],
+      ambari_ranger_password = params.config['configurations']['ranger-env']['ranger_admin_password'],
+      admin_uname = params.config['configurations']['ranger-env']['admin_username'],
+      admin_password = params.config['configurations']['ranger-env']['admin_password'],
+      policy_user = params.config['configurations']['ranger-hive-plugin-properties']['policy_user'],
+      is_security_enabled = params.security_enabled,
+      is_stack_supports_ranger_kerberos = params.stack_supports_ranger_kerberos,
+      component_user = params.hive_user,
+      component_user_principal = params.hive_metastore_principal_with_host if params.security_enabled else None,
+      component_user_keytab = params.hive_metastore_keytab_path if params.security_enabled else None)

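The repo-name resolution and the custom-config merge in setup_ranger_hive_metastore_service() follow a common Ambari plugin pattern. A minimal sketch, assuming generate_ranger_service_config collects the properties prefixed with 'ranger.service.config.param.' and strips that prefix (an assumption about the helper, consistent with how the lookup property is named above):

    RANGER_PARAM_PREFIX = 'ranger.service.config.param.'

    def generate_ranger_service_config(plugin_properties):
        # Assumed behaviour: pass user-supplied repo overrides through to
        # Ranger Admin with the marker prefix removed.
        return {key[len(RANGER_PARAM_PREFIX):]: value
                for key, value in plugin_properties.items()
                if key.startswith(RANGER_PARAM_PREFIX)}

    def resolve_repo_name(cluster_name, configured_name):
        # Fall back to '<cluster>_hive' when the configured service name is
        # empty or still holds the unresolved '{{repo_name}}' template.
        if configured_name and configured_name != '{{repo_name}}':
            return configured_name
        return cluster_name + '_hive'

    props = {'policy_user': 'ambari-qa',
             'ranger.service.config.param.enable.hive.metastore.lookup': 'true'}
    print(generate_ranger_service_config(props))
    # {'enable.hive.metastore.lookup': 'true'}
    print(resolve_repo_name('mycluster', '{{repo_name}}'))  # mycluster_hive
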

[38/50] [abbrv] ambari git commit: AMBARI-20733. /var/log/krb5kdc.log is growing rapidly on the KDC server (echekanskiy)

Posted by ao...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/2a00812a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/namenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/namenode.py b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/namenode.py
index 602dad7..a42ca79 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/namenode.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/namenode.py
@@ -216,63 +216,6 @@ class NameNodeDefault(NameNode):
             try_sleep=10
     )
 
-  def security_status(self, env):
-    import status_params
-
-    env.set_params(status_params)
-    props_value_check = {"hadoop.security.authentication": "kerberos",
-                         "hadoop.security.authorization": "true"}
-    props_empty_check = ["hadoop.security.auth_to_local"]
-    props_read_check = None
-    core_site_expectations = build_expectations('core-site', props_value_check, props_empty_check,
-                                                props_read_check)
-    props_value_check = None
-    props_empty_check = ['dfs.namenode.kerberos.internal.spnego.principal',
-                         'dfs.namenode.keytab.file',
-                         'dfs.namenode.kerberos.principal']
-    props_read_check = ['dfs.namenode.keytab.file']
-    hdfs_site_expectations = build_expectations('hdfs-site', props_value_check, props_empty_check,
-                                                props_read_check)
-
-    hdfs_expectations = {}
-    hdfs_expectations.update(core_site_expectations)
-    hdfs_expectations.update(hdfs_site_expectations)
-
-    security_params = get_params_from_filesystem(status_params.hadoop_conf_dir,
-                                                 {'core-site.xml': FILE_TYPE_XML,
-                                                  'hdfs-site.xml': FILE_TYPE_XML})
-    if 'core-site' in security_params and 'hadoop.security.authentication' in security_params['core-site'] and \
-        security_params['core-site']['hadoop.security.authentication'].lower() == 'kerberos':
-      result_issues = validate_security_config_properties(security_params, hdfs_expectations)
-      if not result_issues:  # If all validations passed successfully
-        try:
-          # Double check the dict before calling execute
-          if ( 'hdfs-site' not in security_params
-               or 'dfs.namenode.keytab.file' not in security_params['hdfs-site']
-               or 'dfs.namenode.kerberos.principal' not in security_params['hdfs-site']):
-            self.put_structured_out({"securityState": "UNSECURED"})
-            self.put_structured_out(
-              {"securityIssuesFound": "Keytab file or principal are not set property."})
-            return
-          cached_kinit_executor(status_params.kinit_path_local,
-                                status_params.hdfs_user,
-                                security_params['hdfs-site']['dfs.namenode.keytab.file'],
-                                security_params['hdfs-site']['dfs.namenode.kerberos.principal'],
-                                status_params.hostname,
-                                status_params.tmp_dir)
-          self.put_structured_out({"securityState": "SECURED_KERBEROS"})
-        except Exception as e:
-          self.put_structured_out({"securityState": "ERROR"})
-          self.put_structured_out({"securityStateErrorInfo": str(e)})
-      else:
-        issues = []
-        for cf in result_issues:
-          issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
-        self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
-        self.put_structured_out({"securityState": "UNSECURED"})
-    else:
-      self.put_structured_out({"securityState": "UNSECURED"})
-
   def rebalancehdfs(self, env):
     import params
     env.set_params(params)

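Every security_status removal in this commit has the same shape: build expectations for the Kerberos-related properties, validate them against the on-disk XML, then run cached_kinit_executor to prove the keytab works. That last step issues a kinit against the KDC (cached, but re-executed once the cache expires) on every status heartbeat, which is presumably what was filling /var/log/krb5kdc.log. A hedged sketch of the caching idea behind such an executor (names and TTL are illustrative, not the real helper's implementation):

    import subprocess
    import time

    _cache = {}   # (user, keytab, principal) -> timestamp of last successful kinit
    _TTL = 3600   # illustrative expiry, in seconds

    def cached_kinit(kinit_path, user, keytab, principal):
        key = (user, keytab, principal)
        now = time.time()
        if now - _cache.get(key, 0) < _TTL:
            return  # a recent kinit succeeded; skip the KDC round-trip
        subprocess.check_call(
            ['sudo', '-u', user, kinit_path, '-kt', keytab, principal])
        _cache[key] = now
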
http://git-wip-us.apache.org/repos/asf/ambari/blob/2a00812a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/nfsgateway.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/nfsgateway.py b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/nfsgateway.py
index 7ba1f96..602c179 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/nfsgateway.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/nfsgateway.py
@@ -77,64 +77,6 @@ class NFSGateway(Script):
 
     check_process_status(status_params.nfsgateway_pid_file)
 
-  def security_status(self, env):
-    import status_params
-
-    env.set_params(status_params)
-    props_value_check = {"hadoop.security.authentication": "kerberos",
-                         "hadoop.security.authorization": "true"}
-    props_empty_check = ["hadoop.security.auth_to_local"]
-    props_read_check = None
-    core_site_expectations = build_expectations('core-site', props_value_check, props_empty_check,
-                                                props_read_check)
-    props_value_check = None
-    props_empty_check = ['nfs.keytab.file',
-                         'nfs.kerberos.principal']
-    props_read_check = ['nfs.keytab.file']
-    hdfs_site_expectations = build_expectations('hdfs-site', props_value_check, props_empty_check,
-                                                props_read_check)
-
-    hdfs_expectations = {}
-    hdfs_expectations.update(core_site_expectations)
-    hdfs_expectations.update(hdfs_site_expectations)
-
-    security_params = get_params_from_filesystem(status_params.hadoop_conf_dir,
-                                                 {'core-site.xml': FILE_TYPE_XML,
-                                                  'hdfs-site.xml': FILE_TYPE_XML})
-    if 'core-site' in security_params and 'hadoop.security.authentication' in security_params['core-site'] and \
-        security_params['core-site']['hadoop.security.authentication'].lower() == 'kerberos':
-      result_issues = validate_security_config_properties(security_params, hdfs_expectations)
-      if not result_issues:  # If all validations passed successfully
-        try:
-          # Double check the dict before calling execute
-          if ('hdfs-site' not in security_params or
-                'nfs.keytab.file' not in security_params['hdfs-site'] or
-                'nfs.kerberos.principal' not in security_params['hdfs-site']):
-            self.put_structured_out({"securityState": "UNSECURED"})
-            self.put_structured_out(
-              {"securityIssuesFound": "Keytab file or principal are not set property."})
-            return
-
-          cached_kinit_executor(status_params.kinit_path_local,
-                                status_params.hdfs_user,
-                                security_params['hdfs-site']['nfs.keytab.file'],
-                                security_params['hdfs-site'][
-                                  'nfs.kerberos.principal'],
-                                status_params.hostname,
-                                status_params.tmp_dir)
-          self.put_structured_out({"securityState": "SECURED_KERBEROS"})
-        except Exception as e:
-          self.put_structured_out({"securityState": "ERROR"})
-          self.put_structured_out({"securityStateErrorInfo": str(e)})
-      else:
-        issues = []
-        for cf in result_issues:
-          issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
-        self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
-        self.put_structured_out({"securityState": "UNSECURED"})
-    else:
-      self.put_structured_out({"securityState": "UNSECURED"})
-      
   def get_log_folder(self):
     import params
     return params.hdfs_log_dir

http://git-wip-us.apache.org/repos/asf/ambari/blob/2a00812a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/snamenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/snamenode.py b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/snamenode.py
index 0f1f438..030a470 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/snamenode.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/snamenode.py
@@ -75,66 +75,6 @@ class SNameNodeDefault(SNameNode):
       conf_select.select(params.stack_name, "hadoop", params.version)
       stack_select.select("hadoop-hdfs-secondarynamenode", params.version)
 
-  def security_status(self, env):
-    import status_params
-
-    env.set_params(status_params)
-    props_value_check = {"hadoop.security.authentication": "kerberos",
-                         "hadoop.security.authorization": "true"}
-    props_empty_check = ["hadoop.security.auth_to_local"]
-    props_read_check = None
-    core_site_expectations = build_expectations('core-site', props_value_check, props_empty_check,
-                                                props_read_check)
-    props_value_check = None
-    props_empty_check = ['dfs.secondary.namenode.kerberos.internal.spnego.principal',
-                         'dfs.secondary.namenode.keytab.file',
-                         'dfs.secondary.namenode.kerberos.principal']
-    props_read_check = ['dfs.secondary.namenode.keytab.file']
-    hdfs_site_expectations = build_expectations('hdfs-site', props_value_check, props_empty_check,
-                                                props_read_check)
-
-    hdfs_expectations = {}
-    hdfs_expectations.update(core_site_expectations)
-    hdfs_expectations.update(hdfs_site_expectations)
-
-    security_params = get_params_from_filesystem(status_params.hadoop_conf_dir,
-                                                 {'core-site.xml': FILE_TYPE_XML,
-                                                  'hdfs-site.xml': FILE_TYPE_XML})
-
-    if 'core-site' in security_params and 'hadoop.security.authentication' in security_params['core-site'] and \
-        security_params['core-site']['hadoop.security.authentication'].lower() == 'kerberos':
-      result_issues = validate_security_config_properties(security_params, hdfs_expectations)
-      if not result_issues:  # If all validations passed successfully
-        try:
-          # Double check the dict before calling execute
-          if ('hdfs-site' not in security_params or
-                  'dfs.secondary.namenode.keytab.file' not in security_params['hdfs-site'] or
-                  'dfs.secondary.namenode.kerberos.principal' not in security_params['hdfs-site']):
-            self.put_structured_out({"securityState": "UNSECURED"})
-            self.put_structured_out(
-              {"securityIssuesFound": "Keytab file or principal are not set property."})
-            return
-
-          cached_kinit_executor(status_params.kinit_path_local,
-                                status_params.hdfs_user,
-                                security_params['hdfs-site']['dfs.secondary.namenode.keytab.file'],
-                                security_params['hdfs-site'][
-                                  'dfs.secondary.namenode.kerberos.principal'],
-                                status_params.hostname,
-                                status_params.tmp_dir)
-          self.put_structured_out({"securityState": "SECURED_KERBEROS"})
-        except Exception as e:
-          self.put_structured_out({"securityState": "ERROR"})
-          self.put_structured_out({"securityStateErrorInfo": str(e)})
-      else:
-        issues = []
-        for cf in result_issues:
-          issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
-        self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
-        self.put_structured_out({"securityState": "UNSECURED"})
-    else:
-      self.put_structured_out({"securityState": "UNSECURED"})
-      
   def get_log_folder(self):
     import params
     return params.hdfs_log_dir

http://git-wip-us.apache.org/repos/asf/ambari/blob/2a00812a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/zkfc_slave.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/zkfc_slave.py b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/zkfc_slave.py
index 19a78c7..fa948ca 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/zkfc_slave.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/zkfc_slave.py
@@ -119,49 +119,6 @@ class ZkfcSlaveDefault(ZkfcSlave):
     env.set_params(status_params)
     check_process_status(status_params.zkfc_pid_file)
 
-  def security_status(self, env):
-    import status_params
-    env.set_params(status_params)
-    props_value_check = {"hadoop.security.authentication": "kerberos",
-                         "hadoop.security.authorization": "true"}
-    props_empty_check = ["hadoop.security.auth_to_local"]
-    props_read_check = None
-    core_site_expectations = build_expectations('core-site', props_value_check, props_empty_check,
-                                                props_read_check)
-    hdfs_expectations = {}
-    hdfs_expectations.update(core_site_expectations)
-
-    security_params = get_params_from_filesystem(status_params.hadoop_conf_dir,
-                                                   {'core-site.xml': FILE_TYPE_XML})
-    result_issues = validate_security_config_properties(security_params, hdfs_expectations)
-    if 'core-site' in security_params and 'hadoop.security.authentication' in security_params['core-site'] and \
-        security_params['core-site']['hadoop.security.authentication'].lower() == 'kerberos':
-      if not result_issues:  # If all validations passed successfully
-        if status_params.hdfs_user_principal or status_params.hdfs_user_keytab:
-          try:
-            cached_kinit_executor(status_params.kinit_path_local,
-                                  status_params.hdfs_user,
-                                  status_params.hdfs_user_keytab,
-                                  status_params.hdfs_user_principal,
-                                  status_params.hostname,
-                                  status_params.tmp_dir)
-            self.put_structured_out({"securityState": "SECURED_KERBEROS"})
-          except Exception as e:
-            self.put_structured_out({"securityState": "ERROR"})
-            self.put_structured_out({"securityStateErrorInfo": str(e)})
-        else:
-          self.put_structured_out(
-            {"securityIssuesFound": "hdfs principal and/or keytab file is not specified"})
-          self.put_structured_out({"securityState": "UNSECURED"})
-      else:
-        issues = []
-        for cf in result_issues:
-          issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
-        self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
-        self.put_structured_out({"securityState": "UNSECURED"})
-    else:
-      self.put_structured_out({"securityState": "UNSECURED"})
-
   def disable_security(self, env):
     import params
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/2a00812a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive_metastore.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive_metastore.py b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive_metastore.py
index 8b69e45..2dc6906 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive_metastore.py
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive_metastore.py
@@ -113,58 +113,6 @@ class HiveMetastoreDefault(HiveMetastore):
             check_stack_feature(StackFeature.HIVE_METASTORE_UPGRADE_SCHEMA, params.stack_version_formatted_major):
       self.upgrade_schema(env)
 
-
-  def security_status(self, env):
-    import status_params
-    env.set_params(status_params)
-    if status_params.security_enabled:
-      props_value_check = {"hive.server2.authentication": "KERBEROS",
-                           "hive.metastore.sasl.enabled": "true",
-                           "hive.security.authorization.enabled": "true"}
-      props_empty_check = ["hive.metastore.kerberos.keytab.file",
-                           "hive.metastore.kerberos.principal"]
-
-      props_read_check = ["hive.metastore.kerberos.keytab.file"]
-      hive_site_props = build_expectations('hive-site', props_value_check, props_empty_check,
-                                            props_read_check)
-
-      hive_expectations ={}
-      hive_expectations.update(hive_site_props)
-
-      security_params = get_params_from_filesystem(status_params.hive_conf_dir,
-                                                   {'hive-site.xml': FILE_TYPE_XML})
-      result_issues = validate_security_config_properties(security_params, hive_expectations)
-      if not result_issues: # If all validations passed successfully
-        try:
-          # Double check the dict before calling execute
-          if 'hive-site' not in security_params \
-            or 'hive.metastore.kerberos.keytab.file' not in security_params['hive-site'] \
-            or 'hive.metastore.kerberos.principal' not in security_params['hive-site']:
-            self.put_structured_out({"securityState": "UNSECURED"})
-            self.put_structured_out({"securityIssuesFound": "Keytab file or principal are not set property."})
-            return
-
-          cached_kinit_executor(status_params.kinit_path_local,
-                                status_params.hive_user,
-                                security_params['hive-site']['hive.metastore.kerberos.keytab.file'],
-                                security_params['hive-site']['hive.metastore.kerberos.principal'],
-                                status_params.hostname,
-                                status_params.tmp_dir)
-
-          self.put_structured_out({"securityState": "SECURED_KERBEROS"})
-        except Exception as e:
-          self.put_structured_out({"securityState": "ERROR"})
-          self.put_structured_out({"securityStateErrorInfo": str(e)})
-      else:
-        issues = []
-        for cf in result_issues:
-          issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
-        self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
-        self.put_structured_out({"securityState": "UNSECURED"})
-    else:
-      self.put_structured_out({"securityState": "UNSECURED"})
-
-
   def upgrade_schema(self, env):
     """
     Executes the schema upgrade binary.  This is its own function because it could

http://git-wip-us.apache.org/repos/asf/ambari/blob/2a00812a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive_server.py b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive_server.py
index f6251e7..7c3a805 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive_server.py
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive_server.py
@@ -135,67 +135,6 @@ class HiveServerDefault(HiveServer):
       if resource_created:
         params.HdfsResource(None, action="execute")
 
-
-  def security_status(self, env):
-    import status_params
-    env.set_params(status_params)
-    if status_params.security_enabled:
-      props_value_check = {"hive.server2.authentication": "KERBEROS",
-                           "hive.metastore.sasl.enabled": "true",
-                           "hive.security.authorization.enabled": "true"}
-      props_empty_check = ["hive.server2.authentication.kerberos.keytab",
-                           "hive.server2.authentication.kerberos.principal",
-                           "hive.server2.authentication.spnego.principal",
-                           "hive.server2.authentication.spnego.keytab"]
-
-      props_read_check = ["hive.server2.authentication.kerberos.keytab",
-                          "hive.server2.authentication.spnego.keytab"]
-      hive_site_props = build_expectations('hive-site', props_value_check, props_empty_check,
-                                            props_read_check)
-
-      hive_expectations ={}
-      hive_expectations.update(hive_site_props)
-
-      security_params = get_params_from_filesystem(status_params.hive_conf_dir,
-                                                   {'hive-site.xml': FILE_TYPE_XML})
-      result_issues = validate_security_config_properties(security_params, hive_expectations)
-      if not result_issues: # If all validations passed successfully
-        try:
-          # Double check the dict before calling execute
-          if 'hive-site' not in security_params \
-            or 'hive.server2.authentication.kerberos.keytab' not in security_params['hive-site'] \
-            or 'hive.server2.authentication.kerberos.principal' not in security_params['hive-site']  \
-            or 'hive.server2.authentication.spnego.keytab' not in security_params['hive-site'] \
-            or 'hive.server2.authentication.spnego.principal' not in security_params['hive-site']:
-            self.put_structured_out({"securityState": "UNSECURED"})
-            self.put_structured_out({"securityIssuesFound": "Keytab file or principal are not set property."})
-            return
-
-          cached_kinit_executor(status_params.kinit_path_local,
-                                status_params.hive_user,
-                                security_params['hive-site']['hive.server2.authentication.kerberos.keytab'],
-                                security_params['hive-site']['hive.server2.authentication.kerberos.principal'],
-                                status_params.hostname,
-                                status_params.tmp_dir)
-          cached_kinit_executor(status_params.kinit_path_local,
-                                status_params.hive_user,
-                                security_params['hive-site']['hive.server2.authentication.spnego.keytab'],
-                                security_params['hive-site']['hive.server2.authentication.spnego.principal'],
-                                status_params.hostname,
-                                status_params.tmp_dir)
-          self.put_structured_out({"securityState": "SECURED_KERBEROS"})
-        except Exception as e:
-          self.put_structured_out({"securityState": "ERROR"})
-          self.put_structured_out({"securityStateErrorInfo": str(e)})
-      else:
-        issues = []
-        for cf in result_issues:
-          issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
-        self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
-        self.put_structured_out({"securityState": "UNSECURED"})
-    else:
-      self.put_structured_out({"securityState": "UNSECURED"})
-
   def _base_node(self, path):
     if not path.startswith('/'):
       path = '/' + path

http://git-wip-us.apache.org/repos/asf/ambari/blob/2a00812a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive_server_interactive.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive_server_interactive.py b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive_server_interactive.py
index 46864c0..8f57f1e 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive_server_interactive.py
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive_server_interactive.py
@@ -152,67 +152,6 @@ class HiveServerInteractiveDefault(HiveServerInteractive):
       # Recursively check all existing gmetad pid files
       check_process_status(status_params.hive_interactive_pid)
 
-    def security_status(self, env):
-      import status_params
-      env.set_params(status_params)
-
-      if status_params.security_enabled:
-        props_value_check = {"hive.server2.authentication": "KERBEROS",
-                             "hive.metastore.sasl.enabled": "true",
-                             "hive.security.authorization.enabled": "true"}
-        props_empty_check = ["hive.server2.authentication.kerberos.keytab",
-                             "hive.server2.authentication.kerberos.principal",
-                             "hive.server2.authentication.spnego.principal",
-                             "hive.server2.authentication.spnego.keytab"]
-
-        props_read_check = ["hive.server2.authentication.kerberos.keytab",
-                            "hive.server2.authentication.spnego.keytab"]
-        hive_site_props = build_expectations('hive-site', props_value_check, props_empty_check,
-                                             props_read_check)
-
-        hive_expectations ={}
-        hive_expectations.update(hive_site_props)
-
-        security_params = get_params_from_filesystem(status_params.hive_server_interactive_conf_dir,
-                                                     {'hive-site.xml': FILE_TYPE_XML})
-        result_issues = validate_security_config_properties(security_params, hive_expectations)
-        if not result_issues: # If all validations passed successfully
-          try:
-            # Double check the dict before calling execute
-            if 'hive-site' not in security_params \
-              or 'hive.server2.authentication.kerberos.keytab' not in security_params['hive-site'] \
-              or 'hive.server2.authentication.kerberos.principal' not in security_params['hive-site'] \
-              or 'hive.server2.authentication.spnego.keytab' not in security_params['hive-site'] \
-              or 'hive.server2.authentication.spnego.principal' not in security_params['hive-site']:
-              self.put_structured_out({"securityState": "UNSECURED"})
-              self.put_structured_out({"securityIssuesFound": "Keytab file or principal are not set property."})
-              return
-
-            cached_kinit_executor(status_params.kinit_path_local,
-                                  status_params.hive_user,
-                                  security_params['hive-site']['hive.server2.authentication.kerberos.keytab'],
-                                  security_params['hive-site']['hive.server2.authentication.kerberos.principal'],
-                                  status_params.hostname,
-                                  status_params.tmp_dir)
-            cached_kinit_executor(status_params.kinit_path_local,
-                                  status_params.hive_user,
-                                  security_params['hive-site']['hive.server2.authentication.spnego.keytab'],
-                                  security_params['hive-site']['hive.server2.authentication.spnego.principal'],
-                                  status_params.hostname,
-                                  status_params.tmp_dir)
-            self.put_structured_out({"securityState": "SECURED_KERBEROS"})
-          except Exception as e:
-            self.put_structured_out({"securityState": "ERROR"})
-            self.put_structured_out({"securityStateErrorInfo": str(e)})
-        else:
-          issues = []
-          for cf in result_issues:
-            issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
-          self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
-          self.put_structured_out({"securityState": "UNSECURED"})
-      else:
-        self.put_structured_out({"securityState": "UNSECURED"})
-
     def restart_llap(self, env):
       """
       Custom command to Restart LLAP

http://git-wip-us.apache.org/repos/asf/ambari/blob/2a00812a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/webhcat_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/webhcat_server.py b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/webhcat_server.py
index 93fa411..18e11ab 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/webhcat_server.py
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/webhcat_server.py
@@ -84,73 +84,6 @@ class WebHCatServerDefault(WebHCatServer):
       conf_select.select(params.stack_name, "hadoop", params.version)
       stack_select.select("hive-webhcat", params.version)
 
-  def security_status(self, env):
-    import status_params
-    env.set_params(status_params)
-
-    if status_params.security_enabled:
-      expectations ={}
-      expectations.update(
-        build_expectations(
-          'webhcat-site',
-          {
-            "templeton.kerberos.secret": "secret"
-          },
-          [
-            "templeton.kerberos.keytab",
-            "templeton.kerberos.principal"
-          ],
-          [
-            "templeton.kerberos.keytab"
-          ]
-        )
-      )
-      expectations.update(
-        build_expectations(
-          'hive-site',
-          {
-            "hive.server2.authentication": "KERBEROS",
-            "hive.metastore.sasl.enabled": "true",
-            "hive.security.authorization.enabled": "true"
-          },
-          None,
-          None
-        )
-      )
-
-      security_params = {}
-      security_params.update(get_params_from_filesystem(status_params.webhcat_conf_dir,
-                                                        {'webhcat-site.xml': FILE_TYPE_XML}))
-      result_issues = validate_security_config_properties(security_params, expectations)
-      if not result_issues: # If all validations passed successfully
-        try:
-          # Double check the dict before calling execute
-          if 'webhcat-site' not in security_params \
-            or 'templeton.kerberos.keytab' not in security_params['webhcat-site'] \
-            or 'templeton.kerberos.principal' not in security_params['webhcat-site']:
-            self.put_structured_out({"securityState": "UNSECURED"})
-            self.put_structured_out({"securityIssuesFound": "Keytab file or principal are not set property."})
-            return
-
-          cached_kinit_executor(status_params.kinit_path_local,
-                                status_params.webhcat_user,
-                                security_params['webhcat-site']['templeton.kerberos.keytab'],
-                                security_params['webhcat-site']['templeton.kerberos.principal'],
-                                status_params.hostname,
-                                status_params.tmp_dir)
-          self.put_structured_out({"securityState": "SECURED_KERBEROS"})
-        except Exception as e:
-          self.put_structured_out({"securityState": "ERROR"})
-          self.put_structured_out({"securityStateErrorInfo": str(e)})
-      else:
-        issues = []
-        for cf in result_issues:
-          issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
-        self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
-        self.put_structured_out({"securityState": "UNSECURED"})
-    else:
-      self.put_structured_out({"securityState": "UNSECURED"})
-
   def get_log_folder(self):
     import params
     return params.hcat_log_dir

http://git-wip-us.apache.org/repos/asf/ambari/blob/2a00812a/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/scripts/hive_metastore.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/scripts/hive_metastore.py b/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/scripts/hive_metastore.py
index 8b69e45..2dc6906 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/scripts/hive_metastore.py
+++ b/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/scripts/hive_metastore.py
@@ -113,58 +113,6 @@ class HiveMetastoreDefault(HiveMetastore):
             check_stack_feature(StackFeature.HIVE_METASTORE_UPGRADE_SCHEMA, params.stack_version_formatted_major):
       self.upgrade_schema(env)
 
-
-  def security_status(self, env):
-    import status_params
-    env.set_params(status_params)
-    if status_params.security_enabled:
-      props_value_check = {"hive.server2.authentication": "KERBEROS",
-                           "hive.metastore.sasl.enabled": "true",
-                           "hive.security.authorization.enabled": "true"}
-      props_empty_check = ["hive.metastore.kerberos.keytab.file",
-                           "hive.metastore.kerberos.principal"]
-
-      props_read_check = ["hive.metastore.kerberos.keytab.file"]
-      hive_site_props = build_expectations('hive-site', props_value_check, props_empty_check,
-                                            props_read_check)
-
-      hive_expectations ={}
-      hive_expectations.update(hive_site_props)
-
-      security_params = get_params_from_filesystem(status_params.hive_conf_dir,
-                                                   {'hive-site.xml': FILE_TYPE_XML})
-      result_issues = validate_security_config_properties(security_params, hive_expectations)
-      if not result_issues: # If all validations passed successfully
-        try:
-          # Double check the dict before calling execute
-          if 'hive-site' not in security_params \
-            or 'hive.metastore.kerberos.keytab.file' not in security_params['hive-site'] \
-            or 'hive.metastore.kerberos.principal' not in security_params['hive-site']:
-            self.put_structured_out({"securityState": "UNSECURED"})
-            self.put_structured_out({"securityIssuesFound": "Keytab file or principal are not set property."})
-            return
-
-          cached_kinit_executor(status_params.kinit_path_local,
-                                status_params.hive_user,
-                                security_params['hive-site']['hive.metastore.kerberos.keytab.file'],
-                                security_params['hive-site']['hive.metastore.kerberos.principal'],
-                                status_params.hostname,
-                                status_params.tmp_dir)
-
-          self.put_structured_out({"securityState": "SECURED_KERBEROS"})
-        except Exception as e:
-          self.put_structured_out({"securityState": "ERROR"})
-          self.put_structured_out({"securityStateErrorInfo": str(e)})
-      else:
-        issues = []
-        for cf in result_issues:
-          issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
-        self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
-        self.put_structured_out({"securityState": "UNSECURED"})
-    else:
-      self.put_structured_out({"securityState": "UNSECURED"})
-
-
   def upgrade_schema(self, env):
     """
     Executes the schema upgrade binary.  This is its own function because it could

http://git-wip-us.apache.org/repos/asf/ambari/blob/2a00812a/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/scripts/hive_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/scripts/hive_server.py b/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/scripts/hive_server.py
index f6251e7..7c3a805 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/scripts/hive_server.py
+++ b/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/scripts/hive_server.py
@@ -135,67 +135,6 @@ class HiveServerDefault(HiveServer):
       if resource_created:
         params.HdfsResource(None, action="execute")
 
-
-  def security_status(self, env):
-    import status_params
-    env.set_params(status_params)
-    if status_params.security_enabled:
-      props_value_check = {"hive.server2.authentication": "KERBEROS",
-                           "hive.metastore.sasl.enabled": "true",
-                           "hive.security.authorization.enabled": "true"}
-      props_empty_check = ["hive.server2.authentication.kerberos.keytab",
-                           "hive.server2.authentication.kerberos.principal",
-                           "hive.server2.authentication.spnego.principal",
-                           "hive.server2.authentication.spnego.keytab"]
-
-      props_read_check = ["hive.server2.authentication.kerberos.keytab",
-                          "hive.server2.authentication.spnego.keytab"]
-      hive_site_props = build_expectations('hive-site', props_value_check, props_empty_check,
-                                            props_read_check)
-
-      hive_expectations ={}
-      hive_expectations.update(hive_site_props)
-
-      security_params = get_params_from_filesystem(status_params.hive_conf_dir,
-                                                   {'hive-site.xml': FILE_TYPE_XML})
-      result_issues = validate_security_config_properties(security_params, hive_expectations)
-      if not result_issues: # If all validations passed successfully
-        try:
-          # Double check the dict before calling execute
-          if 'hive-site' not in security_params \
-            or 'hive.server2.authentication.kerberos.keytab' not in security_params['hive-site'] \
-            or 'hive.server2.authentication.kerberos.principal' not in security_params['hive-site']  \
-            or 'hive.server2.authentication.spnego.keytab' not in security_params['hive-site'] \
-            or 'hive.server2.authentication.spnego.principal' not in security_params['hive-site']:
-            self.put_structured_out({"securityState": "UNSECURED"})
-            self.put_structured_out({"securityIssuesFound": "Keytab file or principal are not set property."})
-            return
-
-          cached_kinit_executor(status_params.kinit_path_local,
-                                status_params.hive_user,
-                                security_params['hive-site']['hive.server2.authentication.kerberos.keytab'],
-                                security_params['hive-site']['hive.server2.authentication.kerberos.principal'],
-                                status_params.hostname,
-                                status_params.tmp_dir)
-          cached_kinit_executor(status_params.kinit_path_local,
-                                status_params.hive_user,
-                                security_params['hive-site']['hive.server2.authentication.spnego.keytab'],
-                                security_params['hive-site']['hive.server2.authentication.spnego.principal'],
-                                status_params.hostname,
-                                status_params.tmp_dir)
-          self.put_structured_out({"securityState": "SECURED_KERBEROS"})
-        except Exception as e:
-          self.put_structured_out({"securityState": "ERROR"})
-          self.put_structured_out({"securityStateErrorInfo": str(e)})
-      else:
-        issues = []
-        for cf in result_issues:
-          issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
-        self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
-        self.put_structured_out({"securityState": "UNSECURED"})
-    else:
-      self.put_structured_out({"securityState": "UNSECURED"})
-
   def _base_node(self, path):
     if not path.startswith('/'):
       path = '/' + path

http://git-wip-us.apache.org/repos/asf/ambari/blob/2a00812a/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/scripts/hive_server_interactive.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/scripts/hive_server_interactive.py b/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/scripts/hive_server_interactive.py
index 46864c0..8f57f1e 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/scripts/hive_server_interactive.py
+++ b/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/scripts/hive_server_interactive.py
@@ -152,67 +152,6 @@ class HiveServerInteractiveDefault(HiveServerInteractive):
       # Recursively check all existing gmetad pid files
       check_process_status(status_params.hive_interactive_pid)
 
-    def security_status(self, env):
-      import status_params
-      env.set_params(status_params)
-
-      if status_params.security_enabled:
-        props_value_check = {"hive.server2.authentication": "KERBEROS",
-                             "hive.metastore.sasl.enabled": "true",
-                             "hive.security.authorization.enabled": "true"}
-        props_empty_check = ["hive.server2.authentication.kerberos.keytab",
-                             "hive.server2.authentication.kerberos.principal",
-                             "hive.server2.authentication.spnego.principal",
-                             "hive.server2.authentication.spnego.keytab"]
-
-        props_read_check = ["hive.server2.authentication.kerberos.keytab",
-                            "hive.server2.authentication.spnego.keytab"]
-        hive_site_props = build_expectations('hive-site', props_value_check, props_empty_check,
-                                             props_read_check)
-
-        hive_expectations ={}
-        hive_expectations.update(hive_site_props)
-
-        security_params = get_params_from_filesystem(status_params.hive_server_interactive_conf_dir,
-                                                     {'hive-site.xml': FILE_TYPE_XML})
-        result_issues = validate_security_config_properties(security_params, hive_expectations)
-        if not result_issues: # If all validations passed successfully
-          try:
-            # Double check the dict before calling execute
-            if 'hive-site' not in security_params \
-              or 'hive.server2.authentication.kerberos.keytab' not in security_params['hive-site'] \
-              or 'hive.server2.authentication.kerberos.principal' not in security_params['hive-site'] \
-              or 'hive.server2.authentication.spnego.keytab' not in security_params['hive-site'] \
-              or 'hive.server2.authentication.spnego.principal' not in security_params['hive-site']:
-              self.put_structured_out({"securityState": "UNSECURED"})
-              self.put_structured_out({"securityIssuesFound": "Keytab file or principal are not set property."})
-              return
-
-            cached_kinit_executor(status_params.kinit_path_local,
-                                  status_params.hive_user,
-                                  security_params['hive-site']['hive.server2.authentication.kerberos.keytab'],
-                                  security_params['hive-site']['hive.server2.authentication.kerberos.principal'],
-                                  status_params.hostname,
-                                  status_params.tmp_dir)
-            cached_kinit_executor(status_params.kinit_path_local,
-                                  status_params.hive_user,
-                                  security_params['hive-site']['hive.server2.authentication.spnego.keytab'],
-                                  security_params['hive-site']['hive.server2.authentication.spnego.principal'],
-                                  status_params.hostname,
-                                  status_params.tmp_dir)
-            self.put_structured_out({"securityState": "SECURED_KERBEROS"})
-          except Exception as e:
-            self.put_structured_out({"securityState": "ERROR"})
-            self.put_structured_out({"securityStateErrorInfo": str(e)})
-        else:
-          issues = []
-          for cf in result_issues:
-            issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
-          self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
-          self.put_structured_out({"securityState": "UNSECURED"})
-      else:
-        self.put_structured_out({"securityState": "UNSECURED"})
-
     def restart_llap(self, env):
       """
       Custom command to Restart LLAP

http://git-wip-us.apache.org/repos/asf/ambari/blob/2a00812a/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/scripts/webhcat_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/scripts/webhcat_server.py b/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/scripts/webhcat_server.py
index 93fa411..18e11ab 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/scripts/webhcat_server.py
+++ b/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/scripts/webhcat_server.py
@@ -84,73 +84,6 @@ class WebHCatServerDefault(WebHCatServer):
       conf_select.select(params.stack_name, "hadoop", params.version)
       stack_select.select("hive-webhcat", params.version)
 
-  def security_status(self, env):
-    import status_params
-    env.set_params(status_params)
-
-    if status_params.security_enabled:
-      expectations ={}
-      expectations.update(
-        build_expectations(
-          'webhcat-site',
-          {
-            "templeton.kerberos.secret": "secret"
-          },
-          [
-            "templeton.kerberos.keytab",
-            "templeton.kerberos.principal"
-          ],
-          [
-            "templeton.kerberos.keytab"
-          ]
-        )
-      )
-      expectations.update(
-        build_expectations(
-          'hive-site',
-          {
-            "hive.server2.authentication": "KERBEROS",
-            "hive.metastore.sasl.enabled": "true",
-            "hive.security.authorization.enabled": "true"
-          },
-          None,
-          None
-        )
-      )
-
-      security_params = {}
-      security_params.update(get_params_from_filesystem(status_params.webhcat_conf_dir,
-                                                        {'webhcat-site.xml': FILE_TYPE_XML}))
-      result_issues = validate_security_config_properties(security_params, expectations)
-      if not result_issues: # If all validations passed successfully
-        try:
-          # Double check the dict before calling execute
-          if 'webhcat-site' not in security_params \
-            or 'templeton.kerberos.keytab' not in security_params['webhcat-site'] \
-            or 'templeton.kerberos.principal' not in security_params['webhcat-site']:
-            self.put_structured_out({"securityState": "UNSECURED"})
-            self.put_structured_out({"securityIssuesFound": "Keytab file or principal are not set property."})
-            return
-
-          cached_kinit_executor(status_params.kinit_path_local,
-                                status_params.webhcat_user,
-                                security_params['webhcat-site']['templeton.kerberos.keytab'],
-                                security_params['webhcat-site']['templeton.kerberos.principal'],
-                                status_params.hostname,
-                                status_params.tmp_dir)
-          self.put_structured_out({"securityState": "SECURED_KERBEROS"})
-        except Exception as e:
-          self.put_structured_out({"securityState": "ERROR"})
-          self.put_structured_out({"securityStateErrorInfo": str(e)})
-      else:
-        issues = []
-        for cf in result_issues:
-          issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
-        self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
-        self.put_structured_out({"securityState": "UNSECURED"})
-    else:
-      self.put_structured_out({"securityState": "UNSECURED"})
-
   def get_log_folder(self):
     import params
     return params.hcat_log_dir

http://git-wip-us.apache.org/repos/asf/ambari/blob/2a00812a/ambari-server/src/main/resources/common-services/KERBEROS/1.10.3-10/package/scripts/kerberos_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/KERBEROS/1.10.3-10/package/scripts/kerberos_client.py b/ambari-server/src/main/resources/common-services/KERBEROS/1.10.3-10/package/scripts/kerberos_client.py
index c50c67b..39fdcf5 100644
--- a/ambari-server/src/main/resources/common-services/KERBEROS/1.10.3-10/package/scripts/kerberos_client.py
+++ b/ambari-server/src/main/resources/common-services/KERBEROS/1.10.3-10/package/scripts/kerberos_client.py
@@ -43,27 +43,6 @@ class KerberosClient(KerberosScript):
   def status(self, env):
     raise ClientComponentHasNoStatus()
 
-  def security_status(self, env):
-    import status_params
-    if status_params.security_enabled:
-      if status_params.smoke_user and status_params.smoke_user_keytab:
-        try:
-          cached_kinit_executor(status_params.kinit_path_local,
-                                status_params.smoke_user,
-                                status_params.smoke_user_keytab,
-                                status_params.smoke_user_principal,
-                                status_params.hostname,
-                                status_params.tmp_dir)
-          self.put_structured_out({"securityState": "SECURED_KERBEROS"})
-        except Exception as e:
-          self.put_structured_out({"securityState": "ERROR"})
-          self.put_structured_out({"securityStateErrorInfo": str(e)})
-      else:
-        self.put_structured_out({"securityState": "UNKNOWN"})
-        self.put_structured_out({"securityStateErrorInfo": "Missing smoke user credentials"})
-    else:
-      self.put_structured_out({"securityState": "UNSECURED"})
-
   def set_keytab(self, env):
     self.write_keytab_file()
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/2a00812a/ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/package/scripts/knox_gateway.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/package/scripts/knox_gateway.py b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/package/scripts/knox_gateway.py
index 31e54e5..8996d23 100644
--- a/ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/package/scripts/knox_gateway.py
+++ b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/package/scripts/knox_gateway.py
@@ -202,67 +202,6 @@ class KnoxGatewayDefault(KnoxGateway):
     File(params.ldap_pid_file,
       action = "delete"
     )
-
-  def security_status(self, env):
-    import status_params
-    env.set_params(status_params)
-
-    if status_params.security_enabled:
-      expectations = {}
-      expectations.update(build_expectations(
-        'krb5JAASLogin',
-        None,
-        ['keytab', 'principal'],
-        None
-      ))
-      expectations.update(build_expectations(
-        'gateway-site',
-        {
-          "gateway.hadoop.kerberos.secured" : "true"
-        },
-        None,
-        None
-      ))
-
-      security_params = {
-        "krb5JAASLogin":
-          {
-            'keytab': status_params.knox_keytab_path,
-            'principal': status_params.knox_principal_name
-          }
-      }
-      security_params.update(get_params_from_filesystem(status_params.knox_conf_dir,
-        {"gateway-site.xml" : FILE_TYPE_XML}))
-
-      result_issues = validate_security_config_properties(security_params, expectations)
-      if not result_issues:  # If all validations passed successfully
-        try:
-          # Double check the dict before calling execute
-          if ( 'krb5JAASLogin' not in security_params
-               or 'keytab' not in security_params['krb5JAASLogin']
-               or 'principal' not in security_params['krb5JAASLogin']):
-            self.put_structured_out({"securityState": "UNSECURED"})
-            self.put_structured_out({"securityIssuesFound": "Keytab file and principal are not set."})
-            return
-
-          cached_kinit_executor(status_params.kinit_path_local,
-                                status_params.knox_user,
-                                security_params['krb5JAASLogin']['keytab'],
-                                security_params['krb5JAASLogin']['principal'],
-                                status_params.hostname,
-                                status_params.temp_dir)
-          self.put_structured_out({"securityState": "SECURED_KERBEROS"})
-        except Exception as e:
-          self.put_structured_out({"securityState": "ERROR"})
-          self.put_structured_out({"securityStateErrorInfo": str(e)})
-      else:
-        issues = []
-        for cf in result_issues:
-          issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
-        self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
-        self.put_structured_out({"securityState": "UNSECURED"})
-    else:
-      self.put_structured_out({"securityState": "UNSECURED"})
       
   def get_log_folder(self):
     import params

http://git-wip-us.apache.org/repos/asf/ambari/blob/2a00812a/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie_server.py b/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie_server.py
index a8b2cf4..9320bc3 100644
--- a/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie_server.py
+++ b/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie_server.py
@@ -105,69 +105,6 @@ class OozieServer(Script):
 @OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
 class OozieServerDefault(OozieServer):
 
-  def security_status(self, env):
-    import status_params
-    env.set_params(status_params)
-
-    if status_params.security_enabled:
-      expectations = {
-        "oozie-site":
-          build_expectations('oozie-site',
-                             {
-                               "oozie.authentication.type": "kerberos",
-                               "oozie.service.AuthorizationService.security.enabled": "true",
-                               "oozie.service.HadoopAccessorService.kerberos.enabled": "true"
-                             },
-                             [
-                               "local.realm",
-                               "oozie.authentication.kerberos.principal",
-                               "oozie.authentication.kerberos.keytab",
-                               "oozie.service.HadoopAccessorService.kerberos.principal",
-                               "oozie.service.HadoopAccessorService.keytab.file"
-                             ],
-                             None)
-      }
-
-      security_params = get_params_from_filesystem(status_params.conf_dir,
-                                                   {'oozie-site.xml': FILE_TYPE_XML})
-      result_issues = validate_security_config_properties(security_params, expectations)
-      if not result_issues: # If all validations passed successfully
-        try:
-          # Double check the dict before calling execute
-          if ('oozie-site' not in security_params
-              or 'oozie.authentication.kerberos.principal' not in security_params['oozie-site']
-              or 'oozie.authentication.kerberos.keytab' not in security_params['oozie-site']
-              or 'oozie.service.HadoopAccessorService.kerberos.principal' not in security_params['oozie-site']
-              or 'oozie.service.HadoopAccessorService.keytab.file' not in security_params['oozie-site']):
-            self.put_structured_out({"securityState": "UNSECURED"})
-            self.put_structured_out({"securityIssuesFound": "Keytab file or principal are not set property."})
-            return
-
-          cached_kinit_executor(status_params.kinit_path_local,
-                                status_params.oozie_user,
-                                security_params['oozie-site']['oozie.authentication.kerberos.keytab'],
-                                security_params['oozie-site']['oozie.authentication.kerberos.principal'],
-                                status_params.hostname,
-                                status_params.tmp_dir)
-          cached_kinit_executor(status_params.kinit_path_local,
-                                status_params.oozie_user,
-                                security_params['oozie-site']['oozie.service.HadoopAccessorService.keytab.file'],
-                                security_params['oozie-site']['oozie.service.HadoopAccessorService.kerberos.principal'],
-                                status_params.hostname,
-                                status_params.tmp_dir)
-          self.put_structured_out({"securityState": "SECURED_KERBEROS"})
-        except Exception as e:
-          self.put_structured_out({"securityState": "ERROR"})
-          self.put_structured_out({"securityStateErrorInfo": str(e)})
-      else:
-        issues = []
-        for cf in result_issues:
-          issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
-        self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
-        self.put_structured_out({"securityState": "UNSECURED"})
-    else:
-      self.put_structured_out({"securityState": "UNSECURED"})
-
   def pre_upgrade_restart(self, env, upgrade_type=None):
     """
     Performs the tasks that should be done before an upgrade of oozie. This includes:

http://git-wip-us.apache.org/repos/asf/ambari/blob/2a00812a/ambari-server/src/main/resources/common-services/STORM/0.9.1/package/scripts/drpc_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/STORM/0.9.1/package/scripts/drpc_server.py b/ambari-server/src/main/resources/common-services/STORM/0.9.1/package/scripts/drpc_server.py
index 178c043..f991e71 100644
--- a/ambari-server/src/main/resources/common-services/STORM/0.9.1/package/scripts/drpc_server.py
+++ b/ambari-server/src/main/resources/common-services/STORM/0.9.1/package/scripts/drpc_server.py
@@ -74,58 +74,6 @@ class DrpcServer(Script):
     import status_params
     env.set_params(status_params)
     check_process_status(status_params.pid_drpc)
-
-  def security_status(self, env):
-    import status_params
-
-    env.set_params(status_params)
-
-    if status_params.security_enabled:
-      # Expect the following files to be available in status_params.config_dir:
-      #   storm_jaas.conf
-
-      try:
-        props_value_check = None
-        props_empty_check = ['StormServer/keyTab', 'StormServer/principal']
-        props_read_check = ['StormServer/keyTab']
-        storm_env_expectations = build_expectations('storm_jaas', props_value_check, props_empty_check,
-                                                 props_read_check)
-
-        storm_expectations = {}
-        storm_expectations.update(storm_env_expectations)
-
-        security_params = get_params_from_filesystem(status_params.conf_dir,
-                                                     {'storm_jaas.conf': FILE_TYPE_JAAS_CONF})
-
-        result_issues = validate_security_config_properties(security_params, storm_expectations)
-        if not result_issues:  # If all validations passed successfully
-          # Double check the dict before calling execute
-          if ( 'storm_jaas' not in security_params
-               or 'StormServer' not in security_params['storm_jaas']
-               or 'keyTab' not in security_params['storm_jaas']['StormServer']
-               or 'principal' not in security_params['storm_jaas']['StormServer']):
-            self.put_structured_out({"securityState": "ERROR"})
-            self.put_structured_out({"securityIssuesFound": "Keytab file or principal are not set property."})
-            return
-
-          cached_kinit_executor(status_params.kinit_path_local,
-                                status_params.storm_user,
-                                security_params['storm_jaas']['StormServer']['keyTab'],
-                                security_params['storm_jaas']['StormServer']['principal'],
-                                status_params.hostname,
-                                status_params.tmp_dir)
-          self.put_structured_out({"securityState": "SECURED_KERBEROS"})
-        else:
-          issues = []
-          for cf in result_issues:
-            issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
-          self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
-          self.put_structured_out({"securityState": "UNSECURED"})
-      except Exception as e:
-        self.put_structured_out({"securityState": "ERROR"})
-        self.put_structured_out({"securityStateErrorInfo": str(e)})
-    else:
-      self.put_structured_out({"securityState": "UNSECURED"})
       
   def get_log_folder(self):
     import params

http://git-wip-us.apache.org/repos/asf/ambari/blob/2a00812a/ambari-server/src/main/resources/common-services/STORM/0.9.1/package/scripts/nimbus.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/STORM/0.9.1/package/scripts/nimbus.py b/ambari-server/src/main/resources/common-services/STORM/0.9.1/package/scripts/nimbus.py
index a974103..360af5d 100644
--- a/ambari-server/src/main/resources/common-services/STORM/0.9.1/package/scripts/nimbus.py
+++ b/ambari-server/src/main/resources/common-services/STORM/0.9.1/package/scripts/nimbus.py
@@ -82,51 +82,6 @@ class NimbusDefault(Nimbus):
     env.set_params(status_params)
     check_process_status(status_params.pid_nimbus)
 
-
-  def security_status(self, env):
-    import status_params
-    env.set_params(status_params)
-    if status_params.security_enabled:
-      # Expect the following files to be available in status_params.config_dir:
-      #   storm_jaas.conf
-      try:
-        props_value_check = None
-        props_empty_check = ['StormServer/keyTab', 'StormServer/principal']
-        props_read_check = ['StormServer/keyTab']
-        storm_env_expectations = build_expectations('storm_jaas', props_value_check, props_empty_check,  props_read_check)
-        storm_expectations = {}
-        storm_expectations.update(storm_env_expectations)
-        security_params = get_params_from_filesystem(status_params.conf_dir, {'storm_jaas.conf': FILE_TYPE_JAAS_CONF})
-        result_issues = validate_security_config_properties(security_params, storm_expectations)
-        if not result_issues:  # If all validations passed successfully
-          # Double check the dict before calling execute
-          if ( 'storm_jaas' not in security_params
-               or 'StormServer' not in security_params['storm_jaas']
-               or 'keyTab' not in security_params['storm_jaas']['StormServer']
-               or 'principal' not in security_params['storm_jaas']['StormServer']):
-            self.put_structured_out({"securityState": "ERROR"})
-            self.put_structured_out({"securityIssuesFound": "Keytab file or principal are not set property."})
-            return
-
-          cached_kinit_executor(status_params.kinit_path_local,
-                                status_params.storm_user,
-                                security_params['storm_jaas']['StormServer']['keyTab'],
-                                security_params['storm_jaas']['StormServer']['principal'],
-                                status_params.hostname,
-                                status_params.tmp_dir)
-          self.put_structured_out({"securityState": "SECURED_KERBEROS"})
-        else:
-          issues = []
-          for cf in result_issues:
-            issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
-          self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
-          self.put_structured_out({"securityState": "UNSECURED"})
-      except Exception as e:
-        self.put_structured_out({"securityState": "ERROR"})
-        self.put_structured_out({"securityStateErrorInfo": str(e)})
-    else:
-      self.put_structured_out({"securityState": "UNSECURED"})
-
   def get_log_folder(self):
     import params
     return params.log_dir

http://git-wip-us.apache.org/repos/asf/ambari/blob/2a00812a/ambari-server/src/main/resources/common-services/STORM/0.9.1/package/scripts/pacemaker.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/STORM/0.9.1/package/scripts/pacemaker.py b/ambari-server/src/main/resources/common-services/STORM/0.9.1/package/scripts/pacemaker.py
index a56c0cd..fa3112d 100644
--- a/ambari-server/src/main/resources/common-services/STORM/0.9.1/package/scripts/pacemaker.py
+++ b/ambari-server/src/main/resources/common-services/STORM/0.9.1/package/scripts/pacemaker.py
@@ -74,58 +74,6 @@ class PaceMaker(Script):
       env.set_params(status_params)
       check_process_status(status_params.pid_pacemaker)
 
-  def security_status(self, env):
-      import status_params
-
-      env.set_params(status_params)
-
-      if status_params.security_enabled:
-          # Expect the following files to be available in status_params.config_dir:
-          #   storm_jaas.conf
-
-          try:
-              props_value_check = None
-              props_empty_check = ['StormServer/keyTab', 'StormServer/principal']
-              props_read_check = ['StormServer/keyTab']
-              storm_env_expectations = build_expectations('storm_jaas', props_value_check, props_empty_check,
-                                                          props_read_check)
-
-              storm_expectations = {}
-              storm_expectations.update(storm_env_expectations)
-
-              security_params = get_params_from_filesystem(status_params.conf_dir,
-                                                     {'storm_jaas.conf': FILE_TYPE_JAAS_CONF})
-
-              result_issues = validate_security_config_properties(security_params, storm_expectations)
-              if not result_issues:  # If all validations passed successfully
-                  # Double check the dict before calling execute
-                  if ( 'storm_jaas' not in security_params
-                       or 'StormServer' not in security_params['storm_jaas']
-                       or 'keyTab' not in security_params['storm_jaas']['StormServer']
-                       or 'principal' not in security_params['storm_jaas']['StormServer']):
-                      self.put_structured_out({"securityState": "ERROR"})
-                      self.put_structured_out({"securityIssuesFound": "Keytab file or principal are not set property."})
-                      return
-
-                  cached_kinit_executor(status_params.kinit_path_local,
-                                        status_params.storm_user,
-                                        security_params['storm_jaas']['StormServer']['keyTab'],
-                                        security_params['storm_jaas']['StormServer']['principal'],
-                                        status_params.hostname,
-                                        status_params.tmp_dir)
-                  self.put_structured_out({"securityState": "SECURED_KERBEROS"})
-              else:
-                  issues = []
-                  for cf in result_issues:
-                      issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
-                      self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
-                      self.put_structured_out({"securityState": "UNSECURED"})
-          except Exception as e:
-              self.put_structured_out({"securityState": "ERROR"})
-              self.put_structured_out({"securityStateErrorInfo": str(e)})
-      else:
-        self.put_structured_out({"securityState": "UNSECURED"})
-
   def get_log_folder(self):
       import params
       return params.log_dir

http://git-wip-us.apache.org/repos/asf/ambari/blob/2a00812a/ambari-server/src/main/resources/common-services/STORM/0.9.1/package/scripts/ui_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/STORM/0.9.1/package/scripts/ui_server.py b/ambari-server/src/main/resources/common-services/STORM/0.9.1/package/scripts/ui_server.py
index 63acecf..e257ef9 100644
--- a/ambari-server/src/main/resources/common-services/STORM/0.9.1/package/scripts/ui_server.py
+++ b/ambari-server/src/main/resources/common-services/STORM/0.9.1/package/scripts/ui_server.py
@@ -120,59 +120,6 @@ class UiServerDefault(UiServer):
     import status_params
     env.set_params(status_params)
     check_process_status(status_params.pid_ui)
-
-  def security_status(self, env):
-    import status_params
-
-    env.set_params(status_params)
-
-    if status_params.security_enabled:
-      # Expect the following files to be available in status_params.config_dir:
-      #   storm_jaas.conf
-
-      try:
-        props_value_check = None
-        props_empty_check = ['storm_ui_principal_name', 'storm_ui_keytab']
-        props_read_check = ['storm_ui_keytab']
-        storm_env_expectations = build_expectations('storm_ui', props_value_check, props_empty_check,
-                                                 props_read_check)
-
-        storm_expectations = {}
-        storm_expectations.update(storm_env_expectations)
-
-        security_params = {}
-        security_params['storm_ui'] = {}
-        security_params['storm_ui']['storm_ui_principal_name'] = status_params.storm_ui_principal
-        security_params['storm_ui']['storm_ui_keytab'] = status_params.storm_ui_keytab
-
-        result_issues = validate_security_config_properties(security_params, storm_expectations)
-        if not result_issues:  # If all validations passed successfully
-          # Double check the dict before calling execute
-          if ( 'storm_ui' not in security_params
-               or 'storm_ui_principal_name' not in security_params['storm_ui']
-               or 'storm_ui_keytab' not in security_params['storm_ui']):
-            self.put_structured_out({"securityState": "ERROR"})
-            self.put_structured_out({"securityIssuesFound": "Keytab file or principal are not set property."})
-            return
-
-          cached_kinit_executor(status_params.kinit_path_local,
-                                status_params.storm_user,
-                                security_params['storm_ui']['storm_ui_keytab'],
-                                security_params['storm_ui']['storm_ui_principal_name'],
-                                status_params.hostname,
-                                status_params.tmp_dir)
-          self.put_structured_out({"securityState": "SECURED_KERBEROS"})
-        else:
-          issues = []
-          for cf in result_issues:
-            issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
-          self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
-          self.put_structured_out({"securityState": "UNSECURED"})
-      except Exception as e:
-        self.put_structured_out({"securityState": "ERROR"})
-        self.put_structured_out({"securityStateErrorInfo": str(e)})
-    else:
-      self.put_structured_out({"securityState": "UNSECURED"})
       
   def get_log_folder(self):
     import params

http://git-wip-us.apache.org/repos/asf/ambari/blob/2a00812a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/application_timeline_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/application_timeline_server.py b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/application_timeline_server.py
index 03fff21..b1e0c16 100644
--- a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/application_timeline_server.py
+++ b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/application_timeline_server.py
@@ -83,67 +83,6 @@ class ApplicationTimelineServerDefault(ApplicationTimelineServer):
     env.set_params(status_params)
     check_process_status(status_params.yarn_historyserver_pid_file)
 
-  def security_status(self, env):
-    import status_params
-    env.set_params(status_params)
-    if status_params.security_enabled:
-      props_value_check = {"yarn.timeline-service.enabled": "true",
-                           "yarn.timeline-service.http-authentication.type": "kerberos",
-                           "yarn.acl.enable": "true"}
-      props_empty_check = ["yarn.timeline-service.principal",
-                           "yarn.timeline-service.keytab",
-                           "yarn.timeline-service.http-authentication.kerberos.principal",
-                           "yarn.timeline-service.http-authentication.kerberos.keytab"]
-
-      props_read_check = ["yarn.timeline-service.keytab",
-                          "yarn.timeline-service.http-authentication.kerberos.keytab"]
-      yarn_site_props = build_expectations('yarn-site', props_value_check, props_empty_check,
-                                                  props_read_check)
-
-      yarn_expectations ={}
-      yarn_expectations.update(yarn_site_props)
-
-      security_params = get_params_from_filesystem(status_params.hadoop_conf_dir,
-                                                   {'yarn-site.xml': FILE_TYPE_XML})
-      result_issues = validate_security_config_properties(security_params, yarn_expectations)
-      if not result_issues: # If all validations passed successfully
-        try:
-          # Double check the dict before calling execute
-          if ( 'yarn-site' not in security_params
-               or 'yarn.timeline-service.keytab' not in security_params['yarn-site']
-               or 'yarn.timeline-service.principal' not in security_params['yarn-site']) \
-            or 'yarn.timeline-service.http-authentication.kerberos.keytab' not in security_params['yarn-site'] \
-            or 'yarn.timeline-service.http-authentication.kerberos.principal' not in security_params['yarn-site']:
-            self.put_structured_out({"securityState": "UNSECURED"})
-            self.put_structured_out(
-              {"securityIssuesFound": "Keytab file or principal are not set property."})
-            return
-
-          cached_kinit_executor(status_params.kinit_path_local,
-                                status_params.yarn_user,
-                                security_params['yarn-site']['yarn.timeline-service.keytab'],
-                                security_params['yarn-site']['yarn.timeline-service.principal'],
-                                status_params.hostname,
-                                status_params.tmp_dir)
-          cached_kinit_executor(status_params.kinit_path_local,
-                                status_params.yarn_user,
-                                security_params['yarn-site']['yarn.timeline-service.http-authentication.kerberos.keytab'],
-                                security_params['yarn-site']['yarn.timeline-service.http-authentication.kerberos.principal'],
-                                status_params.hostname,
-                                status_params.tmp_dir)
-          self.put_structured_out({"securityState": "SECURED_KERBEROS"})
-        except Exception as e:
-          self.put_structured_out({"securityState": "ERROR"})
-          self.put_structured_out({"securityStateErrorInfo": str(e)})
-      else:
-        issues = []
-        for cf in result_issues:
-          issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
-        self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
-        self.put_structured_out({"securityState": "UNSECURED"})
-    else:
-      self.put_structured_out({"securityState": "UNSECURED"})
-
   def get_log_folder(self):
     import params
     return params.yarn_log_dir

http://git-wip-us.apache.org/repos/asf/ambari/blob/2a00812a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/historyserver.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/historyserver.py b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/historyserver.py
index 8f5d380..d886244 100644
--- a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/historyserver.py
+++ b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/historyserver.py
@@ -120,62 +120,6 @@ class HistoryServerDefault(HistoryServer):
     env.set_params(status_params)
     check_process_status(status_params.mapred_historyserver_pid_file)
 
-  def security_status(self, env):
-    import status_params
-    env.set_params(status_params)
-    if status_params.security_enabled:
-      expectations = {}
-      expectations.update(build_expectations('mapred-site',
-                                             None,
-                                             [
-                                               'mapreduce.jobhistory.keytab',
-                                               'mapreduce.jobhistory.principal',
-                                               'mapreduce.jobhistory.webapp.spnego-keytab-file',
-                                               'mapreduce.jobhistory.webapp.spnego-principal'
-                                             ],
-                                             None))
-
-      security_params = get_params_from_filesystem(status_params.hadoop_conf_dir,
-                                                   {'mapred-site.xml': FILE_TYPE_XML})
-      result_issues = validate_security_config_properties(security_params, expectations)
-      if not result_issues: # If all validations passed successfully
-        try:
-          # Double check the dict before calling execute
-          if ( 'mapred-site' not in security_params or
-               'mapreduce.jobhistory.keytab' not in security_params['mapred-site'] or
-               'mapreduce.jobhistory.principal' not in security_params['mapred-site'] or
-               'mapreduce.jobhistory.webapp.spnego-keytab-file' not in security_params['mapred-site'] or
-               'mapreduce.jobhistory.webapp.spnego-principal' not in security_params['mapred-site']):
-            self.put_structured_out({"securityState": "UNSECURED"})
-            self.put_structured_out(
-              {"securityIssuesFound": "Keytab file or principal not set."})
-            return
-
-          cached_kinit_executor(status_params.kinit_path_local,
-                                status_params.mapred_user,
-                                security_params['mapred-site']['mapreduce.jobhistory.keytab'],
-                                security_params['mapred-site']['mapreduce.jobhistory.principal'],
-                                status_params.hostname,
-                                status_params.tmp_dir)
-          cached_kinit_executor(status_params.kinit_path_local,
-                                status_params.mapred_user,
-                                security_params['mapred-site']['mapreduce.jobhistory.webapp.spnego-keytab-file'],
-                                security_params['mapred-site']['mapreduce.jobhistory.webapp.spnego-principal'],
-                                status_params.hostname,
-                                status_params.tmp_dir)
-          self.put_structured_out({"securityState": "SECURED_KERBEROS"})
-        except Exception as e:
-          self.put_structured_out({"securityState": "ERROR"})
-          self.put_structured_out({"securityStateErrorInfo": str(e)})
-      else:
-        issues = []
-        for cf in result_issues:
-          issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
-        self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
-        self.put_structured_out({"securityState": "UNSECURED"})
-    else:
-      self.put_structured_out({"securityState": "UNSECURED"})
-
   def get_log_folder(self):
     import params
     return params.mapred_log_dir

http://git-wip-us.apache.org/repos/asf/ambari/blob/2a00812a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/nodemanager.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/nodemanager.py b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/nodemanager.py
index 133d2e1..5acb20b 100644
--- a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/nodemanager.py
+++ b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/nodemanager.py
@@ -90,66 +90,6 @@ class NodemanagerDefault(Nodemanager):
     env.set_params(status_params)
     check_process_status(status_params.nodemanager_pid_file)
 
-  def security_status(self, env):
-    import status_params
-    env.set_params(status_params)
-    if status_params.security_enabled:
-      props_value_check = {"yarn.timeline-service.http-authentication.type": "kerberos",
-                           "yarn.acl.enable": "true"}
-      props_empty_check = ["yarn.nodemanager.principal",
-                           "yarn.nodemanager.keytab",
-                           "yarn.nodemanager.webapp.spnego-principal",
-                           "yarn.nodemanager.webapp.spnego-keytab-file"]
-
-      props_read_check = ["yarn.nodemanager.keytab",
-                          "yarn.nodemanager.webapp.spnego-keytab-file"]
-      yarn_site_props = build_expectations('yarn-site', props_value_check, props_empty_check,
-                                           props_read_check)
-
-      yarn_expectations ={}
-      yarn_expectations.update(yarn_site_props)
-
-      security_params = get_params_from_filesystem(status_params.hadoop_conf_dir,
-                                                   {'yarn-site.xml': FILE_TYPE_XML})
-      result_issues = validate_security_config_properties(security_params, yarn_site_props)
-      if not result_issues: # If all validations passed successfully
-        try:
-          # Double check the dict before calling execute
-          if ( 'yarn-site' not in security_params
-               or 'yarn.nodemanager.keytab' not in security_params['yarn-site']
-               or 'yarn.nodemanager.principal' not in security_params['yarn-site']) \
-            or 'yarn.nodemanager.webapp.spnego-keytab-file' not in security_params['yarn-site'] \
-            or 'yarn.nodemanager.webapp.spnego-principal' not in security_params['yarn-site']:
-            self.put_structured_out({"securityState": "UNSECURED"})
-            self.put_structured_out(
-              {"securityIssuesFound": "Keytab file or principal are not set property."})
-            return
-
-          cached_kinit_executor(status_params.kinit_path_local,
-                                status_params.yarn_user,
-                                security_params['yarn-site']['yarn.nodemanager.keytab'],
-                                security_params['yarn-site']['yarn.nodemanager.principal'],
-                                status_params.hostname,
-                                status_params.tmp_dir)
-          cached_kinit_executor(status_params.kinit_path_local,
-                                status_params.yarn_user,
-                                security_params['yarn-site']['yarn.nodemanager.webapp.spnego-keytab-file'],
-                                security_params['yarn-site']['yarn.nodemanager.webapp.spnego-principal'],
-                                status_params.hostname,
-                                status_params.tmp_dir)
-          self.put_structured_out({"securityState": "SECURED_KERBEROS"})
-        except Exception as e:
-          self.put_structured_out({"securityState": "ERROR"})
-          self.put_structured_out({"securityStateErrorInfo": str(e)})
-      else:
-        issues = []
-        for cf in result_issues:
-          issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
-        self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
-        self.put_structured_out({"securityState": "UNSECURED"})
-    else:
-      self.put_structured_out({"securityState": "UNSECURED"})
-
   def get_log_folder(self):
     import params
     return params.yarn_log_dir


[48/50] [abbrv] ambari git commit: AMBARI-20326. HDP 3.0 TP - support for HBase with configs, kerberos, widgets, metrics, quicklinks, and themes (alejandro)

Posted by ao...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/41006ebe/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/metainfo.xml b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/metainfo.xml
new file mode 100644
index 0000000..ac57693
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/metainfo.xml
@@ -0,0 +1,232 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>HBASE</name>
+      <displayName>HBase</displayName>
+      <comment>Non-relational distributed database and centralized service for configuration management &amp;
+        synchronization
+      </comment>
+      <!-- For Alpha-1 release, it will be version 1.1.2. -->
+      <version>2.0.0.3.0</version>
+      <components>
+        <component>
+          <name>HBASE_MASTER</name>
+          <displayName>HBase Master</displayName>
+          <category>MASTER</category>
+          <cardinality>1+</cardinality>
+          <versionAdvertised>true</versionAdvertised>
+          <timelineAppid>HBASE</timelineAppid>
+          <dependencies>
+            <dependency>
+              <name>HDFS/HDFS_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+            <dependency>
+              <name>ZOOKEEPER/ZOOKEEPER_SERVER</name>
+              <scope>cluster</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+                <co-locate>HBASE/HBASE_MASTER</co-locate>
+              </auto-deploy>
+            </dependency>
+          </dependencies>
+          <commandScript>
+            <script>scripts/hbase_master.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>1200</timeout>
+          </commandScript>
+          <logs>
+            <log>
+              <logId>hbase_master</logId>
+              <primary>true</primary>
+            </log>
+          </logs>
+          <customCommands>
+            <customCommand>
+              <name>DECOMMISSION</name>
+              <commandScript>
+                <script>scripts/hbase_master.py</script>
+                <scriptType>PYTHON</scriptType>
+                <timeout>600</timeout>
+              </commandScript>
+            </customCommand>
+          </customCommands>
+        </component>
+
+        <component>
+          <name>HBASE_REGIONSERVER</name>
+          <displayName>RegionServer</displayName>
+          <category>SLAVE</category>
+          <cardinality>1+</cardinality>
+          <versionAdvertised>true</versionAdvertised>
+          <decommissionAllowed>true</decommissionAllowed>
+          <timelineAppid>HBASE</timelineAppid>
+          <commandScript>
+            <script>scripts/hbase_regionserver.py</script>
+            <scriptType>PYTHON</scriptType>
+          </commandScript>
+          <bulkCommands>
+            <displayName>RegionServers</displayName>
+            <!-- Used by decommission and recommission -->
+            <masterComponent>HBASE_MASTER</masterComponent>
+          </bulkCommands>
+          <logs>
+            <log>
+              <logId>hbase_regionserver</logId>
+              <primary>true</primary>
+            </log>
+          </logs>
+        </component>
+
+        <component>
+          <name>HBASE_CLIENT</name>
+          <displayName>HBase Client</displayName>
+          <category>CLIENT</category>
+          <cardinality>1+</cardinality>
+          <versionAdvertised>true</versionAdvertised>
+          <commandScript>
+            <script>scripts/hbase_client.py</script>
+            <scriptType>PYTHON</scriptType>
+          </commandScript>
+          <configFiles>
+            <configFile>
+              <type>xml</type>
+              <fileName>hbase-site.xml</fileName>
+              <dictionaryName>hbase-site</dictionaryName>
+            </configFile>
+            <configFile>
+              <type>env</type>
+              <fileName>hbase-env.sh</fileName>
+              <dictionaryName>hbase-env</dictionaryName>
+            </configFile>
+            <configFile>
+              <type>xml</type>
+              <fileName>hbase-policy.xml</fileName>
+              <dictionaryName>hbase-policy</dictionaryName>
+            </configFile>
+            <configFile>
+              <type>env</type>
+              <fileName>log4j.properties</fileName>
+              <dictionaryName>hbase-log4j</dictionaryName>
+            </configFile>            
+          </configFiles>
+        </component>
+
+        <component>
+          <name>PHOENIX_QUERY_SERVER</name>
+          <displayName>Phoenix Query Server</displayName>
+          <category>SLAVE</category>
+          <cardinality>0+</cardinality>
+          <versionAdvertised>true</versionAdvertised>
+          <commandScript>
+            <script>scripts/phoenix_queryserver.py</script>
+            <scriptType>PYTHON</scriptType>
+          </commandScript>
+          <logs>
+            <log>
+              <logId>hbase_phoenix_server</logId>
+              <primary>true</primary>
+            </log>
+          </logs>
+        </component>
+      </components>
+
+      <osSpecifics>
+        <osSpecific>
+          <osFamily>any</osFamily>
+          <packages>
+            <package>
+              <name>hbase</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+
+      <commandScript>
+        <script>scripts/service_check.py</script>
+        <scriptType>PYTHON</scriptType>
+        <timeout>300</timeout>
+      </commandScript>
+      
+      <requiredServices>
+        <service>ZOOKEEPER</service>
+        <service>HDFS</service>
+      </requiredServices>
+
+      <configuration-dependencies>
+        <config-type>core-site</config-type> <!-- hbase puts core-site in its folder -->
+        <config-type>hbase-policy</config-type>
+        <config-type>hbase-site</config-type>
+        <config-type>hbase-env</config-type>
+        <config-type>hbase-log4j</config-type>
+        <config-type>ranger-hbase-plugin-properties</config-type>
+        <config-type>ranger-hbase-audit</config-type>
+        <config-type>ranger-hbase-policymgr-ssl</config-type>
+        <config-type>ranger-hbase-security</config-type>
+      </configuration-dependencies>
+
+      <quickLinksConfigurations>
+        <quickLinksConfiguration>
+          <fileName>quicklinks.json</fileName>
+          <default>true</default>
+        </quickLinksConfiguration>
+      </quickLinksConfigurations>
+
+      <osSpecifics>
+        <osSpecific>
+          <osFamily>redhat7,amazon2015,redhat6,suse11,suse12</osFamily>
+          <packages>
+            <package>
+              <name>hbase_${stack_version}</name>
+            </package>
+            <package>
+              <name>phoenix_${stack_version}</name>
+              <condition>should_install_phoenix</condition>
+            </package>
+          </packages>
+        </osSpecific>
+        <osSpecific>
+          <osFamily>debian7,ubuntu12,ubuntu14,ubuntu16</osFamily>
+          <packages>
+            <package>
+              <name>hbase-${stack_version}</name>
+            </package>
+            <package>
+              <name>phoenix-${stack_version}</name>
+              <condition>should_install_phoenix</condition>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+
+      <themes>
+        <theme>
+          <fileName>theme.json</fileName>
+          <default>true</default>
+        </theme>
+      </themes>
+
+    </service>
+  </services>
+</metainfo>


[29/50] [abbrv] ambari git commit: AMBARI-20800. Need to change the arrangement of the action buttons in project manager (Supreeth Sharma via Venkata Sairam)

Posted by ao...@apache.org.
AMBARI-20800. Need to change the arrangement of the action buttons in project manager (Supreeth Sharma via Venkata Sairam)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/2c552240
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/2c552240
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/2c552240

Branch: refs/heads/branch-3.0-perf
Commit: 2c5522404a08eb9b399b70dad503725290dd78bc
Parents: 22fda0a
Author: Venkata Sairam <ve...@gmail.com>
Authored: Fri Apr 21 16:51:37 2017 +0530
Committer: Andrew Onishuk <ao...@hortonworks.com>
Committed: Mon Apr 24 14:59:56 2017 +0300

----------------------------------------------------------------------
 .../src/main/resources/ui/app/templates/components/drafts-wf.hbs   | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/2c552240/contrib/views/wfmanager/src/main/resources/ui/app/templates/components/drafts-wf.hbs
----------------------------------------------------------------------
diff --git a/contrib/views/wfmanager/src/main/resources/ui/app/templates/components/drafts-wf.hbs b/contrib/views/wfmanager/src/main/resources/ui/app/templates/components/drafts-wf.hbs
index ac28de3..05599e5 100644
--- a/contrib/views/wfmanager/src/main/resources/ui/app/templates/components/drafts-wf.hbs
+++ b/contrib/views/wfmanager/src/main/resources/ui/app/templates/components/drafts-wf.hbs
@@ -122,10 +122,10 @@
          <div><label>{{deleteMsg}}</label></div>
       </div>
       <div class="modal-footer">
+        <button type="button" class="btn btn-default" data-dismiss="modal">Close</button>
         {{#unless deleteMsg}}
           <button type="button" class="btn btn-primary" {{action "deleteWorkflow"}}>Delete</button>
         {{/unless}}
-        <button type="button" class="btn btn-default" data-dismiss="modal">Close</button>
       </div>
     </div>
   </div>


[34/50] [abbrv] ambari git commit: AMBARI-20811. Trying to upgrade HDF 2.0.2.0 to HDF 3.0.0.0 fails with NPE (dlysnichenko)

Posted by ao...@apache.org.
AMBARI-20811. Trying to upgrade HDF 2.0.2.0 to HDF 3.0.0.0 fails with NPE (dlysnichenko)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/6b549d36
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/6b549d36
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/6b549d36

Branch: refs/heads/branch-3.0-perf
Commit: 6b549d36fb196a423cfa2ba83971e9f1fa39b220
Parents: 9f113f9
Author: Lisnichenko Dmitro <dl...@hortonworks.com>
Authored: Fri Apr 21 16:09:01 2017 +0300
Committer: Andrew Onishuk <ao...@hortonworks.com>
Committed: Mon Apr 24 14:59:56 2017 +0300

----------------------------------------------------------------------
 .../java/org/apache/ambari/server/stack/StackDirectory.java    | 4 ++++
 .../java/org/apache/ambari/server/stack/StackManagerTest.java  | 6 +++++-
 2 files changed, 9 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/6b549d36/ambari-server/src/main/java/org/apache/ambari/server/stack/StackDirectory.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/stack/StackDirectory.java b/ambari-server/src/main/java/org/apache/ambari/server/stack/StackDirectory.java
index d0950a7..5b44112 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/stack/StackDirectory.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/stack/StackDirectory.java
@@ -20,6 +20,7 @@ package org.apache.ambari.server.stack;
 
 import java.io.File;
 import java.io.IOException;
+import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.HashMap;
@@ -437,6 +438,9 @@ public class StackDirectory extends StackDefinitionDirectory {
     if (configUpgradePack != null) {
       this.configUpgradePack = configUpgradePack;
     } else {
+      ConfigUpgradePack emptyConfigUpgradePack = new ConfigUpgradePack();
+      emptyConfigUpgradePack.services = new ArrayList<>();
+      this.configUpgradePack = emptyConfigUpgradePack;
       LOG.info("Stack '{}' doesn't contain config upgrade pack file", getPath());
     }
   }
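
The hunk above applies a null-object fallback: when a stack ships no config upgrade pack file, an empty ConfigUpgradePack is stored instead of leaving the field null, so later lookups cannot throw the NullPointerException reported in the JIRA. A minimal, self-contained Java sketch of that pattern follows; Pack, loadPack, and NullObjectDemo are illustrative names for this sketch, not Ambari's actual classes.

import java.util.ArrayList;
import java.util.List;

public class NullObjectDemo {

  /** Stand-in for a parsed upgrade pack; illustrative only. */
  static class Pack {
    List<String> services = new ArrayList<>();
  }

  private Pack pack;

  /** Mirrors the fix: fall back to an empty pack instead of keeping null. */
  void loadPack(Pack parsed) {
    if (parsed != null) {
      this.pack = parsed;
    } else {
      Pack empty = new Pack();
      empty.services = new ArrayList<>();
      this.pack = empty; // callers can now iterate services safely
    }
  }

  public static void main(String[] args) {
    NullObjectDemo demo = new NullObjectDemo();
    demo.loadPack(null); // simulate a stack with no config upgrade pack
    // Before such a fallback, this access pattern would throw an NPE:
    System.out.println("services: " + demo.pack.services.size());
  }
}

The trade-off of this pattern is that callers can no longer distinguish a missing pack from an empty one; that appears safe here, since the accompanying test below only asserts that the pack and its services list are non-null.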

http://git-wip-us.apache.org/repos/asf/ambari/blob/6b549d36/ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerTest.java b/ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerTest.java
index 8c7ab9d..d8c02dc 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerTest.java
@@ -58,6 +58,7 @@ import org.apache.ambari.server.state.PropertyInfo;
 import org.apache.ambari.server.state.ServiceInfo;
 import org.apache.ambari.server.state.ServiceOsSpecific;
 import org.apache.ambari.server.state.StackInfo;
+import org.apache.ambari.server.state.stack.ConfigUpgradePack;
 import org.apache.ambari.server.state.stack.MetricDefinition;
 import org.apache.ambari.server.state.stack.OsFamily;
 import org.apache.ambari.server.state.stack.UpgradePack;
@@ -728,7 +729,7 @@ public class StackManagerTest {
   }
 
   /**
-   * Tests that {@link UpgradePack} instances are correctly initialized
+   * Tests that {@link UpgradePack} and {@link ConfigUpgradePack} instances are correctly initialized
    * post-unmarshalling.
    *
    * @throws Exception
@@ -745,6 +746,9 @@ public class StackManagerTest {
       // reference equality (make sure it's the same list)
       assertTrue(upgradePack.getTasks() == upgradePack.getTasks());
     }
+    ConfigUpgradePack configUpgradePack = stack.getConfigUpgradePack();
+    assertNotNull(configUpgradePack);
+    assertNotNull(configUpgradePack.services);
   }
 
   @Test


[31/50] [abbrv] ambari git commit: AMBARI-20770 Cover assign master controller with unit tests. (atkach)

Posted by ao...@apache.org.
AMBARI-20770 Cover assign master controller with unit tests. (atkach)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/be2510f7
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/be2510f7
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/be2510f7

Branch: refs/heads/branch-3.0-perf
Commit: be2510f7d46dc99170514a7e8a04f7dd8c43c05d
Parents: c8de2d0
Author: Andrii Tkach <at...@apache.org>
Authored: Fri Apr 14 16:37:36 2017 +0300
Committer: Andrew Onishuk <ao...@hortonworks.com>
Committed: Mon Apr 24 14:59:56 2017 +0300

----------------------------------------------------------------------
 .../wizard/step7/assign_master_controller.js    | 275 ++++---
 .../step7/assign_master_controller_test.js      | 780 ++++++++++++++++---
 2 files changed, 840 insertions(+), 215 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/be2510f7/ambari-web/app/controllers/wizard/step7/assign_master_controller.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/controllers/wizard/step7/assign_master_controller.js b/ambari-web/app/controllers/wizard/step7/assign_master_controller.js
index 8bdab28..099931a 100644
--- a/ambari-web/app/controllers/wizard/step7/assign_master_controller.js
+++ b/ambari-web/app/controllers/wizard/step7/assign_master_controller.js
@@ -65,7 +65,7 @@ App.AssignMasterOnStep7Controller = Em.Controller.extend(App.BlueprintMixin, App
     
     switch (action) {
       case 'ADD':
-        if (hostComponent.componentName == "HIVE_SERVER_INTERACTIVE") {
+        if (hostComponent.componentName === "HIVE_SERVER_INTERACTIVE") {
           this.getPendingBatchRequests(hostComponent);  
         } else {
           this.showPopup(hostComponent);
@@ -94,9 +94,9 @@ App.AssignMasterOnStep7Controller = Em.Controller.extend(App.BlueprintMixin, App
 
   pendingBatchRequestsAjaxError: function(data) {
     var error = Em.I18n.t('services.service.actions.run.yarnRefreshQueues.error');
-    if(data && data.responseText){
+    if (data && data.responseText) {
       try {
-        var json = $.parseJSON(data.responseText);
+        var json = JSON.parse(data.responseText);
         error += json.message;
       } catch (err) {}
     }
@@ -105,21 +105,11 @@ App.AssignMasterOnStep7Controller = Em.Controller.extend(App.BlueprintMixin, App
 
   pendingBatchRequestsAjaxSuccess : function(data, opt, params) {
     var self = this;
-    var showAlert = false;
-    if (data.hasOwnProperty('items') && data.items.length > 0) {
-      data.items.forEach( function(_item) {
-        _item.RequestSchedule.batch.batch_requests.forEach( function(batchRequest) {
-          // Check if a DELETE request on HIVE_SERVER_INTERACTIVE is in progress
-          if (batchRequest.request_type == "DELETE" && batchRequest.request_uri.indexOf("HIVE_SERVER_INTERACTIVE") > -1) {
-            showAlert = true;
-          }
-        });
-      });
-    }
-    if (showAlert) {
-      App.showAlertPopup(Em.I18n.t('services.service.actions.hsi.alertPopup.header'), Em.I18n.t('services.service.actions.hsi.alertPopup.body'), function() {
+    if (this.shouldShowAlertOnBatchRequest(data)) {
+      App.showAlertPopup(Em.I18n.t('services.service.actions.hsi.alertPopup.header'),
+        Em.I18n.t('services.service.actions.hsi.alertPopup.body'), function() {
         var configWidgetContext = self.get('configWidgetContext');
-        var config = self.get('configWidgetContext.config');
+        var config = configWidgetContext.get('config');
         configWidgetContext.toggleProperty('controller.forceUpdateBoundaries');
         var value = config.get('initialValue');
         config.set('value', value);
@@ -131,6 +121,21 @@ App.AssignMasterOnStep7Controller = Em.Controller.extend(App.BlueprintMixin, App
       this.showPopup(params.hostComponent);
     }
   },
+
+  shouldShowAlertOnBatchRequest: function(data) {
+    var showAlert = false;
+    if (data.hasOwnProperty('items') && data.items.length > 0) {
+      data.items.forEach( function(_item) {
+        _item.RequestSchedule.batch.batch_requests.forEach( function(batchRequest) {
+          // Check if a DELETE request on HIVE_SERVER_INTERACTIVE is in progress
+          if (batchRequest.request_type === "DELETE" && batchRequest.request_uri.indexOf("HIVE_SERVER_INTERACTIVE") > -1) {
+            showAlert = true;
+          }
+        });
+      });
+    }
+    return showAlert;
+  },
   
   showPopup: function(hostComponent) {
     var missingDependentServices = this.getAllMissingDependentServices();
@@ -461,7 +466,6 @@ App.AssignMasterOnStep7Controller = Em.Controller.extend(App.BlueprintMixin, App
       var configActionComponent = self.get('configActionComponent');
       var componentHostName = self.getSelectedHostName(configActionComponent.componentName);
       var config = self.get('configWidgetContext.config');
-      var oldValueKey = context.get('controller.wizardController.name') === 'installerController' ? 'initialValue' : 'savedValue';
 
       // TODO remove after stack advisor is able to handle this case
       // workaround for hadoop.proxyuser.{{hiveUser}}.hosts after adding Hive Server Interactive from Install Wizard
@@ -484,114 +488,159 @@ App.AssignMasterOnStep7Controller = Em.Controller.extend(App.BlueprintMixin, App
       configActionComponent.hostName = componentHostName;
       config.set('configActionComponent', configActionComponent);
       /* TODO uncomment after stack advisor is able to handle this case
-      context.get('controller').loadConfigRecommendations([{
+       var oldValueKey = context.get('controller.wizardController.name') === 'installerController' ? 'initialValue' : 'savedValue';
+       context.get('controller').loadConfigRecommendations([{
         type: App.config.getConfigTagFromFileName(config.get('fileName')),
         name: config.get('name'),
         old_value: config.get(oldValueKey)
       }]);
       */
+      self.resolveDependencies(dependencies, serviceConfigs, context);
+    });
+  },
 
-      // TODO remove after stack advisor is able to handle this case
-      // workaround for hadoop.proxyuser.{{hiveUser}}.hosts after adding Hive Server Interactive
-      if (dependencies) {
-        var foreignKeys = {};
-        if (dependencies.foreignKeys) {
-          dependencies.foreignKeys.forEach(function (dependency) {
-            var matchingProperty = serviceConfigs.find(function (property) {
-              return property.get('filename') === App.config.getOriginalFileName(dependency.fileName) && property.get('name') === dependency.propertyName;
+  /**
+   * TODO remove after stack advisor is able to handle this case
+   * workaround for hadoop.proxyuser.{{hiveUser}}.hosts after adding Hive Server Interactive
+   * @param {object} dependencies
+   * @param {array} serviceConfigs
+   * @param {Em.Object} context
+   */
+  resolveDependencies: function(dependencies, serviceConfigs, context) {
+    if (dependencies) {
+      var foreignKeys = this.getDependenciesForeignKeys(dependencies, serviceConfigs);
+
+      if (dependencies.properties && dependencies.initializer) {
+        var initializer = App.get(dependencies.initializer.name);
+        var setup = Em.getProperties(foreignKeys, dependencies.initializer.setupKeys);
+        initializer.setup(setup);
+        var blueprintObject = {};
+        dependencies.properties.forEach(function (property) {
+          var propertyObject = Em.getProperties(property, ['name', 'fileName']);
+          if (property.nameTemplate) {
+            var name = property.nameTemplate;
+            Em.keys(foreignKeys).forEach(function (key) {
+              name = name.replace('{{' + key + '}}', foreignKeys[key]);
             });
-            if (matchingProperty) {
-              foreignKeys[dependency.key] = matchingProperty.get('value');
-            }
+            propertyObject.name = name;
+          }
+          if (!blueprintObject[property.fileName]) {
+            blueprintObject[property.fileName] = {
+              properties: {}
+            };
+          }
+          var result = initializer.initialValue(propertyObject, {
+            masterComponentHosts: this.getMasterComponents(dependencies, context)
           });
-        }
-        if (dependencies.properties && dependencies.initializer) {
-          var initializer = App.get(dependencies.initializer.name);
-          var setup = Em.getProperties(foreignKeys, dependencies.initializer.setupKeys);
-          initializer.setup(setup);
-          var blueprintObject = {};
-          dependencies.properties.forEach(function (property) {
-            var propertyObject = Em.getProperties(property, ['name', 'fileName']);
-            if (property.nameTemplate) {
-              var name = property.nameTemplate;
-              Em.keys(foreignKeys).forEach(function (key) {
-                name = name.replace('{{' + key + '}}', foreignKeys[key]);
-              });
-              propertyObject.name = name;
-            }
-            if (!blueprintObject[property.fileName]) {
-              blueprintObject[property.fileName] = {
-                properties: {}
-              };
-            }
-            var masterComponents = [];
-            if (self.get('content.controllerName')) {
-              var savedMasterComponents = context.get('controller.content.masterComponentHosts').filter(function (componentObject) {
-                return dependencies.initializer.componentNames.contains(componentObject.component);
-              });
-              masterComponents = savedMasterComponents.map(function (componentObject) {
-                var masterComponent = Em.getProperties(componentObject, ['component', 'hostName']);
-                masterComponent.isInstalled = true;
-                return masterComponent;
-              });
-            } else {
-              var hostsMap = blueprintUtils.getComponentForHosts();
-              Em.keys(hostsMap).forEach(function (hostName) {
-                hostsMap[hostName].forEach(function (componentName) {
-                  if (dependencies.initializer.componentNames.contains(componentName)) {
-                    masterComponents.push({
-                      component: componentName,
-                      hostName: hostName,
-                      isInstalled: true
-                    });
-                  }
-                });
+
+          var propertiesMap = blueprintObject[propertyObject.fileName].properties;
+          propertiesMap[propertyObject.name] = result.value;
+
+          if (property.isHostsList) {
+            var service = App.config.get('serviceByConfigTypeMap')[propertyObject.fileName];
+            if (service) {
+              var serviceName = service.get('serviceName');
+              var configs = serviceName === context.get('controller.selectedService.serviceName') ? serviceConfigs :
+                context.get('controller.stepConfigs').findProperty('serviceName', serviceName).get('configs');
+              var originalFileName = App.config.getOriginalFileName(propertyObject.fileName);
+              var currentProperty = configs.find(function (configProperty) {
+                return configProperty.get('filename') === originalFileName && configProperty.get('name') === propertyObject.name;
               });
-            }
-            var result = initializer.initialValue(propertyObject, {
-              masterComponentHosts: masterComponents
-            });
-            var propertiesMap = blueprintObject[propertyObject.fileName].properties;
-            propertiesMap[propertyObject.name] = result.value;
-            if (property.isHostsList) {
-              var service = App.config.get('serviceByConfigTypeMap')[propertyObject.fileName];
-              if (service) {
-                var serviceName = service.get('serviceName');
-                var configs = serviceName === context.get('controller.selectedService.serviceName') ? serviceConfigs :
-                  context.get('controller.stepConfigs').findProperty('serviceName', serviceName).get('configs');
-                var originalFileName = App.config.getOriginalFileName(propertyObject.fileName);
-                var currentProperty = configs.find(function (configProperty) {
-                  return configProperty.get('filename') === originalFileName && configProperty.get('name') === propertyObject.name;
-                });
-                if (currentProperty) {
-                  propertiesMap[propertyObject.name] = currentProperty.get('value');
-                  App.config.updateHostsListValue(propertiesMap, propertyObject.fileName, propertyObject.name, propertyObject.value, property.isHostsArray);
-                }
+              if (currentProperty) {
+                propertiesMap[propertyObject.name] = currentProperty.get('value');
+                App.config.updateHostsListValue(propertiesMap, propertyObject.fileName, propertyObject.name, propertyObject.value, property.isHostsArray);
               }
             }
-            context.get('controller').loadRecommendationsSuccess({
-              resources: [
-                {
-                  recommendations: {
-                    blueprint: {
-                      configurations: blueprintObject
-                    }
-                  }
-                }
-              ]
-            }, null, {
-              dataToSend: {
-                changed_configurations: [{
-                  type: App.config.getConfigTagFromFileName(config.get('fileName')),
-                  name: config.get('name'),
-                  old_value: config.get(oldValueKey)
-                }]
-              }
-            });
-            initializer.cleanup();
-          });
+          }
+          this.saveRecommendations(context, blueprintObject);
+          initializer.cleanup();
+        }, this);
+      }
+    }
+  },
+
+  /**
+   * Passes the computed blueprint configurations to the controller's recommendations handler.
+   * @param {Em.Object} context
+   * @param {object} blueprintObject
+   */
+  saveRecommendations: function(context, blueprintObject) {
+    var oldValueKey = context.get('controller.wizardController.name') === 'installerController' ? 'initialValue' : 'savedValue';
+    var config = this.get('configWidgetContext.config');
+
+    context.get('controller').loadRecommendationsSuccess({
+      resources: [
+        {
+          recommendations: {
+            blueprint: {
+              configurations: blueprintObject
+            }
+          }
         }
+      ]
+    }, null, {
+      dataToSend: {
+        changed_configurations: [{
+          type: App.config.getConfigTagFromFileName(config.get('fileName')),
+          name: config.get('name'),
+          old_value: config.get(oldValueKey)
+        }]
       }
     });
+  },
+
+  /**
+   * Builds a map from dependency foreign keys to matching config property values.
+   * @param {object} dependencies
+   * @param {Array} serviceConfigs
+   * @returns {object}
+   */
+  getDependenciesForeignKeys: function(dependencies, serviceConfigs) {
+    var foreignKeys = {};
+    if (dependencies.foreignKeys) {
+      dependencies.foreignKeys.forEach(function (dependency) {
+        var matchingProperty = serviceConfigs.find(function (property) {
+          return property.get('filename') === App.config.getOriginalFileName(dependency.fileName) && property.get('name') === dependency.propertyName;
+        });
+        if (matchingProperty) {
+          foreignKeys[dependency.key] = matchingProperty.get('value');
+        }
+      });
+    }
+    return foreignKeys;
+  },
+
+  /**
+   * Collects the initializer's master components and their hosts.
+   * @param {object} dependencies
+   * @param {Em.Object} context
+   * @returns {Array}
+   */
+  getMasterComponents: function(dependencies, context) {
+    var masterComponents = [];
+    if (this.get('content.controllerName')) {
+      var savedMasterComponents = context.get('controller.content.masterComponentHosts').filter(function (componentObject) {
+        return dependencies.initializer.componentNames.contains(componentObject.component);
+      });
+      masterComponents = savedMasterComponents.map(function (componentObject) {
+        var masterComponent = Em.getProperties(componentObject, ['component', 'hostName']);
+        masterComponent.isInstalled = true;
+        return masterComponent;
+      });
+    } else {
+      var hostsMap = blueprintUtils.getComponentForHosts();
+      Em.keys(hostsMap).forEach(function (hostName) {
+        hostsMap[hostName].forEach(function (componentName) {
+          if (dependencies.initializer.componentNames.contains(componentName)) {
+            masterComponents.push({
+              component: componentName,
+              hostName: hostName,
+              isInstalled: true
+            });
+          }
+        });
+      });
+    }
+    return masterComponents;
   }
 });
\ No newline at end of file
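
A minimal standalone sketch of the check that shouldShowAlertOnBatchRequest performs: scan the request-schedule response for a pending DELETE batch request against HIVE_SERVER_INTERACTIVE. This version uses Array.prototype.some for an early exit instead of the forEach-with-flag style above; the payload shape follows the test fixtures below, and the sample URI is hypothetical.

    function hasPendingHsiDelete(data) {
      if (!data || !Array.isArray(data.items)) {
        return false;
      }
      return data.items.some(function (item) {
        return item.RequestSchedule.batch.batch_requests.some(function (batchRequest) {
          // A pending DELETE on HIVE_SERVER_INTERACTIVE means the component
          // is still being removed, so a new ADD has to wait.
          return batchRequest.request_type === 'DELETE' &&
            batchRequest.request_uri.indexOf('HIVE_SERVER_INTERACTIVE') > -1;
        });
      });
    }

    // Hypothetical payload, shaped like the API response used in the tests:
    var sample = {
      items: [{
        RequestSchedule: {
          batch: {
            batch_requests: [
              {request_type: 'DELETE', request_uri: '/clusters/c1/host_components/HIVE_SERVER_INTERACTIVE'}
            ]
          }
        }
      }]
    };
    console.log(hasPendingHsiDelete(sample)); // true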

http://git-wip-us.apache.org/repos/asf/ambari/blob/be2510f7/ambari-web/test/controllers/wizard/step7/assign_master_controller_test.js
----------------------------------------------------------------------
diff --git a/ambari-web/test/controllers/wizard/step7/assign_master_controller_test.js b/ambari-web/test/controllers/wizard/step7/assign_master_controller_test.js
index e70055b..1380ef2 100644
--- a/ambari-web/test/controllers/wizard/step7/assign_master_controller_test.js
+++ b/ambari-web/test/controllers/wizard/step7/assign_master_controller_test.js
@@ -19,6 +19,8 @@
 var App = require('app');
 var stringUtils = require('utils/string_utils');
 var numberUtils = require('utils/number_utils');
+var blueprintUtils = require('utils/blueprint');
+var testHelpers = require('test/helpers');
 require('models/stack_service_component');
 
 describe('App.AssignMasterOnStep7Controller', function () {
@@ -50,58 +52,51 @@ describe('App.AssignMasterOnStep7Controller', function () {
   describe("#execute()", function () {
     var context = Em.Object.create({
       controller: {
-        content: Em.Object.create({
-          controllerName: ""
-        })
+        content: {}
       }
     });
 
     beforeEach(function() {
-      this.mock = sinon.stub(view, 'getAllMissingDependentServices');
-      sinon.stub(view, 'showInstallServicesPopup');
-      sinon.stub(view, 'showAssignComponentPopup');
+      sinon.stub(view, 'showPopup');
       sinon.stub(view, 'removeMasterComponent');
-      view.reopen({
-        content: Em.Object.create()
-      });
-    });
+      sinon.stub(view, 'getPendingBatchRequests');
+    });
 
     afterEach(function() {
-      this.mock.restore();
-      view.showInstallServicesPopup.restore();
-      view.showAssignComponentPopup.restore();
+      view.getPendingBatchRequests.restore();
+      view.showPopup.restore();
       view.removeMasterComponent.restore();
     });
 
-    it("ADD action, controllerName is empty", function() {
-      this.mock.returns([{}]);
+    it("should set configWidgetContext", function() {
       view.execute(context, 'ADD', {componentName: 'C1'});
-      expect(view.showInstallServicesPopup.calledOnce).to.be.true;
+      expect(view.get('configWidgetContext')).to.be.eql(context);
     });
 
-    it("ADD action, controllerName is set", function() {
-      context = Em.Object.create({
-        controller: {
-          content: Em.Object.create({
-            controllerName: "ctrl1"
-          })
-        }
-      });
-      this.mock.returns([{}]);
+    it("should set content", function() {
       view.execute(context, 'ADD', {componentName: 'C1'});
-      expect(view.showAssignComponentPopup.calledOnce).to.be.true;
+      expect(view.get('content')).to.be.eql({});
     });
 
-    it("ADD action, no dependent services", function() {
-      this.mock.returns([]);
+    it("should set configActionComponent", function() {
       view.execute(context, 'ADD', {componentName: 'C1'});
-      expect(view.showAssignComponentPopup.calledOnce).to.be.true;
+      expect(view.get('configActionComponent')).to.be.eql({componentName: 'C1'});
     });
 
-    it("DELETE action", function() {
-      this.mock.returns([{}]);
+    it("should call showPopup when action is ADD", function() {
+      view.execute(context, 'ADD', {componentName: 'C1'});
+      expect(view.showPopup.calledWith({componentName: 'C1'})).to.be.true;
+    });
+
+    it("should call getPendingBatchRequests when action is ADD and HIVE_SERVER_INTERACTIVE", function() {
+      view.execute(context, 'ADD', {componentName: 'HIVE_SERVER_INTERACTIVE'});
+      expect(view.getPendingBatchRequests.calledWith({componentName: 'HIVE_SERVER_INTERACTIVE'})).to.be.true;
+    });
+
+    it("should call removeMasterComponent when action is DELETE", function() {
       view.execute(context, 'DELETE', {componentName: 'C1'});
       expect(view.removeMasterComponent.calledOnce).to.be.true;
+      expect(view.get('mastersToCreate')).to.be.eql(['C1']);
     });
   });
 
@@ -186,11 +181,20 @@ describe('App.AssignMasterOnStep7Controller', function () {
     beforeEach(function() {
       sinon.stub(App.router, 'get').returns(mock);
       sinon.stub(mock, 'setDBProperty');
+      sinon.stub(view, 'clearComponentsToBeAdded');
+      sinon.stub(App.HostComponent, 'find').returns([
+        Em.Object.create({
+          componentName: 'C1',
+          hostName: 'host1'
+        })
+      ]);
     });
 
     afterEach(function() {
       App.router.get.restore();
       mock.setDBProperty.restore();
+      view.clearComponentsToBeAdded.restore();
+      App.HostComponent.find.restore();
     });
 
     it("should set masterComponentHosts", function() {
@@ -216,6 +220,22 @@ describe('App.AssignMasterOnStep7Controller', function () {
       expect(view.get('content.masterComponentHosts')).to.be.eql([{component: 'C1'}]);
       expect(view.get('content.recommendationsHostGroups').blueprint).to.be.eql({host_groups: [{name: 'host-group-1', components: [{name: 'C1'}]}]});
     });
+
+    it("should call clearComponentsToBeAdded when controllerName is null", function() {
+      view.setProperties({
+        content: Em.Object.create(),
+        mastersToCreate: ['C1'],
+        configWidgetContext: {
+          config: Em.Object.create()
+        }
+      });
+      view.removeMasterComponent();
+      expect(view.clearComponentsToBeAdded.calledWith('C1')).to.be.true;
+      expect(App.get('componentToBeDeleted')).to.be.eql(Em.Object.create({
+        componentName: 'C1',
+        hostName: 'host1'
+      }));
+    });
   });
 
   describe("#renderHostInfo()", function () {
@@ -318,101 +338,657 @@ describe('App.AssignMasterOnStep7Controller', function () {
     });
   });
 
-  describe("#submit()", function () {
-    var popup = {
-      hide: Em.K
+  describe('#getPendingBatchRequests', function() {
+
+    it('App.ajax.send should be called', function() {
+      view.getPendingBatchRequests({componentName: 'C1'});
+      var args = testHelpers.findAjaxRequest('name', 'request_schedule.get.pending');
+      expect(args[0]).to.be.eql({
+        name : 'request_schedule.get.pending',
+        sender: view,
+        error : 'pendingBatchRequestsAjaxError',
+        success: 'pendingBatchRequestsAjaxSuccess',
+        data: {
+          hostComponent: {componentName: 'C1'}
+        }
+      });
+    });
+  });
+
+  describe('#pendingBatchRequestsAjaxError', function() {
+    beforeEach(function() {
+      sinon.stub(App, 'showAlertPopup');
+    });
+    afterEach(function() {
+      App.showAlertPopup.restore();
+    });
+
+    it('should call showAlertPopup, invalid JSON', function() {
+      view.pendingBatchRequestsAjaxError({responseText: null});
+      expect(App.showAlertPopup.calledWith(
+        Em.I18n.t('services.service.actions.run.yarnRefreshQueues.error'),
+        Em.I18n.t('services.service.actions.run.yarnRefreshQueues.error'),
+        null
+      )).to.be.true;
+    });
+
+    it('should call showAlertPopup, valid JSON', function() {
+      view.pendingBatchRequestsAjaxError({responseText: '{"message":"foo"}'});
+      expect(App.showAlertPopup.calledWith(
+        Em.I18n.t('services.service.actions.run.yarnRefreshQueues.error'),
+        Em.I18n.t('services.service.actions.run.yarnRefreshQueues.error') + 'foo',
+        null
+      )).to.be.true;
+    });
+  });
+
+  describe('#pendingBatchRequestsAjaxSuccess', function() {
+    var configWidgetContext = Em.Object.create({
+      config: Em.Object.create({
+        initialValue: 'iv1',
+        value: 'v1'
+      }),
+      controller: {
+        forceUpdateBoundaries: false
+      },
+      setValue: sinon.spy(),
+      sendRequestRorDependentConfigs: sinon.spy()
+    });
+    beforeEach(function() {
+      this.mock = sinon.stub(view, 'shouldShowAlertOnBatchRequest');
+      sinon.stub(App, 'showAlertPopup', function() {
+        arguments[2].apply({hide: Em.K});
+      });
+      sinon.stub(view, 'showPopup');
+      view.set('configWidgetContext', configWidgetContext);
+    });
+    afterEach(function() {
+      this.mock.restore();
+      App.showAlertPopup.restore();
+      view.showPopup.restore();
+    });
+
+    it('showPopup should be called', function() {
+      this.mock.returns(false);
+      view.pendingBatchRequestsAjaxSuccess({}, {}, {hostComponent: {componentName: 'C1'}});
+      expect(view.showPopup.calledWith({componentName: 'C1'})).to.be.true;
+    });
+
+    it('showAlertPopup should be called', function() {
+      this.mock.returns(true);
+      view.pendingBatchRequestsAjaxSuccess({}, {}, {hostComponent: {componentName: 'C1'}});
+      expect(App.showAlertPopup.calledWith(
+        Em.I18n.t('services.service.actions.hsi.alertPopup.header'),
+        Em.I18n.t('services.service.actions.hsi.alertPopup.body')
+      )).to.be.true;
+      expect(configWidgetContext.get('config.value')).to.be.equal('iv1');
+      expect(configWidgetContext.get('controller.forceUpdateBoundaries')).to.be.true;
+      expect(configWidgetContext.setValue.calledWith('iv1')).to.be.true;
+      expect(configWidgetContext.sendRequestRorDependentConfigs.calledWith(
+        configWidgetContext.get('config')
+      )).to.be.true;
+    });
+  });
+
+  describe('#shouldShowAlertOnBatchRequest', function() {
+    var testCases = [
+      {
+        input: {},
+        expected: false
       },
-      mock = {
-        saveMasterComponentHosts: Em.K,
-        loadMasterComponentHosts: Em.K,
-        setDBProperty: Em.K
+      {
+        input: {
+          items: []
+        },
+        expected: false
+      },
+      {
+        input: {
+          items: [
+            {
+              RequestSchedule: {
+                batch: {
+                  batch_requests: [
+                    {
+                      request_type: 'ADD',
+                      request_uri: ''
+                    }
+                  ]
+                }
+              }
+            }
+          ]
+        },
+        expected: false
       },
-      config = Em.Object.create({
-        filename: 'file1',
-        name: 'conf1'
+      {
+        input: {
+          items: [
+            {
+              RequestSchedule: {
+                batch: {
+                  batch_requests: [
+                    {
+                      request_type: 'DELETE',
+                      request_uri: 'HIVE_SERVER_INTERACTIVE'
+                    }
+                  ]
+                }
+              }
+            }
+          ]
+        },
+        expected: true
+      }
+    ];
+
+    testCases.forEach(function(test) {
+      it('should return ' + test.expected + ' when data = ' + JSON.stringify(test.input), function() {
+        expect(view.shouldShowAlertOnBatchRequest(test.input)).to.be.equal(test.expected);
       });
+    });
+  });
+
+  describe('#updateComponent', function() {
 
     beforeEach(function() {
-      sinon.stub(popup, 'hide');
-      sinon.stub(App.router, 'get').returns(mock);
-      sinon.stub(mock, 'saveMasterComponentHosts');
-      sinon.stub(mock, 'loadMasterComponentHosts');
-      sinon.stub(mock, 'setDBProperty');
-      sinon.stub(App.config, 'getConfigTagFromFileName', function (value) {
-        return value;
+      sinon.stub(App.StackServiceComponent, 'find').returns([
+        Em.Object.create({
+          componentName: 'C1',
+          stackService: Em.Object.create({
+            isInstalled: false
+          })
+        })
+      ]);
+    });
+    afterEach(function() {
+      App.StackServiceComponent.find.restore();
+    });
+
+    it('showAddControl and showRemoveControl should be false for component', function() {
+      var component = Em.Object.create({
+        component_name: 'C1',
+        showAddControl: true,
+        showRemoveControl: true
       });
+      view.setProperties({
+        mastersToCreate: [],
+        selectedServicesMasters: [ component, {component_name: 'C2'} ]
+      });
+      view.updateComponent('C1');
+      expect(component.get('showAddControl')).to.be.false;
+      expect(component.get('showRemoveControl')).to.be.false;
+    });
+  });
+
+  describe('#saveRecommendationsHostGroups', function() {
+    beforeEach(function() {
+      sinon.stub(view, 'getSelectedHostName').returns('host1');
+    });
+    afterEach(function() {
+      view.getSelectedHostName.restore();
+    });
+
+    it('should add component to recommendations', function() {
+      var recommendationsHostGroups = {
+        blueprint_cluster_binding: {
+          host_groups: [
+            {
+              name: 'g1',
+              hosts: [
+                {
+                  fqdn: 'host1'
+                }
+              ]
+            }
+          ]
+        },
+        blueprint: {
+          host_groups: [
+            {
+              name: 'g1',
+              components: []
+            }
+          ]
+        }
+      };
       view.reopen({
+        mastersToCreate: ['C1'],
         content: Em.Object.create({
-          controllerName: 'ctrl1',
-          componentsFromConfigs: []
-        }),
-        selectedServicesMasters: [
-          {
-            component_name: 'C1',
-            selectedHost: 'host1'
-          }
-        ],
-        popup: popup,
-        configActionComponent: {
-          componentName: 'C1'
-        },
-        configWidgetContext: Em.Object.create({
-          config: Em.Object.create({
-            fileName: 'file1',
-            name: 'conf1',
+          recommendationsHostGroups: recommendationsHostGroups
+        })
+      });
+      view.saveRecommendationsHostGroups();
+      expect(view.get('content.recommendationsHostGroups')).to.be.eql(Object.assign(recommendationsHostGroups, {
+        blueprint: {
+          host_groups: [
+            {
+              name: 'g1',
+              components: [{name: 'C1'}]
+            }
+          ]
+        }
+      }));
+    });
+  });
+
+  describe('#setGlobalComponentToBeAdded', function() {
+
+    it('should set componentToBeAdded', function() {
+      view.setGlobalComponentToBeAdded('C1', 'host1');
+      expect(App.get('componentToBeAdded')).to.be.eql(Em.Object.create({
+        componentName: 'C1',
+        hostNames: ['host1']
+      }));
+    });
+  });
+
+  describe('#clearComponentsToBeDeleted', function() {
+
+    it('should clear componentToBeDeleted', function() {
+      App.set('componentToBeDeleted', Em.Object.create({
+        componentName: 'C1'
+      }));
+      view.clearComponentsToBeDeleted('C1');
+      expect(App.get('componentToBeDeleted')).to.be.empty;
+    });
+  });
+
+  describe('#clearComponentsToBeAdded', function() {
+
+    it('should clear componentToBeAdded', function() {
+      App.set('componentToBeAdded', Em.Object.create({
+        componentName: 'C1'
+      }));
+      view.clearComponentsToBeAdded('C1');
+      expect(App.get('componentToBeAdded')).to.be.empty;
+    });
+  });
+
+  describe('#showPopup', function() {
+    beforeEach(function() {
+      this.mock = sinon.stub(view, 'getAllMissingDependentServices');
+      sinon.stub(view, 'showInstallServicesPopup');
+      sinon.stub(view, 'showAssignComponentPopup');
+    });
+    afterEach(function() {
+      this.mock.restore();
+      view.showInstallServicesPopup.restore();
+      view.showAssignComponentPopup.restore();
+    });
+
+    it('showAssignComponentPopup should be called', function() {
+      this.mock.returns([]);
+      view.showPopup({componentName: 'C1'});
+      expect(view.get('mastersToCreate')).to.be.eql(['C1']);
+      expect(view.showAssignComponentPopup.calledOnce).to.be.true;
+    });
+
+    it('showInstallServicesPopup should be called', function() {
+      this.mock.returns([{}]);
+      view.reopen({
+        content: Em.Object.create()
+      });
+      view.showPopup({componentName: 'C1'});
+      expect(view.showInstallServicesPopup.calledWith([{}])).to.be.true;
+    });
+  });
+
+  describe('#submit', function() {
+    var configWidgetContext = Em.Object.create({
+      controller: {
+        forceUpdateBoundaries: false,
+        stepConfigs: [
+          Em.Object.create({
             serviceName: 'S1',
-            savedValue: 'val1',
-            toggleProperty: Em.K
+            configs: []
           }),
-          controller: Em.Object.create({
-            selectedService: {
-              serviceName: 'S1'
-            },
-            wizardController: {
-              name: 'ctrl'
-            },
-            stepConfigs: [
-              Em.Object.create({
-                serviceName: 'S1',
-                configs: [
-                  config
-                ]
-              }),
-              Em.Object.create({
-                serviceName: 'MISC',
-                configs: [
-                  config
-                ]
-              })
-            ]
+          Em.Object.create({
+            serviceName: 'MISC',
+            configs: []
           })
-        })
+        ],
+        selectedService: {
+          serviceName: 'S1'
+        }
+      },
+      config: Em.Object.create({
+        configAction: {
+          dependencies: []
+        }
+      })
+    });
+    beforeEach(function() {
+      sinon.stub(view, 'resolveDependencies');
+      sinon.stub(view, 'saveMasterComponentHosts');
+      sinon.stub(view, 'saveRecommendationsHostGroups');
+      sinon.stub(view, 'setGlobalComponentToBeAdded');
+      sinon.stub(view, 'clearComponentsToBeDeleted');
+      sinon.stub(App, 'get').returns({
+        getKDCSessionState: Em.clb
+      });
+      sinon.stub(view, 'getSelectedHostName').returns('host1');
+      view.setProperties({
+        configWidgetContext: configWidgetContext,
+        configActionComponent: { componentName: 'C1'},
+        popup: {
+          hide: sinon.spy()
+        }
+      });
+    });
+    afterEach(function() {
+      App.get.restore();
+      view.resolveDependencies.restore();
+      view.clearComponentsToBeDeleted.restore();
+      view.setGlobalComponentToBeAdded.restore();
+      view.saveRecommendationsHostGroups.restore();
+      view.saveMasterComponentHosts.restore();
+      view.getSelectedHostName.restore();
+    });
+
+    it('saveMasterComponentHosts should be called when controllerName defined', function() {
+      view.reopen({
+        content: {
+          controllerName: 'ctrl1'
+        }
+      });
+      view.submit();
+      expect(view.saveMasterComponentHosts.calledOnce).to.be.true;
+    });
+    it('saveRecommendationsHostGroups should be called when controllerName defined', function() {
+      view.reopen({
+        content: {
+          controllerName: 'ctrl1'
+        }
+      });
+      view.submit();
+      expect(view.saveRecommendationsHostGroups.calledOnce).to.be.true;
+    });
+    it('setGlobalComponentToBeAdded should be called when controllerName undefined', function() {
+      view.reopen({
+        content: {
+          controllerName: undefined
+        }
+      });
+      view.submit();
+      expect(view.setGlobalComponentToBeAdded.calledWith('C1', 'host1')).to.be.true;
+    });
+    it('clearComponentsToBeDeleted should be called when controllerName undefined', function() {
+      view.reopen({
+        content: {
+          controllerName: undefined
+        }
       });
       view.submit();
+      expect(view.clearComponentsToBeDeleted.calledWith('C1')).to.be.true;
+    });
+    it('resolveDependencies should be called', function() {
+      view.submit();
+      expect(view.resolveDependencies.calledWith([], [])).to.be.true;
+    });
+    it('hide should be called', function() {
+      view.submit();
+      expect(view.get('popup').hide.calledOnce).to.be.true;
+    });
+    it('configActionComponent should be set', function() {
+      view.submit();
+      expect(view.get('configWidgetContext.config.configActionComponent')).to.be.eql({
+        componentName: 'C1',
+        hostName: 'host1'
+      });
+    });
+  });
+
+  describe('#resolveDependencies', function() {
+    var initializer = {
+      setup: sinon.spy(),
+      initialValue: sinon.stub().returns({value: 'val1'}),
+      cleanup: sinon.spy()
+    };
+    var dependencies = {
+      properties: [
+        {
+          name: 'p1',
+          fileName: 'file1.xml',
+          nameTemplate: '{{bar}}',
+          isHostsList: true,
+          isHostsArray: false
+        }
+      ],
+      initializer: {
+        name: 'i1',
+        setupKeys: ['bar']
+      }
+    };
+    var context = Em.Object.create({
+      controller: {
+        selectedService: {
+          serviceName: 'S1'
+        }
+      }
+    });
+    var serviceConfigs = [
+      Em.Object.create({
+        name: 'foo',
+        filename: 'file1.xml',
+        value: 'val1'
+      })
+    ];
+
+    beforeEach(function() {
+      sinon.stub(view, 'getDependenciesForeignKeys').returns({
+        bar: 'foo'
+      });
+      sinon.stub(App, 'get').returns(initializer);
+      sinon.stub(view, 'getMasterComponents');
+      sinon.stub(view, 'saveRecommendations');
+      sinon.stub(App.config, 'updateHostsListValue');
+      sinon.stub(App.config, 'get').returns({
+        'file1.xml': Em.Object.create({
+          serviceName: 'S1'
+        })
+      });
+      view.resolveDependencies(dependencies, serviceConfigs, context);
+    });
+    afterEach(function() {
+      App.config.get.restore();
+      view.getDependenciesForeignKeys.restore();
+      App.get.restore();
+      view.getMasterComponents.restore();
+      view.saveRecommendations.restore();
+      App.config.updateHostsListValue.restore();
+    });
+
+    it('initializer.setup should be called', function() {
+      expect(initializer.setup.calledWith({bar: 'foo'})).to.be.true;
+    });
+    it('initializer.initialValue should be called', function() {
+      expect(initializer.initialValue.calledWith({
+        name: 'foo',
+        fileName: 'file1.xml'
+      })).to.be.true;
+    });
+    it('initializer.cleanup should be called', function() {
+      expect(initializer.cleanup.called).to.be.true;
     });
+    it('saveRecommendations should be called', function() {
+      expect(view.saveRecommendations.calledWith(context)).to.be.true;
+    });
+    it('App.config.updateHostsListValue should be called', function() {
+      expect(App.config.updateHostsListValue.getCall(0).args).to.be.eql([
+        {
+          foo: 'val1'
+        },
+        'file1.xml',
+        'foo',
+        undefined,
+        false
+      ]);
+    });
+  });
 
+  describe('#saveMasterComponentHosts', function() {
+    var mockCtrl = {
+      saveMasterComponentHosts: sinon.spy(),
+      loadMasterComponentHosts: sinon.spy()
+    };
+    beforeEach(function() {
+      sinon.stub(App.router, 'get').returns(mockCtrl);
+      view.reopen({
+        content: Em.Object.create({
+          componentsFromConfigs: []
+        })
+      });
+      view.set('mastersToCreate', [
+        {}
+      ]);
+    });
     afterEach(function() {
       App.router.get.restore();
-      popup.hide.restore();
-      mock.saveMasterComponentHosts.restore();
-      mock.loadMasterComponentHosts.restore();
-      mock.setDBProperty.restore();
-      App.config.getConfigTagFromFileName.restore();
     });
 
-    it("saveMasterComponentHosts should be called", function() {
-      expect(mock.saveMasterComponentHosts.calledOnce).to.be.true;
+    it('saveMasterComponentHosts should be called', function() {
+      view.saveMasterComponentHosts();
+      expect(mockCtrl.saveMasterComponentHosts.calledWith(view, true)).to.be.true;
+    });
+    it('loadMasterComponentHosts should be called', function() {
+      view.saveMasterComponentHosts();
+      expect(mockCtrl.loadMasterComponentHosts.calledWith(true)).to.be.true;
     });
+    it('componentsFromConfigs should be set', function() {
+      view.saveMasterComponentHosts();
+      expect(view.get('content.componentsFromConfigs')).to.be.eql([{}]);
+    });
+  });
 
-    it("loadMasterComponentHosts should be called", function() {
-      expect(mock.loadMasterComponentHosts.calledOnce).to.be.true;
+  describe('#getSelectedHostName', function() {
+
+    it('should return host of component', function() {
+      view.set('selectedServicesMasters', [
+        {
+          component_name: 'C1',
+          selectedHost: 'host1'
+        }
+      ]);
+      expect(view.getSelectedHostName('C1')).to.be.equal('host1');
     });
+  });
 
-    it("configActionComponent should be set", function() {
-      expect(view.get('configWidgetContext.config.configActionComponent')).to.be.eql({
-        componentName: 'C1',
-        hostName: 'host1'
+  describe('#saveRecommendations', function() {
+    var mockCtrl = {
+      loadRecommendationsSuccess: sinon.spy(),
+      wizardController: {
+        name: 'installerController'
+      }
+    };
+    var context = Em.Object.create({
+      controller: mockCtrl
+    });
+    it('loadRecommendationsSuccess should be called', function() {
+      view.set('configWidgetContext', {
+        config: Em.Object.create({
+          fileName: 'foo.xml',
+          name: 'bar',
+          initialValue: 'iv1'
+        })
+      });
+      view.saveRecommendations(context, {});
+      expect(mockCtrl.loadRecommendationsSuccess.getCall(0).args).to.be.eql([
+        {
+          resources: [
+            {
+              recommendations: {
+                blueprint: {
+                  configurations: {}
+                }
+              }
+            }
+          ]
+        }, null, {
+          dataToSend: {
+            changed_configurations: [{
+              type: 'foo',
+              name: 'bar',
+              old_value: 'iv1'
+            }]
+          }
+        }
+      ]);
+    });
+  });
+
+  describe('#getDependenciesForeignKeys', function() {
+    var dependencies = {
+      foreignKeys: [
+        {
+          fileName: 'foo.xml',
+          propertyName: 'c1',
+          key: 'k1'
+        }
+      ]
+    };
+    var serviceConfigs = [
+      Em.Object.create({
+        filename: 'foo.xml',
+        name: 'c1',
+        value: 'val1'
+      })
+    ];
+
+    it('should return foreignKeys map', function() {
+      expect(view.getDependenciesForeignKeys(dependencies, serviceConfigs)).to.be.eql({
+        k1: 'val1'
+      });
+    });
+  });
+
+  describe('#getMasterComponents', function() {
+    var dependencies = {
+      initializer: {
+        componentNames: ['C1']
+      }
+    };
+    var context = Em.Object.create({
+      controller: {
+        content: {
+          masterComponentHosts: [
+            {
+              component: 'C1',
+              hostName: 'host2'
+            }
+          ]
+        }
+      }
+    });
+    beforeEach(function() {
+      sinon.stub(blueprintUtils, 'getComponentForHosts').returns({
+        host1: ['C1']
       });
     });
+    afterEach(function() {
+      blueprintUtils.getComponentForHosts.restore();
+    });
+
+    it('should return master components when controllerName undefined', function() {
+      view.set('content.controllerName', undefined);
+      expect(view.getMasterComponents(dependencies, context)).to.be.eql([
+        {
+          component: 'C1',
+          hostName: 'host1',
+          isInstalled: true
+        }
+      ]);
+    });
+
+    it('should return master components when controllerName valid', function() {
+      view.set('content.controllerName', 'ctrl1');
+      expect(view.getMasterComponents(dependencies, context)).to.be.eql([
+        {
+          component: 'C1',
+          hostName: 'host2',
+          isInstalled: true
+        }
+      ]);
+    });
   });
 });
\ No newline at end of file


[28/50] [abbrv] ambari git commit: AMBARI-20742 Log Search common properties (mgergely)

Posted by ao...@apache.org.
AMBARI-20742 Log Search common properties (mgergely)

Change-Id: I4fb2fa150890d528ea6499e6348bf0be0d74ccb5


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/22fda0ac
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/22fda0ac
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/22fda0ac

Branch: refs/heads/branch-3.0-perf
Commit: 22fda0accc034414fe0078036408668001b699c1
Parents: 2049199
Author: Miklos Gergely <mg...@hortonworks.com>
Authored: Fri Apr 21 13:09:53 2017 +0200
Committer: Andrew Onishuk <ao...@hortonworks.com>
Committed: Mon Apr 24 14:59:56 2017 +0300

----------------------------------------------------------------------
 .../logsearch-common-properties.xml             | 23 ++++++++++++++++++++
 .../LOGSEARCH/0.5.0/package/scripts/params.py   |  8 +++++--
 .../stacks/2.4/LOGSEARCH/test_logfeeder.py      |  1 +
 .../stacks/2.4/LOGSEARCH/test_logsearch.py      |  3 ++-
 .../test/python/stacks/2.4/configs/default.json |  3 +++
 5 files changed, 35 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/22fda0ac/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/configuration/logsearch-common-properties.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/configuration/logsearch-common-properties.xml b/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/configuration/logsearch-common-properties.xml
new file mode 100644
index 0000000..0087028
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/configuration/logsearch-common-properties.xml
@@ -0,0 +1,23 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration>
+</configuration>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/22fda0ac/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/package/scripts/params.py b/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/package/scripts/params.py
index 84a6ad9..7d4aefc 100644
--- a/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/package/scripts/params.py
+++ b/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/package/scripts/params.py
@@ -257,7 +257,9 @@ logsearch_properties['logsearch.protocol'] = logsearch_ui_protocol
 
 # load config values
 
-logsearch_properties = dict(logsearch_properties.items() + dict(config['configurations']['logsearch-properties']).items())
+logsearch_properties = dict(logsearch_properties.items() +\
+                       dict(config['configurations']['logsearch-common-properties']).items() +\
+                       dict(config['configurations']['logsearch-properties']).items())
 
 # load derived values
 
@@ -347,7 +349,9 @@ logfeeder_properties['logfeeder.solr.core.config.name'] = 'history'
 
 # load config values
 
-logfeeder_properties = dict(logfeeder_properties.items() + dict(config['configurations']['logfeeder-properties']).items())
+logfeeder_properties = dict(logfeeder_properties.items() +\
+                       dict(config['configurations']['logsearch-common-properties']).items() +\
+                       dict(config['configurations']['logfeeder-properties']).items())
 
 # load derived values
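
Both merges above use later-wins dict concatenation: built-in defaults first, then logsearch-common-properties, then the component-specific file, so a key defined in several sources keeps the most specific value. The same precedence, sketched with plain JavaScript objects (names and values hypothetical, except common-property, which matches the test config below):

    var defaults      = {'cluster.name': 'c1', 'common-property': 'built-in-default'};
    var commonProps   = {'common-property': 'common-value'};      // logsearch-common-properties
    var specificProps = {'common-property': 'component-value'};   // logsearch-/logfeeder-properties

    // Object.assign applies sources left to right, so later keys win;
    // the same behavior as dict(a.items() + b.items() + c.items()) above.
    var merged = Object.assign({}, defaults, commonProps, specificProps);
    console.log(merged['common-property']); // 'component-value'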
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/22fda0ac/ambari-server/src/test/python/stacks/2.4/LOGSEARCH/test_logfeeder.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.4/LOGSEARCH/test_logfeeder.py b/ambari-server/src/test/python/stacks/2.4/LOGSEARCH/test_logfeeder.py
index 662e4a7..159cb81 100644
--- a/ambari-server/src/test/python/stacks/2.4/LOGSEARCH/test_logfeeder.py
+++ b/ambari-server/src/test/python/stacks/2.4/LOGSEARCH/test_logfeeder.py
@@ -68,6 +68,7 @@ class TestLogFeeder(RMFTestCase):
                               )
     self.assertResourceCalled('PropertiesFile', '/etc/ambari-logsearch-logfeeder/conf/logfeeder.properties',
                               properties={'cluster.name': 'c1',
+                                          'common-property': 'common-value',
                                           'hadoop.security.credential.provider.path': 'jceks://file/etc/ambari-logsearch-logfeeder/conf/logfeeder.jceks',
                                           'logfeeder.checkpoint.folder': '/etc/ambari-logsearch-logfeeder/conf/checkpoints',
                                           'logfeeder.config.dir': '/etc/ambari-logsearch-logfeeder/conf',

http://git-wip-us.apache.org/repos/asf/ambari/blob/22fda0ac/ambari-server/src/test/python/stacks/2.4/LOGSEARCH/test_logsearch.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.4/LOGSEARCH/test_logsearch.py b/ambari-server/src/test/python/stacks/2.4/LOGSEARCH/test_logsearch.py
index 52ac316..afdc42f 100644
--- a/ambari-server/src/test/python/stacks/2.4/LOGSEARCH/test_logsearch.py
+++ b/ambari-server/src/test/python/stacks/2.4/LOGSEARCH/test_logsearch.py
@@ -83,7 +83,8 @@ class TestLogSearch(RMFTestCase):
                               action = ['delete']
     )
     self.assertResourceCalled('PropertiesFile', '/etc/ambari-logsearch-portal/conf/logsearch.properties',
-                              properties = {'hadoop.security.credential.provider.path': 'jceks://file/etc/ambari-logsearch-portal/conf/logsearch.jceks',
+                              properties = {'common-property': 'common-value',
+                                            'hadoop.security.credential.provider.path': 'jceks://file/etc/ambari-logsearch-portal/conf/logsearch.jceks',
                                             'logsearch.audit.logs.split.interval.mins': '1',
                                             'logsearch.auth.external_auth.enabled': 'false',
                                             'logsearch.auth.external_auth.host_url': 'http://c6401.ambari.apache.org:8080',

http://git-wip-us.apache.org/repos/asf/ambari/blob/22fda0ac/ambari-server/src/test/python/stacks/2.4/configs/default.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.4/configs/default.json b/ambari-server/src/test/python/stacks/2.4/configs/default.json
index 8822e96..1863c02 100644
--- a/ambari-server/src/test/python/stacks/2.4/configs/default.json
+++ b/ambari-server/src/test/python/stacks/2.4/configs/default.json
@@ -292,6 +292,9 @@
       "logsearch-log4j": {
         "content": "&lt;?xml version=\"1.0\" encoding=\"UTF-8\" ?&gt;\n&lt;!--\n  Licensed to the Apache Software Foundation (ASF) under one or more\n  contributor license agreements.  See the NOTICE file distributed with\n  this work for additional information regarding copyright ownership.\n  The ASF licenses this file to You under the Apache License, Version 2.0\n  (the \"License\"); you may not use this file except in compliance with\n  the License.  You may obtain a copy of the License at\n\n      http://www.apache.org/licenses/LICENSE-2.0\n\n  Unless required by applicable law or agreed to in writing, software\n  distributed under the License is distributed on an \"AS IS\" BASIS,\n  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n  See the License for the specific language governing permissions and\n  limitations under the License.\n--&gt;\n&lt;!DOCTYPE log4j:configuration SYSTEM \"log4j.dtd\"&gt;\n&lt;log4j:configuration xmlns:log4j=\"http://jakarta.
 apache.org/log4j/\"&gt;\n  &lt;appender name=\"console\" class=\"org.apache.log4j.ConsoleAppender\"&gt;\n    &lt;param name=\"Target\" value=\"System.out\" /&gt;\n    &lt;layout class=\"org.apache.log4j.PatternLayout\"&gt;\n      &lt;param name=\"ConversionPattern\" value=\"%d [%t] %-5p %C{6} (%F:%L) - %m%n\" /&gt;\n    &lt;/layout&gt;\n  &lt;/appender&gt;\n\n  &lt;appender name=\"rolling_file\" class=\"org.apache.log4j.RollingFileAppender\"&gt; \n    &lt;param name=\"file\" value=\"{{logsearch_log_dir}}/logsearch.log\" /&gt; \n    &lt;param name=\"append\" value=\"true\" /&gt; \n    &lt;param name=\"maxFileSize\" value=\"10MB\" /&gt; \n    &lt;param name=\"maxBackupIndex\" value=\"10\" /&gt; \n    &lt;layout class=\"org.apache.log4j.PatternLayout\"&gt; \n      &lt;param name=\"ConversionPattern\" value=\"%d [%t] %-5p %C{6} (%F:%L) - %m%n\"/&gt; \n    &lt;/layout&gt; \n  &lt;/appender&gt; \n\n  &lt;appender name=\"performance_analyzer\" class=\"org.apache.log4j.RollingFileAppender\"
 &gt;\n    &lt;param name=\"file\" value=\"{{logsearch_log_dir}}/logsearch-performance.log\" /&gt;\n    &lt;param name=\"Threshold\" value=\"info\" /&gt;\n    &lt;param name=\"append\" value=\"true\" /&gt;\n    &lt;param name=\"maxFileSize\" value=\"10MB\" /&gt; \n    &lt;param name=\"maxBackupIndex\" value=\"10\" /&gt; \n    &lt;layout class=\"org.apache.log4j.PatternLayout\"&gt;\n      &lt;param name=\"ConversionPattern\" value=\"%d [%t] %-5p %C{6} (%F:%L) - %m%n\" /&gt;\n    &lt;/layout&gt;\n  &lt;/appender&gt;\n  \n  &lt;logger name=\"org.apache.ambari.logsearch.perfomance\" additivity=\"false\"&gt;\n   &lt;appender-ref ref=\"performance_analyzer\" /&gt;\n  &lt;/logger&gt;\n\n  &lt;category name=\"org.apache.ambari.logsearch\" additivity=\"false\"&gt;\n    &lt;priority value=\"info\" /&gt;\n    &lt;appender-ref ref=\"rolling_file\" /&gt;\n  &lt;/category&gt;\n\n  &lt;root&gt;\n    &lt;priority value=\"warn\" /&gt;\n    &lt;appender-ref ref=\"rolling_file\" /&gt;\n  &lt;/root&gt;\
 n&lt;/log4j:configuration&gt;"
       },
+      "logsearch-common-properties": {
+        "common-property": "common-value"
+      },
       "logsearch-properties": {
         "logsearch.spnego.kerberos.host" : "localhost",
         "logsearch.solr.collection.service.logs" : "hadoop_logs",


[44/50] [abbrv] ambari git commit: AMBARI-20821. Update default values for YARN preemption in Ambari. (vbrodetskyi)

Posted by ao...@apache.org.
AMBARI-20821. Update default values for YARN preemption in Ambari. (vbrodetskyi)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/d1dce4c8
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/d1dce4c8
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/d1dce4c8

Branch: refs/heads/branch-3.0-perf
Commit: d1dce4c8077e38bc8c67ccae4934819ebb869876
Parents: 978c9d7
Author: Vitaly Brodetskyi <vb...@hortonworks.com>
Authored: Fri Apr 21 18:53:06 2017 +0300
Committer: Andrew Onishuk <ao...@hortonworks.com>
Committed: Mon Apr 24 14:59:56 2017 +0300

----------------------------------------------------------------------
 .../YARN/3.0.0.3.0/configuration/yarn-site.xml          | 12 ++++++++++++
 .../common-services/YARN/3.0.0.3.0/service_advisor.py   |  4 ++++
 .../HDP/2.6/services/YARN/configuration/yarn-site.xml   | 12 ++++++++++++
 .../resources/stacks/HDP/2.6/services/stack_advisor.py  |  5 +++++
 .../test/python/stacks/2.6/common/test_stack_advisor.py |  3 +++
 5 files changed, 36 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/d1dce4c8/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/configuration/yarn-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/configuration/yarn-site.xml b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/configuration/yarn-site.xml
index 394fae3..cc96cd7 100644
--- a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/configuration/yarn-site.xml
+++ b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/configuration/yarn-site.xml
@@ -1177,4 +1177,16 @@ yarn.node-labels.manager-class
     </value-attributes>
     <on-ambari-upgrade add="false"/>
   </property>
+  <property>
+    <name>yarn.resourcemanager.monitor.capacity.preemption.total_preemption_per_round</name>
+    <value>0.1</value>
+    <description>This option controls the pace at which containers-marked-for-preemption are actually preempted in each period.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>yarn.resourcemanager.monitor.capacity.preemption.natural_termination_factor</name>
+    <value>1</value>
+    <description>Similar to total_preemption_per_round, we can apply this factor to slow down resource preemption after the preemption target is computed for each queue.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
 </configuration>
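
A rough sketch of the pacing these two knobs control (all numbers hypothetical): total_preemption_per_round caps the share of cluster resources reclaimable in one monitor round, and natural_termination_factor roughly scales the computed preemption target before that cap applies.

    // Hypothetical numbers; only the per-round cap is modeled here.
    var clusterMemoryMb = 100000;               // total cluster resource
    var overCapacityMb = 35000;                 // computed preemption target
    var totalPreemptionPerRound = 0.1;          // reclaim at most 10% per round
    var naturalTerminationFactor = 1;           // take the full remaining target

    var target = overCapacityMb * naturalTerminationFactor;
    var preemptedThisRound = Math.min(target, clusterMemoryMb * totalPreemptionPerRound);
    console.log(preemptedThisRound);            // 10000, so about 4 rounds to reclaim 35000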

http://git-wip-us.apache.org/repos/asf/ambari/blob/d1dce4c8/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/service_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/service_advisor.py b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/service_advisor.py
index 1ac7849..34fe117 100644
--- a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/service_advisor.py
+++ b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/service_advisor.py
@@ -430,6 +430,10 @@ class YARNRecommender(service_advisor.ServiceAdvisor):
       else:
         putYarnSiteProperty('yarn.scheduler.capacity.ordering-policy.priority-utilization.underutilized-preemption.enabled', "false")
 
+    # calculate total_preemption_per_round
+    total_preemption_per_round = str(round(max(float(1)/len(hosts['items']), 0.1),2))
+    putYarnSiteProperty('yarn.resourcemanager.monitor.capacity.preemption.total_preemption_per_round', total_preemption_per_round)
+
     if 'yarn-env' in services['configurations'] and 'yarn_user' in services['configurations']['yarn-env']['properties']:
       yarn_user = services['configurations']['yarn-env']['properties']['yarn_user']
     else:

http://git-wip-us.apache.org/repos/asf/ambari/blob/d1dce4c8/ambari-server/src/main/resources/stacks/HDP/2.6/services/YARN/configuration/yarn-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/services/YARN/configuration/yarn-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.6/services/YARN/configuration/yarn-site.xml
index acdae65..c434c12 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.6/services/YARN/configuration/yarn-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/services/YARN/configuration/yarn-site.xml
@@ -42,6 +42,18 @@
     <on-ambari-upgrade add="false"/>
   </property>
   <property>
+    <name>yarn.resourcemanager.monitor.capacity.preemption.total_preemption_per_round</name>
+    <value>0.1</value>
+    <description>This option controls the pace at which containers-marked-for-preemption are actually preempted in each period.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>yarn.resourcemanager.monitor.capacity.preemption.natural_termination_factor</name>
+    <value>1</value>
+    <description>Similar to total_preemption_per_round, we can apply this factor to slow down resource preemption after the preemption target is computed for each queue.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
     <name>yarn.timeline-service.entity-group-fs-store.app-cache-size</name>
     <value>10</value>
     <description>

http://git-wip-us.apache.org/repos/asf/ambari/blob/d1dce4c8/ambari-server/src/main/resources/stacks/HDP/2.6/services/stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/services/stack_advisor.py b/ambari-server/src/main/resources/stacks/HDP/2.6/services/stack_advisor.py
index 4e1b4b6..da3c98f 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.6/services/stack_advisor.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/services/stack_advisor.py
@@ -189,6 +189,11 @@ class HDP26StackAdvisor(HDP25StackAdvisor):
       else:
         putYarnSiteProperty('yarn.scheduler.capacity.ordering-policy.priority-utilization.underutilized-preemption.enabled', "false")
 
+    # calculate total_preemption_per_round
+    total_preemption_per_round = str(round(max(float(1)/len(hosts['items']), 0.1),2))
+    putYarnSiteProperty('yarn.resourcemanager.monitor.capacity.preemption.total_preemption_per_round', total_preemption_per_round)
+
+
     if 'yarn-env' in services['configurations'] and 'yarn_user' in services['configurations']['yarn-env']['properties']:
       yarn_user = services['configurations']['yarn-env']['properties']['yarn_user']
     else:

http://git-wip-us.apache.org/repos/asf/ambari/blob/d1dce4c8/ambari-server/src/test/python/stacks/2.6/common/test_stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.6/common/test_stack_advisor.py b/ambari-server/src/test/python/stacks/2.6/common/test_stack_advisor.py
index 114dd3a..d4d28c9 100644
--- a/ambari-server/src/test/python/stacks/2.6/common/test_stack_advisor.py
+++ b/ambari-server/src/test/python/stacks/2.6/common/test_stack_advisor.py
@@ -1162,6 +1162,7 @@ class TestHDP26StackAdvisor(TestCase):
           'yarn.timeline-service.entity-group-fs-store.app-cache-size': '10',
           'yarn.scheduler.minimum-allocation-mb': '256',
           'yarn.timeline-service.entity-group-fs-store.group-id-plugin-classpath': '',
+          'yarn.resourcemanager.monitor.capacity.preemption.total_preemption_per_round': '1.0',
           'yarn.nodemanager.resource.cpu-vcores': '4',
           'yarn.scheduler.maximum-allocation-mb': '768',
           'yarn.nodemanager.linux-container-executor.group': 'hadoop',
@@ -1336,6 +1337,7 @@ class TestHDP26StackAdvisor(TestCase):
           'yarn.nodemanager.resource.memory-mb': '1280',
           'yarn.scheduler.minimum-allocation-mb': '256',
           'yarn.timeline-service.entity-group-fs-store.group-id-plugin-classpath': '',
+          'yarn.resourcemanager.monitor.capacity.preemption.total_preemption_per_round': '1.0',
           'yarn.nodemanager.resource.cpu-vcores': '4',
           'yarn.scheduler.maximum-allocation-mb': '1280',
           'yarn.nodemanager.linux-container-executor.group': 'hadoop',
@@ -1442,6 +1444,7 @@ class TestHDP26StackAdvisor(TestCase):
           'yarn.nodemanager.resource.memory-mb': '1280',
           'yarn.scheduler.minimum-allocation-mb': '256',
           'yarn.timeline-service.entity-group-fs-store.group-id-plugin-classpath': '',
+          'yarn.resourcemanager.monitor.capacity.preemption.total_preemption_per_round': '1.0',
           'yarn.nodemanager.resource.cpu-vcores': '4',
           'yarn.scheduler.maximum-allocation-mb': '1280',
           'yarn.nodemanager.linux-container-executor.group': 'hadoop',

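The expected '1.0' in all three fixtures follows from the new formula:
round(max(1/N, 0.1), 2) equals 1.0 only for N == 1, so these test host
lists must contain a single host. A quick check:

    # '1.0' is only produced for a one-host cluster; anything larger differs.
    assert str(round(max(float(1) / 1, 0.1), 2)) == '1.0'
    assert str(round(max(float(1) / 2, 0.1), 2)) == '0.5'
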

[26/50] [abbrv] ambari git commit: AMBARI-20806. Upgrade Pause button should not show when the Upgrade is already Paused (alexantonenko)

Posted by ao...@apache.org.
AMBARI-20806. Upgrade Pause button should not show when the Upgrade is already Paused (alexantonenko)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/8322ed57
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/8322ed57
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/8322ed57

Branch: refs/heads/branch-3.0-perf
Commit: 8322ed571e2b9b235154dea7393593b6f8401370
Parents: c306a2b
Author: Alex Antonenko <hi...@gmail.com>
Authored: Thu Apr 20 19:02:05 2017 +0300
Committer: Andrew Onishuk <ao...@hortonworks.com>
Committed: Mon Apr 24 14:59:56 2017 +0300

----------------------------------------------------------------------
 .../admin/stack_upgrade/stack_upgrade_wizard.hbs    | 16 +++++++++-------
 1 file changed, 9 insertions(+), 7 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/8322ed57/ambari-web/app/templates/main/admin/stack_upgrade/stack_upgrade_wizard.hbs
----------------------------------------------------------------------
diff --git a/ambari-web/app/templates/main/admin/stack_upgrade/stack_upgrade_wizard.hbs b/ambari-web/app/templates/main/admin/stack_upgrade/stack_upgrade_wizard.hbs
index b2e8991..6fceeb1 100644
--- a/ambari-web/app/templates/main/admin/stack_upgrade/stack_upgrade_wizard.hbs
+++ b/ambari-web/app/templates/main/admin/stack_upgrade/stack_upgrade_wizard.hbs
@@ -39,13 +39,15 @@
     {{#if view.isLoaded}}
       <div>
         <div class="row">
-          {{#if isDowngrade}}
-            <button
-              class="btn btn-default" {{action confirmPauseDowngrade target="view"}}>{{t admin.stackUpgrade.pauseDowngrade}}</button>
-          {{else}}
-            <button
-              class="btn btn-default" {{action confirmPauseUpgrade target="view"}}>{{t admin.stackUpgrade.pauseUpgrade}}</button>
-          {{/if}}
+          {{#unless App.upgradeSuspended}}
+              {{#if isDowngrade}}
+                <button
+                  class="btn btn-default" {{action confirmPauseDowngrade target="view"}}>{{t admin.stackUpgrade.pauseDowngrade}}</button>
+              {{else}}
+                <button
+                  class="btn btn-default" {{action confirmPauseUpgrade target="view"}}>{{t admin.stackUpgrade.pauseUpgrade}}</button>
+              {{/if}}
+          {{/unless}}
         </div>
         {{#if view.runningItem}}
           <div class="panel panel-default details-box row">

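The change wraps both buttons in an {{#unless App.upgradeSuspended}}
guard, so neither pause action renders once the upgrade is already
paused. The resulting branching, as a small Python sketch (names are
illustrative, not the Ambari web API):

    # Sketch of the template's branching: no pause button while suspended.
    def pause_button_label(upgrade_suspended, is_downgrade):
        if upgrade_suspended:
            return None   # the unless-guard hides both buttons
        return 'Pause Downgrade' if is_downgrade else 'Pause Upgrade'

    assert pause_button_label(True, False) is None
    assert pause_button_label(False, True) == 'Pause Downgrade'
    assert pause_button_label(False, False) == 'Pause Upgrade'
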

[04/50] [abbrv] ambari git commit: AMBARI-20745 : Metrics collector goes down after starting up. (avijayan)

Posted by ao...@apache.org.
AMBARI-20745 : Metrics collector goes down after starting up. (avijayan)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/13dd115f
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/13dd115f
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/13dd115f

Branch: refs/heads/branch-3.0-perf
Commit: 13dd115fd7e913d59b249672257705df55a67502
Parents: 5ab5de4
Author: Aravindan Vijayan <av...@hortonworks.com>
Authored: Mon Apr 17 13:55:40 2017 -0700
Committer: Andrew Onishuk <ao...@hortonworks.com>
Committed: Mon Apr 24 14:59:56 2017 +0300

----------------------------------------------------------------------
 ambari-metrics/ambari-metrics-timelineservice/pom.xml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/13dd115f/ambari-metrics/ambari-metrics-timelineservice/pom.xml
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/pom.xml b/ambari-metrics/ambari-metrics-timelineservice/pom.xml
index d848eab..f9d7e19 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/pom.xml
+++ b/ambari-metrics/ambari-metrics-timelineservice/pom.xml
@@ -477,7 +477,7 @@
     <dependency>
       <groupId>org.apache.httpcomponents</groupId>
       <artifactId>httpclient</artifactId>
-      <version>4.2.5</version>
+      <version>4.5.2</version>
     </dependency>
     <dependency>
       <groupId>com.google.guava</groupId>


[14/50] [abbrv] ambari git commit: AMBARI-20768. Local Ambari user with no cluster role must not be able to access Logsearch UI (Keta Patel via oleewere)

Posted by ao...@apache.org.
AMBARI-20768. Local Ambari user with no cluster role must not be able to access Logsearch UI (Keta Patel via oleewere)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/c8de2d07
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/c8de2d07
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/c8de2d07

Branch: refs/heads/branch-3.0-perf
Commit: c8de2d07191c014504111a1d55db259d250fbef5
Parents: d853ad8
Author: oleewere <ol...@gmail.com>
Authored: Thu Apr 20 13:35:25 2017 +0200
Committer: Andrew Onishuk <ao...@hortonworks.com>
Committed: Mon Apr 24 14:59:56 2017 +0300

----------------------------------------------------------------------
 .../security/LogsearchExternalServerAuthenticationProvider.java | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/c8de2d07/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/web/security/LogsearchExternalServerAuthenticationProvider.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/web/security/LogsearchExternalServerAuthenticationProvider.java b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/web/security/LogsearchExternalServerAuthenticationProvider.java
index e23f0a2..1dab126 100644
--- a/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/web/security/LogsearchExternalServerAuthenticationProvider.java
+++ b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/web/security/LogsearchExternalServerAuthenticationProvider.java
@@ -122,8 +122,9 @@ public class LogsearchExternalServerAuthenticationProvider extends LogsearchAbst
 
     List<String> values = new ArrayList<>();
     JSONUtil.getValuesOfKey(responseJson, PrivilegeInfo.PERMISSION_NAME.toString(), values);
-    if (values.isEmpty())
-      return true;
+    if (values.isEmpty()) {
+      return false;
+    }
     
     if (allowedRoleList.length > 0 && responseJson != null) {
       for (String allowedRole : allowedRoleList) {

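The fix flips the empty-permission case from allow to deny: a user whose
Ambari privilege response carries no permission names no longer passes
the external-server check. A hedged Python sketch of the decision (the
helper and role names are illustrative, not the Java API):

    # After the fix: an empty permission list means access is denied.
    def is_allowed(permission_names, allowed_roles):
        if not permission_names:
            return False   # previously returned True, admitting no-role users
        return any(role in permission_names for role in allowed_roles)

    assert not is_allowed([], ['AMBARI.ADMINISTRATOR'])
    assert is_allowed(['CLUSTER.ADMINISTRATOR'],
                      ['AMBARI.ADMINISTRATOR', 'CLUSTER.ADMINISTRATOR'])
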

[47/50] [abbrv] ambari git commit: AMBARI-20326. HDP 3.0 TP - support for HBase with configs, kerberos, widgets, metrics, quicklinks, and themes (alejandro)

Posted by ao...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/41006ebe/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/metrics.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/metrics.json b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/metrics.json
new file mode 100644
index 0000000..f94f510
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/metrics.json
@@ -0,0 +1,4733 @@
+{
+  "HBASE_REGIONSERVER": {
+    "Component": [
+      {
+        "type": "jmx",
+        "metrics": {
+          "default": {
+            "metrics/hbase/regionserver/slowPutCount": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.slowPutCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/percentFilesLocal": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.percentFilesLocal",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/deleteRequestLatency_min": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Delete_min",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/blockCacheFree": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.blockCacheFreeSize",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/mutationsWithoutWALSize": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.mutationsWithoutWALSize",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/blockCacheMissCount": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.blockCacheMissCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/flushQueueSize": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.flushQueueLength",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/deleteRequestLatency_99th_percentile": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Delete_99th_percentile",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/getRequestLatency_num_ops": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Get_num_ops",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/ScanNext_num_ops": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.ScanNext_num_ops",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/Increment_num_ops": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Increment_num_ops",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/Append_num_ops": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Append_num_ops",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/ScanNext_95th_percentile": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.ScanNext_95th_percentile",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/Append_95th_percentile": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Append_95th_percentile",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/Increment_95th_percentile": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Increment_95th_percentile",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/updatesBlockedTime": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.updatesBlockedTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/IPC/numActiveHandler": {
+              "metric": "Hadoop:service=HBase,name=IPC,sub=IPC.numActiveHandler",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/IPC/numCallsInGeneralQueue": {
+              "metric": "Hadoop:service=HBase,name=IPC,sub=IPC.numCallsInGeneralQueue",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/IPC/numOpenConnections": {
+              "metric": "Hadoop:service=HBase,name=IPC,sub=IPC.numOpenConnections",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/slowAppendCount": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.slowAppendCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/blockCacheSize": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.blockCacheSize",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/putRequestLatency_num_ops": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Mutate_num_ops",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/slowIncrementCount": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.slowIncrementCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/blockCacheEvictedCount": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.blockCacheEvictionCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/putRequestLatency_95th_percentile": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Mutate_95th_percentile",
+              "unit": "ms",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/putRequestLatency_median": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Mutate_median",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/deleteRequestLatency_mean": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Delete_mean",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/slowGetCount": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.slowGetCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/blockCacheCount": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.blockCacheCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/getRequestLatency_75th_percentile": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Get_75th_percentile",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/putRequestLatency_min": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Mutate_min",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/storefileIndexSizeMB": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.storeFileIndexSize",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/deleteRequestLatency_median": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Delete_median",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/putRequestLatency_max": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Mutate_max",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/totalStaticIndexSizeKB": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.staticIndexSize",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/deleteRequestLatency_num_ops": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Delete_num_ops",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/putRequestLatency_mean": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Mutate_mean",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/requests": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.totalRequestCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/storefiles": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.storeFileCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/mutationsWithoutWALCount": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.mutationsWithoutWALCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/getRequestLatency_median": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Get_median",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/slowDeleteCount": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.slowDeleteCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/putRequestLatency_99th_percentile": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Mutate_99th_percentile",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/stores": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.storeCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/getRequestLatency_min": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Get_min",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/getRequestLatency_95th_percentile": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Get_95th_percentile",
+              "unit": "ms",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/deleteRequestLatency_95th_percentile": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Delete_95th_percentile",
+              "unit": "ms",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/getRequestLatency_max": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Get_max",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/getRequestLatency_mean": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Get_mean",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/deleteRequestLatency_75th_percentile": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Delete_75th_percentile",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/deleteRequestLatency_max": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Delete_max",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/putRequestLatency_75th_percentile": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Mutate_75th_percentile",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/totalStaticBloomSizeKB": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.staticBloomSize",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/blockCacheHitCount": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.blockCacheHitCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/getRequestLatency_99th_percentile": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Get_99th_percentile",
+              "pointInTime": true,
+              "temporal": false
+            }
+          }
+        }
+      }
+    ],
+    "HostComponent": [
+      {
+        "type": "jmx",
+        "metrics": {
+          "default": {
+            "metrics/hbase/regionserver/slowPutCount": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.slowPutCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/percentFilesLocal": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.percentFilesLocal",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/deleteRequestLatency_min": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Delete_min",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/blockCacheFree": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.blockCacheFreeSize",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/mutationsWithoutWALSize": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.mutationsWithoutWALSize",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/blockCacheMissCount": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.blockCacheMissCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/flushQueueSize": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.flushQueueLength",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/deleteRequestLatency_99th_percentile": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Delete_99th_percentile",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/getRequestLatency_num_ops": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Get_num_ops",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/ScanNext_num_ops": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.ScanNext_num_ops",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/Increment_num_ops": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Increment_num_ops",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/Append_num_ops": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Append_num_ops",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/ScanNext_95th_percentile": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.ScanNext_95th_percentile",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/Append_95th_percentile": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Append_95th_percentile",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/Increment_95th_percentile": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Increment_95th_percentile",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/updatesBlockedTime": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.updatesBlockedTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/IPC/numActiveHandler": {
+              "metric": "Hadoop:service=HBase,name=IPC,sub=IPC.numActiveHandler",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/IPC/numCallsInGeneralQueue": {
+              "metric": "Hadoop:service=HBase,name=IPC,sub=IPC.numCallsInGeneralQueue",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/IPC/numOpenConnections": {
+              "metric": "Hadoop:service=HBase,name=IPC,sub=IPC.numOpenConnections",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/slowAppendCount": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.slowAppendCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/blockCacheSize": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.blockCacheSize",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/putRequestLatency_num_ops": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Mutate_num_ops",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/slowIncrementCount": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.slowIncrementCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/blockCacheEvictedCount": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.blockCacheEvictionCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/putRequestLatency_95th_percentile": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Mutate_95th_percentile",
+              "unit": "ms",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/putRequestLatency_median": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Mutate_median",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/deleteRequestLatency_mean": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Delete_mean",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/slowGetCount": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.slowGetCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/blockCacheCount": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.blockCacheCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/getRequestLatency_75th_percentile": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Get_75th_percentile",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/putRequestLatency_min": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Mutate_min",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/storefileIndexSizeMB": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.storeFileIndexSize",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/deleteRequestLatency_median": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Delete_median",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/putRequestLatency_max": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Mutate_max",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/totalStaticIndexSizeKB": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.staticIndexSize",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/deleteRequestLatency_num_ops": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Delete_num_ops",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/putRequestLatency_mean": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Mutate_mean",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/requests": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.totalRequestCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/storefiles": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.storeFileCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/mutationsWithoutWALCount": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.mutationsWithoutWALCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/getRequestLatency_median": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Get_median",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/slowDeleteCount": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.slowDeleteCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/putRequestLatency_99th_percentile": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Mutate_99th_percentile",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/stores": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.storeCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/getRequestLatency_min": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Get_min",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/getRequestLatency_95th_percentile": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Get_95th_percentile",
+              "unit": "ms",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/deleteRequestLatency_95th_percentile": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Delete_95th_percentile",
+              "unit": "ms",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/getRequestLatency_max": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Get_max",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/getRequestLatency_mean": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Get_mean",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/deleteRequestLatency_75th_percentile": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Delete_75th_percentile",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/deleteRequestLatency_max": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Delete_max",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/putRequestLatency_75th_percentile": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Mutate_75th_percentile",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/totalStaticBloomSizeKB": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.staticBloomSize",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/blockCacheHitCount": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.blockCacheHitCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/getRequestLatency_99th_percentile": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Get_99th_percentile",
+              "pointInTime": true,
+              "temporal": false
+            }
+          }
+        }
+      }
+    ]
+  },
+  "HBASE_MASTER": {
+    "Component": [
+      {
+        "type": "jmx",
+        "metrics": {
+          "default": {
+            "metrics/rpc/regionServerReport.aboveOneSec.MaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.regionServerReport.aboveOneSec.MaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/memMaxM": {
+              "metric": "Hadoop:service=HBase,name=JvmMetrics.MemMaxM",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/reportRSFatalError.aboveOneSec.AvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.reportRSFatalError.aboveOneSec.AvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/closeRegionMaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.closeRegionMaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/isMasterRunningAvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.isMasterRunningAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/RpcQueueTimeAvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.RpcQueueTimeAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/deleteTableNumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.deleteTableNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "ServiceComponentInfo/Revision": {
+              "metric": "hadoop:service=HBase,name=Info.revision",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/splitRegionAvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.splitRegionAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getHTableDescriptorsMinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getHTableDescriptorsMinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getClusterStatus.aboveOneSec.MaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getClusterStatus.aboveOneSec.MaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/splitRegionNumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.splitRegionNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getProtocolVersionAvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getProtocolVersionAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getBlockCacheColumnFamilySummariesMinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getBlockCacheColumnFamilySummariesMinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/modifyColumnMinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.modifyColumnMinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getClosestRowBeforeMaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getClosestRowBeforeMaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getProtocolSignatureNumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getProtocolSignatureNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/RpcSlowResponseMinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.RpcSlowResponseMinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/master/AverageLoad": {
+              "metric": "Hadoop:service=HBase,name=Master,sub=Server.averageLoad",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/openScannerNumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.openScannerNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/deleteTable.aboveOneSec.MinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.deleteTable.aboveOneSec.MinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/deleteColumn.aboveOneSec.MaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.deleteColumn.aboveOneSec.MaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getCompactionStateMinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getCompactionStateMinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/regionServerReport.aboveOneSec.MinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.regionServerReport.aboveOneSec.MinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/stopMaster.aboveOneSec.MaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.stopMaster.aboveOneSec.MaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/modifyColumn.aboveOneSec.AvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.modifyColumn.aboveOneSec.AvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getHServerInfoAvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getHServerInfoAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/rollHLogWriterMinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.rollHLogWriterMinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/offlineMinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.offlineMinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getProtocolSignatureMaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getProtocolSignatureMaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/master/ServerName": {
+              "metric": "Hadoop:service=HBase,name=Master,sub=Server.tag.serverName",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getHServerInfoMinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getHServerInfoMinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/master/splitSizeMaxTime": {
+              "metric": "Hadoop:service=HBase,name=Master,sub=Server.HlogSplitSize_max",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/execCoprocessorAvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.execCoprocessorAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/RpcProcessingTimeMaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.RpcProcessingTimeMaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getClusterStatusMaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getClusterStatusMaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/offline.aboveOneSec.NumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.offline.aboveOneSec.NumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/master/ZookeeperQuorum": {
+              "metric": "Hadoop:service=HBase,name=Master,sub=Server.tag.zookeeperQuorum",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/master/hdfsDate": {
+              "metric": "hadoop:service=HBase,name=Info.hdfsDate",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/createTable.aboveOneSec.MaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.createTable.aboveOneSec.MaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/offlineMaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.offlineMaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/offline.aboveOneSec.MaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.offline.aboveOneSec.MaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/balanceNumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.balanceNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getClosestRowBeforeNumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getClosestRowBeforeNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/modifyColumnNumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.modifyColumnNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getLastFlushTimeNumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getLastFlushTimeNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/stopMaster.aboveOneSec.NumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.stopMaster.aboveOneSec.NumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/deleteTable.aboveOneSec.NumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.deleteTable.aboveOneSec.NumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/master/hdfsUrl": {
+              "metric": "hadoop:service=HBase,name=Info.hdfsUrl",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/multiMinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.multiMinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/master/revision": {
+              "metric": "hadoop:service=HBase,name=Info.revision",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/modifyColumnMaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.modifyColumnMaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/addColumnMinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.addColumnMinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getHTableDescriptorsNumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getHTableDescriptorsNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/RpcQueueTimeMaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.RpcQueueTimeMaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/multiNumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.multiNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getProtocolVersion.aboveOneSec.AvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getProtocolVersion.aboveOneSec.AvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/offline.aboveOneSec.MinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.offline.aboveOneSec.MinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/deleteTable.aboveOneSec.MaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.deleteTable.aboveOneSec.MaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/enableTableMinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.enableTableMinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/regionServerReportMinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.regionServerReportMinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/reportRSFatalErrorNumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.reportRSFatalErrorNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/addColumn.aboveOneSec.NumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.addColumn.aboveOneSec.NumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/unassign.aboveOneSec.AvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.unassign.aboveOneSec.AvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/reportRSFatalErrorMaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.reportRSFatalErrorMaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/disableTable.aboveOneSec.MaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.disableTable.aboveOneSec.MaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/existsAvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.existsAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/master/MasterActiveTime": {
+              "metric": "Hadoop:service=HBase,name=Master,sub=Server.masterActiveTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getBlockCacheColumnFamilySummariesNumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getBlockCacheColumnFamilySummariesNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/createTable.aboveOneSec.MinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.createTable.aboveOneSec.MinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/rpcAuthorizationFailures": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.rpcAuthorizationFailures",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/master/hdfsUser": {
+              "metric": "hadoop:service=HBase,name=Info.hdfsUser",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/regionServerStartupAvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.regionServerStartupAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/deleteMinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.deleteMinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/regionServerStartupNumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.regionServerStartupNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/deleteColumnNumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.deleteColumnNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/master/version": {
+              "metric": "hadoop:service=HBase,name=Info.version",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/master/splitTimeMaxTime": {
+              "metric": "Hadoop:service=HBase,name=Master,sub=Server.HlogSplitTime_max",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/synchronousBalanceSwitchNumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.synchronousBalanceSwitchNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/stopMasterNumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.stopMasterNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/master/splitTimeNumOps": {
+              "metric": "hadoop:service=Master,name=MasterStatistics.splitTimeNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/reportRSFatalErrorAvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.reportRSFatalErrorAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/replicateLogEntriesNumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.replicateLogEntriesNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/multiMaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.multiMaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/deleteColumnMinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.deleteColumnMinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getProtocolSignature.aboveOneSec.AvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getProtocolSignature.aboveOneSec.AvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getLastFlushTimeMaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getLastFlushTimeMaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/disableTable.aboveOneSec.MinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.disableTable.aboveOneSec.MinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/NumOpenConnections": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.NumOpenConnections",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/RpcQueueTimeNumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.RpcQueueTimeNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/regionServerReportMaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.regionServerReportMaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/master/IsActiveMaster": {
+              "metric": "Hadoop:service=HBase,name=Master,sub=Server.tag.isActiveMaster",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/bulkLoadHFilesMinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.bulkLoadHFilesMinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/synchronousBalanceSwitch.aboveOneSec.NumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.synchronousBalanceSwitch.aboveOneSec.NumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/master/MasterStartTime": {
+              "metric": "Hadoop:service=HBase,name=Master,sub=Server.masterStartTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/balanceSwitchMaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.balanceSwitchMaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/unlockRowMinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.unlockRowMinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/incrementNumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.incrementNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/reportRSFatalError.aboveOneSec.MinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.reportRSFatalError.aboveOneSec.MinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/balanceSwitch.aboveOneSec.MinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.balanceSwitch.aboveOneSec.MinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/execCoprocessorMaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.execCoprocessorMaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/enableTable.aboveOneSec.MaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.enableTable.aboveOneSec.MaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/putMaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.putMaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/flushRegionMaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.flushRegionMaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/nextNumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.nextNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getOnlineRegionsAvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getOnlineRegionsAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/createTableAvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.createTableAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/deleteNumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.deleteNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/modifyTable.aboveOneSec.AvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.modifyTable.aboveOneSec.AvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getAlterStatusAvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getAlterStatusAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/assign.aboveOneSec.NumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.assign.aboveOneSec.NumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/regionServerStartup.aboveOneSec.MinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.regionServerStartup.aboveOneSec.MinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/balanceSwitch.aboveOneSec.AvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.balanceSwitch.aboveOneSec.AvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/isMasterRunningMaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.isMasterRunningMaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/existsNumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.existsNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/compactRegionMaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.compactRegionMaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/bulkLoadHFilesMaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.bulkLoadHFilesMaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/rollHLogWriterNumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.rollHLogWriterNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/unlockRowAvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.unlockRowAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/openRegionsNumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.openRegionsNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/checkAndDeleteMaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.checkAndDeleteMaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/stopMaster.aboveOneSec.AvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.stopMaster.aboveOneSec.AvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/splitRegionMaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.splitRegionMaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getHTableDescriptorsMaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getHTableDescriptorsMaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/deleteColumn.aboveOneSec.MinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.deleteColumn.aboveOneSec.MinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/moveMinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.moveMinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/shutdown.aboveOneSec.AvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.shutdown.aboveOneSec.AvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/appendNumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.appendNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/appendAvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.appendAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getClusterStatusNumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getClusterStatusNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/RpcSlowResponseNumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.RpcSlowResponseNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/master/splitSize_num_ops": {
+              "metric": "Hadoop:service=HBase,name=Master,sub=Server.HlogSplitSize_num_ops",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "ServiceComponentInfo/MasterActiveTime": {
+              "metric": "Hadoop:service=HBase,name=Master,sub=Server.masterActiveTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getLastFlushTimeMinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getLastFlushTimeMinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/checkAndPutNumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.checkAndPutNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/master/splitTime_avg_time": {
+              "metric": "Hadoop:service=HBase,name=Master,sub=Server.HlogSplitTime_mean",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/createTableMinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.createTableMinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/incrementMaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.incrementMaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getAlterStatus.aboveOneSec.MaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getAlterStatus.aboveOneSec.MaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getCompactionStateMaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getCompactionStateMaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/master/splitTimeAvgTime": {
+              "metric": "hadoop:service=Master,name=MasterStatistics.splitTimeAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getStoreFileListMaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getStoreFileListMaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getProtocolSignature.aboveOneSec.MinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getProtocolSignature.aboveOneSec.MinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/RpcProcessingTimeAvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.RpcProcessingTimeAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/incrementColumnValueNumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.incrementColumnValueNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/enableTable.aboveOneSec.MinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.enableTable.aboveOneSec.MinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/stopMaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.stopMaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/multiAvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.multiAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/createTable.aboveOneSec.NumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.createTable.aboveOneSec.NumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/shutdownAvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.shutdownAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/openRegionMinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.openRegionMinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getBlockCacheColumnFamilySummariesMaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getBlockCacheColumnFamilySummariesMaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/deleteColumn.aboveOneSec.NumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.deleteColumn.aboveOneSec.NumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/modifyColumn.aboveOneSec.MinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.modifyColumn.aboveOneSec.MinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/disableTableNumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.disableTableNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getProtocolVersion.aboveOneSec.MaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getProtocolVersion.aboveOneSec.MaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/replicateLogEntriesAvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.replicateLogEntriesAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/master/cluster_requests": {
+              "metric": "Hadoop:service=HBase,name=Master,sub=Server.clusterRequests",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getHServerInfoMaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getHServerInfoMaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getClusterStatusMinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getClusterStatusMinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/rpcAuthenticationFailures": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.rpcAuthenticationFailures",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/master/Coprocessors": {
+              "metric": "hadoop:service=Master,name=Master.Coprocessors",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/unlockRowNumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.unlockRowNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getAlterStatus.aboveOneSec.AvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getAlterStatus.aboveOneSec.AvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/regionServerStartup.aboveOneSec.AvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.regionServerStartup.aboveOneSec.AvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/balanceMaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.balanceMaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/incrementColumnValueMinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.incrementColumnValueMinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/modifyColumn.aboveOneSec.MaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.modifyColumn.aboveOneSec.MaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getProtocolVersionMinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getProtocolVersionMinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/master/RegionsInTransition": {
+              "metric": "hadoop:service=Master,name=Master.RegionsInTransition",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/master/AssignmentManger/ritCount": {
+              "metric": "Hadoop:service=HBase,name=Master,sub=AssignmentManger.ritCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/balanceSwitchAvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.balanceSwitchAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getAlterStatusMaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getAlterStatusMaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/unassignMaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.unassignMaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/nextAvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.nextAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/rollHLogWriterMaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.rollHLogWriterMaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getAlterStatus.aboveOneSec.MinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getAlterStatus.aboveOneSec.MinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/stopAvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.stopAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/master/hdfsVersion": {
+              "metric": "hadoop:service=HBase,name=Info.hdfsVersion",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/unassignMinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.unassignMinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/RpcSlowResponseAvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.RpcSlowResponseAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/assignNumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.assignNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getLastFlushTimeAvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getLastFlushTimeAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getClusterStatusAvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getClusterStatusAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/mutateRowNumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.mutateRowNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getClosestRowBeforeMinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getClosestRowBeforeMinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/regionServerReport.aboveOneSec.NumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.regionServerReport.aboveOneSec.NumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getClusterStatus.aboveOneSec.NumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getClusterStatus.aboveOneSec.NumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/balanceAvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.balanceAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/balanceSwitchNumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.balanceSwitchNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/master/RegionServers": {
+              "metric": "Hadoop:service=HBase,name=Master,sub=Server.numRegionServers",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/bulkLoadHFilesAvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.bulkLoadHFilesAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/compactRegionAvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.compactRegionAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/disableTableMaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.disableTableMaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/closeMaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.closeMaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/enableTableNumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.enableTableNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/openScannerMinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.openScannerMinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/moveMaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.moveMaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/checkAndPutMaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.checkAndPutMaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/deleteTableAvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.deleteTableAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/assign.aboveOneSec.MinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.assign.aboveOneSec.MinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/master/ClusterId": {
+              "metric": "Hadoop:service=HBase,name=Master,sub=Server.tag.clusterId",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getProtocolVersionMaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getProtocolVersionMaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/regionServerStartupMinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.regionServerStartupMinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/RpcQueueTimeMinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.RpcQueueTimeMinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/createTableNumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.createTableNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/RpcProcessingTimeMinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.RpcProcessingTimeMinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getAlterStatus.aboveOneSec.NumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getAlterStatus.aboveOneSec.NumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getMinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getMinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/addColumnNumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.addColumnNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/master/splitSizeNumOps": {
+              "metric": "hadoop:service=Master,name=MasterStatistics.splitSizeNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/unassignNumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.unassignNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/balance.aboveOneSec.MinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.balance.aboveOneSec.MinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/closeRegionNumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.closeRegionNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getProtocolSignature.aboveOneSec.MaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getProtocolSignature.aboveOneSec.MaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/synchronousBalanceSwitchAvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.synchronousBalanceSwitchAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/appendMaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.appendMaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/unlockRowMaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.unlockRowMaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getStoreFileListMinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getStoreFileListMinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/moveAvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.moveAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/mutateRowMaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.mutateRowMaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getHTableDescriptors.aboveOneSec.MinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getHTableDescriptors.aboveOneSec.MinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getCompactionStateAvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getCompactionStateAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/closeRegionMinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.closeRegionMinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/reportRSFatalError.aboveOneSec.NumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.reportRSFatalError.aboveOneSec.NumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/RpcSlowResponseMaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.RpcSlowResponseMaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/balanceSwitchMinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.balanceSwitchMinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/balanceMinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.balanceMinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/lockRowMaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.lockRowMaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/openScannerMaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.openScannerMaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getStoreFileListAvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getStoreFileListAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/openRegionsAvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.openRegionsAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/disableTableMinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.disableTableMinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getOnlineRegionsNumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getOnlineRegionsNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/deleteColumnAvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.deleteColumnAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/putMinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.putMinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/replicationCallQueueLen": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.replicationCallQueueLen",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/modifyTableMinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.modifyTableMinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/reportRSFatalErrorMinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.reportRSFatalErrorMinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/flushRegionAvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.flushRegionAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/deleteColumn.aboveOneSec.AvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.deleteColumn.aboveOneSec.AvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/offline.aboveOneSec.AvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.offline.aboveOneSec.AvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/master/splitTimeMinTime": {
+              "metric": "Hadoop:service=HBase,name=Master,sub=Server.HlogSplitTime_min",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/createTable.aboveOneSec.AvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.createTable.aboveOneSec.AvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/lockRowAvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.lockRowAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/lockRowMinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.lockRowMinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/synchronousBalanceSwitch.aboveOneSec.AvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.synchronousBalanceSwitch.aboveOneSec.AvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getClusterStatus.aboveOneSec.MinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getClusterStatus.aboveOneSec.MinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/addColumn.aboveOneSec.AvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.addColumn.aboveOneSec.AvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/isMasterRunningNumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.isMasterRunningNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/closeAvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.closeAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/addColumn.aboveOneSec.MaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.addColumn.aboveOneSec.MaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/closeMinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.closeMinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/replicateLogEntriesMinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.replicateLogEntriesMinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/openRegionsMinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.openRegionsMinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/balance.aboveOneSec.NumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.balance.aboveOneSec.NumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/replicateLogEntriesMaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.replicateLogEntriesMaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/execCoprocessorMinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.execCoprocessorMinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/master/splitTime_num_ops": {
+              "metric": "Hadoop:service=HBase,name=Master,sub=Server.HlogSplitTime_num_ops",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "ServiceComponentInfo/RegionsInTransition": {
+              "metric": "Hadoop:service=HBase,name=Master,sub=Server.ritCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/modifyTableAvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.modifyTableAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/assignMaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.assignMaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/isMasterRunning.aboveOneSec.AvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.isMasterRunning.aboveOneSec.AvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/regionServerReport.aboveOneSec.AvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.regionServerReport.aboveOneSec.AvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getAvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/rpcAuthenticationSuccesses": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.rpcAuthenticationSuccesses",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/enableTable.aboveOneS

<TRUNCATED>
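
The mapping above follows the metrics.json convention used throughout this definition: each JSON key is the metric path exposed through the Ambari API, "metric" names the underlying JMX bean attribute it is read from, and the pointInTime/temporal flags advertise whether the value supports point-in-time and time-range queries. A minimal plain-JavaScript sketch of how such an entry can be consumed (the resolveJmxSource helper is an illustrative assumption, not Ambari code):

var masterMetrics = {
  "metrics/hbase/master/MasterStartTime": {
    "metric": "Hadoop:service=HBase,name=Master,sub=Server.masterStartTime",
    "pointInTime": true,   // current value can be fetched directly
    "temporal": false      // no time-series history is served for it
  }
};

// Resolve an API-facing metric path to the JMX source it maps to.
function resolveJmxSource(mappings, apiPath) {
  var entry = mappings[apiPath];
  return entry ? entry.metric : null;
}

resolveJmxSource(masterMetrics, "metrics/hbase/master/MasterStartTime");
// => "Hadoop:service=HBase,name=Master,sub=Server.masterStartTime"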

[25/50] [abbrv] ambari git commit: AMBARI-20759. The Web Client Upgrade Wizard Does Not Wait Long Enough When Creating a Large Upgrade (alexantonenko)

Posted by ao...@apache.org.
AMBARI-20759. The Web Client Upgrade Wizard Does Not Wait Long Enough When Creating a Large Upgrade (alexantonenko)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/c306a2b7
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/c306a2b7
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/c306a2b7

Branch: refs/heads/branch-3.0-perf
Commit: c306a2b7c7dab94cffaeb56224af9c0308068f29
Parents: 53dcaa8
Author: Alex Antonenko <hi...@gmail.com>
Authored: Fri Apr 14 19:17:27 2017 +0300
Committer: Andrew Onishuk <ao...@hortonworks.com>
Committed: Mon Apr 24 14:59:56 2017 +0300

----------------------------------------------------------------------
 ambari-web/app/utils/ajax/ajax.js | 1 +
 1 file changed, 1 insertion(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/c306a2b7/ambari-web/app/utils/ajax/ajax.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/utils/ajax/ajax.js b/ambari-web/app/utils/ajax/ajax.js
index 2b5e52a..5d7108f 100644
--- a/ambari-web/app/utils/ajax/ajax.js
+++ b/ambari-web/app/utils/ajax/ajax.js
@@ -1707,6 +1707,7 @@ var urls = {
     'type': 'POST',
     'format': function (data) {
       return {
+        timeout : 600000,
         data: JSON.stringify({
           "Upgrade": {
             "repository_version": data.value,


[30/50] [abbrv] ambari git commit: AMBARI-20803. Devdeploy: Logsearch service check fails (aonishuk)

Posted by ao...@apache.org.
AMBARI-20803. Devdeploy: Logsearch service check fails (aonishuk)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/abe5e556
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/abe5e556
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/abe5e556

Branch: refs/heads/branch-3.0-perf
Commit: abe5e5565387e0d1e86e879ff15c7b6f7c94bc37
Parents: 2c55224
Author: Andrew Onishuk <ao...@hortonworks.com>
Authored: Thu Apr 20 16:20:45 2017 +0300
Committer: Andrew Onishuk <ao...@hortonworks.com>
Committed: Mon Apr 24 14:59:56 2017 +0300

----------------------------------------------------------------------
 .../common-services/LOGSEARCH/0.5.0/package/scripts/params.py      | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/abe5e556/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/package/scripts/params.py b/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/package/scripts/params.py
index 7d4aefc..bfe0c0d 100644
--- a/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/package/scripts/params.py
+++ b/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/package/scripts/params.py
@@ -397,7 +397,7 @@ if 'infra-solr-env' in config['configurations'] and security_enabled and not log
 # Smoke command
 #####################################
 
-logsearch_server_hosts = default('/configurations/clusterHostInfo/logsearch_server_hosts', None)
+logsearch_server_hosts = default('/clusterHostInfo/logsearch_server_hosts', None)
 logsearch_server_host = ""
 if logsearch_server_hosts is not None and len(logsearch_server_hosts) > 0:
   logsearch_server_host = logsearch_server_hosts[0]
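
The fix above matters because clusterHostInfo is a top-level section of the command structure the agent receives, not a child of configurations, so the old lookup always fell through to its None default and logsearch_server_host stayed empty, failing the service check. A toy JavaScript sketch of that path lookup (the command object and host name are illustrative assumptions):

var command = {
  configurations: { 'logsearch-env': {} },
  clusterHostInfo: { logsearch_server_hosts: ['c6401.ambari.apache.org'] }
};

// Walk a '/'-separated path through nested objects, falling back when any
// segment is missing -- the same contract as the Python default() helper.
function defaultLookup(obj, path, fallback) {
  var value = path.split('/').filter(Boolean).reduce(function (o, key) {
    return o == null ? o : o[key];
  }, obj);
  return value == null ? fallback : value;
}

defaultLookup(command, '/configurations/clusterHostInfo/logsearch_server_hosts', null);
// => null: clusterHostInfo is not nested under configurations
defaultLookup(command, '/clusterHostInfo/logsearch_server_hosts', null);
// => ['c6401.ambari.apache.org']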


[41/50] [abbrv] ambari git commit: AMBARI-20678. Complete node name is not shown when node name is larger than 17 characters (pallavkul)

Posted by ao...@apache.org.
AMBARI-20678. Complete node name is not shown when node name is larger than 17 characters (pallavkul)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/978c9d7e
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/978c9d7e
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/978c9d7e

Branch: refs/heads/branch-3.0-perf
Commit: 978c9d7e894d6017fb5bc6f65fd8bf12d1264cd5
Parents: 4b27feb
Author: pallavkul <pa...@gmail.com>
Authored: Fri Apr 21 21:17:37 2017 +0530
Committer: Andrew Onishuk <ao...@hortonworks.com>
Committed: Mon Apr 24 14:59:56 2017 +0300

----------------------------------------------------------------------
 .../hive20/src/main/resources/ui/app/adapters/application.js  | 2 +-
 .../views/hive20/src/main/resources/ui/app/styles/app.scss    | 7 +++++++
 .../main/resources/ui/app/utils/hive-explainer/renderer.js    | 4 ++--
 3 files changed, 10 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/978c9d7e/contrib/views/hive20/src/main/resources/ui/app/adapters/application.js
----------------------------------------------------------------------
diff --git a/contrib/views/hive20/src/main/resources/ui/app/adapters/application.js b/contrib/views/hive20/src/main/resources/ui/app/adapters/application.js
index 42935fc..696f861 100644
--- a/contrib/views/hive20/src/main/resources/ui/app/adapters/application.js
+++ b/contrib/views/hive20/src/main/resources/ui/app/adapters/application.js
@@ -57,7 +57,7 @@ export default DS.RESTAdapter.extend({
       // In development mode when the UI is served using ember serve the xhr requests are proxied to ambari server
       // by setting the proxyurl parameter in ember serve and for ambari to authenticate the requests, it needs this
       // basic authorization. This is for default admin/admin username/password combination.
-      //headers['Authorization'] = 'Basic YWRtaW46YWRtaW4=';
+      headers['Authorization'] = 'Basic YWRtaW46YWRtaW4=';
       //headers['Authorization'] = 'Basic aGl2ZTpoaXZl';
       //headers['Authorization'] = 'Basic ZGlwYXlhbjpkaXBheWFu';
     }

http://git-wip-us.apache.org/repos/asf/ambari/blob/978c9d7e/contrib/views/hive20/src/main/resources/ui/app/styles/app.scss
----------------------------------------------------------------------
diff --git a/contrib/views/hive20/src/main/resources/ui/app/styles/app.scss b/contrib/views/hive20/src/main/resources/ui/app/styles/app.scss
index f4b63c5..ca3bc98 100644
--- a/contrib/views/hive20/src/main/resources/ui/app/styles/app.scss
+++ b/contrib/views/hive20/src/main/resources/ui/app/styles/app.scss
@@ -977,3 +977,10 @@ rect.operator__box {
 .CodeMirror-scroll {
   padding-bottom: 20px;
 }
+
+.ellipsis-node {
+  white-space: nowrap;
+  overflow: hidden;
+  text-overflow: ellipsis;
+  width: 100px;
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/978c9d7e/contrib/views/hive20/src/main/resources/ui/app/utils/hive-explainer/renderer.js
----------------------------------------------------------------------
diff --git a/contrib/views/hive20/src/main/resources/ui/app/utils/hive-explainer/renderer.js b/contrib/views/hive20/src/main/resources/ui/app/utils/hive-explainer/renderer.js
index 78aa39d..987cda9 100644
--- a/contrib/views/hive20/src/main/resources/ui/app/utils/hive-explainer/renderer.js
+++ b/contrib/views/hive20/src/main/resources/ui/app/utils/hive-explainer/renderer.js
@@ -159,7 +159,7 @@ function getRenderer(type, isSingleReducer) {
             <i class='fa ${getOperatorIcon(d._operator)}' aria-hidden='true'></i>
           </div>
           <div class='operator-body' style='margin-left: 10px;'>
-            <div>${getOperatorLabel(d, isSingleReducer)}</div>
+            <div class="ellipsis-node" title=${getOperatorLabel(d, isSingleReducer)}>${getOperatorLabel(d, isSingleReducer)}</div>
             ${(d['limit:'] && d['limit:'] > -1) ? '<div><span style="font-weight: lighter;">Limit:</span> ' + d['limit:'] + ' </div>' : ''}
           </div>
         </div>
@@ -175,7 +175,7 @@ function getRenderer(type, isSingleReducer) {
           <i class='fa ${getOperatorIcon(d._operator)}' aria-hidden='true'></i>
         </div>
         <div class='operator-body' style='margin-left: 10px;'>
-          <div>${getOperatorLabel(d, isSingleReducer)}</div>
+          <div class="ellipsis-node" title=${getOperatorLabel(d, isSingleReducer)}>${getOperatorLabel(d, isSingleReducer)}</div>
           ${stats}
         </div>
       </div>
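
The pattern above pairs the new .ellipsis-node class (nowrap, hidden overflow, text-overflow: ellipsis, fixed 100px width) with a title attribute, so long node names are clipped visually while the complete name remains available in the hover tooltip. A minimal sketch of the same pattern (renderNodeLabel is an assumed helper, not renderer.js code; quoting the title attribute also keeps labels containing spaces intact):

function renderNodeLabel(label) {
  return `<div class="ellipsis-node" title="${label}">${label}</div>`;
}

document.body.innerHTML += renderNodeLabel('a-node-name-longer-than-17-characters');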


[37/50] [abbrv] ambari git commit: AMBARI-20733. /var/log/krb5kdc.log is growing rapidly on the KDC server (echekanskiy)

Posted by ao...@apache.org.

The hunks below remove the security_status handlers from the YARN component scripts; their cached_kinit_executor calls ran kinit against the KDC on every agent status check, which is what made /var/log/krb5kdc.log grow so rapidly.
http://git-wip-us.apache.org/repos/asf/ambari/blob/2a00812a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/resourcemanager.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/resourcemanager.py b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/resourcemanager.py
index b871b68..81b99e6 100644
--- a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/resourcemanager.py
+++ b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/resourcemanager.py
@@ -132,66 +132,6 @@ class ResourcemanagerDefault(Resourcemanager):
     check_process_status(status_params.resourcemanager_pid_file)
     pass
 
-  def security_status(self, env):
-    import status_params
-    env.set_params(status_params)
-    if status_params.security_enabled:
-      props_value_check = {"yarn.timeline-service.http-authentication.type": "kerberos",
-                           "yarn.acl.enable": "true"}
-      props_empty_check = ["yarn.resourcemanager.principal",
-                           "yarn.resourcemanager.keytab",
-                           "yarn.resourcemanager.webapp.spnego-principal",
-                           "yarn.resourcemanager.webapp.spnego-keytab-file"]
-
-      props_read_check = ["yarn.resourcemanager.keytab",
-                          "yarn.resourcemanager.webapp.spnego-keytab-file"]
-      yarn_site_props = build_expectations('yarn-site', props_value_check, props_empty_check,
-                                           props_read_check)
-
-      yarn_expectations ={}
-      yarn_expectations.update(yarn_site_props)
-
-      security_params = get_params_from_filesystem(status_params.hadoop_conf_dir,
-                                                   {'yarn-site.xml': FILE_TYPE_XML})
-      result_issues = validate_security_config_properties(security_params, yarn_site_props)
-      if not result_issues: # If all validations passed successfully
-        try:
-          # Double check the dict before calling execute
-          if ( 'yarn-site' not in security_params
-               or 'yarn.resourcemanager.keytab' not in security_params['yarn-site']
-               or 'yarn.resourcemanager.principal' not in security_params['yarn-site']) \
-            or 'yarn.resourcemanager.webapp.spnego-keytab-file' not in security_params['yarn-site'] \
-            or 'yarn.resourcemanager.webapp.spnego-principal' not in security_params['yarn-site']:
-            self.put_structured_out({"securityState": "UNSECURED"})
-            self.put_structured_out(
-              {"securityIssuesFound": "Keytab file or principal are not set property."})
-            return
-
-          cached_kinit_executor(status_params.kinit_path_local,
-                                status_params.yarn_user,
-                                security_params['yarn-site']['yarn.resourcemanager.keytab'],
-                                security_params['yarn-site']['yarn.resourcemanager.principal'],
-                                status_params.hostname,
-                                status_params.tmp_dir)
-          cached_kinit_executor(status_params.kinit_path_local,
-                                status_params.yarn_user,
-                                security_params['yarn-site']['yarn.resourcemanager.webapp.spnego-keytab-file'],
-                                security_params['yarn-site']['yarn.resourcemanager.webapp.spnego-principal'],
-                                status_params.hostname,
-                                status_params.tmp_dir)
-          self.put_structured_out({"securityState": "SECURED_KERBEROS"})
-        except Exception as e:
-          self.put_structured_out({"securityState": "ERROR"})
-          self.put_structured_out({"securityStateErrorInfo": str(e)})
-      else:
-        issues = []
-        for cf in result_issues:
-          issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
-        self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
-        self.put_structured_out({"securityState": "UNSECURED"})
-    else:
-      self.put_structured_out({"securityState": "UNSECURED"})
-
   def refreshqueues(self, env):
     import params
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/2a00812a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/application_timeline_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/application_timeline_server.py b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/application_timeline_server.py
index 03fff21..b1e0c16 100644
--- a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/application_timeline_server.py
+++ b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/application_timeline_server.py
@@ -83,67 +83,6 @@ class ApplicationTimelineServerDefault(ApplicationTimelineServer):
     env.set_params(status_params)
     check_process_status(status_params.yarn_historyserver_pid_file)
 
-  def security_status(self, env):
-    import status_params
-    env.set_params(status_params)
-    if status_params.security_enabled:
-      props_value_check = {"yarn.timeline-service.enabled": "true",
-                           "yarn.timeline-service.http-authentication.type": "kerberos",
-                           "yarn.acl.enable": "true"}
-      props_empty_check = ["yarn.timeline-service.principal",
-                           "yarn.timeline-service.keytab",
-                           "yarn.timeline-service.http-authentication.kerberos.principal",
-                           "yarn.timeline-service.http-authentication.kerberos.keytab"]
-
-      props_read_check = ["yarn.timeline-service.keytab",
-                          "yarn.timeline-service.http-authentication.kerberos.keytab"]
-      yarn_site_props = build_expectations('yarn-site', props_value_check, props_empty_check,
-                                                  props_read_check)
-
-      yarn_expectations = {}
-      yarn_expectations.update(yarn_site_props)
-
-      security_params = get_params_from_filesystem(status_params.hadoop_conf_dir,
-                                                   {'yarn-site.xml': FILE_TYPE_XML})
-      result_issues = validate_security_config_properties(security_params, yarn_expectations)
-      if not result_issues: # If all validations passed successfully
-        try:
-          # Double check the dict before calling execute
-          if ( 'yarn-site' not in security_params
-               or 'yarn.timeline-service.keytab' not in security_params['yarn-site']
-               or 'yarn.timeline-service.principal' not in security_params['yarn-site']) \
-            or 'yarn.timeline-service.http-authentication.kerberos.keytab' not in security_params['yarn-site'] \
-            or 'yarn.timeline-service.http-authentication.kerberos.principal' not in security_params['yarn-site']:
-            self.put_structured_out({"securityState": "UNSECURED"})
-            self.put_structured_out(
-              {"securityIssuesFound": "Keytab file or principal are not set property."})
-            return
-
-          cached_kinit_executor(status_params.kinit_path_local,
-                                status_params.yarn_user,
-                                security_params['yarn-site']['yarn.timeline-service.keytab'],
-                                security_params['yarn-site']['yarn.timeline-service.principal'],
-                                status_params.hostname,
-                                status_params.tmp_dir)
-          cached_kinit_executor(status_params.kinit_path_local,
-                                status_params.yarn_user,
-                                security_params['yarn-site']['yarn.timeline-service.http-authentication.kerberos.keytab'],
-                                security_params['yarn-site']['yarn.timeline-service.http-authentication.kerberos.principal'],
-                                status_params.hostname,
-                                status_params.tmp_dir)
-          self.put_structured_out({"securityState": "SECURED_KERBEROS"})
-        except Exception as e:
-          self.put_structured_out({"securityState": "ERROR"})
-          self.put_structured_out({"securityStateErrorInfo": str(e)})
-      else:
-        issues = []
-        for cf in result_issues:
-          issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
-        self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
-        self.put_structured_out({"securityState": "UNSECURED"})
-    else:
-      self.put_structured_out({"securityState": "UNSECURED"})
-
   def get_log_folder(self):
     import params
     return params.yarn_log_dir

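The securityState and securityIssuesFound values above are reported through Script.put_structured_out. As a rough illustration of the contract those calls rely on, a stand-in might look like the following (the merge-then-flush behavior is the point; the output path is an assumption, not taken from the Ambari sources):

    import json

    class StructuredOutStandIn(object):
        """Illustrative stand-in for Script.put_structured_out: successive
        calls merge into one dict that is flushed to a JSON file, which the
        agent later attaches to the command report."""

        def __init__(self, out_file='/tmp/structured-out.json'):
            self.out_file = out_file
            self.structured_out = {}

        def put_structured_out(self, sos):
            self.structured_out.update(sos)   # merge, so later keys win
            with open(self.out_file, 'w') as fp:
                json.dump(self.structured_out, fp)
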
http://git-wip-us.apache.org/repos/asf/ambari/blob/2a00812a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/historyserver.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/historyserver.py b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/historyserver.py
index 8f5d380..d886244 100644
--- a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/historyserver.py
+++ b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/historyserver.py
@@ -120,62 +120,6 @@ class HistoryServerDefault(HistoryServer):
     env.set_params(status_params)
     check_process_status(status_params.mapred_historyserver_pid_file)
 
-  def security_status(self, env):
-    import status_params
-    env.set_params(status_params)
-    if status_params.security_enabled:
-      expectations = {}
-      expectations.update(build_expectations('mapred-site',
-                                             None,
-                                             [
-                                               'mapreduce.jobhistory.keytab',
-                                               'mapreduce.jobhistory.principal',
-                                               'mapreduce.jobhistory.webapp.spnego-keytab-file',
-                                               'mapreduce.jobhistory.webapp.spnego-principal'
-                                             ],
-                                             None))
-
-      security_params = get_params_from_filesystem(status_params.hadoop_conf_dir,
-                                                   {'mapred-site.xml': FILE_TYPE_XML})
-      result_issues = validate_security_config_properties(security_params, expectations)
-      if not result_issues: # If all validations passed successfully
-        try:
-          # Double check the dict before calling execute
-          if ( 'mapred-site' not in security_params or
-               'mapreduce.jobhistory.keytab' not in security_params['mapred-site'] or
-               'mapreduce.jobhistory.principal' not in security_params['mapred-site'] or
-               'mapreduce.jobhistory.webapp.spnego-keytab-file' not in security_params['mapred-site'] or
-               'mapreduce.jobhistory.webapp.spnego-principal' not in security_params['mapred-site']):
-            self.put_structured_out({"securityState": "UNSECURED"})
-            self.put_structured_out(
-              {"securityIssuesFound": "Keytab file or principal not set."})
-            return
-
-          cached_kinit_executor(status_params.kinit_path_local,
-                                status_params.mapred_user,
-                                security_params['mapred-site']['mapreduce.jobhistory.keytab'],
-                                security_params['mapred-site']['mapreduce.jobhistory.principal'],
-                                status_params.hostname,
-                                status_params.tmp_dir)
-          cached_kinit_executor(status_params.kinit_path_local,
-                                status_params.mapred_user,
-                                security_params['mapred-site']['mapreduce.jobhistory.webapp.spnego-keytab-file'],
-                                security_params['mapred-site']['mapreduce.jobhistory.webapp.spnego-principal'],
-                                status_params.hostname,
-                                status_params.tmp_dir)
-          self.put_structured_out({"securityState": "SECURED_KERBEROS"})
-        except Exception as e:
-          self.put_structured_out({"securityState": "ERROR"})
-          self.put_structured_out({"securityStateErrorInfo": str(e)})
-      else:
-        issues = []
-        for cf in result_issues:
-          issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
-        self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
-        self.put_structured_out({"securityState": "UNSECURED"})
-    else:
-      self.put_structured_out({"securityState": "UNSECURED"})
-
   def get_log_folder(self):
     import params
     return params.mapred_log_dir

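Note that the JobHistoryServer variant above passes None for both the value check and the read check, i.e. it only asserts that the four mapred-site properties are present and non-empty. The same call in sketch form, with build_expectations assumed importable as in the earlier sketch:

    expectations = build_expectations(
        'mapred-site',
        None,                                  # no exact-value checks
        ['mapreduce.jobhistory.keytab',
         'mapreduce.jobhistory.principal',
         'mapreduce.jobhistory.webapp.spnego-keytab-file',
         'mapreduce.jobhistory.webapp.spnego-principal'],
        None)                                  # no keytab readability checks
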
http://git-wip-us.apache.org/repos/asf/ambari/blob/2a00812a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/nodemanager.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/nodemanager.py b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/nodemanager.py
index 133d2e1..5acb20b 100644
--- a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/nodemanager.py
+++ b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/nodemanager.py
@@ -90,66 +90,6 @@ class NodemanagerDefault(Nodemanager):
     env.set_params(status_params)
     check_process_status(status_params.nodemanager_pid_file)
 
-  def security_status(self, env):
-    import status_params
-    env.set_params(status_params)
-    if status_params.security_enabled:
-      props_value_check = {"yarn.timeline-service.http-authentication.type": "kerberos",
-                           "yarn.acl.enable": "true"}
-      props_empty_check = ["yarn.nodemanager.principal",
-                           "yarn.nodemanager.keytab",
-                           "yarn.nodemanager.webapp.spnego-principal",
-                           "yarn.nodemanager.webapp.spnego-keytab-file"]
-
-      props_read_check = ["yarn.nodemanager.keytab",
-                          "yarn.nodemanager.webapp.spnego-keytab-file"]
-      yarn_site_props = build_expectations('yarn-site', props_value_check, props_empty_check,
-                                           props_read_check)
-
-      yarn_expectations = {}
-      yarn_expectations.update(yarn_site_props)
-
-      security_params = get_params_from_filesystem(status_params.hadoop_conf_dir,
-                                                   {'yarn-site.xml': FILE_TYPE_XML})
-      result_issues = validate_security_config_properties(security_params, yarn_expectations)
-      if not result_issues: # If all validations passed successfully
-        try:
-          # Double check the dict before calling execute
-          if ( 'yarn-site' not in security_params
-               or 'yarn.nodemanager.keytab' not in security_params['yarn-site']
-               or 'yarn.nodemanager.principal' not in security_params['yarn-site']) \
-            or 'yarn.nodemanager.webapp.spnego-keytab-file' not in security_params['yarn-site'] \
-            or 'yarn.nodemanager.webapp.spnego-principal' not in security_params['yarn-site']:
-            self.put_structured_out({"securityState": "UNSECURED"})
-            self.put_structured_out(
-              {"securityIssuesFound": "Keytab file or principal are not set property."})
-            return
-
-          cached_kinit_executor(status_params.kinit_path_local,
-                                status_params.yarn_user,
-                                security_params['yarn-site']['yarn.nodemanager.keytab'],
-                                security_params['yarn-site']['yarn.nodemanager.principal'],
-                                status_params.hostname,
-                                status_params.tmp_dir)
-          cached_kinit_executor(status_params.kinit_path_local,
-                                status_params.yarn_user,
-                                security_params['yarn-site']['yarn.nodemanager.webapp.spnego-keytab-file'],
-                                security_params['yarn-site']['yarn.nodemanager.webapp.spnego-principal'],
-                                status_params.hostname,
-                                status_params.tmp_dir)
-          self.put_structured_out({"securityState": "SECURED_KERBEROS"})
-        except Exception as e:
-          self.put_structured_out({"securityState": "ERROR"})
-          self.put_structured_out({"securityStateErrorInfo": str(e)})
-      else:
-        issues = []
-        for cf in result_issues:
-          issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
-        self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
-        self.put_structured_out({"securityState": "UNSECURED"})
-    else:
-      self.put_structured_out({"securityState": "UNSECURED"})
-
   def get_log_folder(self):
     import params
     return params.yarn_log_dir

http://git-wip-us.apache.org/repos/asf/ambari/blob/2a00812a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/resourcemanager.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/resourcemanager.py b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/resourcemanager.py
index ba748f1..78675bf 100644
--- a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/resourcemanager.py
+++ b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/resourcemanager.py
@@ -147,66 +147,6 @@ class ResourcemanagerDefault(Resourcemanager):
     check_process_status(status_params.resourcemanager_pid_file)
     pass
 
-  def security_status(self, env):
-    import status_params
-    env.set_params(status_params)
-    if status_params.security_enabled:
-      props_value_check = {"yarn.timeline-service.http-authentication.type": "kerberos",
-                           "yarn.acl.enable": "true"}
-      props_empty_check = ["yarn.resourcemanager.principal",
-                           "yarn.resourcemanager.keytab",
-                           "yarn.resourcemanager.webapp.spnego-principal",
-                           "yarn.resourcemanager.webapp.spnego-keytab-file"]
-
-      props_read_check = ["yarn.resourcemanager.keytab",
-                          "yarn.resourcemanager.webapp.spnego-keytab-file"]
-      yarn_site_props = build_expectations('yarn-site', props_value_check, props_empty_check,
-                                           props_read_check)
-
-      yarn_expectations = {}
-      yarn_expectations.update(yarn_site_props)
-
-      security_params = get_params_from_filesystem(status_params.hadoop_conf_dir,
-                                                   {'yarn-site.xml': FILE_TYPE_XML})
-      result_issues = validate_security_config_properties(security_params, yarn_expectations)
-      if not result_issues: # If all validations passed successfully
-        try:
-          # Double check the dict before calling execute
-          if ( 'yarn-site' not in security_params
-               or 'yarn.resourcemanager.keytab' not in security_params['yarn-site']
-               or 'yarn.resourcemanager.principal' not in security_params['yarn-site']) \
-            or 'yarn.resourcemanager.webapp.spnego-keytab-file' not in security_params['yarn-site'] \
-            or 'yarn.resourcemanager.webapp.spnego-principal' not in security_params['yarn-site']:
-            self.put_structured_out({"securityState": "UNSECURED"})
-            self.put_structured_out(
-              {"securityIssuesFound": "Keytab file or principal are not set property."})
-            return
-
-          cached_kinit_executor(status_params.kinit_path_local,
-                                status_params.yarn_user,
-                                security_params['yarn-site']['yarn.resourcemanager.keytab'],
-                                security_params['yarn-site']['yarn.resourcemanager.principal'],
-                                status_params.hostname,
-                                status_params.tmp_dir)
-          cached_kinit_executor(status_params.kinit_path_local,
-                                status_params.yarn_user,
-                                security_params['yarn-site']['yarn.resourcemanager.webapp.spnego-keytab-file'],
-                                security_params['yarn-site']['yarn.resourcemanager.webapp.spnego-principal'],
-                                status_params.hostname,
-                                status_params.tmp_dir)
-          self.put_structured_out({"securityState": "SECURED_KERBEROS"})
-        except Exception as e:
-          self.put_structured_out({"securityState": "ERROR"})
-          self.put_structured_out({"securityStateErrorInfo": str(e)})
-      else:
-        issues = []
-        for cf in result_issues:
-          issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
-        self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
-        self.put_structured_out({"securityState": "UNSECURED"})
-    else:
-      self.put_structured_out({"securityState": "UNSECURED"})
-
   def refreshqueues(self, env):
     import params
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/2a00812a/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5/package/scripts/zookeeper_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5/package/scripts/zookeeper_server.py b/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5/package/scripts/zookeeper_server.py
index c2d76be..8d9de9e 100644
--- a/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5/package/scripts/zookeeper_server.py
+++ b/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5/package/scripts/zookeeper_server.py
@@ -111,57 +111,6 @@ class ZookeeperServerLinux(ZookeeperServer):
     import status_params
     env.set_params(status_params)
     check_process_status(status_params.zk_pid_file)
-
-  def security_status(self, env):
-    import status_params
-    env.set_params(status_params)
-
-    if status_params.security_enabled:
-      # Expect the following files to be available in params.config_dir:
-      #   zookeeper_jaas.conf
-      #   zookeeper_client_jaas.conf
-      try:
-        props_value_check = None
-        props_empty_check = ['Server/keyTab', 'Server/principal']
-        props_read_check = ['Server/keyTab']
-        zk_env_expectations = build_expectations('zookeeper_jaas', props_value_check, props_empty_check,
-                                                 props_read_check)
-
-        zk_expectations = {}
-        zk_expectations.update(zk_env_expectations)
-
-        security_params = get_params_from_filesystem(status_params.config_dir,
-                                                   {'zookeeper_jaas.conf': FILE_TYPE_JAAS_CONF})
-
-        result_issues = validate_security_config_properties(security_params, zk_expectations)
-        if not result_issues:  # If all validations passed successfully
-          # Double check the dict before calling execute
-          if ( 'zookeeper_jaas' not in security_params
-               or 'Server' not in security_params['zookeeper_jaas']
-               or 'keyTab' not in security_params['zookeeper_jaas']['Server']
-               or 'principal' not in security_params['zookeeper_jaas']['Server']):
-            self.put_structured_out({"securityState": "ERROR"})
-            self.put_structured_out({"securityIssuesFound": "Keytab file or principal are not set property."})
-            return
-
-          cached_kinit_executor(status_params.kinit_path_local,
-                                status_params.zk_user,
-                                security_params['zookeeper_jaas']['Server']['keyTab'],
-                                security_params['zookeeper_jaas']['Server']['principal'],
-                                status_params.hostname,
-                                status_params.tmp_dir)
-          self.put_structured_out({"securityState": "SECURED_KERBEROS"})
-        else:
-          issues = []
-          for cf in result_issues:
-            issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
-          self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
-          self.put_structured_out({"securityState": "UNSECURED"})
-      except Exception as e:
-        self.put_structured_out({"securityState": "ERROR"})
-        self.put_structured_out({"securityStateErrorInfo": str(e)})
-    else:
-      self.put_structured_out({"securityState": "UNSECURED"})
       
   def get_log_folder(self):
     import params

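Unlike the XML-backed checks elsewhere in this commit, the ZooKeeper variant parses a JAAS file (FILE_TYPE_JAAS_CONF), so get_params_from_filesystem returns a nested section dict and the expectation keys use 'Section/key' paths such as 'Server/keyTab'. The shape the double-check above walks, with illustrative values:

    security_params = {
        'zookeeper_jaas': {                # one entry per parsed file
            'Server': {                    # JAAS login context name
                'keyTab': '/etc/security/keytabs/zk.service.keytab',
                'principal': 'zookeeper/_HOST@EXAMPLE.COM',
            }
        }
    }
    # hence security_params['zookeeper_jaas']['Server']['keyTab'] in the code above
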
http://git-wip-us.apache.org/repos/asf/ambari/blob/2a00812a/ambari-server/src/main/resources/stacks/PERF/1.0/services/KERBEROS/package/scripts/kerberos_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/KERBEROS/package/scripts/kerberos_client.py b/ambari-server/src/main/resources/stacks/PERF/1.0/services/KERBEROS/package/scripts/kerberos_client.py
index 1298f1e..b2cdaa6 100644
--- a/ambari-server/src/main/resources/stacks/PERF/1.0/services/KERBEROS/package/scripts/kerberos_client.py
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/services/KERBEROS/package/scripts/kerberos_client.py
@@ -43,27 +43,6 @@ class KerberosClient(KerberosScript):
   def status(self, env):
     raise ClientComponentHasNoStatus()
 
-  def security_status(self, env):
-    import status_params
-    if status_params.security_enabled:
-      if status_params.smoke_user and status_params.smoke_user_keytab:
-        try:
-          cached_kinit_executor(status_params.kinit_path_local,
-                                status_params.smoke_user,
-                                status_params.smoke_user_keytab,
-                                status_params.smoke_user_principal,
-                                status_params.hostname,
-                                status_params.tmp_dir)
-          self.put_structured_out({"securityState": "SECURED_KERBEROS"})
-        except Exception as e:
-          self.put_structured_out({"securityState": "ERROR"})
-          self.put_structured_out({"securityStateErrorInfo": str(e)})
-      else:
-        self.put_structured_out({"securityState": "UNKNOWN"})
-        self.put_structured_out({"securityStateErrorInfo": "Missing smoke user credentials"})
-    else:
-      self.put_structured_out({"securityState": "UNSECURED"})
-
   def set_keytab(self, env):
     self.write_keytab_file()
 

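The PERF-stack Kerberos client check above reduces to a single cached_kinit_executor call against the smoke user's credentials; the "cached" in the name suggests successful kinits are memoized under tmp_dir so repeated status commands do not hit the KDC every time. A sketch of the call, with every value illustrative rather than taken from any real config:

    from resource_management.libraries.functions.security_commons import cached_kinit_executor

    cached_kinit_executor(
        '/usr/bin/kinit',                                      # kinit_path_local
        'ambari-qa',                                           # smoke user
        '/etc/security/keytabs/smokeuser.headless.keytab',     # smoke user keytab
        'ambari-qa@EXAMPLE.COM',                               # smoke user principal
        'c6401.ambari.apache.org',                             # hostname
        '/tmp')                                                # tmp_dir for the cache
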
http://git-wip-us.apache.org/repos/asf/ambari/blob/2a00812a/ambari-server/src/test/java/org/apache/ambari/server/agent/HeartbeatProcessorTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/agent/HeartbeatProcessorTest.java b/ambari-server/src/test/java/org/apache/ambari/server/agent/HeartbeatProcessorTest.java
index cff0e34..2dd91c0 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/agent/HeartbeatProcessorTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/agent/HeartbeatProcessorTest.java
@@ -495,7 +495,6 @@ public class HeartbeatProcessorTest {
     componentStatus1.setServiceName(HDFS);
     componentStatus1.setMessage(DummyHostStatus);
     componentStatus1.setStatus(State.STARTED.name());
-    componentStatus1.setSecurityState(SecurityState.SECURED_KERBEROS.name());
     componentStatus1.setComponentName(DATANODE);
     componentStatuses.add(componentStatus1);
     ComponentStatus componentStatus2 = new ComponentStatus();
@@ -503,7 +502,6 @@ public class HeartbeatProcessorTest {
     componentStatus2.setServiceName(HDFS);
     componentStatus2.setMessage(DummyHostStatus);
     componentStatus2.setStatus(State.STARTED.name());
-    componentStatus2.setSecurityState(SecurityState.UNSECURED.name());
     componentStatus2.setComponentName(SECONDARY_NAMENODE);
     componentStatuses.add(componentStatus2);
     hb.setComponentStatus(componentStatuses);
@@ -526,7 +524,6 @@ public class HeartbeatProcessorTest {
     State componentState2 = serviceComponentHost2.getState();
     State componentState3 = serviceComponentHost3.getState();
     assertEquals(State.STARTED, componentState1);
-    assertEquals(SecurityState.SECURED_KERBEROS, serviceComponentHost1.getSecurityState());
     assertEquals(State.INSTALLED, componentState2);
     assertEquals(SecurityState.SECURING, serviceComponentHost2.getSecurityState());
     //starting state will not be overridden by status command
@@ -837,7 +834,6 @@ public class HeartbeatProcessorTest {
     componentStatus1.setServiceName(HDFS);
     componentStatus1.setMessage(DummyHostStatus);
     componentStatus1.setStatus(State.STARTED.name());
-    componentStatus1.setSecurityState(SecurityState.UNSECURED.name());
     componentStatus1.setComponentName(DATANODE);
 
     componentStatus1.setExtra(extra);
@@ -873,7 +869,6 @@ public class HeartbeatProcessorTest {
     componentStatus1.setServiceName(HDFS);
     componentStatus1.setMessage(DummyHostStatus);
     componentStatus1.setStatus(State.STARTED.name());
-    componentStatus1.setSecurityState(SecurityState.UNSECURED.name());
     componentStatus1.setComponentName(DATANODE);
     hb.setComponentStatus(Collections.singletonList(componentStatus1));
 
@@ -1328,7 +1323,6 @@ public class HeartbeatProcessorTest {
     componentStatus1.setServiceName(HDFS);
     componentStatus1.setMessage(DummyHostStatus);
     componentStatus1.setStatus(State.INSTALLED.name());
-    componentStatus1.setSecurityState(SecurityState.UNSECURED.name());
     componentStatus1.setComponentName(DATANODE);
     componentStatuses.add(componentStatus1);
 
@@ -1337,7 +1331,6 @@ public class HeartbeatProcessorTest {
     componentStatus2.setServiceName(HDFS);
     componentStatus2.setMessage(DummyHostStatus);
     componentStatus2.setStatus(State.INSTALLED.name());
-    componentStatus2.setSecurityState(SecurityState.UNSECURED.name());
     componentStatus2.setComponentName(NAMENODE);
     componentStatuses.add(componentStatus2);
 

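After this change a ComponentStatus entry in the agent heartbeat no longer carries a securityState field. Assuming the Java setters above map one-to-one onto JSON field names, the remaining payload exercised by these tests looks roughly like:

    component_status = {
        "clusterName": "cluster1",     # illustrative values throughout
        "serviceName": "HDFS",
        "componentName": "DATANODE",
        "status": "STARTED",
        "message": "I am ok",
    }
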
http://git-wip-us.apache.org/repos/asf/ambari/blob/2a00812a/ambari-server/src/test/java/org/apache/ambari/server/agent/TestHeartbeatHandler.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/agent/TestHeartbeatHandler.java b/ambari-server/src/test/java/org/apache/ambari/server/agent/TestHeartbeatHandler.java
index 80775c3..909bf69 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/agent/TestHeartbeatHandler.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/agent/TestHeartbeatHandler.java
@@ -316,7 +316,6 @@ public class TestHeartbeatHandler {
     componentStatus1.setServiceName(HDFS);
     componentStatus1.setMessage(DummyHostStatus);
     componentStatus1.setStatus(State.STARTED.name());
-    componentStatus1.setSecurityState(SecurityState.UNSECURED.name());
     componentStatus1.setComponentName(DATANODE);
     componentStatuses.add(componentStatus1);
 
@@ -325,7 +324,6 @@ public class TestHeartbeatHandler {
     componentStatus2.setServiceName(HDFS);
     componentStatus2.setMessage(DummyHostStatus);
     componentStatus2.setStatus(State.INSTALLED.name());
-    componentStatus2.setSecurityState(SecurityState.UNSECURED.name());
     componentStatus2.setComponentName(NAMENODE);
     componentStatuses.add(componentStatus2);
 
@@ -1128,14 +1126,12 @@ public class TestHeartbeatHandler {
     dataNodeStatus.setServiceName(HDFS);
     dataNodeStatus.setComponentName(DATANODE);
     dataNodeStatus.setStatus("STARTED");
-    dataNodeStatus.setSecurityState(SecurityState.UNSECURED.name());
     componentStatus.add(dataNodeStatus);
     ComponentStatus nameNodeStatus = new ComponentStatus();
     nameNodeStatus.setClusterName(cluster.getClusterName());
     nameNodeStatus.setServiceName(HDFS);
     nameNodeStatus.setComponentName(NAMENODE);
     nameNodeStatus.setStatus("STARTED");
-    nameNodeStatus.setSecurityState(SecurityState.UNSECURED.name());
     componentStatus.add(nameNodeStatus);
     hb1.setComponentStatus(componentStatus);
     handler.handleHeartBeat(hb1);
@@ -1153,14 +1149,12 @@ public class TestHeartbeatHandler {
     dataNodeStatus.setServiceName(HDFS);
     dataNodeStatus.setComponentName(DATANODE);
     dataNodeStatus.setStatus("INSTALLED");
-    dataNodeStatus.setSecurityState(SecurityState.UNSECURED.name());
     componentStatus.add(dataNodeStatus);
     nameNodeStatus = new ComponentStatus();
     nameNodeStatus.setClusterName(cluster.getClusterName());
     nameNodeStatus.setServiceName(HDFS);
     nameNodeStatus.setComponentName(NAMENODE);
     nameNodeStatus.setStatus("STARTED");
-    nameNodeStatus.setSecurityState(SecurityState.UNSECURED.name());
     componentStatus.add(nameNodeStatus);
     hb2.setComponentStatus(componentStatus);
     handler.handleHeartBeat(hb2);
@@ -1180,14 +1174,12 @@ public class TestHeartbeatHandler {
     dataNodeStatus.setServiceName(HDFS);
     dataNodeStatus.setComponentName(DATANODE);
     dataNodeStatus.setStatus("INSTALLED");
-    dataNodeStatus.setSecurityState(SecurityState.UNSECURED.name());
     componentStatus.add(dataNodeStatus);
     nameNodeStatus = new ComponentStatus();
     nameNodeStatus.setClusterName(cluster.getClusterName());
     nameNodeStatus.setServiceName(HDFS);
     nameNodeStatus.setComponentName(NAMENODE);
     nameNodeStatus.setStatus("STARTED");
-    nameNodeStatus.setSecurityState(SecurityState.UNSECURED.name());
     componentStatus.add(nameNodeStatus);
     hb2a.setComponentStatus(componentStatus);
     handler.handleHeartBeat(hb2a);
@@ -1208,14 +1200,12 @@ public class TestHeartbeatHandler {
     dataNodeStatus.setServiceName(HDFS);
     dataNodeStatus.setComponentName(DATANODE);
     dataNodeStatus.setStatus("INSTALLED");
-    dataNodeStatus.setSecurityState(SecurityState.UNSECURED.name());
     componentStatus.add(dataNodeStatus);
     nameNodeStatus = new ComponentStatus();
     nameNodeStatus.setClusterName(cluster.getClusterName());
     nameNodeStatus.setServiceName(HDFS);
     nameNodeStatus.setComponentName(NAMENODE);
     nameNodeStatus.setStatus("INSTALLED");
-    nameNodeStatus.setSecurityState(SecurityState.UNSECURED.name());
     componentStatus.add(nameNodeStatus);
     hb3.setComponentStatus(componentStatus);
     handler.handleHeartBeat(hb3);
@@ -1247,7 +1237,6 @@ public class TestHeartbeatHandler {
     dataNodeStatus.setServiceName(HDFS);
     dataNodeStatus.setComponentName(DATANODE);
     dataNodeStatus.setStatus("STARTED");
-    dataNodeStatus.setSecurityState(SecurityState.UNSECURED.name());
     componentStatus.add(dataNodeStatus);
     hb4.setComponentStatus(componentStatus);
     handler.handleHeartBeat(hb4);
@@ -1405,7 +1394,6 @@ public class TestHeartbeatHandler {
     componentStatus1.setServiceName(serviceName);
     componentStatus1.setMessage(message);
     componentStatus1.setStatus(state.name());
-    componentStatus1.setSecurityState(securityState.name());
     componentStatus1.setComponentName(componentName);
     componentStatus1.setStackVersion(stackVersion);
     return componentStatus1;
@@ -1434,7 +1422,6 @@ public class TestHeartbeatHandler {
     componentStatus1.setServiceName(HDFS);
     componentStatus1.setMessage(DummyHostStatus);
     componentStatus1.setStatus(State.STARTED.name());
-    componentStatus1.setSecurityState(SecurityState.UNSECURED.name());
     componentStatus1.setComponentName(DATANODE);
 
     componentStatuses.add(componentStatus1);

http://git-wip-us.apache.org/repos/asf/ambari/blob/2a00812a/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_master.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_master.py b/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_master.py
index 42bc989..fda63e0 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_master.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_master.py
@@ -712,108 +712,6 @@ class TestHBaseMaster(RMFTestCase):
 
     self.assertNoMoreResources()
 
-  @patch("resource_management.libraries.functions.security_commons.build_expectations")
-  @patch("resource_management.libraries.functions.security_commons.get_params_from_filesystem")
-  @patch("resource_management.libraries.functions.security_commons.validate_security_config_properties")
-  @patch("resource_management.libraries.functions.security_commons.cached_kinit_executor")
-  @patch("resource_management.libraries.script.Script.put_structured_out")
-  def test_security_status(self, put_structured_out_mock, cached_kinit_executor_mock, validate_security_config_mock, get_params_mock, build_exp_mock):
-    # Test that the function works when it is called with correct parameters
-
-    security_params = {
-      'hbase-site': {
-        'hbase.master.kerberos.principal': 'hbase_principal',
-        'hbase.master.keytab.file': '/path/to/hbase_keytab'
-      }
-    }
-
-    result_issues = []
-    props_value_check = {"hbase.security.authentication": "kerberos",
-                           "hbase.security.authorization": "true"}
-    props_empty_check = ["hbase.master.keytab.file",
-                           "hbase.master.kerberos.principal"]
-
-    props_read_check = ["hbase.master.keytab.file"]
-
-    get_params_mock.return_value = security_params
-    validate_security_config_mock.return_value = result_issues
-
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hbase_master.py",
-                   classname = "HbaseMaster",
-                   command = "security_status",
-                   config_file="secured.json",
-                   stack_version = self.STACK_VERSION,
-                   target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-
-    build_exp_mock.assert_called_with('hbase-site', props_value_check, props_empty_check, props_read_check)
-    put_structured_out_mock.assert_called_with({"securityState": "SECURED_KERBEROS"})
-    cached_kinit_executor_mock.assert_called_with('/usr/bin/kinit',
-                                           self.config_dict['configurations']['hbase-env']['hbase_user'],
-                                           security_params['hbase-site']['hbase.master.keytab.file'],
-                                           security_params['hbase-site']['hbase.master.kerberos.principal'],
-                                           self.config_dict['hostname'],
-                                           '/tmp')
-
-    # Testing that the exception thrown by cached_kinit_executor is caught
-    cached_kinit_executor_mock.reset_mock()
-    cached_kinit_executor_mock.side_effect = Exception("Invalid command")
-
-    try:
-      self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hbase_master.py",
-                   classname = "HbaseMaster",
-                   command = "security_status",
-                   config_file="secured.json",
-                   stack_version = self.STACK_VERSION,
-                   target = RMFTestCase.TARGET_COMMON_SERVICES
-      )
-    except:
-      self.assertTrue(True)
-
-    # Testing with a security_params which doesn't contain hbase-site
-    empty_security_params = {}
-    cached_kinit_executor_mock.reset_mock()
-    get_params_mock.reset_mock()
-    put_structured_out_mock.reset_mock()
-    get_params_mock.return_value = empty_security_params
-
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hbase_master.py",
-                   classname = "HbaseMaster",
-                   command = "security_status",
-                   config_file="secured.json",
-                   stack_version = self.STACK_VERSION,
-                   target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-    put_structured_out_mock.assert_called_with({"securityIssuesFound": "Keytab file or principal are not set property."})
-
-    # Testing with a non-empty result_issues
-    result_issues_with_params = {}
-    result_issues_with_params['hbase-site'] = "Something bad happened"
-
-    validate_security_config_mock.reset_mock()
-    get_params_mock.reset_mock()
-    validate_security_config_mock.return_value = result_issues_with_params
-    get_params_mock.return_value = security_params
-
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hbase_master.py",
-                   classname = "HbaseMaster",
-                   command = "security_status",
-                   config_file="secured.json",
-                   stack_version = self.STACK_VERSION,
-                   target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-    put_structured_out_mock.assert_called_with({"securityState": "UNSECURED"})
-
-    # Testing with security_enabled = false
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hbase_master.py",
-                   classname = "HbaseMaster",
-                   command = "security_status",
-                   config_file="default.json",
-                   stack_version = self.STACK_VERSION,
-                   target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-    put_structured_out_mock.assert_called_with({"securityState": "UNSECURED"})
-
   def test_upgrade_backup(self):
     self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hbase_upgrade.py",
                    classname = "HbaseMasterUpgrade",

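The deleted Python tests in the rest of this commit all share one skeleton: a stack of mock.patch decorators over the security_commons helpers plus Script.put_structured_out. Since decorators apply bottom-up, the patch closest to the function arrives as the first mock argument. A trimmed sketch of that skeleton, runnable only where resource_management is importable as in the Ambari test tree (the class and method names here are placeholders, not RMFTestCase API):

    from unittest import TestCase
    from mock import patch   # the deleted tests appear to use the mock package

    class SecurityStatusSkeleton(TestCase):

        @patch("resource_management.libraries.functions.security_commons.cached_kinit_executor")
        @patch("resource_management.libraries.script.Script.put_structured_out")
        def test_patch_ordering(self, put_structured_out_mock, cached_kinit_executor_mock):
            # The bottom decorator is applied first, so put_structured_out
            # arrives as the first mock argument, matching the tests above.
            cached_kinit_executor_mock.side_effect = Exception("Invalid command")
            with self.assertRaises(Exception):
                cached_kinit_executor_mock()   # the wired-in failure fires
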
http://git-wip-us.apache.org/repos/asf/ambari/blob/2a00812a/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_regionserver.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_regionserver.py b/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_regionserver.py
index 9bb0dd7..93f5d19 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_regionserver.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_regionserver.py
@@ -530,110 +530,6 @@ class TestHbaseRegionServer(RMFTestCase):
 
     self.assertNoMoreResources()
 
-
-  @patch("resource_management.libraries.functions.security_commons.build_expectations")
-  @patch("resource_management.libraries.functions.security_commons.get_params_from_filesystem")
-  @patch("resource_management.libraries.functions.security_commons.validate_security_config_properties")
-  @patch("resource_management.libraries.functions.security_commons.cached_kinit_executor")
-  @patch("resource_management.libraries.script.Script.put_structured_out")
-  def test_security_status(self, put_structured_out_mock, cached_kinit_executor_mock, validate_security_config_mock, get_params_mock, build_exp_mock):
-    # Test that the function works when it is called with correct parameters
-
-    security_params = {
-      'hbase-site': {
-        'hbase.regionserver.keytab.file': '/path/to/hbase_keytab',
-        'hbase.regionserver.kerberos.principal': 'hbase_principal'
-      }
-    }
-
-    result_issues = []
-    props_value_check = {"hbase.security.authentication": "kerberos",
-                           "hbase.security.authorization": "true"}
-    props_empty_check = ["hbase.regionserver.keytab.file",
-                           "hbase.regionserver.kerberos.principal"]
-
-    props_read_check = ["hbase.regionserver.keytab.file"]
-
-    get_params_mock.return_value = security_params
-    validate_security_config_mock.return_value = result_issues
-
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hbase_regionserver.py",
-                   classname = "HbaseRegionServer",
-                   command = "security_status",
-                   config_file="secured.json",
-                   stack_version = self.STACK_VERSION,
-                   target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-
-    build_exp_mock.assert_called_with('hbase-site', props_value_check, props_empty_check, props_read_check)
-    put_structured_out_mock.assert_called_with({"securityState": "SECURED_KERBEROS"})
-    cached_kinit_executor_mock.assert_called_with('/usr/bin/kinit',
-                                           self.config_dict['configurations']['hbase-env']['hbase_user'],
-                                           security_params['hbase-site']['hbase.regionserver.keytab.file'],
-                                           security_params['hbase-site']['hbase.regionserver.kerberos.principal'],
-                                           self.config_dict['hostname'],
-                                           '/tmp')
-
-    # Testing that the exception thrown by cached_kinit_executor is caught
-    cached_kinit_executor_mock.reset_mock()
-    cached_kinit_executor_mock.side_effect = Exception("Invalid command")
-
-    try:
-      self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hbase_regionserver.py",
-                   classname = "HbaseRegionServer",
-                   command = "security_status",
-                   config_file="secured.json",
-                   stack_version = self.STACK_VERSION,
-                   target = RMFTestCase.TARGET_COMMON_SERVICES
-      )
-    except:
-      self.assertTrue(True)
-
-    # Testing with a security_params which doesn't contain hbase-site
-    empty_security_params = {}
-    cached_kinit_executor_mock.reset_mock()
-    get_params_mock.reset_mock()
-    put_structured_out_mock.reset_mock()
-    get_params_mock.return_value = empty_security_params
-
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hbase_regionserver.py",
-                   classname = "HbaseRegionServer",
-                   command = "security_status",
-                   config_file="secured.json",
-                   stack_version = self.STACK_VERSION,
-                   target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-    put_structured_out_mock.assert_called_with({"securityIssuesFound": "Keytab file or principal are not set property."})
-
-    # Testing with a non-empty result_issues
-    result_issues_with_params = {
-      'hbase-site' : "Something bad happened"
-    }
-
-    validate_security_config_mock.reset_mock()
-    get_params_mock.reset_mock()
-    validate_security_config_mock.return_value = result_issues_with_params
-    get_params_mock.return_value = security_params
-
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hbase_regionserver.py",
-                   classname = "HbaseRegionServer",
-                   command = "security_status",
-                   config_file="secured.json",
-                   stack_version = self.STACK_VERSION,
-                   target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-    put_structured_out_mock.assert_called_with({"securityState": "UNSECURED"})
-
-    # Testing with security_enabled = false
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hbase_regionserver.py",
-                   classname = "HbaseRegionServer",
-                   command = "security_status",
-                   config_file="default.json",
-                   stack_version = self.STACK_VERSION,
-                   target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-    put_structured_out_mock.assert_called_with({"securityState": "UNSECURED"})
-
   def test_pre_upgrade_restart(self):
     config_file = self.get_src_folder()+"/test/python/stacks/2.0.6/configs/default.json"
     with open(config_file, "r") as f:

http://git-wip-us.apache.org/repos/asf/ambari/blob/2a00812a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_datanode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_datanode.py b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_datanode.py
index 2cd35ab..5702b57 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_datanode.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_datanode.py
@@ -661,114 +661,3 @@ class TestDatanode(RMFTestCase):
     self.assertEquals(
       ('hdfs dfsadmin -fs hdfs://ns1 -D ipc.client.connect.max.retries=5 -D ipc.client.connect.retry.interval=1000 -getDatanodeInfo 0.0.0.0:8010'),
       mocks_dict['checked_call'].call_args_list[0][0][0])
-
-  @patch("resource_management.libraries.functions.security_commons.build_expectations")
-  @patch("resource_management.libraries.functions.security_commons.get_params_from_filesystem")
-  @patch("resource_management.libraries.functions.security_commons.validate_security_config_properties")
-  @patch("resource_management.libraries.functions.security_commons.cached_kinit_executor")
-  @patch("resource_management.libraries.script.Script.put_structured_out")
-  def test_security_status(self, put_structured_out_mock, cached_kinit_executor_mock, validate_security_config_mock, get_params_mock, build_exp_mock):
-    # Test that the function works when it is called with correct parameters
-
-    security_params = {
-      'core-site': {
-        'hadoop.security.authentication': 'kerberos'
-      },
-      'hdfs-site': {
-        'dfs.datanode.keytab.file': 'path/to/datanode/keytab/file',
-        'dfs.datanode.kerberos.principal': 'datanode_principal'
-      }
-    }
-
-    props_value_check = None
-    props_empty_check = ['dfs.datanode.keytab.file',
-                         'dfs.datanode.kerberos.principal']
-    props_read_check = ['dfs.datanode.keytab.file']
-
-    result_issues = []
-
-    get_params_mock.return_value = security_params
-    validate_security_config_mock.return_value = result_issues
-
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/datanode.py",
-                       classname = "DataNode",
-                       command = "security_status",
-                       config_file="secured.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-
-    build_exp_mock.assert_called_with('hdfs-site', props_value_check, props_empty_check, props_read_check)
-    put_structured_out_mock.assert_called_with({"securityState": "SECURED_KERBEROS"})
-    cached_kinit_executor_mock.assert_called_with('/usr/bin/kinit',
-                                           self.config_dict['configurations']['hadoop-env']['hdfs_user'],
-                                           security_params['hdfs-site']['dfs.datanode.keytab.file'],
-                                           security_params['hdfs-site']['dfs.datanode.kerberos.principal'],
-                                           self.config_dict['hostname'],
-                                           '/tmp')
-
-    # Testing when hadoop.security.authentication is simple
-    security_params['core-site']['hadoop.security.authentication'] = 'simple'
-
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/datanode.py",
-                       classname = "DataNode",
-                       command = "security_status",
-                       config_file="secured.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-
-    put_structured_out_mock.assert_called_with({"securityState": "UNSECURED"})
-    security_params['core-site']['hadoop.security.authentication'] = 'kerberos'
-
-    # Testing that the exception thrown by cached_kinit_executor is caught
-    cached_kinit_executor_mock.reset_mock()
-    cached_kinit_executor_mock.side_effect = Exception("Invalid command")
-
-    try:
-      self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/datanode.py",
-                         classname = "DataNode",
-                         command = "security_status",
-                         config_file="secured.json",
-                         stack_version = self.STACK_VERSION,
-                         target = RMFTestCase.TARGET_COMMON_SERVICES
-      )
-    except:
-      self.assertTrue(True)
-
-    # Testing with a security_params which doesn't contain hdfs-site
-    empty_security_params = {}
-    empty_security_params['core-site'] = {}
-    empty_security_params['core-site']['hadoop.security.authentication'] = 'kerberos'
-    cached_kinit_executor_mock.reset_mock()
-    get_params_mock.reset_mock()
-    put_structured_out_mock.reset_mock()
-    get_params_mock.return_value = empty_security_params
-
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/datanode.py",
-                       classname = "DataNode",
-                       command = "security_status",
-                       config_file="secured.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-
-    put_structured_out_mock.assert_called_with({"securityIssuesFound": "Keytab file or principal are not set property."})
-
-    # Testing with a non-empty result_issues
-    result_issues_with_params = {}
-    result_issues_with_params['hdfs-site'] = "Something bad happened"
-
-    validate_security_config_mock.reset_mock()
-    get_params_mock.reset_mock()
-    validate_security_config_mock.return_value = result_issues_with_params
-    get_params_mock.return_value = security_params
-
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/datanode.py",
-                       classname = "DataNode",
-                       command = "security_status",
-                       config_file="secured.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-    put_structured_out_mock.assert_called_with({"securityState": "UNSECURED"})

http://git-wip-us.apache.org/repos/asf/ambari/blob/2a00812a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_hdfs_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_hdfs_client.py b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_hdfs_client.py
index 85098fa..fc80849 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_hdfs_client.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_hdfs_client.py
@@ -88,106 +88,6 @@ class Test(RMFTestCase):
 
     # for now, it's enough that <stack-selector-tool> is confirmed
 
-  @patch("resource_management.libraries.functions.security_commons.build_expectations")
-  @patch("resource_management.libraries.functions.security_commons.get_params_from_filesystem")
-  @patch("resource_management.libraries.functions.security_commons.validate_security_config_properties")
-  @patch("resource_management.libraries.functions.security_commons.cached_kinit_executor")
-  @patch("resource_management.libraries.script.Script.put_structured_out")
-  def test_security_status(self, put_structured_out_mock, cached_kinit_executor_mock, validate_security_config_mock, get_params_mock, build_exp_mock):
-    # Test that the function works when it is called with correct parameters
-
-    security_params = {
-      'core-site': {
-        'hadoop.security.authentication': 'kerberos'
-      }
-    }
-
-    props_value_check = {"hadoop.security.authentication": "kerberos",
-                         "hadoop.security.authorization": "true"}
-    props_empty_check = ["hadoop.security.auth_to_local"]
-    props_read_check = None
-
-    result_issues = []
-
-    get_params_mock.return_value = security_params
-    validate_security_config_mock.return_value = result_issues
-
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hdfs_client.py",
-                       classname = "HdfsClient",
-                       command = "security_status",
-                       config_file="secured.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-
-    build_exp_mock.assert_called_with('core-site', props_value_check, props_empty_check, props_read_check)
-    put_structured_out_mock.assert_called_with({"securityState": "SECURED_KERBEROS"})
-    cached_kinit_executor_mock.assert_called_with('/usr/bin/kinit',
-                                           self.config_dict['configurations']['hadoop-env']['hdfs_user'],
-                                           self.config_dict['configurations']['hadoop-env']['hdfs_user_keytab'],
-                                           self.config_dict['configurations']['hadoop-env']['hdfs_user_principal_name'],
-                                           self.config_dict['hostname'],
-                                           '/tmp')
-
-    # Testing that the exception thrown by cached_kinit_executor is caught
-    cached_kinit_executor_mock.reset_mock()
-    cached_kinit_executor_mock.side_effect = Exception("Invalid command")
-
-    try:
-      self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hdfs_client.py",
-                         classname = "HdfsClient",
-                         command = "security_status",
-                         config_file="secured.json",
-                         stack_version = self.STACK_VERSION,
-                         target = RMFTestCase.TARGET_COMMON_SERVICES
-      )
-    except:
-      self.assertTrue(True)
-
-    # Testing when hadoop.security.authentication is simple
-    security_params['core-site']['hadoop.security.authentication'] = 'simple'
-
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hdfs_client.py",
-                       classname = "HdfsClient",
-                       command = "security_status",
-                       config_file="secured.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-
-    put_structured_out_mock.assert_called_with({"securityState": "UNSECURED"})
-    security_params['core-site']['hadoop.security.authentication'] = 'kerberos'
-
-    # Testing with a non-empty result_issues
-    result_issues_with_params = {
-      'hdfs-site': "Something bad happened"
-    }
-
-    validate_security_config_mock.reset_mock()
-    get_params_mock.reset_mock()
-    validate_security_config_mock.return_value = result_issues_with_params
-    get_params_mock.return_value = security_params
-
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hdfs_client.py",
-                       classname = "HdfsClient",
-                       command = "security_status",
-                       config_file="secured.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-    put_structured_out_mock.assert_called_with({"securityState": "UNSECURED"})
-
-    # Testing with empty hdfs_user_principal and hdfs_user_keytab
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hdfs_client.py",
-                       classname = "HdfsClient",
-                       command = "security_status",
-                       config_file="default.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-    put_structured_out_mock.assert_called_with({"securityState": "UNSECURED"})
-
-
   @patch("resource_management.core.shell.call")
   def test_pre_upgrade_restart_23(self, call_mock):
     config_file = self.get_src_folder()+"/test/python/stacks/2.0.6/configs/default.json"

http://git-wip-us.apache.org/repos/asf/ambari/blob/2a00812a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_journalnode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_journalnode.py b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_journalnode.py
index 4b63de4..2202661 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_journalnode.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_journalnode.py
@@ -369,120 +369,6 @@ class TestJournalnode(RMFTestCase):
     except:
       pass
 
-  @patch("resource_management.libraries.functions.security_commons.build_expectations")
-  @patch("resource_management.libraries.functions.security_commons.get_params_from_filesystem")
-  @patch("resource_management.libraries.functions.security_commons.validate_security_config_properties")
-  @patch("resource_management.libraries.functions.security_commons.cached_kinit_executor")
-  @patch("resource_management.libraries.script.Script.put_structured_out")
-  def test_security_status(self, put_structured_out_mock, cached_kinit_executor_mock, validate_security_config_mock, get_params_mock, build_exp_mock):
-    # Test that the function works when it is called with correct parameters
-    security_params = {
-      'core-site': {
-        'hadoop.security.authentication': 'kerberos'
-      },
-      'hdfs-site': {
-        'dfs.journalnode.kerberos.keytab.file': 'path/to/journalnode/keytab/file',
-        'dfs.journalnode.kerberos.principal': 'journalnode_principal'
-      }
-    }
-
-    props_value_check = None
-    props_empty_check = ['dfs.journalnode.keytab.file',
-                         'dfs.journalnode.kerberos.principal']
-    props_read_check = ['dfs.journalnode.keytab.file']
-
-    result_issues = []
-
-    get_params_mock.return_value = security_params
-    validate_security_config_mock.return_value = result_issues
-
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/journalnode.py",
-                       classname = "JournalNode",
-                       command = "security_status",
-                       config_file="secured.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-
-    build_exp_mock.assert_called_with('hdfs-site', props_value_check, props_empty_check, props_read_check)
-    put_structured_out_mock.assert_called_with({"securityState": "SECURED_KERBEROS"})
-    cached_kinit_executor_mock.called_with('/usr/bin/kinit',
-                                           self.config_dict['configurations']['hadoop-env']['hdfs_user'],
-                                           security_params['hdfs-site']['dfs.journalnode.kerberos.keytab.file'],
-                                           security_params['hdfs-site']['dfs.journalnode.kerberos.principal'],
-                                           self.config_dict['hostname'],
-                                           '/tmp')
-
-    # Testing when hadoop.security.authentication is simple
-    security_params['core-site']['hadoop.security.authentication'] = 'simple'
-
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/journalnode.py",
-                       classname = "JournalNode",
-                       command = "security_status",
-                       config_file="secured.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-
-    put_structured_out_mock.assert_called_with({"securityState": "UNSECURED"})
-    security_params['core-site']['hadoop.security.authentication'] = 'kerberos'
-
-    # Testing that the exception throw by cached_executor is caught
-    cached_kinit_executor_mock.reset_mock()
-    cached_kinit_executor_mock.side_effect = Exception("Invalid command")
-
-    try:
-      self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/journalnode.py",
-                         classname = "JournalNode",
-                         command = "security_status",
-                         config_file="secured.json",
-                         stack_version = self.STACK_VERSION,
-                         target = RMFTestCase.TARGET_COMMON_SERVICES
-      )
-    except:
-      self.assertTrue(True)
-
-    # Testing with a security_params which doesn't contains hdfs-site
-    empty_security_params = {
-      'core-site': {
-        'hadoop.security.authentication': 'kerberos'
-      }
-    }
-    cached_kinit_executor_mock.reset_mock()
-    get_params_mock.reset_mock()
-    put_structured_out_mock.reset_mock()
-    get_params_mock.return_value = empty_security_params
-
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/journalnode.py",
-                       classname = "JournalNode",
-                       command = "security_status",
-                       config_file="secured.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-
-    put_structured_out_mock.assert_called_with({"securityIssuesFound": "Keytab file or principal are not set property."})
-
-    # Testing with not empty result_issues
-    result_issues_with_params = {
-      'hdfs-site': "Something bad happened"
-    }
-
-    validate_security_config_mock.reset_mock()
-    get_params_mock.reset_mock()
-    validate_security_config_mock.return_value = result_issues_with_params
-    get_params_mock.return_value = security_params
-
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/journalnode.py",
-                       classname = "JournalNode",
-                       command = "security_status",
-                       config_file="secured.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-    put_structured_out_mock.assert_called_with({"securityState": "UNSECURED"})
-
-
   def test_pre_upgrade_restart(self):
     config_file = self.get_src_folder()+"/test/python/stacks/2.0.6/configs/default.json"
     with open(config_file, "r") as f:

http://git-wip-us.apache.org/repos/asf/ambari/blob/2a00812a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_namenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_namenode.py b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_namenode.py
index a6a474a..01149fb 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_namenode.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_namenode.py
@@ -1277,120 +1277,6 @@ class TestNamenode(RMFTestCase):
 
     self.assertTrue(isfile_mock.called)
 
-  @patch("resource_management.libraries.functions.security_commons.build_expectations")
-  @patch("resource_management.libraries.functions.security_commons.get_params_from_filesystem")
-  @patch("resource_management.libraries.functions.security_commons.validate_security_config_properties")
-  @patch("resource_management.libraries.functions.security_commons.cached_kinit_executor")
-  @patch("resource_management.libraries.script.Script.put_structured_out")
-  def test_security_status(self, put_structured_out_mock, cached_kinit_executor_mock, validate_security_config_mock, get_params_mock, build_exp_mock):
-    # Test that function works when is called with correct parameters
-
-    security_params = {
-      'core-site': {
-        'hadoop.security.authentication': 'kerberos'
-      },
-      'hdfs-site': {
-        'dfs.namenode.keytab.file': 'path/to/namenode/keytab/file',
-        'dfs.namenode.kerberos.principal': 'namenode_principal'
-      }
-    }
-    props_value_check = None
-    props_empty_check = ['dfs.namenode.kerberos.internal.spnego.principal',
-                       'dfs.namenode.keytab.file',
-                       'dfs.namenode.kerberos.principal']
-    props_read_check = ['dfs.namenode.keytab.file']
-
-    result_issues = []
-
-    get_params_mock.return_value = security_params
-    validate_security_config_mock.return_value = result_issues
-
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/namenode.py",
-                       classname = "NameNode",
-                       command = "security_status",
-                       config_file="secured.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-
-    build_exp_mock.assert_called_with('hdfs-site', props_value_check, props_empty_check, props_read_check)
-    put_structured_out_mock.assert_called_with({"securityState": "SECURED_KERBEROS"})
-    cached_kinit_executor_mock.called_with('/usr/bin/kinit',
-                                           self.config_dict['configurations']['hadoop-env']['hdfs_user'],
-                                           security_params['hdfs-site']['dfs.namenode.keytab.file'],
-                                           security_params['hdfs-site']['dfs.namenode.kerberos.principal'],
-                                           self.config_dict['hostname'],
-                                           '/tmp')
-
-    # Testing when hadoop.security.authentication is simple
-    security_params['core-site']['hadoop.security.authentication'] = 'simple'
-
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/namenode.py",
-                       classname = "NameNode",
-                       command = "security_status",
-                       config_file="secured.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-
-    put_structured_out_mock.assert_called_with({"securityState": "UNSECURED"})
-    security_params['core-site']['hadoop.security.authentication'] = 'kerberos'
-
-    # Testing that the exception throw by cached_executor is caught
-    cached_kinit_executor_mock.reset_mock()
-    cached_kinit_executor_mock.side_effect = Exception("Invalid command")
-
-    try:
-      self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/namenode.py",
-                         classname = "NameNode",
-                         command = "security_status",
-                         config_file="secured.json",
-                         stack_version = self.STACK_VERSION,
-                         target = RMFTestCase.TARGET_COMMON_SERVICES
-      )
-    except:
-      self.assertTrue(True)
-
-    # Testing with a security_params which doesn't contains hdfs-site
-    empty_security_params = {
-      'core-site': {
-        'hadoop.security.authentication': 'kerberos'
-      }
-    }
-    cached_kinit_executor_mock.reset_mock()
-    get_params_mock.reset_mock()
-    put_structured_out_mock.reset_mock()
-    get_params_mock.return_value = empty_security_params
-
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/namenode.py",
-                       classname = "NameNode",
-                       command = "security_status",
-                       config_file="secured.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-
-    put_structured_out_mock.assert_called_with({"securityIssuesFound": "Keytab file or principal are not set property."})
-
-    # Testing with not empty result_issues
-    result_issues_with_params = {
-      'hdfs-site': "Something bad happened"
-    }
-
-    validate_security_config_mock.reset_mock()
-    get_params_mock.reset_mock()
-    validate_security_config_mock.return_value = result_issues_with_params
-    get_params_mock.return_value = security_params
-
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/namenode.py",
-                       classname = "NameNode",
-                       command = "security_status",
-                       config_file="secured.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-    put_structured_out_mock.assert_called_with({"securityState": "UNSECURED"})
-
   @patch.object(time, "sleep")
   @patch("resource_management.libraries.functions.namenode_ha_utils.get_namenode_states")
   def test_upgrade_restart(self, get_namenode_states_mock, sleep_mock):

http://git-wip-us.apache.org/repos/asf/ambari/blob/2a00812a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_nfsgateway.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_nfsgateway.py b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_nfsgateway.py
index 396778d..b8fee12 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_nfsgateway.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_nfsgateway.py
@@ -267,122 +267,6 @@ class TestNFSGateway(RMFTestCase):
         group = 'hadoop',
     )
 
-
-
-  @patch("resource_management.libraries.functions.security_commons.build_expectations")
-  @patch("resource_management.libraries.functions.security_commons.get_params_from_filesystem")
-  @patch("resource_management.libraries.functions.security_commons.validate_security_config_properties")
-  @patch("resource_management.libraries.functions.security_commons.cached_kinit_executor")
-  @patch("resource_management.libraries.script.Script.put_structured_out")
-  def test_security_status(self, put_structured_out_mock, cached_kinit_executor_mock, validate_security_config_mock, get_params_mock, build_exp_mock):
-    # Test that function works when is called with correct parameters
-    security_params = {
-      'core-site': {
-        'hadoop.security.authentication': 'kerberos'
-      },
-      'hdfs-site': {
-        'nfs.keytab.file': 'path/to/nfsgateway/keytab/file',
-        'nfs.kerberos.principal': 'nfs_principal'
-      }
-    }
-
-    props_value_check = None
-    props_empty_check = ['nfs.keytab.file',
-                         'nfs.kerberos.principal']
-    props_read_check = ['nfs.keytab.file']
-
-    result_issues = []
-
-    get_params_mock.return_value = security_params
-    validate_security_config_mock.return_value = result_issues
-
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/nfsgateway.py",
-                       classname = "NFSGateway",
-                       command = "security_status",
-                       config_file="secured.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-
-    build_exp_mock.assert_called_with('hdfs-site', props_value_check, props_empty_check, props_read_check)
-    put_structured_out_mock.assert_called_with({"securityState": "SECURED_KERBEROS"})
-    cached_kinit_executor_mock.called_with('/usr/bin/kinit',
-                                           self.config_dict['configurations']['hadoop-env']['hdfs_user'],
-                                           security_params['hdfs-site']['nfs.keytab.file'],
-                                           security_params['hdfs-site']['nfs.kerberos.principal'],
-                                           self.config_dict['hostname'],
-                                           '/tmp')
-
-    # Testing when hadoop.security.authentication is simple
-    security_params['core-site']['hadoop.security.authentication'] = 'simple'
-
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/nfsgateway.py",
-                       classname = "NFSGateway",
-                       command = "security_status",
-                       config_file="secured.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-
-    put_structured_out_mock.assert_called_with({"securityState": "UNSECURED"})
-    security_params['core-site']['hadoop.security.authentication'] = 'kerberos'
-
-    # Testing that the exception throw by cached_executor is caught
-    cached_kinit_executor_mock.reset_mock()
-    cached_kinit_executor_mock.side_effect = Exception("Invalid command")
-
-    try:
-      self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/nfsgateway.py",
-                         classname = "NFSGateway",
-                         command = "security_status",
-                         config_file="secured.json",
-                         stack_version = self.STACK_VERSION,
-                         target = RMFTestCase.TARGET_COMMON_SERVICES
-      )
-    except:
-      self.assertTrue(True)
-
-    # Testing with a security_params which doesn't contains hdfs-site
-    empty_security_params = {
-      'core-site': {
-        'hadoop.security.authentication': 'kerberos'
-      }
-    }
-    cached_kinit_executor_mock.reset_mock()
-    get_params_mock.reset_mock()
-    put_structured_out_mock.reset_mock()
-    get_params_mock.return_value = empty_security_params
-
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/nfsgateway.py",
-                       classname = "NFSGateway",
-                       command = "security_status",
-                       config_file="secured.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-
-    put_structured_out_mock.assert_called_with({"securityIssuesFound": "Keytab file or principal are not set property."})
-
-    # Testing with not empty result_issues
-    result_issues_with_params = {
-      'hdfs-site': "Something bad happened"
-    }
-
-    validate_security_config_mock.reset_mock()
-    get_params_mock.reset_mock()
-    validate_security_config_mock.return_value = result_issues_with_params
-    get_params_mock.return_value = security_params
-
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/nfsgateway.py",
-                       classname = "NFSGateway",
-                       command = "security_status",
-                       config_file="secured.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-    put_structured_out_mock.assert_called_with({"securityState": "UNSECURED"})
-    self.assertNoMoreResources()
-
   @patch("resource_management.core.shell.call")
   def test_pre_upgrade_restart(self, call_mock):
     call_mock.side_effects = [(0, None), (0, None)]

http://git-wip-us.apache.org/repos/asf/ambari/blob/2a00812a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_snamenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_snamenode.py b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_snamenode.py
index 7b9dcb4..9e9366d 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_snamenode.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_snamenode.py
@@ -274,119 +274,4 @@ class TestSNamenode(RMFTestCase):
                               mode = 0755,
                               create_parents = True,
                               cd_access='a'
-                              )
-
-  @patch("resource_management.libraries.functions.security_commons.build_expectations")
-  @patch("resource_management.libraries.functions.security_commons.get_params_from_filesystem")
-  @patch("resource_management.libraries.functions.security_commons.validate_security_config_properties")
-  @patch("resource_management.libraries.functions.security_commons.cached_kinit_executor")
-  @patch("resource_management.libraries.script.Script.put_structured_out")
-  def test_security_status(self, put_structured_out_mock, cached_kinit_executor_mock, validate_security_config_mock, get_params_mock, build_exp_mock):
-    # Test that function works when is called with correct parameters
-
-    security_params = {
-      'core-site': {
-        'hadoop.security.authentication': 'kerberos'
-      },
-      'hdfs-site': {
-        'dfs.secondary.namenode.keytab.file': 'path/to/snamenode/keytab/file',
-        'dfs.secondary.namenode.kerberos.principal': 'snamenode_principal'
-      }
-    }
-
-    props_value_check = None
-    props_empty_check = ['dfs.secondary.namenode.kerberos.internal.spnego.principal',
-                         'dfs.secondary.namenode.keytab.file',
-                         'dfs.secondary.namenode.kerberos.principal']
-    props_read_check = ['dfs.secondary.namenode.keytab.file']
-
-    result_issues = []
-
-    get_params_mock.return_value = security_params
-    validate_security_config_mock.return_value = result_issues
-
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/snamenode.py",
-                       classname = "SNameNode",
-                       command = "security_status",
-                       config_file="secured.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-
-    build_exp_mock.assert_called_with('hdfs-site', props_value_check, props_empty_check, props_read_check)
-    put_structured_out_mock.assert_called_with({"securityState": "SECURED_KERBEROS"})
-    cached_kinit_executor_mock.called_with('/usr/bin/kinit',
-                                           self.config_dict['configurations']['hadoop-env']['hdfs_user'],
-                                           security_params['hdfs-site']['dfs.secondary.namenode.keytab.file'],
-                                           security_params['hdfs-site']['dfs.secondary.namenode.kerberos.principal'],
-                                           self.config_dict['hostname'],
-                                           '/tmp')
-
-    # Testing when hadoop.security.authentication is simple
-    security_params['core-site']['hadoop.security.authentication'] = 'simple'
-
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/snamenode.py",
-                       classname = "SNameNode",
-                       command = "security_status",
-                       config_file="secured.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-
-    put_structured_out_mock.assert_called_with({"securityState": "UNSECURED"})
-    security_params['core-site']['hadoop.security.authentication'] = 'kerberos'
-
-    # Testing that the exception throw by cached_executor is caught
-    cached_kinit_executor_mock.reset_mock()
-    cached_kinit_executor_mock.side_effect = Exception("Invalid command")
-
-    try:
-      self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/snamenode.py",
-                         classname = "SNameNode",
-                         command = "security_status",
-                         config_file="secured.json",
-                         stack_version = self.STACK_VERSION,
-                         target = RMFTestCase.TARGET_COMMON_SERVICES
-      )
-    except:
-      self.assertTrue(True)
-
-    # Testing with a security_params which doesn't contains hdfs-site
-    empty_security_params = {
-      'core-site': {
-        'hadoop.security.authentication': 'kerberos'
-      }
-    }
-    cached_kinit_executor_mock.reset_mock()
-    get_params_mock.reset_mock()
-    put_structured_out_mock.reset_mock()
-    get_params_mock.return_value = empty_security_params
-
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/snamenode.py",
-                       classname = "SNameNode",
-                       command = "security_status",
-                       config_file="secured.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-
-    put_structured_out_mock.assert_called_with({"securityIssuesFound": "Keytab file or principal are not set property."})
-
-    # Testing with not empty result_issues
-    result_issues_with_params = {
-      'hdfs-site': "Something bad happened"
-    }
-
-    validate_security_config_mock.reset_mock()
-    get_params_mock.reset_mock()
-    validate_security_config_mock.return_value = result_issues_with_params
-    get_params_mock.return_value = security_params
-
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/snamenode.py",
-                       classname = "SNameNode",
-                       command = "security_status",
-                       config_file="secured.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-    put_structured_out_mock.assert_called_with({"securityState": "UNSECURED"})
\ No newline at end of file
+                              )
\ No newline at end of file


[06/50] [abbrv] ambari git commit: AMBARI-19149. Code cleanup: unresolved references in javadoc

Posted by ao...@apache.org.
AMBARI-19149. Code cleanup: unresolved references in javadoc


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/893f97e4
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/893f97e4
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/893f97e4

Branch: refs/heads/branch-3.0-perf
Commit: 893f97e437990a154229d7545c2a6797defd714c
Parents: b95dcc3
Author: Attila Doroszlai <ad...@hortonworks.com>
Authored: Wed Apr 12 15:22:06 2017 +0200
Committer: Andrew Onishuk <ao...@hortonworks.com>
Committed: Mon Apr 24 14:59:56 2017 +0300

----------------------------------------------------------------------
 .../ambari/server/actionmanager/Stage.java      |  2 +-
 .../ambari/server/agent/HeartBeatResponse.java  |  3 --
 .../ambari/server/api/query/JpaSortBuilder.java |  2 +-
 .../RequestScheduleResourceDefinition.java      |  6 +--
 .../server/api/rest/BootStrapResource.java      |  2 +-
 .../api/services/AlertDefinitionService.java    |  3 --
 .../ambari/server/checks/ServicesUpCheck.java   |  2 -
 .../server/configuration/Configuration.java     | 52 +++-----------------
 .../internal/AbstractProviderModule.java        |  2 +-
 .../internal/UpgradeResourceProvider.java       |  5 +-
 .../VersionDefinitionResourceProvider.java      |  1 -
 .../events/AlertDefinitionDisabledEvent.java    |  2 +-
 .../EntityManagerCacheInvalidationEvent.java    |  5 --
 .../listeners/alerts/AlertReceivedListener.java |  2 +-
 .../apache/ambari/server/orm/DBAccessor.java    |  3 +-
 .../ambari/server/orm/dao/ExtensionDAO.java     |  2 +-
 .../ambari/server/orm/dao/ExtensionLinkDAO.java |  3 +-
 .../ambari/server/orm/dao/RequestDAO.java       |  4 +-
 .../orm/entities/AlertDefinitionEntity.java     |  2 +-
 .../server/orm/entities/UpgradeEntity.java      |  3 --
 .../server/orm/entities/UpgradeGroupEntity.java |  2 +-
 .../server/orm/entities/ViewURLEntity.java      |  1 -
 .../server/orm/helpers/dbms/DbmsHelper.java     |  1 -
 .../authorization/LdapServerProperties.java     |  4 +-
 .../apache/ambari/server/stack/StackModule.java |  2 +-
 .../org/apache/ambari/server/state/Cluster.java |  3 +-
 .../apache/ambari/server/state/ConfigImpl.java  |  2 +-
 .../server/state/alert/AlertDefinitionHash.java |  2 -
 .../server/state/cluster/ClusterImpl.java       |  2 +-
 .../state/configgroup/ConfigGroupFactory.java   | 15 +-----
 .../state/configgroup/ConfigGroupImpl.java      |  8 ---
 .../server/state/fsm/StateMachineFactory.java   |  4 +-
 .../state/repository/VersionDefinitionXml.java  |  1 -
 .../state/services/MetricsRetrievalService.java |  6 +--
 .../state/stack/upgrade/HostOrderGrouping.java  |  1 -
 .../server/state/stack/upgrade/TaskWrapper.java |  8 +--
 .../server/upgrade/UpgradeCatalog300.java       |  2 +-
 .../server/api/query/JpaSortBuilderTest.java    |  4 --
 .../apache/ambari/server/events/EventsTest.java |  4 --
 .../apache/ambari/server/orm/OrmTestHelper.java |  8 ---
 .../server/orm/dao/HostRoleCommandDAOTest.java  |  2 -
 .../state/alerts/AlertDefinitionHashTest.java   |  2 +-
 .../state/alerts/AlertEventPublisherTest.java   |  3 --
 .../state/alerts/InitialAlertEventTest.java     |  3 --
 .../server/state/cluster/ClusterTest.java       |  2 +-
 ...omponentHostConcurrentWriteDeadlockTest.java |  4 --
 46 files changed, 43 insertions(+), 159 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/893f97e4/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/Stage.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/Stage.java b/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/Stage.java
index a46e6a2..574afa1 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/Stage.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/Stage.java
@@ -811,7 +811,7 @@ public class Stage {
   /**
    * This method should be used only in stage planner. To add
    * a new execution command use
-   * {@link #addHostRoleExecutionCommand(String, org.apache.ambari.server.Role, org.apache.ambari.server.RoleCommand, org.apache.ambari.server.state.ServiceComponentHostEvent, String, String, boolean)}
+   * {@link #addHostRoleExecutionCommand(String, Role, RoleCommand, ServiceComponentHostEvent, String, String, boolean, boolean)}
    * @param origStage the stage
    * @param hostname  the hostname; {@code null} for a server-side stage
    * @param r         the role

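For context on the Stage.java hunk above: a {@link} tag that spells out a
parameter list must name an overload that actually exists, so adding a
parameter to a method breaks every link written against the old signature.
A minimal sketch of the rule, using invented names rather than Ambari code:

  public final class LinkSignatureExample {

    /** See {@link #send(String, boolean)} for the retry-aware variant. */
    public static void send(String msg) {
      send(msg, false);
    }

    /**
     * A link written as {@code {@link #send(String)}} stays valid only while
     * that exact overload exists; once a parameter is added, links spelling
     * out the old parameter list become unresolved references.
     */
    public static void send(String msg, boolean retry) {
      System.out.println(msg + (retry ? " (retry)" : ""));
    }
  }
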
http://git-wip-us.apache.org/repos/asf/ambari/blob/893f97e4/ambari-server/src/main/java/org/apache/ambari/server/agent/HeartBeatResponse.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/agent/HeartBeatResponse.java b/ambari-server/src/main/java/org/apache/ambari/server/agent/HeartBeatResponse.java
index 43c484c..0cfc68d 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/agent/HeartBeatResponse.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/agent/HeartBeatResponse.java
@@ -138,9 +138,6 @@ public class HeartBeatResponse {
   /**
    * Gets the alert definition commands that contain the alert definitions for
    * each cluster that the host is a member of.
-   *
-   * @param commands
-   *          the commands, or {@code null} for none.
    */
   public List<AlertDefinitionCommand> getAlertDefinitionCommands() {
     return alertDefinitionCommands;

http://git-wip-us.apache.org/repos/asf/ambari/blob/893f97e4/ambari-server/src/main/java/org/apache/ambari/server/api/query/JpaSortBuilder.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/api/query/JpaSortBuilder.java b/ambari-server/src/main/java/org/apache/ambari/server/api/query/JpaSortBuilder.java
index 6d76945..dffc73b 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/api/query/JpaSortBuilder.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/api/query/JpaSortBuilder.java
@@ -56,7 +56,7 @@ public class JpaSortBuilder<T> {
    * Builds the list of sort orders based on the supplied request and JPA
    * predicate visitor.
    *
-   * @param sortRequests
+   * @param sortRequest
    *          the Ambari sort request properties to turn into a JPA sort
    *          request. If {@code null} or the {@link SortRequestProperty} list
    *          is null, an empty list is returned.

http://git-wip-us.apache.org/repos/asf/ambari/blob/893f97e4/ambari-server/src/main/java/org/apache/ambari/server/api/resources/RequestScheduleResourceDefinition.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/api/resources/RequestScheduleResourceDefinition.java b/ambari-server/src/main/java/org/apache/ambari/server/api/resources/RequestScheduleResourceDefinition.java
index 6de5e25..dfbefd7 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/api/resources/RequestScheduleResourceDefinition.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/api/resources/RequestScheduleResourceDefinition.java
@@ -25,11 +25,7 @@ import org.apache.ambari.server.api.util.TreeNode;
 import org.apache.ambari.server.controller.spi.Resource;
 
 public class RequestScheduleResourceDefinition extends BaseResourceDefinition {
-  /**
-   * Constructor.
-   *
-   * @param resourceType resource type
-   */
+
   public RequestScheduleResourceDefinition() {
     super(Resource.Type.RequestSchedule);
   }

http://git-wip-us.apache.org/repos/asf/ambari/blob/893f97e4/ambari-server/src/main/java/org/apache/ambari/server/api/rest/BootStrapResource.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/api/rest/BootStrapResource.java b/ambari-server/src/main/java/org/apache/ambari/server/api/rest/BootStrapResource.java
index 4aad530..3250a3b 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/api/rest/BootStrapResource.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/api/rest/BootStrapResource.java
@@ -100,7 +100,7 @@ public class BootStrapResource {
   /**
    * Gets a list of bootstrapped hosts.
    *
-   * @param info  the host info, with no SSL key information
+   * @param uriInfo the host info, with no SSL key information
    */
   @GET
   @Path("/hosts")

http://git-wip-us.apache.org/repos/asf/ambari/blob/893f97e4/ambari-server/src/main/java/org/apache/ambari/server/api/services/AlertDefinitionService.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/api/services/AlertDefinitionService.java b/ambari-server/src/main/java/org/apache/ambari/server/api/services/AlertDefinitionService.java
index d3f88ec..fb5423b 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/api/services/AlertDefinitionService.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/api/services/AlertDefinitionService.java
@@ -99,9 +99,6 @@ public class AlertDefinitionService extends BaseService {
 
   /**
   * Create an alert definition resource instance
-   * @param clusterName
-   * @param requestScheduleId
-   * @return
    */
   private ResourceInstance createResourceInstance(String clusterName,
       Long definitionId) {

http://git-wip-us.apache.org/repos/asf/ambari/blob/893f97e4/ambari-server/src/main/java/org/apache/ambari/server/checks/ServicesUpCheck.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/checks/ServicesUpCheck.java b/ambari-server/src/main/java/org/apache/ambari/server/checks/ServicesUpCheck.java
index 05f3e91..273bdaa 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/checks/ServicesUpCheck.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/checks/ServicesUpCheck.java
@@ -195,8 +195,6 @@ public class ServicesUpCheck extends AbstractCheckDescriptor {
    * purposes of this check. Component type, maintenance mode, and state are
    * taken into account.
    *
-   * @param clusters
-   *          the clusters instance
    * @param cluster
    *          the cluster
    * @param serviceComponent

http://git-wip-us.apache.org/repos/asf/ambari/blob/893f97e4/ambari-server/src/main/java/org/apache/ambari/server/configuration/Configuration.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/configuration/Configuration.java b/ambari-server/src/main/java/org/apache/ambari/server/configuration/Configuration.java
index 6f592f0..55a4f50 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/configuration/Configuration.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/configuration/Configuration.java
@@ -570,7 +570,7 @@ public class Configuration {
       "security.server.key_name", "ca.key");
 
   /**
-   * The name of the keystore file, located in {@link SRVR_KSTR_DIR}.
+   * The name of the keystore file, located in {@link #SRVR_KSTR_DIR}.
    */
   @Markdown(description = "The name of the keystore file, located in `security.server.keys_dir`")
   public static final ConfigurationProperty<String> KSTR_NAME = new ConfigurationProperty<>(
@@ -588,7 +588,7 @@ public class Configuration {
 
   /**
    * The name of the truststore file ambari uses to store trusted certificates.
-   * Located in {@link SRVR_KSTR_DIR}.
+   * Located in {@link #SRVR_KSTR_DIR}.
    */
   @Markdown(description = "The name of the truststore file ambari uses to store trusted certificates. Located in `security.server.keys_dir`")
   public static final ConfigurationProperty<String> TSTR_NAME = new ConfigurationProperty<>(
@@ -1652,7 +1652,7 @@ public class Configuration {
       "ssl.trustStore.password", null);
 
   /**
-   * The type of truststore used by the {@link JAVAX_SSL_TRUSTSTORE_TYPE} property.
+   * The type of truststore used by the {@link #JAVAX_SSL_TRUSTSTORE_TYPE} property.
    */
   @Markdown(description = "The type of truststore used by the `javax.net.ssl.trustStoreType` property.")
   public static final ConfigurationProperty<String> SSL_TRUSTSTORE_TYPE = new ConfigurationProperty<>(
@@ -4411,7 +4411,7 @@ public class Configuration {
    * Caching of host role command status summary can be enabled/disabled
    * through the {@link #SERVER_HRC_STATUS_SUMMARY_CACHE_ENABLED} config property.
    * This method returns the value of {@link #SERVER_HRC_STATUS_SUMMARY_CACHE_ENABLED}
-   * config property. If this config property is not defined than returns the default defined by {@link #SERVER_HRC_STATUS_SUMMARY_CACHE_ENABLED_DEFAULT}.
+   * config property.
    * @return true if caching is to be enabled otherwise false.
    */
   public boolean getHostRoleCommandStatusSummaryCacheEnabled() {
@@ -4433,8 +4433,7 @@ public class Configuration {
    * In order to avoid the cache storing host role command status summary objects exhaust
    * memory we set a max record number allowed for the cache. This limit can be configured
    * through {@link #SERVER_HRC_STATUS_SUMMARY_CACHE_SIZE} config property. The method returns
-   * the value of this config property. If this config property is not defined than
-   * the default value specified by {@link #SERVER_HRC_STATUS_SUMMARY_CACHE_SIZE_DEFAULT} is returned.
+   * the value of this config property.
    * @return the upper limit for the number of cached host role command summaries.
    */
   public long getHostRoleCommandStatusSummaryCacheSize() {
@@ -4455,8 +4454,7 @@ public class Configuration {
   /**
    * As a safety measure the cache storing host role command status summaries should auto expire after a while.
    * The expiry duration is specified through the {@link #SERVER_HRC_STATUS_SUMMARY_CACHE_EXPIRY_DURATION} config property
-   * expressed in minutes. The method returns the value of this config property. If this config property is not defined than
-   * the default value specified by {@link #SERVER_HRC_STATUS_SUMMARY_CACHE_EXPIRY_DURATION_DEFAULT}
+   * expressed in minutes. The method returns the value of this config property.
    * @return the cache expiry duration in minutes
    */
   public long getHostRoleCommandStatusSummaryCacheExpiryDuration() {
@@ -4757,7 +4755,7 @@ public class Configuration {
 
   /**
    * Get property-providers' timeout value in milliseconds for waiting on the
-   * completion of submitted {@link Callable}s. This will return {@value 5000}
+   * completion of submitted {@link Callable}s. This will return 5000
    * if not specified.
    *
   * @return the property-providers' completion service timeout, in millis.
@@ -5007,8 +5005,6 @@ public class Configuration {
   /**
    * Gets the minimum number of connections that should always exist in the
    * connection pool.
-   *
-   * @return default of {@value #SERVER_JDBC_CONNECTION_POOL_MIN_SIZE}
    */
   public int getConnectionPoolMinimumSize() {
     return Integer.parseInt(getProperty(SERVER_JDBC_CONNECTION_POOL_MIN_SIZE));
@@ -5017,8 +5013,6 @@ public class Configuration {
   /**
   * Gets the maximum number of connections that should ever exist in the
    * connection pool.
-   *
-   * @return default of {@value #SERVER_JDBC_CONNECTION_POOL_MAX_SIZE}
    */
   public int getConnectionPoolMaximumSize() {
     return Integer.parseInt(getProperty(SERVER_JDBC_CONNECTION_POOL_MAX_SIZE));
@@ -5028,8 +5022,6 @@ public class Configuration {
   * Gets the maximum amount of time in seconds any connection, whether it's been
   * idle or active, should ever be in the pool. This will terminate the
    * connection after the expiration age and force new connections to be opened.
-   *
-   * @return default of {@value #SERVER_JDBC_CONNECTION_POOL_MAX_AGE}
    */
   public int getConnectionPoolMaximumAge() {
     return Integer.parseInt(getProperty(SERVER_JDBC_CONNECTION_POOL_MAX_AGE));
@@ -5039,8 +5031,6 @@ public class Configuration {
    * Gets the maximum amount of time in seconds that an idle connection can
    * remain in the pool. This should always be greater than the value returned
    * from {@link #getConnectionPoolMaximumExcessIdle()}
-   *
-   * @return default of {@value #SERVER_JDBC_CONNECTION_POOL_MAX_IDLE_TIME}
    */
   public int getConnectionPoolMaximumIdle() {
     return Integer.parseInt(getProperty(SERVER_JDBC_CONNECTION_POOL_MAX_IDLE_TIME));
@@ -5050,9 +5040,6 @@ public class Configuration {
    * Gets the maximum amount of time in seconds that connections beyond the
    * minimum pool size should remain in the pool. This should always be less
   * than the value returned from {@link #getConnectionPoolMaximumIdle()}
-   *
-   * @return default of
-   *         {@value #SERVER_JDBC_CONNECTION_POOL_MAX_IDLE_TIME_EXCESS}
    */
   public int getConnectionPoolMaximumExcessIdle() {
     return Integer.parseInt(getProperty(SERVER_JDBC_CONNECTION_POOL_MAX_IDLE_TIME_EXCESS));
@@ -5062,8 +5049,6 @@ public class Configuration {
    * Gets the number of connections that should be retrieved when the pool size
    * must increase. It's wise to set this higher than 1 since the assumption is
    * that a pool that needs to grow should probably grow by more than 1.
-   *
-   * @return default of {@value #SERVER_JDBC_CONNECTION_POOL_AQUISITION_SIZE}
    */
   public int getConnectionPoolAcquisitionSize() {
     return Integer.parseInt(getProperty(SERVER_JDBC_CONNECTION_POOL_AQUISITION_SIZE));
@@ -5072,9 +5057,6 @@ public class Configuration {
   /**
    * Gets the number of times connections should be retried to be acquired from
    * the database before giving up.
-   *
-   * @return default of
-   *         {@value #SERVER_JDBC_CONNECTION_POOL_ACQUISITION_RETRY_ATTEMPTS}
    */
   public int getConnectionPoolAcquisitionRetryAttempts() {
     return Integer.parseInt(getProperty(SERVER_JDBC_CONNECTION_POOL_ACQUISITION_RETRY_ATTEMPTS));
@@ -5082,8 +5064,6 @@ public class Configuration {
 
   /**
    * Gets the delay in milliseconds between connection acquire attempts.
-   *
-   * @return default of {@value #DEFAULT_JDBC_POOL_ACQUISITION_RETRY_DELAY}
    */
   public int getConnectionPoolAcquisitionRetryDelay() {
     return Integer.parseInt(getProperty(SERVER_JDBC_CONNECTION_POOL_ACQUISITION_RETRY_DELAY));
@@ -5093,8 +5073,6 @@ public class Configuration {
   /**
    * Gets the number of seconds in between testing each idle connection in the
    * connection pool for validity.
-   *
-   * @return default of {@value #SERVER_JDBC_CONNECTION_POOL_IDLE_TEST_INTERVAL}
    */
   public int getConnectionPoolIdleTestInternval() {
     return Integer.parseInt(getProperty(SERVER_JDBC_CONNECTION_POOL_IDLE_TEST_INTERVAL));
@@ -5279,9 +5257,6 @@ public class Configuration {
   /**
    * Gets the interval at which cached alert data is written out to the
    * database, if enabled.
-   *
-   * @return the cache flush interval, or
-   *         {@value #ALERTS_CACHE_FLUSH_INTERVAL_DEFAULT} if not set.
    */
   @Experimental(feature = ExperimentalFeature.ALERT_CACHING)
   public int getAlertCacheFlushInterval() {
@@ -5290,9 +5265,6 @@ public class Configuration {
 
   /**
    * Gets the size of the alerts cache, if enabled.
-   *
-   * @return the cache flush interval, or {@value #ALERTS_CACHE_SIZE_DEFAULT} if
-   *         not set.
    */
   @Experimental(feature = ExperimentalFeature.ALERT_CACHING)
   public int getAlertCacheSize() {
@@ -5435,10 +5407,6 @@ public class Configuration {
 
   /**
    * Gets the core pool size used for the {@link MetricsRetrievalService}.
-   *
-   * @return the core pool size or
-   *         {@value #PROCESSOR_BASED_THREADPOOL_MAX_SIZE_DEFAULT} if not
-   *         specified.
    */
   public int getMetricsServiceThreadPoolCoreSize() {
     return Integer.parseInt(getProperty(METRIC_RETRIEVAL_SERVICE_THREADPOOL_CORE_SIZE));
@@ -5447,11 +5415,7 @@ public class Configuration {
   /**
    * Gets the max pool size used for the {@link MetricsRetrievalService}.
   * Threads will only be increased up to this value if the worker queue is
-   * exhauseted and rejects the new task.
-   *
-   * @return the max pool size, or
-   *         {@value PROCESSOR_BASED_THREADPOOL_MAX_SIZE_DEFAULT} if not
-   *         specified.
+   * exhausted and rejects the new task.
    * @see #getMetricsServiceWorkerQueueSize()
    */
   public int getMetricsServiceThreadPoolMaxSize() {

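The Configuration.java hunks above follow one pattern: javadoc references to
members of the same class need the # prefix ({@link #SRVR_KSTR_DIR} rather
than {@link SRVR_KSTR_DIR}), and {@value} tags are removed where they do not
point at a resolvable constant. A self-contained sketch of the forms that do
resolve, with invented names:

  public final class JavadocReferenceExample {

    /** Directory holding the server keystore. */
    public static final String KEYS_DIR = "/var/lib/example/keys";

    /**
     * The name of the keystore file, located in {@link #KEYS_DIR}. Member
     * references need the {@code #} prefix, and members of other classes
     * need the class name too, e.g. {@link java.lang.Integer#MAX_VALUE}.
     */
    public static final String KEYSTORE_NAME = "keystore.p12";

    /**
     * Returns the keystore file name, which is {@value #KEYSTORE_NAME};
     * {@code {@value}} resolves only against constant fields, which is why
     * the hunks above drop it in favor of plain prose or {@code ...} text.
     */
    public static String keystoreName() {
      return KEYSTORE_NAME;
    }
  }
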
http://git-wip-us.apache.org/repos/asf/ambari/blob/893f97e4/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/AbstractProviderModule.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/AbstractProviderModule.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/AbstractProviderModule.java
index 27ac03e..6c4e096 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/AbstractProviderModule.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/AbstractProviderModule.java
@@ -915,7 +915,7 @@ public abstract class AbstractProviderModule implements ProviderModule,
    * @param clusterName
    *          the cluster name
    * @param configType
-   *          the configuration type (for example {@value hdfs-site}).
+   *          the configuration type (for example <code>hdfs-site</code>).
    * @return
    */
   private String getDesiredConfigVersion(String clusterName,

http://git-wip-us.apache.org/repos/asf/ambari/blob/893f97e4/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UpgradeResourceProvider.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UpgradeResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UpgradeResourceProvider.java
index 511c8fb..b657f03 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UpgradeResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UpgradeResourceProvider.java
@@ -608,8 +608,7 @@ public class UpgradeResourceProvider extends AbstractControllerResourceProvider
   /**
    * Validates a singular API request.
    *
-   * @param requestMap
-   *          the map of properties
+   * @param upgradeContext the map of properties
    * @return the validated upgrade pack
    * @throws AmbariException
    */
@@ -1650,7 +1649,7 @@ public class UpgradeResourceProvider extends AbstractControllerResourceProvider
    * command was created. For upgrades, this is problematic since the commands
    * are all created ahead of time, but the upgrade may change configs as part
    * of the upgrade pack.</li>
-   * <li>{@link #COMMAND_PARAM_REQUEST_ID}</li> the ID of the request.
+   * <li>{@link UpgradeContext#COMMAND_PARAM_REQUEST_ID}</li> the ID of the request.
    * <ul>
    *
    * @return the initialized parameter map.

http://git-wip-us.apache.org/repos/asf/ambari/blob/893f97e4/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/VersionDefinitionResourceProvider.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/VersionDefinitionResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/VersionDefinitionResourceProvider.java
index 7914fd9..5f12e52 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/VersionDefinitionResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/VersionDefinitionResourceProvider.java
@@ -589,7 +589,6 @@ public class VersionDefinitionResourceProvider extends AbstractAuthorizedResourc
    * @param id            the definition id
    * @param xml           the version definition xml
    * @param requestedIds  the requested ids
-   * @param fromAvailable if the resource should include the {@link #SHOW_AVAILABLE} property
    * @return the resource
    * @throws SystemException
    */

http://git-wip-us.apache.org/repos/asf/ambari/blob/893f97e4/ambari-server/src/main/java/org/apache/ambari/server/events/AlertDefinitionDisabledEvent.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/events/AlertDefinitionDisabledEvent.java b/ambari-server/src/main/java/org/apache/ambari/server/events/AlertDefinitionDisabledEvent.java
index a3c6e68..b91253c 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/events/AlertDefinitionDisabledEvent.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/events/AlertDefinitionDisabledEvent.java
@@ -35,7 +35,7 @@ public class AlertDefinitionDisabledEvent extends ClusterEvent {
    *
    * @param clusterId
    *          the ID of the cluster that the definition is in.
-   * @param definition
+   * @param definitionId
    *          the alert definition being registered.
    */
   public AlertDefinitionDisabledEvent(long clusterId, long definitionId) {

http://git-wip-us.apache.org/repos/asf/ambari/blob/893f97e4/ambari-server/src/main/java/org/apache/ambari/server/events/jpa/EntityManagerCacheInvalidationEvent.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/events/jpa/EntityManagerCacheInvalidationEvent.java b/ambari-server/src/main/java/org/apache/ambari/server/events/jpa/EntityManagerCacheInvalidationEvent.java
index fe472b2..9c910f7 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/events/jpa/EntityManagerCacheInvalidationEvent.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/events/jpa/EntityManagerCacheInvalidationEvent.java
@@ -23,11 +23,6 @@ package org.apache.ambari.server.events.jpa;
  */
 public final class EntityManagerCacheInvalidationEvent extends JPAEvent {
 
-  /**
-   * Constructor.
-   *
-   * @param eventType
-   */
   public EntityManagerCacheInvalidationEvent() {
     super(JPAEventType.CACHE_INVALIDATION);
   }

http://git-wip-us.apache.org/repos/asf/ambari/blob/893f97e4/ambari-server/src/main/java/org/apache/ambari/server/events/listeners/alerts/AlertReceivedListener.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/events/listeners/alerts/AlertReceivedListener.java b/ambari-server/src/main/java/org/apache/ambari/server/events/listeners/alerts/AlertReceivedListener.java
index 907e4d8..2faadba 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/events/listeners/alerts/AlertReceivedListener.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/events/listeners/alerts/AlertReceivedListener.java
@@ -588,7 +588,7 @@ public class AlertReceivedListener {
    *          the definition to read any repeat tolerance overrides from.
    * @param state
    *          the state of the {@link AlertCurrentEntity}.
-   * @param the
+   * @param occurrences
    *          occurrences of the alert in the current state (used for
   *          calculating firmness when moving between non-OK states)
    * @return

http://git-wip-us.apache.org/repos/asf/ambari/blob/893f97e4/ambari-server/src/main/java/org/apache/ambari/server/orm/DBAccessor.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/DBAccessor.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/DBAccessor.java
index ae07dc0..bf8ff48 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/orm/DBAccessor.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/DBAccessor.java
@@ -531,9 +531,8 @@ public interface DBAccessor {
    *          the name of the table (not {@code null}).
    * @param columnName
    *          the name of the column to retrieve type for (not {@code null}).
-   * @return the integer representation of the column type from {@link Types}.
+   * @return the integer representation of the column type
    * @throws SQLException
-   * @see {@link Types}
    */
   int getColumnType(String tableName, String columnName)
       throws SQLException;

http://git-wip-us.apache.org/repos/asf/ambari/blob/893f97e4/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/ExtensionDAO.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/ExtensionDAO.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/ExtensionDAO.java
index 6c6c3ae..4b428d1 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/ExtensionDAO.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/ExtensionDAO.java
@@ -135,7 +135,7 @@ public class ExtensionDAO {
 
   /**
    * Creates or updates the specified entity. This method will check
-   * {@link ExtensionEntity#getStackId()} in order to determine whether the entity
+   * {@link ExtensionEntity#getExtensionId()} in order to determine whether the entity
    * should be created or merged.
    *
    * @param extension

http://git-wip-us.apache.org/repos/asf/ambari/blob/893f97e4/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/ExtensionLinkDAO.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/ExtensionLinkDAO.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/ExtensionLinkDAO.java
index e860c64..3df1cd5 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/ExtensionLinkDAO.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/ExtensionLinkDAO.java
@@ -209,8 +209,7 @@ public class ExtensionLinkDAO {
    * {@link ExtensionLinkEntity#getLinkId()} in order to determine whether the entity
    * should be created or merged.
    *
-   * @param extension
-   *          the link to create or update (not {@code null}).
+   * @param link the link to create or update (not {@code null}).
    */
   public void createOrUpdate(ExtensionLinkEntity link)
       throws AmbariException {

http://git-wip-us.apache.org/repos/asf/ambari/blob/893f97e4/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/RequestDAO.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/RequestDAO.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/RequestDAO.java
index 5d53416..8f16cb2 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/RequestDAO.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/RequestDAO.java
@@ -223,8 +223,8 @@ public class RequestDAO implements Cleanable {
   * Retrieves requests from the database for a cluster, or specifically for non-cluster requests.
    * This method should be considered temporary until Request/Stage/Task cleanup is achieved.
    *
-   * @param maxResults  the max number to return
-   * @param ascOrder    {@code true} to sort by requestId ascending, {@code false} for descending
+   * @param limit the max number to return
+   * @param sortAscending {@code true} to sort by requestId ascending, {@code false} for descending
    * @param clusterId   the cluster to find, or {@code null} to search for requests without cluster
    */
   @RequiresSession

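The RequestDAO hunk above, like the JpaSortBuilder, AlertDefinitionDisabledEvent
and UpgradeGroupEntity hunks, renames @param tags that drifted away from the
declared parameters: javadoc flags any @param whose name does not match a
parameter exactly. A small sketch with invented names:

  import java.util.List;

  public final class ParamTagExample {

    /**
     * Returns at most {@code limit} items from {@code source}. A stale tag
     * such as {@code @param maxResults} would be flagged here, because no
     * parameter of that name exists.
     *
     * @param source the items to copy from (not {@code null})
     * @param limit  the max number to return
     */
    public static <T> List<T> head(List<T> source, int limit) {
      return source.subList(0, Math.min(limit, source.size()));
    }
  }
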
http://git-wip-us.apache.org/repos/asf/ambari/blob/893f97e4/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/AlertDefinitionEntity.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/AlertDefinitionEntity.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/AlertDefinitionEntity.java
index 6337487..0c3d817 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/AlertDefinitionEntity.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/AlertDefinitionEntity.java
@@ -538,7 +538,7 @@ public class AlertDefinitionEntity {
    * value from {@link #getRepeatTolerance()} should be used to calculate retry
    * tolerance.
    *
-   * @param repeatToleranceEnabled
+   * @param enabled
   *          {@code true} to override the default value and use the value
    *          returned from {@link #getRepeatTolerance()}.
    */

http://git-wip-us.apache.org/repos/asf/ambari/blob/893f97e4/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/UpgradeEntity.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/UpgradeEntity.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/UpgradeEntity.java
index bea1d19..de1454b 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/UpgradeEntity.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/UpgradeEntity.java
@@ -176,9 +176,6 @@ public class UpgradeEntity {
     return requestId;
   }
 
-  /**
-   * @param id the request id
-   */
   public void setRequestEntity(RequestEntity requestEntity) {
     this.requestEntity = requestEntity;
     requestId = requestEntity.getRequestId();

http://git-wip-us.apache.org/repos/asf/ambari/blob/893f97e4/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/UpgradeGroupEntity.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/UpgradeGroupEntity.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/UpgradeGroupEntity.java
index 4830e3b..53a1925 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/UpgradeGroupEntity.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/UpgradeGroupEntity.java
@@ -111,7 +111,7 @@ public class UpgradeGroupEntity {
   }
 
   /**
-   * @param text the item text
+   * @param title the item text
    */
   public void setTitle(String title) {
     groupTitle = title;

http://git-wip-us.apache.org/repos/asf/ambari/blob/893f97e4/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ViewURLEntity.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ViewURLEntity.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ViewURLEntity.java
index 27cb8e2..7765a12 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ViewURLEntity.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ViewURLEntity.java
@@ -80,7 +80,6 @@ public class ViewURLEntity {
 
   /**
    * Set the URL suffix
-   * @param URL suffix
      */
   public void setUrlSuffix(String urlSuffix) {
     this.urlSuffix = urlSuffix;

http://git-wip-us.apache.org/repos/asf/ambari/blob/893f97e4/ambari-server/src/main/java/org/apache/ambari/server/orm/helpers/dbms/DbmsHelper.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/helpers/dbms/DbmsHelper.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/helpers/dbms/DbmsHelper.java
index c2778d3..d374ddc 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/orm/helpers/dbms/DbmsHelper.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/helpers/dbms/DbmsHelper.java
@@ -66,7 +66,6 @@ public interface DbmsHelper {
    *
    * @param indexName
    * @param tableName
-   * @param columnNames
    * @return
    */
   String getDropIndexStatement(String indexName, String tableName);

http://git-wip-us.apache.org/repos/asf/ambari/blob/893f97e4/ambari-server/src/main/java/org/apache/ambari/server/security/authorization/LdapServerProperties.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/security/authorization/LdapServerProperties.java b/ambari-server/src/main/java/org/apache/ambari/server/security/authorization/LdapServerProperties.java
index e06d8f2..4435e98 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/security/authorization/LdapServerProperties.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/security/authorization/LdapServerProperties.java
@@ -148,8 +148,8 @@ public class LdapServerProperties {
   /**
    * Returns the LDAP filter to search users by.
   * @param useAlternateUserSearchFilter if true then return the LDAP filter that expects the user name in
-   *                                  User Principal Name format to filter users constructed from {@value org.apache.ambari.server.configuration.Configuration#LDAP_ALT_USER_SEARCH_FILTER_KEY}.
-   *                                  Otherwise the filter is constructed from {@value org.apache.ambari.server.configuration.Configuration#LDAP_USER_SEARCH_FILTER_KEY}
+   *                                  User Principal Name format to filter users constructed from {@link org.apache.ambari.server.configuration.Configuration#LDAP_ALT_USER_SEARCH_FILTER}.
+   *                                  Otherwise the filter is constructed from {@link org.apache.ambari.server.configuration.Configuration#LDAP_USER_SEARCH_FILTER}
    * @return the LDAP filter string
    */
   public String getUserSearchFilter(boolean useAlternateUserSearchFilter) {
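
For context, a minimal sketch of the two filter styles this javadoc contrasts; the filter strings below are illustrative assumptions, not Ambari's shipped defaults:

    # hypothetical standard filter, matching on a plain login attribute:
    (&(uid={0})(objectClass=person))
    # hypothetical alternate filter, expecting User Principal Name format:
    (userPrincipalName={0})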

http://git-wip-us.apache.org/repos/asf/ambari/blob/893f97e4/ambari-server/src/main/java/org/apache/ambari/server/stack/StackModule.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/stack/StackModule.java b/ambari-server/src/main/java/org/apache/ambari/server/stack/StackModule.java
index dfe7072..d3ad351 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/stack/StackModule.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/stack/StackModule.java
@@ -1046,7 +1046,7 @@ public class StackModule extends BaseModule<StackModule, StackInfo> implements V
    * Finds an upgrade pack that:
    * <ul>
    *   <li>Is found in the $SERVICENAME/upgrades/$STACKNAME folder</li>
-   *   <li>Matches the same {@link UpgradeType#getType()}as the {@code base} upgrade pack</li>
+   *   <li>Matches the same {@link UpgradeType} as the {@code base} upgrade pack</li>
   *   <li>Has the {@link UpgradePack#getTarget()} value equal to "*"</li>
   *   <li>Has the {@link UpgradePack#getTargetStack()} value equal to "*"</li>
    * </ul>

http://git-wip-us.apache.org/repos/asf/ambari/blob/893f97e4/ambari-server/src/main/java/org/apache/ambari/server/state/Cluster.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/Cluster.java b/ambari-server/src/main/java/org/apache/ambari/server/state/Cluster.java
index c961995..49fc8c0 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/Cluster.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/Cluster.java
@@ -695,8 +695,7 @@ public interface Cluster {
    * Gets whether there is an upgrade which has been suspended and not yet
    * finalized.
    *
-   * @return {@code true} if the last upgrade is in the
-   *         {@link UpgradeState#SUSPENDED}.
+   * @return {@code true} if the last upgrade is suspended
    */
   boolean isUpgradeSuspended();
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/893f97e4/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigImpl.java
index 543dadd..0e40254 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigImpl.java
@@ -52,7 +52,7 @@ public class ConfigImpl implements Config {
   private final static Logger LOG = LoggerFactory.getLogger(ConfigImpl.class);
 
   /**
-   * A label for {@link #hostLock} to use with the {@link LockFactory}.
+   * A label for {@link #propertyLock} to use with the {@link LockFactory}.
    */
   private static final String PROPERTY_LOCK_LABEL = "configurationPropertyLock";
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/893f97e4/ambari-server/src/main/java/org/apache/ambari/server/state/alert/AlertDefinitionHash.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/alert/AlertDefinitionHash.java b/ambari-server/src/main/java/org/apache/ambari/server/state/alert/AlertDefinitionHash.java
index 616bcdb..5896819 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/alert/AlertDefinitionHash.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/alert/AlertDefinitionHash.java
@@ -447,8 +447,6 @@ public class AlertDefinitionHash {
    *
    * @param clusterId
    *          the ID of the cluster.
-   * @param hosts
-   *          the hosts to push {@link AlertDefinitionCommand}s for.
    */
   public void enqueueAgentCommands(long clusterId) {
     String clusterName = null;

http://git-wip-us.apache.org/repos/asf/ambari/blob/893f97e4/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterImpl.java
index b7cc4cd..b86c5cd 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterImpl.java
@@ -3151,7 +3151,7 @@ public class ClusterImpl implements Cluster {
    * should make sure the cluster global write lock is acquired.
    *
    * @param stackId
-   * @see Cluster#getClusterGlobalLock()
+   * @see #clusterGlobalLock
    */
   @Transactional
   void removeAllConfigsForStack(StackId stackId) {

http://git-wip-us.apache.org/repos/asf/ambari/blob/893f97e4/ambari-server/src/main/java/org/apache/ambari/server/state/configgroup/ConfigGroupFactory.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/configgroup/ConfigGroupFactory.java b/ambari-server/src/main/java/org/apache/ambari/server/state/configgroup/ConfigGroupFactory.java
index 906d948..2209dc1 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/configgroup/ConfigGroupFactory.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/configgroup/ConfigGroupFactory.java
@@ -29,26 +29,13 @@ import com.google.inject.assistedinject.Assisted;
 public interface ConfigGroupFactory {
   /**
    * Creates and saves a new {@link ConfigGroup}.
-   *
-   * @param cluster
-   * @param name
-   * @param tag
-   * @param description
-   * @param configs
-   * @param hosts
-   * @param serviceName
-   * @return
    */
   ConfigGroup createNew(@Assisted("cluster") Cluster cluster, @Assisted("name") String name,
       @Assisted("tag") String tag, @Assisted("description") String description,
       @Assisted("configs") Map<String, Config> configs, @Assisted("hosts") Map<Long, Host> hosts);
 
   /**
-   * Instantiates a {@link ConfigGroup} fron an existing, persisted entity.
-   *
-   * @param cluster
-   * @param entity
-   * @return
+   * Instantiates a {@link ConfigGroup} from an existing, persisted entity.
    */
   ConfigGroup createExisting(Cluster cluster, ConfigGroupEntity entity);
 }

http://git-wip-us.apache.org/repos/asf/ambari/blob/893f97e4/ambari-server/src/main/java/org/apache/ambari/server/state/configgroup/ConfigGroupImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/configgroup/ConfigGroupImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/state/configgroup/ConfigGroupImpl.java
index 89a03aa..a74e2a2 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/configgroup/ConfigGroupImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/configgroup/ConfigGroupImpl.java
@@ -258,7 +258,6 @@ public class ConfigGroupImpl implements ConfigGroup {
 
   /**
    * Helper method to recreate configs mapping
-   * @param configs
    */
   @Override
   public void setConfigurations(Map<String, Config> configurations) {
@@ -349,10 +348,6 @@ public class ConfigGroupImpl implements ConfigGroup {
 
   /**
    * Replaces all existing host mappings with the new collection of hosts.
-   *
-   * @param the
-   *          new hosts
-   * @throws Exception
    */
   @Transactional
   void replaceHostMappings(Map<Long, Host> hosts) {
@@ -370,9 +365,6 @@ public class ConfigGroupImpl implements ConfigGroup {
 
   /**
    * Adds the collection of hosts to the configuration group.
-   *
-   * @param hostEntity
-   * @param configGroupEntity
    */
   @Transactional
   ConfigGroupEntity persistHostMapping(Collection<Host> hosts,

http://git-wip-us.apache.org/repos/asf/ambari/blob/893f97e4/ambari-server/src/main/java/org/apache/ambari/server/state/fsm/StateMachineFactory.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/fsm/StateMachineFactory.java b/ambari-server/src/main/java/org/apache/ambari/server/state/fsm/StateMachineFactory.java
index cc57f2c..1a17a25 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/fsm/StateMachineFactory.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/fsm/StateMachineFactory.java
@@ -273,9 +273,9 @@ final public class StateMachineFactory
 
   /**
    * Effect a transition due to the effecting stimulus.
-   * @param state current state
+   * @param oldState current state
    * @param eventType trigger to initiate the transition
-   * @param cause causal eventType context
+   * @param event causal eventType context
    * @return transitioned state
    */
   private STATE doTransition

http://git-wip-us.apache.org/repos/asf/ambari/blob/893f97e4/ambari-server/src/main/java/org/apache/ambari/server/state/repository/VersionDefinitionXml.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/repository/VersionDefinitionXml.java b/ambari-server/src/main/java/org/apache/ambari/server/state/repository/VersionDefinitionXml.java
index 69399d6..15693a2 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/repository/VersionDefinitionXml.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/repository/VersionDefinitionXml.java
@@ -353,7 +353,6 @@ public class VersionDefinitionXml {
 
   /**
    * Builds a Version Definition that is the default for the stack
-   * @param stack
    * @return the version definition
    */
   public static VersionDefinitionXml build(StackInfo stackInfo) {

http://git-wip-us.apache.org/repos/asf/ambari/blob/893f97e4/ambari-server/src/main/java/org/apache/ambari/server/state/services/MetricsRetrievalService.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/services/MetricsRetrievalService.java b/ambari-server/src/main/java/org/apache/ambari/server/state/services/MetricsRetrievalService.java
index 59ec15b..ec6b074 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/services/MetricsRetrievalService.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/services/MetricsRetrievalService.java
@@ -281,7 +281,7 @@ public class MetricsRetrievalService extends AbstractService {
    * @param streamProvider
    *          the {@link StreamProvider} to use to read from the remote
    *          endpoint.
-   * @param jmxUrl
+   * @param url
    *          the URL to read from
    *
    * @see #getCachedJMXMetric(String)
@@ -399,7 +399,7 @@ public class MetricsRetrievalService extends AbstractService {
      *          the URLs which are currently waiting to be processed. This
      *          method will remove the specified URL from this {@link Set} when
      *          it completes (successful or not).
-     * @param m_ttlUrlCache
+     * @param ttlUrlCache
      *          an evicting cache which is used to determine if a request for a
      *          metric is too soon after the last request, or {@code null} if
      *          requests can be made sequentially without any separation.
@@ -501,7 +501,7 @@ public class MetricsRetrievalService extends AbstractService {
      *
      * @param throwable
      * @param url
-     * @return the key, such as {@value IOException-http://www.server.com/jmx}.
+     * @return the key, such as <code>IOException-http://www.server.com/jmx</code>.
      */
     private String buildCacheKey(Throwable throwable, String url) {
       if (null == throwable || null == url) {

http://git-wip-us.apache.org/repos/asf/ambari/blob/893f97e4/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/HostOrderGrouping.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/HostOrderGrouping.java b/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/HostOrderGrouping.java
index 97864a6..d19406e 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/HostOrderGrouping.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/HostOrderGrouping.java
@@ -287,7 +287,6 @@ public class HostOrderGrouping extends Grouping {
 
     /**
      * @param upgradeContext  the context
-     * @param hosts           the list of hostnames
      * @return  the wrappers for a host
      */
     private List<StageWrapper> buildServiceChecks(UpgradeContext upgradeContext, List<String> serviceChecks) {

http://git-wip-us.apache.org/repos/asf/ambari/blob/893f97e4/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/TaskWrapper.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/TaskWrapper.java b/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/TaskWrapper.java
index dfa6159..25b8a93 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/TaskWrapper.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/TaskWrapper.java
@@ -42,10 +42,10 @@ public class TaskWrapper {
   private Set<String> timeoutKeys = new HashSet<>();
 
   /**
-   * @param s the service name for the tasks
-   * @param c the component name for the tasks
-   * @param hosts the set of hosts that the tasks are for
-   * @param tasks an array of tasks as a convenience
+   * @param s the service name for the task
+   * @param c the component name for the task
+   * @param hosts the set of hosts that the task is for
+   * @param task a single task
    */
   public TaskWrapper(String s, String c, Set<String> hosts, Task task) {
     this(s, c, hosts, null, task);

http://git-wip-us.apache.org/repos/asf/ambari/blob/893f97e4/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog300.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog300.java b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog300.java
index 8a4afe9..633d837 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog300.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog300.java
@@ -208,7 +208,7 @@ public class UpgradeCatalog300 extends AbstractUpgradeCatalog {
    * <ul>
    * <li>Adds the {@link #CLUSTER_CONFIG_SELECTED_COLUMN} to
    * {@link #CLUSTER_CONFIG_TABLE}.
-   * <li>Adds the {@link #CLUSTER_CONFIG_SELECTED_TIMESTAMP} to
+   * <li>Adds the {@link #CLUSTER_CONFIG_SELECTED_TIMESTAMP_COLUMN} to
    * {@link #CLUSTER_CONFIG_TABLE}.
    * </ul>
    */

http://git-wip-us.apache.org/repos/asf/ambari/blob/893f97e4/ambari-server/src/test/java/org/apache/ambari/server/api/query/JpaSortBuilderTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/api/query/JpaSortBuilderTest.java b/ambari-server/src/test/java/org/apache/ambari/server/api/query/JpaSortBuilderTest.java
index 394de9f..ec60966 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/api/query/JpaSortBuilderTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/api/query/JpaSortBuilderTest.java
@@ -126,10 +126,6 @@ public class JpaSortBuilderTest {
     Assert.assertEquals(1, roots.size());
   }
 
-  /**
-   * The {@link HistoryPredicateVisitor} is used to convert an Ambari
-   * {@link Predicate} into a JPA {@link javax.persistence.criteria.Predicate}.
-   */
   private final class MockAlertHistoryredicateVisitor
       extends JpaPredicateVisitor<AlertHistoryEntity> {
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/893f97e4/ambari-server/src/test/java/org/apache/ambari/server/events/EventsTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/events/EventsTest.java b/ambari-server/src/test/java/org/apache/ambari/server/events/EventsTest.java
index 98510db..6fb0028 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/events/EventsTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/events/EventsTest.java
@@ -376,10 +376,6 @@ public class EventsTest {
     Assert.assertEquals(AmbariEventType.CLUSTER_RENAME, ambariEvents.get(0).getType());
   }
 
-  /**
-   * Calls {@link Service#persist()} to mock a service install along with
-   * creating a single {@link Host} and {@link ServiceComponentHost}.
-   */
   private void installHdfsService() throws Exception {
     String serviceName = "HDFS";
     Service service = m_serviceFactory.createNew(m_cluster, serviceName);

http://git-wip-us.apache.org/repos/asf/ambari/blob/893f97e4/ambari-server/src/test/java/org/apache/ambari/server/orm/OrmTestHelper.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/orm/OrmTestHelper.java b/ambari-server/src/test/java/org/apache/ambari/server/orm/OrmTestHelper.java
index 8e50b5f..fdc19d1 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/orm/OrmTestHelper.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/orm/OrmTestHelper.java
@@ -431,10 +431,6 @@ public class OrmTestHelper {
     serviceComponentHost.setDesiredState(State.INSTALLED);
   }
 
-  /**
-   * Calls {@link Service#persist()} to mock a service install along with
-   * creating a single {@link Host} and {@link ServiceComponentHost}.
-   */
   public void installHdfsService(Cluster cluster,
       ServiceFactory serviceFactory, ServiceComponentFactory componentFactory,
       ServiceComponentHostFactory schFactory, String hostName) throws Exception {
@@ -469,10 +465,6 @@ public class OrmTestHelper {
     sch.setStackVersion(new StackId("HDP-2.0.6"));
   }
 
-  /**
-   * Calls {@link Service#persist()} to mock a service install along with
-   * creating a single {@link Host} and {@link ServiceComponentHost}.
-   */
   public void installYarnService(Cluster cluster,
       ServiceFactory serviceFactory, ServiceComponentFactory componentFactory,
       ServiceComponentHostFactory schFactory, String hostName) throws Exception {

http://git-wip-us.apache.org/repos/asf/ambari/blob/893f97e4/ambari-server/src/test/java/org/apache/ambari/server/orm/dao/HostRoleCommandDAOTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/orm/dao/HostRoleCommandDAOTest.java b/ambari-server/src/test/java/org/apache/ambari/server/orm/dao/HostRoleCommandDAOTest.java
index d5fa793..f6f3269 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/orm/dao/HostRoleCommandDAOTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/orm/dao/HostRoleCommandDAOTest.java
@@ -175,8 +175,6 @@ public class HostRoleCommandDAOTest {
    * @param hostEntity
    * @param requestEntity
    * @param status
-   * @param skipStage
-   * @param supportsAutoSkipOnFailure
    * @return
    */
   private void createStage(long startStageId, int count, HostEntity hostEntity,

http://git-wip-us.apache.org/repos/asf/ambari/blob/893f97e4/ambari-server/src/test/java/org/apache/ambari/server/state/alerts/AlertDefinitionHashTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/alerts/AlertDefinitionHashTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/alerts/AlertDefinitionHashTest.java
index 62fc19e..838cd6b 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/state/alerts/AlertDefinitionHashTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/state/alerts/AlertDefinitionHashTest.java
@@ -275,7 +275,7 @@ public class AlertDefinitionHashTest extends TestCase {
   }
 
   /**
-   * Test {@link AlertDefinitionHash#isHashCached(String)}.
+   * Test {@link AlertDefinitionHash#isHashCached(String,String)}.
    */
   @Test
   public void testIsHashCached() {

http://git-wip-us.apache.org/repos/asf/ambari/blob/893f97e4/ambari-server/src/test/java/org/apache/ambari/server/state/alerts/AlertEventPublisherTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/alerts/AlertEventPublisherTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/alerts/AlertEventPublisherTest.java
index e50dba5..8c23b69 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/state/alerts/AlertEventPublisherTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/state/alerts/AlertEventPublisherTest.java
@@ -300,9 +300,6 @@ public class AlertEventPublisherTest {
         source.getAlertName()));
   }
 
-  /**
-   * Calls {@link Service#persist()} to mock a service install.
-   */
   private void installHdfsService() throws Exception {
     String serviceName = "HDFS";
     Service service = serviceFactory.createNew(cluster, serviceName);

http://git-wip-us.apache.org/repos/asf/ambari/blob/893f97e4/ambari-server/src/test/java/org/apache/ambari/server/state/alerts/InitialAlertEventTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/alerts/InitialAlertEventTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/alerts/InitialAlertEventTest.java
index 483aac5..890464d 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/state/alerts/InitialAlertEventTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/state/alerts/InitialAlertEventTest.java
@@ -173,9 +173,6 @@ public class InitialAlertEventTest {
         m_listener.getAlertEventReceivedCount(InitialAlertEvent.class));
   }
 
-  /**
-   * Calls {@link Service#persist()} to mock a service install.
-   */
   private void installHdfsService() throws Exception {
     String serviceName = "HDFS";
     Service service = m_serviceFactory.createNew(m_cluster, serviceName);

http://git-wip-us.apache.org/repos/asf/ambari/blob/893f97e4/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterTest.java
index 345c463..76689f4 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterTest.java
@@ -1638,7 +1638,7 @@ public class ClusterTest {
    * Tests that hosts can be correctly transitioned into the "INSTALLING" state.
    * This method also tests that hosts in MM will not be transitioned, as per
    * the contract of
-   * {@link Cluster#transitionHostsToInstalling(ClusterVersionEntity)}.
+   * {@link Cluster#transitionHosts(ClusterVersionEntity, RepositoryVersionState)}.
    *
    * @throws Exception
    */

http://git-wip-us.apache.org/repos/asf/ambari/blob/893f97e4/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ServiceComponentHostConcurrentWriteDeadlockTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ServiceComponentHostConcurrentWriteDeadlockTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ServiceComponentHostConcurrentWriteDeadlockTest.java
index e13dd70..8f37ad7 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ServiceComponentHostConcurrentWriteDeadlockTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ServiceComponentHostConcurrentWriteDeadlockTest.java
@@ -181,10 +181,6 @@ public class ServiceComponentHostConcurrentWriteDeadlockTest {
   private static final class ServiceComponentHostDeadlockWriter extends Thread {
     private List<ServiceComponentHost> serviceComponentHosts;
 
-    /**
-     * @param nameNodeSCH
-     *          the nameNodeSCH to set
-     */
     public void setServiceComponentHosts(List<ServiceComponentHost> serviceComponentHosts) {
       this.serviceComponentHosts = serviceComponentHosts;
     }


[02/50] [abbrv] ambari git commit: AMBARI-18865. Kafka still showing log.cleanup.interval.mins (Bharat Viswanadham via alejandro)

Posted by ao...@apache.org.
AMBARI-18865. Kafka still showing log.cleanup.interval.mins (Bharat Viswanadham via alejandro)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/422c30f0
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/422c30f0
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/422c30f0

Branch: refs/heads/branch-3.0-perf
Commit: 422c30f0ed6c2448e929b72a6d4e7a89afa32fe9
Parents: 1f4a72f
Author: Alejandro Fernandez <af...@hortonworks.com>
Authored: Mon Apr 17 10:51:40 2017 -0700
Committer: Andrew Onishuk <ao...@hortonworks.com>
Committed: Mon Apr 24 14:59:56 2017 +0300

----------------------------------------------------------------------
 .../KAFKA/0.8.1/configuration/kafka-broker.xml              | 9 +++++++++
 1 file changed, 9 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/422c30f0/ambari-server/src/main/resources/common-services/KAFKA/0.8.1/configuration/kafka-broker.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/KAFKA/0.8.1/configuration/kafka-broker.xml b/ambari-server/src/main/resources/common-services/KAFKA/0.8.1/configuration/kafka-broker.xml
index e270b84..f2b4a76 100644
--- a/ambari-server/src/main/resources/common-services/KAFKA/0.8.1/configuration/kafka-broker.xml
+++ b/ambari-server/src/main/resources/common-services/KAFKA/0.8.1/configuration/kafka-broker.xml
@@ -157,6 +157,15 @@
     <value>10</value>
     <description>The frequency in minutes that the log cleaner checks whether any log segment is eligible for deletion to meet the retention policies.
     </description>
+    <deleted>true</deleted>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>log.retention.check.interval.ms</name>
+    <value>600000</value>
+    <description>
+      The frequency in milliseconds that the log cleaner checks whether any log segment is eligible for deletion to meet the retention policies.
+    </description>
     <on-ambari-upgrade add="true"/>
   </property>
   <property>
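
The replacement value keeps the old ten-minute cadence, just expressed in milliseconds: 10 min × 60 s/min × 1000 ms/s = 600,000 ms, matching the 600000 default above.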


[10/50] [abbrv] ambari git commit: AMBARI-20696: Skip calling stack selector, conf selector tools for Nifi, Streamline, Registry custom services in HDP cluster (Madhuvanthi Radhakrishnan via jluniya)

Posted by ao...@apache.org.
AMBARI-20696: Skip calling stack selector, conf selector tools for Nifi, Streamline, Registry custom services in HDP cluster (Madhuvanthi Radhakrishnan via jluniya)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/ebb24be9
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/ebb24be9
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/ebb24be9

Branch: refs/heads/branch-3.0-perf
Commit: ebb24be9ab15fa36b4c81865afd3b188eea2911d
Parents: a6bba69
Author: Jayush Luniya <jl...@hortonworks.com>
Authored: Wed Apr 19 09:06:33 2017 -0700
Committer: Andrew Onishuk <ao...@hortonworks.com>
Committed: Mon Apr 24 14:59:56 2017 +0300

----------------------------------------------------------------------
 .../libraries/functions/version_select_util.py  | 40 ++++++++++++++++++++
 1 file changed, 40 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/ebb24be9/ambari-common/src/main/python/resource_management/libraries/functions/version_select_util.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/functions/version_select_util.py b/ambari-common/src/main/python/resource_management/libraries/functions/version_select_util.py
index ff00a1f..79dc874 100644
--- a/ambari-common/src/main/python/resource_management/libraries/functions/version_select_util.py
+++ b/ambari-common/src/main/python/resource_management/libraries/functions/version_select_util.py
@@ -78,6 +78,46 @@ def get_component_version(stack_name, component_name):
 
   return version
 
+def get_component_version_with_stack_selector(stack_selector_path, component_name):
+  """
+   For specific cases where we deal with HDP add on services from a management pack, the version
+   needs to be determined by using the specific stack selector itself.
+   :param stack_selector_path: /usr/bin/hdf-select
+   Comes from the service which calls for this function.
+   :param component_name: Component name as a string necessary to get the version
+   :return: Returns a string if found, e.g., 2.2.1.0-2175, otherwise, returns None
+   This function can be called by custom services, hence should not be removed
+  """
+  version = None
+  out = None
+  code = -1
+  if not stack_selector_path:
+    Logger.error("Stack selector path not provided")
+  elif not os.path.exists(stack_selector_path):
+    Logger.error("Stack selector path does not exist")
+  elif not component_name:
+    Logger.error("Component name not provided")
+  else:
+    tmpfile = tempfile.NamedTemporaryFile()
+
+    get_stack_comp_version_cmd = ""
+    try:
+      # This is necessary because Ubuntu returns "stdin: is not a tty", see AMBARI-8088
+      with open(tmpfile.name, 'r') as file:
+        get_stack_comp_version_cmd = '{0} status {1} > {2}' .format(stack_selector_path, component_name, tmpfile.name)
+        code, stdoutdata = shell.call(get_stack_comp_version_cmd, quiet=True)
+        out = file.read()
+
+      if code != 0 or out is None:
+        raise Exception("Code is nonzero or output is empty")
+
+      Logger.debug("Command: %s\nOutput: %s" % (get_stack_comp_version_cmd, str(out)))
+      matches = re.findall(r"([\d\.]+\-\d+)", out)
+      version = matches[0] if matches and len(matches) > 0 else None
+    except Exception, e:
+      Logger.error("Could not determine stack version for component %s by calling '%s'. Return Code: %s, Output: %s." %
+                   (component_name, get_stack_comp_version_cmd, str(code), str(out)))
+  return version
 
 def get_versions_from_stack_root(stack_root):
   """


[36/50] [abbrv] ambari git commit: AMBARI-20733. /var/log/krb5kdc.log is growing rapidly on the KDC server (echekanskiy)

Posted by ao...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/2a00812a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_zkfc.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_zkfc.py b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_zkfc.py
index e952108..127a045 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_zkfc.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_zkfc.py
@@ -381,104 +381,4 @@ class TestZkfc(RMFTestCase):
         environment = {'HADOOP_LIBEXEC_DIR': '/usr/lib/hadoop/libexec'},
         not_if = "ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E test -f /var/run/hadoop/hdfs/hadoop-hdfs-zkfc.pid && ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E pgrep -F /var/run/hadoop/hdfs/hadoop-hdfs-zkfc.pid",
     )
-    self.assertNoMoreResources()
-
-
-  @patch("resource_management.libraries.functions.security_commons.build_expectations")
-  @patch("resource_management.libraries.functions.security_commons.get_params_from_filesystem")
-  @patch("resource_management.libraries.functions.security_commons.validate_security_config_properties")
-  @patch("resource_management.libraries.functions.security_commons.cached_kinit_executor")
-  @patch("resource_management.libraries.script.Script.put_structured_out")
-  def test_security_status(self, put_structured_out_mock, cached_kinit_executor_mock, validate_security_config_mock, get_params_mock, build_exp_mock):
-
-    # Test that function works when is called with correct parameters
-    security_params = {
-      'core-site': {
-        'hadoop.security.authentication': 'kerberos'
-      }
-    }
-
-    props_value_check = {"hadoop.security.authentication": "kerberos",
-                         "hadoop.security.authorization": "true"}
-    props_empty_check = ["hadoop.security.auth_to_local"]
-    props_read_check = None
-
-    result_issues = []
-
-    get_params_mock.return_value = security_params
-    validate_security_config_mock.return_value = result_issues
-
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/zkfc_slave.py",
-                       classname = "ZkfcSlave",
-                       command = "security_status",
-                       config_file="secured.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-
-    build_exp_mock.assert_called_with('core-site', props_value_check, props_empty_check, props_read_check)
-    put_structured_out_mock.assert_called_with({"securityState": "SECURED_KERBEROS"})
-    cached_kinit_executor_mock.called_with('/usr/bin/kinit',
-                                           self.config_dict['configurations']['hadoop-env']['hdfs_user'],
-                                           self.config_dict['configurations']['hadoop-env']['hdfs_user_keytab'],
-                                           self.config_dict['configurations']['hadoop-env']['hdfs_user_principal_name'],
-                                           self.config_dict['hostname'],
-                                           '/tmp')
-
-    # Testing that the exception throw by cached_executor is caught
-    cached_kinit_executor_mock.reset_mock()
-    cached_kinit_executor_mock.side_effect = Exception("Invalid command")
-
-    try:
-        self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/zkfc_slave.py",
-                           classname = "ZkfcSlave",
-                           command = "security_status",
-                           config_file="secured.json",
-                           stack_version = self.STACK_VERSION,
-                           target = RMFTestCase.TARGET_COMMON_SERVICES
-        )
-    except:
-      self.assertTrue(True)
-
-    # Testing when hadoop.security.authentication is simple
-    security_params['core-site']['hadoop.security.authentication'] = 'simple'
-
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/zkfc_slave.py",
-                       classname = "ZkfcSlave",
-                       command = "security_status",
-                       config_file="secured.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-
-    put_structured_out_mock.assert_called_with({"securityState": "UNSECURED"})
-    security_params['core-site']['hadoop.security.authentication'] = 'kerberos'
-
-    # Testing with not empty result_issues
-    result_issues_with_params = {
-      'hdfs-site': "Something bad happened"
-    }
-
-    validate_security_config_mock.reset_mock()
-    get_params_mock.reset_mock()
-    validate_security_config_mock.return_value = result_issues_with_params
-    get_params_mock.return_value = security_params
-
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/zkfc_slave.py",
-                       classname = "ZkfcSlave",
-                       command = "security_status",
-                       config_file="secured.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-    put_structured_out_mock.assert_called_with({"securityState": "UNSECURED"})
-
-    # Testing with empty hdfs_user_principal and hdfs_user_keytab
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/zkfc_slave.py",
-                       classname = "ZkfcSlave",
-                       command = "security_status",
-                       config_file="default.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-    put_structured_out_mock.assert_called_with({"securityState": "UNSECURED"})
\ No newline at end of file
+    self.assertNoMoreResources()
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/2a00812a/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hive_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hive_server.py b/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hive_server.py
index 6592590..ae2ec86 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hive_server.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hive_server.py
@@ -853,118 +853,6 @@ From source with checksum 150f554beae04f76f814f59549dead8b"""
 
     self.assertNoMoreResources()
 
-  @patch("resource_management.libraries.functions.security_commons.build_expectations")
-  @patch("resource_management.libraries.functions.security_commons.get_params_from_filesystem")
-  @patch("resource_management.libraries.functions.security_commons.validate_security_config_properties")
-  @patch("resource_management.libraries.functions.security_commons.cached_kinit_executor")
-  @patch("resource_management.libraries.script.Script.put_structured_out")
-  def test_security_status(self, put_structured_out_mock, cached_kinit_executor_mock, validate_security_config_mock, get_params_mock, build_exp_mock):
-    # Test that function works when is called with correct parameters
-
-    security_params = {
-      'hive-site': {
-        "hive.server2.authentication": "KERBEROS",
-        "hive.metastore.sasl.enabled": "true",
-        "hive.security.authorization.enabled": "true",
-        "hive.server2.authentication.kerberos.keytab": "path/to/keytab",
-        "hive.server2.authentication.kerberos.principal": "principal",
-        "hive.server2.authentication.spnego.keytab": "path/to/spnego_keytab",
-        "hive.server2.authentication.spnego.principal": "spnego_principal"
-      }
-    }
-    result_issues = []
-    props_value_check = {"hive.server2.authentication": "KERBEROS",
-                         "hive.metastore.sasl.enabled": "true",
-                         "hive.security.authorization.enabled": "true"}
-    props_empty_check = ["hive.server2.authentication.kerberos.keytab",
-                         "hive.server2.authentication.kerberos.principal",
-                         "hive.server2.authentication.spnego.principal",
-                         "hive.server2.authentication.spnego.keytab"]
-
-    props_read_check = ["hive.server2.authentication.kerberos.keytab",
-                        "hive.server2.authentication.spnego.keytab"]
-
-    get_params_mock.return_value = security_params
-    validate_security_config_mock.return_value = result_issues
-
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hive_server.py",
-                       classname = "HiveServer",
-                       command = "security_status",
-                       config_file="../../2.1/configs/secured.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-
-    get_params_mock.assert_called_with('/usr/hdp/current/hive-server2/conf', {'hive-site.xml': "XML"})
-    build_exp_mock.assert_called_with('hive-site', props_value_check, props_empty_check, props_read_check)
-    put_structured_out_mock.assert_called_with({"securityState": "SECURED_KERBEROS"})
-    self.assertTrue(cached_kinit_executor_mock.call_count, 2)
-    cached_kinit_executor_mock.assert_called_with('/usr/bin/kinit',
-                                                  self.config_dict['configurations']['hive-env']['hive_user'],
-                                                  security_params['hive-site']['hive.server2.authentication.spnego.keytab'],
-                                                  security_params['hive-site']['hive.server2.authentication.spnego.principal'],
-                                                  self.config_dict['hostname'],
-                                                  '/tmp')
-
-    # Testing that the exception throw by cached_executor is caught
-    cached_kinit_executor_mock.reset_mock()
-    cached_kinit_executor_mock.side_effect = Exception("Invalid command")
-
-    try:
-      self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hive_server.py",
-                         classname = "HiveServer",
-                         command = "security_status",
-                         config_file="../../2.1/configs/secured.json",
-                         stack_version = self.STACK_VERSION,
-                         target = RMFTestCase.TARGET_COMMON_SERVICES
-      )
-    except:
-      self.assertTrue(True)
-
-    # Testing with a security_params which doesn't contains startup
-    empty_security_params = {}
-    cached_kinit_executor_mock.reset_mock()
-    get_params_mock.reset_mock()
-    put_structured_out_mock.reset_mock()
-    get_params_mock.return_value = empty_security_params
-
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hive_server.py",
-                       classname = "HiveServer",
-                       command = "security_status",
-                       config_file="../../2.1/configs/secured.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-    put_structured_out_mock.assert_called_with({"securityIssuesFound": "Keytab file or principal are not set property."})
-
-    # Testing with not empty result_issues
-    result_issues_with_params = {}
-    result_issues_with_params['hive-site']="Something bad happened"
-
-    validate_security_config_mock.reset_mock()
-    get_params_mock.reset_mock()
-    validate_security_config_mock.return_value = result_issues_with_params
-    get_params_mock.return_value = security_params
-
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hive_server.py",
-                       classname = "HiveServer",
-                       command = "security_status",
-                       config_file="../../2.1/configs/secured.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-    put_structured_out_mock.assert_called_with({"securityState": "UNSECURED"})
-
-    # Testing with security_enable = false
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hive_server.py",
-                       classname = "HiveServer",
-                       command = "security_status",
-                       config_file="../../2.1/configs/default.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-    put_structured_out_mock.assert_called_with({"securityState": "UNSECURED"})
-
   @patch("resource_management.libraries.functions.copy_tarball.copy_to_hdfs")
   def test_pre_upgrade_restart(self, copy_to_hdfs_mock):
     copy_to_hdfs_mock.return_value = True

http://git-wip-us.apache.org/repos/asf/ambari/blob/2a00812a/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_webhcat_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_webhcat_server.py b/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_webhcat_server.py
index 718ad4d..03dd391 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_webhcat_server.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_webhcat_server.py
@@ -276,122 +276,6 @@ class TestWebHCatServer(RMFTestCase):
                               mode = 0644,
                               )
 
-  @patch("resource_management.libraries.functions.security_commons.build_expectations")
-  @patch("resource_management.libraries.functions.security_commons.get_params_from_filesystem")
-  @patch("resource_management.libraries.functions.security_commons.validate_security_config_properties")
-  @patch("resource_management.libraries.functions.security_commons.cached_kinit_executor")
-  @patch("resource_management.libraries.script.Script.put_structured_out")
-  def test_security_status(self, put_structured_out_mock, cached_kinit_executor_mock, validate_security_config_mock, get_params_mock, build_exp_mock):
-    # Test that function works when is called with correct parameters
-
-    security_params = {
-      'webhcat-site': {
-        "templeton.kerberos.secret": "secret",
-        "templeton.kerberos.keytab": 'path/to/keytab',
-        "templeton.kerberos.principal": "principal"
-      },
-      "hive-site": {
-        "hive.server2.authentication": "KERBEROS",
-        "hive.metastore.sasl.enabled": "true",
-        "hive.security.authorization.enabled": "true"
-      }
-    }
-    result_issues = []
-    webhcat_props_value_check = {"templeton.kerberos.secret": "secret"}
-    webhcat_props_empty_check = ["templeton.kerberos.keytab",
-                         "templeton.kerberos.principal"]
-    webhcat_props_read_check = ["templeton.kerberos.keytab"]
-
-    hive_props_value_check = {"hive.server2.authentication": "KERBEROS",
-                         "hive.metastore.sasl.enabled": "true",
-                         "hive.security.authorization.enabled": "true"}
-    hive_props_empty_check = None
-    hive_props_read_check = None
-
-    get_params_mock.return_value = security_params
-    validate_security_config_mock.return_value = result_issues
-
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/webhcat_server.py",
-                       classname = "WebHCatServer",
-                       command = "security_status",
-                       config_file="../../2.1/configs/secured.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-
-    build_exp_mock.assert_called_with('hive-site', hive_props_value_check, hive_props_empty_check, hive_props_read_check)
-    # get_params_mock.assert_called_with(status_params.hive_conf_dir, {'hive-site.xml': "XML"})
-    get_params_mock.assert_called_with('/usr/hdp/current/hive-webhcat/conf', {'webhcat-site.xml': "XML"})
-    put_structured_out_mock.assert_called_with({"securityState": "SECURED_KERBEROS"})
-    self.assertTrue(cached_kinit_executor_mock.call_count, 2)
-    cached_kinit_executor_mock.assert_called_with('/usr/bin/kinit',
-                                                  self.config_dict['configurations']['hive-env']['webhcat_user'],
-                                                  security_params['webhcat-site']['templeton.kerberos.keytab'],
-                                                  security_params['webhcat-site']['templeton.kerberos.principal'],
-                                                  self.config_dict['hostname'],
-                                                  '/tmp')
-
-    # Testing that the exception throw by cached_executor is caught
-    cached_kinit_executor_mock.reset_mock()
-    cached_kinit_executor_mock.side_effect = Exception("Invalid command")
-
-    try:
-      self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/webhcat_server.py",
-                         classname = "WebHCatServer",
-                         command = "security_status",
-                         config_file="../../2.1/configs/secured.json",
-                         stack_version = self.STACK_VERSION,
-                         target = RMFTestCase.TARGET_COMMON_SERVICES
-      )
-    except:
-      self.assertTrue(True)
-
-    # Testing with a security_params which doesn't contains startup
-    empty_security_params = {}
-    cached_kinit_executor_mock.reset_mock()
-    get_params_mock.reset_mock()
-    put_structured_out_mock.reset_mock()
-    get_params_mock.return_value = empty_security_params
-
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/webhcat_server.py",
-                       classname = "WebHCatServer",
-                       command = "security_status",
-                       config_file="../../2.1/configs/secured.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-    put_structured_out_mock.assert_called_with({"securityIssuesFound": "Keytab file or principal are not set property."})
-
-    # Testing with not empty result_issues
-    result_issues_with_params = {
-      'hive-site': "Something bad happened"
-    }
-
-    validate_security_config_mock.reset_mock()
-    get_params_mock.reset_mock()
-    validate_security_config_mock.return_value = result_issues_with_params
-    get_params_mock.return_value = security_params
-
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/webhcat_server.py",
-                       classname = "WebHCatServer",
-                       command = "security_status",
-                       config_file="../../2.1/configs/secured.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-    put_structured_out_mock.assert_called_with({"securityState": "UNSECURED"})
-
-    # Testing with security_enable = false
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/webhcat_server.py",
-                       classname = "WebHCatServer",
-                       command = "security_status",
-                       config_file="../../2.1/configs/default.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-    put_structured_out_mock.assert_called_with({"securityState": "UNSECURED"})
-
-
   def test_pre_upgrade_restart(self):
     config_file = self.get_src_folder()+"/test/python/stacks/2.0.6/configs/default.json"
     with open(config_file, "r") as f:

http://git-wip-us.apache.org/repos/asf/ambari/blob/2a00812a/ambari-server/src/test/python/stacks/2.0.6/OOZIE/test_oozie_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/OOZIE/test_oozie_server.py b/ambari-server/src/test/python/stacks/2.0.6/OOZIE/test_oozie_server.py
index f5bd4aa..426c36a 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/OOZIE/test_oozie_server.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/OOZIE/test_oozie_server.py
@@ -1150,119 +1150,6 @@ class TestOozieServer(RMFTestCase):
     )
     self.assert_configure_default()
 
-  @patch("resource_management.libraries.functions.security_commons.build_expectations")
-  @patch("resource_management.libraries.functions.security_commons.get_params_from_filesystem")
-  @patch("resource_management.libraries.functions.security_commons.validate_security_config_properties")
-  @patch("resource_management.libraries.functions.security_commons.cached_kinit_executor")
-  @patch("resource_management.libraries.script.Script.put_structured_out")
-  def test_security_status(self, put_structured_out_mock, cached_kinit_executor_mock, validate_security_config_mock, get_params_mock, build_exp_mock):
-    # Test that function works when is called with correct parameters
-    security_params = {
-      "oozie-site": {
-        "oozie.authentication.type": "kerberos",
-        "oozie.service.AuthorizationService.security.enabled": "true",
-        "oozie.service.HadoopAccessorService.kerberos.enabled": "true",
-        "local.realm": "EXAMPLE.COM",
-        "oozie.authentication.kerberos.principal": "principal",
-        "oozie.authentication.kerberos.keytab": "/path/to_keytab",
-        "oozie.service.HadoopAccessorService.kerberos.principal": "principal",
-        "oozie.service.HadoopAccessorService.keytab.file": "/path/to_keytab"}
-    }
-
-    result_issues = []
-    props_value_check = {"oozie.authentication.type": "kerberos",
-                         "oozie.service.AuthorizationService.security.enabled": "true",
-                         "oozie.service.HadoopAccessorService.kerberos.enabled": "true"}
-    props_empty_check = [ "local.realm",
-                          "oozie.authentication.kerberos.principal",
-                          "oozie.authentication.kerberos.keytab",
-                          "oozie.service.HadoopAccessorService.kerberos.principal",
-                          "oozie.service.HadoopAccessorService.keytab.file"]
-    props_read_check = None
-
-    get_params_mock.return_value = security_params
-    validate_security_config_mock.return_value = result_issues
-
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/oozie_server.py",
-                       classname = "OozieServer",
-                       command = "security_status",
-                       config_file="secured.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-
-    get_params_mock.assert_called_with("/etc/oozie/conf", {'oozie-site.xml': 'XML'})
-    build_exp_mock.assert_called_with('oozie-site', props_value_check, props_empty_check, props_read_check)
-    put_structured_out_mock.assert_called_with({"securityState": "SECURED_KERBEROS"})
-    self.assertTrue(cached_kinit_executor_mock.call_count, 2)
-    cached_kinit_executor_mock.assert_called_with('/usr/bin/kinit',
-                                                  self.config_dict['configurations']['oozie-env']['oozie_user'],
-                                                  security_params['oozie-site']['oozie.service.HadoopAccessorService.keytab.file'],
-                                                  security_params['oozie-site']['oozie.service.HadoopAccessorService.kerberos.principal'],
-                                                  self.config_dict['hostname'],
-                                                  '/tmp')
-
-    # Testing that the exception thrown by cached_kinit_executor is caught
-    cached_kinit_executor_mock.reset_mock()
-    cached_kinit_executor_mock.side_effect = Exception("Invalid command")
-
-    try:
-      self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/oozie_server.py",
-                         classname = "OozieServer",
-                         command = "security_status",
-                         config_file="secured.json",
-                         stack_version = self.STACK_VERSION,
-                         target = RMFTestCase.TARGET_COMMON_SERVICES
-      )
-    except:
-      self.assertTrue(True)
-
-    # Testing with a security_params which doesn't contain oozie-site
-    empty_security_params = {}
-    cached_kinit_executor_mock.reset_mock()
-    get_params_mock.reset_mock()
-    put_structured_out_mock.reset_mock()
-    get_params_mock.return_value = empty_security_params
-
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/oozie_server.py",
-                       classname = "OozieServer",
-                       command = "security_status",
-                       config_file="secured.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-    put_structured_out_mock.assert_called_with({"securityIssuesFound": "Keytab file or principal are not set property."})
-
-    # Testing with a non-empty result_issues
-    result_issues_with_params = {
-      'oozie-site': "Something bad happened"
-    }
-
-    validate_security_config_mock.reset_mock()
-    get_params_mock.reset_mock()
-    validate_security_config_mock.return_value = result_issues_with_params
-    get_params_mock.return_value = security_params
-
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/oozie_server.py",
-                       classname = "OozieServer",
-                       command = "security_status",
-                       config_file="secured.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-    put_structured_out_mock.assert_called_with({"securityState": "UNSECURED"})
-
-    # Testing with security_enabled = false
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/oozie_server.py",
-                       classname = "OozieServer",
-                       command = "security_status",
-                       config_file="default.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-    put_structured_out_mock.assert_called_with({"securityState": "UNSECURED"})
-
-
   @patch("os.path.isdir")
   @patch("os.path.exists")
   @patch("os.path.isfile")

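One quirk in the removed scenarios above: the "exception is caught" step wraps
executeScript in try/except with self.assertTrue(True) in the handler, an
assertion that can never fail, so the step verified nothing when no exception
escaped. A stricter idiom asserts the observable outcome instead; a minimal
sketch with a stand-in function, not Ambari code:

  import unittest
  from unittest.mock import MagicMock

  def run_status_check(executor):
    # Stand-in for the script logic under test: executor failures must be
    # swallowed and reported as UNSECURED, not crash the status command.
    try:
      executor("kinit")
    except Exception:
      return "UNSECURED"
    return "SECURED_KERBEROS"

  class ExceptionHandlingExample(unittest.TestCase):
    def test_executor_failure_is_handled(self):
      failing = MagicMock(side_effect=Exception("Invalid command"))
      self.assertEqual(run_status_check(failing), "UNSECURED")

  if __name__ == "__main__":
    unittest.main()
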
http://git-wip-us.apache.org/repos/asf/ambari/blob/2a00812a/ambari-server/src/test/python/stacks/2.0.6/YARN/test_historyserver.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/YARN/test_historyserver.py b/ambari-server/src/test/python/stacks/2.0.6/YARN/test_historyserver.py
index fc7fcd8..53218d5 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/YARN/test_historyserver.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/YARN/test_historyserver.py
@@ -741,112 +741,6 @@ class TestHistoryServer(RMFTestCase):
                               group = 'hadoop',
                               )
 
-  @patch("resource_management.libraries.functions.security_commons.build_expectations")
-  @patch("resource_management.libraries.functions.security_commons.get_params_from_filesystem")
-  @patch("resource_management.libraries.functions.security_commons.validate_security_config_properties")
-  @patch("resource_management.libraries.functions.security_commons.cached_kinit_executor")
-  @patch("resource_management.libraries.script.Script.put_structured_out")
-  def test_security_status(self, put_structured_out_mock, cached_kinit_executor_mock, validate_security_config_mock, get_params_mock, build_exp_mock):
-    # Test that the function works when called with correct parameters
-
-    security_params = {
-      "mapred-site": {
-        'mapreduce.jobhistory.keytab': "/path/to/keytab1",
-        'mapreduce.jobhistory.principal': "principal1",
-        'mapreduce.jobhistory.webapp.spnego-keytab-file': "/path/to/keytab2",
-        'mapreduce.jobhistory.webapp.spnego-principal': "principal2"
-      }
-    }
-    result_issues = []
-
-    get_params_mock.return_value = security_params
-    validate_security_config_mock.return_value = result_issues
-
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/historyserver.py",
-                       classname="HistoryServer",
-                       command="security_status",
-                       config_file="secured.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-
-    get_params_mock.assert_called_with("/etc/hadoop/conf", {'mapred-site.xml': 'XML'})
-    build_exp_mock.assert_called_with('mapred-site',
-                                      None,
-                                      [
-                                        'mapreduce.jobhistory.keytab',
-                                        'mapreduce.jobhistory.principal',
-                                        'mapreduce.jobhistory.webapp.spnego-keytab-file',
-                                        'mapreduce.jobhistory.webapp.spnego-principal'
-                                        ],
-                                      None)
-    put_structured_out_mock.assert_called_with({"securityState": "SECURED_KERBEROS"})
-    self.assertTrue(cached_kinit_executor_mock.call_count, 2)
-    cached_kinit_executor_mock.assert_called_with('/usr/bin/kinit',
-                                                  self.config_dict['configurations']['mapred-env']['mapred_user'],
-                                                  security_params['mapred-site']['mapreduce.jobhistory.webapp.spnego-keytab-file'],
-                                                  security_params['mapred-site']['mapreduce.jobhistory.webapp.spnego-principal'],
-                                                  self.config_dict['hostname'],
-                                                  '/tmp')
-
-    # Testing that the exception thrown by cached_kinit_executor is caught
-    cached_kinit_executor_mock.reset_mock()
-    cached_kinit_executor_mock.side_effect = Exception("Invalid command")
-
-    try:
-      self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/historyserver.py",
-                         classname="HistoryServer",
-                         command="security_status",
-                         config_file="secured.json",
-                         stack_version = self.STACK_VERSION,
-                         target = RMFTestCase.TARGET_COMMON_SERVICES
-      )
-    except:
-      self.assertTrue(True)
-
-    # Testing with a security_params which doesn't contain mapred-site
-    empty_security_params = {}
-    cached_kinit_executor_mock.reset_mock()
-    get_params_mock.reset_mock()
-    put_structured_out_mock.reset_mock()
-    get_params_mock.return_value = empty_security_params
-
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/historyserver.py",
-                       classname="HistoryServer",
-                       command="security_status",
-                       config_file="secured.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-    put_structured_out_mock.assert_called_with({"securityIssuesFound": "Keytab file or principal not set."})
-
-    # Testing with a non-empty result_issues
-    result_issues_with_params = {'mapred-site': "Something bad happened"}
-
-    validate_security_config_mock.reset_mock()
-    get_params_mock.reset_mock()
-    validate_security_config_mock.return_value = result_issues_with_params
-    get_params_mock.return_value = security_params
-
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/historyserver.py",
-                       classname="HistoryServer",
-                       command="security_status",
-                       config_file="secured.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-    put_structured_out_mock.assert_called_with({"securityState": "UNSECURED"})
-
-    # Testing with security_enabled = false
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/historyserver.py",
-                       classname="HistoryServer",
-                       command="security_status",
-                       config_file="default.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-    put_structured_out_mock.assert_called_with({"securityState": "UNSECURED"})
-
   def assert_call_to_get_hadoop_conf_dir(self):
     # From call to conf_select.get_hadoop_conf_dir()
     self.assertResourceCalled("Execute", ("cp", "-R", "-p", "/etc/hadoop/conf", "/etc/hadoop/conf.backup"),

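Another recurring subtlety in the removed code:
self.assertTrue(cached_kinit_executor_mock.call_count, 2) treats the 2 as the
assertion message, so any nonzero call_count passes. If an exact count was
intended, the usual check looks like this short sketch:

  from unittest.mock import MagicMock

  executor = MagicMock()
  executor("first")
  executor("second")

  # assertTrue(executor.call_count, 2) only tests truthiness; the 2 becomes
  # the failure message. An exact-count assertion:
  assert executor.call_count == 2
  # or, inside a TestCase: self.assertEqual(executor.call_count, 2)
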
http://git-wip-us.apache.org/repos/asf/ambari/blob/2a00812a/ambari-server/src/test/python/stacks/2.0.6/YARN/test_nodemanager.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/YARN/test_nodemanager.py b/ambari-server/src/test/python/stacks/2.0.6/YARN/test_nodemanager.py
index fbc1b85..6fc5bae 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/YARN/test_nodemanager.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/YARN/test_nodemanager.py
@@ -632,115 +632,6 @@ class TestNodeManager(RMFTestCase):
       self.assertTrue(mocks_dict['call'].called)
       self.assertEqual(mocks_dict['call'].call_count,1)
 
-  @patch("resource_management.libraries.functions.security_commons.build_expectations")
-  @patch("resource_management.libraries.functions.security_commons.get_params_from_filesystem")
-  @patch("resource_management.libraries.functions.security_commons.validate_security_config_properties")
-  @patch("resource_management.libraries.functions.security_commons.cached_kinit_executor")
-  @patch("resource_management.libraries.script.Script.put_structured_out")
-  def test_security_status(self, put_structured_out_mock, cached_kinit_executor_mock, validate_security_config_mock, get_params_mock, build_exp_mock):
-    # Test that the function works when called with correct parameters
-
-    security_params = {
-      'yarn-site': {
-        'yarn.nodemanager.keytab': 'path/to/nodemanager/keytab',
-        'yarn.nodemanager.principal': 'nodemanager_principal',
-        'yarn.nodemanager.webapp.spnego-keytab-file': 'path/to/nodemanager/webapp/keytab',
-        'yarn.nodemanager.webapp.spnego-principal': 'nodemanager_webapp_principal'
-      }
-    }
-    result_issues = []
-    props_value_check = {"yarn.timeline-service.http-authentication.type": "kerberos",
-                         "yarn.acl.enable": "true"}
-    props_empty_check = ["yarn.nodemanager.principal",
-                         "yarn.nodemanager.keytab",
-                         "yarn.nodemanager.webapp.spnego-principal",
-                         "yarn.nodemanager.webapp.spnego-keytab-file"]
-
-    props_read_check = ["yarn.nodemanager.keytab",
-                        "yarn.nodemanager.webapp.spnego-keytab-file"]
-
-    get_params_mock.return_value = security_params
-    validate_security_config_mock.return_value = result_issues
-
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/nodemanager.py",
-                       classname="Nodemanager",
-                       command="security_status",
-                       config_file="secured.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-
-    build_exp_mock.assert_called_with('yarn-site', props_value_check, props_empty_check, props_read_check)
-    put_structured_out_mock.assert_called_with({"securityState": "SECURED_KERBEROS"})
-    self.assertTrue(cached_kinit_executor_mock.call_count, 2)
-    cached_kinit_executor_mock.assert_called_with('/usr/bin/kinit',
-                                                  self.config_dict['configurations']['yarn-env']['yarn_user'],
-                                                  security_params['yarn-site']['yarn.nodemanager.webapp.spnego-keytab-file'],
-                                                  security_params['yarn-site']['yarn.nodemanager.webapp.spnego-principal'],
-                                                  self.config_dict['hostname'],
-                                                  '/tmp')
-
-    # Testing that the exception thrown by cached_kinit_executor is caught
-    cached_kinit_executor_mock.reset_mock()
-    cached_kinit_executor_mock.side_effect = Exception("Invalid command")
-
-    try:
-      self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/nodemanager.py",
-                         classname="Nodemanager",
-                         command="security_status",
-                         config_file="secured.json",
-                         stack_version = self.STACK_VERSION,
-                         target = RMFTestCase.TARGET_COMMON_SERVICES
-      )
-    except:
-      self.assertTrue(True)
-
-    # Testing with a security_params which doesn't contain yarn-site
-    empty_security_params = {}
-    cached_kinit_executor_mock.reset_mock()
-    get_params_mock.reset_mock()
-    put_structured_out_mock.reset_mock()
-    get_params_mock.return_value = empty_security_params
-
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/nodemanager.py",
-                       classname="Nodemanager",
-                       command="security_status",
-                       config_file="secured.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-    put_structured_out_mock.assert_called_with({"securityIssuesFound": "Keytab file or principal are not set property."})
-
-    # Testing with a non-empty result_issues
-    result_issues_with_params = {
-      'yarn-site': "Something bad happened"
-    }
-
-    validate_security_config_mock.reset_mock()
-    get_params_mock.reset_mock()
-    validate_security_config_mock.return_value = result_issues_with_params
-    get_params_mock.return_value = security_params
-
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/nodemanager.py",
-                       classname="Nodemanager",
-                       command="security_status",
-                       config_file="secured.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-    put_structured_out_mock.assert_called_with({"securityState": "UNSECURED"})
-
-    # Testing with security_enabled = false
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/nodemanager.py",
-                       classname="Nodemanager",
-                       command="security_status",
-                       config_file="default.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-    put_structured_out_mock.assert_called_with({"securityState": "UNSECURED"})
-
-  
   @patch.object(resource_management.libraries.functions, "get_stack_version", new = MagicMock(return_value='2.3.0.0-1234'))
   def test_pre_upgrade_restart_23(self):
     config_file = self.get_src_folder()+"/test/python/stacks/2.0.6/configs/default.json"

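The props_value_check / props_empty_check / props_read_check triples passed to
build_expectations describe, in order: properties that must hold an exact
value, properties that must merely be non-empty, and keytab paths that must be
readable on disk. A hypothetical re-implementation of that contract, included
only to make the fixtures above easier to read (this is not Ambari's actual
security_commons code):

  import os

  def check_expectations(section, props_value_check, props_empty_check,
                         props_read_check):
    """Return a list of issue strings for one configuration section (sketch)."""
    issues = []
    for key, expected in (props_value_check or {}).items():
      if section.get(key) != expected:
        issues.append("%s should be set to %s" % (key, expected))
    for key in (props_empty_check or []):
      if not section.get(key):
        issues.append("%s must not be empty" % key)
    for key in (props_read_check or []):
      path = section.get(key)
      if not (path and os.access(path, os.R_OK)):
        issues.append("%s must point to a readable file" % key)
    return issues

An empty list corresponds to the result_issues = [] fixture, i.e. the
SECURED_KERBEROS path through these tests.
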
http://git-wip-us.apache.org/repos/asf/ambari/blob/2a00812a/ambari-server/src/test/python/stacks/2.0.6/YARN/test_resourcemanager.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/YARN/test_resourcemanager.py b/ambari-server/src/test/python/stacks/2.0.6/YARN/test_resourcemanager.py
index a63caeb..ed5ee2c 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/YARN/test_resourcemanager.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/YARN/test_resourcemanager.py
@@ -525,114 +525,6 @@ class TestResourceManager(RMFTestCase):
                               group = 'hadoop',
                               )
 
-  @patch("resource_management.libraries.functions.security_commons.build_expectations")
-  @patch("resource_management.libraries.functions.security_commons.get_params_from_filesystem")
-  @patch("resource_management.libraries.functions.security_commons.validate_security_config_properties")
-  @patch("resource_management.libraries.functions.security_commons.cached_kinit_executor")
-  @patch("resource_management.libraries.script.Script.put_structured_out")
-  def test_security_status(self, put_structured_out_mock, cached_kinit_executor_mock, validate_security_config_mock, get_params_mock, build_exp_mock):
-    # Test that the function works when called with correct parameters
-
-    security_params = {
-      'yarn-site': {
-        'yarn.resourcemanager.keytab': '/path/to/resourcemanager/keytab',
-        'yarn.resourcemanager.principal': 'resourcemanager_principal',
-        'yarn.resourcemanager.webapp.spnego-keytab-file': 'path/to/resourcemanager/webapp/keytab',
-        'yarn.resourcemanager.webapp.spnego-principal': 'resourcemanager_webapp_principal'
-      }
-    }
-    result_issues = []
-    props_value_check = {"yarn.timeline-service.http-authentication.type": "kerberos",
-                         "yarn.acl.enable": "true"}
-    props_empty_check = ["yarn.resourcemanager.principal",
-                         "yarn.resourcemanager.keytab",
-                         "yarn.resourcemanager.webapp.spnego-principal",
-                         "yarn.resourcemanager.webapp.spnego-keytab-file"]
-
-    props_read_check = ["yarn.resourcemanager.keytab",
-                        "yarn.resourcemanager.webapp.spnego-keytab-file"]
-
-    get_params_mock.return_value = security_params
-    validate_security_config_mock.return_value = result_issues
-
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/resourcemanager.py",
-                       classname="Resourcemanager",
-                       command="security_status",
-                       config_file="secured.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-
-    build_exp_mock.assert_called_with('yarn-site', props_value_check, props_empty_check, props_read_check)
-    put_structured_out_mock.assert_called_with({"securityState": "SECURED_KERBEROS"})
-    self.assertTrue(cached_kinit_executor_mock.call_count, 2)
-    cached_kinit_executor_mock.assert_called_with('/usr/bin/kinit',
-                                                  self.config_dict['configurations']['yarn-env']['yarn_user'],
-                                                  security_params['yarn-site']['yarn.resourcemanager.webapp.spnego-keytab-file'],
-                                                  security_params['yarn-site']['yarn.resourcemanager.webapp.spnego-principal'],
-                                                  self.config_dict['hostname'],
-                                                  '/tmp')
-
-    # Testing that the exception thrown by cached_kinit_executor is caught
-    cached_kinit_executor_mock.reset_mock()
-    cached_kinit_executor_mock.side_effect = Exception("Invalid command")
-
-    try:
-      self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/resourcemanager.py",
-                         classname="Resourcemanager",
-                         command="security_status",
-                         config_file="secured.json",
-                         stack_version = self.STACK_VERSION,
-                         target = RMFTestCase.TARGET_COMMON_SERVICES
-      )
-    except:
-      self.assertTrue(True)
-
-    # Testing with a security_params which doesn't contain yarn-site
-    empty_security_params = {}
-    cached_kinit_executor_mock.reset_mock()
-    get_params_mock.reset_mock()
-    put_structured_out_mock.reset_mock()
-    get_params_mock.return_value = empty_security_params
-
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/resourcemanager.py",
-                       classname="Resourcemanager",
-                       command="security_status",
-                       config_file="secured.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-    put_structured_out_mock.assert_called_with({"securityIssuesFound": "Keytab file or principal are not set property."})
-
-    # Testing with a non-empty result_issues
-    result_issues_with_params = {
-      'yarn-site': "Something bad happened"
-    }
-
-    validate_security_config_mock.reset_mock()
-    get_params_mock.reset_mock()
-    validate_security_config_mock.return_value = result_issues_with_params
-    get_params_mock.return_value = security_params
-
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/resourcemanager.py",
-                       classname="Resourcemanager",
-                       command="security_status",
-                       config_file="secured.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-    put_structured_out_mock.assert_called_with({"securityState": "UNSECURED"})
-
-    # Testing with security_enabled = false
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/resourcemanager.py",
-                       classname="Resourcemanager",
-                       command="security_status",
-                       config_file="default.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-    put_structured_out_mock.assert_called_with({"securityState": "UNSECURED"})
-
   def test_pre_upgrade_restart_23(self):
     config_file = self.get_src_folder()+"/test/python/stacks/2.0.6/configs/default.json"
     with open(config_file, "r") as f:

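cached_kinit_executor, mocked in every method above, is always asserted with
the argument shape (kinit_path, user, keytab, principal, hostname, temp_dir).
A plausible minimal sketch of such a helper, caching successful kinit runs per
(principal, keytab) so repeated status checks stay cheap; it assumes a plain
shell-out to kinit, which may well differ from the real implementation:

  import subprocess

  _kinit_cache = set()

  def cached_kinit_executor(kinit_path, user, keytab, principal,
                            hostname, temp_dir):
    """Obtain a ticket once per (principal, keytab); raise on failure (sketch)."""
    # hostname and temp_dir are accepted for signature parity but unused in
    # this sketch; the real helper may use them, e.g. for cache placement.
    key = (principal, keytab)
    if key in _kinit_cache:
      return
    # Run kinit as the service user against the keytab.
    subprocess.check_call(
        ["su", "-s", "/bin/bash", user, "-c",
         "%s -kt %s %s" % (kinit_path, keytab, principal)])
    _kinit_cache.add(key)
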
http://git-wip-us.apache.org/repos/asf/ambari/blob/2a00812a/ambari-server/src/test/python/stacks/2.0.6/ZOOKEEPER/test_zookeeper_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/ZOOKEEPER/test_zookeeper_server.py b/ambari-server/src/test/python/stacks/2.0.6/ZOOKEEPER/test_zookeeper_server.py
index b9ebea2..6d38a67 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/ZOOKEEPER/test_zookeeper_server.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/ZOOKEEPER/test_zookeeper_server.py
@@ -247,109 +247,6 @@ class TestZookeeperServer(RMFTestCase):
       group = 'hadoop',
     )
 
-  @patch("resource_management.libraries.functions.security_commons.build_expectations")
-  @patch("resource_management.libraries.functions.security_commons.get_params_from_filesystem")
-  @patch("resource_management.libraries.functions.security_commons.validate_security_config_properties")
-  @patch("resource_management.libraries.functions.security_commons.cached_kinit_executor")
-  @patch("resource_management.libraries.script.Script.put_structured_out")
-  def test_security_status(self, put_structured_out_mock, cached_kinit_executor_mock, validate_security_config_mock, get_params_mock, build_exp_mock):
-    # Test that the function works when called with correct parameters
-
-    security_params = {
-      'zookeeper_jaas': {
-        'Server': {
-          'keyTab': 'path/to/zookeeper/service/keytab',
-          'principal': 'zookeeper_keytab'
-        }
-      }
-    }
-    result_issues = []
-    props_value_check = None
-    props_empty_check = ['Server/keyTab', 'Server/principal']
-    props_read_check = ['Server/keyTab']
-
-    get_params_mock.return_value = security_params
-    validate_security_config_mock.return_value = result_issues
-
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/zookeeper_server.py",
-                       classname = "ZookeeperServer",
-                       command = "security_status",
-                       config_file = "secured.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-
-    build_exp_mock.assert_called_with('zookeeper_jaas', props_value_check, props_empty_check, props_read_check)
-    put_structured_out_mock.assert_called_with({"securityState": "SECURED_KERBEROS"})
-    self.assertTrue(cached_kinit_executor_mock.call_count, 2)
-    cached_kinit_executor_mock.assert_called_with('/usr/bin/kinit',
-                                                  self.config_dict['configurations']['zookeeper-env']['zk_user'],
-                                                  security_params['zookeeper_jaas']['Server']['keyTab'],
-                                                  security_params['zookeeper_jaas']['Server']['principal'],
-                                                  self.config_dict['hostname'],
-                                                  '/tmp')
-
-    # Testing that the exception thrown by cached_kinit_executor is caught
-    cached_kinit_executor_mock.reset_mock()
-    cached_kinit_executor_mock.side_effect = Exception("Invalid command")
-
-    try:
-      self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/zookeeper_server.py",
-                       classname = "ZookeeperServer",
-                       command = "security_status",
-                       config_file = "secured.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-      )
-    except:
-      self.assertTrue(True)
-
-    # Testing with a security_params which doesn't contain zookeeper_jaas
-    empty_security_params = {}
-    cached_kinit_executor_mock.reset_mock()
-    get_params_mock.reset_mock()
-    put_structured_out_mock.reset_mock()
-    get_params_mock.return_value = empty_security_params
-
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/zookeeper_server.py",
-                       classname = "ZookeeperServer",
-                       command = "security_status",
-                       config_file = "secured.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-    put_structured_out_mock.assert_called_with({"securityIssuesFound": "Keytab file or principal are not set property."})
-
-    # Testing with a non-empty result_issues
-    result_issues_with_params = {
-      'zookeeper_jaas': "Something bad happened"
-    }
-
-    validate_security_config_mock.reset_mock()
-    get_params_mock.reset_mock()
-    validate_security_config_mock.return_value = result_issues_with_params
-    get_params_mock.return_value = security_params
-
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/zookeeper_server.py",
-                       classname = "ZookeeperServer",
-                       command = "security_status",
-                       config_file = "secured.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-    put_structured_out_mock.assert_called_with({"securityState": "UNSECURED"})
-
-    # Testing with security_enabled = false
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/zookeeper_server.py",
-                       classname = "ZookeeperServer",
-                       command = "security_status",
-                       config_file = "default.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-    put_structured_out_mock.assert_called_with({"securityState": "UNSECURED"})
-
-
   def test_pre_upgrade_restart(self):
     config_file = self.get_src_folder()+"/test/python/stacks/2.0.6/configs/default.json"
     with open(config_file, "r") as f:

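The ZooKeeper (and, below, Storm) fixtures use JAAS-style nested sections, and
the expectation keys take a 'Section/property' path form such as
'Server/keyTab'. A small helper showing how such paths resolve against the
nested security_params dict; illustrative only, not Ambari code:

  def resolve(params, path):
    """Resolve a 'Server/keyTab'-style path in a nested dict (sketch)."""
    node = params
    for part in path.split("/"):
      if not isinstance(node, dict) or part not in node:
        return None
      node = node[part]
    return node

  jaas = {'Server': {'keyTab': 'path/to/zookeeper/service/keytab',
                     'principal': 'zookeeper_keytab'}}
  assert resolve(jaas, 'Server/keyTab') == 'path/to/zookeeper/service/keytab'
  assert resolve(jaas, 'Server/missing') is None
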
http://git-wip-us.apache.org/repos/asf/ambari/blob/2a00812a/ambari-server/src/test/python/stacks/2.1/FALCON/test_falcon_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.1/FALCON/test_falcon_client.py b/ambari-server/src/test/python/stacks/2.1/FALCON/test_falcon_client.py
index 5f88b6b..b9747a2 100644
--- a/ambari-server/src/test/python/stacks/2.1/FALCON/test_falcon_client.py
+++ b/ambari-server/src/test/python/stacks/2.1/FALCON/test_falcon_client.py
@@ -91,30 +91,6 @@ class TestFalconClient(RMFTestCase):
                           )
     self.assertNoMoreResources()
 
-  @patch("resource_management.libraries.script.Script.put_structured_out")
-  def test_security_status(self, put_structured_out_mock):
-    # Test that the function works when called with correct parameters
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/falcon_client.py",
-                       classname="FalconClient",
-                       command="security_status",
-                       config_file="secured.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-
-    put_structured_out_mock.assert_called_with({"securityState": "SECURED_KERBEROS"})
-
-    # Testing with security_enabled = false
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/falcon_client.py",
-                       classname="FalconClient",
-                       command="security_status",
-                       config_file="default.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-
-    put_structured_out_mock.assert_called_with({"securityState": "UNSECURED"})
-
   def test_pre_upgrade_restart(self):
     config_file = self.get_src_folder()+"/test/python/stacks/2.0.6/configs/default.json"
     with open(config_file, "r") as f:

http://git-wip-us.apache.org/repos/asf/ambari/blob/2a00812a/ambari-server/src/test/python/stacks/2.1/FALCON/test_falcon_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.1/FALCON/test_falcon_server.py b/ambari-server/src/test/python/stacks/2.1/FALCON/test_falcon_server.py
index feba0c4..44da365 100644
--- a/ambari-server/src/test/python/stacks/2.1/FALCON/test_falcon_server.py
+++ b/ambari-server/src/test/python/stacks/2.1/FALCON/test_falcon_server.py
@@ -414,115 +414,6 @@ class TestFalconServer(RMFTestCase):
     )
     self.assertNoMoreResources()
 
-  @patch("resource_management.libraries.functions.security_commons.build_expectations")
-  @patch("resource_management.libraries.functions.security_commons.get_params_from_filesystem")
-  @patch("resource_management.libraries.functions.security_commons.validate_security_config_properties")
-  @patch("resource_management.libraries.functions.security_commons.cached_kinit_executor")
-  @patch("resource_management.libraries.script.Script.put_structured_out")
-  def test_security_status(self, put_structured_out_mock, cached_kinit_executor_mock, validate_security_config_mock, get_params_mock, build_exp_mock):
-    # Test that the function works when called with correct parameters
-
-    security_params = {
-      'startup': {
-        '*.falcon.service.authentication.kerberos.keytab': 'path/to/falcon/service/keytab',
-        '*.falcon.service.authentication.kerberos.principal': 'falcon_service_keytab',
-        '*.falcon.http.authentication.kerberos.keytab': 'path/to/falcon/http/keytab',
-        '*.falcon.http.authentication.kerberos.principal': 'falcon_http_principal'
-      }
-    }
-    result_issues = []
-    props_value_check = {"*.falcon.authentication.type": "kerberos",
-                         "*.falcon.http.authentication.type": "kerberos"}
-    props_empty_check = ["*.falcon.service.authentication.kerberos.principal",
-                         "*.falcon.service.authentication.kerberos.keytab",
-                         "*.falcon.http.authentication.kerberos.principal",
-                         "*.falcon.http.authentication.kerberos.keytab"]
-
-    props_read_check = ["*.falcon.service.authentication.kerberos.keytab",
-                        "*.falcon.http.authentication.kerberos.keytab"]
-
-    get_params_mock.return_value = security_params
-    validate_security_config_mock.return_value = result_issues
-
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/falcon_server.py",
-                       classname="FalconServer",
-                       command="security_status",
-                       config_file="secured.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-
-    get_params_mock.assert_called_with('/etc/falcon/conf', {'startup.properties': 'PROPERTIES'})
-    build_exp_mock.assert_called_with('startup', props_value_check, props_empty_check, props_read_check)
-    put_structured_out_mock.assert_called_with({"securityState": "SECURED_KERBEROS"})
-    self.assertTrue(cached_kinit_executor_mock.call_count, 2)
-    cached_kinit_executor_mock.assert_called_with('/usr/bin/kinit',
-                                                  self.config_dict['configurations']['falcon-env']['falcon_user'],
-                                                  security_params['startup']['*.falcon.http.authentication.kerberos.keytab'],
-                                                  security_params['startup']['*.falcon.http.authentication.kerberos.principal'],
-                                                  self.config_dict['hostname'],
-                                                  '/tmp')
-
-    # Testing that the exception thrown by cached_kinit_executor is caught
-    cached_kinit_executor_mock.reset_mock()
-    cached_kinit_executor_mock.side_effect = Exception("Invalid command")
-
-    try:
-      self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/falcon_server.py",
-                       classname="FalconServer",
-                       command="security_status",
-                       config_file="secured.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-      )
-    except:
-      self.assertTrue(True)
-
-    # Testing with a security_params which doesn't contain startup
-    empty_security_params = {}
-    cached_kinit_executor_mock.reset_mock()
-    get_params_mock.reset_mock()
-    put_structured_out_mock.reset_mock()
-    get_params_mock.return_value = empty_security_params
-
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/falcon_server.py",
-                       classname="FalconServer",
-                       command="security_status",
-                       config_file="secured.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-    put_structured_out_mock.assert_called_with({"securityIssuesFound": "Keytab file or principal are not set property."})
-
-    # Testing with a non-empty result_issues
-    result_issues_with_params = {
-      'startup': "Something bad happened"
-    }
-
-    validate_security_config_mock.reset_mock()
-    get_params_mock.reset_mock()
-    validate_security_config_mock.return_value = result_issues_with_params
-    get_params_mock.return_value = security_params
-
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/falcon_server.py",
-                       classname="FalconServer",
-                       command="security_status",
-                       config_file="secured.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-    put_structured_out_mock.assert_called_with({"securityState": "UNSECURED"})
-
-    # Testing with security_enabled = false
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/falcon_server.py",
-                       classname="FalconServer",
-                       command="security_status",
-                       config_file="default.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-    put_structured_out_mock.assert_called_with({"securityState": "UNSECURED"})
-
   @patch('os.path.isfile', new=MagicMock(return_value=True))
   def test_pre_upgrade_restart(self):
     config_file = self.get_src_folder()+"/test/python/stacks/2.2/configs/falcon-upgrade.json"

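Note how get_params_from_filesystem is asserted with a different type hint per
service: {'oozie-site.xml': 'XML'} and {'mapred-site.xml': 'XML'} earlier,
{'startup.properties': 'PROPERTIES'} for Falcon. A hypothetical dispatcher
illustrating that contract, mapping each file to a parser by its declared
type; written against Python 3 for brevity and not Ambari's actual code:

  import os
  import configparser
  import xml.etree.ElementTree as ET

  def get_params_from_filesystem(conf_dir, file_types):
    """Parse each named config file into a dict keyed by its basename (sketch)."""
    params = {}
    for filename, ftype in file_types.items():
      path = os.path.join(conf_dir, filename)
      section = os.path.splitext(filename)[0]
      if ftype == 'XML':
        # Hadoop-style <configuration><property><name/><value/></property>
        root = ET.parse(path).getroot()
        params[section] = {p.findtext('name'): p.findtext('value')
                           for p in root.iter('property')}
      elif ftype == 'PROPERTIES':
        cp = configparser.ConfigParser(delimiters=('=', ':'))
        cp.optionxform = str  # Java property keys are case-sensitive
        with open(path) as f:
          cp.read_string('[all]\n' + f.read())
        params[section] = dict(cp['all'])
    return params
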
http://git-wip-us.apache.org/repos/asf/ambari/blob/2a00812a/ambari-server/src/test/python/stacks/2.1/HIVE/test_hive_metastore.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.1/HIVE/test_hive_metastore.py b/ambari-server/src/test/python/stacks/2.1/HIVE/test_hive_metastore.py
index a159cd0..94f639a 100644
--- a/ambari-server/src/test/python/stacks/2.1/HIVE/test_hive_metastore.py
+++ b/ambari-server/src/test/python/stacks/2.1/HIVE/test_hive_metastore.py
@@ -406,119 +406,6 @@ class TestHiveMetastore(RMFTestCase):
                               mode = 0755,
                               )
 
-  @patch("resource_management.libraries.functions.security_commons.build_expectations")
-  @patch("resource_management.libraries.functions.security_commons.get_params_from_filesystem")
-  @patch("resource_management.libraries.functions.security_commons.validate_security_config_properties")
-  @patch("resource_management.libraries.functions.security_commons.cached_kinit_executor")
-  @patch("resource_management.libraries.script.Script.put_structured_out")
-  def test_security_status(self, put_structured_out_mock, cached_kinit_executor_mock, validate_security_config_mock, get_params_mock, build_exp_mock):
-    # Test that the function works when called with correct parameters
-
-    security_params = {
-      'hive-site': {
-        'hive.server2.authentication': "KERBEROS",
-        'hive.metastore.sasl.enabled': "true",
-        'hive.security.authorization.enabled': 'true',
-        'hive.metastore.kerberos.keytab.file': 'path/to/keytab',
-        'hive.metastore.kerberos.principal': 'principal'
-      }
-    }
-    result_issues = []
-    props_value_check = {
-      'hive.server2.authentication': "KERBEROS",
-      'hive.metastore.sasl.enabled': "true",
-      'hive.security.authorization.enabled': 'true'
-    }
-    props_empty_check = [
-      'hive.metastore.kerberos.keytab.file',
-      'hive.metastore.kerberos.principal'
-    ]
-    props_read_check = [
-      'hive.metastore.kerberos.keytab.file'
-    ]
-
-    get_params_mock.return_value = security_params
-    validate_security_config_mock.return_value = result_issues
-
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hive_metastore.py",
-                       classname = "HiveMetastore",
-                       command = "security_status",
-                       config_file="../../2.1/configs/secured.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-
-    get_params_mock.assert_called_with("/usr/hdp/current/hive-server2/conf", {'hive-site.xml': "XML"})
-    build_exp_mock.assert_called_with('hive-site', props_value_check, props_empty_check, props_read_check)
-    put_structured_out_mock.assert_called_with({"securityState": "SECURED_KERBEROS"})
-    self.assertTrue(cached_kinit_executor_mock.call_count, 2)
-    cached_kinit_executor_mock.assert_called_with('/usr/bin/kinit',
-                                                  self.config_dict['configurations']['hive-env']['hive_user'],
-                                                  security_params['hive-site']['hive.metastore.kerberos.keytab.file'],
-                                                  security_params['hive-site']['hive.metastore.kerberos.principal'],
-                                                  self.config_dict['hostname'],
-                                                  '/tmp')
-
-    # Testing that the exception thrown by cached_kinit_executor is caught
-    cached_kinit_executor_mock.reset_mock()
-    cached_kinit_executor_mock.side_effect = Exception("Invalid command")
-
-    try:
-      self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hive_metastore.py",
-                         classname = "HiveMetastore",
-                         command = "security_status",
-                         config_file="../../2.1/configs/secured.json",
-                         stack_version = self.STACK_VERSION,
-                         target = RMFTestCase.TARGET_COMMON_SERVICES
-      )
-    except:
-      self.assertTrue(True)
-
-    # Testing with a security_params which doesn't contain hive-site
-    empty_security_params = {}
-    cached_kinit_executor_mock.reset_mock()
-    get_params_mock.reset_mock()
-    put_structured_out_mock.reset_mock()
-    get_params_mock.return_value = empty_security_params
-
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hive_metastore.py",
-                       classname = "HiveMetastore",
-                       command = "security_status",
-                       config_file="../../2.1/configs/secured.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-    put_structured_out_mock.assert_called_with({"securityIssuesFound": "Keytab file or principal are not set property."})
-
-    # Testing with a non-empty result_issues
-    result_issues_with_params = {
-      'hive-site': "Something bad happened"
-    }
-
-    validate_security_config_mock.reset_mock()
-    get_params_mock.reset_mock()
-    validate_security_config_mock.return_value = result_issues_with_params
-    get_params_mock.return_value = security_params
-
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hive_metastore.py",
-                       classname = "HiveMetastore",
-                       command = "security_status",
-                       config_file="../../2.1/configs/secured.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-    put_structured_out_mock.assert_called_with({"securityState": "UNSECURED"})
-
-    # Testing with security_enabled = false
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hive_metastore.py",
-                       classname = "HiveMetastore",
-                       command = "security_status",
-                       config_file="../../2.1/configs/default.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-    put_structured_out_mock.assert_called_with({"securityState": "UNSECURED"})
-
   def test_pre_upgrade_restart(self):
     config_file = self.get_src_folder()+"/test/python/stacks/2.0.6/configs/default.json"
     with open(config_file, "r") as f:

http://git-wip-us.apache.org/repos/asf/ambari/blob/2a00812a/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_drpc_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_drpc_server.py b/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_drpc_server.py
index 3e10611..f208d3a 100644
--- a/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_drpc_server.py
+++ b/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_drpc_server.py
@@ -176,107 +176,3 @@ class TestStormDrpcServer(TestStormBase):
     self.assertEquals(
       ('ambari-python-wrap', '/usr/bin/conf-select', 'create-conf-dir', '--package', 'storm', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
        mocks_dict['call'].call_args_list[0][0][0])
-
-
-  @patch("resource_management.libraries.functions.security_commons.build_expectations")
-  @patch("resource_management.libraries.functions.security_commons.get_params_from_filesystem")
-  @patch("resource_management.libraries.functions.security_commons.validate_security_config_properties")
-  @patch("resource_management.libraries.functions.security_commons.cached_kinit_executor")
-  @patch("resource_management.libraries.script.Script.put_structured_out")
-  def test_security_status(self, put_structured_out_mock, cached_kinit_executor_mock, validate_security_config_mock, get_params_mock, build_exp_mock):
-    # Test that the function works when called with correct parameters
-
-    security_params = {
-      'storm_jaas': {
-        'StormServer': {
-          'keyTab': 'path/to/storm/service/keytab',
-          'principal': 'storm_keytab'
-        }
-      }
-    }
-    result_issues = []
-
-    props_value_check = None
-    props_empty_check = ['StormServer/keyTab', 'StormServer/principal']
-    props_read_check = ['StormServer/keyTab']
-
-    get_params_mock.return_value = security_params
-    validate_security_config_mock.return_value = result_issues
-
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/drpc_server.py",
-                       classname = "DrpcServer",
-                       command = "security_status",
-                       config_file="secured.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-
-    build_exp_mock.assert_called_with('storm_jaas', props_value_check, props_empty_check, props_read_check)
-    put_structured_out_mock.assert_called_with({"securityState": "SECURED_KERBEROS"})
-    self.assertTrue(cached_kinit_executor_mock.call_count, 2)
-    cached_kinit_executor_mock.assert_called_with('/usr/bin/kinit',
-                                                  self.config_dict['configurations']['storm-env']['storm_user'],
-                                                  security_params['storm_jaas']['StormServer']['keyTab'],
-                                                  security_params['storm_jaas']['StormServer']['principal'],
-                                                  self.config_dict['hostname'],
-                                                  '/tmp')
-
-    # Testing that the exception thrown by cached_kinit_executor is caught
-    cached_kinit_executor_mock.reset_mock()
-    cached_kinit_executor_mock.side_effect = Exception("Invalid command")
-
-    try:
-      self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/drpc_server.py",
-                        classname = "DrpcServer",
-                        command = "security_status",
-                        config_file="secured.json",
-                        stack_version = self.STACK_VERSION,
-                        target = RMFTestCase.TARGET_COMMON_SERVICES
-      )
-    except:
-      self.assertTrue(True)
-
-    # Testing with a security_params which doesn't contain storm_jaas
-    empty_security_params = {}
-    cached_kinit_executor_mock.reset_mock()
-    get_params_mock.reset_mock()
-    put_structured_out_mock.reset_mock()
-    get_params_mock.return_value = empty_security_params
-
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/drpc_server.py",
-                       classname = "DrpcServer",
-                       command = "security_status",
-                       config_file="secured.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-    put_structured_out_mock.assert_called_with({"securityIssuesFound": "Keytab file or principal are not set property."})
-
-    # Testing with a non-empty result_issues
-    result_issues_with_params = {
-      'storm_jaas': "Something bad happened"
-    }
-
-    validate_security_config_mock.reset_mock()
-    get_params_mock.reset_mock()
-    validate_security_config_mock.return_value = result_issues_with_params
-    get_params_mock.return_value = security_params
-
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/drpc_server.py",
-                       classname = "DrpcServer",
-                       command = "security_status",
-                       config_file="secured.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-    put_structured_out_mock.assert_called_with({"securityState": "UNSECURED"})
-
-    # Testing with security_enabled = false
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/drpc_server.py",
-                       classname = "DrpcServer",
-                       command = "security_status",
-                       config_file="default.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-    put_structured_out_mock.assert_called_with({"securityState": "UNSECURED"})

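Each removed method chains four scenarios inside one test body and relies on
reset_mock() between them. One detail worth knowing when reading the code
above: reset_mock() clears call history but, by default, not side_effect or
return_value, so the Exception side effect set in the second scenario stays
armed afterwards; the later scenarios still pass, presumably because the
script under test catches executor failures, as the exception scenario itself
demonstrates. A compact illustration:

  from unittest.mock import MagicMock

  mock = MagicMock(side_effect=Exception("Invalid command"))
  try:
    mock()
  except Exception:
    pass

  mock.reset_mock()            # call history gone...
  assert mock.call_count == 0
  mock.side_effect = None      # ...but side_effect must be cleared explicitly
  mock("scenario-2")
  mock.assert_called_once_with("scenario-2")
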
http://git-wip-us.apache.org/repos/asf/ambari/blob/2a00812a/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_nimbus.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_nimbus.py b/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_nimbus.py
index 40972f9..35f057c 100644
--- a/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_nimbus.py
+++ b/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_nimbus.py
@@ -261,106 +261,3 @@ class TestStormNimbus(TestStormBase):
       ('ambari-python-wrap', '/usr/bin/conf-select', 'create-conf-dir', '--package', 'storm', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
        mocks_dict['call'].call_args_list[0][0][0])
     self.assertNoMoreResources()
-    
-  @patch("resource_management.libraries.functions.security_commons.build_expectations")
-  @patch("resource_management.libraries.functions.security_commons.get_params_from_filesystem")
-  @patch("resource_management.libraries.functions.security_commons.validate_security_config_properties")
-  @patch("resource_management.libraries.functions.security_commons.cached_kinit_executor")
-  @patch("resource_management.libraries.script.Script.put_structured_out")
-  def test_security_status(self, put_structured_out_mock, cached_kinit_executor_mock, validate_security_config_mock, get_params_mock, build_exp_mock):
-    # Test that the function works when called with correct parameters
-
-    security_params = {
-      'storm_jaas': {
-        'StormServer': {
-          'keyTab': 'path/to/storm/service/keytab',
-          'principal': 'storm_keytab'
-        }
-      }
-    }
-    result_issues = []
-
-    props_value_check = None
-    props_empty_check = ['StormServer/keyTab', 'StormServer/principal']
-    props_read_check = ['StormServer/keyTab']
-
-    get_params_mock.return_value = security_params
-    validate_security_config_mock.return_value = result_issues
-
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/nimbus.py",
-                       classname = "Nimbus",
-                       command = "security_status",
-                       config_file="secured.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-
-    build_exp_mock.assert_called_with('storm_jaas', props_value_check, props_empty_check, props_read_check)
-    put_structured_out_mock.assert_called_with({"securityState": "SECURED_KERBEROS"})
-    self.assertTrue(cached_kinit_executor_mock.call_count, 2)
-    cached_kinit_executor_mock.assert_called_with('/usr/bin/kinit',
-                                                  self.config_dict['configurations']['storm-env']['storm_user'],
-                                                  security_params['storm_jaas']['StormServer']['keyTab'],
-                                                  security_params['storm_jaas']['StormServer']['principal'],
-                                                  self.config_dict['hostname'],
-                                                  '/tmp')
-
-    # Testing that the exception thrown by cached_kinit_executor is caught
-    cached_kinit_executor_mock.reset_mock()
-    cached_kinit_executor_mock.side_effect = Exception("Invalid command")
-
-    try:
-      self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/nimbus.py",
-                       classname = "Nimbus",
-                       command = "security_status",
-                       config_file="secured.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-      )
-    except:
-      self.assertTrue(True)
-
-    # Testing with a security_params which doesn't contain storm_jaas
-    empty_security_params = {}
-    cached_kinit_executor_mock.reset_mock()
-    get_params_mock.reset_mock()
-    put_structured_out_mock.reset_mock()
-    get_params_mock.return_value = empty_security_params
-
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/nimbus.py",
-                       classname = "Nimbus",
-                       command = "security_status",
-                       config_file="secured.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-    put_structured_out_mock.assert_called_with({"securityIssuesFound": "Keytab file or principal are not set property."})
-
-    # Testing with a non-empty result_issues
-    result_issues_with_params = {
-      'storm_jaas': "Something bad happened"
-    }
-    validate_security_config_mock.reset_mock()
-    get_params_mock.reset_mock()
-    validate_security_config_mock.return_value = result_issues_with_params
-    get_params_mock.return_value = security_params
-
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/nimbus.py",
-                       classname = "Nimbus",
-                       command = "security_status",
-                       config_file="secured.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-    put_structured_out_mock.assert_called_with({"securityState": "UNSECURED"})
-
-    # Testing with security_enabled = false
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/nimbus.py",
-                       classname = "Nimbus",
-                       command = "security_status",
-                       config_file="default.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-    put_structured_out_mock.assert_called_with({"securityState": "UNSECURED"})
-    self.assertNoMoreResources()
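
The deleted test above is wired entirely through mock: stacked @patch decorators are applied bottom-up, so the innermost decorator supplies the first mock argument after self. Note also that the deleted assertion self.assertTrue(cached_kinit_executor_mock.call_count, 2) never compared anything; assertTrue treats its second argument as a failure message, so it passed whenever call_count was truthy, and assertEqual was the intended check. A minimal, standalone sketch of the decorator wiring (plain unittest plus the mock package, unittest.mock in Python 3; this is illustrative, not Ambari's RMFTestCase):

import unittest
from mock import patch  # the standalone mock package; unittest.mock on Python 3


class TestPatchWiring(unittest.TestCase):
  @patch("os.path.exists")   # outermost decorator -> last mock argument
  @patch("os.path.isdir")    # innermost decorator -> first mock argument
  def test_patch_order(self, isdir_mock, exists_mock):
    isdir_mock.return_value = True
    exists_mock.side_effect = Exception("boom")  # like the kinit failure case
    import os.path
    self.assertTrue(os.path.isdir("/anywhere"))
    self.assertRaises(Exception, os.path.exists, "/anywhere")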

http://git-wip-us.apache.org/repos/asf/ambari/blob/2a00812a/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_ui_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_ui_server.py b/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_ui_server.py
index d6497ed..3ac38c7 100644
--- a/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_ui_server.py
+++ b/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_ui_server.py
@@ -246,85 +246,3 @@ class TestStormUiServer(TestStormBase):
     self.assertEquals(
       ('ambari-python-wrap', '/usr/bin/conf-select', 'create-conf-dir', '--package', 'storm', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
        mocks_dict['call'].call_args_list[0][0][0])
-
-
-  @patch("resource_management.libraries.functions.security_commons.build_expectations")
-  @patch("resource_management.libraries.functions.security_commons.validate_security_config_properties")
-  @patch("resource_management.libraries.functions.security_commons.cached_kinit_executor")
-  @patch("resource_management.libraries.script.Script.put_structured_out")
-  def test_security_status(self, put_structured_out_mock, cached_kinit_executor_mock, validate_security_config_mock, build_exp_mock):
-    # Test that the function works when it is called with correct parameters
-    result_issues = []
-
-    security_params = {
-      'storm_ui': {
-        'storm_ui_principal_name': 'HTTP/_HOST',
-        'storm_ui_keytab': '/etc/security/keytabs/spnego.service.keytab'
-      }
-    }
-    props_value_check = None
-    props_empty_check = ['storm_ui_principal_name', 'storm_ui_keytab']
-    props_read_check = ['storm_ui_keytab']
-
-    validate_security_config_mock.return_value = result_issues
-
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/ui_server.py",
-                       classname = "UiServer",
-                       command = "security_status",
-                       config_file="secured.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-
-    build_exp_mock.assert_called_with('storm_ui', props_value_check, props_empty_check, props_read_check)
-    put_structured_out_mock.assert_called_with({"securityState": "SECURED_KERBEROS"})
-    self.assertTrue(cached_kinit_executor_mock.call_count, 2)
-
-    cached_kinit_executor_mock.assert_called_with('/usr/bin/kinit',
-                                                  self.config_dict['configurations']['storm-env']['storm_user'],
-                                                  security_params['storm_ui']['storm_ui_keytab'],
-                                                  security_params['storm_ui']['storm_ui_principal_name'],
-                                                  self.config_dict['hostname'],
-                                                  '/tmp')
-
-    # Testing that the exception thrown by cached_executor is caught
-    cached_kinit_executor_mock.reset_mock()
-    cached_kinit_executor_mock.side_effect = Exception("Invalid command")
-
-    try:
-      self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/ui_server.py",
-                        classname = "UiServer",
-                        command = "security_status",
-                        config_file="secured.json",
-                        stack_version = self.STACK_VERSION,
-                        target = RMFTestCase.TARGET_COMMON_SERVICES
-      )
-    except:
-      self.assertTrue(True)
-
-    # Testing with non-empty result_issues
-    result_issues_with_params = {}
-    result_issues_with_params['storm_ui']="Something bad happened"
-
-    validate_security_config_mock.reset_mock()
-    validate_security_config_mock.return_value = result_issues_with_params
-
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/ui_server.py",
-                       classname = "UiServer",
-                       command = "security_status",
-                       config_file="secured.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-    put_structured_out_mock.assert_called_with({"securityState": "UNSECURED"})
-
-    # Testing with security_enabled = false
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/ui_server.py",
-                       classname = "UiServer",
-                       command = "security_status",
-                       config_file="default.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-    put_structured_out_mock.assert_called_with({"securityState": "UNSECURED"})
-
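
The bare try/except blocks in the deleted tests (except: self.assertTrue(True)) swallow every exception type and assert nothing when no exception is raised at all. A hedged sketch of the more direct idiom, had these tests been kept (execute_script is a hypothetical stand-in for RMFTestCase.executeScript, which takes many more arguments):

import unittest


def execute_script(command):
  # stand-in for the test harness call; raises like the mocked
  # cached_kinit_executor with side_effect = Exception("Invalid command")
  raise Exception("Invalid command")


class TestSecurityStatusFailure(unittest.TestCase):
  def test_kinit_failure_is_surfaced(self):
    # fails the test if no exception occurs, unlike try/except/assertTrue(True)
    self.assertRaises(Exception, execute_script, "security_status")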


[40/50] [abbrv] ambari git commit: AMBARI-20802. StackAdvisor should not recommend configuration types for services that are not present in the cluster.(vbrodetskyi)

Posted by ao...@apache.org.
AMBARI-20802. StackAdvisor should not recommend configuration types for services that are not present in the cluster.(vbrodetskyi)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/4b27feb7
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/4b27feb7
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/4b27feb7

Branch: refs/heads/branch-3.0-perf
Commit: 4b27feb7fdc3adcefed6c89dd5ba9c357c5b6944
Parents: 2a00812
Author: Vitaly Brodetskyi <vb...@hortonworks.com>
Authored: Fri Apr 21 18:15:30 2017 +0300
Committer: Andrew Onishuk <ao...@hortonworks.com>
Committed: Mon Apr 24 14:59:56 2017 +0300

----------------------------------------------------------------------
 .../ambari/server/topology/AmbariContext.java   |  3 +
 .../HIVE/0.12.0.2.0/configuration/hive-env.xml  |  1 +
 .../HIVE/0.12.0.2.0/package/scripts/hive.py     | 25 +++++---
 .../stacks/HDP/2.1/services/stack_advisor.py    | 11 ++--
 .../stacks/HDP/2.2/services/stack_advisor.py    | 61 ++++++++++----------
 .../stacks/2.1/HIVE/test_hive_metastore.py      |  7 +++
 .../stacks/2.1/common/test_stack_advisor.py     |  7 ++-
 .../stacks/2.6/common/test_stack_advisor.py     |  6 +-
 8 files changed, 76 insertions(+), 45 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/4b27feb7/ambari-server/src/main/java/org/apache/ambari/server/topology/AmbariContext.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/AmbariContext.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/AmbariContext.java
index 6d12402..5e0d707 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/topology/AmbariContext.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/AmbariContext.java
@@ -427,6 +427,9 @@ public class AmbariContext {
       for (String actualConfigType : updatedConfigTypes) {
         // get the actual cluster config for comparison
         DesiredConfig actualConfig = cluster.getDesiredConfigs().get(actualConfigType);
+        if (actualConfig == null && actualConfigType.equals("core-site")) {
+          continue;
+        }
         if (!actualConfig.getTag().equals(TopologyManager.TOPOLOGY_RESOLVED_TAG)) {
           // if any expected config is not resolved, deployment must wait
           LOG.info("Config type " + actualConfigType + " not resolved yet, Blueprint deployment will wait until configuration update is completed");

http://git-wip-us.apache.org/repos/asf/ambari/blob/4b27feb7/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/configuration/hive-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/configuration/hive-env.xml b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/configuration/hive-env.xml
index 5f56617..caa598a 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/configuration/hive-env.xml
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/configuration/hive-env.xml
@@ -220,6 +220,7 @@
     <display-name>hive-env template</display-name>
     <description>This is the jinja template for hive-env.sh file</description>
     <value>
+ export JAVA_HOME={{java64_home}}
  if [ "$SERVICE" = "cli" ]; then
    if [ -z "$DEBUG" ]; then
      export HADOOP_OPTS="$HADOOP_OPTS -XX:NewRatio=12 -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:+UseParNewGC -XX:-UseGCOverheadLimit"
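
The one-line change above prepends a JAVA_HOME export to the hive-env.sh jinja template. A hedged sketch of how the {{java64_home}} placeholder resolves at render time, using plain jinja2 (Ambari renders these template properties through its own resource_management wrappers, and the JDK path below is only an example value):

from jinja2 import Template

hive_env_fragment = " export JAVA_HOME={{java64_home}}\n"
rendered = Template(hive_env_fragment).render(java64_home="/usr/jdk64/jdk1.8.0_112")
print(rendered)  # prints: export JAVA_HOME=/usr/jdk64/jdk1.8.0_112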

http://git-wip-us.apache.org/repos/asf/ambari/blob/4b27feb7/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive.py b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive.py
index b7b04a2..959e111 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive.py
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive.py
@@ -300,6 +300,15 @@ def setup_metastore():
        content=StaticFile('startMetastore.sh')
   )
 
+  if not is_empty(params.hive_exec_scratchdir):
+    dirPathStr = urlparse(params.hive_exec_scratchdir).path
+    pathComponents = dirPathStr.split("/")
+    if dirPathStr.startswith("/tmp") and len(pathComponents) > 2:
+      Directory (params.hive_exec_scratchdir,
+                 owner = params.hive_user,
+                 create_parents = True,
+                 mode=0777)
+
 def create_metastore_schema():
   import params
 
@@ -345,14 +354,14 @@ def fill_conf_dir(component_conf_dir):
             mode=mode_identified_for_dir
   )
 
-  XmlConfig("mapred-site.xml",
-            conf_dir=component_conf_dir,
-            configurations=params.config['configurations']['mapred-site'],
-            configuration_attributes=params.config['configuration_attributes']['mapred-site'],
-            owner=params.hive_user,
-            group=params.user_group,
-            mode=mode_identified_for_file)
-
+  if 'mapred-site' in params.config['configurations']:
+    XmlConfig("mapred-site.xml",
+              conf_dir=component_conf_dir,
+              configurations=params.config['configurations']['mapred-site'],
+              configuration_attributes=params.config['configuration_attributes']['mapred-site'],
+              owner=params.hive_user,
+              group=params.user_group,
+              mode=mode_identified_for_file)
 
   File(format("{component_conf_dir}/hive-default.xml.template"),
        owner=params.hive_user,
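
Two guards are added to hive.py above: the scratch directory is created only when hive.exec.scratchdir is a /tmp subpath, and mapred-site.xml is rendered only when the cluster actually carries mapred-site configuration. A standalone sketch of the path check, with the same semantics as the urlparse logic in the diff (Python 2 urlparse, as these scripts use):

from urlparse import urlparse


def should_create_scratchdir(scratchdir):
  path = urlparse(scratchdir).path       # strips an hdfs://host:port prefix
  components = path.split("/")           # '/tmp/hive' -> ['', 'tmp', 'hive']
  # require /tmp plus at least one extra component, so /tmp itself
  # is never created or chmod'ed to 0777
  return path.startswith("/tmp") and len(components) > 2


assert should_create_scratchdir("hdfs://nn:8020/tmp/hive")
assert should_create_scratchdir("/tmp/hive")
assert not should_create_scratchdir("/tmp")
assert not should_create_scratchdir("/user/hive/scratch")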

http://git-wip-us.apache.org/repos/asf/ambari/blob/4b27feb7/ambari-server/src/main/resources/stacks/HDP/2.1/services/stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1/services/stack_advisor.py b/ambari-server/src/main/resources/stacks/HDP/2.1/services/stack_advisor.py
index 866d4cb..ddd795f 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.1/services/stack_advisor.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1/services/stack_advisor.py
@@ -210,12 +210,13 @@ class HDP21StackAdvisor(HDP206StackAdvisor):
             webHcatSitePropertyAttributes("webhcat.proxyuser.{0}.hosts".format(old_ambari_user), 'delete', 'true')
             webHcatSitePropertyAttributes("webhcat.proxyuser.{0}.groups".format(old_ambari_user), 'delete', 'true')
 
-    if self.is_secured_cluster(services):
-      putCoreSiteProperty = self.putProperty(configurations, "core-site", services)
+    if "HDFS" in servicesList or "YARN" in servicesList:
+      if self.is_secured_cluster(services):
+        putCoreSiteProperty = self.putProperty(configurations, "core-site", services)
 
-      meta = self.get_service_component_meta("HIVE", "WEBHCAT_SERVER", services)
-      if "hostnames" in meta:
-        self.put_proxyuser_value("HTTP", meta["hostnames"], services=services, configurations=configurations, put_function=putCoreSiteProperty)
+        meta = self.get_service_component_meta("HIVE", "WEBHCAT_SERVER", services)
+        if "hostnames" in meta:
+          self.put_proxyuser_value("HTTP", meta["hostnames"], services=services, configurations=configurations, put_function=putCoreSiteProperty)
 
   def recommendTezConfigurations(self, configurations, clusterData, services, hosts):
     putTezProperty = self.putProperty(configurations, "tez-site")
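
The indentation change above nests the secured-cluster core-site recommendation under a service-presence check, so core-site proxyuser properties are never recommended for clusters without HDFS or YARN. A hedged sketch of the guard (the servicesList derivation is assumed from the surrounding advisor code, which this diff does not show, following the conventional stack-advisor shape):

def get_services_list(services):
  # assumed helper: service names present in the cluster definition
  return [s["StackServices"]["service_name"] for s in services["services"]]


services = {"services": [{"StackServices": {"service_name": "HIVE"}}]}
services_list = get_services_list(services)
if "HDFS" in services_list or "YARN" in services_list:
  print("recommend core-site proxyuser properties")
else:
  print("skip core-site: no HDFS/YARN in the cluster")  # taken here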

http://git-wip-us.apache.org/repos/asf/ambari/blob/4b27feb7/ambari-server/src/main/resources/stacks/HDP/2.2/services/stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2/services/stack_advisor.py b/ambari-server/src/main/resources/stacks/HDP/2.2/services/stack_advisor.py
index 4d672d2..726514b 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.2/services/stack_advisor.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.2/services/stack_advisor.py
@@ -422,36 +422,37 @@ class HDP22StackAdvisor(HDP21StackAdvisor):
 
     container_size = "512"
 
-    if not "yarn-site" in configurations:
-      self.recommendYARNConfigurations(configurations, clusterData, services, hosts)
-    #properties below should be always present as they are provided in HDP206 stack advisor at least
-    yarnMaxAllocationSize = min(30 * int(configurations["yarn-site"]["properties"]["yarn.scheduler.minimum-allocation-mb"]), int(configurations["yarn-site"]["properties"]["yarn.scheduler.maximum-allocation-mb"]))
-    #duplicate tez task resource calc logic, direct dependency doesn't look good here (in case of Hive without Tez)
-    container_size = clusterData['mapMemory'] if clusterData['mapMemory'] > 2048 else int(clusterData['reduceMemory'])
-    container_size = min(clusterData['containers'] * clusterData['ramPerContainer'], container_size, yarnMaxAllocationSize)
-
-    putHiveSiteProperty("hive.tez.container.size", min(int(configurations["yarn-site"]["properties"]["yarn.scheduler.maximum-allocation-mb"]), container_size))
-
-    putHiveSitePropertyAttribute("hive.tez.container.size", "minimum", int(configurations["yarn-site"]["properties"]["yarn.scheduler.minimum-allocation-mb"]))
-    putHiveSitePropertyAttribute("hive.tez.container.size", "maximum", int(configurations["yarn-site"]["properties"]["yarn.scheduler.maximum-allocation-mb"]))
-
-    if "yarn-site" in services["configurations"]:
-      if "yarn.scheduler.minimum-allocation-mb" in services["configurations"]["yarn-site"]["properties"]:
-        putHiveSitePropertyAttribute("hive.tez.container.size", "minimum", int(services["configurations"]["yarn-site"]["properties"]["yarn.scheduler.minimum-allocation-mb"]))
-      if "yarn.scheduler.maximum-allocation-mb" in services["configurations"]["yarn-site"]["properties"]:
-        putHiveSitePropertyAttribute("hive.tez.container.size", "maximum", int(services["configurations"]["yarn-site"]["properties"]["yarn.scheduler.maximum-allocation-mb"]))
-
-    putHiveSiteProperty("hive.prewarm.enabled", "false")
-    putHiveSiteProperty("hive.prewarm.numcontainers", "3")
-    putHiveSiteProperty("hive.tez.auto.reducer.parallelism", "true")
-    putHiveSiteProperty("hive.tez.dynamic.partition.pruning", "true")
-
-    container_size = configurations["hive-site"]["properties"]["hive.tez.container.size"]
-    container_size_bytes = int(int(container_size)*0.8*1024*1024) # Xmx == 80% of container
-    # Memory
-    putHiveSiteProperty("hive.auto.convert.join.noconditionaltask.size", int(round(container_size_bytes/3)))
-    putHiveSitePropertyAttribute("hive.auto.convert.join.noconditionaltask.size", "maximum", container_size_bytes)
-    putHiveSiteProperty("hive.exec.reducers.bytes.per.reducer", "67108864")
+    if "YARN" in servicesList:
+      if not "yarn-site" in configurations:
+        self.recommendYARNConfigurations(configurations, clusterData, services, hosts)
+      #properties below should be always present as they are provided in HDP206 stack advisor at least
+      yarnMaxAllocationSize = min(30 * int(configurations["yarn-site"]["properties"]["yarn.scheduler.minimum-allocation-mb"]), int(configurations["yarn-site"]["properties"]["yarn.scheduler.maximum-allocation-mb"]))
+      #duplicate tez task resource calc logic, direct dependency doesn't look good here (in case of Hive without Tez)
+      container_size = clusterData['mapMemory'] if clusterData['mapMemory'] > 2048 else int(clusterData['reduceMemory'])
+      container_size = min(clusterData['containers'] * clusterData['ramPerContainer'], container_size, yarnMaxAllocationSize)
+
+      putHiveSiteProperty("hive.tez.container.size", min(int(configurations["yarn-site"]["properties"]["yarn.scheduler.maximum-allocation-mb"]), container_size))
+
+      putHiveSitePropertyAttribute("hive.tez.container.size", "minimum", int(configurations["yarn-site"]["properties"]["yarn.scheduler.minimum-allocation-mb"]))
+      putHiveSitePropertyAttribute("hive.tez.container.size", "maximum", int(configurations["yarn-site"]["properties"]["yarn.scheduler.maximum-allocation-mb"]))
+
+      if "yarn-site" in services["configurations"]:
+        if "yarn.scheduler.minimum-allocation-mb" in services["configurations"]["yarn-site"]["properties"]:
+          putHiveSitePropertyAttribute("hive.tez.container.size", "minimum", int(services["configurations"]["yarn-site"]["properties"]["yarn.scheduler.minimum-allocation-mb"]))
+        if "yarn.scheduler.maximum-allocation-mb" in services["configurations"]["yarn-site"]["properties"]:
+          putHiveSitePropertyAttribute("hive.tez.container.size", "maximum", int(services["configurations"]["yarn-site"]["properties"]["yarn.scheduler.maximum-allocation-mb"]))
+
+      putHiveSiteProperty("hive.prewarm.enabled", "false")
+      putHiveSiteProperty("hive.prewarm.numcontainers", "3")
+      putHiveSiteProperty("hive.tez.auto.reducer.parallelism", "true")
+      putHiveSiteProperty("hive.tez.dynamic.partition.pruning", "true")
+
+      container_size = configurations["hive-site"]["properties"]["hive.tez.container.size"]
+      container_size_bytes = int(int(container_size)*0.8*1024*1024) # Xmx == 80% of container
+      # Memory
+      putHiveSiteProperty("hive.auto.convert.join.noconditionaltask.size", int(round(container_size_bytes/3)))
+      putHiveSitePropertyAttribute("hive.auto.convert.join.noconditionaltask.size", "maximum", container_size_bytes)
+      putHiveSiteProperty("hive.exec.reducers.bytes.per.reducer", "67108864")
 
     # CBO
     if "hive-site" in services["configurations"] and "hive.cbo.enable" in services["configurations"]["hive-site"]["properties"]:

http://git-wip-us.apache.org/repos/asf/ambari/blob/4b27feb7/ambari-server/src/test/python/stacks/2.1/HIVE/test_hive_metastore.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.1/HIVE/test_hive_metastore.py b/ambari-server/src/test/python/stacks/2.1/HIVE/test_hive_metastore.py
index 94f639a..e34734c 100644
--- a/ambari-server/src/test/python/stacks/2.1/HIVE/test_hive_metastore.py
+++ b/ambari-server/src/test/python/stacks/2.1/HIVE/test_hive_metastore.py
@@ -688,6 +688,13 @@ class TestHiveMetastore(RMFTestCase):
         content = StaticFile('startMetastore.sh'),
         mode = 0755,
     )
+
+    self.assertResourceCalled('Directory', '/tmp/hive',
+                              owner = 'hive',
+                              create_parents = True,
+                              mode=0777
+                              )
+
     self.assertResourceCalled('Execute', ('cp',
      '--remove-destination',
      '/usr/share/java/mysql-connector-java.jar',

http://git-wip-us.apache.org/repos/asf/ambari/blob/4b27feb7/ambari-server/src/test/python/stacks/2.1/common/test_stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.1/common/test_stack_advisor.py b/ambari-server/src/test/python/stacks/2.1/common/test_stack_advisor.py
index e8bd5d0..bdd34fc 100644
--- a/ambari-server/src/test/python/stacks/2.1/common/test_stack_advisor.py
+++ b/ambari-server/src/test/python/stacks/2.1/common/test_stack_advisor.py
@@ -272,6 +272,11 @@ class TestHDP21StackAdvisor(TestCase):
       "services": [
         {
           "StackServices": {
+            "service_name": "YARN"
+          }, "components": []
+        },
+        {
+          "StackServices": {
             "service_name": "HIVE",
           },
           "components": [
@@ -338,7 +343,7 @@ class TestHDP21StackAdvisor(TestCase):
     self.assertEqual(configurations["core-site"]["properties"]["hadoop.proxyuser.HTTP.hosts"] == "example.com", True)
 
     newhost_list = ["example.com", "example.org"]
-    services["services"][0]["components"][0]["StackServiceComponents"]["hostnames"] = newhost_list
+    services["services"][1]["components"][0]["StackServiceComponents"]["hostnames"] = newhost_list
     configurations["core-site"]["properties"]["hadoop.proxyuser.HTTP.hosts"] = ""
 
     self.stackAdvisor.recommendHiveConfigurations(configurations, clusterData, services, hosts)

http://git-wip-us.apache.org/repos/asf/ambari/blob/4b27feb7/ambari-server/src/test/python/stacks/2.6/common/test_stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.6/common/test_stack_advisor.py b/ambari-server/src/test/python/stacks/2.6/common/test_stack_advisor.py
index 2d7322d..114dd3a 100644
--- a/ambari-server/src/test/python/stacks/2.6/common/test_stack_advisor.py
+++ b/ambari-server/src/test/python/stacks/2.6/common/test_stack_advisor.py
@@ -1012,7 +1012,11 @@ class TestHDP26StackAdvisor(TestCase):
 
     services = {
       "services":
-        [
+        [{
+           "StackServices": {
+             "service_name": "YARN"
+           }, "components": []
+         },
          {
             "StackServices": {
               "service_name" : "HIVE",


[39/50] [abbrv] ambari git commit: AMBARI-20733. /var/log/krb5kdc.log is growing rapidly on the KDC server (echekanskiy)

Posted by ao...@apache.org.
AMBARI-20733. /var/log/krb5kdc.log is growing rapidly on the KDC server (echekanskiy)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/2a00812a
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/2a00812a
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/2a00812a

Branch: refs/heads/branch-3.0-perf
Commit: 2a00812a66fb6bd1e92f857ed0ecd4cb5f91a615
Parents: 665a2cd
Author: Eugene Chekanskiy <ec...@hortonworks.com>
Authored: Fri Apr 21 17:54:13 2017 +0300
Committer: Andrew Onishuk <ao...@hortonworks.com>
Committed: Mon Apr 24 14:59:56 2017 +0300

----------------------------------------------------------------------
 .../src/main/python/ambari_agent/ActionQueue.py |   9 +-
 .../ambari_agent/CustomServiceOrchestrator.py   |  33 +-----
 .../test/python/ambari_agent/TestActionQueue.py |  13 +--
 .../TestCustomServiceOrchestrator.py            |  51 --------
 .../libraries/script/script.py                  |  16 ---
 .../ambari/server/agent/ComponentStatus.java    |  28 +----
 .../ambari/server/agent/HeartbeatProcessor.java |  20 ----
 .../package/scripts/accumulo_script.py          |  50 --------
 .../0.1.0/package/scripts/metrics_collector.py  |  66 +----------
 .../package/scripts/metadata_server.py          |  78 -------------
 .../0.5.0.2.1/package/scripts/falcon_client.py  |  10 --
 .../0.5.0.2.1/package/scripts/falcon_server.py  |  59 ----------
 .../0.96.0.2.0/package/scripts/hbase_master.py  |  49 --------
 .../package/scripts/hbase_regionserver.py       |  49 --------
 .../package/scripts/phoenix_queryserver.py      |   6 +-
 .../HDFS/2.1.0.2.0/package/scripts/datanode.py  |  58 ---------
 .../2.1.0.2.0/package/scripts/hdfs_client.py    |  45 -------
 .../2.1.0.2.0/package/scripts/journalnode.py    |  57 ---------
 .../HDFS/2.1.0.2.0/package/scripts/namenode.py  |  57 ---------
 .../2.1.0.2.0/package/scripts/nfsgateway.py     |  58 ---------
 .../HDFS/2.1.0.2.0/package/scripts/snamenode.py |  60 ----------
 .../2.1.0.2.0/package/scripts/zkfc_slave.py     |  43 -------
 .../HDFS/3.0.0.3.0/package/scripts/datanode.py  |  58 ---------
 .../3.0.0.3.0/package/scripts/hdfs_client.py    |  45 -------
 .../3.0.0.3.0/package/scripts/journalnode.py    |  57 ---------
 .../HDFS/3.0.0.3.0/package/scripts/namenode.py  |  57 ---------
 .../3.0.0.3.0/package/scripts/nfsgateway.py     |  58 ---------
 .../HDFS/3.0.0.3.0/package/scripts/snamenode.py |  60 ----------
 .../3.0.0.3.0/package/scripts/zkfc_slave.py     |  43 -------
 .../package/scripts/hive_metastore.py           |  52 ---------
 .../0.12.0.2.0/package/scripts/hive_server.py   |  61 ----------
 .../package/scripts/hive_server_interactive.py  |  61 ----------
 .../package/scripts/webhcat_server.py           |  67 -----------
 .../2.1.0.3.0/package/scripts/hive_metastore.py |  52 ---------
 .../2.1.0.3.0/package/scripts/hive_server.py    |  61 ----------
 .../package/scripts/hive_server_interactive.py  |  61 ----------
 .../2.1.0.3.0/package/scripts/webhcat_server.py |  67 -----------
 .../package/scripts/kerberos_client.py          |  21 ----
 .../0.5.0.2.2/package/scripts/knox_gateway.py   |  61 ----------
 .../4.0.0.2.0/package/scripts/oozie_server.py   |  63 ----------
 .../STORM/0.9.1/package/scripts/drpc_server.py  |  52 ---------
 .../STORM/0.9.1/package/scripts/nimbus.py       |  45 -------
 .../STORM/0.9.1/package/scripts/pacemaker.py    |  52 ---------
 .../STORM/0.9.1/package/scripts/ui_server.py    |  53 ---------
 .../scripts/application_timeline_server.py      |  61 ----------
 .../2.1.0.2.0/package/scripts/historyserver.py  |  56 ---------
 .../2.1.0.2.0/package/scripts/nodemanager.py    |  60 ----------
 .../package/scripts/resourcemanager.py          |  60 ----------
 .../scripts/application_timeline_server.py      |  61 ----------
 .../3.0.0.3.0/package/scripts/historyserver.py  |  56 ---------
 .../3.0.0.3.0/package/scripts/nodemanager.py    |  60 ----------
 .../package/scripts/resourcemanager.py          |  60 ----------
 .../3.4.5/package/scripts/zookeeper_server.py   |  51 --------
 .../KERBEROS/package/scripts/kerberos_client.py |  21 ----
 .../server/agent/HeartbeatProcessorTest.java    |   7 --
 .../server/agent/TestHeartbeatHandler.java      |  13 ---
 .../stacks/2.0.6/HBASE/test_hbase_master.py     | 102 ----------------
 .../2.0.6/HBASE/test_hbase_regionserver.py      | 104 -----------------
 .../python/stacks/2.0.6/HDFS/test_datanode.py   | 111 ------------------
 .../stacks/2.0.6/HDFS/test_hdfs_client.py       | 100 ----------------
 .../stacks/2.0.6/HDFS/test_journalnode.py       | 114 ------------------
 .../python/stacks/2.0.6/HDFS/test_namenode.py   | 114 ------------------
 .../python/stacks/2.0.6/HDFS/test_nfsgateway.py | 116 ------------------
 .../python/stacks/2.0.6/HDFS/test_snamenode.py  | 117 +------------------
 .../test/python/stacks/2.0.6/HDFS/test_zkfc.py  | 102 +---------------
 .../stacks/2.0.6/HIVE/test_hive_server.py       | 112 ------------------
 .../stacks/2.0.6/HIVE/test_webhcat_server.py    | 116 ------------------
 .../stacks/2.0.6/OOZIE/test_oozie_server.py     | 113 ------------------
 .../stacks/2.0.6/YARN/test_historyserver.py     | 106 -----------------
 .../stacks/2.0.6/YARN/test_nodemanager.py       | 109 -----------------
 .../stacks/2.0.6/YARN/test_resourcemanager.py   | 108 -----------------
 .../2.0.6/ZOOKEEPER/test_zookeeper_server.py    | 103 ----------------
 .../stacks/2.1/FALCON/test_falcon_client.py     |  24 ----
 .../stacks/2.1/FALCON/test_falcon_server.py     | 109 -----------------
 .../stacks/2.1/HIVE/test_hive_metastore.py      | 113 ------------------
 .../stacks/2.1/STORM/test_storm_drpc_server.py  | 104 -----------------
 .../stacks/2.1/STORM/test_storm_nimbus.py       | 103 ----------------
 .../stacks/2.1/STORM/test_storm_ui_server.py    |  82 -------------
 .../stacks/2.1/YARN/test_apptimelineserver.py   | 110 -----------------
 .../python/stacks/2.2/KNOX/test_knox_gateway.py | 102 ----------------
 .../stacks/2.5/ATLAS/test_atlas_server.py       |  42 -------
 81 files changed, 13 insertions(+), 5211 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/2a00812a/ambari-agent/src/main/python/ambari_agent/ActionQueue.py
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/python/ambari_agent/ActionQueue.py b/ambari-agent/src/main/python/ambari_agent/ActionQueue.py
index 75880c6..1eda5c2 100644
--- a/ambari-agent/src/main/python/ambari_agent/ActionQueue.py
+++ b/ambari-agent/src/main/python/ambari_agent/ActionQueue.py
@@ -498,16 +498,14 @@ class ActionQueue(threading.Thread):
 
   def execute_status_command_and_security_status(self, command):
     component_status_result = self.customServiceOrchestrator.requestComponentStatus(command)
-    component_security_status_result = self.customServiceOrchestrator.requestComponentSecurityState(command)
-
-    return command, component_status_result, component_security_status_result
+    return command, component_status_result
 
   def process_status_command_result(self, result):
     '''
     Executes commands of type STATUS_COMMAND
     '''
     try:
-      command, component_status_result, component_security_status_result = result
+      command, component_status_result = result
       cluster = command['clusterName']
       service = command['serviceName']
       component = command['componentName']
@@ -548,9 +546,6 @@ class ActionQueue(threading.Thread):
       if self.controller.recovery_manager.enabled():
         result['sendExecCmdDet'] = str(request_execution_cmd)
 
-      # Add security state to the result
-      result['securityState'] = component_security_status_result
-
       if component_extra is not None and len(component_extra) != 0:
         if component_extra.has_key('alerts'):
           result['alerts'] = component_extra['alerts']
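
With SECURITY_STATUS gone, the agent's status pipeline shrinks from a three-tuple to a pair. A minimal sketch of the contract between the two methods after this change (the orchestrator argument stands in for self.customServiceOrchestrator):

def execute_status_command_and_security_status(command, orchestrator):
  # post-change: only the status result travels with the command
  return command, orchestrator.requestComponentStatus(command)


def process_status_command_result(result):
  command, component_status_result = result  # two-tuple after AMBARI-20733
  return {"componentName": command["componentName"],
          "status_exitcode": component_status_result.get("exitcode")}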

http://git-wip-us.apache.org/repos/asf/ambari/blob/2a00812a/ambari-agent/src/main/python/ambari_agent/CustomServiceOrchestrator.py
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/python/ambari_agent/CustomServiceOrchestrator.py b/ambari-agent/src/main/python/ambari_agent/CustomServiceOrchestrator.py
index a67e16e..8b8a8f9 100644
--- a/ambari-agent/src/main/python/ambari_agent/CustomServiceOrchestrator.py
+++ b/ambari-agent/src/main/python/ambari_agent/CustomServiceOrchestrator.py
@@ -47,7 +47,6 @@ class CustomServiceOrchestrator():
   SCRIPT_TYPE_PYTHON = "PYTHON"
   COMMAND_TYPE = "commandType"
   COMMAND_NAME_STATUS = "STATUS"
-  COMMAND_NAME_SECURITY_STATUS = "SECURITY_STATUS"
   CUSTOM_ACTION_COMMAND = 'ACTIONEXECUTE'
   CUSTOM_COMMAND_COMMAND = 'CUSTOM_COMMAND'
 
@@ -63,7 +62,7 @@ class CustomServiceOrchestrator():
   AMBARI_SERVER_PORT = "ambari_server_port"
   AMBARI_SERVER_USE_SSL = "ambari_server_use_ssl"
 
-  FREQUENT_COMMANDS = [COMMAND_NAME_SECURITY_STATUS, COMMAND_NAME_STATUS]
+  FREQUENT_COMMANDS = [COMMAND_NAME_STATUS]
   DONT_DEBUG_FAILURES_FOR_COMMANDS = FREQUENT_COMMANDS
   REFLECTIVELY_RUN_COMMANDS = FREQUENT_COMMANDS # -- commands which run a lot and often (this increases their speed)
   DONT_BACKUP_LOGS_FOR_COMMANDS = FREQUENT_COMMANDS
@@ -467,36 +466,6 @@ class CustomServiceOrchestrator():
                           override_output_files=override_output_files)
     return res
 
-  def requestComponentSecurityState(self, command):
-    """
-     Determines the current security state of the component
-     A command will be issued to trigger the security_status check and the result of this check will
-     be returned to the caller. If the component lifecycle script has no security_status method, the
-     check will return a non-zero exit code and "UNKNOWN" will be returned.
-    """
-    override_output_files=True # by default, we override status command output
-    if logger.level == logging.DEBUG:
-      override_output_files = False
-    security_check_res = self.runCommand(command, self.status_commands_stdout,
-                                         self.status_commands_stderr, self.COMMAND_NAME_SECURITY_STATUS,
-                                         override_output_files=override_output_files)
-    result = 'UNKNOWN'
-
-    if security_check_res is None:
-      logger.warn("The return value of the security_status check was empty, the security status is unknown")
-    elif 'exitcode' not in security_check_res:
-      logger.warn("Missing 'exitcode' value from the security_status check result, the security status is unknown")
-    elif security_check_res['exitcode'] != 0:
-      logger.debug("The 'exitcode' value from the security_status check result indicated the check routine failed to properly execute, the security status is unknown")
-    elif 'structuredOut' not in security_check_res:
-      logger.warn("Missing 'structuredOut' value from the security_status check result, the security status is unknown")
-    elif 'securityState' not in security_check_res['structuredOut']:
-      logger.warn("Missing 'securityState' value from the security_status check structuredOut data set, the security status is unknown")
-    else:
-      result = security_check_res['structuredOut']['securityState']
-
-    return result
-
   def resolve_script_path(self, base_dir, script):
     """
     Encapsulates logic of script location determination.
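
The deleted requestComponentSecurityState walked the check result defensively, degrading to 'UNKNOWN' at every missing key rather than raising. An equivalent standalone sketch of that validation ladder:

def security_state_from(check_res):
  if check_res is None:
    return "UNKNOWN"                       # empty result
  if check_res.get("exitcode") != 0:
    return "UNKNOWN"                       # missing or non-zero exit code
  structured = check_res.get("structuredOut")
  if not isinstance(structured, dict):
    return "UNKNOWN"                       # no structured output
  return structured.get("securityState", "UNKNOWN")


assert security_state_from({"exitcode": 0,
                            "structuredOut": {"securityState": "UNSECURED"}}) == "UNSECURED"
assert security_state_from({"exitcode": 1}) == "UNKNOWN"
assert security_state_from(None) == "UNKNOWN"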

http://git-wip-us.apache.org/repos/asf/ambari/blob/2a00812a/ambari-agent/src/test/python/ambari_agent/TestActionQueue.py
----------------------------------------------------------------------
diff --git a/ambari-agent/src/test/python/ambari_agent/TestActionQueue.py b/ambari-agent/src/test/python/ambari_agent/TestActionQueue.py
index ab46f96..faa9b81 100644
--- a/ambari-agent/src/test/python/ambari_agent/TestActionQueue.py
+++ b/ambari-agent/src/test/python/ambari_agent/TestActionQueue.py
@@ -988,12 +988,11 @@ class TestActionQueue(TestCase):
 
     dummy_controller.recovery_manager = RecoveryManager(tempfile.mktemp())
 
-    result = (self.status_command, {'exitcode': 0 }, 'UNKNOWN')
+    result = (self.status_command, {'exitcode': 0 })
 
     actionQueue.process_status_command_result(result)
     report = actionQueue.result()
-    expected = {'dummy report': '',
-                'securityState' : 'UNKNOWN'}
+    expected = {'dummy report': ''}
 
     self.assertEqual(len(report['componentStatus']), 1)
     self.assertEqual(report['componentStatus'][0], expected)
@@ -1019,12 +1018,11 @@ class TestActionQueue(TestCase):
 
     dummy_controller.recovery_manager = RecoveryManager(tempfile.mktemp(), True, False)
 
-    result = (self.status_command, {'exitcode': 0 }, 'UNKNOWN')
+    result = (self.status_command, {'exitcode': 0 })
 
     actionQueue.process_status_command_result(result)
     report = actionQueue.result()
     expected = {'dummy report': '',
-                'securityState' : 'UNKNOWN',
                 'sendExecCmdDet': 'True'}
 
     self.assertEqual(len(report['componentStatus']), 1)
@@ -1033,12 +1031,11 @@ class TestActionQueue(TestCase):
     requires_recovery_mock.return_value = True
     command_exists_mock.return_value = True
     
-    result = (self.status_command, {'exitcode': 0 }, 'UNKNOWN')
+    result = (self.status_command, {'exitcode': 0 })
 
     actionQueue.process_status_command_result(result)
     report = actionQueue.result()
     expected = {'dummy report': '',
-                'securityState' : 'UNKNOWN',
                 'sendExecCmdDet': 'False'}
 
     self.assertEqual(len(report['componentStatus']), 1)
@@ -1062,7 +1059,7 @@ class TestActionQueue(TestCase):
       'structuredOut': {'alerts': [ {'name': 'flume_alert'} ] }
     }
     
-    result = (self.status_command_for_alerts, command_return_value, command_return_value)
+    result = (self.status_command_for_alerts, command_return_value)
     
     build_mock.return_value = {'somestatusresult': 'aresult'}
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/2a00812a/ambari-agent/src/test/python/ambari_agent/TestCustomServiceOrchestrator.py
----------------------------------------------------------------------
diff --git a/ambari-agent/src/test/python/ambari_agent/TestCustomServiceOrchestrator.py b/ambari-agent/src/test/python/ambari_agent/TestCustomServiceOrchestrator.py
index 8e5e9a3..c54ffca 100644
--- a/ambari-agent/src/test/python/ambari_agent/TestCustomServiceOrchestrator.py
+++ b/ambari-agent/src/test/python/ambari_agent/TestCustomServiceOrchestrator.py
@@ -569,57 +569,6 @@ class TestCustomServiceOrchestrator(TestCase):
     status = orchestrator.requestComponentStatus(status_command)
     self.assertEqual(runCommand_mock.return_value, status)
 
-  @patch.object(CustomServiceOrchestrator, "runCommand")
-  @patch.object(FileCache, "__init__")
-  def test_requestComponentSecurityState(self, FileCache_mock, runCommand_mock):
-    FileCache_mock.return_value = None
-    status_command = {
-      "serviceName" : 'HDFS',
-      "commandType" : "STATUS_COMMAND",
-      "clusterName" : "",
-      "componentName" : "DATANODE",
-      'configurations':{}
-    }
-    dummy_controller = MagicMock()
-    orchestrator = CustomServiceOrchestrator(self.config, dummy_controller)
-    # Test securityState
-    runCommand_mock.return_value = {
-      'exitcode' : 0,
-      'structuredOut' : {'securityState': 'UNSECURED'}
-    }
-
-    status = orchestrator.requestComponentSecurityState(status_command)
-    self.assertEqual('UNSECURED', status)
-
-    # Test case where exit code indicates failure
-    runCommand_mock.return_value = {
-      "exitcode" : 1
-    }
-    status = orchestrator.requestComponentSecurityState(status_command)
-    self.assertEqual('UNKNOWN', status)
-
-  @patch.object(FileCache, "__init__")
-  def test_requestComponentSecurityState_realFailure(self, FileCache_mock):
-    '''
-    Tests the case where the CustomServiceOrchestrator attempts to call a service's security_status
-    method, but fails to do so because the script or method was not found.
-    :param FileCache_mock:
-    :return:
-    '''
-    FileCache_mock.return_value = None
-    status_command = {
-      "serviceName" : 'BOGUS_SERVICE',
-      "commandType" : "STATUS_COMMAND",
-      "clusterName" : "",
-      "componentName" : "DATANODE",
-      'configurations':{}
-    }
-    dummy_controller = MagicMock()
-    orchestrator = CustomServiceOrchestrator(self.config, dummy_controller)
-
-    status = orchestrator.requestComponentSecurityState(status_command)
-    self.assertEqual('UNKNOWN', status)
-
 
   @patch.object(CustomServiceOrchestrator, "get_py_executor")
   @patch.object(CustomServiceOrchestrator, "dump_command_to_json")

http://git-wip-us.apache.org/repos/asf/ambari/blob/2a00812a/ambari-common/src/main/python/resource_management/libraries/script/script.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/script/script.py b/ambari-common/src/main/python/resource_management/libraries/script/script.py
index d0e5678..a08feab 100644
--- a/ambari-common/src/main/python/resource_management/libraries/script/script.py
+++ b/ambari-common/src/main/python/resource_management/libraries/script/script.py
@@ -864,22 +864,6 @@ class Script(object):
     """
     self.fail_with_error('configure method isn\'t implemented')
 
-  def security_status(self, env):
-    """
-    To be overridden by subclasses to provide the current security state of the component.
-    Implementations are required to set the "securityState" property of the structured out data set
-    to one of the following values:
-
-      UNSECURED        - If the component is not configured for any security protocol such as
-                         Kerberos
-      SECURED_KERBEROS - If the component is configured for Kerberos
-      UNKNOWN          - If the security state cannot be determined
-      ERROR            - If the component is supposed to be secured, but there are issues with the
-                         configuration.  For example, if the component is configured for Kerberos
-                         but the configured principal and keytab file fail to kinit
-    """
-    self.put_structured_out({"securityState": "UNKNOWN"})
-
   def generate_configs_get_template_file_content(self, filename, dicts):
     config = self.get_config()
     content = ''

http://git-wip-us.apache.org/repos/asf/ambari/blob/2a00812a/ambari-server/src/main/java/org/apache/ambari/server/agent/ComponentStatus.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/agent/ComponentStatus.java b/ambari-server/src/main/java/org/apache/ambari/server/agent/ComponentStatus.java
index 5591ae8..68e1734 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/agent/ComponentStatus.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/agent/ComponentStatus.java
@@ -28,12 +28,6 @@ public class ComponentStatus {
   private String msg;
   private String status;
 
-  /**
-   * A String declaring the component's security state
-   *
-   * @see org.apache.ambari.server.state.SecurityState
-   */
-  private String securityState;
   private String sendExecCmdDet = "False";
 
   private String serviceName;
@@ -74,26 +68,6 @@ public class ComponentStatus {
     this.status = status;
   }
 
-  /**
-   * Gets the relevant component's security state.
-   *
-   * @return a String declaring this component's security state
-   * @see org.apache.ambari.server.state.SecurityState
-   */
-  public String getSecurityState() {
-    return securityState;
-  }
-
-  /**
-   * Sets the relevant component's security state.
-   *
-   * @param securityState a String declaring this component's security state
-   * @see org.apache.ambari.server.state.SecurityState
-   */
-  public void setSecurityState(String securityState) {
-    this.securityState = securityState;
-  }
-
   public String getStackVersion() {
     return stackVersion;
   }
@@ -158,7 +132,7 @@ public class ComponentStatus {
   @Override
   public String toString() {
     return "ComponentStatus [componentName=" + componentName + ", msg=" + msg
-        + ", status=" + status + ", securityState=" + securityState
+        + ", status=" + status
         + ", serviceName=" + serviceName + ", clusterName=" + clusterName
         + ", stackVersion=" + stackVersion + ", configurationTags="
         + configurationTags + ", extra=" + extra + "]";

http://git-wip-us.apache.org/repos/asf/ambari/blob/2a00812a/ambari-server/src/main/java/org/apache/ambari/server/agent/HeartbeatProcessor.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/agent/HeartbeatProcessor.java b/ambari-server/src/main/java/org/apache/ambari/server/agent/HeartbeatProcessor.java
index 8cd2804..c1028dc 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/agent/HeartbeatProcessor.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/agent/HeartbeatProcessor.java
@@ -56,7 +56,6 @@ import org.apache.ambari.server.state.ComponentInfo;
 import org.apache.ambari.server.state.Host;
 import org.apache.ambari.server.state.HostHealthStatus;
 import org.apache.ambari.server.state.MaintenanceState;
-import org.apache.ambari.server.state.SecurityState;
 import org.apache.ambari.server.state.Service;
 import org.apache.ambari.server.state.ServiceComponent;
 import org.apache.ambari.server.state.ServiceComponentHost;
@@ -617,25 +616,6 @@ public class HeartbeatProcessor extends AbstractService{
                 }
               }
 
-              SecurityState prevSecurityState = scHost.getSecurityState();
-              SecurityState currentSecurityState = SecurityState.valueOf(status.getSecurityState());
-              if((prevSecurityState != currentSecurityState)) {
-                if(prevSecurityState.isEndpoint()) {
-                  scHost.setSecurityState(currentSecurityState);
-                  LOG.info(String.format("Security of service component %s of service %s of cluster %s " +
-                          "has changed from %s to %s on host %s",
-                      componentName, status.getServiceName(), status.getClusterName(), prevSecurityState,
-                      currentSecurityState, hostname));
-                }
-                else {
-                  LOG.debug(String.format("Security of service component %s of service %s of cluster %s " +
-                          "has changed from %s to %s on host %s but will be ignored since %s is a " +
-                          "transitional state",
-                      componentName, status.getServiceName(), status.getClusterName(),
-                      prevSecurityState, currentSecurityState, hostname, prevSecurityState));
-                }
-              }
-
               if (null != status.getStackVersion() && !status.getStackVersion().isEmpty()) {
                 scHost.setStackVersion(gson.fromJson(status.getStackVersion(), StackId.class));
               }

http://git-wip-us.apache.org/repos/asf/ambari/blob/2a00812a/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/scripts/accumulo_script.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/scripts/accumulo_script.py b/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/scripts/accumulo_script.py
index 01fbce2..445c996 100644
--- a/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/scripts/accumulo_script.py
+++ b/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/scripts/accumulo_script.py
@@ -119,56 +119,6 @@ class AccumuloScript(Script):
 
     # some accumulo components depend on the client, so update that too
     stack_select.select("accumulo-client", params.version)
-
-
-  def security_status(self, env):
-    import status_params
-
-    env.set_params(status_params)
-
-    props_value_check = {}
-    props_empty_check = ['general.kerberos.keytab',
-                         'general.kerberos.principal']
-    props_read_check = ['general.kerberos.keytab']
-    accumulo_site_expectations = build_expectations('accumulo-site',
-      props_value_check, props_empty_check, props_read_check)
-
-    accumulo_expectations = {}
-    accumulo_expectations.update(accumulo_site_expectations)
-
-    security_params = get_params_from_filesystem(status_params.conf_dir,
-      {'accumulo-site.xml': FILE_TYPE_XML})
-
-    result_issues = validate_security_config_properties(security_params, accumulo_expectations)
-    if not result_issues:  # If all validations passed successfully
-      try:
-        # Double check the dict before calling execute
-        if ( 'accumulo-site' not in security_params
-             or 'general.kerberos.keytab' not in security_params['accumulo-site']
-             or 'general.kerberos.principal' not in security_params['accumulo-site']):
-          self.put_structured_out({"securityState": "UNSECURED"})
-          self.put_structured_out(
-            {"securityIssuesFound": "Keytab file or principal are not set property."})
-          return
-
-        cached_kinit_executor(status_params.kinit_path_local,
-          status_params.accumulo_user,
-          security_params['accumulo-site']['general.kerberos.keytab'],
-          security_params['accumulo-site']['general.kerberos.principal'],
-          status_params.hostname,
-          status_params.tmp_dir,
-          30)
-
-        self.put_structured_out({"securityState": "SECURED_KERBEROS"})
-      except Exception as e:
-        self.put_structured_out({"securityState": "ERROR"})
-        self.put_structured_out({"securityStateErrorInfo": str(e)})
-    else:
-      issues = []
-      for cf in result_issues:
-        issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
-      self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
-      self.put_structured_out({"securityState": "UNSECURED"})
       
   def get_log_folder(self):
     import params
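
Every deleted security_status above repeats the same expectations pattern: declare per-config-file property checks, validate the on-disk configs against them, and report each file that fails. A self-contained sketch of that shape (the names mirror the real resource_management helpers, but this is illustrative only; read_checks is accepted and ignored here for brevity):

def build_expectations(config_file, value_checks, empty_checks, read_checks):
  return {config_file: {"value_checks": value_checks or {},
                        "empty_checks": empty_checks or []}}


def validate_security_config_properties(security_params, expectations):
  issues = {}
  for config_file, checks in expectations.items():
    props = security_params.get(config_file, {})
    for prop, expected in checks["value_checks"].items():
      if props.get(prop) != expected:
        issues[config_file] = "%s should be %s" % (prop, expected)
    for prop in checks["empty_checks"]:
      if not props.get(prop):
        issues[config_file] = "%s is empty or missing" % prop
  return issues


expectations = build_expectations(
    "accumulo-site", {},
    ["general.kerberos.keytab", "general.kerberos.principal"],
    ["general.kerberos.keytab"])
print(validate_security_config_properties({"accumulo-site": {}}, expectations))
# {'accumulo-site': 'general.kerberos.principal is empty or missing'}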

http://git-wip-us.apache.org/repos/asf/ambari/blob/2a00812a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/metrics_collector.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/metrics_collector.py b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/metrics_collector.py
index 7073de6..fc2576d 100644
--- a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/metrics_collector.py
+++ b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/metrics_collector.py
@@ -77,71 +77,7 @@ class AmsCollector(Script):
 
 @OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
 class AmsCollectorDefault(AmsCollector):
-  def security_status(self, env):
-    import status_params
-
-    env.set_params(status_params)
-    props_value_check = {"hbase.security.authentication": "kerberos",
-                         "hbase.security.authorization": "true"}
-
-    props_empty_check = ["hbase.zookeeper.property.authProvider.1",
-                         "hbase.master.keytab.file",
-                         "hbase.master.kerberos.principal",
-                         "hbase.regionserver.keytab.file",
-                         "hbase.regionserver.kerberos.principal"
-                         ]
-    props_read_check = ['hbase.master.keytab.file', 'hbase.regionserver.keytab.file']
-    ams_hbase_site_expectations = build_expectations('hbase-site', props_value_check,
-                                                     props_empty_check,
-                                                     props_read_check)
-
-    expectations = {}
-    expectations.update(ams_hbase_site_expectations)
-
-    security_params = get_params_from_filesystem(status_params.ams_hbase_conf_dir,
-                                                 {'hbase-site.xml': FILE_TYPE_XML})
-
-    # In case of blueprint deployment security_status might be called before AMS collector is installed.
-    if ('hbase-site' not in security_params or 'hbase.cluster.distributed' not in security_params['hbase-site']) :
-      self.put_structured_out({"securityState": "UNKNOWN"})
-      return
-
-    is_hbase_distributed = security_params['hbase-site']['hbase.cluster.distributed']
-    # for embedded mode, when HBase is backed by file, security state is SECURED_KERBEROS by definition when cluster is secured
-    if status_params.security_enabled and not is_hbase_distributed:
-      self.put_structured_out({"securityState": "SECURED_KERBEROS"})
-      return
-
-    result_issues = validate_security_config_properties(security_params, expectations)
-
-    if not result_issues:  # If all validations passed successfully
-      try:
-        # Double check the dict before calling execute
-        if ('hbase-site' not in security_params or
-                'hbase.master.keytab.file' not in security_params['hbase-site'] or
-                'hbase.master.kerberos.principal' not in security_params['hbase-site']):
-          self.put_structured_out({"securityState": "UNSECURED"})
-          self.put_structured_out(
-            {"securityIssuesFound": "Keytab file or principal are not set property."})
-          return
-
-        cached_kinit_executor(status_params.kinit_path_local,
-                              status_params.hbase_user,
-                              security_params['hbase-site']['hbase.master.keytab.file'],
-                              security_params['hbase-site']['hbase.master.kerberos.principal'],
-                              status_params.hostname,
-                              status_params.tmp_dir)
-        self.put_structured_out({"securityState": "SECURED_KERBEROS"})
-      except Exception as e:
-        self.put_structured_out({"securityState": "ERROR"})
-        self.put_structured_out({"securityStateErrorInfo": str(e)})
-    else:
-      issues = []
-      for cf in result_issues:
-        issues.append("Configuration file %s did not pass the validation. Reason: %s" % (
-          cf, result_issues[cf]))
-      self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
-      self.put_structured_out({"securityState": "UNSECURED"})
+  pass
 
 
 @OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)

http://git-wip-us.apache.org/repos/asf/ambari/blob/2a00812a/ambari-server/src/main/resources/common-services/ATLAS/0.1.0.2.3/package/scripts/metadata_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/ATLAS/0.1.0.2.3/package/scripts/metadata_server.py b/ambari-server/src/main/resources/common-services/ATLAS/0.1.0.2.3/package/scripts/metadata_server.py
index 3c62243..1ef77cf 100644
--- a/ambari-server/src/main/resources/common-services/ATLAS/0.1.0.2.3/package/scripts/metadata_server.py
+++ b/ambari-server/src/main/resources/common-services/ATLAS/0.1.0.2.3/package/scripts/metadata_server.py
@@ -168,84 +168,6 @@ class MetadataServer(Script):
     env.set_params(status_params)
     check_process_status(status_params.pid_file)
 
-  def security_status(self, env):
-    import status_params
-
-    env.set_params(status_params)
-
-    file_name_key = 'applicaton'
-    props_value_check = {'atlas.authentication.method': 'kerberos',
-                         'atlas.http.authentication.enabled': 'true',
-                         'atlas.http.authentication.type': 'kerberos'}
-    props_empty_check = ['atlas.authentication.principal',
-                         'atlas.authentication.keytab',
-                         'atlas.http.authentication.kerberos.principal',
-                         'atlas.http.authentication.kerberos.keytab']
-    props_read_check = ['atlas.authentication.keytab',
-                        'atlas.http.authentication.kerberos.keytab']
-
-    if check_stack_feature(StackFeature.ATLAS_UPGRADE_SUPPORT, status_params.version_for_stack_feature_checks):
-      file_name_key = 'atlas-application'
-      props_value_check = {'atlas.authentication.method.kerberos': 'true',
-                           'atlas.solr.kerberos.enable': 'true'}
-      props_empty_check = ['atlas.authentication.principal',
-                           'atlas.authentication.keytab',
-                           'atlas.authentication.method.kerberos.principal',
-                           'atlas.authentication.method.kerberos.keytab']
-      props_read_check = ['atlas.authentication.keytab',
-                          'atlas.authentication.method.kerberos.keytab']
-
-    atlas_site_expectations = build_expectations(file_name_key,
-                                                 props_value_check,
-                                                 props_empty_check,
-                                                 props_read_check)
-
-    atlas_expectations = {}
-    atlas_expectations.update(atlas_site_expectations)
-
-    security_params = get_params_from_filesystem(status_params.conf_dir,
-                                                 {status_params.conf_file: FILE_TYPE_PROPERTIES})
-    result_issues = validate_security_config_properties(security_params, atlas_expectations)
-
-    if not result_issues:  # If all validations passed successfully
-      try:
-        # Double check the dict before calling execute
-        if ( file_name_key not in security_params
-             or 'atlas.authentication.keytab' not in security_params[file_name_key]
-             or 'atlas.authentication.principal' not in security_params[file_name_key]):
-          self.put_structured_out({"securityState": "UNSECURED"})
-          self.put_structured_out(
-            {"securityIssuesFound": "Atlas service keytab file or principal are not set property."})
-          return
-
-        if check_stack_feature(StackFeature.ATLAS_UPGRADE_SUPPORT, status_params.version_for_stack_feature_checks):
-          if ( file_name_key not in security_params
-               or 'atlas.authentication.method.kerberos.keytab' not in security_params[file_name_key]
-               or 'atlas.authentication.method.kerberos.principal' not in security_params[file_name_key]):
-            self.put_structured_out({"securityState": "UNSECURED"})
-            self.put_structured_out(
-              {"securityIssuesFound": "Method Authentication keytab file or principal are not set property."})
-            return
-        else:
-          if ( file_name_key not in security_params
-               or 'atlas.http.authentication.kerberos.keytab' not in security_params[file_name_key]
-               or 'atlas.http.authentication.kerberos.principal' not in security_params[file_name_key]):
-            self.put_structured_out({"securityState": "UNSECURED"})
-            self.put_structured_out(
-              {"securityIssuesFound": "HTTP Authentication keytab file or principal are not set property."})
-            return
-
-        self.put_structured_out({"securityState": "SECURED_KERBEROS"})
-      except Exception as e:
-        self.put_structured_out({"securityState": "ERROR"})
-        self.put_structured_out({"securityStateErrorInfo": str(e)})
-    else:
-      issues = []
-      for cf in result_issues:
-        issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
-      self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
-      self.put_structured_out({"securityState": "UNSECURED"})
-
   def get_log_folder(self):
     import params
 

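For reference, every security_status() removed by this commit follows the same expectations-validation pattern from the resource_management security_commons helpers. A minimal sketch of that pattern, using the Atlas property names from the hunk above (the enclosing Script subclass and the status_params module are assumed to exist, as in the removed code):

from resource_management.libraries.functions.security_commons import (
  build_expectations, get_params_from_filesystem,
  validate_security_config_properties, FILE_TYPE_PROPERTIES)

def security_status(self, env):
  import status_params
  env.set_params(status_params)

  # Describe what a kerberized config must contain: exact property values,
  # properties that must be non-empty, and keytab paths the agent must be
  # able to read.
  expectations = build_expectations(
    'atlas-application',
    {'atlas.authentication.method.kerberos': 'true'},  # value checks
    ['atlas.authentication.principal'],                # must be non-empty
    ['atlas.authentication.keytab'])                   # must be readable

  # Read the live config from disk and diff it against the expectations.
  security_params = get_params_from_filesystem(
    status_params.conf_dir,
    {status_params.conf_file: FILE_TYPE_PROPERTIES})
  result_issues = validate_security_config_properties(security_params,
                                                      expectations)

  self.put_structured_out(
    {"securityState": "UNSECURED" if result_issues else "SECURED_KERBEROS"})
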
http://git-wip-us.apache.org/repos/asf/ambari/blob/2a00812a/ambari-server/src/main/resources/common-services/FALCON/0.5.0.2.1/package/scripts/falcon_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/FALCON/0.5.0.2.1/package/scripts/falcon_client.py b/ambari-server/src/main/resources/common-services/FALCON/0.5.0.2.1/package/scripts/falcon_client.py
index 6d1dbc5..b0f517b 100644
--- a/ambari-server/src/main/resources/common-services/FALCON/0.5.0.2.1/package/scripts/falcon_client.py
+++ b/ambari-server/src/main/resources/common-services/FALCON/0.5.0.2.1/package/scripts/falcon_client.py
@@ -59,16 +59,6 @@ class FalconClientLinux(FalconClient):
     conf_select.select(params.stack_name, "falcon", params.version)
     stack_select.select("falcon-client", params.version)
 
-  def security_status(self, env):
-    import status_params
-    env.set_params(status_params)
-
-    if status_params.security_enabled:
-      self.put_structured_out({"securityState": "SECURED_KERBEROS"})
-    else:
-      self.put_structured_out({"securityState": "UNSECURED"})
-
-
 @OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
 class FalconClientWindows(FalconClient):
   def install(self, env):

http://git-wip-us.apache.org/repos/asf/ambari/blob/2a00812a/ambari-server/src/main/resources/common-services/FALCON/0.5.0.2.1/package/scripts/falcon_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/FALCON/0.5.0.2.1/package/scripts/falcon_server.py b/ambari-server/src/main/resources/common-services/FALCON/0.5.0.2.1/package/scripts/falcon_server.py
index c4960a7..23f9ef8 100644
--- a/ambari-server/src/main/resources/common-services/FALCON/0.5.0.2.1/package/scripts/falcon_server.py
+++ b/ambari-server/src/main/resources/common-services/FALCON/0.5.0.2.1/package/scripts/falcon_server.py
@@ -89,65 +89,6 @@ class FalconServerLinux(FalconServer):
 
     falcon_server_upgrade.pre_start_restore()
 
-  def security_status(self, env):
-    import status_params
-    env.set_params(status_params)
-    if status_params.security_enabled:
-      props_value_check = {"*.falcon.authentication.type": "kerberos",
-                           "*.falcon.http.authentication.type": "kerberos"}
-      props_empty_check = ["*.falcon.service.authentication.kerberos.principal",
-                           "*.falcon.service.authentication.kerberos.keytab",
-                           "*.falcon.http.authentication.kerberos.principal",
-                           "*.falcon.http.authentication.kerberos.keytab"]
-      props_read_check = ["*.falcon.service.authentication.kerberos.keytab",
-                          "*.falcon.http.authentication.kerberos.keytab"]
-      falcon_startup_props = build_expectations('startup', props_value_check, props_empty_check,
-                                                  props_read_check)
-
-      falcon_expectations ={}
-      falcon_expectations.update(falcon_startup_props)
-
-      security_params = get_params_from_filesystem('/etc/falcon/conf',
-                                                   {'startup.properties': FILE_TYPE_PROPERTIES})
-      result_issues = validate_security_config_properties(security_params, falcon_expectations)
-      if not result_issues: # If all validations passed successfully
-        try:
-          # Double check the dict before calling execute
-          if ( 'startup' not in security_params
-               or '*.falcon.service.authentication.kerberos.keytab' not in security_params['startup']
-               or '*.falcon.service.authentication.kerberos.principal' not in security_params['startup']) \
-            or '*.falcon.http.authentication.kerberos.keytab' not in security_params['startup'] \
-            or '*.falcon.http.authentication.kerberos.principal' not in security_params['startup']:
-            self.put_structured_out({"securityState": "UNSECURED"})
-            self.put_structured_out(
-              {"securityIssuesFound": "Keytab file or principal are not set property."})
-            return
-
-          cached_kinit_executor(status_params.kinit_path_local,
-                                status_params.falcon_user,
-                                security_params['startup']['*.falcon.service.authentication.kerberos.keytab'],
-                                security_params['startup']['*.falcon.service.authentication.kerberos.principal'],
-                                status_params.hostname,
-                                status_params.tmp_dir)
-          cached_kinit_executor(status_params.kinit_path_local,
-                                status_params.falcon_user,
-                                security_params['startup']['*.falcon.http.authentication.kerberos.keytab'],
-                                security_params['startup']['*.falcon.http.authentication.kerberos.principal'],
-                                status_params.hostname,
-                                status_params.tmp_dir)
-          self.put_structured_out({"securityState": "SECURED_KERBEROS"})
-        except Exception as e:
-          self.put_structured_out({"securityState": "ERROR"})
-          self.put_structured_out({"securityStateErrorInfo": str(e)})
-      else:
-        issues = []
-        for cf in result_issues:
-          issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
-        self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
-        self.put_structured_out({"securityState": "UNSECURED"})
-    else:
-      self.put_structured_out({"securityState": "UNSECURED"})
-
   def get_log_folder(self):
     import params
     return params.falcon_log_dir

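When static validation passed, the removed methods went one step further and ran a real kinit with the configured keytab and principal via cached_kinit_executor, as in the Falcon hunk above. A sketch of that step, wrapped in a hypothetical helper method and using the same status_params fields and 'startup' security_params the removed code read:

from resource_management.libraries.functions.security_commons import cached_kinit_executor

def check_kerberos_credentials(self, status_params, security_params):
  """Hypothetical helper: prove the configured credentials actually work
  by running kinit as the service user, as the removed methods did."""
  try:
    cached_kinit_executor(
      status_params.kinit_path_local,  # path to the kinit binary
      status_params.falcon_user,       # user to run kinit as
      security_params['startup']['*.falcon.service.authentication.kerberos.keytab'],
      security_params['startup']['*.falcon.service.authentication.kerberos.principal'],
      status_params.hostname,
      status_params.tmp_dir)           # scratch dir used by the executor
    self.put_structured_out({"securityState": "SECURED_KERBEROS"})
  except Exception as e:
    # Any kinit failure is surfaced as an ERROR state with details.
    self.put_structured_out({"securityState": "ERROR"})
    self.put_structured_out({"securityStateErrorInfo": str(e)})
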
http://git-wip-us.apache.org/repos/asf/ambari/blob/2a00812a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase_master.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase_master.py b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase_master.py
index d2c8089..83af3aa 100644
--- a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase_master.py
+++ b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase_master.py
@@ -98,55 +98,6 @@ class HbaseMasterDefault(HbaseMaster):
 
     check_process_status(status_params.hbase_master_pid_file)
 
-  def security_status(self, env):
-    import status_params
-
-    env.set_params(status_params)
-    if status_params.security_enabled:
-      props_value_check = {"hbase.security.authentication" : "kerberos",
-                           "hbase.security.authorization": "true"}
-      props_empty_check = ['hbase.master.keytab.file',
-                           'hbase.master.kerberos.principal']
-      props_read_check = ['hbase.master.keytab.file']
-      hbase_site_expectations = build_expectations('hbase-site', props_value_check, props_empty_check,
-                                                  props_read_check)
-
-      hbase_expectations = {}
-      hbase_expectations.update(hbase_site_expectations)
-
-      security_params = get_params_from_filesystem(status_params.hbase_conf_dir,
-                                                   {'hbase-site.xml': FILE_TYPE_XML})
-      result_issues = validate_security_config_properties(security_params, hbase_expectations)
-      if not result_issues:  # If all validations passed successfully
-        try:
-          # Double check the dict before calling execute
-          if ( 'hbase-site' not in security_params
-               or 'hbase.master.keytab.file' not in security_params['hbase-site']
-               or 'hbase.master.kerberos.principal' not in security_params['hbase-site']):
-            self.put_structured_out({"securityState": "UNSECURED"})
-            self.put_structured_out(
-              {"securityIssuesFound": "Keytab file or principal are not set property."})
-            return
-
-          cached_kinit_executor(status_params.kinit_path_local,
-                                status_params.hbase_user,
-                                security_params['hbase-site']['hbase.master.keytab.file'],
-                                security_params['hbase-site']['hbase.master.kerberos.principal'],
-                                status_params.hostname,
-                                status_params.tmp_dir)
-          self.put_structured_out({"securityState": "SECURED_KERBEROS"})
-        except Exception as e:
-          self.put_structured_out({"securityState": "ERROR"})
-          self.put_structured_out({"securityStateErrorInfo": str(e)})
-      else:
-        issues = []
-        for cf in result_issues:
-          issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
-        self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
-        self.put_structured_out({"securityState": "UNSECURED"})
-    else:
-      self.put_structured_out({"securityState": "UNSECURED"})
-      
   def get_log_folder(self):
     import params
     return params.log_dir

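Whichever branch the removed methods took, they reported through Script.put_structured_out with a small fixed vocabulary. A hypothetical wrapper, report_security_state, collecting the keys and values verbatim from the hunks above:

def report_security_state(script, state, issues=None, error=None):
  """Hypothetical helper illustrating the structured-output contract the
  removed security_status() methods shared. state is one of
  SECURED_KERBEROS, UNSECURED or ERROR."""
  if issues:  # per-config-file validation failures
    script.put_structured_out({"securityIssuesFound": ". ".join(issues)})
  if error is not None:  # exception raised during the kinit check
    script.put_structured_out({"securityStateErrorInfo": str(error)})
  script.put_structured_out({"securityState": state})
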
http://git-wip-us.apache.org/repos/asf/ambari/blob/2a00812a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase_regionserver.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase_regionserver.py b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase_regionserver.py
index 226e7fd5..75910b1 100644
--- a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase_regionserver.py
+++ b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase_regionserver.py
@@ -109,55 +109,6 @@ class HbaseRegionServerDefault(HbaseRegionServer):
 
     check_process_status(status_params.regionserver_pid_file)
 
-  def security_status(self, env):
-    import status_params
-
-    env.set_params(status_params)
-    if status_params.security_enabled:
-      props_value_check = {"hbase.security.authentication" : "kerberos",
-                           "hbase.security.authorization": "true"}
-      props_empty_check = ['hbase.regionserver.keytab.file',
-                           'hbase.regionserver.kerberos.principal']
-      props_read_check = ['hbase.regionserver.keytab.file']
-      hbase_site_expectations = build_expectations('hbase-site', props_value_check, props_empty_check,
-                                                   props_read_check)
-
-      hbase_expectations = {}
-      hbase_expectations.update(hbase_site_expectations)
-
-      security_params = get_params_from_filesystem(status_params.hbase_conf_dir,
-                                                   {'hbase-site.xml': FILE_TYPE_XML})
-      result_issues = validate_security_config_properties(security_params, hbase_expectations)
-      if not result_issues:  # If all validations passed successfully
-        try:
-          # Double check the dict before calling execute
-          if ( 'hbase-site' not in security_params
-               or 'hbase.regionserver.keytab.file' not in security_params['hbase-site']
-               or 'hbase.regionserver.kerberos.principal' not in security_params['hbase-site']):
-            self.put_structured_out({"securityState": "UNSECURED"})
-            self.put_structured_out(
-              {"securityIssuesFound": "Keytab file or principal are not set property."})
-            return
-
-          cached_kinit_executor(status_params.kinit_path_local,
-                                status_params.hbase_user,
-                                security_params['hbase-site']['hbase.regionserver.keytab.file'],
-                                security_params['hbase-site']['hbase.regionserver.kerberos.principal'],
-                                status_params.hostname,
-                                status_params.tmp_dir)
-          self.put_structured_out({"securityState": "SECURED_KERBEROS"})
-        except Exception as e:
-          self.put_structured_out({"securityState": "ERROR"})
-          self.put_structured_out({"securityStateErrorInfo": str(e)})
-      else:
-        issues = []
-        for cf in result_issues:
-          issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
-        self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
-        self.put_structured_out({"securityState": "UNSECURED"})
-    else:
-      self.put_structured_out({"securityState": "UNSECURED"})
-
   def get_log_folder(self):
     import params
     return params.log_dir

http://git-wip-us.apache.org/repos/asf/ambari/blob/2a00812a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/phoenix_queryserver.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/phoenix_queryserver.py b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/phoenix_queryserver.py
index 77820cc..8a85d6e 100644
--- a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/phoenix_queryserver.py
+++ b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/phoenix_queryserver.py
@@ -71,10 +71,6 @@ class PhoenixQueryServer(Script):
     import status_params
     env.set_params(status_params)
     phoenix_service('status')
-
-
-  def security_status(self, env):
-    self.put_structured_out({"securityState": "UNSECURED"})
     
   def get_log_folder(self):
     import params
@@ -89,4 +85,4 @@ class PhoenixQueryServer(Script):
     return [status_params.phoenix_pid_file]
 
 if __name__ == "__main__":
-  PhoenixQueryServer().execute()
\ No newline at end of file
+  PhoenixQueryServer().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/2a00812a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/datanode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/datanode.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/datanode.py
index cd52885..da03cce 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/datanode.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/datanode.py
@@ -143,64 +143,6 @@ class DataNodeDefault(DataNode):
     hdfs_binary = self.get_hdfs_binary()
     # ensure the DataNode has started and rejoined the cluster
     datanode_upgrade.post_upgrade_check(hdfs_binary)
-
-  def security_status(self, env):
-    import status_params
-
-    env.set_params(status_params)
-    props_value_check = {"hadoop.security.authentication": "kerberos",
-                         "hadoop.security.authorization": "true"}
-    props_empty_check = ["hadoop.security.auth_to_local"]
-    props_read_check = None
-    core_site_expectations = build_expectations('core-site', props_value_check, props_empty_check,
-                                                props_read_check)
-    props_value_check = None
-    props_empty_check = ['dfs.datanode.keytab.file',
-                         'dfs.datanode.kerberos.principal']
-    props_read_check = ['dfs.datanode.keytab.file']
-    hdfs_site_expectations = build_expectations('hdfs-site', props_value_check, props_empty_check,
-                                                props_read_check)
-
-    hdfs_expectations = {}
-    hdfs_expectations.update(core_site_expectations)
-    hdfs_expectations.update(hdfs_site_expectations)
-
-    security_params = get_params_from_filesystem(status_params.hadoop_conf_dir,
-                                                 {'core-site.xml': FILE_TYPE_XML,
-                                                  'hdfs-site.xml': FILE_TYPE_XML})
-
-    if 'core-site' in security_params and 'hadoop.security.authentication' in security_params['core-site'] and \
-        security_params['core-site']['hadoop.security.authentication'].lower() == 'kerberos':
-      result_issues = validate_security_config_properties(security_params, hdfs_expectations)
-      if not result_issues:  # If all validations passed successfully
-        try:
-          # Double check the dict before calling execute
-          if ('hdfs-site' not in security_params or
-                  'dfs.datanode.keytab.file' not in security_params['hdfs-site'] or
-                  'dfs.datanode.kerberos.principal' not in security_params['hdfs-site']):
-            self.put_structured_out({"securityState": "UNSECURED"})
-            self.put_structured_out(
-              {"securityIssuesFound": "Keytab file or principal are not set property."})
-            return
-
-          cached_kinit_executor(status_params.kinit_path_local,
-                                status_params.hdfs_user,
-                                security_params['hdfs-site']['dfs.datanode.keytab.file'],
-                                security_params['hdfs-site']['dfs.datanode.kerberos.principal'],
-                                status_params.hostname,
-                                status_params.tmp_dir)
-          self.put_structured_out({"securityState": "SECURED_KERBEROS"})
-        except Exception as e:
-          self.put_structured_out({"securityState": "ERROR"})
-          self.put_structured_out({"securityStateErrorInfo": str(e)})
-      else:
-        issues = []
-        for cf in result_issues:
-          issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
-        self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
-        self.put_structured_out({"securityState": "UNSECURED"})
-    else:
-      self.put_structured_out({"securityState": "UNSECURED"})
       
   def get_log_folder(self):
     import params

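The HDFS variants put one gate in front of all of this: the checks only run when core-site on disk actually enables kerberos, and otherwise they short-circuit straight to UNSECURED. That gate, extracted into a hypothetical predicate with the same names as the hunk above:

def kerberos_enabled(security_params):
  """Hypothetical predicate for the gate in the removed HDFS methods:
  validate further only if core-site on disk enables kerberos."""
  core_site = security_params.get('core-site', {})
  return core_site.get('hadoop.security.authentication', '').lower() == 'kerberos'
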
http://git-wip-us.apache.org/repos/asf/ambari/blob/2a00812a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_client.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_client.py
index 4dabdbc..51acc9e 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_client.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_client.py
@@ -66,51 +66,6 @@ class HdfsClientDefault(HdfsClient):
       conf_select.select(params.stack_name, "hadoop", params.version)
       stack_select.select("hadoop-client", params.version)
 
-  def security_status(self, env):
-    import status_params
-    env.set_params(status_params)
-
-    props_value_check = {"hadoop.security.authentication": "kerberos",
-                         "hadoop.security.authorization": "true"}
-    props_empty_check = ["hadoop.security.auth_to_local"]
-    props_read_check = None
-    core_site_expectations = build_expectations('core-site', props_value_check, props_empty_check,
-                                                props_read_check)
-    hdfs_expectations ={}
-    hdfs_expectations.update(core_site_expectations)
-
-    security_params = get_params_from_filesystem(status_params.hadoop_conf_dir,
-                                                   {'core-site.xml': FILE_TYPE_XML})
-
-    if 'core-site' in security_params and 'hadoop.security.authentication' in security_params['core-site'] and \
-        security_params['core-site']['hadoop.security.authentication'].lower() == 'kerberos':
-      result_issues = validate_security_config_properties(security_params, hdfs_expectations)
-      if not result_issues: # If all validations passed successfully
-        if status_params.hdfs_user_principal or status_params.hdfs_user_keytab:
-          try:
-            cached_kinit_executor(status_params.kinit_path_local,
-                       status_params.hdfs_user,
-                       status_params.hdfs_user_keytab,
-                       status_params.hdfs_user_principal,
-                       status_params.hostname,
-                       status_params.tmp_dir)
-            self.put_structured_out({"securityState": "SECURED_KERBEROS"})
-          except Exception as e:
-            self.put_structured_out({"securityState": "ERROR"})
-            self.put_structured_out({"securityStateErrorInfo": str(e)})
-        else:
-          self.put_structured_out({"securityIssuesFound": "hdfs principal and/or keytab file is not specified"})
-          self.put_structured_out({"securityState": "UNSECURED"})
-      else:
-        issues = []
-        for cf in result_issues:
-          issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
-        self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
-        self.put_structured_out({"securityState": "UNSECURED"})
-
-    else:
-      self.put_structured_out({"securityState": "UNSECURED"})
-
 @OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
 class HdfsClientWindows(HdfsClient):
   def install(self, env):

http://git-wip-us.apache.org/repos/asf/ambari/blob/2a00812a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/journalnode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/journalnode.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/journalnode.py
index 9448fa6..7fd8d70 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/journalnode.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/journalnode.py
@@ -105,63 +105,6 @@ class JournalNodeDefault(JournalNode):
     env.set_params(status_params)
     check_process_status(status_params.journalnode_pid_file)
 
-  def security_status(self, env):
-    import status_params
-
-    env.set_params(status_params)
-    props_value_check = {"hadoop.security.authentication": "kerberos",
-                         "hadoop.security.authorization": "true"}
-    props_empty_check = ["hadoop.security.auth_to_local"]
-    props_read_check = None
-    core_site_expectations = build_expectations('core-site', props_value_check, props_empty_check,
-                                                props_read_check)
-
-    props_value_check = None
-    props_empty_check = ['dfs.journalnode.keytab.file',
-                         'dfs.journalnode.kerberos.principal']
-    props_read_check = ['dfs.journalnode.keytab.file']
-    hdfs_site_expectations = build_expectations('hdfs-site', props_value_check, props_empty_check,
-                                                props_read_check)
-
-    hdfs_expectations = {}
-    hdfs_expectations.update(hdfs_site_expectations)
-    hdfs_expectations.update(core_site_expectations)
-
-    security_params = get_params_from_filesystem(status_params.hadoop_conf_dir,
-                                                 {'core-site.xml': FILE_TYPE_XML})
-    if 'core-site' in security_params and 'hadoop.security.authentication' in security_params['core-site'] and \
-        security_params['core-site']['hadoop.security.authentication'].lower() == 'kerberos':
-      result_issues = validate_security_config_properties(security_params, hdfs_expectations)
-      if not result_issues:  # If all validations passed successfully
-        try:
-          # Double check the dict before calling execute
-          if ('hdfs-site' not in security_params or
-                  'dfs.journalnode.kerberos.keytab.file' not in security_params['hdfs-site'] or
-                  'dfs.journalnode.kerberos.principal' not in security_params['hdfs-site']):
-            self.put_structured_out({"securityState": "UNSECURED"})
-            self.put_structured_out(
-              {"securityIssuesFound": "Keytab file or principal are not set property."})
-            return
-
-          cached_kinit_executor(status_params.kinit_path_local,
-                                status_params.hdfs_user,
-                                security_params['hdfs-site']['dfs.journalnode.kerberos.keytab.file'],
-                                security_params['hdfs-site']['dfs.journalnode.kerberos.principal'],
-                                status_params.hostname,
-                                status_params.tmp_dir)
-          self.put_structured_out({"securityState": "SECURED_KERBEROS"})
-        except Exception as e:
-          self.put_structured_out({"securityState": "ERROR"})
-          self.put_structured_out({"securityStateErrorInfo": str(e)})
-      else:
-        issues = []
-        for cf in result_issues:
-          issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
-        self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
-        self.put_structured_out({"securityState": "UNSECURED"})
-    else:
-      self.put_structured_out({"securityState": "UNSECURED"})
-      
   def get_log_folder(self):
     import params
     return params.hdfs_log_dir

http://git-wip-us.apache.org/repos/asf/ambari/blob/2a00812a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/namenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/namenode.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/namenode.py
index 1347f37..65cd378 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/namenode.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/namenode.py
@@ -216,63 +216,6 @@ class NameNodeDefault(NameNode):
             try_sleep=10
     )
 
-  def security_status(self, env):
-    import status_params
-
-    env.set_params(status_params)
-    props_value_check = {"hadoop.security.authentication": "kerberos",
-                         "hadoop.security.authorization": "true"}
-    props_empty_check = ["hadoop.security.auth_to_local"]
-    props_read_check = None
-    core_site_expectations = build_expectations('core-site', props_value_check, props_empty_check,
-                                                props_read_check)
-    props_value_check = None
-    props_empty_check = ['dfs.namenode.kerberos.internal.spnego.principal',
-                         'dfs.namenode.keytab.file',
-                         'dfs.namenode.kerberos.principal']
-    props_read_check = ['dfs.namenode.keytab.file']
-    hdfs_site_expectations = build_expectations('hdfs-site', props_value_check, props_empty_check,
-                                                props_read_check)
-
-    hdfs_expectations = {}
-    hdfs_expectations.update(core_site_expectations)
-    hdfs_expectations.update(hdfs_site_expectations)
-
-    security_params = get_params_from_filesystem(status_params.hadoop_conf_dir,
-                                                 {'core-site.xml': FILE_TYPE_XML,
-                                                  'hdfs-site.xml': FILE_TYPE_XML})
-    if 'core-site' in security_params and 'hadoop.security.authentication' in security_params['core-site'] and \
-        security_params['core-site']['hadoop.security.authentication'].lower() == 'kerberos':
-      result_issues = validate_security_config_properties(security_params, hdfs_expectations)
-      if not result_issues:  # If all validations passed successfully
-        try:
-          # Double check the dict before calling execute
-          if ( 'hdfs-site' not in security_params
-               or 'dfs.namenode.keytab.file' not in security_params['hdfs-site']
-               or 'dfs.namenode.kerberos.principal' not in security_params['hdfs-site']):
-            self.put_structured_out({"securityState": "UNSECURED"})
-            self.put_structured_out(
-              {"securityIssuesFound": "Keytab file or principal are not set property."})
-            return
-          cached_kinit_executor(status_params.kinit_path_local,
-                                status_params.hdfs_user,
-                                security_params['hdfs-site']['dfs.namenode.keytab.file'],
-                                security_params['hdfs-site']['dfs.namenode.kerberos.principal'],
-                                status_params.hostname,
-                                status_params.tmp_dir)
-          self.put_structured_out({"securityState": "SECURED_KERBEROS"})
-        except Exception as e:
-          self.put_structured_out({"securityState": "ERROR"})
-          self.put_structured_out({"securityStateErrorInfo": str(e)})
-      else:
-        issues = []
-        for cf in result_issues:
-          issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
-        self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
-        self.put_structured_out({"securityState": "UNSECURED"})
-    else:
-      self.put_structured_out({"securityState": "UNSECURED"})
-
   def rebalancehdfs(self, env):
     import params
     env.set_params(params)

http://git-wip-us.apache.org/repos/asf/ambari/blob/2a00812a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/nfsgateway.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/nfsgateway.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/nfsgateway.py
index 7ba1f96..fa451f4 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/nfsgateway.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/nfsgateway.py
@@ -76,64 +76,6 @@ class NFSGateway(Script):
     env.set_params(status_params)
 
     check_process_status(status_params.nfsgateway_pid_file)
-
-  def security_status(self, env):
-    import status_params
-
-    env.set_params(status_params)
-    props_value_check = {"hadoop.security.authentication": "kerberos",
-                         "hadoop.security.authorization": "true"}
-    props_empty_check = ["hadoop.security.auth_to_local"]
-    props_read_check = None
-    core_site_expectations = build_expectations('core-site', props_value_check, props_empty_check,
-                                                props_read_check)
-    props_value_check = None
-    props_empty_check = ['nfs.keytab.file',
-                         'nfs.kerberos.principal']
-    props_read_check = ['nfs.keytab.file']
-    hdfs_site_expectations = build_expectations('hdfs-site', props_value_check, props_empty_check,
-                                                props_read_check)
-
-    hdfs_expectations = {}
-    hdfs_expectations.update(core_site_expectations)
-    hdfs_expectations.update(hdfs_site_expectations)
-
-    security_params = get_params_from_filesystem(status_params.hadoop_conf_dir,
-                                                 {'core-site.xml': FILE_TYPE_XML,
-                                                  'hdfs-site.xml': FILE_TYPE_XML})
-    if 'core-site' in security_params and 'hadoop.security.authentication' in security_params['core-site'] and \
-        security_params['core-site']['hadoop.security.authentication'].lower() == 'kerberos':
-      result_issues = validate_security_config_properties(security_params, hdfs_expectations)
-      if not result_issues:  # If all validations passed successfully
-        try:
-          # Double check the dict before calling execute
-          if ('hdfs-site' not in security_params or
-                'nfs.keytab.file' not in security_params['hdfs-site'] or
-                'nfs.kerberos.principal' not in security_params['hdfs-site']):
-            self.put_structured_out({"securityState": "UNSECURED"})
-            self.put_structured_out(
-              {"securityIssuesFound": "Keytab file or principal are not set property."})
-            return
-
-          cached_kinit_executor(status_params.kinit_path_local,
-                                status_params.hdfs_user,
-                                security_params['hdfs-site']['nfs.keytab.file'],
-                                security_params['hdfs-site'][
-                                  'nfs.kerberos.principal'],
-                                status_params.hostname,
-                                status_params.tmp_dir)
-          self.put_structured_out({"securityState": "SECURED_KERBEROS"})
-        except Exception as e:
-          self.put_structured_out({"securityState": "ERROR"})
-          self.put_structured_out({"securityStateErrorInfo": str(e)})
-      else:
-        issues = []
-        for cf in result_issues:
-          issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
-        self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
-        self.put_structured_out({"securityState": "UNSECURED"})
-    else:
-      self.put_structured_out({"securityState": "UNSECURED"})
       
   def get_log_folder(self):
     import params

http://git-wip-us.apache.org/repos/asf/ambari/blob/2a00812a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/snamenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/snamenode.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/snamenode.py
index 0f1f438..1408468 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/snamenode.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/snamenode.py
@@ -74,66 +74,6 @@ class SNameNodeDefault(SNameNode):
     if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version):
       conf_select.select(params.stack_name, "hadoop", params.version)
       stack_select.select("hadoop-hdfs-secondarynamenode", params.version)
-
-  def security_status(self, env):
-    import status_params
-
-    env.set_params(status_params)
-    props_value_check = {"hadoop.security.authentication": "kerberos",
-                         "hadoop.security.authorization": "true"}
-    props_empty_check = ["hadoop.security.auth_to_local"]
-    props_read_check = None
-    core_site_expectations = build_expectations('core-site', props_value_check, props_empty_check,
-                                                props_read_check)
-    props_value_check = None
-    props_empty_check = ['dfs.secondary.namenode.kerberos.internal.spnego.principal',
-                         'dfs.secondary.namenode.keytab.file',
-                         'dfs.secondary.namenode.kerberos.principal']
-    props_read_check = ['dfs.secondary.namenode.keytab.file']
-    hdfs_site_expectations = build_expectations('hdfs-site', props_value_check, props_empty_check,
-                                                props_read_check)
-
-    hdfs_expectations = {}
-    hdfs_expectations.update(core_site_expectations)
-    hdfs_expectations.update(hdfs_site_expectations)
-
-    security_params = get_params_from_filesystem(status_params.hadoop_conf_dir,
-                                                 {'core-site.xml': FILE_TYPE_XML,
-                                                  'hdfs-site.xml': FILE_TYPE_XML})
-
-    if 'core-site' in security_params and 'hadoop.security.authentication' in security_params['core-site'] and \
-        security_params['core-site']['hadoop.security.authentication'].lower() == 'kerberos':
-      result_issues = validate_security_config_properties(security_params, hdfs_expectations)
-      if not result_issues:  # If all validations passed successfully
-        try:
-          # Double check the dict before calling execute
-          if ('hdfs-site' not in security_params or
-                  'dfs.secondary.namenode.keytab.file' not in security_params['hdfs-site'] or
-                  'dfs.secondary.namenode.kerberos.principal' not in security_params['hdfs-site']):
-            self.put_structured_out({"securityState": "UNSECURED"})
-            self.put_structured_out(
-              {"securityIssuesFound": "Keytab file or principal are not set property."})
-            return
-
-          cached_kinit_executor(status_params.kinit_path_local,
-                                status_params.hdfs_user,
-                                security_params['hdfs-site']['dfs.secondary.namenode.keytab.file'],
-                                security_params['hdfs-site'][
-                                  'dfs.secondary.namenode.kerberos.principal'],
-                                status_params.hostname,
-                                status_params.tmp_dir)
-          self.put_structured_out({"securityState": "SECURED_KERBEROS"})
-        except Exception as e:
-          self.put_structured_out({"securityState": "ERROR"})
-          self.put_structured_out({"securityStateErrorInfo": str(e)})
-      else:
-        issues = []
-        for cf in result_issues:
-          issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
-        self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
-        self.put_structured_out({"securityState": "UNSECURED"})
-    else:
-      self.put_structured_out({"securityState": "UNSECURED"})
       
   def get_log_folder(self):
     import params

http://git-wip-us.apache.org/repos/asf/ambari/blob/2a00812a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/zkfc_slave.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/zkfc_slave.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/zkfc_slave.py
index be0d2ed..ca5f605 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/zkfc_slave.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/zkfc_slave.py
@@ -119,49 +119,6 @@ class ZkfcSlaveDefault(ZkfcSlave):
     env.set_params(status_params)
     check_process_status(status_params.zkfc_pid_file)
 
-  def security_status(self, env):
-    import status_params
-    env.set_params(status_params)
-    props_value_check = {"hadoop.security.authentication": "kerberos",
-                         "hadoop.security.authorization": "true"}
-    props_empty_check = ["hadoop.security.auth_to_local"]
-    props_read_check = None
-    core_site_expectations = build_expectations('core-site', props_value_check, props_empty_check,
-                                                props_read_check)
-    hdfs_expectations = {}
-    hdfs_expectations.update(core_site_expectations)
-
-    security_params = get_params_from_filesystem(status_params.hadoop_conf_dir,
-                                                   {'core-site.xml': FILE_TYPE_XML})
-    result_issues = validate_security_config_properties(security_params, hdfs_expectations)
-    if 'core-site' in security_params and 'hadoop.security.authentication' in security_params['core-site'] and \
-        security_params['core-site']['hadoop.security.authentication'].lower() == 'kerberos':
-      if not result_issues:  # If all validations passed successfully
-        if status_params.hdfs_user_principal or status_params.hdfs_user_keytab:
-          try:
-            cached_kinit_executor(status_params.kinit_path_local,
-                                  status_params.hdfs_user,
-                                  status_params.hdfs_user_keytab,
-                                  status_params.hdfs_user_principal,
-                                  status_params.hostname,
-                                  status_params.tmp_dir)
-            self.put_structured_out({"securityState": "SECURED_KERBEROS"})
-          except Exception as e:
-            self.put_structured_out({"securityState": "ERROR"})
-            self.put_structured_out({"securityStateErrorInfo": str(e)})
-        else:
-          self.put_structured_out(
-            {"securityIssuesFound": "hdfs principal and/or keytab file is not specified"})
-          self.put_structured_out({"securityState": "UNSECURED"})
-      else:
-        issues = []
-        for cf in result_issues:
-          issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
-        self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
-        self.put_structured_out({"securityState": "UNSECURED"})
-    else:
-      self.put_structured_out({"securityState": "UNSECURED"})
-
   def disable_security(self, env):
     import params
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/2a00812a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/datanode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/datanode.py b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/datanode.py
index 130c021..78a8f4b 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/datanode.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/datanode.py
@@ -98,64 +98,6 @@ class DataNodeDefault(DataNode):
     # ensure the DataNode has started and rejoined the cluster
     datanode_upgrade.post_upgrade_check(hdfs_binary)
 
-  def security_status(self, env):
-    import status_params
-
-    env.set_params(status_params)
-    props_value_check = {"hadoop.security.authentication": "kerberos",
-                         "hadoop.security.authorization": "true"}
-    props_empty_check = ["hadoop.security.auth_to_local"]
-    props_read_check = None
-    core_site_expectations = build_expectations('core-site', props_value_check, props_empty_check,
-                                                props_read_check)
-    props_value_check = None
-    props_empty_check = ['dfs.datanode.keytab.file',
-                         'dfs.datanode.kerberos.principal']
-    props_read_check = ['dfs.datanode.keytab.file']
-    hdfs_site_expectations = build_expectations('hdfs-site', props_value_check, props_empty_check,
-                                                props_read_check)
-
-    hdfs_expectations = {}
-    hdfs_expectations.update(core_site_expectations)
-    hdfs_expectations.update(hdfs_site_expectations)
-
-    security_params = get_params_from_filesystem(status_params.hadoop_conf_dir,
-                                                 {'core-site.xml': FILE_TYPE_XML,
-                                                  'hdfs-site.xml': FILE_TYPE_XML})
-
-    if 'core-site' in security_params and 'hadoop.security.authentication' in security_params['core-site'] and \
-        security_params['core-site']['hadoop.security.authentication'].lower() == 'kerberos':
-      result_issues = validate_security_config_properties(security_params, hdfs_expectations)
-      if not result_issues:  # If all validations passed successfully
-        try:
-          # Double check the dict before calling execute
-          if ('hdfs-site' not in security_params or
-                  'dfs.datanode.keytab.file' not in security_params['hdfs-site'] or
-                  'dfs.datanode.kerberos.principal' not in security_params['hdfs-site']):
-            self.put_structured_out({"securityState": "UNSECURED"})
-            self.put_structured_out(
-              {"securityIssuesFound": "Keytab file or principal are not set property."})
-            return
-
-          cached_kinit_executor(status_params.kinit_path_local,
-                                status_params.hdfs_user,
-                                security_params['hdfs-site']['dfs.datanode.keytab.file'],
-                                security_params['hdfs-site']['dfs.datanode.kerberos.principal'],
-                                status_params.hostname,
-                                status_params.tmp_dir)
-          self.put_structured_out({"securityState": "SECURED_KERBEROS"})
-        except Exception as e:
-          self.put_structured_out({"securityState": "ERROR"})
-          self.put_structured_out({"securityStateErrorInfo": str(e)})
-      else:
-        issues = []
-        for cf in result_issues:
-          issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
-        self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
-        self.put_structured_out({"securityState": "UNSECURED"})
-    else:
-      self.put_structured_out({"securityState": "UNSECURED"})
-      
   def get_log_folder(self):
     import params
     return params.hdfs_log_dir

http://git-wip-us.apache.org/repos/asf/ambari/blob/2a00812a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/hdfs_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/hdfs_client.py b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/hdfs_client.py
index 4dabdbc..51acc9e 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/hdfs_client.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/hdfs_client.py
@@ -66,51 +66,6 @@ class HdfsClientDefault(HdfsClient):
       conf_select.select(params.stack_name, "hadoop", params.version)
       stack_select.select("hadoop-client", params.version)
 
-  def security_status(self, env):
-    import status_params
-    env.set_params(status_params)
-
-    props_value_check = {"hadoop.security.authentication": "kerberos",
-                         "hadoop.security.authorization": "true"}
-    props_empty_check = ["hadoop.security.auth_to_local"]
-    props_read_check = None
-    core_site_expectations = build_expectations('core-site', props_value_check, props_empty_check,
-                                                props_read_check)
-    hdfs_expectations ={}
-    hdfs_expectations.update(core_site_expectations)
-
-    security_params = get_params_from_filesystem(status_params.hadoop_conf_dir,
-                                                   {'core-site.xml': FILE_TYPE_XML})
-
-    if 'core-site' in security_params and 'hadoop.security.authentication' in security_params['core-site'] and \
-        security_params['core-site']['hadoop.security.authentication'].lower() == 'kerberos':
-      result_issues = validate_security_config_properties(security_params, hdfs_expectations)
-      if not result_issues: # If all validations passed successfully
-        if status_params.hdfs_user_principal or status_params.hdfs_user_keytab:
-          try:
-            cached_kinit_executor(status_params.kinit_path_local,
-                       status_params.hdfs_user,
-                       status_params.hdfs_user_keytab,
-                       status_params.hdfs_user_principal,
-                       status_params.hostname,
-                       status_params.tmp_dir)
-            self.put_structured_out({"securityState": "SECURED_KERBEROS"})
-          except Exception as e:
-            self.put_structured_out({"securityState": "ERROR"})
-            self.put_structured_out({"securityStateErrorInfo": str(e)})
-        else:
-          self.put_structured_out({"securityIssuesFound": "hdfs principal and/or keytab file is not specified"})
-          self.put_structured_out({"securityState": "UNSECURED"})
-      else:
-        issues = []
-        for cf in result_issues:
-          issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
-        self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
-        self.put_structured_out({"securityState": "UNSECURED"})
-
-    else:
-      self.put_structured_out({"securityState": "UNSECURED"})
-
 @OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
 class HdfsClientWindows(HdfsClient):
   def install(self, env):

http://git-wip-us.apache.org/repos/asf/ambari/blob/2a00812a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/journalnode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/journalnode.py b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/journalnode.py
index 9448fa6..7fd8d70 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/journalnode.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/journalnode.py
@@ -105,63 +105,6 @@ class JournalNodeDefault(JournalNode):
     env.set_params(status_params)
     check_process_status(status_params.journalnode_pid_file)
 
-  def security_status(self, env):
-    import status_params
-
-    env.set_params(status_params)
-    props_value_check = {"hadoop.security.authentication": "kerberos",
-                         "hadoop.security.authorization": "true"}
-    props_empty_check = ["hadoop.security.auth_to_local"]
-    props_read_check = None
-    core_site_expectations = build_expectations('core-site', props_value_check, props_empty_check,
-                                                props_read_check)
-
-    props_value_check = None
-    props_empty_check = ['dfs.journalnode.keytab.file',
-                         'dfs.journalnode.kerberos.principal']
-    props_read_check = ['dfs.journalnode.keytab.file']
-    hdfs_site_expectations = build_expectations('hdfs-site', props_value_check, props_empty_check,
-                                                props_read_check)
-
-    hdfs_expectations = {}
-    hdfs_expectations.update(hdfs_site_expectations)
-    hdfs_expectations.update(core_site_expectations)
-
-    security_params = get_params_from_filesystem(status_params.hadoop_conf_dir,
-                                                 {'core-site.xml': FILE_TYPE_XML})
-    if 'core-site' in security_params and 'hadoop.security.authentication' in security_params['core-site'] and \
-        security_params['core-site']['hadoop.security.authentication'].lower() == 'kerberos':
-      result_issues = validate_security_config_properties(security_params, hdfs_expectations)
-      if not result_issues:  # If all validations passed successfully
-        try:
-          # Double check the dict before calling execute
-          if ('hdfs-site' not in security_params or
-                  'dfs.journalnode.kerberos.keytab.file' not in security_params['hdfs-site'] or
-                  'dfs.journalnode.kerberos.principal' not in security_params['hdfs-site']):
-            self.put_structured_out({"securityState": "UNSECURED"})
-            self.put_structured_out(
-              {"securityIssuesFound": "Keytab file or principal are not set property."})
-            return
-
-          cached_kinit_executor(status_params.kinit_path_local,
-                                status_params.hdfs_user,
-                                security_params['hdfs-site']['dfs.journalnode.kerberos.keytab.file'],
-                                security_params['hdfs-site']['dfs.journalnode.kerberos.principal'],
-                                status_params.hostname,
-                                status_params.tmp_dir)
-          self.put_structured_out({"securityState": "SECURED_KERBEROS"})
-        except Exception as e:
-          self.put_structured_out({"securityState": "ERROR"})
-          self.put_structured_out({"securityStateErrorInfo": str(e)})
-      else:
-        issues = []
-        for cf in result_issues:
-          issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
-        self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
-        self.put_structured_out({"securityState": "UNSECURED"})
-    else:
-      self.put_structured_out({"securityState": "UNSECURED"})
-      
   def get_log_folder(self):
     import params
     return params.hdfs_log_dir


[33/50] [abbrv] ambari git commit: AMBARI-20747 - Upgrade is not taking care of AMBARI-15677 (jonathanhurley)

Posted by ao...@apache.org.
AMBARI-20747 - Upgrade is not taking care of AMBARI-15677 (jonathanhurley)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/665a2cd9
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/665a2cd9
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/665a2cd9

Branch: refs/heads/branch-3.0-perf
Commit: 665a2cd9b1747f87204f0406b029fd8186919c9a
Parents: 6b549d3
Author: Jonathan Hurley <jh...@hortonworks.com>
Authored: Thu Apr 20 16:44:52 2017 -0400
Committer: Andrew Onishuk <ao...@hortonworks.com>
Committed: Mon Apr 24 14:59:56 2017 +0300

----------------------------------------------------------------------
 .../stacks/HDP/2.3/upgrades/config-upgrade.xml  |   7 +
 .../HDP/2.3/upgrades/nonrolling-upgrade-2.3.xml |   7 +
 .../HDP/2.3/upgrades/nonrolling-upgrade-2.4.xml |   5 +
 .../HDP/2.3/upgrades/nonrolling-upgrade-2.5.xml |   5 +
 .../HDP/2.3/upgrades/nonrolling-upgrade-2.6.xml |   5 +
 .../stacks/HDP/2.3/upgrades/upgrade-2.3.xml     |   6 +
 .../stacks/HDP/2.3/upgrades/upgrade-2.4.xml     |   1 +
 .../stacks/HDP/2.3/upgrades/upgrade-2.5.xml     |   1 +
 .../stacks/HDP/2.3/upgrades/upgrade-2.6.xml     |   1 +
 .../stacks/HDP/2.4/upgrades/config-upgrade.xml  |   6 +
 .../HDP/2.4/upgrades/nonrolling-upgrade-2.4.xml |   5 +
 .../HDP/2.4/upgrades/nonrolling-upgrade-2.5.xml |   4 +
 .../HDP/2.4/upgrades/nonrolling-upgrade-2.6.xml |   6 +-
 .../stacks/HDP/2.4/upgrades/upgrade-2.4.xml     |   6 +
 .../stacks/HDP/2.4/upgrades/upgrade-2.5.xml     |   1 +
 .../stacks/HDP/2.4/upgrades/upgrade-2.6.xml     |   1 +
 .../stacks/HDP/2.5/upgrades/config-upgrade.xml  | 250 ++++++++++---------
 .../HDP/2.5/upgrades/nonrolling-upgrade-2.5.xml |   5 +
 .../HDP/2.5/upgrades/nonrolling-upgrade-2.6.xml |   4 +
 .../stacks/HDP/2.5/upgrades/upgrade-2.5.xml     |   6 +
 .../stacks/HDP/2.5/upgrades/upgrade-2.6.xml     |   1 +
 .../stacks/HDP/2.6/upgrades/config-upgrade.xml  |   6 +
 .../HDP/2.6/upgrades/nonrolling-upgrade-2.6.xml |   4 +
 .../stacks/HDP/2.6/upgrades/upgrade-2.6.xml     |   1 +
 24 files changed, 220 insertions(+), 124 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/665a2cd9/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/config-upgrade.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/config-upgrade.xml b/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/config-upgrade.xml
index 63cf5f5..ffa8e61 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/config-upgrade.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/config-upgrade.xml
@@ -386,6 +386,13 @@
             <regex-replace  key="content" find="hadoop.security.log.file=SecurityAuth.audit" replace-with="hadoop.security.log.file=SecurityAuth.audit&#10;log4j.additivity.SecurityLogger=false"/>
             <regex-replace  key="content" find="log4j.additivity.SecurityLogger=true" replace-with="log4j.additivity.SecurityLogger=false"/>
           </definition>
+
+          <definition xsi:type="configure" id="hdfs_namenode_prevent_gc_heuristics" summary="Prevent Garbage Collection Heuristics">
+            <type>hadoop-env</type>
+            <replace  key="content" find="-XX:+PrintGCDateStamps -Xms{{namenode_heapsize}}" replace-with="-XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -Xms{{namenode_heapsize}}"/>
+            <replace  key="content" find="-XX:+PrintGCDateStamps ${HADOOP_NAMENODE_INIT_HEAPSIZE}" replace-with="-XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly ${HADOOP_NAMENODE_INIT_HEAPSIZE}"/>
+          </definition>
+
         </changes>
       </component>
     </service>
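
For reference, the two <replace> directives in the hdfs_namenode_prevent_gc_heuristics definition above are plain substring substitutions on the hadoop-env "content" template: they splice the CMS occupancy flags from AMBARI-15677 in front of each NameNode heap option. A minimal Java sketch of that effect follows; the class and method names are hypothetical, and this is not Ambari's actual upgrade-pack code.

    public class GcHeuristicsPatchSketch {
      // The AMBARI-15677 flags that this upgrade pack retrofits into
      // existing hadoop-env templates.
      static final String CMS_FLAGS =
          "-XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly ";

      static String apply(String hadoopEnvContent) {
        return hadoopEnvContent
            .replace("-XX:+PrintGCDateStamps -Xms{{namenode_heapsize}}",
                     "-XX:+PrintGCDateStamps " + CMS_FLAGS + "-Xms{{namenode_heapsize}}")
            .replace("-XX:+PrintGCDateStamps ${HADOOP_NAMENODE_INIT_HEAPSIZE}",
                     "-XX:+PrintGCDateStamps " + CMS_FLAGS
                         + "${HADOOP_NAMENODE_INIT_HEAPSIZE}");
      }

      public static void main(String[] args) {
        System.out.println(apply(
            "export HADOOP_NAMENODE_OPTS=\"-XX:+PrintGCDateStamps -Xms{{namenode_heapsize}}\""));
      }
    }

In the sketch, a customized template that contains neither find string passes through unchanged, since String.replace with no match is a no-op.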

http://git-wip-us.apache.org/repos/asf/ambari/blob/665a2cd9/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/nonrolling-upgrade-2.3.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/nonrolling-upgrade-2.3.xml b/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/nonrolling-upgrade-2.3.xml
index 3d8041e..e111dbf 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/nonrolling-upgrade-2.3.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/nonrolling-upgrade-2.3.xml
@@ -253,6 +253,11 @@
       <direction>UPGRADE</direction>   <!--  prevent config changes on downgrade -->
       <skippable>true</skippable>  <!-- May fix configuration problems manually -->
 
+      <!-- HDFS -->
+      <execute-stage service="HDFS" component="NAMENODE" title="Prevent Garbage Collection Heuristics">
+        <task xsi:type="configure" id="hdfs_namenode_prevent_gc_heuristics"/>
+      </execute-stage>
+
       <!--YARN-->
       <execute-stage service="MAPREDUCE2" component="MAPREDUCE2_CLIENT" title="Apply config changes for Mapreduce2 client">
         <task xsi:type="server_action" class="org.apache.ambari.server.serveraction.upgrades.FixLzoCodecPath">
@@ -292,6 +297,8 @@
       <execute-stage service="STORM" component="NIMBUS" title="Apply config changes for Nimbus">
         <task xsi:type="configure" id="increase_storm_zookeeper_timeouts"/>
       </execute-stage>
+
+
     </group>
 
     <!--

http://git-wip-us.apache.org/repos/asf/ambari/blob/665a2cd9/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/nonrolling-upgrade-2.4.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/nonrolling-upgrade-2.4.xml b/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/nonrolling-upgrade-2.4.xml
index fa06291..6230921 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/nonrolling-upgrade-2.4.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/nonrolling-upgrade-2.4.xml
@@ -264,6 +264,11 @@
         </task>
       </execute-stage>
 
+      <!-- HDFS -->
+      <execute-stage service="HDFS" component="NAMENODE" title="Prevent Garbage Collection Heuristics">
+        <task xsi:type="configure" id="hdfs_namenode_prevent_gc_heuristics"/>
+      </execute-stage>
+
       <!-- YARN -->
       <execute-stage service="YARN" component="RESOURCEMANAGER" title="Calculating Yarn Properties for Spark">
         <task xsi:type="server_action" class="org.apache.ambari.server.serveraction.upgrades.SparkShufflePropertyConfig">

http://git-wip-us.apache.org/repos/asf/ambari/blob/665a2cd9/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/nonrolling-upgrade-2.5.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/nonrolling-upgrade-2.5.xml b/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/nonrolling-upgrade-2.5.xml
index 8b8b9a6..44367dc 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/nonrolling-upgrade-2.5.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/nonrolling-upgrade-2.5.xml
@@ -285,6 +285,11 @@
         </task>
       </execute-stage>
 
+      <!-- HDFS -->
+      <execute-stage service="HDFS" component="NAMENODE" title="Prevent Garbage Collection Heuristics">
+        <task xsi:type="configure" id="hdfs_namenode_prevent_gc_heuristics"/>
+      </execute-stage>
+
       <!-- YARN -->
       <execute-stage service="YARN" component="RESOURCEMANAGER" title="Calculating Yarn Properties for Spark">
         <task xsi:type="server_action" class="org.apache.ambari.server.serveraction.upgrades.SparkShufflePropertyConfig">

http://git-wip-us.apache.org/repos/asf/ambari/blob/665a2cd9/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/nonrolling-upgrade-2.6.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/nonrolling-upgrade-2.6.xml b/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/nonrolling-upgrade-2.6.xml
index 2bf6e23..9362ae6 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/nonrolling-upgrade-2.6.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/nonrolling-upgrade-2.6.xml
@@ -297,6 +297,11 @@
         <task xsi:type="configure" id="hdfs_securitylogger_additivity"/>
       </execute-stage>
 
+      <!-- HDFS -->
+      <execute-stage service="HDFS" component="NAMENODE" title="Prevent Garbage Collection Heuristics">
+        <task xsi:type="configure" id="hdfs_namenode_prevent_gc_heuristics"/>
+      </execute-stage>
+
       <!-- YARN -->
       <execute-stage service="YARN" component="RESOURCEMANAGER" title="Calculating Yarn Properties for Spark">
         <task xsi:type="server_action" class="org.apache.ambari.server.serveraction.upgrades.SparkShufflePropertyConfig">

http://git-wip-us.apache.org/repos/asf/ambari/blob/665a2cd9/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.3.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.3.xml b/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.3.xml
index 97904bf..19031a8 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.3.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.3.xml
@@ -547,6 +547,12 @@
 
     <service name="HDFS">
       <component name="NAMENODE">
+        <pre-upgrade>
+          <task xsi:type="configure" id="hdfs_namenode_prevent_gc_heuristics"/>
+        </pre-upgrade>
+
+        <pre-downgrade/>
+
         <upgrade>
           <task xsi:type="restart-task" timeout-config="upgrade.parameter.nn-restart.timeout"/>
         </upgrade>

http://git-wip-us.apache.org/repos/asf/ambari/blob/665a2cd9/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.4.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.4.xml b/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.4.xml
index fd75e49..8cfaa52 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.4.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.4.xml
@@ -574,6 +574,7 @@
         <pre-upgrade>
           <task xsi:type="configure" id="hdp_2_4_0_0_namenode_ha_adjustments"/>
           <task xsi:type="configure" id="hdfs_log4j_parameterize" />
+          <task xsi:type="configure" id="hdfs_namenode_prevent_gc_heuristics"/>
         </pre-upgrade>
         
         <pre-downgrade /> <!--  no-op to prevent config changes on downgrade -->

http://git-wip-us.apache.org/repos/asf/ambari/blob/665a2cd9/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.5.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.5.xml b/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.5.xml
index 5318a51..6981955 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.5.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.5.xml
@@ -673,6 +673,7 @@
           <task xsi:type="configure" id="hdp_2_4_0_0_namenode_ha_adjustments"/>
           <task xsi:type="configure" id="hdp_2_5_0_0_remove_ranger_hdfs_audit_db" />
           <task xsi:type="configure" id="hdfs_log4j_parameterize" />
+          <task xsi:type="configure" id="hdfs_namenode_prevent_gc_heuristics"/>
         </pre-upgrade>
 
         <pre-downgrade /> <!--  no-op to prevent config changes on downgrade -->

http://git-wip-us.apache.org/repos/asf/ambari/blob/665a2cd9/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.6.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.6.xml b/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.6.xml
index d2e6288..0440dd3 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.6.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.6.xml
@@ -682,6 +682,7 @@
           <task xsi:type="configure" id="hdfs_log4j_parameterize" />
           <task xsi:type="configure" id="hadoop_env_zkfc_security_opts" />
           <task xsi:type="configure" id="hdfs_securitylogger_additivity" />
+          <task xsi:type="configure" id="hdfs_namenode_prevent_gc_heuristics"/>
         </pre-upgrade>
 
         <pre-downgrade /> <!--  no-op to prevent config changes on downgrade -->

http://git-wip-us.apache.org/repos/asf/ambari/blob/665a2cd9/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/config-upgrade.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/config-upgrade.xml b/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/config-upgrade.xml
index 2345d08..f99e764 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/config-upgrade.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/config-upgrade.xml
@@ -271,6 +271,12 @@
             <regex-replace  key="content" find="hadoop.security.log.file=SecurityAuth.audit" replace-with="hadoop.security.log.file=SecurityAuth.audit&#10;log4j.additivity.SecurityLogger=false"/>
             <regex-replace  key="content" find="log4j.additivity.SecurityLogger=true" replace-with="log4j.additivity.SecurityLogger=false"/>
           </definition>
+
+          <definition xsi:type="configure" id="hdfs_namenode_prevent_gc_heuristics" summary="Prevent Garbage Collection Heuristics">
+            <type>hadoop-env</type>
+            <replace  key="content" find="-XX:+PrintGCDateStamps -Xms{{namenode_heapsize}}" replace-with="-XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -Xms{{namenode_heapsize}}"/>
+            <replace  key="content" find="-XX:+PrintGCDateStamps ${HADOOP_NAMENODE_INIT_HEAPSIZE}" replace-with="-XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly ${HADOOP_NAMENODE_INIT_HEAPSIZE}"/>
+          </definition>
         </changes>
       </component>
     </service>

http://git-wip-us.apache.org/repos/asf/ambari/blob/665a2cd9/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/nonrolling-upgrade-2.4.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/nonrolling-upgrade-2.4.xml b/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/nonrolling-upgrade-2.4.xml
index 13f44fb..5120740 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/nonrolling-upgrade-2.4.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/nonrolling-upgrade-2.4.xml
@@ -251,6 +251,11 @@
       <direction>UPGRADE</direction>   <!--  prevent config changes on downgrade -->
       <skippable>true</skippable>  <!-- May fix configuration problems manually -->
 
+      <!-- HDFS -->
+      <execute-stage service="HDFS" component="NAMENODE" title="Prevent Garbage Collection Heuristics">
+        <task xsi:type="configure" id="hdfs_namenode_prevent_gc_heuristics"/>
+      </execute-stage>
+
       <!--YARN-->
       <execute-stage service="MAPREDUCE2" component="MAPREDUCE2_CLIENT" title="Apply config changes for Mapreduce2 client">
         <task xsi:type="server_action" class="org.apache.ambari.server.serveraction.upgrades.FixLzoCodecPath">

http://git-wip-us.apache.org/repos/asf/ambari/blob/665a2cd9/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/nonrolling-upgrade-2.5.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/nonrolling-upgrade-2.5.xml b/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/nonrolling-upgrade-2.5.xml
index ea261b9..3a42876 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/nonrolling-upgrade-2.5.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/nonrolling-upgrade-2.5.xml
@@ -379,6 +379,10 @@
         </task>
       </execute-stage>
 
+      <execute-stage service="HDFS" component="NAMENODE" title="Prevent Garbage Collection Heuristics">
+        <task xsi:type="configure" id="hdfs_namenode_prevent_gc_heuristics"/>
+      </execute-stage>
+
       <!-- SQOOP -->
       <execute-stage service="SQOOP" component="SQOOP" title="Apply config changes for Sqoop to remove Atlas Configs">
         <!-- Remove Atlas configs that were incorrectly added to sqoop-site instead of Atlas' application.properties. -->

http://git-wip-us.apache.org/repos/asf/ambari/blob/665a2cd9/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/nonrolling-upgrade-2.6.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/nonrolling-upgrade-2.6.xml b/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/nonrolling-upgrade-2.6.xml
index de6b8ef..b812547 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/nonrolling-upgrade-2.6.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/nonrolling-upgrade-2.6.xml
@@ -435,20 +435,22 @@
         <task xsi:type="configure" id="hdp_2_5_0_0_remove_ranger_hdfs_audit_db"/>
       </execute-stage>
 
-      <!--HDFS-->
       <execute-stage service="HDFS" component="NAMENODE" title="Parameterizing Hdfs Log4J Properties">
         <task xsi:type="configure" id="hdfs_log4j_parameterize">
           <summary>Updating the Hdfs Log4J properties to include parameterizations</summary>
         </task>
       </execute-stage>
 
-      <!--HDFS-->
       <execute-stage service="HDFS" component="NAMENODE" title="Adding HDFS ZKFC Security ACLs">
         <task xsi:type="configure" id="hadoop_env_zkfc_security_opts">
           <summary>Adding HDFS ZKFC Security ACLs</summary>
         </task>
       </execute-stage>
 
+      <execute-stage service="HDFS" component="NAMENODE" title="Prevent Garbage Collection Heuristics">
+        <task xsi:type="configure" id="hdfs_namenode_prevent_gc_heuristics"/>
+      </execute-stage>
+
       <!--SPARK-->
       <execute-stage service="SPARK" component="SPARK_CLIENT" title="Apply config changes for Spark">
         <task xsi:type="configure" id="hdp_2_5_0_0_spark_yarn_queue">

http://git-wip-us.apache.org/repos/asf/ambari/blob/665a2cd9/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/upgrade-2.4.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/upgrade-2.4.xml b/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/upgrade-2.4.xml
index fba7093..80436bf 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/upgrade-2.4.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/upgrade-2.4.xml
@@ -523,6 +523,12 @@
 
     <service name="HDFS">
       <component name="NAMENODE">
+        <pre-upgrade>
+          <task xsi:type="configure" id="hdfs_namenode_prevent_gc_heuristics"/>
+        </pre-upgrade>
+
+        <pre-downgrade/>
+
         <upgrade>
           <task xsi:type="restart-task" timeout-config="upgrade.parameter.nn-restart.timeout"/>
         </upgrade>

http://git-wip-us.apache.org/repos/asf/ambari/blob/665a2cd9/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/upgrade-2.5.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/upgrade-2.5.xml b/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/upgrade-2.5.xml
index 68efed2..5034627 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/upgrade-2.5.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/upgrade-2.5.xml
@@ -673,6 +673,7 @@
           <task xsi:type="configure" id="hdp_2_5_0_0_namenode_ha_adjustments"/>
           <task xsi:type="configure" id="hdp_2_5_0_0_remove_ranger_hdfs_audit_db" />
           <task xsi:type="configure" id="hdfs_log4j_parameterize" />
+          <task xsi:type="configure" id="hdfs_namenode_prevent_gc_heuristics"/>
         </pre-upgrade>
 
         <pre-downgrade /> <!--  no-op to prevent config changes on downgrade -->

http://git-wip-us.apache.org/repos/asf/ambari/blob/665a2cd9/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/upgrade-2.6.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/upgrade-2.6.xml b/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/upgrade-2.6.xml
index 2ed7962..fbd61ea 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/upgrade-2.6.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/upgrade-2.6.xml
@@ -687,6 +687,7 @@
           <task xsi:type="configure" id="hdfs_log4j_parameterize" />
           <task xsi:type="configure" id="hadoop_env_zkfc_security_opts" />
           <task xsi:type="configure" id="hdfs_securitylogger_additivity" />
+          <task xsi:type="configure" id="hdfs_namenode_prevent_gc_heuristics"/>
         </pre-upgrade>
 
         <pre-downgrade /> <!--  no-op to prevent config changes on downgrade -->

http://git-wip-us.apache.org/repos/asf/ambari/blob/665a2cd9/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/config-upgrade.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/config-upgrade.xml b/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/config-upgrade.xml
index ad936c2..1c2ceff 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/config-upgrade.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/config-upgrade.xml
@@ -159,115 +159,121 @@
         </changes>
       </component>
     </service>
-   <service name="OOZIE">
-    <component name="OOZIE_SERVER">
-      <changes>
-        <!-- Oozie Rolling properties for log4j need to be parameterized. -->
-        <definition xsi:type="configure" id="oozie_log4j_parameterize" summary="Parameterizing Oozie Log4J Properties">
-          <type>oozie-log4j</type>
-          <set key="oozie_log_maxhistory" value="720"/>
-          <regex-replace key="content" find="^log4j.appender.oozie.RollingPolicy.MaxHistory=([0-9]+)" replace-with="log4j.appender.oozie.RollingPolicy.MaxHistory={{oozie_log_maxhistory}}"/>
-        </definition>
-      </changes>
-    </component>
-  </service>
-  <service name="YARN">
-    <component name="RESOURCEMANAGER">
-      <changes>
-        <!-- Yarn Rolling properties for log4j need to be parameterized. -->
-        <definition xsi:type="configure" id="yarn_log4j_parameterize" summary="Parameterizing Yarn Log4J Properties">
-          <type>yarn-log4j</type>
-          <set key="yarn_rm_summary_log_max_backup_size" value="256"/>
-          <set key="yarn_rm_summary_log_number_of_backup_files" value="20"/>
-          <regex-replace key="content" find="^log4j.appender.RMSUMMARY.MaxFileSize=([0-9]+)MB" replace-with="log4j.appender.RMSUMMARY.MaxFileSize={{yarn_rm_summary_log_max_backup_size}}MB"/>
-          <regex-replace key="content" find="^log4j.appender.RMSUMMARY.MaxBackupIndex=([0-9]+)" replace-with="log4j.appender.RMSUMMARY.MaxBackupIndex={{yarn_rm_summary_log_number_of_backup_files}}"/>
-        </definition>
-        <definition xsi:type="configure" id="yarn_env_security_opts" summary="Adding YARN Security ACLs">
-          <type>yarn-env</type>
-          <insert key="content" value="{% if rm_security_opts is defined %} YARN_OPTS=&quot;{{rm_security_opts}} $YARN_OPTS&quot; {% endif %}" insert-type="append" newline-before="true" newline-after="true" />
-        </definition>
-        <definition xsi:type="configure" id="hdp_2_6_0_0_yarn_priority_utilization_underutilized_preemption">
-          <type>yarn-site</type>
-          <transfer operation="copy"
-                    from-key="yarn.resourcemanager.scheduler.monitor.enable"
-                    to-key="yarn.scheduler.capacity.ordering-policy.priority-utilization.underutilized-preemption.enabled"
-                    default-value="false"/>
-        </definition>
-        <definition xsi:type="configure" id="yarn_site_retained_log_count" summary="Updating Yarn retained file count for continuous Log Aggregation">
-          <type>yarn-site</type>
-          <set key="yarn.nodemanager.log-aggregation.num-log-files-per-app"
-               value="336" />
-        </definition>
-        <definition xsi:type="configure" id="hdp_2_6_0_0_service_check_queue_name">
-          <type>yarn-env</type>
-          <set key="service_check.queue.name" value="default" if-type="yarn-env" if-key="service_check.queue.name" if-key-state="absent"/>
-        </definition>
-        <definition xsi:type="configure" id="hdp_2_6_0_0_ats_scan_interval_default">
-          <type>yarn-site</type>
-          <set key="yarn.timeline-service.entity-group-fs-store.scan-interval-seconds" value="15"
-               if-type="yarn-site" if-key="yarn.timeline-service.entity-group-fs-store.scan-interval-seconds" if-value="60"/>
-        </definition>
-      </changes>
-    </component>
-  </service>
+     <service name="OOZIE">
+      <component name="OOZIE_SERVER">
+        <changes>
+          <!-- Oozie Rolling properties for log4j need to be parameterized. -->
+          <definition xsi:type="configure" id="oozie_log4j_parameterize" summary="Parameterizing Oozie Log4J Properties">
+            <type>oozie-log4j</type>
+            <set key="oozie_log_maxhistory" value="720"/>
+            <regex-replace key="content" find="^log4j.appender.oozie.RollingPolicy.MaxHistory=([0-9]+)" replace-with="log4j.appender.oozie.RollingPolicy.MaxHistory={{oozie_log_maxhistory}}"/>
+          </definition>
+        </changes>
+      </component>
+    </service>
+    <service name="YARN">
+      <component name="RESOURCEMANAGER">
+        <changes>
+          <!-- Yarn Rolling properties for log4j need to be parameterized. -->
+          <definition xsi:type="configure" id="yarn_log4j_parameterize" summary="Parameterizing Yarn Log4J Properties">
+            <type>yarn-log4j</type>
+            <set key="yarn_rm_summary_log_max_backup_size" value="256"/>
+            <set key="yarn_rm_summary_log_number_of_backup_files" value="20"/>
+            <regex-replace key="content" find="^log4j.appender.RMSUMMARY.MaxFileSize=([0-9]+)MB" replace-with="log4j.appender.RMSUMMARY.MaxFileSize={{yarn_rm_summary_log_max_backup_size}}MB"/>
+            <regex-replace key="content" find="^log4j.appender.RMSUMMARY.MaxBackupIndex=([0-9]+)" replace-with="log4j.appender.RMSUMMARY.MaxBackupIndex={{yarn_rm_summary_log_number_of_backup_files}}"/>
+          </definition>
+          <definition xsi:type="configure" id="yarn_env_security_opts" summary="Adding YARN Security ACLs">
+            <type>yarn-env</type>
+            <insert key="content" value="{% if rm_security_opts is defined %} YARN_OPTS=&quot;{{rm_security_opts}} $YARN_OPTS&quot; {% endif %}" insert-type="append" newline-before="true" newline-after="true" />
+          </definition>
+          <definition xsi:type="configure" id="hdp_2_6_0_0_yarn_priority_utilization_underutilized_preemption">
+            <type>yarn-site</type>
+            <transfer operation="copy"
+                      from-key="yarn.resourcemanager.scheduler.monitor.enable"
+                      to-key="yarn.scheduler.capacity.ordering-policy.priority-utilization.underutilized-preemption.enabled"
+                      default-value="false"/>
+          </definition>
+          <definition xsi:type="configure" id="yarn_site_retained_log_count" summary="Updating Yarn retained file count for continuous Log Aggregation">
+            <type>yarn-site</type>
+            <set key="yarn.nodemanager.log-aggregation.num-log-files-per-app"
+                 value="336" />
+          </definition>
+          <definition xsi:type="configure" id="hdp_2_6_0_0_service_check_queue_name">
+            <type>yarn-env</type>
+            <set key="service_check.queue.name" value="default" if-type="yarn-env" if-key="service_check.queue.name" if-key-state="absent"/>
+          </definition>
+          <definition xsi:type="configure" id="hdp_2_6_0_0_ats_scan_interval_default">
+            <type>yarn-site</type>
+            <set key="yarn.timeline-service.entity-group-fs-store.scan-interval-seconds" value="15"
+                 if-type="yarn-site" if-key="yarn.timeline-service.entity-group-fs-store.scan-interval-seconds" if-value="60"/>
+          </definition>
+        </changes>
+      </component>
+    </service>
 
-  <service name="MAPREDUCE2">
-    <component name="MAPREDUCE2_CLIENT">
-      <changes>
-        <definition xsi:type="configure" id="hdp_2_6_0_0_mapreduce_job_queuename">
-          <type>mapred-site</type>
-          <set key="mapreduce.job.queuename" value="default" if-type="mapred-site" if-key="mapreduce.job.queuename" if-key-state="absent"/>
-        </definition>
-      </changes>
-    </component>
-  </service>
+    <service name="MAPREDUCE2">
+      <component name="MAPREDUCE2_CLIENT">
+        <changes>
+          <definition xsi:type="configure" id="hdp_2_6_0_0_mapreduce_job_queuename">
+            <type>mapred-site</type>
+            <set key="mapreduce.job.queuename" value="default" if-type="mapred-site" if-key="mapreduce.job.queuename" if-key-state="absent"/>
+          </definition>
+        </changes>
+      </component>
+    </service>
 
-  <service name="HDFS">
-    <component name="NAMENODE">
+    <service name="HDFS">
+      <component name="NAMENODE">
+        <changes>
+          <!-- HDFS Rolling properties for log4j need to be parameterized. -->
+          <definition xsi:type="configure" id="hdfs_log4j_parameterize" summary="Parameterizing Hdfs Log4J Properties">
+            <type>hdfs-log4j</type>
+            <set key="hadoop_log_max_backup_size" value="256"/>
+            <set key="hadoop_log_number_of_backup_files" value="10"/>
+            <set key="hadoop_security_log_max_backup_size" value="256"/>
+            <set key="hadoop_security_log_number_of_backup_files" value="20"/>
+            <regex-replace  key="content" find="log4j.appender.RFA.MaxFileSize=([0-9]+)MB" replace-with="log4j.appender.RFA.MaxFileSize={{hadoop_log_max_backup_size}}MB"/>
+            <regex-replace  key="content" find="log4j.appender.RFA.MaxBackupIndex=([0-9]+)" replace-with="log4j.appender.RFA.MaxBackupIndex={{hadoop_log_number_of_backup_files}}"/>
+            <regex-replace  key="content" find="hadoop.security.log.maxfilesize=([0-9]+)MB" replace-with="hadoop.security.log.maxfilesize={{hadoop_security_log_max_backup_size}}MB"/>
+            <regex-replace  key="content" find="hadoop.security.log.maxbackupindex=([0-9]+)" replace-with="hadoop.security.log.maxbackupindex={{hadoop_security_log_number_of_backup_files}}"/>
+          </definition>
+          <definition xsi:type="configure" id="hadoop_env_zkfc_security_opts" summary="Adding HDFS ZKFC Security ACLs">
+            <type>hadoop-env</type>
+            <insert key="content" value="{% if hadoop_zkfc_opts is defined %} export HADOOP_ZKFC_OPTS=&quot;{{hadoop_zkfc_opts}} $HADOOP_ZKFC_OPTS&quot; {% endif %}" insert-type="append" newline-before="true" newline-after="true" />
+          </definition>
+          <definition xsi:type="configure" id="hdfs_securitylogger_additivity" summary="Set additivity of SecurityLogger to false">
+            <type>hdfs-log4j</type>
+            <regex-replace  key="content" find="hadoop.security.log.file=SecurityAuth.audit" replace-with="hadoop.security.log.file=SecurityAuth.audit&#10;log4j.additivity.SecurityLogger=false"/>
+            <regex-replace  key="content" find="log4j.additivity.SecurityLogger=true" replace-with="log4j.additivity.SecurityLogger=false"/>
+          </definition>
+
+          <definition xsi:type="configure" id="hdfs_namenode_prevent_gc_heuristics" summary="Prevent Garbage Collection Heuristics">
+            <type>hadoop-env</type>
+            <replace  key="content" find="-XX:+PrintGCDateStamps -Xms{{namenode_heapsize}}" replace-with="-XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -Xms{{namenode_heapsize}}"/>
+            <replace  key="content" find="-XX:+PrintGCDateStamps ${HADOOP_NAMENODE_INIT_HEAPSIZE}" replace-with="-XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly ${HADOOP_NAMENODE_INIT_HEAPSIZE}"/>
+          </definition>
+        </changes>
+      </component>
+    </service>
+    <service name="HBASE">
+      <component name="HBASE_MASTER">
       <changes>
-        <!-- HDFS Rolling properties for log4j need to be parameterized. -->
-        <definition xsi:type="configure" id="hdfs_log4j_parameterize" summary="Parameterizing Hdfs Log4J Properties">
-          <type>hdfs-log4j</type>
-          <set key="hadoop_log_max_backup_size" value="256"/>
-          <set key="hadoop_log_number_of_backup_files" value="10"/>
-          <set key="hadoop_security_log_max_backup_size" value="256"/>
-          <set key="hadoop_security_log_number_of_backup_files" value="20"/>
-          <regex-replace  key="content" find="log4j.appender.RFA.MaxFileSize=([0-9]+)MB" replace-with="log4j.appender.RFA.MaxFileSize={{hadoop_log_max_backup_size}}MB"/>
-          <regex-replace  key="content" find="log4j.appender.RFA.MaxBackupIndex=([0-9]+)" replace-with="log4j.appender.RFA.MaxBackupIndex={{hadoop_log_number_of_backup_files}}"/>
-          <regex-replace  key="content" find="hadoop.security.log.maxfilesize=([0-9]+)MB" replace-with="hadoop.security.log.maxfilesize={{hadoop_security_log_max_backup_size}}MB"/>
-          <regex-replace  key="content" find="hadoop.security.log.maxbackupindex=([0-9]+)" replace-with="hadoop.security.log.maxbackupindex={{hadoop_security_log_number_of_backup_files}}"/>
-        </definition>
-        <definition xsi:type="configure" id="hadoop_env_zkfc_security_opts" summary="Adding HDFS ZKFC Security ACLs">
-          <type>hadoop-env</type>
-          <insert key="content" value="{% if hadoop_zkfc_opts is defined %} export HADOOP_ZKFC_OPTS=&quot;{{hadoop_zkfc_opts}} $HADOOP_ZKFC_OPTS&quot; {% endif %}" insert-type="append" newline-before="true" newline-after="true" />
-        </definition>
-        <definition xsi:type="configure" id="hdfs_securitylogger_additivity" summary="Set additivity of SecurityLogger to false">
-          <type>hdfs-log4j</type>
-          <regex-replace  key="content" find="hadoop.security.log.file=SecurityAuth.audit" replace-with="hadoop.security.log.file=SecurityAuth.audit&#10;log4j.additivity.SecurityLogger=false"/>
-          <regex-replace  key="content" find="log4j.additivity.SecurityLogger=true" replace-with="log4j.additivity.SecurityLogger=false"/>
+      <!-- HBase Rolling properties for log4j need to be parameterized. -->
+        <definition xsi:type="configure" id="hbase_log4j_parameterize" summary="Parameterizing HBase Log4J Properties">
+            <type>hbase-log4j</type>
+            <set key="hbase_log_maxfilesize" value="256"/>
+            <set key="hbase_log_maxbackupindex" value="20"/>
+            <set key="hbase_security_log_maxfilesize" value="256"/>
+            <set key="hbase_security_log_maxbackupindex" value="20"/>
+            <regex-replace key="content" find="hbase.log.maxfilesize=([0-9]+)MB" replace-with="hbase.log.maxfilesize={{hbase_log_maxfilesize}}MB"/>
+            <regex-replace key="content" find="hbase.log.maxbackupindex=([0-9]+)" replace-with="hbase.log.maxbackupindex={{hbase_log_maxbackupindex}}"/>
+            <regex-replace key="content" find="hbase.security.log.maxfilesize=([0-9]+)MB" replace-with="hbase.security.log.maxfilesize={{hbase_security_log_maxfilesize}}MB"/>
+            <regex-replace key="content" find="hbase.security.log.maxbackupindex=([0-9]+)" replace-with="hbase.security.log.maxbackupindex={{hbase_security_log_maxbackupindex}}"/>
         </definition>
       </changes>
-    </component>
-  </service>
-  <service name="HBASE">
-    <component name="HBASE_MASTER">
-    <changes>
-    <!-- HBase Rolling properties for log4j need to be parameterized. -->
-      <definition xsi:type="configure" id="hbase_log4j_parameterize" summary="Parameterizing HBase Log4J Properties">
-          <type>hbase-log4j</type>
-          <set key="hbase_log_maxfilesize" value="256"/>
-          <set key="hbase_log_maxbackupindex" value="20"/>
-          <set key="hbase_security_log_maxfilesize" value="256"/>
-          <set key="hbase_security_log_maxbackupindex" value="20"/>
-          <regex-replace key="content" find="hbase.log.maxfilesize=([0-9]+)MB" replace-with="hbase.log.maxfilesize={{hbase_log_maxfilesize}}MB"/>
-          <regex-replace key="content" find="hbase.log.maxbackupindex=([0-9]+)" replace-with="hbase.log.maxbackupindex={{hbase_log_maxbackupindex}}"/>
-          <regex-replace key="content" find="hbase.security.log.maxfilesize=([0-9]+)MB" replace-with="hbase.security.log.maxfilesize={{hbase_security_log_maxfilesize}}MB"/>
-          <regex-replace key="content" find="hbase.security.log.maxbackupindex=([0-9]+)" replace-with="hbase.security.log.maxbackupindex={{hbase_security_log_maxbackupindex}}"/>
-      </definition>
-    </changes>
-    </component>
-  </service>
-  <service name="FALCON">
+      </component>
+    </service>
+    <service name="FALCON">
       <component name="FALCON_SERVER">
         <changes>
           <definition xsi:type="configure" id="falcon_log4j_parameterize" summary="Parameterizing Falcon Log4J Properties">
@@ -371,24 +377,24 @@
     </component>
     </service>
     <service name="KNOX">
-    <component name="KNOX_GATEWAY">
-    <changes>
-      <definition xsi:type="configure" id="knox_gateway_log4j_parameterize" summary="Parameterizing Knox Gateway Log4J Properties">
-        <type>gateway-log4j</type>
-        <set key="knox_gateway_log_maxfilesize" value="256"/>
-        <set key="knox_gateway_log_maxbackupindex" value="20"/>
-        <replace key="content" find="log4j.appender.drfa=org.apache.log4j.DailyRollingFileAppender" replace-with="log4j.appender.drfa=org.apache.log4j.DailyRollingFileAppender&#xA;log4j.appender.drfa.MaxFileSize = {{knox_gateway_log_maxfilesize}}MB"/>
-        <replace key="content" find="log4j.appender.drfa=org.apache.log4j.DailyRollingFileAppender" replace-with="log4j.appender.drfa=org.apache.log4j.DailyRollingFileAppender&#xA;log4j.appender.drfa.MaxBackupIndex = {{knox_gateway_log_maxbackupindex}}"/>
-        </definition>
-      <definition xsi:type="configure" id="knox_ldap_log4j_parameterize" summary="Parameterizing Knox Ldap Log4J Properties">
-        <type>ldap-log4j</type>
-        <set key="knox_ldap_log_maxfilesize" value="256"/>
-        <set key="knox_ldap_log_maxbackupindex" value="20"/>
-        <replace key="content" find="log4j.appender.drfa=org.apache.log4j.DailyRollingFileAppender" replace-with="log4j.appender.drfa=org.apache.log4j.DailyRollingFileAppender&#xA;log4j.appender.drfa.MaxFileSize = {{knox_ldap_log_maxfilesize}}MB"/>
-        <replace key="content" find="log4j.appender.drfa=org.apache.log4j.DailyRollingFileAppender" replace-with="log4j.appender.drfa=org.apache.log4j.DailyRollingFileAppender&#xA;log4j.appender.drfa.MaxBackupIndex = {{knox_ldap_log_maxbackupindex}}"/>
-      </definition>
-    </changes>
-    </component>
+      <component name="KNOX_GATEWAY">
+        <changes>
+          <definition xsi:type="configure" id="knox_gateway_log4j_parameterize" summary="Parameterizing Knox Gateway Log4J Properties">
+            <type>gateway-log4j</type>
+            <set key="knox_gateway_log_maxfilesize" value="256"/>
+            <set key="knox_gateway_log_maxbackupindex" value="20"/>
+            <replace key="content" find="log4j.appender.drfa=org.apache.log4j.DailyRollingFileAppender" replace-with="log4j.appender.drfa=org.apache.log4j.DailyRollingFileAppender&#xA;log4j.appender.drfa.MaxFileSize = {{knox_gateway_log_maxfilesize}}MB"/>
+            <replace key="content" find="log4j.appender.drfa=org.apache.log4j.DailyRollingFileAppender" replace-with="log4j.appender.drfa=org.apache.log4j.DailyRollingFileAppender&#xA;log4j.appender.drfa.MaxBackupIndex = {{knox_gateway_log_maxbackupindex}}"/>
+            </definition>
+          <definition xsi:type="configure" id="knox_ldap_log4j_parameterize" summary="Parameterizing Knox Ldap Log4J Properties">
+            <type>ldap-log4j</type>
+            <set key="knox_ldap_log_maxfilesize" value="256"/>
+            <set key="knox_ldap_log_maxbackupindex" value="20"/>
+            <replace key="content" find="log4j.appender.drfa=org.apache.log4j.DailyRollingFileAppender" replace-with="log4j.appender.drfa=org.apache.log4j.DailyRollingFileAppender&#xA;log4j.appender.drfa.MaxFileSize = {{knox_ldap_log_maxfilesize}}MB"/>
+            <replace key="content" find="log4j.appender.drfa=org.apache.log4j.DailyRollingFileAppender" replace-with="log4j.appender.drfa=org.apache.log4j.DailyRollingFileAppender&#xA;log4j.appender.drfa.MaxBackupIndex = {{knox_ldap_log_maxbackupindex}}"/>
+          </definition>
+        </changes>
+      </component>
     </service>
 
     <service name="PIG">

http://git-wip-us.apache.org/repos/asf/ambari/blob/665a2cd9/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/nonrolling-upgrade-2.5.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/nonrolling-upgrade-2.5.xml b/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/nonrolling-upgrade-2.5.xml
index d5387c6..be8b750 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/nonrolling-upgrade-2.5.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/nonrolling-upgrade-2.5.xml
@@ -271,6 +271,11 @@
       <direction>UPGRADE</direction>   <!--  prevent config changes on downgrade -->
       <skippable>true</skippable>  <!-- May fix configuration problems manually -->
 
+      <!-- HDFS -->
+      <execute-stage service="HDFS" component="NAMENODE" title="Prevent Garbage Collection Heuristics">
+        <task xsi:type="configure" id="hdfs_namenode_prevent_gc_heuristics"/>
+      </execute-stage>
+
       <!--YARN-->
       <execute-stage service="MAPREDUCE2" component="MAPREDUCE2_CLIENT" title="Apply config changes for Mapreduce2 client">
         <task xsi:type="server_action" class="org.apache.ambari.server.serveraction.upgrades.FixLzoCodecPath">

http://git-wip-us.apache.org/repos/asf/ambari/blob/665a2cd9/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/nonrolling-upgrade-2.6.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/nonrolling-upgrade-2.6.xml b/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/nonrolling-upgrade-2.6.xml
index d46353e..d822778 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/nonrolling-upgrade-2.6.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/nonrolling-upgrade-2.6.xml
@@ -281,6 +281,10 @@
         <task xsi:type="configure" id="hdfs_securitylogger_additivity"/>
       </execute-stage>
 
+      <execute-stage service="HDFS" component="NAMENODE" title="Prevent Garbage Collection Heuristics">
+        <task xsi:type="configure" id="hdfs_namenode_prevent_gc_heuristics"/>
+      </execute-stage>
+
       <!--YARN-->
       <execute-stage service="MAPREDUCE2" component="MAPREDUCE2_CLIENT" title="Apply config changes for Mapreduce2 client">
         <task xsi:type="server_action" class="org.apache.ambari.server.serveraction.upgrades.FixLzoCodecPath">

http://git-wip-us.apache.org/repos/asf/ambari/blob/665a2cd9/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/upgrade-2.5.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/upgrade-2.5.xml b/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/upgrade-2.5.xml
index 1af96dd..210c4e2 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/upgrade-2.5.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/upgrade-2.5.xml
@@ -573,6 +573,12 @@
 
     <service name="HDFS">
       <component name="NAMENODE">
+        <pre-upgrade>
+          <task xsi:type="configure" id="hdfs_namenode_prevent_gc_heuristics"/>
+        </pre-upgrade>
+
+        <pre-downgrade/>
+
         <upgrade>
           <task xsi:type="restart-task" timeout-config="upgrade.parameter.nn-restart.timeout"/>
         </upgrade>

http://git-wip-us.apache.org/repos/asf/ambari/blob/665a2cd9/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/upgrade-2.6.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/upgrade-2.6.xml b/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/upgrade-2.6.xml
index 94aab48..9a06dd0 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/upgrade-2.6.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/upgrade-2.6.xml
@@ -616,6 +616,7 @@
           <task xsi:type="configure" id="hdfs_log4j_parameterize" />
           <task xsi:type="configure" id="hadoop_env_zkfc_security_opts" />
           <task xsi:type="configure" id="hdfs_securitylogger_additivity" />
+          <task xsi:type="configure" id="hdfs_namenode_prevent_gc_heuristics"/>
         </pre-upgrade>
         <pre-downgrade />
         <upgrade>

http://git-wip-us.apache.org/repos/asf/ambari/blob/665a2cd9/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/config-upgrade.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/config-upgrade.xml b/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/config-upgrade.xml
index ceed59b..87646fa 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/config-upgrade.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/config-upgrade.xml
@@ -38,6 +38,12 @@
             <set key="ranger.plugin.hdfs.ambari.cluster.name" value="{{cluster_name}}"
               if-type="ranger-hdfs-plugin-properties" if-key="ranger-hdfs-plugin-enabled" if-key-state="present"/>
           </definition>
+
+          <definition xsi:type="configure" id="hdfs_namenode_prevent_gc_heuristics" summary="Prevent Garbage Collection Heuristics">
+            <type>hadoop-env</type>
+            <replace  key="content" find="-XX:+PrintGCDateStamps -Xms{{namenode_heapsize}}" replace-with="-XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -Xms{{namenode_heapsize}}"/>
+            <replace  key="content" find="-XX:+PrintGCDateStamps ${HADOOP_NAMENODE_INIT_HEAPSIZE}" replace-with="-XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly ${HADOOP_NAMENODE_INIT_HEAPSIZE}"/>
+          </definition>
         </changes>
       </component>
     </service>

http://git-wip-us.apache.org/repos/asf/ambari/blob/665a2cd9/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/nonrolling-upgrade-2.6.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/nonrolling-upgrade-2.6.xml b/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/nonrolling-upgrade-2.6.xml
index ce07f7a..7f0c818 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/nonrolling-upgrade-2.6.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/nonrolling-upgrade-2.6.xml
@@ -294,6 +294,10 @@
         <task xsi:type="configure" id="hdp_2_6_maint_ranger_hdfs_plugin_cluster_name"/>
       </execute-stage>
 
+      <execute-stage service="HDFS" component="NAMENODE" title="Prevent Garbage Collection Heuristics">
+        <task xsi:type="configure" id="hdfs_namenode_prevent_gc_heuristics"/>
+      </execute-stage>
+
       <!-- HIVE -->
       <execute-stage service="HIVE" component="HIVE_SERVER" title="Apply config changes for Ranger Hive plugin">
         <task xsi:type="configure" id="hdp_2_6_maint_ranger_hive_plugin_cluster_name"/>

http://git-wip-us.apache.org/repos/asf/ambari/blob/665a2cd9/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/upgrade-2.6.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/upgrade-2.6.xml b/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/upgrade-2.6.xml
index 5b8f53b..7279870 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/upgrade-2.6.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/upgrade-2.6.xml
@@ -607,6 +607,7 @@
       <component name="NAMENODE">
         <pre-upgrade>
           <task xsi:type="configure" id="hdp_2_6_maint_ranger_hdfs_plugin_cluster_name"/>
+          <task xsi:type="configure" id="hdfs_namenode_prevent_gc_heuristics"/>
         </pre-upgrade>
         <pre-downgrade/> <!--  no-op to prevent config changes on downgrade -->
         <upgrade>


[15/50] [abbrv] ambari git commit: AMBARI-20799 - Ambari fails to upgrade from 2.4.2.0 to 2.5.1.0 during schema upgrade with NPE (jonathanhurley)

Posted by ao...@apache.org.
AMBARI-20799 - Ambari fails to upgrade from 2.4.2.0 to 2.5.1.0 during schema upgrade with NPE (jonathanhurley)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/c77e4d22
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/c77e4d22
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/c77e4d22

Branch: refs/heads/branch-3.0-perf
Commit: c77e4d2255c35afa002e5adc3298d6e81ccd742b
Parents: 4d4e767
Author: Jonathan Hurley <jh...@hortonworks.com>
Authored: Thu Apr 20 08:58:15 2017 -0400
Committer: Andrew Onishuk <ao...@hortonworks.com>
Committed: Mon Apr 24 14:59:56 2017 +0300

----------------------------------------------------------------------
 .../server/upgrade/AbstractUpgradeCatalog.java  |  3 --
 .../server/upgrade/UpgradeCatalog212.java       |  3 --
 .../server/upgrade/UpgradeCatalog251.java       | 13 ++++++++
 .../server/upgrade/UpgradeCatalog212Test.java   | 34 +++-----------------
 4 files changed, 18 insertions(+), 35 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/c77e4d22/ambari-server/src/main/java/org/apache/ambari/server/upgrade/AbstractUpgradeCatalog.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/AbstractUpgradeCatalog.java b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/AbstractUpgradeCatalog.java
index 9a78ea1..6c59784 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/AbstractUpgradeCatalog.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/AbstractUpgradeCatalog.java
@@ -150,9 +150,6 @@ public abstract class AbstractUpgradeCatalog implements UpgradeCatalog {
     registerCatalog(this);
   }
 
-  protected AbstractUpgradeCatalog() {
-  }
-
   /**
    * Every subclass needs to register itself
    */

http://git-wip-us.apache.org/repos/asf/ambari/blob/c77e4d22/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog212.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog212.java b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog212.java
index 20f0d72..90854dd 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog212.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog212.java
@@ -87,9 +87,6 @@ public class UpgradeCatalog212 extends AbstractUpgradeCatalog {
     daoUtils = injector.getInstance(DaoUtils.class);
   }
 
-  protected UpgradeCatalog212() {
-  }
-
   // ----- UpgradeCatalog ----------------------------------------------------
 
   /**

http://git-wip-us.apache.org/repos/asf/ambari/blob/c77e4d22/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog251.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog251.java b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog251.java
index a5f38a9..6f8f2a6 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog251.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog251.java
@@ -22,6 +22,9 @@ import java.sql.SQLException;
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.orm.DBAccessor.DBColumnInfo;
 
+import com.google.inject.Inject;
+import com.google.inject.Injector;
+
 /**
  * The {@link UpgradeCatalog251} upgrades Ambari from 2.5.0 to 2.5.1.
  */
@@ -31,6 +34,16 @@ public class UpgradeCatalog251 extends AbstractUpgradeCatalog {
   static final String HRC_IS_BACKGROUND_COLUMN = "is_background";
 
   /**
+   * Constructor.
+   *
+   * @param injector
+   */
+  @Inject
+  public UpgradeCatalog251(Injector injector) {
+    super(injector);
+  }
+
+  /**
    * {@inheritDoc}
    */
   @Override
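
Reduced to a sketch, the shape of the fix: every concrete catalog now receives the Injector through an @Inject constructor, and with the no-arg constructors deleted there is no code path that builds a catalog whose injector-derived state was never initialized, which is plausibly the null that surfaced as the NPE during schema upgrade. Simplified class names below, not the actual Ambari types.

    import com.google.inject.Guice;
    import com.google.inject.Inject;
    import com.google.inject.Injector;

    abstract class CatalogBase {
      protected final Injector injector;

      protected CatalogBase(Injector injector) {
        this.injector = injector; // non-null whenever Guice does the wiring
      }
    }

    class Catalog251Sketch extends CatalogBase {
      @Inject
      Catalog251Sketch(Injector injector) {
        super(injector);
      }
    }

    public class InjectionDemo {
      public static void main(String[] args) {
        Injector injector = Guice.createInjector();
        // Guice can inject itself to satisfy the @Inject constructor.
        CatalogBase catalog = injector.getInstance(Catalog251Sketch.class);
        System.out.println(catalog.injector != null); // prints: true
      }
    }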

http://git-wip-us.apache.org/repos/asf/ambari/blob/c77e4d22/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog212Test.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog212Test.java b/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog212Test.java
index 3743b53..896602b 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog212Test.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog212Test.java
@@ -52,8 +52,6 @@ import org.apache.ambari.server.orm.DBAccessor;
 import org.apache.ambari.server.orm.DBAccessor.DBColumnInfo;
 import org.apache.ambari.server.orm.GuiceJpaInitializer;
 import org.apache.ambari.server.orm.InMemoryDefaultTestModule;
-import org.apache.ambari.server.orm.dao.StackDAO;
-import org.apache.ambari.server.orm.entities.StackEntity;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.Config;
@@ -69,6 +67,7 @@ import org.easymock.MockType;
 import org.easymock.TestSubject;
 import org.junit.After;
 import org.junit.Assert;
+import org.junit.Before;
 import org.junit.Rule;
 import org.junit.Test;
 
@@ -114,15 +113,11 @@ public class UpgradeCatalog212Test {
   private ResultSet resultSet;
 
   @TestSubject
-  private UpgradeCatalog212 testSubject = new UpgradeCatalog212();
+  private UpgradeCatalog212 testSubject = new UpgradeCatalog212(
+      EasyMock.createNiceMock(Injector.class));
 
-
-  private UpgradeCatalogHelper upgradeCatalogHelper;
-  private StackEntity desiredStackEntity;
-
-
-  // This method to be called only when an IOC is needed - typically by functional tests
-  public void setupIoCContext() {
+  @Before
+  public void setUp() {
     reset(entityManagerProvider);
     expect(entityManagerProvider.get()).andReturn(entityManager).anyTimes();
     replay(entityManagerProvider);
@@ -130,12 +125,8 @@ public class UpgradeCatalog212Test {
     injector = Guice.createInjector(new InMemoryDefaultTestModule());
     injector.getInstance(GuiceJpaInitializer.class);
 
-    upgradeCatalogHelper = injector.getInstance(UpgradeCatalogHelper.class);
     // inject AmbariMetaInfo to ensure that stacks get populated in the DB
     injector.getInstance(AmbariMetaInfo.class);
-    // load the stack entity
-    StackDAO stackDAO = injector.getInstance(StackDAO.class);
-    desiredStackEntity = stackDAO.find("HDP", "2.2.0");
   }
 
   @After
@@ -148,7 +139,6 @@ public class UpgradeCatalog212Test {
 
   @Test
   public void testFinilizeTopologyDDL() throws Exception {
-    setupIoCContext();
     final DBAccessor dbAccessor = createNiceMock(DBAccessor.class);
     dbAccessor.dropColumn(eq("topology_request"), eq("cluster_name"));
     dbAccessor.setColumnNullable(eq("topology_request"), eq("cluster_id"), eq(false));
@@ -173,7 +163,6 @@ public class UpgradeCatalog212Test {
 
   @Test
   public void testExecuteDDLUpdates() throws Exception {
-    setupIoCContext();
     final DBAccessor dbAccessor = createNiceMock(DBAccessor.class);
     Configuration configuration = createNiceMock(Configuration.class);
     Connection connection = createNiceMock(Connection.class);
@@ -211,7 +200,6 @@ public class UpgradeCatalog212Test {
 
   @Test
   public void testExecuteDMLUpdates() throws Exception {
-    setupIoCContext();
     Method addMissingConfigs = UpgradeCatalog212.class.getDeclaredMethod("addMissingConfigs");
     Method addNewConfigurationsFromXml = AbstractUpgradeCatalog.class.getDeclaredMethod("addNewConfigurationsFromXml");
 
@@ -235,7 +223,6 @@ public class UpgradeCatalog212Test {
 
   @Test
   public void testUpdateHBaseAdnClusterConfigs() throws Exception {
-    setupIoCContext();
     EasyMockSupport easyMockSupport = new EasyMockSupport();
     final AmbariManagementController mockAmbariManagementController = easyMockSupport.createNiceMock(AmbariManagementController.class);
     final ConfigHelper mockConfigHelper = easyMockSupport.createMock(ConfigHelper.class);
@@ -302,7 +289,6 @@ public class UpgradeCatalog212Test {
 
   @Test
   public void testUpdateHBaseAdnClusterConfigsTrue() throws Exception {
-    setupIoCContext();
     EasyMockSupport easyMockSupport = new EasyMockSupport();
     final AmbariManagementController mockAmbariManagementController = easyMockSupport.createNiceMock(AmbariManagementController.class);
     final ConfigHelper mockConfigHelper = easyMockSupport.createMock(ConfigHelper.class);
@@ -355,7 +341,6 @@ public class UpgradeCatalog212Test {
 
   @Test
   public void testUpdateHBaseAdnClusterConfigsNoHBaseEnv() throws Exception {
-    setupIoCContext();
     EasyMockSupport easyMockSupport = new EasyMockSupport();
     final AmbariManagementController mockAmbariManagementController = easyMockSupport.createNiceMock(AmbariManagementController.class);
     final ConfigHelper mockConfigHelper = easyMockSupport.createMock(ConfigHelper.class);
@@ -397,7 +382,6 @@ public class UpgradeCatalog212Test {
 
   @Test
   public void testUpdateHBaseAdnClusterConfigsNoOverrideHBaseUID() throws Exception {
-    setupIoCContext();
     EasyMockSupport easyMockSupport = new EasyMockSupport();
     final AmbariManagementController mockAmbariManagementController = easyMockSupport.createNiceMock(AmbariManagementController.class);
     final ConfigHelper mockConfigHelper = easyMockSupport.createMock(ConfigHelper.class);
@@ -415,9 +399,6 @@ public class UpgradeCatalog212Test {
     expect(mockHbaseEnv.getProperties()).andReturn(propertiesHbaseEnv).once();
     final Config mockClusterEnv = easyMockSupport.createNiceMock(Config.class);
 
-    final Map<String, String> propertiesExpectedHbaseEnv = new HashMap<String, String>() {{
-      put("hbase_user", "hbase");
-    }};
     final Map<String, String> propertiesExpectedClusterEnv = new HashMap<String, String>() {{
       put("override_uid", "false");
     }};
@@ -451,17 +432,14 @@ public class UpgradeCatalog212Test {
 
   @Test
   public void testUpdateHiveConfigs() throws Exception {
-    setupIoCContext();
     EasyMockSupport easyMockSupport = new EasyMockSupport();
     final AmbariManagementController  mockAmbariManagementController = easyMockSupport.createNiceMock(AmbariManagementController.class);
     final ConfigHelper mockConfigHelper = easyMockSupport.createMock(ConfigHelper.class);
 
     final Clusters mockClusters = easyMockSupport.createStrictMock(Clusters.class);
     final Cluster mockClusterExpected = easyMockSupport.createNiceMock(Cluster.class);
-    final Config mockHiveEnv = easyMockSupport.createNiceMock(Config.class);
     final Config mockHiveSite = easyMockSupport.createNiceMock(Config.class);
 
-    final Map<String, String> propertiesExpectedHiveEnv = new HashMap<>();
     final Map<String, String> propertiesExpectedHiveSite = new HashMap<String, String>() {{
       put("hive.heapsize", "512");
       put("hive.server2.custom.authentication.class", "");
@@ -498,7 +476,6 @@ public class UpgradeCatalog212Test {
 
   @Test
   public void testUpdateOozieConfigs() throws Exception {
-    setupIoCContext();
     EasyMockSupport easyMockSupport = new EasyMockSupport();
     final AmbariManagementController  mockAmbariManagementController = easyMockSupport.createNiceMock(AmbariManagementController.class);
     final ConfigHelper mockConfigHelper = easyMockSupport.createMock(ConfigHelper.class);
@@ -539,7 +516,6 @@ public class UpgradeCatalog212Test {
 
   @Test
   public void testUpdateHiveEnvContent() throws Exception {
-    setupIoCContext();
     final Injector mockInjector = Guice.createInjector(new AbstractModule() {
       @Override
       protected void configure() {


[13/50] [abbrv] ambari git commit: AMBARI-20778. Add NIFI StackFeatures to HDP StackFeatures. (mradhakrishnan)

Posted by ao...@apache.org.
AMBARI-20778. Add NIFI StackFeatures to HDP StackFeatures. (mradhakrishnan)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/d853ad83
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/d853ad83
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/d853ad83

Branch: refs/heads/branch-3.0-perf
Commit: d853ad8366ee150aca0a7ee883b01edf98724cd4
Parents: 8d0ec08
Author: Madhuvanthi Radhakrishnan <mr...@hortonworks.com>
Authored: Wed Apr 19 13:43:31 2017 -0700
Committer: Andrew Onishuk <ao...@hortonworks.com>
Committed: Mon Apr 24 14:59:56 2017 +0300

----------------------------------------------------------------------
 .../stacks/HDP/2.0.6/properties/stack_features.json      | 11 +++++++++++
 1 file changed, 11 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/d853ad83/ambari-server/src/main/resources/stacks/HDP/2.0.6/properties/stack_features.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/properties/stack_features.json b/ambari-server/src/main/resources/stacks/HDP/2.0.6/properties/stack_features.json
index 34f3c07..bbd8bfc 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/properties/stack_features.json
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/properties/stack_features.json
@@ -397,6 +397,17 @@
       "name": "ranger_kms_ssl",
       "description": "Ranger KMS SSL properties in ambari stack",
       "min_version": "2.6.0.0"
+    },
+    {
+      "name": "nifi_encrypt_config",
+      "description": "Encrypt sensitive properties written to nifi property file",
+      "min_version": "2.6.0.0"
+    },
+    {
+      "name": "toolkit_config_update",
+      "description": "Support separate input and output for toolkit configuration",
+      "min_version": "2.6.0.0"
     }
+
   ]
 }
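
These two entries only declare the flags; they take effect when a service script consults the stack-feature machinery. A minimal sketch of how a NIFI script could gate on them, assuming params.version carries the target stack version as in other Ambari service scripts (the helper name is hypothetical):

  from resource_management.libraries.functions.stack_features import check_stack_feature

  def configure_nifi():
    import params
    # "nifi_encrypt_config" resolves to true only on HDP >= 2.6.0.0 per the
    # min_version declared above; encrypt_nifi_properties() is a placeholder.
    if check_stack_feature("nifi_encrypt_config", params.version):
      encrypt_nifi_properties()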


[11/50] [abbrv] ambari git commit: AMBARI-20765. Installer wizard: delete icon under the action column on the "Register Hosts" page is invisible. (Ishan Bhatt via Jaimin)

Posted by ao...@apache.org.
AMBARI-20765. Installer wizard: delete icon under the action column on the "Register Hosts" page is invisible. (Ishan Bhatt via Jaimin)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/92ac66ff
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/92ac66ff
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/92ac66ff

Branch: refs/heads/branch-3.0-perf
Commit: 92ac66ff26a5f07c37895e04c4f78066b1cdd75e
Parents: ebb24be
Author: Jaimin Jetly <ja...@hortonworks.com>
Authored: Wed Apr 19 10:09:09 2017 -0700
Committer: Andrew Onishuk <ao...@hortonworks.com>
Committed: Mon Apr 24 14:59:56 2017 +0300

----------------------------------------------------------------------
 ambari-web/app/styles/wizard.less         | 3 +++
 ambari-web/app/templates/wizard/step3.hbs | 2 +-
 2 files changed, 4 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/92ac66ff/ambari-web/app/styles/wizard.less
----------------------------------------------------------------------
diff --git a/ambari-web/app/styles/wizard.less b/ambari-web/app/styles/wizard.less
index 45d9e28..130896dd 100644
--- a/ambari-web/app/styles/wizard.less
+++ b/ambari-web/app/styles/wizard.less
@@ -177,6 +177,9 @@
         float:left;
         white-space: nowrap;
       }
+      #display-action {
+        visibility:visible;
+      }
     }
     #warningsSection {
       margin: 0px 10px;

http://git-wip-us.apache.org/repos/asf/ambari/blob/92ac66ff/ambari-web/app/templates/wizard/step3.hbs
----------------------------------------------------------------------
diff --git a/ambari-web/app/templates/wizard/step3.hbs b/ambari-web/app/templates/wizard/step3.hbs
index 2a96379..e00012c 100644
--- a/ambari-web/app/templates/wizard/step3.hbs
+++ b/ambari-web/app/templates/wizard/step3.hbs
@@ -96,7 +96,7 @@
                       </a>
                     </td>
                     <td class="step3-table-action" {{QAAttr "confirm-hosts-table-body-cell"}}>
-                      <a class="action" href="#" {{action remove target="view"}} {{bindAttr disabled="isBackDisabled"}} {{QAAttr "remove-host-button"}}>
+                      <a class="action" id="display-action" href="#" {{action remove target="view"}} {{bindAttr disabled="isBackDisabled"}} {{QAAttr "remove-host-button"}}>
                         <i class="glyphicon glyphicon-trash" {{translateAttr title="common.remove"}}></i>
                       </a>
                     </td>


[35/50] [abbrv] ambari git commit: AMBARI-20733. /var/log/krb5kdc.log is growing rapidly on the KDC server (echekanskiy)

Posted by ao...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/2a00812a/ambari-server/src/test/python/stacks/2.1/YARN/test_apptimelineserver.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.1/YARN/test_apptimelineserver.py b/ambari-server/src/test/python/stacks/2.1/YARN/test_apptimelineserver.py
index 5730783..530d1d9 100644
--- a/ambari-server/src/test/python/stacks/2.1/YARN/test_apptimelineserver.py
+++ b/ambari-server/src/test/python/stacks/2.1/YARN/test_apptimelineserver.py
@@ -266,116 +266,6 @@ class TestAppTimelineServer(RMFTestCase):
                               group = 'hadoop',
                               )
 
-
-  @patch("resource_management.libraries.functions.security_commons.build_expectations")
-  @patch("resource_management.libraries.functions.security_commons.get_params_from_filesystem")
-  @patch("resource_management.libraries.functions.security_commons.validate_security_config_properties")
-  @patch("resource_management.libraries.functions.security_commons.cached_kinit_executor")
-  @patch("resource_management.libraries.script.Script.put_structured_out")
-  def test_security_status(self, put_structured_out_mock, cached_kinit_executor_mock, validate_security_config_mock, get_params_mock, build_exp_mock):
-    # Test that function works when is called with correct parameters
-
-    security_params = {
-      'yarn-site': {
-        'yarn.timeline-service.keytab': '/path/to/applicationtimeline/keytab',
-        'yarn.timeline-service.principal': 'applicationtimeline_principal',
-        'yarn.timeline-service.http-authentication.kerberos.keytab': 'path/to/timeline/kerberos/keytab',
-        'yarn.timeline-service.http-authentication.kerberos.principal': 'timeline_principal'
-      }
-    }
-    result_issues = []
-    props_value_check = {"yarn.timeline-service.enabled": "true",
-                         "yarn.timeline-service.http-authentication.type": "kerberos",
-                         "yarn.acl.enable": "true"}
-    props_empty_check = ["yarn.timeline-service.principal",
-                         "yarn.timeline-service.keytab",
-                         "yarn.timeline-service.http-authentication.kerberos.principal",
-                         "yarn.timeline-service.http-authentication.kerberos.keytab"]
-
-    props_read_check = ["yarn.timeline-service.keytab",
-                        "yarn.timeline-service.http-authentication.kerberos.keytab"]
-
-    get_params_mock.return_value = security_params
-    validate_security_config_mock.return_value = result_issues
-
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/application_timeline_server.py",
-                       classname="ApplicationTimelineServer",
-                       command="security_status",
-                       config_file="secured.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-
-    build_exp_mock.assert_called_with('yarn-site', props_value_check, props_empty_check, props_read_check)
-    put_structured_out_mock.assert_called_with({"securityState": "SECURED_KERBEROS"})
-    self.assertTrue(cached_kinit_executor_mock.call_count, 2)
-    cached_kinit_executor_mock.assert_called_with('/usr/bin/kinit',
-                                                  self.config_dict['configurations']['yarn-env']['yarn_user'],
-                                                  security_params['yarn-site']['yarn.timeline-service.http-authentication.kerberos.keytab'],
-                                                  security_params['yarn-site']['yarn.timeline-service.http-authentication.kerberos.principal'],
-                                                  self.config_dict['hostname'],
-                                                  '/tmp')
-
-    # Testing that the exception throw by cached_executor is caught
-    cached_kinit_executor_mock.reset_mock()
-    cached_kinit_executor_mock.side_effect = Exception("Invalid command")
-
-    try:
-      self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/application_timeline_server.py",
-                         classname="ApplicationTimelineServer",
-                         command="security_status",
-                         config_file="secured.json",
-                         stack_version = self.STACK_VERSION,
-                         target = RMFTestCase.TARGET_COMMON_SERVICES
-      )
-    except:
-      self.assertTrue(True)
-
-    # Testing with a security_params which doesn't contains yarn-site
-    empty_security_params = {}
-    cached_kinit_executor_mock.reset_mock()
-    get_params_mock.reset_mock()
-    put_structured_out_mock.reset_mock()
-    get_params_mock.return_value = empty_security_params
-
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/application_timeline_server.py",
-                       classname="ApplicationTimelineServer",
-                       command="security_status",
-                       config_file="secured.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-    put_structured_out_mock.assert_called_with({"securityIssuesFound": "Keytab file or principal are not set property."})
-
-    # Testing with not empty result_issues
-    result_issues_with_params = {
-      'yarn-site': "Something bad happened"
-    }
-
-    validate_security_config_mock.reset_mock()
-    get_params_mock.reset_mock()
-    validate_security_config_mock.return_value = result_issues_with_params
-    get_params_mock.return_value = security_params
-
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/application_timeline_server.py",
-                       classname="ApplicationTimelineServer",
-                       command="security_status",
-                       config_file="secured.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-    put_structured_out_mock.assert_called_with({"securityState": "UNSECURED"})
-
-    # Testing with security_enable = false
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/application_timeline_server.py",
-                       classname="ApplicationTimelineServer",
-                       command="security_status",
-                       config_file="default.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-    put_structured_out_mock.assert_called_with({"securityState": "UNSECURED"})
-
   @patch.object(resource_management.libraries.functions, "get_stack_version", new = MagicMock(return_value='2.3.0.0-1234'))
   def test_pre_upgrade_restart_23(self):
     config_file = self.get_src_folder()+"/test/python/stacks/2.0.6/configs/default.json"

http://git-wip-us.apache.org/repos/asf/ambari/blob/2a00812a/ambari-server/src/test/python/stacks/2.2/KNOX/test_knox_gateway.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.2/KNOX/test_knox_gateway.py b/ambari-server/src/test/python/stacks/2.2/KNOX/test_knox_gateway.py
index e0118de..32b5d70 100644
--- a/ambari-server/src/test/python/stacks/2.2/KNOX/test_knox_gateway.py
+++ b/ambari-server/src/test/python/stacks/2.2/KNOX/test_knox_gateway.py
@@ -128,108 +128,6 @@ class TestKnoxGateway(RMFTestCase):
     )
     self.assertNoMoreResources()
 
-
-  @patch("resource_management.libraries.functions.security_commons.build_expectations")
-  @patch("resource_management.libraries.functions.security_commons.get_params_from_filesystem")
-  @patch("resource_management.libraries.functions.security_commons.validate_security_config_properties")
-  @patch("resource_management.libraries.functions.security_commons.cached_kinit_executor")
-  @patch("resource_management.libraries.script.Script.put_structured_out")
-  def test_security_status(self, put_structured_out_mock, cached_kinit_executor_mock,
-                           validate_security_config_mock, get_params_mock, build_exp_mock):
-    # Test that function works when is called with correct parameters
-
-    security_params = {
-      "krb5JAASLogin":
-        {
-          'keytab': "/path/to/keytab",
-          'principal': "principal"
-        },
-      "gateway-site" : {
-        "gateway.hadoop.kerberos.secured" : "true"
-      }
-    }
-
-    result_issues = []
-
-    get_params_mock.return_value = security_params
-    validate_security_config_mock.return_value = result_issues
-
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/knox_gateway.py",
-                       classname = "KnoxGateway",
-                       command="security_status",
-                       config_file="secured.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-
-    self.assertTrue(build_exp_mock.call_count, 2)
-    build_exp_mock.assert_called_with('gateway-site', {"gateway.hadoop.kerberos.secured": "true"}, None, None)
-    put_structured_out_mock.assert_called_with({"securityState": "SECURED_KERBEROS"})
-    self.assertTrue(cached_kinit_executor_mock.call_count, 1)
-    cached_kinit_executor_mock.assert_called_with('/usr/bin/kinit',
-                                                  self.config_dict['configurations']['knox-env']['knox_user'],
-                                                  security_params['krb5JAASLogin']['keytab'],
-                                                  security_params['krb5JAASLogin']['principal'],
-                                                  self.config_dict['hostname'],
-                                                  '/tmp')
-
-    # Testing that the exception throw by cached_executor is caught
-    cached_kinit_executor_mock.reset_mock()
-    cached_kinit_executor_mock.side_effect = Exception("Invalid command")
-
-    try:
-      self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/knox_gateway.py",
-                         classname = "KnoxGateway",
-                         command="security_status",
-                         config_file="secured.json",
-                         stack_version = self.STACK_VERSION,
-                         target = RMFTestCase.TARGET_COMMON_SERVICES
-      )
-    except:
-      self.assertTrue(True)
-
-    # Testing with a security_params which doesn't contains krb5JAASLogin
-    empty_security_params = {"krb5JAASLogin" : {}}
-    cached_kinit_executor_mock.reset_mock()
-    get_params_mock.reset_mock()
-    put_structured_out_mock.reset_mock()
-    get_params_mock.return_value = empty_security_params
-
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/knox_gateway.py",
-                       classname = "KnoxGateway",
-                       command="security_status",
-                       config_file="secured.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-    put_structured_out_mock.assert_called_with({"securityIssuesFound": "Keytab file and principal are not set."})
-
-    # Testing with not empty result_issues
-    result_issues_with_params = {'krb5JAASLogin': "Something bad happened"}
-    validate_security_config_mock.reset_mock()
-    get_params_mock.reset_mock()
-    validate_security_config_mock.return_value = result_issues_with_params
-    get_params_mock.return_value = security_params
-
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/knox_gateway.py",
-                       classname = "KnoxGateway",
-                       command="security_status",
-                       config_file="secured.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-    put_structured_out_mock.assert_called_with({"securityState": "UNSECURED"})
-
-    # Testing with security_enable = false
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/knox_gateway.py",
-                       classname = "KnoxGateway",
-                       command="security_status",
-                       config_file="default.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-    put_structured_out_mock.assert_called_with({"securityState": "UNSECURED"})
-
   @patch("os.path.isdir")
   def test_pre_upgrade_restart(self, isdir_mock):
     isdir_mock.return_value = True

http://git-wip-us.apache.org/repos/asf/ambari/blob/2a00812a/ambari-server/src/test/python/stacks/2.5/ATLAS/test_atlas_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.5/ATLAS/test_atlas_server.py b/ambari-server/src/test/python/stacks/2.5/ATLAS/test_atlas_server.py
index cd2fac8..539bef5 100644
--- a/ambari-server/src/test/python/stacks/2.5/ATLAS/test_atlas_server.py
+++ b/ambari-server/src/test/python/stacks/2.5/ATLAS/test_atlas_server.py
@@ -184,45 +184,3 @@ class TestAtlasServer(RMFTestCase):
     self.assertResourceCalled('File', '/usr/hdp/current/atlas-server/conf/hdfs-site.xml',action = ['delete'],)
 
     self.assertNoMoreResources()
-
-  @patch("resource_management.libraries.functions.security_commons.build_expectations")
-  @patch("resource_management.libraries.functions.security_commons.get_params_from_filesystem")
-  @patch("resource_management.libraries.functions.security_commons.validate_security_config_properties")
-  @patch("resource_management.libraries.functions.security_commons.cached_kinit_executor")
-  @patch("resource_management.libraries.script.Script.put_structured_out")
-  def test_security_status(self, put_structured_out_mock, cached_kinit_executor_mock, validate_security_config_mock, get_params_mock, build_exp_mock):
-
-    security_params = {
-      'atlas-application': {
-        'atlas.authentication.keytab': '/etc/security/keytabs/atlas.service.keytab',
-        'atlas.authentication.method.file': 'true',
-        'atlas.authentication.method.kerberos': 'true',
-        'atlas.authentication.method.kerberos.keytab': '/etc/security/keytabs/spnego.service.keytab',
-        'atlas.authentication.method.kerberos.principal': 'HTTP/_HOST@EXAMPLE.COM',
-        'atlas.authentication.principal': 'atlas/_HOST@EXAMPLE.COM'
-      }
-    }
-    result_issues = []
-    props_value_check = {'atlas.authentication.method.kerberos': 'true',
-                         'atlas.solr.kerberos.enable': 'true'}
-    props_empty_check = ['atlas.authentication.principal',
-                         'atlas.authentication.keytab',
-                         'atlas.authentication.method.kerberos.principal',
-                         'atlas.authentication.method.kerberos.keytab']
-    props_read_check = ['atlas.authentication.keytab',
-                        'atlas.authentication.method.kerberos.keytab']
-
-    get_params_mock.return_value = security_params
-    validate_security_config_mock.return_value = result_issues
-
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/metadata_server.py",
-                       classname = "MetadataServer",
-                       command = "security_status",
-                       config_file="default.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-    build_exp_mock.assert_called_with('atlas-application', props_value_check, props_empty_check, props_read_check)
-    put_structured_out_mock.assert_called_with({"securityState": "SECURED_KERBEROS"})
-
-    self.assertNoMoreResources()


[21/50] [abbrv] ambari git commit: AMBARI-20813. Convert ambari-web build process to use yarn package manager to fix dependencies and to make the process faster. (yusaku)

Posted by ao...@apache.org.
AMBARI-20813. Convert ambari-web build process to use yarn package manager to fix dependencies and to make the process faster. (yusaku)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/78048fcf
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/78048fcf
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/78048fcf

Branch: refs/heads/branch-3.0-perf
Commit: 78048fcfa9a1dd1cf5a98cf773d51ef69eff6db0
Parents: dfb3449
Author: Yusaku Sako <yu...@hortonworks.com>
Authored: Thu Apr 20 17:56:09 2017 -0700
Committer: Andrew Onishuk <ao...@hortonworks.com>
Committed: Mon Apr 24 14:59:56 2017 +0300

----------------------------------------------------------------------
 ambari-web/pom.xml | 19 +++++++++++++------
 1 file changed, 13 insertions(+), 6 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/78048fcf/ambari-web/pom.xml
----------------------------------------------------------------------
diff --git a/ambari-web/pom.xml b/ambari-web/pom.xml
index 895d2f7..2f6d571 100644
--- a/ambari-web/pom.xml
+++ b/ambari-web/pom.xml
@@ -126,29 +126,35 @@
       <plugin>
         <groupId>com.github.eirslett</groupId>
         <artifactId>frontend-maven-plugin</artifactId>
-        <version>1.3</version>
+        <version>1.4</version>
         <configuration>
           <nodeVersion>v4.5.0</nodeVersion>
-          <npmVersion>2.15.0</npmVersion>
+          <yarnVersion>v0.23.2</yarnVersion>
           <workingDirectory>${basedir}</workingDirectory>
           <npmInheritsProxyConfigFromMaven>false</npmInheritsProxyConfigFromMaven>
+          <!-- setting npm_config_tmp environment variable is a workaround for 
+               https://github.com/Medium/phantomjs/issues/673 -->
+          <environmentVariables>
+            <npm_config_tmp>/tmp/npm_config_tmp</npm_config_tmp>
+          </environmentVariables>
         </configuration>
         <executions>
           <execution>
-            <id>install node and npm</id>
+            <id>install node and yarn</id>
             <phase>generate-sources</phase>
             <goals>
-              <goal>install-node-and-npm</goal>
+              <goal>install-node-and-yarn</goal>
             </goals>
           </execution>
           <execution>
-            <id>npm install</id>
+            <id>yarn install</id>
             <phase>generate-sources</phase>
             <goals>
-              <goal>npm</goal>
+              <goal>yarn</goal>
             </goals>
             <configuration>
               <arguments>install</arguments>
+              <arguments>--ignore-engines</arguments>
             </configuration>
           </execution>
         </executions>
@@ -267,6 +273,7 @@
             <exclude>node_modules/**</exclude>
             <exclude>node/**</exclude>
             <exclude>npm-debug.log</exclude>
+            <exclude>yarn.lock</exclude>
           </excludes>
         </configuration>
         <executions>


[16/50] [abbrv] ambari git commit: AMBARI-20798 Do not run Log Search service check if there is no portal in the cluster. (mgergely)

Posted by ao...@apache.org.
AMBARI-20798 Do not run Log Search service check if there is no portal in the cluster. (mgergely)

Change-Id: I98e176ce49cde3943c7ece2dce4b20bad07d4da9


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/9f113f96
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/9f113f96
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/9f113f96

Branch: refs/heads/branch-3.0-perf
Commit: 9f113f96cedabc49d51be62f90460a91fb57d1c6
Parents: 7b61d0c
Author: Miklos Gergely <mg...@hortonworks.com>
Authored: Fri Apr 21 13:39:14 2017 +0200
Committer: Andrew Onishuk <ao...@hortonworks.com>
Committed: Mon Apr 24 14:59:56 2017 +0300

----------------------------------------------------------------------
 .../LOGSEARCH/0.5.0/package/scripts/service_check.py        | 9 ++++++---
 1 file changed, 6 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/9f113f96/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/package/scripts/service_check.py b/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/package/scripts/service_check.py
index c81268c..b794036 100644
--- a/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/package/scripts/service_check.py
+++ b/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/package/scripts/service_check.py
@@ -27,9 +27,12 @@ class LogSearchServiceCheck(Script):
     env.set_params(params)
 
     try:
-      Execute(params.smoke_logsearch_cmd, user=params.logsearch_user,
-              tries=15, try_sleep=5, timeout=10)
-      Logger.info('Log Search Server up and running')
+      if params.logsearch_server_host:
+        Execute(params.smoke_logsearch_cmd, user=params.logsearch_user,
+                tries=15, try_sleep=5, timeout=10)
+        Logger.info('Log Search Server up and running')
+      else:
+        Logger.info('No portal is installed on the cluster, thus no service check is required')
     except:
       Logger.error('Log Search Server not running')
       raise
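
The guard relies on params.logsearch_server_host, which is not part of this diff. In Ambari service scripts such values are conventionally derived from clusterHostInfo in params.py; a hedged sketch of that derivation (the exact key name is an assumption, not taken from this commit):

  from resource_management.libraries.functions.default import default

  # First Log Search portal host, or None when no portal is in the cluster.
  logsearch_server_hosts = default("/clusterHostInfo/logsearch_server_hosts", [])
  logsearch_server_host = logsearch_server_hosts[0] if logsearch_server_hosts else None

With no portal host present, the service check above now logs an informational message instead of failing.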


[27/50] [abbrv] ambari git commit: AMBARI-20792: Atlas admin username and password should be configurable through ambari (Vishal Suvagia via mugdha)

Posted by ao...@apache.org.
AMBARI-20792: Atlas admin username and password should be configurable through ambari (Vishal Suvagia via mugdha)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/2049199b
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/2049199b
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/2049199b

Branch: refs/heads/branch-3.0-perf
Commit: 2049199b59a6d0581ace945c986580621ee33696
Parents: 8322ed5
Author: Vishal Suvagia <vi...@yahoo.com>
Authored: Thu Apr 20 10:36:26 2017 +0530
Committer: Andrew Onishuk <ao...@hortonworks.com>
Committed: Mon Apr 24 14:59:56 2017 +0300

----------------------------------------------------------------------
 .../ATLAS/0.1.0.2.3/package/scripts/metadata.py | 14 ++++++-
 .../ATLAS/0.1.0.2.3/package/scripts/params.py   |  3 ++
 .../services/ATLAS/configuration/atlas-env.xml  | 41 ++++++++++++++++++++
 3 files changed, 56 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/2049199b/ambari-server/src/main/resources/common-services/ATLAS/0.1.0.2.3/package/scripts/metadata.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/ATLAS/0.1.0.2.3/package/scripts/metadata.py b/ambari-server/src/main/resources/common-services/ATLAS/0.1.0.2.3/package/scripts/metadata.py
index c25445c..36c4598 100644
--- a/ambari-server/src/main/resources/common-services/ATLAS/0.1.0.2.3/package/scripts/metadata.py
+++ b/ambari-server/src/main/resources/common-services/ATLAS/0.1.0.2.3/package/scripts/metadata.py
@@ -18,6 +18,7 @@ limitations under the License.
 
 """
 import os
+import hashlib
 
 from resource_management import Package
 from resource_management import StackFeature
@@ -31,11 +32,13 @@ from resource_management.libraries.functions.stack_features import check_stack_f
 from resource_management.libraries.resources.properties_file import PropertiesFile
 from resource_management.libraries.resources.template_config import TemplateConfig
 from resource_management.libraries.resources.xml_config import XmlConfig
+from resource_management.libraries.functions.is_empty import is_empty
+from resource_management.libraries.resources.modify_properties_file import ModifyPropertiesFile
 
 
 def metadata(type='server'):
     import params
-    
+
     # Needed by both Server and Client
     Directory(params.conf_dir,
               mode=0755,
@@ -97,7 +100,14 @@ def metadata(type='server'):
            mode=0755,
            content=InlineTemplate(params.metadata_env_content)
       )
- 
+
+      if not is_empty(params.atlas_admin_username) and not is_empty(params.atlas_admin_password):
+        psswd_output = hashlib.sha256(params.atlas_admin_password).hexdigest()
+        ModifyPropertiesFile(format("{conf_dir}/users-credentials.properties"),
+            properties = {format('{atlas_admin_username}') : format('ROLE_ADMIN::{psswd_output}')},
+            owner = params.metadata_user
+        )
+
       files_to_chown = [format("{conf_dir}/policy-store.txt"), format("{conf_dir}/users-credentials.properties")]
       for file in files_to_chown:
         if os.path.exists(file):
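
The ModifyPropertiesFile call above rewrites the admin entry in users-credentials.properties as <username>=ROLE_ADMIN::<sha256(password)>. A small sketch reproducing the written value for the shipped defaults (Python 2, as used by these agent-side scripts):

  import hashlib

  # Mirrors the hashing done in metadata.py for atlas-env's admin/admin defaults.
  username, password = "admin", "admin"
  print("%s=ROLE_ADMIN::%s" % (username, hashlib.sha256(password).hexdigest()))
  # admin=ROLE_ADMIN::8c6976e5b5410415bde908bd4dee15dfb167a9c873fc4bb8a81f6f2ab448a918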

http://git-wip-us.apache.org/repos/asf/ambari/blob/2049199b/ambari-server/src/main/resources/common-services/ATLAS/0.1.0.2.3/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/ATLAS/0.1.0.2.3/package/scripts/params.py b/ambari-server/src/main/resources/common-services/ATLAS/0.1.0.2.3/package/scripts/params.py
index bf4848b..d26df33 100644
--- a/ambari-server/src/main/resources/common-services/ATLAS/0.1.0.2.3/package/scripts/params.py
+++ b/ambari-server/src/main/resources/common-services/ATLAS/0.1.0.2.3/package/scripts/params.py
@@ -412,3 +412,6 @@ if stack_supports_atlas_ranger_plugin and enable_ranger_atlas:
     'type': 'atlas',
     }
 # ranger atlas plugin section end
+# atlas admin login username password
+atlas_admin_username = config['configurations']['atlas-env']['atlas.admin.username']
+atlas_admin_password = config['configurations']['atlas-env']['atlas.admin.password']

http://git-wip-us.apache.org/repos/asf/ambari/blob/2049199b/ambari-server/src/main/resources/stacks/HDP/2.6/services/ATLAS/configuration/atlas-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/services/ATLAS/configuration/atlas-env.xml b/ambari-server/src/main/resources/stacks/HDP/2.6/services/ATLAS/configuration/atlas-env.xml
new file mode 100644
index 0000000..d54cf15
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/services/ATLAS/configuration/atlas-env.xml
@@ -0,0 +1,41 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration supports_adding_forbidden="true">
+  <property>
+    <name>atlas.admin.username</name>
+    <display-name>Admin username</display-name>
+    <description>Admin Login user</description>
+    <value>admin</value>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>atlas.admin.password</name>
+    <display-name>Admin password</display-name>
+    <description>Admin Login password</description>
+    <value>admin</value>
+    <property-type>PASSWORD</property-type>
+    <value-attributes>
+      <type>password</type>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+</configuration>


[05/50] [abbrv] ambari git commit: AMBARI-20738 - Cannot add repo for additional OS to an existing HDP version (rzang)

Posted by ao...@apache.org.
AMBARI-20738 - Cannot add repo for additional OS to an existing HDP version (rzang)

Change-Id: I4cb6e7d077b85bfeb2c4cf5e84d270ef5fafbe5c


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/b95dcc3b
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/b95dcc3b
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/b95dcc3b

Branch: refs/heads/branch-3.0-perf
Commit: b95dcc3b5940ba9edba5dd57db66a3a30a1cc1fb
Parents: 13dd115
Author: Richard Zang <rz...@apache.org>
Authored: Mon Apr 17 15:05:15 2017 -0700
Committer: Andrew Onishuk <ao...@hortonworks.com>
Committed: Mon Apr 24 14:59:56 2017 +0300

----------------------------------------------------------------------
 .../stackVersions/StackVersionsEditCtrl.js      | 102 ++++---------------
 1 file changed, 20 insertions(+), 82 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/b95dcc3b/ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/stackVersions/StackVersionsEditCtrl.js
----------------------------------------------------------------------
diff --git a/ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/stackVersions/StackVersionsEditCtrl.js b/ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/stackVersions/StackVersionsEditCtrl.js
index c6ba241..a84a97c 100644
--- a/ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/stackVersions/StackVersionsEditCtrl.js
+++ b/ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/stackVersions/StackVersionsEditCtrl.js
@@ -24,15 +24,15 @@ angular.module('ambariAdminConsole')
     os: $t('versions.os')
   };
   $scope.editController = true;
-  $scope.osList = [];
+  $scope.osList = []; // view model for displaying the repo URLs of the various OSes
   $scope.skipValidation = false;
   $scope.useRedhatSatellite = false;
-  $scope.selectedOS = 0;
   $scope.upgradeStack = {
     stack_name: '',
     stack_version: '',
     display_name: ''
   };
+  $scope.defaulfOSRepos = {}; // a copy of the initially loaded repo info, used by the "changed" check later
 
   $scope.loadStackVersionInfo = function () {
     return Stack.getRepo($routeParams.versionId, $routeParams.stackName).then(function (response) {
@@ -54,8 +54,6 @@ angular.module('ambariAdminConsole')
             var skipServices = ['MAPREDUCE2', 'GANGLIA', 'KERBEROS'];
             return skipServices.indexOf(service.name) === -1;
           }) || [];
-      //save default values of repos to check if they were changed
-      $scope.defaulfOSRepos = {};
       response.updateObj.operating_systems.forEach(function(os) {
         $scope.defaulfOSRepos[os.OperatingSystems.os_type] = {};
         os.repositories.forEach(function(repo) {
@@ -84,8 +82,6 @@ angular.module('ambariAdminConsole')
       } else {
         $scope.deleteEnabled = $scope.isDeletable();
       }
-      // fetch all repos to display the left menu
-      $scope.fetchRepos();
     });
   };
 
@@ -133,33 +129,34 @@ angular.module('ambariAdminConsole')
     });
   };
 
-  $scope.defaulfOSRepos = {};
-
   $scope.save = function () {
     $scope.editVersionDisabled = true;
     delete $scope.updateObj.href;
     $scope.updateObj.operating_systems = [];
-    var updateRepoUrl = false;
+    // check if there is any change in repo list
+    var changed = false;
     angular.forEach($scope.osList, function (os) {
       var savedUrls = $scope.defaulfOSRepos[os.OperatingSystems.os_type];
-      os.OperatingSystems.ambari_managed_repositories = !$scope.useRedhatSatellite;
-      if (os.selected) {
-        var currentRepos = os.repositories;
-        var urlChanged = false;
-        angular.forEach(currentRepos, function (repo) {
-          if (repo.Repositories.base_url != savedUrls[repo.Repositories.repo_id]) {
-            urlChanged = true;
-          }
-        });
-        if (!savedUrls || urlChanged) {
-          updateRepoUrl = true;
+      if (os.selected) { // currently shown?
+        if (savedUrls) { // initially loaded?
+          angular.forEach(os.repositories, function (repo) {
+            if (repo.Repositories.base_url != savedUrls[repo.Repositories.repo_id]) {
+              changed = true; // modified
+            }
+          });
+        } else {
+          changed = true; // added
         }
+        os.OperatingSystems.ambari_managed_repositories = !$scope.useRedhatSatellite;
         $scope.updateObj.operating_systems.push(os);
-      } else if (savedUrls) {
-        updateRepoUrl = true;
+      } else {
+        if (savedUrls) {
+          changed = true; // removed
+        }
       }
     });
-    if (updateRepoUrl && !$scope.deleteEnabled) {
+    // show confirmation when making changes to current/installed repo
+    if (changed && !$scope.deleteEnabled) {
       ConfirmationModal.show(
           $t('versions.changeBaseURLConfirmation.title'),
           $t('versions.changeBaseURLConfirmation.message'),
@@ -342,64 +339,5 @@ angular.module('ambariAdminConsole')
     return hasErrors;
   };
 
-
-  // add all repos list
-  $scope.filter = {
-    version: '',
-    cluster: {
-      options: [],
-      current: null
-    }
-  };
-
-  $scope.pagination = {
-    totalRepos: 100,
-    maxVisiblePages: 1,
-    itemsPerPage: 100,
-    currentPage: 1
-  };
-  $scope.allRepos = [];
-  $scope.stackVersions = [];
-
-
-
-  /**
-   *  Formatted object to display all repos:
-   *
-   *  [{ 'name': 'HDP-2.3',
-   *     'repos': ['2.3.6.0-2343', '2.3.4.1', '2.3.4.0-56']
-   *   },
-   *   { 'name': 'HDP-2.2',
-   *     'repos': ['2.2.6.0', '2.2.4.5', '2.2.4.0']
-   *   }
-   *  ]
-   *
-   */
-  $scope.fetchRepos = function () {
-    return Stack.allRepos($scope.filter, $scope.pagination).then(function (repos) {
-      $scope.allRepos = repos.items.sort(function(a, b){return a.repository_version < b.repository_version});
-      var existingStackHash = {};
-      var stackVersions = [];
-      angular.forEach($scope.allRepos, function (repo) {
-        var stackVersionName = repo.stack_name + '-' + repo.stack_version;
-        var currentStackVersion = $scope.upgradeStack.stack_name + '-' + $scope.upgradeStack.stack_version;
-        repo.isActive = $scope.actualVersion == repo.repository_version;
-        if (!existingStackHash[stackVersionName]) {
-          existingStackHash[stackVersionName] = true;
-          stackVersions.push({
-            'name': stackVersionName,
-            'isOpened': stackVersionName == currentStackVersion,
-            'repos': [repo]
-          });
-        } else {
-          if (stackVersions[stackVersions.length -1].repos) {
-            stackVersions[stackVersions.length -1].repos.push(repo);
-          }
-        }
-      });
-      $scope.stackVersions = stackVersions;
-    });
-  };
-
   $scope.loadStackVersionInfo();
 }]);
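
The rewritten save() reduces repo-change detection to three cases against the initially loaded snapshot: an OS was added, an OS was removed, or a base URL was modified. The same logic, sketched in Python with simplified data shapes for clarity (the real controller walks the Angular view model shown above):

  def repos_changed(os_list, saved):
    # os_list: {os_type: {"selected": bool, "repos": {repo_id: base_url}}}
    # saved:   {os_type: {repo_id: base_url}} snapshot taken at page load
    for os_type, entry in os_list.items():
      saved_urls = saved.get(os_type)
      if entry["selected"]:
        if saved_urls is None:
          return True  # OS added
        if any(url != saved_urls.get(repo_id)
               for repo_id, url in entry["repos"].items()):
          return True  # base URL modified
      elif saved_urls is not None:
        return True  # OS removed
    return False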


[45/50] [abbrv] ambari git commit: AMBARI-20326. HDP 3.0 TP - support for HBase with configs, kerberos, widgets, metrics, quicklinks, and themes (alejandro)

Posted by ao...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/41006ebe/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/setup_ranger_hbase.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/setup_ranger_hbase.py b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/setup_ranger_hbase.py
new file mode 100644
index 0000000..d32dce1
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/setup_ranger_hbase.py
@@ -0,0 +1,106 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+from resource_management.core.logger import Logger
+
+def setup_ranger_hbase(upgrade_type=None, service_name="hbase-master"):
+  import params
+
+  if params.enable_ranger_hbase:
+
+    stack_version = None
+
+    if upgrade_type is not None:
+      stack_version = params.version
+
+    if params.retryAble:
+      Logger.info("HBase: Setup ranger: command retry enables thus retrying if ranger admin is down !")
+    else:
+      Logger.info("HBase: Setup ranger: command retry not enabled thus skipping if ranger admin is down !")
+
+    if params.xml_configurations_supported and params.enable_ranger_hbase and params.xa_audit_hdfs_is_enabled and service_name == 'hbase-master' :
+      params.HdfsResource("/ranger/audit",
+                         type="directory",
+                         action="create_on_execute",
+                         owner=params.hdfs_user,
+                         group=params.hdfs_user,
+                         mode=0755,
+                         recursive_chmod=True
+      )
+      params.HdfsResource("/ranger/audit/hbaseMaster",
+                         type="directory",
+                         action="create_on_execute",
+                         owner=params.hbase_user,
+                         group=params.hbase_user,
+                         mode=0700,
+                         recursive_chmod=True
+      )
+      params.HdfsResource("/ranger/audit/hbaseRegional",
+                         type="directory",
+                         action="create_on_execute",
+                         owner=params.hbase_user,
+                         group=params.hbase_user,
+                         mode=0700,
+                         recursive_chmod=True
+      )
+      params.HdfsResource(None, action="execute")
+
+    if params.xml_configurations_supported:
+      api_version=None
+      if params.stack_supports_ranger_kerberos:
+        api_version='v2'
+      from resource_management.libraries.functions.setup_ranger_plugin_xml import setup_ranger_plugin
+      setup_ranger_plugin('hbase-client', 'hbase', params.previous_jdbc_jar, params.downloaded_custom_connector,
+                          params.driver_curl_source, params.driver_curl_target, params.java64_home,
+                          params.repo_name, params.hbase_ranger_plugin_repo,
+                          params.ranger_env, params.ranger_plugin_properties,
+                          params.policy_user, params.policymgr_mgr_url,
+                          params.enable_ranger_hbase, conf_dict=params.hbase_conf_dir,
+                          component_user=params.hbase_user, component_group=params.user_group, cache_service_list=['hbaseMaster', 'hbaseRegional'],
+                          plugin_audit_properties=params.config['configurations']['ranger-hbase-audit'], plugin_audit_attributes=params.config['configuration_attributes']['ranger-hbase-audit'],
+                          plugin_security_properties=params.config['configurations']['ranger-hbase-security'], plugin_security_attributes=params.config['configuration_attributes']['ranger-hbase-security'],
+                          plugin_policymgr_ssl_properties=params.config['configurations']['ranger-hbase-policymgr-ssl'], plugin_policymgr_ssl_attributes=params.config['configuration_attributes']['ranger-hbase-policymgr-ssl'],
+                          component_list=['hbase-client', 'hbase-master', 'hbase-regionserver'], audit_db_is_enabled=params.xa_audit_db_is_enabled,
+                          credential_file=params.credential_file, xa_audit_db_password=params.xa_audit_db_password,
+                          ssl_truststore_password=params.ssl_truststore_password, ssl_keystore_password=params.ssl_keystore_password,
+                          stack_version_override = stack_version, skip_if_rangeradmin_down= not params.retryAble, api_version=api_version,
+                          is_security_enabled = params.security_enabled,
+                          is_stack_supports_ranger_kerberos = params.stack_supports_ranger_kerberos if params.security_enabled else None,
+                          component_user_principal=params.ranger_hbase_principal if params.security_enabled else None,
+                          component_user_keytab=params.ranger_hbase_keytab if params.security_enabled else None)
+
+    else:
+      from resource_management.libraries.functions.setup_ranger_plugin import setup_ranger_plugin
+      setup_ranger_plugin('hbase-client', 'hbase', params.previous_jdbc_jar,
+                        params.downloaded_custom_connector, params.driver_curl_source,
+                        params.driver_curl_target, params.java64_home,
+                        params.repo_name, params.hbase_ranger_plugin_repo,
+                        params.ranger_env, params.ranger_plugin_properties,
+                        params.policy_user, params.policymgr_mgr_url,
+                        params.enable_ranger_hbase, conf_dict=params.hbase_conf_dir,
+                        component_user=params.hbase_user, component_group=params.user_group, cache_service_list=['hbaseMaster', 'hbaseRegional'],
+                        plugin_audit_properties=params.config['configurations']['ranger-hbase-audit'], plugin_audit_attributes=params.config['configuration_attributes']['ranger-hbase-audit'],
+                        plugin_security_properties=params.config['configurations']['ranger-hbase-security'], plugin_security_attributes=params.config['configuration_attributes']['ranger-hbase-security'],
+                        plugin_policymgr_ssl_properties=params.config['configurations']['ranger-hbase-policymgr-ssl'], plugin_policymgr_ssl_attributes=params.config['configuration_attributes']['ranger-hbase-policymgr-ssl'],
+                        component_list=['hbase-client', 'hbase-master', 'hbase-regionserver'], audit_db_is_enabled=params.xa_audit_db_is_enabled,
+                        credential_file=params.credential_file, xa_audit_db_password=params.xa_audit_db_password,
+                        ssl_truststore_password=params.ssl_truststore_password, ssl_keystore_password=params.ssl_keystore_password,
+                        stack_version_override = stack_version, skip_if_rangeradmin_down= not params.retryAble)
+  else:
+    Logger.info('Ranger HBase plugin is not enabled')
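
Note the HdfsResource idiom in the audit-directory block above: every call with action="create_on_execute" only queues a request, and the trailing HdfsResource(None, action="execute") flushes the queue in a single pass. A stripped-down sketch of the pattern, assuming params.HdfsResource is the partial that params.py builds for these scripts:

  # Queue directory requests, then flush them together; nothing touches HDFS
  # until the final execute call.
  params.HdfsResource("/ranger/audit", type="directory",
                      action="create_on_execute", owner=params.hdfs_user,
                      mode=0755, recursive_chmod=True)
  params.HdfsResource("/ranger/audit/hbaseMaster", type="directory",
                      action="create_on_execute", owner=params.hbase_user,
                      mode=0700, recursive_chmod=True)
  params.HdfsResource(None, action="execute")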

http://git-wip-us.apache.org/repos/asf/ambari/blob/41006ebe/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/status_params.py b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/status_params.py
new file mode 100644
index 0000000..bc75c78
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/status_params.py
@@ -0,0 +1,68 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+from ambari_commons.os_check import OSCheck
+
+from resource_management.libraries.functions import format
+from resource_management.libraries.functions.default import default
+from resource_management.libraries.functions.version import format_stack_version
+from resource_management.libraries.functions.stack_features import check_stack_feature
+from resource_management.libraries.functions import StackFeature
+from resource_management.libraries.functions import get_kinit_path
+from resource_management.libraries.script.script import Script
+
+# a map of the Ambari role to the component name
+# for use with <stack-root>/current/<component>
+SERVER_ROLE_DIRECTORY_MAP = {
+  'HBASE_MASTER' : 'hbase-master',
+  'HBASE_REGIONSERVER' : 'hbase-regionserver',
+  'HBASE_CLIENT' : 'hbase-client'
+}
+
+component_directory = Script.get_component_from_role(SERVER_ROLE_DIRECTORY_MAP, "HBASE_CLIENT")
+
+config = Script.get_config()
+
+if OSCheck.is_windows_family():
+  hbase_master_win_service_name = "master"
+  hbase_regionserver_win_service_name = "regionserver"
+else:
+  pid_dir = config['configurations']['hbase-env']['hbase_pid_dir']
+  hbase_user = config['configurations']['hbase-env']['hbase_user']
+
+  hbase_master_pid_file = format("{pid_dir}/hbase-{hbase_user}-master.pid")
+  regionserver_pid_file = format("{pid_dir}/hbase-{hbase_user}-regionserver.pid")
+  phoenix_pid_file = format("{pid_dir}/phoenix-{hbase_user}-server.pid")
+
+  # Security related/required params
+  hostname = config['hostname']
+  security_enabled = config['configurations']['cluster-env']['security_enabled']
+  kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
+  tmp_dir = Script.get_tmp_dir()
+  
+  stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
+  stack_version_formatted = format_stack_version(stack_version_unformatted)
+  stack_root = Script.get_stack_root()
+
+  hbase_conf_dir = "/etc/hbase/conf"
+  limits_conf_dir = "/etc/security/limits.d"
+  if stack_version_formatted and check_stack_feature(StackFeature.ROLLING_UPGRADE, stack_version_formatted):
+    hbase_conf_dir = format("{stack_root}/current/{component_directory}/conf")
+    
+stack_name = default("/hostLevelParams/stack_name", None)
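
status_params.py only computes values; each component's status() handler consumes them. An illustrative consumer following the pattern used across Ambari service scripts (the class shown here is a sketch, not part of this commit):

  from resource_management.libraries.functions import check_process_status
  from resource_management.libraries.script.script import Script
  import status_params

  class HbaseMaster(Script):
    def status(self, env):
      # check_process_status raises ComponentIsNotRunning when the pid file
      # is missing or stale, which Ambari reports as a stopped component.
      check_process_status(status_params.hbase_master_pid_file)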

http://git-wip-us.apache.org/repos/asf/ambari/blob/41006ebe/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/upgrade.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/upgrade.py b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/upgrade.py
new file mode 100644
index 0000000..703fe26
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/upgrade.py
@@ -0,0 +1,106 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+import re
+import socket
+
+from resource_management.core import shell
+from resource_management.core.exceptions import ComponentIsNotRunning
+from resource_management.core.exceptions import Fail
+from resource_management.core.logger import Logger
+from resource_management.libraries.functions import conf_select, stack_select
+from resource_management.libraries.functions.constants import StackFeature
+from resource_management.libraries.functions.stack_features import check_stack_feature
+from resource_management.libraries.functions.decorator import retry
+from resource_management.libraries.functions.format import format
+from resource_management.libraries.functions import check_process_status
+
+
+def prestart(env, stack_component):
+  import params
+
+  if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version):
+    conf_select.select(params.stack_name, "hbase", params.version)
+    stack_select.select(stack_component, params.version)
+
+def post_regionserver(env):
+  import params
+  env.set_params(params)
+
+  check_cmd = "echo 'status \"simple\"' | {0} shell".format(params.hbase_cmd)
+
+  exec_cmd = "{0} {1}".format(params.kinit_cmd, check_cmd)
+  is_regionserver_registered(exec_cmd, params.hbase_user, params.hostname, re.IGNORECASE)
+
+
+def is_region_server_process_running():
+  try:
+    pid_file = format("{pid_dir}/hbase-{hbase_user}-regionserver.pid")
+    check_process_status(pid_file)
+    return True
+  except ComponentIsNotRunning:
+    return False
+
+
+@retry(times=30, sleep_time=30, err_class=Fail)
+def is_regionserver_registered(cmd, user, hostname, regex_search_flags):
+  """
+  Queries HBase through the HBase shell to see which servers have successfully registered. This is
+  useful in cases, such as upgrades, where we must ensure that a RegionServer has not only started,
+  but also completed its registration handshake before moving on to upgrade the next RegionServer.
+
+  The hbase shell is used along with the "status 'simple'" command in order to determine if the
+  specified host has registered.
+  :param cmd: the fully formed command (kinit plus hbase shell status query) to execute
+  :param user: the user to run the command as
+  :param hostname: the hostname of the RegionServer that is expected to be registered
+  :param regex_search_flags: flags passed to re.search, e.g. re.IGNORECASE
+  :return: None; raises Fail if the RegionServer has not registered yet
+  """
+  if not is_region_server_process_running():
+    Logger.info("RegionServer process is not running")
+    raise Fail("RegionServer process is not running")
+
+  # use hbase shell with "status 'simple'" command
+  code, out = shell.call(cmd, user=user)
+
+  # if we don't have output, then we can't check
+  if not out:
+    raise Fail("Unable to retrieve status information from the HBase shell")
+
+  # try matching the hostname with a colon (which indicates a bound port)
+  bound_hostname_to_match = hostname + ":"
+  match = re.search(bound_hostname_to_match, out, regex_search_flags)
+
+  # if there's no match, try again with the IP address
+  if not match:
+    try:
+      ip_address = socket.gethostbyname(hostname)
+      bound_ip_address_to_match = ip_address + ":"
+      match = re.search(bound_ip_address_to_match, out, regex_search_flags)
+    except socket.error:
+      # this is merely a backup, so just log that it failed
+      Logger.warning("Unable to lookup the IP address of {0}, reverse DNS lookup may not be working.".format(hostname))
+      pass
+
+  # failed with both a hostname and an IP address, so raise the Fail and let the function auto retry
+  if not match:
+    raise Fail(
+      "The RegionServer named {0} has not yet registered with the HBase Master".format(hostname))

http://git-wip-us.apache.org/repos/asf/ambari/blob/41006ebe/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/templates/hbase-smoke.sh.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/templates/hbase-smoke.sh.j2 b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/templates/hbase-smoke.sh.j2
new file mode 100644
index 0000000..458da95
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/templates/hbase-smoke.sh.j2
@@ -0,0 +1,44 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+disable 'ambarismoketest'
+drop 'ambarismoketest'
+create 'ambarismoketest','family'
+put 'ambarismoketest','row01','family:col01','{{service_check_data}}'
+scan 'ambarismoketest'
+exit
\ No newline at end of file
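
For reference: the smoke script is a throwaway HBase shell session: it drops any stale 'ambarismoketest' table, recreates it, writes one cell carrying the generated service_check_data token, and scans it back. A service check typically materializes the template and runs it through the shell; a hedged sketch using resource_management (the /tmp path, user, and retry counts are illustrative):

    from resource_management.core.resources.system import Execute, File
    from resource_management.core.source import Template

    # render the Jinja2 template to a script, then feed it to the hbase shell
    File("/tmp/hbase-smoke.sh", content=Template("hbase-smoke.sh.j2"), mode=0o755)
    Execute("hbase shell /tmp/hbase-smoke.sh", user="ambari-qa", tries=3, try_sleep=5)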

http://git-wip-us.apache.org/repos/asf/ambari/blob/41006ebe/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/templates/hbase.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/templates/hbase.conf.j2 b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/templates/hbase.conf.j2
new file mode 100644
index 0000000..3580db0
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/templates/hbase.conf.j2
@@ -0,0 +1,35 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+{{hbase_user}}   - nofile   {{hbase_user_nofile_limit}}
+{{hbase_user}}   - nproc    {{hbase_user_nproc_limit}}
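
For reference: this template lands in limits_conf_dir (/etc/security/limits.d in params.py above) and raises the HBase user's open-file and process ulimits. With hbase_user=hbase and illustrative limit values, the rendered file would read:

    hbase   - nofile   32000
    hbase   - nproc    16000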

http://git-wip-us.apache.org/repos/asf/ambari/blob/41006ebe/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/templates/hbase_client_jaas.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/templates/hbase_client_jaas.conf.j2 b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/templates/hbase_client_jaas.conf.j2
new file mode 100644
index 0000000..38f9721
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/templates/hbase_client_jaas.conf.j2
@@ -0,0 +1,23 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+Client {
+com.sun.security.auth.module.Krb5LoginModule required
+useKeyTab=false
+useTicketCache=true;
+};

http://git-wip-us.apache.org/repos/asf/ambari/blob/41006ebe/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/templates/hbase_grant_permissions.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/templates/hbase_grant_permissions.j2 b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/templates/hbase_grant_permissions.j2
new file mode 100644
index 0000000..3378983
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/templates/hbase_grant_permissions.j2
@@ -0,0 +1,39 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+grant '{{smoke_test_user}}', '{{smokeuser_permissions}}'
+exit
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/41006ebe/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/templates/hbase_master_jaas.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/templates/hbase_master_jaas.conf.j2 b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/templates/hbase_master_jaas.conf.j2
new file mode 100644
index 0000000..a93c36c
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/templates/hbase_master_jaas.conf.j2
@@ -0,0 +1,26 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+Client {
+com.sun.security.auth.module.Krb5LoginModule required
+useKeyTab=true
+storeKey=true
+useTicketCache=false
+keyTab="{{master_keytab_path}}"
+principal="{{master_jaas_princ}}";
+};

http://git-wip-us.apache.org/repos/asf/ambari/blob/41006ebe/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/templates/hbase_queryserver_jaas.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/templates/hbase_queryserver_jaas.conf.j2 b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/templates/hbase_queryserver_jaas.conf.j2
new file mode 100644
index 0000000..c5a6c3f
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/templates/hbase_queryserver_jaas.conf.j2
@@ -0,0 +1,26 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+Client {
+com.sun.security.auth.module.Krb5LoginModule required
+useKeyTab=true
+storeKey=true
+useTicketCache=false
+keyTab="{{queryserver_keytab_path}}"
+principal="{{queryserver_jaas_princ}}";
+};

http://git-wip-us.apache.org/repos/asf/ambari/blob/41006ebe/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/templates/hbase_regionserver_jaas.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/templates/hbase_regionserver_jaas.conf.j2 b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/templates/hbase_regionserver_jaas.conf.j2
new file mode 100644
index 0000000..7097481
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/templates/hbase_regionserver_jaas.conf.j2
@@ -0,0 +1,26 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+Client {
+com.sun.security.auth.module.Krb5LoginModule required
+useKeyTab=true
+storeKey=true
+useTicketCache=false
+keyTab="{{regionserver_keytab_path}}"
+principal="{{regionserver_jaas_princ}}";
+};
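
For reference: the client, master, queryserver, and regionserver JAAS templates differ only in keytab and principal (the client relies on the ticket cache instead of a keytab). Each rendered file is handed to the JVM through the standard java.security.auth.login.config system property, typically appended to the corresponding daemon's opts in hbase-env.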

http://git-wip-us.apache.org/repos/asf/ambari/blob/41006ebe/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/templates/input.config-hbase.json.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/templates/input.config-hbase.json.j2 b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/templates/input.config-hbase.json.j2
new file mode 100644
index 0000000..94fbc64
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/templates/input.config-hbase.json.j2
@@ -0,0 +1,79 @@
+{#
+ # Licensed to the Apache Software Foundation (ASF) under one
+ # or more contributor license agreements.  See the NOTICE file
+ # distributed with this work for additional information
+ # regarding copyright ownership.  The ASF licenses this file
+ # to you under the Apache License, Version 2.0 (the
+ # "License"); you may not use this file except in compliance
+ # with the License.  You may obtain a copy of the License at
+ #
+ #   http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ #}
+{
+  "input":[
+    {
+      "type":"hbase_master",
+      "rowtype":"service",
+      "path":"{{default('/configurations/hbase-env/hbase_log_dir', '/var/log/hbase')}}/hbase-*-master-*.log"
+    },
+    {
+      "type":"hbase_regionserver",
+      "rowtype":"service",
+      "path":"{{default('/configurations/hbase-env/hbase_log_dir', '/var/log/hbase')}}/hbase-*-regionserver-*.log"
+    },
+    {
+      "type":"hbase_phoenix_server",
+      "rowtype":"service",
+      "path":"{{default('/configurations/hbase-env/hbase_log_dir', '/var/log/hbase')}}/phoenix-*-server.log"
+    }
+  ],
+  "filter":[
+    {
+      "filter":"grok",
+      "conditions":{
+        "fields":{
+          "type":[
+            "hbase_master",
+            "hbase_regionserver"
+          ]
+        }
+      },
+      "log4j_format":"%d{ISO8601} %-5p [%t] %c{2}: %m%n",
+      "multiline_pattern":"^(%{TIMESTAMP_ISO8601:logtime})",
+      "message_pattern":"(?m)^%{TIMESTAMP_ISO8601:logtime}%{SPACE}%{LOGLEVEL:level}%{SPACE}\\[%{DATA:thread_name}\\]%{SPACE}%{JAVACLASS:logger_name}:%{SPACE}%{GREEDYDATA:log_message}",
+      "post_map_values":{
+        "logtime":{
+          "map_date":{
+            "target_date_pattern":"yyyy-MM-dd HH:mm:ss,SSS"
+          }
+        }
+      }
+    },
+    {
+      "filter":"grok",
+      "conditions":{
+        "fields":{
+          "type":[
+            "hbase_phoenix_server"
+          ]
+        }
+      },
+      "log4j_format":"%d{ISO8601} %-5p [%t] %c{2}: %m%n",
+      "multiline_pattern":"^(%{TIMESTAMP_ISO8601:logtime})",
+      "message_pattern":"(?m)^%{TIMESTAMP_ISO8601:logtime}%{SPACE}%{LOGLEVEL:level}%{SPACE}%{JAVACLASS:logger_name}:%{SPACE}%{GREEDYDATA:log_message}",
+      "post_map_values":{
+        "logtime":{
+          "map_date":{
+            "target_date_pattern":"yyyy-MM-dd HH:mm:ss,SSS"
+          }
+        }
+      }
+    }
+  ]
+}
\ No newline at end of file
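
For reference: the grok patterns above target the stock log4j layout %d{ISO8601} %-5p [%t] %c{2}: %m%n. As a worked example (the log line is invented), a master entry such as

    2017-04-24 12:00:01,123 INFO  [main] master.HMaster: Master has completed initialization

would yield logtime="2017-04-24 12:00:01,123", level=INFO, thread_name=main, logger_name=master.HMaster, and log_message="Master has completed initialization", with logtime then re-parsed using the yyyy-MM-dd HH:mm:ss,SSS pattern.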

http://git-wip-us.apache.org/repos/asf/ambari/blob/41006ebe/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/templates/regionservers.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/templates/regionservers.j2 b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/templates/regionservers.j2
new file mode 100644
index 0000000..fc6cc37
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/templates/regionservers.j2
@@ -0,0 +1,20 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+{% for host in rs_hosts %}{{host}}
+{% endfor %}
\ No newline at end of file
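
For reference: rendered against rs_hosts, this emits one RegionServer hostname per line, e.g. (hostnames invented):

    rs1.example.com
    rs2.example.com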

http://git-wip-us.apache.org/repos/asf/ambari/blob/41006ebe/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/quicklinks/quicklinks.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/quicklinks/quicklinks.json b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/quicklinks/quicklinks.json
new file mode 100644
index 0000000..5568122
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/quicklinks/quicklinks.json
@@ -0,0 +1,97 @@
+{
+  "name": "default",
+  "description": "default quick links configuration",
+  "configuration": {
+    "protocol":
+    {
+      "type":"http"
+    },
+
+    "links": [
+      {
+        "name": "hbase_master_ui",
+        "label": "HBase Master UI",
+        "url":"%@://%@:%@/master-status",
+        "requires_user_name": "false",
+        "port":{
+          "http_property": "hbase.master.info.port",
+          "http_default_port": "60010",
+          "https_property": "hbase.master.info.port",
+          "https_default_port": "60443",
+          "regex": "",
+          "site": "hbase-site"
+        }
+      },
+      {
+        "name": "hbase_logs",
+        "label": "HBase Logs",
+        "url":"%@://%@:%@/logs",
+        "requires_user_name": "false",
+        "port":{
+          "http_property": "hbase.master.info.port",
+          "http_default_port": "60010",
+          "https_property": "hbase.master.info.port",
+          "https_default_port": "60443",
+          "regex": "",
+          "site": "hbase-site"
+        }
+      },
+      {
+        "name": "zookeeper_info",
+        "label": "Zookeeper Info",
+        "url":"%@://%@:%@/zk.jsp",
+        "requires_user_name": "false",
+        "port":{
+          "http_property": "hbase.master.info.port",
+          "http_default_port": "60010",
+          "https_property": "hbase.master.info.port",
+          "https_default_port": "60443",
+          "regex": "",
+          "site": "hbase-site"
+        }
+      },
+      {
+        "name": "hbase_master_jmx",
+        "label": "HBase Master JMX",
+        "url":"%@://%@:%@/jmx",
+        "requires_user_name": "false",
+        "port":{
+          "http_property": "hbase.master.info.port",
+          "http_default_port": "60010",
+          "https_property": "hbase.master.info.port",
+          "https_default_port": "60443",
+          "regex": "",
+          "site": "hbase-site"
+        }
+      },
+      {
+        "name": "debug_dump",
+        "label": "Debug Dump",
+        "url":"%@://%@:%@/dump",
+        "requires_user_name": "false",
+        "port":{
+          "http_property": "hbase.master.info.port",
+          "http_default_port": "60010",
+          "https_property": "hbase.master.info.port",
+          "https_default_port": "60443",
+          "regex": "",
+          "site": "hbase-site"
+        }
+      },
+      {
+        "name": "thread_stacks",
+        "label": "Thread Stacks",
+        "url":"%@://%@:%@/stacks",
+        "requires_user_name": "false",
+        "port":{
+          "http_property": "hbase.master.info.port",
+          "http_default_port": "60010",
+          "https_property": "hbase.master.info.port",
+          "https_default_port": "60443",
+          "regex": "",
+          "site": "hbase-site"
+        }
+      }
+    ]
+  }
+}
\ No newline at end of file
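
For reference: each link's "%@://%@:%@" placeholders are filled with the protocol, the HBase Master host, and the resolved port. With the http protocol configured above, if hbase.master.info.port resolves to 60010 (the declared http default), hbase_master_ui becomes http://<hbase-master-host>:60010/master-status; under https the same property is read, with 60443 as the fallback.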

http://git-wip-us.apache.org/repos/asf/ambari/blob/41006ebe/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/role_command_order.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/role_command_order.json b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/role_command_order.json
new file mode 100644
index 0000000..44d0c61
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/role_command_order.json
@@ -0,0 +1,10 @@
+{
+  "general_deps" : {
+    "_comment" : "dependencies for HBase",
+    "HBASE_REGIONSERVER-START": ["HBASE_MASTER-START"],
+    "HBASE_SERVICE_CHECK-SERVICE_CHECK": ["HBASE_MASTER-START", "HBASE_REGIONSERVER-START"],
+    "HBASE_MASTER-STOP": ["HBASE_REGIONSERVER-STOP"],
+    "HBASE_MASTER-START": ["NAMENODE-START", "DATANODE-START", "ZOOKEEPER_SERVER-START", "RANGER_USERSYNC-START"],
+    "PHOENIX_QUERY_SERVER-START": ["HBASE_MASTER-START"]
+  }
+}
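
For reference: each entry lists, on the right, the commands that must complete first. So HBASE_MASTER-START waits on the NameNode, DataNodes, ZooKeeper servers, and (when present) Ranger usersync; RegionServers and the Phoenix Query Server start only after the master; the service check waits on both master and RegionServers; and on shutdown, RegionServers stop before HBASE_MASTER-STOP.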

http://git-wip-us.apache.org/repos/asf/ambari/blob/41006ebe/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/themes/theme.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/themes/theme.json b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/themes/theme.json
new file mode 100644
index 0000000..6ed89a1
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/themes/theme.json
@@ -0,0 +1,407 @@
+{
+  "name": "default",
+  "description": "Default theme for HBASE service",
+  "configuration": {
+    "layouts": [
+      {
+        "name": "default",
+        "tabs": [
+          {
+            "name": "settings",
+            "display-name": "Settings",
+            "layout": {
+              "tab-columns": "3",
+              "tab-rows": "3",
+              "sections": [
+                {
+                  "name": "section-hbase-memory",
+                  "display-name": "Server",
+                  "row-index": "0",
+                  "column-index": "0",
+                  "row-span": "1",
+                  "column-span": "2",
+                  "section-columns": "2",
+                  "section-rows": "1",
+                  "subsections": [
+                    {
+                      "name": "subsection-hbase-memory-col1",
+                      "row-index": "0",
+                      "column-index": "0",
+                      "row-span": "1",
+                      "column-span": "1"
+                    },
+                    {
+                      "name": "subsection-hbase-memory-col2",
+                      "row-index": "0",
+                      "column-index": "1",
+                      "row-span": "1",
+                      "column-span": "1"
+                    }
+                  ]
+                },
+                {
+                  "name": "section-hbase-client",
+                  "display-name": "Client",
+                  "row-index": "0",
+                  "column-index": "2",
+                  "row-span": "1",
+                  "column-span": "1",
+                  "section-columns": "1",
+                  "section-rows": "1",
+                  "subsections": [
+                    {
+                      "name": "subsection-hbase-client-col1",
+                      "row-index": "0",
+                      "column-index": "0",
+                      "row-span": "1",
+                      "column-span": "1"
+                    }
+                  ]
+                },
+                {
+                  "name": "section-hbase-disk",
+                  "display-name": "Disk",
+                  "row-index": "1",
+                  "column-index": "0",
+                  "row-span": "1",
+                  "column-span": "3",
+                  "section-columns": "3",
+                  "section-rows": "1",
+                  "subsections": [
+                    {
+                      "name": "subsection-hbase-disk-col1",
+                      "row-index": "0",
+                      "column-index": "0",
+                      "row-span": "1",
+                      "column-span": "1"
+                    },
+                    {
+                      "name": "subsection-hbase-disk-col2",
+                      "row-index": "0",
+                      "column-index": "1",
+                      "row-span": "1",
+                      "column-span": "1"
+                    },
+                    {
+                      "name": "subsection-hbase-disk-col3",
+                      "row-index": "0",
+                      "column-index": "2",
+                      "row-span": "1",
+                      "column-span": "1"
+                    }
+                  ]
+                },
+                {
+                  "name": "section-hbase-timeouts",
+                  "display-name": "Timeouts",
+                  "row-index": "2",
+                  "column-index": "0",
+                  "row-span": "1",
+                  "column-span": "1",
+                  "section-columns": "1",
+                  "section-rows": "1",
+                  "subsections": [
+                    {
+                      "name": "subsection-hbase-timeouts-col1",
+                      "row-index": "0",
+                      "column-index": "0",
+                      "row-span": "1",
+                      "column-span": "1"
+                    }
+                  ]
+                },
+                {
+                  "name": "section-hbase-security",
+                  "display-name": "Security",
+                  "row-index": "2",
+                  "column-index": "1",
+                  "row-span": "1",
+                  "column-span": "1",
+                  "section-columns": "1",
+                  "section-rows": "1",
+                  "subsections": [
+                    {
+                      "name": "subsection-hbase-security-col1",
+                      "row-index": "0",
+                      "column-index": "0",
+                      "row-span": "1",
+                      "column-span": "1"
+                    }
+                  ]
+                },
+                {
+                  "name": "section-hbase-phoenix",
+                  "display-name": "Phoenix SQL",
+                  "row-index": "2",
+                  "column-index": "2",
+                  "row-span": "1",
+                  "column-span": "1",
+                  "section-columns": "1",
+                  "section-rows": "1",
+                  "subsections": [
+                    {
+                      "name": "subsection-hbase-phoenix-col1",
+                      "row-index": "0",
+                      "column-index": "0",
+                      "row-span": "1",
+                      "column-span": "1"
+                    }
+                  ]
+                }
+              ]
+            }
+          }
+        ]
+      }
+    ],
+    "placement": {
+      "configuration-layout": "default",
+      "configs": [
+        {
+          "config": "hbase-env/hbase_master_heapsize",
+          "subsection-name": "subsection-hbase-memory-col1"
+        },
+        {
+          "config": "hbase-env/hbase_regionserver_heapsize",
+          "subsection-name": "subsection-hbase-memory-col1"
+        },
+        {
+          "config": "hbase-site/hfile.block.cache.size",
+          "subsection-name": "subsection-hbase-memory-col1"
+        },
+        {
+          "config": "hbase-site/hbase.hregion.memstore.flush.size",
+          "subsection-name": "subsection-hbase-memory-col2"
+        },
+        {
+          "config": "hbase-site/hbase.hregion.memstore.block.multiplier",
+          "subsection-name": "subsection-hbase-memory-col2"
+        },
+        {
+          "config": "hbase-site/hbase.regionserver.handler.count",
+          "subsection-name": "subsection-hbase-memory-col2"
+        },
+        {
+          "config": "hbase-site/hbase.client.retries.number",
+          "subsection-name": "subsection-hbase-client-col1"
+        },
+        {
+          "config": "hbase-site/hbase.client.keyvalue.maxsize",
+          "subsection-name": "subsection-hbase-client-col1"
+        },
+        {
+          "config": "hbase-site/hbase.hregion.max.filesize",
+          "subsection-name": "subsection-hbase-disk-col1"
+        },
+        {
+          "config": "hbase-site/hbase.hregion.majorcompaction",
+          "subsection-name": "subsection-hbase-disk-col2"
+        },
+        {
+          "config": "hbase-site/hbase.hstore.compaction.max",
+          "subsection-name": "subsection-hbase-disk-col3"
+        },
+        {
+          "config": "hbase-site/zookeeper.session.timeout",
+          "subsection-name": "subsection-hbase-timeouts-col1"
+        },
+        {
+          "config": "hbase-site/hbase.rpc.timeout",
+          "subsection-name": "subsection-hbase-timeouts-col1"
+        },
+        {
+          "config": "hbase-site/hbase.security.authentication",
+          "subsection-name": "subsection-hbase-security-col1"
+        },
+        {
+          "config": "hbase-site/hbase.security.authorization",
+          "subsection-name": "subsection-hbase-security-col1"
+        },
+        {
+          "config": "hbase-env/phoenix_sql_enabled",
+          "subsection-name": "subsection-hbase-phoenix-col1"
+        },
+        {
+          "config": "hbase-site/phoenix.query.timeoutMs",
+          "subsection-name": "subsection-hbase-phoenix-col1"
+        }
+      ]
+    },
+    "widgets": [
+      {
+        "config": "hbase-site/hbase.regionserver.global.memstore.size",
+        "widget": {
+          "type": "slider",
+          "units": [
+            {
+              "unit-name": "percent"
+            }
+          ]
+        }
+      },
+      {
+        "config": "hbase-env/hbase_master_heapsize",
+        "widget": {
+          "type": "slider",
+          "units": [
+            {
+              "unit-name": "GB"
+            }
+          ]
+        }
+      },
+      {
+        "config": "hbase-env/hbase_regionserver_heapsize",
+        "widget": {
+          "type": "slider",
+          "units": [
+            {
+              "unit-name": "GB"
+            }
+          ]
+        }
+      },
+      {
+        "config": "hbase-site/hfile.block.cache.size",
+        "widget": {
+          "type": "slider",
+          "units": [
+            {
+              "unit-name": "percent"
+            }
+          ]
+        }
+      },
+      {
+        "config": "hbase-site/hbase.hregion.memstore.flush.size",
+        "widget": {
+          "type": "slider",
+          "units": [
+            {
+              "unit-name": "MB"
+            }
+          ]
+        }
+      },
+      {
+        "config": "hbase-site/hbase.hregion.memstore.block.multiplier",
+        "widget": {
+          "type": "combo"
+        }
+      },
+      {
+        "config": "hbase-site/hbase.regionserver.handler.count",
+        "widget": {
+          "type": "slider",
+          "units": [
+            {
+              "unit-name": "int"
+            }
+          ]
+        }
+      },
+      {
+        "config": "hbase-site/hbase.client.retries.number",
+        "widget": {
+          "type": "slider",
+          "units": [
+            {
+              "unit-name": "int"
+            }
+          ]
+        }
+      },
+      {
+        "config": "hbase-site/hbase.client.keyvalue.maxsize",
+        "widget": {
+          "type": "slider",
+          "units": [
+            {
+              "unit-name": "MB"
+            }
+          ]
+        }
+      },
+      {
+        "config": "hbase-site/hbase.hregion.max.filesize",
+        "widget": {
+          "type": "slider",
+          "units": [
+            {
+              "unit-name": "GB"
+            }
+          ]
+        }
+      },
+      {
+        "config": "hbase-site/hbase.hregion.majorcompaction",
+        "widget": {
+          "type": "time-interval-spinner",
+          "units": [
+            {
+              "unit-name": "days,hours"
+            }
+          ]
+        }
+      },
+      {
+        "config": "hbase-site/hbase.hstore.compaction.max",
+        "widget": {
+          "type": "combo"
+        }
+      },
+      {
+        "config": "hbase-site/zookeeper.session.timeout",
+        "widget": {
+          "type": "time-interval-spinner",
+          "units": [
+            {
+              "unit-name": "minutes,seconds"
+            }
+          ]
+        }
+      },
+      {
+        "config": "hbase-site/hbase.rpc.timeout",
+        "widget": {
+          "type": "time-interval-spinner",
+          "units": [
+            {
+              "unit-name": "minutes,seconds"
+            }
+          ]
+        }
+      },
+      {
+        "config": "hbase-site/hbase.security.authentication",
+        "widget": {
+          "type": "toggle"
+        }
+      },
+      {
+        "config": "hbase-site/hbase.security.authorization",
+        "widget": {
+          "type": "toggle"
+        }
+      },
+      {
+        "config": "hbase-env/phoenix_sql_enabled",
+        "widget": {
+          "type": "toggle"
+        }
+      },
+      {
+        "config": "hbase-site/phoenix.query.timeoutMs",
+        "widget": {
+          "type": "time-interval-spinner",
+          "units": [
+            {
+              "unit-name": "minutes,seconds"
+            }
+          ]
+        }
+      }
+    ]
+  }
+}
+

http://git-wip-us.apache.org/repos/asf/ambari/blob/41006ebe/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/widgets.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/widgets.json b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/widgets.json
new file mode 100644
index 0000000..ae47833
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/widgets.json
@@ -0,0 +1,510 @@
+{
+  "layouts": [
+    {
+      "layout_name": "default_hbase_dashboard",
+      "display_name": "Standard HBase Dashboard",
+      "section_name": "HBASE_SUMMARY",
+      "widgetLayoutInfo": [
+        {
+          "widget_name": "Reads and Writes",
+          "description": "Rate (per second) of read and write requests on all regions in the cluster.",
+          "widget_type": "GRAPH",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "regionserver.Server.Get_num_ops._rate",
+              "metric_path": "metrics/hbase/regionserver/Server/Get_num_ops._rate",
+              "service_name": "HBASE",
+              "component_name": "HBASE_REGIONSERVER"
+            },
+            {
+              "name": "regionserver.Server.ScanNext_num_ops._rate",
+              "metric_path": "metrics/hbase/regionserver/Server/ScanNext_num_ops._rate",
+              "service_name": "HBASE",
+              "component_name": "HBASE_REGIONSERVER"
+            },
+            {
+              "name": "regionserver.Server.Append_num_ops._rate",
+              "metric_path": "metrics/hbase/regionserver/Server/Append_num_ops._rate",
+              "service_name": "HBASE",
+              "component_name": "HBASE_REGIONSERVER"
+            },
+            {
+              "name": "regionserver.Server.Delete_num_ops._rate",
+              "metric_path": "metrics/hbase/regionserver/Server/Delete_num_ops._rate",
+              "service_name": "HBASE",
+              "component_name": "HBASE_REGIONSERVER"
+            },
+            {
+              "name": "regionserver.Server.Increment_num_ops._rate",
+              "metric_path": "metrics/hbase/regionserver/Server/Increment_num_ops._rate",
+              "service_name": "HBASE",
+              "component_name": "HBASE_REGIONSERVER"
+            },
+            {
+              "name": "regionserver.Server.Mutate_num_ops._rate",
+              "metric_path": "metrics/hbase/regionserver/Server/Mutate_num_ops._rate",
+              "service_name": "HBASE",
+              "component_name": "HBASE_REGIONSERVER"
+            }
+          ],
+          "values": [
+            {
+              "name": "Read Requests",
+              "value": "${regionserver.Server.Get_num_ops._rate + regionserver.Server.ScanNext_num_ops._rate}"
+            },
+            {
+              "name": "Write Requests",
+              "value": "${regionserver.Server.Append_num_ops._rate + regionserver.Server.Delete_num_ops._rate + regionserver.Server.Increment_num_ops._rate + regionserver.Server.Mutate_num_ops._rate}"
+            }
+          ],
+          "properties": {
+            "graph_type": "LINE",
+            "time_range": "1"
+          }
+        },
+        {
+          "widget_name": "Read Latency",
+          "description": "maximum of 95% read latency.",
+          "widget_type": "GRAPH",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "regionserver.Server.Get_95th_percentile._max",
+              "metric_path": "metrics/hbase/regionserver/Server/Get_95th_percentile._max",
+              "service_name": "HBASE",
+              "component_name": "HBASE_REGIONSERVER"
+            },
+            {
+              "name": "regionserver.Server.ScanNext_95th_percentile._max",
+              "metric_path": "metrics/hbase/regionserver/Server/ScanNext_95th_percentile._max",
+              "service_name": "HBASE",
+              "component_name": "HBASE_REGIONSERVER"
+            }
+          ],
+          "values": [
+            {
+              "name": "Cluster wide maximum of 95% Get Latency",
+              "value": "${regionserver.Server.Get_95th_percentile._max}"
+            },
+            {
+              "name": "Cluster wide maximum of 95% ScanNext Latency",
+              "value": "${regionserver.Server.ScanNext_95th_percentile._max}"
+            }
+          ],
+          "properties": {
+            "display_unit": "ms",
+            "graph_type": "LINE",
+            "time_range": "1"
+          }
+        },
+        {
+          "widget_name": "Write Latency",
+          "description": "maximum of 95% write latency.",
+          "widget_type": "GRAPH",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "regionserver.Server.Mutate_95th_percentile._max",
+              "metric_path": "metrics/hbase/regionserver/Server/Mutate_95th_percentile._max",
+              "service_name": "HBASE",
+              "component_name": "HBASE_REGIONSERVER"
+            },
+            {
+              "name": "regionserver.Server.Increment_95th_percentile._max",
+              "metric_path": "metrics/hbase/regionserver/Server/Increment_95th_percentile._max",
+              "service_name": "HBASE",
+              "component_name": "HBASE_REGIONSERVER"
+            },
+            {
+              "name": "regionserver.Server.Append_95th_percentile._max",
+              "metric_path": "metrics/hbase/regionserver/Server/Append_95th_percentile._max",
+              "service_name": "HBASE",
+              "component_name": "HBASE_REGIONSERVER"
+            },
+            {
+              "name": "regionserver.Server.Delete_95th_percentile._max",
+              "metric_path": "metrics/hbase/regionserver/Server/Delete_95th_percentile._max",
+              "service_name": "HBASE",
+              "component_name": "HBASE_REGIONSERVER"
+            }
+          ],
+          "values": [
+            {
+              "name": "Cluster wide maximum of 95% Mutate Latency",
+              "value": "${regionserver.Server.Mutate_95th_percentile._max}"
+            },
+            {
+              "name": "Cluster wide maximum of 95% Increment Latency",
+              "value": "${regionserver.Server.Increment_95th_percentile._max}"
+            },
+            {
+              "name": "Cluster wide maximum of 95% Append Latency",
+              "value": "${regionserver.Server.Append_95th_percentile._max}"
+            },
+            {
+              "name": "Cluster wide maximum of 95% Delete Latency",
+              "value": "${regionserver.Server.Delete_95th_percentile._max}"
+            }
+          ],
+          "properties": {
+            "display_unit": "ms",
+            "graph_type": "LINE",
+            "time_range": "1"
+          }
+        },
+        {
+          "widget_name": "Open Connections",
+          "description": "Count of open connections across all RegionServer. This is indicative of RegionServer load in the cluster.",
+          "widget_type": "GRAPH",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "regionserver.RegionServer.numOpenConnections._sum",
+              "metric_path": "metrics/hbase/ipc/IPC/numOpenConnections._sum",
+              "category": "",
+              "service_name": "HBASE",
+              "component_name": "HBASE_REGIONSERVER"
+            }
+          ],
+          "values": [
+            {
+              "name": "Open Connections",
+              "value": "${regionserver.RegionServer.numOpenConnections._sum}"
+            }
+          ],
+          "properties": {
+            "graph_type": "LINE",
+            "time_range": "1"
+          }
+        },
+        {
+          "widget_name": "Request Handlers",
+          "description": "Count of Active handlers vs count of calls waiting in the general queue.",
+          "widget_type": "GRAPH",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "regionserver.RegionServer.numActiveHandler._sum",
+              "metric_path": "metrics/hbase/ipc/IPC/numActiveHandler._sum",
+              "service_name": "HBASE",
+              "component_name": "HBASE_REGIONSERVER"
+            },
+            {
+              "name": "regionserver.RegionServer.numCallsInGeneralQueue._sum",
+              "metric_path": "metrics/hbase/ipc/IPC/numCallsInGeneralQueue._sum",
+              "service_name": "HBASE",
+              "component_name": "HBASE_REGIONSERVER"
+            }
+          ],
+          "values": [
+            {
+              "name": "Active Handlers",
+              "value": "${regionserver.RegionServer.numActiveHandler._sum}"
+            },
+            {
+              "name": "Calls in General Queue",
+              "value": "${regionserver.RegionServer.numCallsInGeneralQueue._sum}"
+            }
+          ],
+          "properties": {
+            "graph_type": "LINE",
+            "time_range": "1"
+          }
+        },
+        {
+          "widget_name": "Files Local",
+          "description": "Average percentage of local files to RegionServer in the cluster.",
+          "widget_type": "NUMBER",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "regionserver.Server.percentFilesLocal",
+              "metric_path": "metrics/hbase/regionserver/Server/percentFilesLocal",
+              "service_name": "HBASE",
+              "component_name": "HBASE_REGIONSERVER"
+            }
+          ],
+          "values": [
+            {
+              "name": "Files Local",
+              "value": "${regionserver.Server.percentFilesLocal}"
+            }
+          ],
+          "properties": {
+            "display_unit": "%",
+            "error_threshold":"25",
+            "warning_threshold": "75"
+          }
+        },
+        {
+          "widget_name": "Blocked Updates",
+          "description": "Number of milliseconds updates have been blocked so the memstore can be flushed.",
+          "widget_type": "GRAPH",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "regionserver.Server.updatesBlockedTime._rate",
+              "metric_path": "metrics/hbase/regionserver/Server/updatesBlockedTime._rate",
+              "service_name": "HBASE",
+              "component_name": "HBASE_REGIONSERVER"
+            }
+          ],
+          "values": [
+            {
+              "name": "Updates Blocked Time",
+              "value": "${regionserver.Server.updatesBlockedTime._rate}"
+            }
+          ],
+          "properties": {
+            "display_unit": "ms",
+            "graph_type": "LINE",
+            "time_range": "1"
+          }
+        },
+        {
+          "widget_name": "Cluster CPU",
+          "description": "Percentage of CPU utilized across all RegionServer hosts.",
+          "widget_type": "GRAPH",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "cpu_system._sum",
+              "metric_path": "metrics/cpu/cpu_system._sum",
+              "service_name": "HBASE",
+              "component_name": "HBASE_REGIONSERVER"
+            },
+            {
+              "name": "cpu_user._sum",
+              "metric_path": "metrics/cpu/cpu_user._sum",
+              "service_name": "HBASE",
+              "component_name": "HBASE_REGIONSERVER"
+            },
+            {
+              "name": "cpu_nice._sum",
+              "metric_path": "metrics/cpu/cpu_nice._sum",
+              "service_name": "HBASE",
+              "component_name": "HBASE_REGIONSERVER"
+            },
+            {
+              "name": "cpu_idle._sum",
+              "metric_path": "metrics/cpu/cpu_idle._sum",
+              "service_name": "HBASE",
+              "component_name": "HBASE_REGIONSERVER"
+            },
+            {
+              "name": "cpu_wio._sum",
+              "metric_path": "metrics/cpu/cpu_wio._sum",
+              "service_name": "HBASE",
+              "component_name": "HBASE_REGIONSERVER"
+            }
+          ],
+          "values": [
+            {
+              "name": "CPU utilization",
+              "value": "${((cpu_system._sum + cpu_user._sum + cpu_nice._sum)/(cpu_system._sum + cpu_user._sum + cpu_nice._sum + cpu_idle._sum + cpu_wio._sum)) * 100}"
+            }
+          ],
+          "properties": {
+            "graph_type": "LINE",
+            "time_range": "1",
+            "display_unit": "%"
+          }
+        },
+        {
+          "widget_name": "Cluster Network",
+          "description": "Average of Network IO utilized across all RegionServer hosts.",
+          "widget_type": "GRAPH",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "pkts_in._avg",
+              "metric_path": "metrics/network/pkts_in._avg",
+              "service_name": "HBASE",
+              "component_name": "HBASE_REGIONSERVER"
+            },
+            {
+              "name": "pkts_out._avg",
+              "metric_path": "metrics/network/pkts_out._avg",
+              "service_name": "HBASE",
+              "component_name": "HBASE_REGIONSERVER"
+            }
+          ],
+          "values": [
+            {
+              "name": "Packets In",
+              "value": "${pkts_in._avg}"
+            },
+            {
+              "name": "Packets Out",
+              "value": "${pkts_out._avg}"
+            }
+          ],
+          "properties": {
+            "graph_type": "LINE",
+            "time_range": "1"
+          }
+        },
+        {
+          "widget_name": "Cluster Disk",
+          "description": "Sum of disk throughput for all RegionServer hosts.",
+          "widget_type": "GRAPH",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "read_bps._sum",
+              "metric_path": "metrics/disk/read_bps._sum",
+              "service_name": "HBASE",
+              "component_name": "HBASE_REGIONSERVER"
+            },
+            {
+              "name": "write_bps._sum",
+              "metric_path": "metrics/disk/write_bps._sum",
+              "service_name": "HBASE",
+              "component_name": "HBASE_REGIONSERVER"
+            }
+          ],
+          "values": [
+            {
+              "name": "Read throughput",
+              "value": "${read_bps._sum/1048576}"
+            },
+            {
+              "name": "Write throughput",
+              "value": "${write_bps._sum/1048576}"
+            }
+          ],
+          "properties": {
+            "display_unit": "Mbps",
+            "graph_type": "LINE",
+            "time_range": "1"
+          }
+        }
+      ]
+    },
+    {
+      "layout_name": "default_hbase_heatmap",
+      "display_name": "HBase Heatmaps",
+      "section_name": "HBASE_HEATMAPS",
+      "widgetLayoutInfo": [
+        {
+          "widget_name": "HBase Compaction Queue Size",
+          "description": "",
+          "widget_type": "HEATMAP",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "Hadoop:service=HBase,name=RegionServer,sub=Server.compactionQueueLength",
+              "metric_path": "metrics/hbase/regionserver/compactionQueueSize",
+              "service_name": "HBASE",
+              "component_name": "HBASE_REGIONSERVER"
+            }
+          ],
+          "values": [
+            {
+              "name": "HBase Compaction Queue Size",
+              "value": "${Hadoop:service=HBase,name=RegionServer,sub=Server.compactionQueueLength} "
+            }
+          ],
+          "properties": {
+            "display_unit": "",
+            "max_limit": "10"
+          }
+        },
+        {
+          "widget_name": "HBase Memstore Sizes",
+          "description": "",
+          "widget_type": "HEATMAP",
+          "is_visible": false,
+          "metrics": [
+            {
+              "name": "Hadoop:service=HBase,name=RegionServer,sub=Server.memStoreSize",
+              "metric_path": "metrics/hbase/regionserver/memstoreSize",
+              "service_name": "HBASE",
+              "component_name": "HBASE_REGIONSERVER"
+            }
+          ],
+          "values": [
+            {
+              "name": "HBase Memstore Sizes",
+              "value": "${Hadoop:service=HBase,name=RegionServer,sub=Server.memStoreSize}"
+            }
+          ],
+          "properties": {
+            "display_unit": "B",
+            "max_limit": "104857600"
+          }
+        },
+        {
+          "widget_name": "HBase Read Request Count",
+          "description": "",
+          "widget_type": "HEATMAP",
+          "is_visible": false,
+          "metrics": [
+            {
+              "name": "Hadoop:service=HBase,name=RegionServer,sub=Server.readRequestCount",
+              "metric_path": "metrics/hbase/regionserver/readRequestsCount",
+              "service_name": "HBASE",
+              "component_name": "HBASE_REGIONSERVER"
+            }
+          ],
+          "values": [
+            {
+              "name": "HBase Read Request Count",
+              "value": "${Hadoop:service=HBase,name=RegionServer,sub=Server.readRequestCount}"
+            }
+          ],
+          "properties": {
+            "max_limit": "200"
+          }
+        },
+        {
+          "widget_name": "HBase Write Request Count",
+          "description": "",
+          "widget_type": "HEATMAP",
+          "is_visible": false,
+          "metrics": [
+            {
+              "name": "Hadoop:service=HBase,name=RegionServer,sub=Server.writeRequestCount",
+              "metric_path": "metrics/hbase/regionserver/writeRequestsCount",
+              "service_name": "HBASE",
+              "component_name": "HBASE_REGIONSERVER"
+            }
+          ],
+          "values": [
+            {
+              "name": "HBase Write Request Count",
+              "value": "${Hadoop:service=HBase,name=RegionServer,sub=Server.writeRequestCount}"
+            }
+          ],
+          "properties": {
+            "max_limit": "200"
+          }
+        },
+        {
+          "widget_name": "HBase Regions",
+          "description": "",
+          "widget_type": "HEATMAP",
+          "is_visible": false,
+          "metrics": [
+            {
+              "name": "Hadoop:service=HBase,name=RegionServer,sub=Server.regionCount",
+              "metric_path": "metrics/hbase/regionserver/regions",
+              "service_name": "HBASE",
+              "component_name": "HBASE_REGIONSERVER"
+            }
+          ],
+          "values": [
+            {
+              "name": "HBase Regions",
+              "value": "${Hadoop:service=HBase,name=RegionServer,sub=Server.regionCount}"
+            }
+          ],
+          "properties": {
+            "max_limit": "10"
+          }
+        }
+      ]
+    }
+  ]
+}

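The widget definitions above derive each plotted series from a templated value expression: the metric name inside ${...} is replaced by the collected value and the surrounding arithmetic is then applied (here, scaling the raw bytes-per-second sums by 1048576 before display). As a rough illustration of the substitution step only, here is a minimal sketch with a hypothetical resolver, not Ambari's actual widget engine:

import java.util.Map;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

// Hypothetical stand-in for the widget value evaluator; it only handles the
// "${metric/divisor}" shape used by the Cluster Disk widget above.
public class WidgetValueSketch {
  private static final Pattern EXPR = Pattern.compile("\\$\\{([^/}]+)/([0-9]+)\\}");

  static double evaluate(String template, Map<String, Double> metrics) {
    Matcher m = EXPR.matcher(template);
    if (!m.matches()) {
      throw new IllegalArgumentException("unsupported expression: " + template);
    }
    double raw = metrics.get(m.group(1).trim());     // e.g. read_bps._sum in bytes/s
    double divisor = Double.parseDouble(m.group(2)); // e.g. 1048576
    return raw / divisor;
  }

  public static void main(String[] args) {
    Map<String, Double> metrics = Map.of("read_bps._sum", 52428800.0);
    System.out.println(evaluate("${read_bps._sum/1048576}", metrics)); // prints 50.0
  }
}
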
http://git-wip-us.apache.org/repos/asf/ambari/blob/41006ebe/ambari-server/src/main/resources/stacks/HDP/3.0/services/HBASE/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/services/HBASE/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/3.0/services/HBASE/metainfo.xml
new file mode 100644
index 0000000..04adb75
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/services/HBASE/metainfo.xml
@@ -0,0 +1,26 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>HBASE</name>
+      <extends>common-services/HBASE/2.0.0.3.0</extends>
+    </service>
+  </services>
+</metainfo>

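Note that the new metainfo.xml defines no components or configurations of its own: through the <extends>common-services/HBASE/2.0.0.3.0</extends> element, the HDP 3.0 stack inherits the shared HBASE service definition wholesale, so only stack-specific differences would ever need to be declared here.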

[12/50] [abbrv] ambari git commit: AMBARI-20780 - "Update HBase Configuration" group is ignored in upgrade.xml

Posted by ao...@apache.org.
AMBARI-20780 - "Update HBase Configuration" group is ignored in upgrade.xml

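In the HDP 2.3 upgrade packs, the group that runs HBaseEnvMaxDirectMemorySizeAction was declared as a plain <group>; because execute-stage entries are only honored inside cluster-type groupings, the whole group was silently skipped during orchestration. The diffs below add xsi:type="cluster" to the three upgrade packs and extend UpgradeHelperTest to assert that the group now materializes as a stage. A minimal sketch of the skip rule, using hypothetical stand-in types (Java records) rather than Ambari's real parser classes:

import java.util.List;

// Hypothetical model of the behavior this commit works around: execute-stage
// entries only become stages when the enclosing group is a cluster grouping.
record Grouping(String name, boolean clusterType, List<String> executeStages) {}

public class UpgradePackSketch {
  static List<String> stagesFor(Grouping g) {
    // Assumption for illustration: non-cluster groups contribute nothing,
    // which is why the un-typed HBASE group was ignored before this change.
    return g.clusterType() ? g.executeStages() : List.of();
  }

  public static void main(String[] args) {
    Grouping before = new Grouping("HBASE", false, List.of("Update HBase Configuration"));
    Grouping after  = new Grouping("HBASE", true,  List.of("Update HBase Configuration"));
    System.out.println(stagesFor(before)); // []
    System.out.println(stagesFor(after));  // [Update HBase Configuration]
  }
}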

Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/8d0ec084
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/8d0ec084
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/8d0ec084

Branch: refs/heads/branch-3.0-perf
Commit: 8d0ec0847abb12cd0be7cb457711733f27b4d168
Parents: 92ac66f
Author: Tim Thorpe <tt...@apache.org>
Authored: Wed Apr 19 11:15:09 2017 -0700
Committer: Andrew Onishuk <ao...@hortonworks.com>
Committed: Mon Apr 24 14:59:56 2017 +0300

----------------------------------------------------------------------
 .../stacks/HDP/2.3/upgrades/upgrade-2.4.xml     |  2 +-
 .../stacks/HDP/2.3/upgrades/upgrade-2.5.xml     |  2 +-
 .../stacks/HDP/2.3/upgrades/upgrade-2.6.xml     |  2 +-
 .../ambari/server/state/UpgradeHelperTest.java  | 33 ++++++++++++++++----
 .../HDP/2.2.0/upgrades/upgrade_test_checks.xml  | 21 ++++++++++++-
 5 files changed, 50 insertions(+), 10 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/8d0ec084/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.4.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.4.xml b/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.4.xml
index 3757121..fd75e49 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.4.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.4.xml
@@ -130,7 +130,7 @@
       </service>
     </group>
 
-    <group name="HBASE" title="Update HBase Configuration">
+    <group xsi:type="cluster" name="HBASE" title="Update HBase Configuration">
       <skippable>true</skippable>
 
       <execute-stage service="HBASE" component="HBASE_MASTER" title="Update HBase Configuration">

http://git-wip-us.apache.org/repos/asf/ambari/blob/8d0ec084/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.5.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.5.xml b/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.5.xml
index f7fd175..5318a51 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.5.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.5.xml
@@ -158,7 +158,7 @@
       </service>
     </group>
 
-    <group name="HBASE" title="Update HBase Configuration">
+    <group xsi:type="cluster" name="HBASE" title="Update HBase Configuration">
       <skippable>true</skippable>
 
       <execute-stage service="HBASE" component="HBASE_MASTER" title="Update HBase Configuration">

http://git-wip-us.apache.org/repos/asf/ambari/blob/8d0ec084/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.6.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.6.xml b/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.6.xml
index 78fe831..d2e6288 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.6.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.6.xml
@@ -160,7 +160,7 @@
       </service>
     </group>
 
-    <group name="HBASE" title="Update HBase Configuration">
+    <group xsi:type="cluster" name="HBASE" title="Update HBase Configuration">
       <skippable>true</skippable>
 
       <execute-stage service="HBASE" component="HBASE_MASTER" title="Update HBase Configuration">

http://git-wip-us.apache.org/repos/asf/ambari/blob/8d0ec084/ambari-server/src/test/java/org/apache/ambari/server/state/UpgradeHelperTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/UpgradeHelperTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/UpgradeHelperTest.java
index 0dd7f58..d80b03c 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/state/UpgradeHelperTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/state/UpgradeHelperTest.java
@@ -1039,12 +1039,9 @@ public class UpgradeHelperTest {
     UpgradePack upgrade = upgrades.get("upgrade_test_checks");
     assertNotNull(upgrade);
 
-    Cluster c = makeCluster();
     // HBASE and PIG have service checks, but not TEZ.
     Set<String> additionalServices = new HashSet<String>() {{ add("HBASE"); add("PIG"); add("TEZ"); add("AMBARI_METRICS"); }};
-    for(String service : additionalServices) {
-      c.addService(service);
-    }
+    Cluster c = makeCluster(true, additionalServices);
 
     int numServiceChecksExpected = 0;
     Collection<Service> services = c.getServices().values();
@@ -1071,9 +1068,9 @@ public class UpgradeHelperTest {
 
     List<UpgradeGroupHolder> groups = m_upgradeHelper.createSequence(upgrade, context);
 
-    assertEquals(7, groups.size());
+    assertEquals(8, groups.size());
 
-    UpgradeGroupHolder holder = groups.get(3);
+    UpgradeGroupHolder holder = groups.get(4);
     assertEquals(holder.name, "SERVICE_CHECK_1");
     assertEquals(7, holder.items.size());
     int numServiceChecksActual = 0;
@@ -1100,6 +1097,13 @@ public class UpgradeHelperTest {
     assertEquals(
         "This is a manual task with a placeholder of placeholder-rendered-properly",
         manualTask.messages.get(0));
+
+    UpgradeGroupHolder clusterGroup = groups.get(3);
+    assertEquals(clusterGroup.name, "HBASE");
+    assertEquals(clusterGroup.title, "Update HBase Configuration");
+    assertEquals(1, clusterGroup.items.size());
+    StageWrapper stage = clusterGroup.items.get(0);
+    assertEquals(stage.getText(), "Update HBase Configuration");
   }
 
   @Test
@@ -1225,6 +1229,14 @@ public class UpgradeHelperTest {
    * @throws AmbariException
    */
   private Cluster makeCluster(boolean clean) throws AmbariException, AuthorizationException {
+    return makeCluster(clean, new HashSet<String>());
+  }
+
+  /**
+   * Create an HA cluster
+   * @throws AmbariException
+   */
+  private Cluster makeCluster(boolean clean, Set<String> additionalServices) throws AmbariException, AuthorizationException {
     Clusters clusters = injector.getInstance(Clusters.class);
     ServiceFactory serviceFactory = injector.getInstance(ServiceFactory.class);
 
@@ -1369,6 +1381,15 @@ public class UpgradeHelperTest {
 
     expect(m_masterHostResolver.getCluster()).andReturn(c).anyTimes();
 
+    for(String service : additionalServices) {
+      c.addService(service);
+      if (service.equals("HBASE")) {
+        type = new HostsType();
+        type.hosts.addAll(Arrays.asList("h1", "h2"));
+        expect(m_masterHostResolver.getMasterAndHosts("HBASE", "HBASE_MASTER")).andReturn(type).anyTimes();
+      }
+    }
+
     replay(m_masterHostResolver);
 
     return c;

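The new makeCluster overload stubs the master host resolver with EasyMock's record/replay pattern: each expected call is registered via expect(...).andReturn(...).anyTimes(), and the mock only starts answering once replay(...) runs. Outside the Ambari test harness the same pattern looks like this (the HostResolver interface is a stand-in for MasterHostResolver, and EasyMock is assumed to be on the classpath):

import static org.easymock.EasyMock.createMock;
import static org.easymock.EasyMock.expect;
import static org.easymock.EasyMock.replay;
import static org.easymock.EasyMock.verify;

import java.util.Arrays;
import java.util.List;

public class ResolverMockSketch {
  // Stand-in for Ambari's MasterHostResolver; only the call shape matters here.
  interface HostResolver {
    List<String> getMasterAndHosts(String service, String component);
  }

  public static void main(String[] args) {
    HostResolver resolver = createMock(HostResolver.class);
    // Record phase: this stub may be queried any number of times.
    expect(resolver.getMasterAndHosts("HBASE", "HBASE_MASTER"))
        .andReturn(Arrays.asList("h1", "h2")).anyTimes();
    replay(resolver); // switch from recording expectations to answering calls

    System.out.println(resolver.getMasterAndHosts("HBASE", "HBASE_MASTER"));
    verify(resolver); // confirm the recorded expectations were honored
  }
}
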
http://git-wip-us.apache.org/repos/asf/ambari/blob/8d0ec084/ambari-server/src/test/resources/stacks/HDP/2.2.0/upgrades/upgrade_test_checks.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.2.0/upgrades/upgrade_test_checks.xml b/ambari-server/src/test/resources/stacks/HDP/2.2.0/upgrades/upgrade_test_checks.xml
index 4d4d972..b7027c5 100644
--- a/ambari-server/src/test/resources/stacks/HDP/2.2.0/upgrades/upgrade_test_checks.xml
+++ b/ambari-server/src/test/resources/stacks/HDP/2.2.0/upgrades/upgrade_test_checks.xml
@@ -78,7 +78,16 @@
       </service>
       <service-check>false</service-check>
     </group>
-    
+
+    <group xsi:type="cluster" name="HBASE" title="Update HBase Configuration">
+      <skippable>true</skippable>
+      <execute-stage service="HBASE" component="HBASE_MASTER" title="Update HBase Configuration">
+        <task xsi:type="server_action" class="org.apache.ambari.server.serveraction.upgrades.HBaseEnvMaxDirectMemorySizeAction">
+          <summary>Update HBase Env Configuration</summary>
+        </task>
+      </execute-stage>
+    </group>
+
     <group name="SERVICE_CHECK_1" title="Post-Master Service Checks" xsi:type="service-check">
       <priority>
         <service>HDFS</service>
@@ -108,6 +117,16 @@
         <message>Please run additional tests</message>
       </batch>
     </group>
+
+    <!-- This group will be ignored because its syntax is invalid: it is not a cluster-type group, yet it contains an execute-stage. -->
+    <group name="HBASE" title="Update HBase Configuration">
+      <skippable>true</skippable>
+      <execute-stage service="HBASE" component="HBASE_MASTER" title="Update HBase Configuration">
+        <task xsi:type="server_action" class="org.apache.ambari.server.serveraction.upgrades.HBaseEnvMaxDirectMemorySizeAction">
+          <summary>Update HBase Env Configuration</summary>
+        </task>
+      </execute-stage>
+    </group>
     
     <group name="SERVICE_CHECK_2" title="Post-Slave Service Checks" xsi:type="service-check">
       <priority>


[22/50] [abbrv] ambari git commit: AMBARI-20813. Convert ambari-web build process to use yarn package manager to fix dependencies and to make the process faster. (yusaku)

Posted by ao...@apache.org.
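
Each yarn.lock entry below follows the same four-part shape: the requested range(s) from package.json files (e.g. "abbrev@1, abbrev@1.0.x"), the exact version yarn selected, the resolved registry tarball URL with its content-hash fragment, and, where present, the dependencies of that pinned version. Because every transitive package is pinned this way, repeated builds resolve an identical dependency tree, which is the reproducibility (and speed) gain the commit message refers to.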
http://git-wip-us.apache.org/repos/asf/ambari/blob/8ead1899/ambari-web/yarn.lock
----------------------------------------------------------------------
diff --git a/ambari-web/yarn.lock b/ambari-web/yarn.lock
new file mode 100644
index 0000000..d562817
--- /dev/null
+++ b/ambari-web/yarn.lock
@@ -0,0 +1,4170 @@
+# THIS IS AN AUTOGENERATED FILE. DO NOT EDIT THIS FILE DIRECTLY.
+# yarn lockfile v1
+
+
+abab@^1.0.3:
+  version "1.0.3"
+  resolved "https://registry.yarnpkg.com/abab/-/abab-1.0.3.tgz#b81de5f7274ec4e756d797cd834f303642724e5d"
+
+abbrev@1, abbrev@1.0.x:
+  version "1.0.9"
+  resolved "https://registry.yarnpkg.com/abbrev/-/abbrev-1.0.9.tgz#91b4792588a7738c25f35dd6f63752a2f8776135"
+
+accepts@1.3.3:
+  version "1.3.3"
+  resolved "https://registry.yarnpkg.com/accepts/-/accepts-1.3.3.tgz#c3ca7434938648c3e0d9c1e328dd68b622c284ca"
+  dependencies:
+    mime-types "~2.1.11"
+    negotiator "0.6.1"
+
+acorn-globals@^3.1.0:
+  version "3.1.0"
+  resolved "https://registry.yarnpkg.com/acorn-globals/-/acorn-globals-3.1.0.tgz#fd8270f71fbb4996b004fa880ee5d46573a731bf"
+  dependencies:
+    acorn "^4.0.4"
+
+acorn@^4.0.3, acorn@^4.0.4:
+  version "4.0.11"
+  resolved "https://registry.yarnpkg.com/acorn/-/acorn-4.0.11.tgz#edcda3bd937e7556410d42ed5860f67399c794c0"
+
+adm-zip@0.2.1:
+  version "0.2.1"
+  resolved "https://registry.yarnpkg.com/adm-zip/-/adm-zip-0.2.1.tgz#e801cedeb5bd9a4e98d699c5c0f4239e2731dcbf"
+
+after@0.8.2:
+  version "0.8.2"
+  resolved "https://registry.yarnpkg.com/after/-/after-0.8.2.tgz#fedb394f9f0e02aa9768e702bda23b505fae7e1f"
+
+ajv@^4.9.1:
+  version "4.11.7"
+  resolved "https://registry.yarnpkg.com/ajv/-/ajv-4.11.7.tgz#8655a5d86d0824985cc471a1d913fb6729a0ec48"
+  dependencies:
+    co "^4.6.0"
+    json-stable-stringify "^1.0.1"
+
+align-text@^0.1.1, align-text@^0.1.3:
+  version "0.1.4"
+  resolved "https://registry.yarnpkg.com/align-text/-/align-text-0.1.4.tgz#0cd90a561093f35d0a99256c22b7069433fad117"
+  dependencies:
+    kind-of "^3.0.2"
+    longest "^1.0.1"
+    repeat-string "^1.5.2"
+
+alter@~0.2.0:
+  version "0.2.0"
+  resolved "https://registry.yarnpkg.com/alter/-/alter-0.2.0.tgz#c7588808617572034aae62480af26b1d4d1cb3cd"
+  dependencies:
+    stable "~0.1.3"
+
+amdefine@>=0.0.4:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/amdefine/-/amdefine-1.0.1.tgz#4a5282ac164729e93619bcfd3ad151f817ce91f5"
+
+ansi-color@~0.2.1:
+  version "0.2.1"
+  resolved "https://registry.yarnpkg.com/ansi-color/-/ansi-color-0.2.1.tgz#3e75c037475217544ed763a8db5709fa9ae5bf9a"
+
+ansi-regex@^2.0.0:
+  version "2.1.1"
+  resolved "https://registry.yarnpkg.com/ansi-regex/-/ansi-regex-2.1.1.tgz#c3b33ab5ee360d86e0e628f0468ae7ef27d654df"
+
+ansi-styles@^2.2.1:
+  version "2.2.1"
+  resolved "https://registry.yarnpkg.com/ansi-styles/-/ansi-styles-2.2.1.tgz#b432dd3358b634cf75e1e4664368240533c1ddbe"
+
+anymatch@^1.0.0, anymatch@^1.3.0, anymatch@~1.3.0:
+  version "1.3.0"
+  resolved "https://registry.yarnpkg.com/anymatch/-/anymatch-1.3.0.tgz#a3e52fa39168c825ff57b0248126ce5a8ff95507"
+  dependencies:
+    arrify "^1.0.0"
+    micromatch "^2.1.5"
+
+anymatch@~1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/anymatch/-/anymatch-1.0.0.tgz#0aed64d30bc25973afdb3155eb87ae6881e21b1c"
+  dependencies:
+    minimatch "~1.0.0"
+
+anysort@~1.0.0:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/anysort/-/anysort-1.0.1.tgz#341bd5d5ba1485f64e55ae865f1d45994b507fc4"
+  dependencies:
+    anymatch "~1.3.0"
+
+aproba@^1.0.3:
+  version "1.1.1"
+  resolved "https://registry.yarnpkg.com/aproba/-/aproba-1.1.1.tgz#95d3600f07710aa0e9298c726ad5ecf2eacbabab"
+
+are-we-there-yet@~1.1.2:
+  version "1.1.2"
+  resolved "https://registry.yarnpkg.com/are-we-there-yet/-/are-we-there-yet-1.1.2.tgz#80e470e95a084794fe1899262c5667c6e88de1b3"
+  dependencies:
+    delegates "^1.0.0"
+    readable-stream "^2.0.0 || ^1.1.13"
+
+argparse@^1.0.7:
+  version "1.0.9"
+  resolved "https://registry.yarnpkg.com/argparse/-/argparse-1.0.9.tgz#73d83bc263f86e97f8cc4f6bae1b0e90a7d22c86"
+  dependencies:
+    sprintf-js "~1.0.2"
+
+arr-diff@^2.0.0:
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/arr-diff/-/arr-diff-2.0.0.tgz#8f3b827f955a8bd669697e4a4256ac3ceae356cf"
+  dependencies:
+    arr-flatten "^1.0.1"
+
+arr-flatten@^1.0.1:
+  version "1.0.3"
+  resolved "https://registry.yarnpkg.com/arr-flatten/-/arr-flatten-1.0.3.tgz#a274ed85ac08849b6bd7847c4580745dc51adfb1"
+
+array-equal@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/array-equal/-/array-equal-1.0.0.tgz#8c2a5ef2472fd9ea742b04c77a75093ba2757c93"
+
+array-find-index@^1.0.1:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/array-find-index/-/array-find-index-1.0.2.tgz#df010aa1287e164bbda6f9723b0a96a1ec4187a1"
+
+array-slice@^0.2.3:
+  version "0.2.3"
+  resolved "https://registry.yarnpkg.com/array-slice/-/array-slice-0.2.3.tgz#dd3cfb80ed7973a75117cdac69b0b99ec86186f5"
+
+array-unique@^0.2.1:
+  version "0.2.1"
+  resolved "https://registry.yarnpkg.com/array-unique/-/array-unique-0.2.1.tgz#a1d97ccafcbc2625cc70fadceb36a50c58b01a53"
+
+arraybuffer.slice@0.0.6:
+  version "0.0.6"
+  resolved "https://registry.yarnpkg.com/arraybuffer.slice/-/arraybuffer.slice-0.0.6.tgz#f33b2159f0532a3f3107a272c0ccfbd1ad2979ca"
+
+arrify@^1.0.0:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/arrify/-/arrify-1.0.1.tgz#898508da2226f380df904728456849c1501a4b0d"
+
+asn1@0.1.11:
+  version "0.1.11"
+  resolved "https://registry.yarnpkg.com/asn1/-/asn1-0.1.11.tgz#559be18376d08a4ec4dbe80877d27818639b2df7"
+
+asn1@~0.2.3:
+  version "0.2.3"
+  resolved "https://registry.yarnpkg.com/asn1/-/asn1-0.2.3.tgz#dac8787713c9966849fc8180777ebe9c1ddf3b86"
+
+assert-plus@1.0.0, assert-plus@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/assert-plus/-/assert-plus-1.0.0.tgz#f12e0f3c5d77b0b1cdd9146942e4e96c1e4dd525"
+
+assert-plus@^0.1.5:
+  version "0.1.5"
+  resolved "https://registry.yarnpkg.com/assert-plus/-/assert-plus-0.1.5.tgz#ee74009413002d84cec7219c6ac811812e723160"
+
+assert-plus@^0.2.0:
+  version "0.2.0"
+  resolved "https://registry.yarnpkg.com/assert-plus/-/assert-plus-0.2.0.tgz#d74e1b87e7affc0db8aadb7021f3fe48101ab234"
+
+assertion-error@^1.0.1:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/assertion-error/-/assertion-error-1.0.2.tgz#13ca515d86206da0bac66e834dd397d87581094c"
+
+assetsmanager-brunch@~1.8.1:
+  version "1.8.1"
+  resolved "https://registry.yarnpkg.com/assetsmanager-brunch/-/assetsmanager-brunch-1.8.1.tgz#e2fcff9c1b5fb5f5feb3fcbd0d762a08ad91072d"
+  dependencies:
+    fs-extra "~0.6.3"
+    glob "~3.2.3"
+
+ast-traverse@~0.1.1:
+  version "0.1.1"
+  resolved "https://registry.yarnpkg.com/ast-traverse/-/ast-traverse-0.1.1.tgz#69cf2b8386f19dcda1bb1e05d68fe359d8897de6"
+
+ast-types@0.8.12:
+  version "0.8.12"
+  resolved "https://registry.yarnpkg.com/ast-types/-/ast-types-0.8.12.tgz#a0d90e4351bb887716c83fd637ebf818af4adfcc"
+
+ast-types@0.8.15:
+  version "0.8.15"
+  resolved "https://registry.yarnpkg.com/ast-types/-/ast-types-0.8.15.tgz#8eef0827f04dff0ec8857ba925abe3fea6194e52"
+
+ast-types@0.9.6:
+  version "0.9.6"
+  resolved "https://registry.yarnpkg.com/ast-types/-/ast-types-0.9.6.tgz#102c9e9e9005d3e7e3829bf0c4fa24ee862ee9b9"
+
+async-each@^1.0.0:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/async-each/-/async-each-1.0.1.tgz#19d386a1d9edc6e7c1c85d388aedbcc56d33602d"
+
+async-each@~0.1.2, async-each@~0.1.3, async-each@~0.1.5:
+  version "0.1.6"
+  resolved "https://registry.yarnpkg.com/async-each/-/async-each-0.1.6.tgz#b67e99edcddf96541e44af56290cd7d5c6e70439"
+
+async-waterfall@~0.1.2:
+  version "0.1.5"
+  resolved "https://registry.yarnpkg.com/async-waterfall/-/async-waterfall-0.1.5.tgz#398bd48b0eac5d40ffbe400fe9e37a53ba966dae"
+
+async@1.x, async@^1.4.0:
+  version "1.5.2"
+  resolved "https://registry.yarnpkg.com/async/-/async-1.5.2.tgz#ec6a61ae56480c0c3cb241c95618e20892f9672a"
+
+async@^2.0.1:
+  version "2.3.0"
+  resolved "https://registry.yarnpkg.com/async/-/async-2.3.0.tgz#1013d1051047dd320fe24e494d5c66ecaf6147d9"
+  dependencies:
+    lodash "^4.14.0"
+
+async@~0.9.0:
+  version "0.9.2"
+  resolved "https://registry.yarnpkg.com/async/-/async-0.9.2.tgz#aea74d5e61c1f899613bf64bda66d4c78f2fd17d"
+
+asynckit@^0.4.0:
+  version "0.4.0"
+  resolved "https://registry.yarnpkg.com/asynckit/-/asynckit-0.4.0.tgz#c79ed97f7f34cb8f2ba1bc9790bcc366474b4b79"
+
+aws-sign2@~0.5.0:
+  version "0.5.0"
+  resolved "https://registry.yarnpkg.com/aws-sign2/-/aws-sign2-0.5.0.tgz#c57103f7a17fc037f02d7c2e64b602ea223f7d63"
+
+aws-sign2@~0.6.0:
+  version "0.6.0"
+  resolved "https://registry.yarnpkg.com/aws-sign2/-/aws-sign2-0.6.0.tgz#14342dd38dbcc94d0e5b87d763cd63612c0e794f"
+
+aws4@^1.2.1:
+  version "1.6.0"
+  resolved "https://registry.yarnpkg.com/aws4/-/aws4-1.6.0.tgz#83ef5ca860b2b32e4a0deedee8c771b9db57471e"
+
+babel-brunch@^5.1.2:
+  version "5.1.2"
+  resolved "https://registry.yarnpkg.com/babel-brunch/-/babel-brunch-5.1.2.tgz#4b476b9ea2b64dbfd8864ac3665932f634469bdc"
+  dependencies:
+    anymatch "^1.0.0"
+    babel-core "^5.0.0"
+
+babel-code-frame@^6.22.0:
+  version "6.22.0"
+  resolved "https://registry.yarnpkg.com/babel-code-frame/-/babel-code-frame-6.22.0.tgz#027620bee567a88c32561574e7fd0801d33118e4"
+  dependencies:
+    chalk "^1.1.0"
+    esutils "^2.0.2"
+    js-tokens "^3.0.0"
+
+babel-core@^5.0.0:
+  version "5.8.38"
+  resolved "https://registry.yarnpkg.com/babel-core/-/babel-core-5.8.38.tgz#1fcaee79d7e61b750b00b8e54f6dfc9d0af86558"
+  dependencies:
+    babel-plugin-constant-folding "^1.0.1"
+    babel-plugin-dead-code-elimination "^1.0.2"
+    babel-plugin-eval "^1.0.1"
+    babel-plugin-inline-environment-variables "^1.0.1"
+    babel-plugin-jscript "^1.0.4"
+    babel-plugin-member-expression-literals "^1.0.1"
+    babel-plugin-property-literals "^1.0.1"
+    babel-plugin-proto-to-assign "^1.0.3"
+    babel-plugin-react-constant-elements "^1.0.3"
+    babel-plugin-react-display-name "^1.0.3"
+    babel-plugin-remove-console "^1.0.1"
+    babel-plugin-remove-debugger "^1.0.1"
+    babel-plugin-runtime "^1.0.7"
+    babel-plugin-undeclared-variables-check "^1.0.2"
+    babel-plugin-undefined-to-void "^1.1.6"
+    babylon "^5.8.38"
+    bluebird "^2.9.33"
+    chalk "^1.0.0"
+    convert-source-map "^1.1.0"
+    core-js "^1.0.0"
+    debug "^2.1.1"
+    detect-indent "^3.0.0"
+    esutils "^2.0.0"
+    fs-readdir-recursive "^0.1.0"
+    globals "^6.4.0"
+    home-or-tmp "^1.0.0"
+    is-integer "^1.0.4"
+    js-tokens "1.0.1"
+    json5 "^0.4.0"
+    lodash "^3.10.0"
+    minimatch "^2.0.3"
+    output-file-sync "^1.1.0"
+    path-exists "^1.0.0"
+    path-is-absolute "^1.0.0"
+    private "^0.1.6"
+    regenerator "0.8.40"
+    regexpu "^1.3.0"
+    repeating "^1.1.2"
+    resolve "^1.1.6"
+    shebang-regex "^1.0.0"
+    slash "^1.0.0"
+    source-map "^0.5.0"
+    source-map-support "^0.2.10"
+    to-fast-properties "^1.0.0"
+    trim-right "^1.0.0"
+    try-resolve "^1.0.0"
+
+babel-core@^6.0.0, babel-core@^6.24.1:
+  version "6.24.1"
+  resolved "https://registry.yarnpkg.com/babel-core/-/babel-core-6.24.1.tgz#8c428564dce1e1f41fb337ec34f4c3b022b5ad83"
+  dependencies:
+    babel-code-frame "^6.22.0"
+    babel-generator "^6.24.1"
+    babel-helpers "^6.24.1"
+    babel-messages "^6.23.0"
+    babel-register "^6.24.1"
+    babel-runtime "^6.22.0"
+    babel-template "^6.24.1"
+    babel-traverse "^6.24.1"
+    babel-types "^6.24.1"
+    babylon "^6.11.0"
+    convert-source-map "^1.1.0"
+    debug "^2.1.1"
+    json5 "^0.5.0"
+    lodash "^4.2.0"
+    minimatch "^3.0.2"
+    path-is-absolute "^1.0.0"
+    private "^0.1.6"
+    slash "^1.0.0"
+    source-map "^0.5.0"
+
+babel-generator@^6.24.1:
+  version "6.24.1"
+  resolved "https://registry.yarnpkg.com/babel-generator/-/babel-generator-6.24.1.tgz#e715f486c58ded25649d888944d52aa07c5d9497"
+  dependencies:
+    babel-messages "^6.23.0"
+    babel-runtime "^6.22.0"
+    babel-types "^6.24.1"
+    detect-indent "^4.0.0"
+    jsesc "^1.3.0"
+    lodash "^4.2.0"
+    source-map "^0.5.0"
+    trim-right "^1.0.1"
+
+babel-helper-call-delegate@^6.24.1:
+  version "6.24.1"
+  resolved "https://registry.yarnpkg.com/babel-helper-call-delegate/-/babel-helper-call-delegate-6.24.1.tgz#ece6aacddc76e41c3461f88bfc575bd0daa2df8d"
+  dependencies:
+    babel-helper-hoist-variables "^6.24.1"
+    babel-runtime "^6.22.0"
+    babel-traverse "^6.24.1"
+    babel-types "^6.24.1"
+
+babel-helper-define-map@^6.24.1:
+  version "6.24.1"
+  resolved "https://registry.yarnpkg.com/babel-helper-define-map/-/babel-helper-define-map-6.24.1.tgz#7a9747f258d8947d32d515f6aa1c7bd02204a080"
+  dependencies:
+    babel-helper-function-name "^6.24.1"
+    babel-runtime "^6.22.0"
+    babel-types "^6.24.1"
+    lodash "^4.2.0"
+
+babel-helper-function-name@^6.24.1:
+  version "6.24.1"
+  resolved "https://registry.yarnpkg.com/babel-helper-function-name/-/babel-helper-function-name-6.24.1.tgz#d3475b8c03ed98242a25b48351ab18399d3580a9"
+  dependencies:
+    babel-helper-get-function-arity "^6.24.1"
+    babel-runtime "^6.22.0"
+    babel-template "^6.24.1"
+    babel-traverse "^6.24.1"
+    babel-types "^6.24.1"
+
+babel-helper-get-function-arity@^6.24.1:
+  version "6.24.1"
+  resolved "https://registry.yarnpkg.com/babel-helper-get-function-arity/-/babel-helper-get-function-arity-6.24.1.tgz#8f7782aa93407c41d3aa50908f89b031b1b6853d"
+  dependencies:
+    babel-runtime "^6.22.0"
+    babel-types "^6.24.1"
+
+babel-helper-hoist-variables@^6.24.1:
+  version "6.24.1"
+  resolved "https://registry.yarnpkg.com/babel-helper-hoist-variables/-/babel-helper-hoist-variables-6.24.1.tgz#1ecb27689c9d25513eadbc9914a73f5408be7a76"
+  dependencies:
+    babel-runtime "^6.22.0"
+    babel-types "^6.24.1"
+
+babel-helper-optimise-call-expression@^6.24.1:
+  version "6.24.1"
+  resolved "https://registry.yarnpkg.com/babel-helper-optimise-call-expression/-/babel-helper-optimise-call-expression-6.24.1.tgz#f7a13427ba9f73f8f4fa993c54a97882d1244257"
+  dependencies:
+    babel-runtime "^6.22.0"
+    babel-types "^6.24.1"
+
+babel-helper-regex@^6.24.1:
+  version "6.24.1"
+  resolved "https://registry.yarnpkg.com/babel-helper-regex/-/babel-helper-regex-6.24.1.tgz#d36e22fab1008d79d88648e32116868128456ce8"
+  dependencies:
+    babel-runtime "^6.22.0"
+    babel-types "^6.24.1"
+    lodash "^4.2.0"
+
+babel-helper-replace-supers@^6.24.1:
+  version "6.24.1"
+  resolved "https://registry.yarnpkg.com/babel-helper-replace-supers/-/babel-helper-replace-supers-6.24.1.tgz#bf6dbfe43938d17369a213ca8a8bf74b6a90ab1a"
+  dependencies:
+    babel-helper-optimise-call-expression "^6.24.1"
+    babel-messages "^6.23.0"
+    babel-runtime "^6.22.0"
+    babel-template "^6.24.1"
+    babel-traverse "^6.24.1"
+    babel-types "^6.24.1"
+
+babel-helpers@^6.24.1:
+  version "6.24.1"
+  resolved "https://registry.yarnpkg.com/babel-helpers/-/babel-helpers-6.24.1.tgz#3471de9caec388e5c850e597e58a26ddf37602b2"
+  dependencies:
+    babel-runtime "^6.22.0"
+    babel-template "^6.24.1"
+
+babel-messages@^6.23.0:
+  version "6.23.0"
+  resolved "https://registry.yarnpkg.com/babel-messages/-/babel-messages-6.23.0.tgz#f3cdf4703858035b2a2951c6ec5edf6c62f2630e"
+  dependencies:
+    babel-runtime "^6.22.0"
+
+babel-plugin-check-es2015-constants@^6.22.0:
+  version "6.22.0"
+  resolved "https://registry.yarnpkg.com/babel-plugin-check-es2015-constants/-/babel-plugin-check-es2015-constants-6.22.0.tgz#35157b101426fd2ffd3da3f75c7d1e91835bbf8a"
+  dependencies:
+    babel-runtime "^6.22.0"
+
+babel-plugin-constant-folding@^1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/babel-plugin-constant-folding/-/babel-plugin-constant-folding-1.0.1.tgz#8361d364c98e449c3692bdba51eff0844290aa8e"
+
+babel-plugin-dead-code-elimination@^1.0.2:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/babel-plugin-dead-code-elimination/-/babel-plugin-dead-code-elimination-1.0.2.tgz#5f7c451274dcd7cccdbfbb3e0b85dd28121f0f65"
+
+babel-plugin-eval@^1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/babel-plugin-eval/-/babel-plugin-eval-1.0.1.tgz#a2faed25ce6be69ade4bfec263f70169195950da"
+
+babel-plugin-inline-environment-variables@^1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/babel-plugin-inline-environment-variables/-/babel-plugin-inline-environment-variables-1.0.1.tgz#1f58ce91207ad6a826a8bf645fafe68ff5fe3ffe"
+
+babel-plugin-jscript@^1.0.4:
+  version "1.0.4"
+  resolved "https://registry.yarnpkg.com/babel-plugin-jscript/-/babel-plugin-jscript-1.0.4.tgz#8f342c38276e87a47d5fa0a8bd3d5eb6ccad8fcc"
+
+babel-plugin-member-expression-literals@^1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/babel-plugin-member-expression-literals/-/babel-plugin-member-expression-literals-1.0.1.tgz#cc5edb0faa8dc927170e74d6d1c02440021624d3"
+
+babel-plugin-property-literals@^1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/babel-plugin-property-literals/-/babel-plugin-property-literals-1.0.1.tgz#0252301900192980b1c118efea48ce93aab83336"
+
+babel-plugin-proto-to-assign@^1.0.3:
+  version "1.0.4"
+  resolved "https://registry.yarnpkg.com/babel-plugin-proto-to-assign/-/babel-plugin-proto-to-assign-1.0.4.tgz#c49e7afd02f577bc4da05ea2df002250cf7cd123"
+  dependencies:
+    lodash "^3.9.3"
+
+babel-plugin-react-constant-elements@^1.0.3:
+  version "1.0.3"
+  resolved "https://registry.yarnpkg.com/babel-plugin-react-constant-elements/-/babel-plugin-react-constant-elements-1.0.3.tgz#946736e8378429cbc349dcff62f51c143b34e35a"
+
+babel-plugin-react-display-name@^1.0.3:
+  version "1.0.3"
+  resolved "https://registry.yarnpkg.com/babel-plugin-react-display-name/-/babel-plugin-react-display-name-1.0.3.tgz#754fe38926e8424a4e7b15ab6ea6139dee0514fc"
+
+babel-plugin-remove-console@^1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/babel-plugin-remove-console/-/babel-plugin-remove-console-1.0.1.tgz#d8f24556c3a05005d42aaaafd27787f53ff013a7"
+
+babel-plugin-remove-debugger@^1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/babel-plugin-remove-debugger/-/babel-plugin-remove-debugger-1.0.1.tgz#fd2ea3cd61a428ad1f3b9c89882ff4293e8c14c7"
+
+babel-plugin-runtime@^1.0.7:
+  version "1.0.7"
+  resolved "https://registry.yarnpkg.com/babel-plugin-runtime/-/babel-plugin-runtime-1.0.7.tgz#bf7c7d966dd56ecd5c17fa1cb253c9acb7e54aaf"
+
+babel-plugin-transform-es2015-arrow-functions@^6.22.0:
+  version "6.22.0"
+  resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-arrow-functions/-/babel-plugin-transform-es2015-arrow-functions-6.22.0.tgz#452692cb711d5f79dc7f85e440ce41b9f244d221"
+  dependencies:
+    babel-runtime "^6.22.0"
+
+babel-plugin-transform-es2015-block-scoped-functions@^6.22.0:
+  version "6.22.0"
+  resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-block-scoped-functions/-/babel-plugin-transform-es2015-block-scoped-functions-6.22.0.tgz#bbc51b49f964d70cb8d8e0b94e820246ce3a6141"
+  dependencies:
+    babel-runtime "^6.22.0"
+
+babel-plugin-transform-es2015-block-scoping@^6.24.1:
+  version "6.24.1"
+  resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-block-scoping/-/babel-plugin-transform-es2015-block-scoping-6.24.1.tgz#76c295dc3a4741b1665adfd3167215dcff32a576"
+  dependencies:
+    babel-runtime "^6.22.0"
+    babel-template "^6.24.1"
+    babel-traverse "^6.24.1"
+    babel-types "^6.24.1"
+    lodash "^4.2.0"
+
+babel-plugin-transform-es2015-classes@^6.24.1:
+  version "6.24.1"
+  resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-classes/-/babel-plugin-transform-es2015-classes-6.24.1.tgz#5a4c58a50c9c9461e564b4b2a3bfabc97a2584db"
+  dependencies:
+    babel-helper-define-map "^6.24.1"
+    babel-helper-function-name "^6.24.1"
+    babel-helper-optimise-call-expression "^6.24.1"
+    babel-helper-replace-supers "^6.24.1"
+    babel-messages "^6.23.0"
+    babel-runtime "^6.22.0"
+    babel-template "^6.24.1"
+    babel-traverse "^6.24.1"
+    babel-types "^6.24.1"
+
+babel-plugin-transform-es2015-computed-properties@^6.24.1:
+  version "6.24.1"
+  resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-computed-properties/-/babel-plugin-transform-es2015-computed-properties-6.24.1.tgz#6fe2a8d16895d5634f4cd999b6d3480a308159b3"
+  dependencies:
+    babel-runtime "^6.22.0"
+    babel-template "^6.24.1"
+
+babel-plugin-transform-es2015-destructuring@^6.22.0:
+  version "6.23.0"
+  resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-destructuring/-/babel-plugin-transform-es2015-destructuring-6.23.0.tgz#997bb1f1ab967f682d2b0876fe358d60e765c56d"
+  dependencies:
+    babel-runtime "^6.22.0"
+
+babel-plugin-transform-es2015-duplicate-keys@^6.24.1:
+  version "6.24.1"
+  resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-duplicate-keys/-/babel-plugin-transform-es2015-duplicate-keys-6.24.1.tgz#73eb3d310ca969e3ef9ec91c53741a6f1576423e"
+  dependencies:
+    babel-runtime "^6.22.0"
+    babel-types "^6.24.1"
+
+babel-plugin-transform-es2015-for-of@^6.22.0:
+  version "6.23.0"
+  resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-for-of/-/babel-plugin-transform-es2015-for-of-6.23.0.tgz#f47c95b2b613df1d3ecc2fdb7573623c75248691"
+  dependencies:
+    babel-runtime "^6.22.0"
+
+babel-plugin-transform-es2015-function-name@^6.24.1:
+  version "6.24.1"
+  resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-function-name/-/babel-plugin-transform-es2015-function-name-6.24.1.tgz#834c89853bc36b1af0f3a4c5dbaa94fd8eacaa8b"
+  dependencies:
+    babel-helper-function-name "^6.24.1"
+    babel-runtime "^6.22.0"
+    babel-types "^6.24.1"
+
+babel-plugin-transform-es2015-literals@^6.22.0:
+  version "6.22.0"
+  resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-literals/-/babel-plugin-transform-es2015-literals-6.22.0.tgz#4f54a02d6cd66cf915280019a31d31925377ca2e"
+  dependencies:
+    babel-runtime "^6.22.0"
+
+babel-plugin-transform-es2015-modules-amd@^6.24.1:
+  version "6.24.1"
+  resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-modules-amd/-/babel-plugin-transform-es2015-modules-amd-6.24.1.tgz#3b3e54017239842d6d19c3011c4bd2f00a00d154"
+  dependencies:
+    babel-plugin-transform-es2015-modules-commonjs "^6.24.1"
+    babel-runtime "^6.22.0"
+    babel-template "^6.24.1"
+
+babel-plugin-transform-es2015-modules-commonjs@^6.24.1:
+  version "6.24.1"
+  resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-modules-commonjs/-/babel-plugin-transform-es2015-modules-commonjs-6.24.1.tgz#d3e310b40ef664a36622200097c6d440298f2bfe"
+  dependencies:
+    babel-plugin-transform-strict-mode "^6.24.1"
+    babel-runtime "^6.22.0"
+    babel-template "^6.24.1"
+    babel-types "^6.24.1"
+
+babel-plugin-transform-es2015-modules-systemjs@^6.24.1:
+  version "6.24.1"
+  resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-modules-systemjs/-/babel-plugin-transform-es2015-modules-systemjs-6.24.1.tgz#ff89a142b9119a906195f5f106ecf305d9407d23"
+  dependencies:
+    babel-helper-hoist-variables "^6.24.1"
+    babel-runtime "^6.22.0"
+    babel-template "^6.24.1"
+
+babel-plugin-transform-es2015-modules-umd@^6.24.1:
+  version "6.24.1"
+  resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-modules-umd/-/babel-plugin-transform-es2015-modules-umd-6.24.1.tgz#ac997e6285cd18ed6176adb607d602344ad38468"
+  dependencies:
+    babel-plugin-transform-es2015-modules-amd "^6.24.1"
+    babel-runtime "^6.22.0"
+    babel-template "^6.24.1"
+
+babel-plugin-transform-es2015-object-super@^6.24.1:
+  version "6.24.1"
+  resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-object-super/-/babel-plugin-transform-es2015-object-super-6.24.1.tgz#24cef69ae21cb83a7f8603dad021f572eb278f8d"
+  dependencies:
+    babel-helper-replace-supers "^6.24.1"
+    babel-runtime "^6.22.0"
+
+babel-plugin-transform-es2015-parameters@^6.24.1:
+  version "6.24.1"
+  resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-parameters/-/babel-plugin-transform-es2015-parameters-6.24.1.tgz#57ac351ab49caf14a97cd13b09f66fdf0a625f2b"
+  dependencies:
+    babel-helper-call-delegate "^6.24.1"
+    babel-helper-get-function-arity "^6.24.1"
+    babel-runtime "^6.22.0"
+    babel-template "^6.24.1"
+    babel-traverse "^6.24.1"
+    babel-types "^6.24.1"
+
+babel-plugin-transform-es2015-shorthand-properties@^6.24.1:
+  version "6.24.1"
+  resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-shorthand-properties/-/babel-plugin-transform-es2015-shorthand-properties-6.24.1.tgz#24f875d6721c87661bbd99a4622e51f14de38aa0"
+  dependencies:
+    babel-runtime "^6.22.0"
+    babel-types "^6.24.1"
+
+babel-plugin-transform-es2015-spread@^6.22.0:
+  version "6.22.0"
+  resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-spread/-/babel-plugin-transform-es2015-spread-6.22.0.tgz#d6d68a99f89aedc4536c81a542e8dd9f1746f8d1"
+  dependencies:
+    babel-runtime "^6.22.0"
+
+babel-plugin-transform-es2015-sticky-regex@^6.24.1:
+  version "6.24.1"
+  resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-sticky-regex/-/babel-plugin-transform-es2015-sticky-regex-6.24.1.tgz#00c1cdb1aca71112cdf0cf6126c2ed6b457ccdbc"
+  dependencies:
+    babel-helper-regex "^6.24.1"
+    babel-runtime "^6.22.0"
+    babel-types "^6.24.1"
+
+babel-plugin-transform-es2015-template-literals@^6.22.0:
+  version "6.22.0"
+  resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-template-literals/-/babel-plugin-transform-es2015-template-literals-6.22.0.tgz#a84b3450f7e9f8f1f6839d6d687da84bb1236d8d"
+  dependencies:
+    babel-runtime "^6.22.0"
+
+babel-plugin-transform-es2015-typeof-symbol@^6.22.0:
+  version "6.23.0"
+  resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-typeof-symbol/-/babel-plugin-transform-es2015-typeof-symbol-6.23.0.tgz#dec09f1cddff94b52ac73d505c84df59dcceb372"
+  dependencies:
+    babel-runtime "^6.22.0"
+
+babel-plugin-transform-es2015-unicode-regex@^6.24.1:
+  version "6.24.1"
+  resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-unicode-regex/-/babel-plugin-transform-es2015-unicode-regex-6.24.1.tgz#d38b12f42ea7323f729387f18a7c5ae1faeb35e9"
+  dependencies:
+    babel-helper-regex "^6.24.1"
+    babel-runtime "^6.22.0"
+    regexpu-core "^2.0.0"
+
+babel-plugin-transform-regenerator@^6.24.1:
+  version "6.24.1"
+  resolved "https://registry.yarnpkg.com/babel-plugin-transform-regenerator/-/babel-plugin-transform-regenerator-6.24.1.tgz#b8da305ad43c3c99b4848e4fe4037b770d23c418"
+  dependencies:
+    regenerator-transform "0.9.11"
+
+babel-plugin-transform-strict-mode@^6.24.1:
+  version "6.24.1"
+  resolved "https://registry.yarnpkg.com/babel-plugin-transform-strict-mode/-/babel-plugin-transform-strict-mode-6.24.1.tgz#d5faf7aa578a65bbe591cf5edae04a0c67020758"
+  dependencies:
+    babel-runtime "^6.22.0"
+    babel-types "^6.24.1"
+
+babel-plugin-undeclared-variables-check@^1.0.2:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/babel-plugin-undeclared-variables-check/-/babel-plugin-undeclared-variables-check-1.0.2.tgz#5cf1aa539d813ff64e99641290af620965f65dee"
+  dependencies:
+    leven "^1.0.2"
+
+babel-plugin-undefined-to-void@^1.1.6:
+  version "1.1.6"
+  resolved "https://registry.yarnpkg.com/babel-plugin-undefined-to-void/-/babel-plugin-undefined-to-void-1.1.6.tgz#7f578ef8b78dfae6003385d8417a61eda06e2f81"
+
+babel-preset-es2015@^6.18.0:
+  version "6.24.1"
+  resolved "https://registry.yarnpkg.com/babel-preset-es2015/-/babel-preset-es2015-6.24.1.tgz#d44050d6bc2c9feea702aaf38d727a0210538939"
+  dependencies:
+    babel-plugin-check-es2015-constants "^6.22.0"
+    babel-plugin-transform-es2015-arrow-functions "^6.22.0"
+    babel-plugin-transform-es2015-block-scoped-functions "^6.22.0"
+    babel-plugin-transform-es2015-block-scoping "^6.24.1"
+    babel-plugin-transform-es2015-classes "^6.24.1"
+    babel-plugin-transform-es2015-computed-properties "^6.24.1"
+    babel-plugin-transform-es2015-destructuring "^6.22.0"
+    babel-plugin-transform-es2015-duplicate-keys "^6.24.1"
+    babel-plugin-transform-es2015-for-of "^6.22.0"
+    babel-plugin-transform-es2015-function-name "^6.24.1"
+    babel-plugin-transform-es2015-literals "^6.22.0"
+    babel-plugin-transform-es2015-modules-amd "^6.24.1"
+    babel-plugin-transform-es2015-modules-commonjs "^6.24.1"
+    babel-plugin-transform-es2015-modules-systemjs "^6.24.1"
+    babel-plugin-transform-es2015-modules-umd "^6.24.1"
+    babel-plugin-transform-es2015-object-super "^6.24.1"
+    babel-plugin-transform-es2015-parameters "^6.24.1"
+    babel-plugin-transform-es2015-shorthand-properties "^6.24.1"
+    babel-plugin-transform-es2015-spread "^6.22.0"
+    babel-plugin-transform-es2015-sticky-regex "^6.24.1"
+    babel-plugin-transform-es2015-template-literals "^6.22.0"
+    babel-plugin-transform-es2015-typeof-symbol "^6.22.0"
+    babel-plugin-transform-es2015-unicode-regex "^6.24.1"
+    babel-plugin-transform-regenerator "^6.24.1"
+
+babel-register@^6.24.1:
+  version "6.24.1"
+  resolved "https://registry.yarnpkg.com/babel-register/-/babel-register-6.24.1.tgz#7e10e13a2f71065bdfad5a1787ba45bca6ded75f"
+  dependencies:
+    babel-core "^6.24.1"
+    babel-runtime "^6.22.0"
+    core-js "^2.4.0"
+    home-or-tmp "^2.0.0"
+    lodash "^4.2.0"
+    mkdirp "^0.5.1"
+    source-map-support "^0.4.2"
+
+babel-runtime@^6.18.0, babel-runtime@^6.22.0:
+  version "6.23.0"
+  resolved "https://registry.yarnpkg.com/babel-runtime/-/babel-runtime-6.23.0.tgz#0a9489f144de70efb3ce4300accdb329e2fc543b"
+  dependencies:
+    core-js "^2.4.0"
+    regenerator-runtime "^0.10.0"
+
+babel-template@^6.24.1:
+  version "6.24.1"
+  resolved "https://registry.yarnpkg.com/babel-template/-/babel-template-6.24.1.tgz#04ae514f1f93b3a2537f2a0f60a5a45fb8308333"
+  dependencies:
+    babel-runtime "^6.22.0"
+    babel-traverse "^6.24.1"
+    babel-types "^6.24.1"
+    babylon "^6.11.0"
+    lodash "^4.2.0"
+
+babel-traverse@^6.24.1:
+  version "6.24.1"
+  resolved "https://registry.yarnpkg.com/babel-traverse/-/babel-traverse-6.24.1.tgz#ab36673fd356f9a0948659e7b338d5feadb31695"
+  dependencies:
+    babel-code-frame "^6.22.0"
+    babel-messages "^6.23.0"
+    babel-runtime "^6.22.0"
+    babel-types "^6.24.1"
+    babylon "^6.15.0"
+    debug "^2.2.0"
+    globals "^9.0.0"
+    invariant "^2.2.0"
+    lodash "^4.2.0"
+
+babel-types@^6.19.0, babel-types@^6.24.1:
+  version "6.24.1"
+  resolved "https://registry.yarnpkg.com/babel-types/-/babel-types-6.24.1.tgz#a136879dc15b3606bda0d90c1fc74304c2ff0975"
+  dependencies:
+    babel-runtime "^6.22.0"
+    esutils "^2.0.2"
+    lodash "^4.2.0"
+    to-fast-properties "^1.0.1"
+
+babel@^6.5.2:
+  version "6.23.0"
+  resolved "https://registry.yarnpkg.com/babel/-/babel-6.23.0.tgz#d0d1e7d803e974765beea3232d4e153c0efb90f4"
+
+babylon@^5.8.38:
+  version "5.8.38"
+  resolved "https://registry.yarnpkg.com/babylon/-/babylon-5.8.38.tgz#ec9b120b11bf6ccd4173a18bf217e60b79859ffd"
+
+babylon@^6.11.0, babylon@^6.15.0:
+  version "6.17.0"
+  resolved "https://registry.yarnpkg.com/babylon/-/babylon-6.17.0.tgz#37da948878488b9c4e3c4038893fa3314b3fc932"
+
+backo2@1.0.2:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/backo2/-/backo2-1.0.2.tgz#31ab1ac8b129363463e35b3ebb69f4dfcfba7947"
+
+balanced-match@^0.4.1:
+  version "0.4.2"
+  resolved "https://registry.yarnpkg.com/balanced-match/-/balanced-match-0.4.2.tgz#cb3f3e3c732dc0f01ee70b403f302e61d7709838"
+
+base64-arraybuffer@0.1.5:
+  version "0.1.5"
+  resolved "https://registry.yarnpkg.com/base64-arraybuffer/-/base64-arraybuffer-0.1.5.tgz#73926771923b5a19747ad666aa5cd4bf9c6e9ce8"
+
+base64id@1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/base64id/-/base64id-1.0.0.tgz#47688cb99bb6804f0e06d3e763b1c32e57d8e6b6"
+
+batch@0.2.1:
+  version "0.2.1"
+  resolved "https://registry.yarnpkg.com/batch/-/batch-0.2.1.tgz#4463997bb4d5fd1c7a011548813e52aa189c2c79"
+
+bcrypt-pbkdf@^1.0.0:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/bcrypt-pbkdf/-/bcrypt-pbkdf-1.0.1.tgz#63bc5dcb61331b92bc05fd528953c33462a06f8d"
+  dependencies:
+    tweetnacl "^0.14.3"
+
+better-assert@~1.0.0:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/better-assert/-/better-assert-1.0.2.tgz#40866b9e1b9e0b55b481894311e68faffaebc522"
+  dependencies:
+    callsite "1.0.0"
+
+binary-extensions@^1.0.0:
+  version "1.8.0"
+  resolved "https://registry.yarnpkg.com/binary-extensions/-/binary-extensions-1.8.0.tgz#48ec8d16df4377eae5fa5884682480af4d95c774"
+
+bl@~1.0.0:
+  version "1.0.3"
+  resolved "https://registry.yarnpkg.com/bl/-/bl-1.0.3.tgz#fc5421a28fd4226036c3b3891a66a25bc64d226e"
+  dependencies:
+    readable-stream "~2.0.5"
+
+blob@0.0.4:
+  version "0.0.4"
+  resolved "https://registry.yarnpkg.com/blob/-/blob-0.0.4.tgz#bcf13052ca54463f30f9fc7e95b9a47630a94921"
+
+block-stream@*:
+  version "0.0.9"
+  resolved "https://registry.yarnpkg.com/block-stream/-/block-stream-0.0.9.tgz#13ebfe778a03205cfe03751481ebb4b3300c126a"
+  dependencies:
+    inherits "~2.0.0"
+
+bluebird@^2.9.33:
+  version "2.11.0"
+  resolved "https://registry.yarnpkg.com/bluebird/-/bluebird-2.11.0.tgz#534b9033c022c9579c56ba3b3e5a5caafbb650e1"
+
+bluebird@^3.3.0:
+  version "3.5.0"
+  resolved "https://registry.yarnpkg.com/bluebird/-/bluebird-3.5.0.tgz#791420d7f551eea2897453a8a77653f96606d67c"
+
+body-parser@^1.16.1:
+  version "1.17.1"
+  resolved "https://registry.yarnpkg.com/body-parser/-/body-parser-1.17.1.tgz#75b3bc98ddd6e7e0d8ffe750dfaca5c66993fa47"
+  dependencies:
+    bytes "2.4.0"
+    content-type "~1.0.2"
+    debug "2.6.1"
+    depd "~1.1.0"
+    http-errors "~1.6.1"
+    iconv-lite "0.4.15"
+    on-finished "~2.3.0"
+    qs "6.4.0"
+    raw-body "~2.2.0"
+    type-is "~1.6.14"
+
+boom@0.4.x:
+  version "0.4.2"
+  resolved "https://registry.yarnpkg.com/boom/-/boom-0.4.2.tgz#7a636e9ded4efcefb19cef4947a3c67dfaee911b"
+  dependencies:
+    hoek "0.9.x"
+
+boom@2.x.x:
+  version "2.10.1"
+  resolved "https://registry.yarnpkg.com/boom/-/boom-2.10.1.tgz#39c8918ceff5799f83f9492a848f625add0c766f"
+  dependencies:
+    hoek "2.x.x"
+
+brace-expansion@^1.0.0:
+  version "1.1.7"
+  resolved "https://registry.yarnpkg.com/brace-expansion/-/brace-expansion-1.1.7.tgz#3effc3c50e000531fb720eaff80f0ae8ef23cf59"
+  dependencies:
+    balanced-match "^0.4.1"
+    concat-map "0.0.1"
+
+braces@^0.1.2:
+  version "0.1.5"
+  resolved "https://registry.yarnpkg.com/braces/-/braces-0.1.5.tgz#c085711085291d8b75fdd74eab0f8597280711e6"
+  dependencies:
+    expand-range "^0.1.0"
+
+braces@^1.8.2:
+  version "1.8.5"
+  resolved "https://registry.yarnpkg.com/braces/-/braces-1.8.5.tgz#ba77962e12dff969d6b76711e914b737857bf6a7"
+  dependencies:
+    expand-range "^1.8.1"
+    preserve "^0.2.0"
+    repeat-element "^1.1.2"
+
+breakable@~1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/breakable/-/breakable-1.0.0.tgz#784a797915a38ead27bad456b5572cb4bbaa78c1"
+
+brunch@1.7.20:
+  version "1.7.20"
+  resolved "https://registry.yarnpkg.com/brunch/-/brunch-1.7.20.tgz#0d755255b2bc695b14d45742a176c2b1fc9d88c6"
+  dependencies:
+    anymatch "~1.0.0"
+    anysort "~1.0.0"
+    async-each "~0.1.2"
+    async-waterfall "~0.1.2"
+    chokidar "~0.12.0"
+    coffee-script "~1.8.0"
+    commander "~2.0.0"
+    commonjs-require-definition "~0.1.0"
+    debug "~0.7.2"
+    init-skeleton "~0.2.0"
+    loggy "~0.2.0"
+    mkdirp "~0.3.5"
+    ncp "~0.4.2"
+    pushserve "~0.1.6"
+    read-components "~0.6.0"
+    source-map "~0.1.35"
+
+buffer-crc32@0.2.1:
+  version "0.2.1"
+  resolved "https://registry.yarnpkg.com/buffer-crc32/-/buffer-crc32-0.2.1.tgz#be3e5382fc02b6d6324956ac1af98aa98b08534c"
+
+buffer-shims@~1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/buffer-shims/-/buffer-shims-1.0.0.tgz#9978ce317388c649ad8793028c3477ef044a8b51"
+
+builtin-modules@^1.0.0:
+  version "1.1.1"
+  resolved "https://registry.yarnpkg.com/builtin-modules/-/builtin-modules-1.1.1.tgz#270f076c5a72c02f5b65a47df94c5fe3a278892f"
+
+buster-core@=0.6.4:
+  version "0.6.4"
+  resolved "https://registry.yarnpkg.com/buster-core/-/buster-core-0.6.4.tgz#27bf6bad674244ea720f311d900a0ca1cb786050"
+
+buster-format@~0.5:
+  version "0.5.6"
+  resolved "https://registry.yarnpkg.com/buster-format/-/buster-format-0.5.6.tgz#2b86c322ecf5e1b0ae6e6e7905ebfcf387d2ab95"
+  dependencies:
+    buster-core "=0.6.4"
+
+bytes@0.2.0:
+  version "0.2.0"
+  resolved "https://registry.yarnpkg.com/bytes/-/bytes-0.2.0.tgz#aad33ec14e3dc2ca74e8e7d451f9ba053ad4f7a0"
+
+bytes@2.4.0:
+  version "2.4.0"
+  resolved "https://registry.yarnpkg.com/bytes/-/bytes-2.4.0.tgz#7d97196f9d5baf7f6935e25985549edd2a6c2339"
+
+callsite@1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/callsite/-/callsite-1.0.0.tgz#280398e5d664bd74038b6f0905153e6e8af1bc20"
+
+camelcase-keys@^2.0.0:
+  version "2.1.0"
+  resolved "https://registry.yarnpkg.com/camelcase-keys/-/camelcase-keys-2.1.0.tgz#308beeaffdf28119051efa1d932213c91b8f92e7"
+  dependencies:
+    camelcase "^2.0.0"
+    map-obj "^1.0.0"
+
+camelcase@^1.0.2, camelcase@^1.2.1:
+  version "1.2.1"
+  resolved "https://registry.yarnpkg.com/camelcase/-/camelcase-1.2.1.tgz#9bb5304d2e0b56698b2c758b08a3eaa9daa58a39"
+
+camelcase@^2.0.0:
+  version "2.1.1"
+  resolved "https://registry.yarnpkg.com/camelcase/-/camelcase-2.1.1.tgz#7c1d16d679a1bbe59ca02cacecfb011e201f5a1f"
+
+caseless@~0.11.0:
+  version "0.11.0"
+  resolved "https://registry.yarnpkg.com/caseless/-/caseless-0.11.0.tgz#715b96ea9841593cc33067923f5ec60ebda4f7d7"
+
+caseless@~0.12.0:
+  version "0.12.0"
+  resolved "https://registry.yarnpkg.com/caseless/-/caseless-0.12.0.tgz#1b681c21ff84033c826543090689420d187151dc"
+
+center-align@^0.1.1:
+  version "0.1.3"
+  resolved "https://registry.yarnpkg.com/center-align/-/center-align-0.1.3.tgz#aa0d32629b6ee972200411cbd4461c907bc2b7ad"
+  dependencies:
+    align-text "^0.1.3"
+    lazy-cache "^1.0.3"
+
+chai@~3.5.0:
+  version "3.5.0"
+  resolved "https://registry.yarnpkg.com/chai/-/chai-3.5.0.tgz#4d02637b067fe958bdbfdd3a40ec56fef7373247"
+  dependencies:
+    assertion-error "^1.0.1"
+    deep-eql "^0.1.3"
+    type-detect "^1.0.0"
+
+chalk@^1.0.0, chalk@^1.1.0, chalk@^1.1.1:
+  version "1.1.3"
+  resolved "https://registry.yarnpkg.com/chalk/-/chalk-1.1.3.tgz#a8115c55e4a702fe4d150abd3872822a7e09fc98"
+  dependencies:
+    ansi-styles "^2.2.1"
+    escape-string-regexp "^1.0.2"
+    has-ansi "^2.0.0"
+    strip-ansi "^3.0.0"
+    supports-color "^2.0.0"
+
+chokidar@^1.4.1:
+  version "1.6.1"
+  resolved "https://registry.yarnpkg.com/chokidar/-/chokidar-1.6.1.tgz#2f4447ab5e96e50fb3d789fd90d4c72e0e4c70c2"
+  dependencies:
+    anymatch "^1.3.0"
+    async-each "^1.0.0"
+    glob-parent "^2.0.0"
+    inherits "^2.0.1"
+    is-binary-path "^1.0.0"
+    is-glob "^2.0.0"
+    path-is-absolute "^1.0.0"
+    readdirp "^2.0.0"
+  optionalDependencies:
+    fsevents "^1.0.0"
+
+chokidar@~0.12.0:
+  version "0.12.6"
+  resolved "https://registry.yarnpkg.com/chokidar/-/chokidar-0.12.6.tgz#be204f5b9634e009311256e5d6e8e0e508284d2f"
+  dependencies:
+    async-each "~0.1.5"
+    readdirp "~1.3.0"
+  optionalDependencies:
+    fsevents "~0.3.1"
+
+"clean-css-brunch@>= 1.0 < 1.5":
+  version "1.4.0"
+  resolved "https://registry.yarnpkg.com/clean-css-brunch/-/clean-css-brunch-1.4.0.tgz#7df42f1c2bc7de4eb9db1931c96061ced54e0bd9"
+  dependencies:
+    clean-css "0.8.x"
+    coffee-script "1.3.3"
+
+clean-css@0.8.x:
+  version "0.8.3"
+  resolved "https://registry.yarnpkg.com/clean-css/-/clean-css-0.8.3.tgz#13bca09d4aaa8b0e73e4805530e8ba3dc76c22d0"
+  dependencies:
+    optimist "0.3.x"
+
+cliui@^2.1.0:
+  version "2.1.0"
+  resolved "https://registry.yarnpkg.com/cliui/-/cliui-2.1.0.tgz#4b475760ff80264c762c3a1719032e91c7fea0d1"
+  dependencies:
+    center-align "^0.1.1"
+    right-align "^0.1.1"
+    wordwrap "0.0.2"
+
+co@^4.6.0:
+  version "4.6.0"
+  resolved "https://registry.yarnpkg.com/co/-/co-4.6.0.tgz#6ea6bdf3d853ae54ccb8e47bfa0bf3f9031fb184"
+
+code-point-at@^1.0.0:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/code-point-at/-/code-point-at-1.1.0.tgz#0d070b4d043a5bea33a2f1a40e2edb3d9a4ccf77"
+
+coffee-script@1.3.3:
+  version "1.3.3"
+  resolved "https://registry.yarnpkg.com/coffee-script/-/coffee-script-1.3.3.tgz#150d6b4cb522894369efed6a2101c20bc7f4a4f4"
+
+coffee-script@1.4.0:
+  version "1.4.0"
+  resolved "https://registry.yarnpkg.com/coffee-script/-/coffee-script-1.4.0.tgz#5e3bc8aac26c01a8e27bf107722c5655f5ad7d36"
+
+coffee-script@~1.8.0:
+  version "1.8.0"
+  resolved "https://registry.yarnpkg.com/coffee-script/-/coffee-script-1.8.0.tgz#9c9f1d2b4a52a000ded15b659791703648263c1d"
+  dependencies:
+    mkdirp "~0.3.5"
+
+colors@^1.1.0:
+  version "1.1.2"
+  resolved "https://registry.yarnpkg.com/colors/-/colors-1.1.2.tgz#168a4701756b6a7f51a12ce0c97bfa28c084ed63"
+
+combine-lists@^1.0.0:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/combine-lists/-/combine-lists-1.0.1.tgz#458c07e09e0d900fc28b70a3fec2dacd1d2cb7f6"
+  dependencies:
+    lodash "^4.5.0"
+
+combined-stream@^1.0.5, combined-stream@~1.0.5:
+  version "1.0.5"
+  resolved "https://registry.yarnpkg.com/combined-stream/-/combined-stream-1.0.5.tgz#938370a57b4a51dea2c77c15d5c5fdf895164009"
+  dependencies:
+    delayed-stream "~1.0.0"
+
+combined-stream@~0.0.4:
+  version "0.0.7"
+  resolved "https://registry.yarnpkg.com/combined-stream/-/combined-stream-0.0.7.tgz#0137e657baa5a7541c57ac37ac5fc07d73b4dc1f"
+  dependencies:
+    delayed-stream "0.0.5"
+
+commander@0.6.1:
+  version "0.6.1"
+  resolved "https://registry.yarnpkg.com/commander/-/commander-0.6.1.tgz#fa68a14f6a945d54dbbe50d8cdb3320e9e3b1a06"
+
+commander@1.2.0:
+  version "1.2.0"
+  resolved "https://registry.yarnpkg.com/commander/-/commander-1.2.0.tgz#fd5713bfa153c7d6cc599378a5ab4c45c535029e"
+  dependencies:
+    keypress "0.1.x"
+
+commander@2.3.0:
+  version "2.3.0"
+  resolved "https://registry.yarnpkg.com/commander/-/commander-2.3.0.tgz#fd430e889832ec353b9acd1de217c11cb3eef873"
+
+commander@^2.5.0, commander@^2.8.1, commander@^2.9.0:
+  version "2.9.0"
+  resolved "https://registry.yarnpkg.com/commander/-/commander-2.9.0.tgz#9c99094176e12240cb22d6c5146098400fe0f7d4"
+  dependencies:
+    graceful-readlink ">= 1.0.0"
+
+commander@~2.0.0:
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/commander/-/commander-2.0.0.tgz#d1b86f901f8b64bd941bdeadaf924530393be928"
+
+commoner@~0.10.3:
+  version "0.10.8"
+  resolved "https://registry.yarnpkg.com/commoner/-/commoner-0.10.8.tgz#34fc3672cd24393e8bb47e70caa0293811f4f2c5"
+  dependencies:
+    commander "^2.5.0"
+    detective "^4.3.1"
+    glob "^5.0.15"
+    graceful-fs "^4.1.2"
+    iconv-lite "^0.4.5"
+    mkdirp "^0.5.0"
+    private "^0.1.6"
+    q "^1.1.2"
+    recast "^0.11.17"
+
+commonjs-require-definition@~0.1.0:
+  version "0.1.2"
+  resolved "https://registry.yarnpkg.com/commonjs-require-definition/-/commonjs-require-definition-0.1.2.tgz#93720e42b3383a00e4097f6a4a979f10f376dc2d"
+
+component-bind@1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/component-bind/-/component-bind-1.0.0.tgz#00c608ab7dcd93897c0009651b1d3a8e1e73bbd1"
+
+component-builder@~0.10.0:
+  version "0.10.1"
+  resolved "https://registry.yarnpkg.com/component-builder/-/component-builder-0.10.1.tgz#d29c7ab70241a678e3f8dbea4d1759c68b6f8f9b"
+  dependencies:
+    batch "0.2.1"
+    component-require "0.3.1"
+    cp "~0.1.0"
+    debug "*"
+    mkdirp "0.3.4"
+    string-to-js "0.0.1"
+
+component-emitter@1.1.2:
+  version "1.1.2"
+  resolved "https://registry.yarnpkg.com/component-emitter/-/component-emitter-1.1.2.tgz#296594f2753daa63996d2af08d15a95116c9aec3"
+
+component-emitter@1.2.1:
+  version "1.2.1"
+  resolved "https://registry.yarnpkg.com/component-emitter/-/component-emitter-1.2.1.tgz#137918d6d78283f7df7a6b7c5a63e140e69425e6"
+
+component-inherit@0.0.3:
+  version "0.0.3"
+  resolved "https://registry.yarnpkg.com/component-inherit/-/component-inherit-0.0.3.tgz#645fc4adf58b72b649d5cae65135619db26ff143"
+
+component-require@0.3.1:
+  version "0.3.1"
+  resolved "https://registry.yarnpkg.com/component-require/-/component-require-0.3.1.tgz#50a00e2e2cb0fe273ab4268fe20ae4804f35fe6d"
+
+concat-map@0.0.1:
+  version "0.0.1"
+  resolved "https://registry.yarnpkg.com/concat-map/-/concat-map-0.0.1.tgz#d8a96bd77fd68df7793a73036a3ba0d5405d477b"
+
+concat-stream@1.5.0:
+  version "1.5.0"
+  resolved "https://registry.yarnpkg.com/concat-stream/-/concat-stream-1.5.0.tgz#53f7d43c51c5e43f81c8fdd03321c631be68d611"
+  dependencies:
+    inherits "~2.0.1"
+    readable-stream "~2.0.0"
+    typedarray "~0.0.5"
+
+config-chain@~1.1.1:
+  version "1.1.11"
+  resolved "https://registry.yarnpkg.com/config-chain/-/config-chain-1.1.11.tgz#aba09747dfbe4c3e70e766a6e41586e1859fc6f2"
+  dependencies:
+    ini "^1.3.4"
+    proto-list "~1.2.1"
+
+connect-slashes@~0.0.9:
+  version "0.0.11"
+  resolved "https://registry.yarnpkg.com/connect-slashes/-/connect-slashes-0.0.11.tgz#4b44efae7599cc03ee20b24e9287272f41d62258"
+
+connect@1.x:
+  version "1.9.2"
+  resolved "https://registry.yarnpkg.com/connect/-/connect-1.9.2.tgz#42880a22e9438ae59a8add74e437f58ae8e52807"
+  dependencies:
+    formidable "1.0.x"
+    mime ">= 0.0.1"
+    qs ">= 0.4.0"
+
+connect@2.8.8:
+  version "2.8.8"
+  resolved "https://registry.yarnpkg.com/connect/-/connect-2.8.8.tgz#b9abf8caf0bd9773cb3dea29344119872582446d"
+  dependencies:
+    buffer-crc32 "0.2.1"
+    bytes "0.2.0"
+    cookie "0.1.0"
+    cookie-signature "1.0.1"
+    debug "*"
+    formidable "1.0.14"
+    fresh "0.2.0"
+    methods "0.0.1"
+    pause "0.0.1"
+    qs "0.6.5"
+    send "0.1.4"
+    uid2 "0.0.2"
+
+connect@^3.6.0:
+  version "3.6.1"
+  resolved "https://registry.yarnpkg.com/connect/-/connect-3.6.1.tgz#b7760693a74f0454face1d9378edb3f885b43227"
+  dependencies:
+    debug "2.6.3"
+    finalhandler "1.0.1"
+    parseurl "~1.3.1"
+    utils-merge "1.0.0"
+
+console-control-strings@^1.0.0, console-control-strings@~1.1.0:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/console-control-strings/-/console-control-strings-1.1.0.tgz#3d7cf4464db6446ea644bf4b39507f9851008e8e"
+
+content-type-parser@^1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/content-type-parser/-/content-type-parser-1.0.1.tgz#c3e56988c53c65127fb46d4032a3a900246fdc94"
+
+content-type@~1.0.2:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/content-type/-/content-type-1.0.2.tgz#b7d113aee7a8dd27bd21133c4dc2529df1721eed"
+
+convert-source-map@^1.1.0:
+  version "1.5.0"
+  resolved "https://registry.yarnpkg.com/convert-source-map/-/convert-source-map-1.5.0.tgz#9acd70851c6d5dfdd93d9282e5edf94a03ff46b5"
+
+cookie-signature@1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/cookie-signature/-/cookie-signature-1.0.1.tgz#44e072148af01e6e8e24afbf12690d68ae698ecb"
+
+cookie@0.1.0:
+  version "0.1.0"
+  resolved "https://registry.yarnpkg.com/cookie/-/cookie-0.1.0.tgz#90eb469ddce905c866de687efc43131d8801f9d0"
+
+cookie@0.3.1:
+  version "0.3.1"
+  resolved "https://registry.yarnpkg.com/cookie/-/cookie-0.3.1.tgz#e7e0a1f9ef43b4c8ba925c5c5a96e806d16873bb"
+
+core-js@^1.0.0:
+  version "1.2.7"
+  resolved "https://registry.yarnpkg.com/core-js/-/core-js-1.2.7.tgz#652294c14651db28fa93bd2d5ff2983a4f08c636"
+
+core-js@^2.2.0, core-js@^2.4.0:
+  version "2.4.1"
+  resolved "https://registry.yarnpkg.com/core-js/-/core-js-2.4.1.tgz#4de911e667b0eae9124e34254b53aea6fc618d3e"
+
+core-util-is@~1.0.0:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/core-util-is/-/core-util-is-1.0.2.tgz#b5fd54220aa2bc5ab57aab7140c940754503c1a7"
+
+cp@~0.1.0:
+  version "0.1.1"
+  resolved "https://registry.yarnpkg.com/cp/-/cp-0.1.1.tgz#3946a76c1a53ffe0e68593f341c124b336c1f06d"
+
+cryptiles@0.2.x:
+  version "0.2.2"
+  resolved "https://registry.yarnpkg.com/cryptiles/-/cryptiles-0.2.2.tgz#ed91ff1f17ad13d3748288594f8a48a0d26f325c"
+  dependencies:
+    boom "0.4.x"
+
+cryptiles@2.x.x:
+  version "2.0.5"
+  resolved "https://registry.yarnpkg.com/cryptiles/-/cryptiles-2.0.5.tgz#3bdfecdc608147c1c67202fa291e7dca59eaa3b8"
+  dependencies:
+    boom "2.x.x"
+
+"css-brunch@>= 1.0 < 1.5":
+  version "1.3.0"
+  resolved "https://registry.yarnpkg.com/css-brunch/-/css-brunch-1.3.0.tgz#82b07ee0ea8887e5f97d8ee6b9eda8abb939644b"
+  dependencies:
+    coffee-script "1.3.3"
+
+cssom@0.2.x:
+  version "0.2.5"
+  resolved "https://registry.yarnpkg.com/cssom/-/cssom-0.2.5.tgz#2682709b5902e7212df529116ff788cd5b254894"
+
+cssom@0.3.x, "cssom@>= 0.3.2 < 0.4.0":
+  version "0.3.2"
+  resolved "https://registry.yarnpkg.com/cssom/-/cssom-0.3.2.tgz#b8036170c79f07a90ff2f16e22284027a243848b"
+
+cssstyle@0.2.3:
+  version "0.2.3"
+  resolved "https://registry.yarnpkg.com/cssstyle/-/cssstyle-0.2.3.tgz#fc6a0cb8c72b99f10b17d2028adcdb9bcc73cb49"
+  dependencies:
+    cssom "0.2.x"
+
+"cssstyle@>= 0.2.37 < 0.3.0":
+  version "0.2.37"
+  resolved "https://registry.yarnpkg.com/cssstyle/-/cssstyle-0.2.37.tgz#541097234cb2513c83ceed3acddc27ff27987d54"
+  dependencies:
+    cssom "0.3.x"
+
+ctype@0.5.3:
+  version "0.5.3"
+  resolved "https://registry.yarnpkg.com/ctype/-/ctype-0.5.3.tgz#82c18c2461f74114ef16c135224ad0b9144ca12f"
+
+currently-unhandled@^0.4.1:
+  version "0.4.1"
+  resolved "https://registry.yarnpkg.com/currently-unhandled/-/currently-unhandled-0.4.1.tgz#988df33feab191ef799a61369dd76c17adf957ea"
+  dependencies:
+    array-find-index "^1.0.1"
+
+custom-event@~1.0.0:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/custom-event/-/custom-event-1.0.1.tgz#5d02a46850adf1b4a317946a3928fccb5bfd0425"
+
+dashdash@^1.12.0:
+  version "1.14.1"
+  resolved "https://registry.yarnpkg.com/dashdash/-/dashdash-1.14.1.tgz#853cfa0f7cbe2fed5de20326b8dd581035f6e2f0"
+  dependencies:
+    assert-plus "^1.0.0"
+
+date-utils@~1.2.17:
+  version "1.2.21"
+  resolved "https://registry.yarnpkg.com/date-utils/-/date-utils-1.2.21.tgz#61fb16cdc1274b3c9acaaffe9fc69df8720a2b64"
+
+dateformat@~1.0.6:
+  version "1.0.12"
+  resolved "https://registry.yarnpkg.com/dateformat/-/dateformat-1.0.12.tgz#9f124b67594c937ff706932e4a642cca8dbbfee9"
+  dependencies:
+    get-stdin "^4.0.1"
+    meow "^3.3.0"
+
+debug@*, debug@2.2.0, debug@^2.1.1:
+  version "2.2.0"
+  resolved "https://registry.yarnpkg.com/debug/-/debug-2.2.0.tgz#f87057e995b1a1f6ae6a4960664137bc56f039da"
+  dependencies:
+    ms "0.7.1"
+
+debug@0.7.4, debug@~0.7.2:
+  version "0.7.4"
+  resolved "https://registry.yarnpkg.com/debug/-/debug-0.7.4.tgz#06e1ea8082c2cb14e39806e22e2f6f757f92af39"
+
+debug@2.3.3:
+  version "2.3.3"
+  resolved "https://registry.yarnpkg.com/debug/-/debug-2.3.3.tgz#40c453e67e6e13c901ddec317af8986cda9eff8c"
+  dependencies:
+    ms "0.7.2"
+
+debug@2.6.1:
+  version "2.6.1"
+  resolved "https://registry.yarnpkg.com/debug/-/debug-2.6.1.tgz#79855090ba2c4e3115cc7d8769491d58f0491351"
+  dependencies:
+    ms "0.7.2"
+
+debug@2.6.3, debug@^2.2.0:
+  version "2.6.3"
+  resolved "https://registry.yarnpkg.com/debug/-/debug-2.6.3.tgz#0f7eb8c30965ec08c72accfa0130c8b79984141d"
+  dependencies:
+    ms "0.7.2"
+
+decamelize@^1.0.0, decamelize@^1.1.2:
+  version "1.2.0"
+  resolved "https://registry.yarnpkg.com/decamelize/-/decamelize-1.2.0.tgz#f6534d15148269b20352e7bee26f501f9a191290"
+
+deep-eql@^0.1.3:
+  version "0.1.3"
+  resolved "https://registry.yarnpkg.com/deep-eql/-/deep-eql-0.1.3.tgz#ef558acab8de25206cd713906d74e56930eb69f2"
+  dependencies:
+    type-detect "0.1.1"
+
+deep-extend@~0.4.0:
+  version "0.4.1"
+  resolved "https://registry.yarnpkg.com/deep-extend/-/deep-extend-0.4.1.tgz#efe4113d08085f4e6f9687759810f807469e2253"
+
+deep-is@~0.1.2, deep-is@~0.1.3:
+  version "0.1.3"
+  resolved "https://registry.yarnpkg.com/deep-is/-/deep-is-0.1.3.tgz#b369d6fb5dbc13eecf524f91b070feedc357cf34"
+
+defined@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/defined/-/defined-1.0.0.tgz#c98d9bcef75674188e110969151199e39b1fa693"
+
+defs@~1.1.0:
+  version "1.1.1"
+  resolved "https://registry.yarnpkg.com/defs/-/defs-1.1.1.tgz#b22609f2c7a11ba7a3db116805c139b1caffa9d2"
+  dependencies:
+    alter "~0.2.0"
+    ast-traverse "~0.1.1"
+    breakable "~1.0.0"
+    esprima-fb "~15001.1001.0-dev-harmony-fb"
+    simple-fmt "~0.1.0"
+    simple-is "~0.2.0"
+    stringmap "~0.2.2"
+    stringset "~0.2.1"
+    tryor "~0.1.2"
+    yargs "~3.27.0"
+
+delayed-stream@0.0.5:
+  version "0.0.5"
+  resolved "https://registry.yarnpkg.com/delayed-stream/-/delayed-stream-0.0.5.tgz#d4b1f43a93e8296dfe02694f4680bc37a313c73f"
+
+delayed-stream@~1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/delayed-stream/-/delayed-stream-1.0.0.tgz#df3ae199acadfb7d440aaae0b29e2272b24ec619"
+
+delegates@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/delegates/-/delegates-1.0.0.tgz#84c6e159b81904fdca59a0ef44cd870d31250f9a"
+
+depd@1.1.0, depd@~1.1.0:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/depd/-/depd-1.1.0.tgz#e1bd82c6aab6ced965b97b88b17ed3e528ca18c3"
+
+detect-indent@^3.0.0:
+  version "3.0.1"
+  resolved "https://registry.yarnpkg.com/detect-indent/-/detect-indent-3.0.1.tgz#9dc5e5ddbceef8325764b9451b02bc6d54084f75"
+  dependencies:
+    get-stdin "^4.0.1"
+    minimist "^1.1.0"
+    repeating "^1.1.0"
+
+detect-indent@^4.0.0:
+  version "4.0.0"
+  resolved "https://registry.yarnpkg.com/detect-indent/-/detect-indent-4.0.0.tgz#f76d064352cdf43a1cb6ce619c4ee3a9475de208"
+  dependencies:
+    repeating "^2.0.0"
+
+detective@^4.3.1:
+  version "4.5.0"
+  resolved "https://registry.yarnpkg.com/detective/-/detective-4.5.0.tgz#6e5a8c6b26e6c7a254b1c6b6d7490d98ec91edd1"
+  dependencies:
+    acorn "^4.0.3"
+    defined "^1.0.0"
+
+di@^0.0.1:
+  version "0.0.1"
+  resolved "https://registry.yarnpkg.com/di/-/di-0.0.1.tgz#806649326ceaa7caa3306d75d985ea2748ba913c"
+
+diff@1.4.0:
+  version "1.4.0"
+  resolved "https://registry.yarnpkg.com/diff/-/diff-1.4.0.tgz#7f28d2eb9ee7b15a97efd89ce63dcfdaa3ccbabf"
+
+dom-serialize@^2.2.0:
+  version "2.2.1"
+  resolved "https://registry.yarnpkg.com/dom-serialize/-/dom-serialize-2.2.1.tgz#562ae8999f44be5ea3076f5419dcd59eb43ac95b"
+  dependencies:
+    custom-event "~1.0.0"
+    ent "~2.2.0"
+    extend "^3.0.0"
+    void-elements "^2.0.0"
+
+ecc-jsbn@~0.1.1:
+  version "0.1.1"
+  resolved "https://registry.yarnpkg.com/ecc-jsbn/-/ecc-jsbn-0.1.1.tgz#0fc73a9ed5f0d53c38193398523ef7e543777505"
+  dependencies:
+    jsbn "~0.1.0"
+
+ee-first@1.1.1:
+  version "1.1.1"
+  resolved "https://registry.yarnpkg.com/ee-first/-/ee-first-1.1.1.tgz#590c61156b0ae2f4f0255732a158b266bc56b21d"
+
+ember-precompile-brunch@^0.1.1:
+  version "0.1.2"
+  resolved "https://registry.yarnpkg.com/ember-precompile-brunch/-/ember-precompile-brunch-0.1.2.tgz#54e73d5054a35afde79aa1c1b454f6ed0e37611c"
+  dependencies:
+    coffee-script "1.4.0"
+    jsdom "^9.0.0"
+
+ember-radio-button@0.1.2:
+  version "0.1.2"
+  resolved "https://registry.yarnpkg.com/ember-radio-button/-/ember-radio-button-0.1.2.tgz#6cbf20977ec2ea741ba361e5f4295b1d804b7c19"
+
+encodeurl@~1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/encodeurl/-/encodeurl-1.0.1.tgz#79e3d58655346909fe6f0f45a5de68103b294d20"
+
+engine.io-client@1.8.3:
+  version "1.8.3"
+  resolved "https://registry.yarnpkg.com/engine.io-client/-/engine.io-client-1.8.3.tgz#1798ed93451246453d4c6f635d7a201fe940d5ab"
+  dependencies:
+    component-emitter "1.2.1"
+    component-inherit "0.0.3"
+    debug "2.3.3"
+    engine.io-parser "1.3.2"
+    has-cors "1.1.0"
+    indexof "0.0.1"
+    parsejson "0.0.3"
+    parseqs "0.0.5"
+    parseuri "0.0.5"
+    ws "1.1.2"
+    xmlhttprequest-ssl "1.5.3"
+    yeast "0.1.2"
+
+engine.io-parser@1.3.2:
+  version "1.3.2"
+  resolved "https://registry.yarnpkg.com/engine.io-parser/-/engine.io-parser-1.3.2.tgz#937b079f0007d0893ec56d46cb220b8cb435220a"
+  dependencies:
+    after "0.8.2"
+    arraybuffer.slice "0.0.6"
+    base64-arraybuffer "0.1.5"
+    blob "0.0.4"
+    has-binary "0.1.7"
+    wtf-8 "1.0.0"
+
+engine.io@1.8.3:
+  version "1.8.3"
+  resolved "https://registry.yarnpkg.com/engine.io/-/engine.io-1.8.3.tgz#8de7f97895d20d39b85f88eeee777b2bd42b13d4"
+  dependencies:
+    accepts "1.3.3"
+    base64id "1.0.0"
+    cookie "0.3.1"
+    debug "2.3.3"
+    engine.io-parser "1.3.2"
+    ws "1.1.2"
+
+ent@~2.2.0:
+  version "2.2.0"
+  resolved "https://registry.yarnpkg.com/ent/-/ent-2.2.0.tgz#e964219325a21d05f44466a2f686ed6ce5f5dd1d"
+
+error-ex@^1.2.0:
+  version "1.3.1"
+  resolved "https://registry.yarnpkg.com/error-ex/-/error-ex-1.3.1.tgz#f855a86ce61adc4e8621c3cda21e7a7612c3a8dc"
+  dependencies:
+    is-arrayish "^0.2.1"
+
+es6-promise@~4.0.3:
+  version "4.0.5"
+  resolved "https://registry.yarnpkg.com/es6-promise/-/es6-promise-4.0.5.tgz#7882f30adde5b240ccfa7f7d78c548330951ae42"
+
+escape-html@~1.0.3:
+  version "1.0.3"
+  resolved "https://registry.yarnpkg.com/escape-html/-/escape-html-1.0.3.tgz#0258eae4d3d0c0974de1c169188ef0051d1d1988"
+
+escape-string-regexp@1.0.2, escape-string-regexp@^1.0.2:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/escape-string-regexp/-/escape-string-regexp-1.0.2.tgz#4dbc2fe674e71949caf3fb2695ce7f2dc1d9a8d1"
+
+escodegen@1.7.x:
+  version "1.7.1"
+  resolved "https://registry.yarnpkg.com/escodegen/-/escodegen-1.7.1.tgz#30ecfcf66ca98dc67cd2fd162abeb6eafa8ce6fc"
+  dependencies:
+    esprima "^1.2.2"
+    estraverse "^1.9.1"
+    esutils "^2.0.2"
+    optionator "^0.5.0"
+  optionalDependencies:
+    source-map "~0.2.0"
+
+escodegen@^1.6.1:
+  version "1.8.1"
+  resolved "https://registry.yarnpkg.com/escodegen/-/escodegen-1.8.1.tgz#5a5b53af4693110bebb0867aa3430dd3b70a1018"
+  dependencies:
+    esprima "^2.7.1"
+    estraverse "^1.9.1"
+    esutils "^2.0.2"
+    optionator "^0.8.1"
+  optionalDependencies:
+    source-map "~0.2.0"
+
+esprima-fb@~15001.1001.0-dev-harmony-fb:
+  version "15001.1001.0-dev-harmony-fb"
+  resolved "https://registry.yarnpkg.com/esprima-fb/-/esprima-fb-15001.1001.0-dev-harmony-fb.tgz#43beb57ec26e8cf237d3dd8b33e42533577f2659"
+
+esprima@1.2.x, esprima@^1.2.2:
+  version "1.2.5"
+  resolved "https://registry.yarnpkg.com/esprima/-/esprima-1.2.5.tgz#0993502feaf668138325756f30f9a51feeec11e9"
+
+esprima@2.5.x:
+  version "2.5.0"
+  resolved "https://registry.yarnpkg.com/esprima/-/esprima-2.5.0.tgz#f387a46fd344c1b1a39baf8c20bfb43b6d0058cc"
+
+esprima@^2.6.0, esprima@^2.7.1:
+  version "2.7.3"
+  resolved "https://registry.yarnpkg.com/esprima/-/esprima-2.7.3.tgz#96e3b70d5779f6ad49cd032673d1c312767ba581"
+
+esprima@^3.1.1, esprima@~3.1.0:
+  version "3.1.3"
+  resolved "https://registry.yarnpkg.com/esprima/-/esprima-3.1.3.tgz#fdca51cee6133895e3c88d535ce49dbff62a4633"
+
+estraverse@^1.9.1:
+  version "1.9.3"
+  resolved "https://registry.yarnpkg.com/estraverse/-/estraverse-1.9.3.tgz#af67f2dc922582415950926091a4005d29c9bb44"
+
+estraverse@~1.8.0:
+  version "1.8.0"
+  resolved "https://registry.yarnpkg.com/estraverse/-/estraverse-1.8.0.tgz#3f1264fb62c8500dbae5e4f73705cd576d6af428"
+
+esutils@^2.0.0, esutils@^2.0.2:
+  version "2.0.2"
+  resolved "https://registry.yarnpkg.com/esutils/-/esutils-2.0.2.tgz#0abf4f1caa5bcb1f7a9d8acc6dea4faaa04bac9b"
+
+eventemitter3@1.x.x:
+  version "1.2.0"
+  resolved "https://registry.yarnpkg.com/eventemitter3/-/eventemitter3-1.2.0.tgz#1c86991d816ad1e504750e73874224ecf3bec508"
+
+expand-braces@^0.1.1:
+  version "0.1.2"
+  resolved "https://registry.yarnpkg.com/expand-braces/-/expand-braces-0.1.2.tgz#488b1d1d2451cb3d3a6b192cfc030f44c5855fea"
+  dependencies:
+    array-slice "^0.2.3"
+    array-unique "^0.2.1"
+    braces "^0.1.2"
+
+expand-brackets@^0.1.4:
+  version "0.1.5"
+  resolved "https://registry.yarnpkg.com/expand-brackets/-/expand-brackets-0.1.5.tgz#df07284e342a807cd733ac5af72411e581d1177b"
+  dependencies:
+    is-posix-bracket "^0.1.0"
+
+expand-range@^0.1.0:
+  version "0.1.1"
+  resolved "https://registry.yarnpkg.com/expand-range/-/expand-range-0.1.1.tgz#4cb8eda0993ca56fa4f41fc42f3cbb4ccadff044"
+  dependencies:
+    is-number "^0.1.1"
+    repeat-string "^0.2.2"
+
+expand-range@^1.8.1:
+  version "1.8.2"
+  resolved "https://registry.yarnpkg.com/expand-range/-/expand-range-1.8.2.tgz#a299effd335fe2721ebae8e257ec79644fc85337"
+  dependencies:
+    fill-range "^2.1.0"
+
+express@2.5.8:
+  version "2.5.8"
+  resolved "https://registry.yarnpkg.com/express/-/express-2.5.8.tgz#f166b55d4e8c6d2307ef88ad1768209613f7452a"
+  dependencies:
+    connect "1.x"
+    mime "1.2.4"
+    mkdirp "0.3.0"
+    qs "0.4.x"
+
+express@~3.3.0:
+  version "3.3.8"
+  resolved "https://registry.yarnpkg.com/express/-/express-3.3.8.tgz#8e98ac30d81f4c95b85d71d2af6cf84f62ef19bd"
+  dependencies:
+    buffer-crc32 "0.2.1"
+    commander "1.2.0"
+    connect "2.8.8"
+    cookie "0.1.0"
+    cookie-signature "1.0.1"
+    debug "*"
+    fresh "0.2.0"
+    methods "0.0.1"
+    mkdirp "0.3.5"
+    range-parser "0.0.4"
+    send "0.1.4"
+
+extend@^3.0.0, extend@~3.0.0:
+  version "3.0.0"
+  resolved "https://registry.yarnpkg.com/extend/-/extend-3.0.0.tgz#5a474353b9f3353ddd8176dfd37b91c83a46f1d4"
+
+extglob@^0.3.1:
+  version "0.3.2"
+  resolved "https://registry.yarnpkg.com/extglob/-/extglob-0.3.2.tgz#2e18ff3d2f49ab2765cec9023f011daa8d8349a1"
+  dependencies:
+    is-extglob "^1.0.0"
+
+extract-zip@~1.5.0:
+  version "1.5.0"
+  resolved "https://registry.yarnpkg.com/extract-zip/-/extract-zip-1.5.0.tgz#92ccf6d81ef70a9fa4c1747114ccef6d8688a6c4"
+  dependencies:
+    concat-stream "1.5.0"
+    debug "0.7.4"
+    mkdirp "0.5.0"
+    yauzl "2.4.1"
+
+extsprintf@1.0.2:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/extsprintf/-/extsprintf-1.0.2.tgz#e1080e0658e300b06294990cc70e1502235fd550"
+
+fast-levenshtein@~1.0.0:
+  version "1.0.7"
+  resolved "https://registry.yarnpkg.com/fast-levenshtein/-/fast-levenshtein-1.0.7.tgz#0178dcdee023b92905193af0959e8a7639cfdcb9"
+
+fast-levenshtein@~2.0.4:
+  version "2.0.6"
+  resolved "https://registry.yarnpkg.com/fast-levenshtein/-/fast-levenshtein-2.0.6.tgz#3d8a5c66883a16a30ca8643e851f19baa7797917"
+
+fd-slicer@~1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/fd-slicer/-/fd-slicer-1.0.1.tgz#8b5bcbd9ec327c5041bf9ab023fd6750f1177e65"
+  dependencies:
+    pend "~1.2.0"
+
+filename-regex@^2.0.0:
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/filename-regex/-/filename-regex-2.0.0.tgz#996e3e80479b98b9897f15a8a58b3d084e926775"
+
+fileset@0.1.x:
+  version "0.1.8"
+  resolved "https://registry.yarnpkg.com/fileset/-/fileset-0.1.8.tgz#506b91a9396eaa7e32fb42a84077c7a0c736b741"
+  dependencies:
+    glob "3.x"
+    minimatch "0.x"
+
+fileset@0.2.x:
+  version "0.2.1"
+  resolved "https://registry.yarnpkg.com/fileset/-/fileset-0.2.1.tgz#588ef8973c6623b2a76df465105696b96aac8067"
+  dependencies:
+    glob "5.x"
+    minimatch "2.x"
+
+fill-range@^2.1.0:
+  version "2.2.3"
+  resolved "https://registry.yarnpkg.com/fill-range/-/fill-range-2.2.3.tgz#50b77dfd7e469bc7492470963699fe7a8485a723"
+  dependencies:
+    is-number "^2.1.0"
+    isobject "^2.0.0"
+    randomatic "^1.1.3"
+    repeat-element "^1.1.2"
+    repeat-string "^1.5.2"
+
+finalhandler@1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/finalhandler/-/finalhandler-1.0.1.tgz#bcd15d1689c0e5ed729b6f7f541a6df984117db8"
+  dependencies:
+    debug "2.6.3"
+    encodeurl "~1.0.1"
+    escape-html "~1.0.3"
+    on-finished "~2.3.0"
+    parseurl "~1.3.1"
+    statuses "~1.3.1"
+    unpipe "~1.0.0"
+
+find-up@^1.0.0:
+  version "1.1.2"
+  resolved "https://registry.yarnpkg.com/find-up/-/find-up-1.1.2.tgz#6b2e9822b1a2ce0a60ab64d610eccad53cb24d0f"
+  dependencies:
+    path-exists "^2.0.0"
+    pinkie-promise "^2.0.0"
+
+for-in@^1.0.1:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/for-in/-/for-in-1.0.2.tgz#81068d295a8142ec0ac726c6e2200c30fb6d5e80"
+
+for-own@^0.1.4:
+  version "0.1.5"
+  resolved "https://registry.yarnpkg.com/for-own/-/for-own-0.1.5.tgz#5265c681a4f294dabbf17c9509b6763aa84510ce"
+  dependencies:
+    for-in "^1.0.1"
+
+forever-agent@~0.5.0:
+  version "0.5.2"
+  resolved "https://registry.yarnpkg.com/forever-agent/-/forever-agent-0.5.2.tgz#6d0e09c4921f94a27f63d3b49c5feff1ea4c5130"
+
+forever-agent@~0.6.1:
+  version "0.6.1"
+  resolved "https://registry.yarnpkg.com/forever-agent/-/forever-agent-0.6.1.tgz#fbc71f0c41adeb37f96c577ad1ed42d8fdacca91"
+
+form-data@~0.1.0:
+  version "0.1.4"
+  resolved "https://registry.yarnpkg.com/form-data/-/form-data-0.1.4.tgz#91abd788aba9702b1aabfa8bc01031a2ac9e3b12"
+  dependencies:
+    async "~0.9.0"
+    combined-stream "~0.0.4"
+    mime "~1.2.11"
+
+form-data@~1.0.0-rc3:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/form-data/-/form-data-1.0.1.tgz#ae315db9a4907fa065502304a66d7733475ee37c"
+  dependencies:
+    async "^2.0.1"
+    combined-stream "^1.0.5"
+    mime-types "^2.1.11"
+
+form-data@~2.1.1:
+  version "2.1.4"
+  resolved "https://registry.yarnpkg.com/form-data/-/form-data-2.1.4.tgz#33c183acf193276ecaa98143a69e94bfee1750d1"
+  dependencies:
+    asynckit "^0.4.0"
+    combined-stream "^1.0.5"
+    mime-types "^2.1.12"
+
+formidable@1.0.14:
+  version "1.0.14"
+  resolved "https://registry.yarnpkg.com/formidable/-/formidable-1.0.14.tgz#2b3f4c411cbb5fdd695c44843e2a23514a43231a"
+
+formidable@1.0.x:
+  version "1.0.17"
+  resolved "https://registry.yarnpkg.com/formidable/-/formidable-1.0.17.tgz#ef5491490f9433b705faa77249c99029ae348559"
+
+fresh@0.2.0:
+  version "0.2.0"
+  resolved "https://registry.yarnpkg.com/fresh/-/fresh-0.2.0.tgz#bfd9402cf3df12c4a4c310c79f99a3dde13d34a7"
+
+fs-extra@~0.26.4:
+  version "0.26.7"
+  resolved "https://registry.yarnpkg.com/fs-extra/-/fs-extra-0.26.7.tgz#9ae1fdd94897798edab76d0918cf42d0c3184fa9"
+  dependencies:
+    graceful-fs "^4.1.2"
+    jsonfile "^2.1.0"
+    klaw "^1.0.0"
+    path-is-absolute "^1.0.0"
+    rimraf "^2.2.8"
+
+fs-extra@~0.6.3:
+  version "0.6.4"
+  resolved "https://registry.yarnpkg.com/fs-extra/-/fs-extra-0.6.4.tgz#f46f0c75b7841f8d200b3348cd4d691d5a099d15"
+  dependencies:
+    jsonfile "~1.0.1"
+    mkdirp "0.3.x"
+    ncp "~0.4.2"
+    rimraf "~2.2.0"
+
+fs-extra@~1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/fs-extra/-/fs-extra-1.0.0.tgz#cd3ce5f7e7cb6145883fcae3191e9877f8587950"
+  dependencies:
+    graceful-fs "^4.1.2"
+    jsonfile "^2.1.0"
+    klaw "^1.0.0"
+
+fs-readdir-recursive@^0.1.0:
+  version "0.1.2"
+  resolved "https://registry.yarnpkg.com/fs-readdir-recursive/-/fs-readdir-recursive-0.1.2.tgz#315b4fb8c1ca5b8c47defef319d073dad3568059"
+
+fs.realpath@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/fs.realpath/-/fs.realpath-1.0.0.tgz#1504ad2523158caa40db4a2787cb01411994ea4f"
+
+fsevents@^1.0.0:
+  version "1.1.1"
+  resolved "https://registry.yarnpkg.com/fsevents/-/fsevents-1.1.1.tgz#f19fd28f43eeaf761680e519a203c4d0b3d31aff"
+  dependencies:
+    nan "^2.3.0"
+    node-pre-gyp "^0.6.29"
+
+fsevents@~0.3.1:
+  version "0.3.8"
+  resolved "https://registry.yarnpkg.com/fsevents/-/fsevents-0.3.8.tgz#9992f1032c925c829554d0d59801dca0313a5356"
+  dependencies:
+    nan "^2.0.2"
+
+fstream-ignore@^1.0.5:
+  version "1.0.5"
+  resolved "https://registry.yarnpkg.com/fstream-ignore/-/fstream-ignore-1.0.5.tgz#9c31dae34767018fe1d249b24dada67d092da105"
+  dependencies:
+    fstream "^1.0.0"
+    inherits "2"
+    minimatch "^3.0.0"
+
+fstream@^1.0.0, fstream@^1.0.10, fstream@^1.0.2:
+  version "1.0.11"
+  resolved "https://registry.yarnpkg.com/fstream/-/fstream-1.0.11.tgz#5c1fb1f117477114f0632a0eb4b71b3cb0fd3171"
+  dependencies:
+    graceful-fs "^4.1.2"
+    inherits "~2.0.0"
+    mkdirp ">=0.5 0"
+    rimraf "2"
+
+gauge@~2.7.1:
+  version "2.7.3"
+  resolved "https://registry.yarnpkg.com/gauge/-/gauge-2.7.3.tgz#1c23855f962f17b3ad3d0dc7443f304542edfe09"
+  dependencies:
+    aproba "^1.0.3"
+    console-control-strings "^1.0.0"
+    has-unicode "^2.0.0"
+    object-assign "^4.1.0"
+    signal-exit "^3.0.0"
+    string-width "^1.0.1"
+    strip-ansi "^3.0.1"
+    wide-align "^1.1.0"
+
+generate-function@^2.0.0:
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/generate-function/-/generate-function-2.0.0.tgz#6858fe7c0969b7d4e9093337647ac79f60dfbe74"
+
+generate-object-property@^1.1.0:
+  version "1.2.0"
+  resolved "https://registry.yarnpkg.com/generate-object-property/-/generate-object-property-1.2.0.tgz#9c0e1c40308ce804f4783618b937fa88f99d50d0"
+  dependencies:
+    is-property "^1.0.0"
+
+get-stdin@^4.0.1:
+  version "4.0.1"
+  resolved "https://registry.yarnpkg.com/get-stdin/-/get-stdin-4.0.1.tgz#b968c6b0a04384324902e8bf1a5df32579a450fe"
+
+getpass@^0.1.1:
+  version "0.1.6"
+  resolved "https://registry.yarnpkg.com/getpass/-/getpass-0.1.6.tgz#283ffd9fc1256840875311c1b60e8c40187110e6"
+  dependencies:
+    assert-plus "^1.0.0"
+
+glob-base@^0.3.0:
+  version "0.3.0"
+  resolved "https://registry.yarnpkg.com/glob-base/-/glob-base-0.3.0.tgz#dbb164f6221b1c0b1ccf82aea328b497df0ea3c4"
+  dependencies:
+    glob-parent "^2.0.0"
+    is-glob "^2.0.0"
+
+glob-parent@^2.0.0:
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/glob-parent/-/glob-parent-2.0.0.tgz#81383d72db054fcccf5336daa902f182f6edbb28"
+  dependencies:
+    is-glob "^2.0.0"
+
+glob@3.2.11, glob@3.x, glob@~3.2.3:
+  version "3.2.11"
+  resolved "https://registry.yarnpkg.com/glob/-/glob-3.2.11.tgz#4a973f635b9190f715d10987d5c00fd2815ebe3d"
+  dependencies:
+    inherits "2"
+    minimatch "0.3"
+
+glob@5.x, glob@^5.0.15:
+  version "5.0.15"
+  resolved "https://registry.yarnpkg.com/glob/-/glob-5.0.15.tgz#1bc936b9e02f4a603fcc222ecf7633d30b8b93b1"
+  dependencies:
+    inflight "^1.0.4"
+    inherits "2"
+    minimatch "2 || 3"
+    once "^1.3.0"
+    path-is-absolute "^1.0.0"
+
+glob@^7.0.5, glob@^7.1.1:
+  version "7.1.1"
+  resolved "https://registry.yarnpkg.com/glob/-/glob-7.1.1.tgz#805211df04faaf1c63a3600306cdf5ade50b2ec8"
+  dependencies:
+    fs.realpath "^1.0.0"
+    inflight "^1.0.4"
+    inherits "2"
+    minimatch "^3.0.2"
+    once "^1.3.0"
+    path-is-absolute "^1.0.0"
+
+globals@^6.4.0:
+  version "6.4.1"
+  resolved "https://registry.yarnpkg.com/globals/-/globals-6.4.1.tgz#8498032b3b6d1cc81eebc5f79690d8fe29fabf4f"
+
+globals@^9.0.0:
+  version "9.17.0"
+  resolved "https://registry.yarnpkg.com/globals/-/globals-9.17.0.tgz#0c0ca696d9b9bb694d2e5470bd37777caad50286"
+
+graceful-fs@^4.1.2, graceful-fs@^4.1.4, graceful-fs@^4.1.6, graceful-fs@^4.1.9:
+  version "4.1.11"
+  resolved "https://registry.yarnpkg.com/graceful-fs/-/graceful-fs-4.1.11.tgz#0e8bdfe4d1ddb8854d64e04ea7c00e2a026e5658"
+
+graceful-fs@~2.0.0:
+  version "2.0.3"
+  resolved "https://registry.yarnpkg.com/graceful-fs/-/graceful-fs-2.0.3.tgz#7cd2cdb228a4a3f36e95efa6cc142de7d1a136d0"
+
+"graceful-readlink@>= 1.0.0":
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/graceful-readlink/-/graceful-readlink-1.0.1.tgz#4cafad76bc62f02fa039b2f94e9a3dd3a391a725"
+
+growl@1.9.2:
+  version "1.9.2"
+  resolved "https://registry.yarnpkg.com/growl/-/growl-1.9.2.tgz#0ea7743715db8d8de2c5ede1775e1b45ac85c02f"
+
+growl@~1.8.1:
+  version "1.8.1"
+  resolved "https://registry.yarnpkg.com/growl/-/growl-1.8.1.tgz#4b2dec8d907e93db336624dcec0183502f8c9428"
+
+handlebars@^4.0.1:
+  version "4.0.6"
+  resolved "https://registry.yarnpkg.com/handlebars/-/handlebars-4.0.6.tgz#2ce4484850537f9c97a8026d5399b935c4ed4ed7"
+  dependencies:
+    async "^1.4.0"
+    optimist "^0.6.1"
+    source-map "^0.4.4"
+  optionalDependencies:
+    uglify-js "^2.6"
+
+har-schema@^1.0.5:
+  version "1.0.5"
+  resolved "https://registry.yarnpkg.com/har-schema/-/har-schema-1.0.5.tgz#d263135f43307c02c602afc8fe95970c0151369e"
+
+har-validator@~2.0.2, har-validator@~2.0.6:
+  version "2.0.6"
+  resolved "https://registry.yarnpkg.com/har-validator/-/har-validator-2.0.6.tgz#cdcbc08188265ad119b6a5a7c8ab70eecfb5d27d"
+  dependencies:
+    chalk "^1.1.1"
+    commander "^2.9.0"
+    is-my-json-valid "^2.12.4"
+    pinkie-promise "^2.0.0"
+
+har-validator@~4.2.1:
+  version "4.2.1"
+  resolved "https://registry.yarnpkg.com/har-validator/-/har-validator-4.2.1.tgz#33481d0f1bbff600dd203d75812a6a5fba002e2a"
+  dependencies:
+    ajv "^4.9.1"
+    har-schema "^1.0.5"
+
+has-ansi@^2.0.0:
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/has-ansi/-/has-ansi-2.0.0.tgz#34f5049ce1ecdf2b0649af3ef24e45ed35416d91"
+  dependencies:
+    ansi-regex "^2.0.0"
+
+has-binary@0.1.7:
+  version "0.1.7"
+  resolved "https://registry.yarnpkg.com/has-binary/-/has-binary-0.1.7.tgz#68e61eb16210c9545a0a5cce06a873912fe1e68c"
+  dependencies:
+    isarray "0.0.1"
+
+has-cors@1.1.0:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/has-cors/-/has-cors-1.1.0.tgz#5e474793f7ea9843d1bb99c23eef49ff126fff39"
+
+has-flag@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/has-flag/-/has-flag-1.0.0.tgz#9d9e793165ce017a00f00418c43f942a7b1d11fa"
+
+has-unicode@^2.0.0:
+  version "2.0.1"
+  resolved "https://registry.yarnpkg.com/has-unicode/-/has-unicode-2.0.1.tgz#e0e6fe6a28cf51138855e086d1691e771de2a8b9"
+
+hasha@^2.2.0, hasha@~2.2.0:
+  version "2.2.0"
+  resolved "https://registry.yarnpkg.com/hasha/-/hasha-2.2.0.tgz#78d7cbfc1e6d66303fe79837365984517b2f6ee1"
+  dependencies:
+    is-stream "^1.0.1"
+    pinkie-promise "^2.0.0"
+
+hawk@~1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/hawk/-/hawk-1.0.0.tgz#b90bb169807285411da7ffcb8dd2598502d3b52d"
+  dependencies:
+    boom "0.4.x"
+    cryptiles "0.2.x"
+    hoek "0.9.x"
+    sntp "0.2.x"
+
+hawk@~3.1.0, hawk@~3.1.3:
+  version "3.1.3"
+  resolved "https://registry.yarnpkg.com/hawk/-/hawk-3.1.3.tgz#078444bd7c1640b0fe540d2c9b73d59678e8e1c4"
+  dependencies:
+    boom "2.x.x"
+    cryptiles "2.x.x"
+    hoek "2.x.x"
+    sntp "1.x.x"
+
+hoek@0.9.x:
+  version "0.9.1"
+  resolved "https://registry.yarnpkg.com/hoek/-/hoek-0.9.1.tgz#3d322462badf07716ea7eb85baf88079cddce505"
+
+hoek@2.x.x:
+  version "2.16.3"
+  resolved "https://registry.yarnpkg.com/hoek/-/hoek-2.16.3.tgz#20bb7403d3cea398e91dc4710a8ff1b8274a25ed"
+
+home-or-tmp@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/home-or-tmp/-/home-or-tmp-1.0.0.tgz#4b9f1e40800c3e50c6c27f781676afcce71f3985"
+  dependencies:
+    os-tmpdir "^1.0.1"
+    user-home "^1.1.1"
+
+home-or-tmp@^2.0.0:
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/home-or-tmp/-/home-or-tmp-2.0.0.tgz#e36c3f2d2cae7d746a857e38d18d5f32a7882db8"
+  dependencies:
+    os-homedir "^1.0.0"
+    os-tmpdir "^1.0.1"
+
+hosted-git-info@^2.1.4:
+  version "2.4.2"
+  resolved "https://registry.yarnpkg.com/hosted-git-info/-/hosted-git-info-2.4.2.tgz#0076b9f46a270506ddbaaea56496897460612a67"
+
+html-encoding-sniffer@^1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/html-encoding-sniffer/-/html-encoding-sniffer-1.0.1.tgz#79bf7a785ea495fe66165e734153f363ff5437da"
+  dependencies:
+    whatwg-encoding "^1.0.1"
+
+http-errors@~1.6.1:
+  version "1.6.1"
+  resolved "https://registry.yarnpkg.com/http-errors/-/http-errors-1.6.1.tgz#5f8b8ed98aca545656bf572997387f904a722257"
+  dependencies:
+    depd "1.1.0"
+    inherits "2.0.3"
+    setprototypeof "1.0.3"
+    statuses ">= 1.3.1 < 2"
+
+http-proxy@^1.13.0:
+  version "1.16.2"
+  resolved "https://registry.yarnpkg.com/http-proxy/-/http-proxy-1.16.2.tgz#06dff292952bf64dbe8471fa9df73066d4f37742"
+  dependencies:
+    eventemitter3 "1.x.x"
+    requires-port "1.x.x"
+
+http-signature@~0.10.0:
+  version "0.10.1"
+  resolved "https://registry.yarnpkg.com/http-signature/-/http-signature-0.10.1.tgz#4fbdac132559aa8323121e540779c0a012b27e66"
+  dependencies:
+    asn1 "0.1.11"
+    assert-plus "^0.1.5"
+    ctype "0.5.3"
+
+http-signature@~1.1.0:
+  version "1.1.1"
+  resolved "https://registry.yarnpkg.com/http-signature/-/http-signature-1.1.1.tgz#df72e267066cd0ac67fb76adf8e134a8fbcf91bf"
+  dependencies:
+    assert-plus "^0.2.0"
+    jsprim "^1.2.2"
+    sshpk "^1.7.0"
+
+ibrik@~2.0.0:
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/ibrik/-/ibrik-2.0.0.tgz#89a2434f2a5c82b92166c3d97de3b5636eea2e9c"
+  dependencies:
+    coffee-script "~1.8.0"
+    esprima "1.2.x"
+    estraverse "~1.8.0"
+    fileset "0.1.x"
+    istanbul "~0.3.2"
+    lodash "~2.4.1"
+    mkdirp "~0.5.0"
+    optimist "~0.6.1"
+    which "~1.0.5"
+
+iconv-lite@0.4.13:
+  version "0.4.13"
+  resolved "https://registry.yarnpkg.com/iconv-lite/-/iconv-lite-0.4.13.tgz#1f88aba4ab0b1508e8312acc39345f36e992e2f2"
+
+iconv-lite@0.4.15, iconv-lite@^0.4.5:
+  version "0.4.15"
+  resolved "https://registry.yarnpkg.com/iconv-lite/-/iconv-lite-0.4.15.tgz#fe265a218ac6a57cfe854927e9d04c19825eddeb"
+
+indent-string@^2.1.0:
+  version "2.1.0"
+  resolved "https://registry.yarnpkg.com/indent-string/-/indent-string-2.1.0.tgz#8e2d48348742121b4a8218b7a137e9a52049dc80"
+  dependencies:
+    repeating "^2.0.0"
+
+indexof@0.0.1:
+  version "0.0.1"
+  resolved "https://registry.yarnpkg.com/indexof/-/indexof-0.0.1.tgz#82dc336d232b9062179d05ab3293a66059fd435d"
+
+inflight@^1.0.4:
+  version "1.0.6"
+  resolved "https://registry.yarnpkg.com/inflight/-/inflight-1.0.6.tgz#49bd6331d7d02d0c09bc910a1075ba8165b56df9"
+  dependencies:
+    once "^1.3.0"
+    wrappy "1"
+
+inherits@2, inherits@2.0.3, inherits@^2.0.1, inherits@~2.0.0, inherits@~2.0.1:
+  version "2.0.3"
+  resolved "https://registry.yarnpkg.com/inherits/-/inherits-2.0.3.tgz#633c2c83e3da42a502f52466022480f4208261de"
+
+inherits@~1.0.0:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/inherits/-/inherits-1.0.2.tgz#ca4309dadee6b54cc0b8d247e8d7c7a0975bdc9b"
+
+ini@^1.3.4, ini@~1.3.0:
+  version "1.3.4"
+  resolved "https://registry.yarnpkg.com/ini/-/ini-1.3.4.tgz#0537cb79daf59b59a1a517dff706c86ec039162e"
+
+ini@~1.1.0:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/ini/-/ini-1.1.0.tgz#4e808c2ce144c6c1788918e034d6797bc6cf6281"
+
+init-skeleton@~0.2.0:
+  version "0.2.4"
+  resolved "https://registry.yarnpkg.com/init-skeleton/-/init-skeleton-0.2.4.tgz#82655737a62d3b3b0153371c5847209132171863"
+  dependencies:
+    commander "~2.0.0"
+    loggy "~0.2.0"
+    mkdirp "~0.3.5"
+    ncp "~0.4.2"
+    rimraf "~2.2.1"
+
+invariant@^2.2.0:
+  version "2.2.2"
+  resolved "https://registry.yarnpkg.com/invariant/-/invariant-2.2.2.tgz#9e1f56ac0acdb6bf303306f338be3b204ae60360"
+  dependencies:
+    loose-envify "^1.0.0"
+
+invert-kv@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/invert-kv/-/invert-kv-1.0.0.tgz#104a8e4aaca6d3d8cd157a8ef8bfab2d7a3ffdb6"
+
+is-arrayish@^0.2.1:
+  version "0.2.1"
+  resolved "https://registry.yarnpkg.com/is-arrayish/-/is-arrayish-0.2.1.tgz#77c99840527aa8ecb1a8ba697b80645a7a926a9d"
+
+is-binary-path@^1.0.0:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/is-binary-path/-/is-binary-path-1.0.1.tgz#75f16642b480f187a711c814161fd3a4a7655898"
+  dependencies:
+    binary-extensions "^1.0.0"
+
+is-buffer@^1.0.2:
+  version "1.1.5"
+  resolved "https://registry.yarnpkg.com/is-buffer/-/is-buffer-1.1.5.tgz#1f3b26ef613b214b88cbca23cc6c01d87961eecc"
+
+is-builtin-module@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/is-builtin-module/-/is-builtin-module-1.0.0.tgz#540572d34f7ac3119f8f76c30cbc1b1e037affbe"
+  dependencies:
+    builtin-modules "^1.0.0"
+
+is-dotfile@^1.0.0:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/is-dotfile/-/is-dotfile-1.0.2.tgz#2c132383f39199f8edc268ca01b9b007d205cc4d"
+
+is-equal-shallow@^0.1.3:
+  version "0.1.3"
+  resolved "https://registry.yarnpkg.com/is-equal-shallow/-/is-equal-shallow-0.1.3.tgz#2238098fc221de0bcfa5d9eac4c45d638aa1c534"
+  dependencies:
+    is-primitive "^2.0.0"
+
+is-extendable@^0.1.1:
+  version "0.1.1"
+  resolved "https://registry.yarnpkg.com/is-extendable/-/is-extendable-0.1.1.tgz#62b110e289a471418e3ec36a617d472e301dfc89"
+
+is-extglob@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/is-extglob/-/is-extglob-1.0.0.tgz#ac468177c4943405a092fc8f29760c6ffc6206c0"
+
+is-finite@^1.0.0:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/is-finite/-/is-finite-1.0.2.tgz#cc6677695602be550ef11e8b4aa6305342b6d0aa"
+  dependencies:
+    number-is-nan "^1.0.0"
+
+is-fullwidth-code-point@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/is-fullwidth-code-point/-/is-fullwidth-code-point-1.0.0.tgz#ef9e31386f031a7f0d643af82fde50c457ef00cb"
+  dependencies:
+    number-is-nan "^1.0.0"
+
+is-glob@^2.0.0, is-glob@^2.0.1:
+  version "2.0.1"
+  resolved "https://registry.yarnpkg.com/is-glob/-/is-glob-2.0.1.tgz#d096f926a3ded5600f3fdfd91198cb0888c2d863"
+  dependencies:
+    is-extglob "^1.0.0"
+
+is-integer@^1.0.4:
+  version "1.0.6"
+  resolved "https://registry.yarnpkg.com/is-integer/-/is-integer-1.0.6.tgz#5273819fada880d123e1ac00a938e7172dd8d95e"
+  dependencies:
+    is-finite "^1.0.0"
+
+is-my-json-valid@^2.12.4:
+  version "2.16.0"
+  resolved "https://registry.yarnpkg.com/is-my-json-valid/-/is-my-json-valid-2.16.0.tgz#f079dd9bfdae65ee2038aae8acbc86ab109e3693"
+  dependencies:
+    generate-function "^2.0.0"
+    generate-object-property "^1.1.0"
+    jsonpointer "^4.0.0"
+    xtend "^4.0.0"
+
+is-number@^0.1.1:
+  version "0.1.1"
+  resolved "https://registry.yarnpkg.com/is-number/-/is-number-0.1.1.tgz#69a7af116963d47206ec9bd9b48a14216f1e3806"
+
+is-number@^2.0.2, is-number@^2.1.0:
+  version "2.1.0"
+  resolved "https://registry.yarnpkg.com/is-number/-/is-number-2.1.0.tgz#01fcbbb393463a548f2f466cce16dece49db908f"
+  dependencies:
+    kind-of "^3.0.2"
+
+is-posix-bracket@^0.1.0:
+  version "0.1.1"
+  resolved "https://registry.yarnpkg.com/is-posix-bracket/-/is-posix-bracket-0.1.1.tgz#3334dc79774368e92f016e6fbc0a88f5cd6e6bc4"
+
+is-primitive@^2.0.0:
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/is-primitive/-/is-primitive-2.0.0.tgz#207bab91638499c07b2adf240a41a87210034575"
+
+is-property@^1.0.0:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/is-property/-/is-property-1.0.2.tgz#57fe1c4e48474edd65b09911f26b1cd4095dda84"
+
+is-stream@^1.0.1:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/is-stream/-/is-stream-1.1.0.tgz#12d4a3dd4e68e0b79ceb8dbc84173ae80d91ca44"
+
+is-typedarray@~1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/is-typedarray/-/is-typedarray-1.0.0.tgz#e479c80858df0c1b11ddda6940f96011fcda4a9a"
+
+is-utf8@^0.2.0:
+  version "0.2.1"
+  resolved "https://registry.yarnpkg.com/is-utf8/-/is-utf8-0.2.1.tgz#4b0da1442104d1b336340e80797e865cf39f7d72"
+
+isarray@0.0.1:
+  version "0.0.1"
+  resolved "https://registry.yarnpkg.com/isarray/-/isarray-0.0.1.tgz#8a18acfca9a8f4177e09abfc6038939b05d1eedf"
+
+isarray@1.0.0, isarray@~1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/isarray/-/isarray-1.0.0.tgz#bb935d48582cba168c06834957a54a3e07124f11"
+
+isbinaryfile@^3.0.0:
+  version "3.0.2"
+  resolved "https://registry.yarnpkg.com/isbinaryfile/-/isbinaryfile-3.0.2.tgz#4a3e974ec0cba9004d3fc6cde7209ea69368a621"
+
+isexe@^2.0.0:
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/isexe/-/isexe-2.0.0.tgz#e8fbf374dc556ff8947a10dcb0572d633f2cfa10"
+
+isobject@^2.0.0:
+  version "2.1.0"
+  resolved "https://registry.yarnpkg.com/isobject/-/isobject-2.1.0.tgz#f065561096a3f1da2ef46272f815c840d87e0c89"
+  dependencies:
+    isarray "1.0.0"
+
+isstream@~0.1.2:
+  version "0.1.2"
+  resolved "https://registry.yarnpkg.com/isstream/-/isstream-0.1.2.tgz#47e63f7af55afa6f92e1500e690eb8b8529c099a"
+
+istanbul@~0.3.0, istanbul@~0.3.2:
+  version "0.3.22"
+  resolved "https://registry.yarnpkg.com/istanbul/-/istanbul-0.3.22.tgz#3e164d85021fe19c985d1f0e7ef0c3e22d012eb6"
+  dependencies:
+    abbrev "1.0.x"
+    async "1.x"
+    escodegen "1.7.x"
+    esprima "2.5.x"
+    fileset "0.2.x"
+    handlebars "^4.0.1"
+    js-yaml "3.x"
+    mkdirp "0.5.x"
+    nopt "3.x"
+    once "1.x"
+    resolve "1.1.x"
+    supports-color "^3.1.0"
+    which "^1.1.1"
+    wordwrap "^1.0.0"
+
+jade@0.26.3:
+  version "0.26.3"
+  resolved "https://registry.yarnpkg.com/jade/-/jade-0.26.3.tgz#8f10d7977d8d79f2f6ff862a81b0513ccb25686c"
+  dependencies:
+    commander "0.6.1"
+    mkdirp "0.3.0"
+
+"javascript-brunch@>= 1.0 < 1.5":
+  version "1.3.0"
+  resolved "https://registry.yarnpkg.com/javascript-brunch/-/javascript-brunch-1.3.0.tgz#c72067f5f6971c6e7e636a139e5fab8f88339b83"
+  dependencies:
+    coffee-script "1.3.3"
+
+jodid25519@^1.0.0:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/jodid25519/-/jodid25519-1.0.2.tgz#06d4912255093419477d425633606e0e90782967"
+  dependencies:
+    jsbn "~0.1.0"
+
+js-tokens@1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/js-tokens/-/js-tokens-1.0.1.tgz#cc435a5c8b94ad15acb7983140fc80182c89aeae"
+
+js-tokens@^3.0.0:
+  version "3.0.1"
+  resolved "https://registry.yarnpkg.com/js-tokens/-/js-tokens-3.0.1.tgz#08e9f132484a2c45a30907e9dc4d5567b7f114d7"
+
+js-yaml@3.x:
+  version "3.8.3"
+  resolved "https://registry.yarnpkg.com/js-yaml/-/js-yaml-3.8.3.tgz#33a05ec481c850c8875929166fe1beb61c728766"
+  dependencies:
+    argparse "^1.0.7"
+    esprima "^3.1.1"
+
+jsbn@~0.1.0:
+  version "0.1.1"
+  resolved "https://registry.yarnpkg.com/jsbn/-/jsbn-0.1.1.tgz#a5e654c2e5a2deb5f201d96cefbca80c0ef2f513"
+
+jsdom@^9.0.0:
+  version "9.12.0"
+  resolved "https://registry.yarnpkg.com/jsdom/-/jsdom-9.12.0.tgz#e8c546fffcb06c00d4833ca84410fed7f8a097d4"
+  dependencies:
+    abab "^1.0.3"
+    acorn "^4.0.4"
+    acorn-globals "^3.1.0"
+    array-equal "^1.0.0"
+    content-type-parser "^1.0.1"
+    cssom ">= 0.3.2 < 0.4.0"
+    cssstyle ">= 0.2.37 < 0.3.0"
+    escodegen "^1.6.1"
+    html-encoding-sniffer "^1.0.1"
+    nwmatcher ">= 1.3.9 < 2.0.0"
+    parse5 "^1.5.1"
+    request "^2.79.0"
+    sax "^1.2.1"
+    symbol-tree "^3.2.1"
+    tough-cookie "^2.3.2"
+    webidl-conversions "^4.0.0"
+    whatwg-encoding "^1.0.1"
+    whatwg-url "^4.3.0"
+    xml-name-validator "^2.0.1"
+
+jsesc@^1.3.0:
+  version "1.3.0"
+  resolved "https://registry.yarnpkg.com/jsesc/-/jsesc-1.3.0.tgz#46c3fec8c1892b12b0833db9bc7622176dbab34b"
+
+jsesc@~0.5.0:
+  version "0.5.0"
+  resolved "https://registry.yarnpkg.com/jsesc/-/jsesc-0.5.0.tgz#e7dee66e35d6fc16f710fe91d5cf69f70f08911d"
+
+json-schema@0.2.3:
+  version "0.2.3"
+  resolved "https://registry.yarnpkg.com/json-schema/-/json-schema-0.2.3.tgz#b480c892e59a2f05954ce727bd3f2a4e882f9e13"
+
+json-stable-stringify@^1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/json-stable-stringify/-/json-stable-stringify-1.0.1.tgz#9a759d39c5f2ff503fd5300646ed445f88c4f9af"
+  dependencies:
+    jsonify "~0.0.0"
+
+json-stringify-safe@~5.0.0, json-stringify-safe@~5.0.1:
+  version "5.0.1"
+  resolved "https://registry.yarnpkg.com/json-stringify-safe/-/json-stringify-safe-5.0.1.tgz#1296a2d58fd45f19a0f6ce01d65701e2c735b6eb"
+
+json3@3.3.2:
+  version "3.3.2"
+  resolved "https://registry.yarnpkg.com/json3/-/json3-3.3.2.tgz#3c0434743df93e2f5c42aee7b19bcb483575f4e1"
+
+json5@^0.4.0:
+  version "0.4.0"
+  resolved "https://registry.yarnpkg.com/json5/-/json5-0.4.0.tgz#054352e4c4c80c86c0923877d449de176a732c8d"
+
+json5@^0.5.0:
+  version "0.5.1"
+  resolved "https://registry.yarnpkg.com/json5/-/json5-0.5.1.tgz#1eade7acc012034ad84e2396767ead9fa5495821"
+
+jsonfile@^2.1.0:
+  version "2.4.0"
+  resolved "https://registry.yarnpkg.com/jsonfile/-/jsonfile-2.4.0.tgz#3736a2b428b87bbda0cc83b53fa3d633a35c2ae8"
+  optionalDependencies:
+    graceful-fs "^4.1.6"
+
+jsonfile@~1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/jsonfile/-/jsonfile-1.0.1.tgz#ea5efe40b83690b98667614a7392fc60e842c0dd"
+
+jsonify@~0.0.0:
+  version "0.0.0"
+  resolved "https://registry.yarnpkg.com/jsonify/-/jsonify-0.0.0.tgz#2c74b6ee41d93ca51b7b5aaee8f503631d252a73"
+
+jsonpointer@^4.0.0:
+  version "4.0.1"
+  resolved "https://registry.yarnpkg.com/jsonpointer/-/jsonpointer-4.0.1.tgz#4fd92cb34e0e9db3c89c8622ecf51f9b978c6cb9"
+
+jsprim@^1.2.2:
+  version "1.4.0"
+  resolved "https://registry.yarnpkg.com/jsprim/-/jsprim-1.4.0.tgz#a3b87e40298d8c380552d8cc7628a0bb95a22918"
+  dependencies:
+    assert-plus "1.0.0"
+    extsprintf "1.0.2"
+    json-schema "0.2.3"
+    verror "1.3.6"
+
+karma-babel-preprocessor@^6.0.1:
+  version "6.0.1"
+  resolved "https://registry.yarnpkg.com/karma-babel-preprocessor/-/karma-babel-preprocessor-6.0.1.tgz#7ae1d3e64950dbe11f421b74040ab08fb5a66c21"
+  dependencies:
+    babel-core "^6.0.0"
+
+karma-chai@~0.1.0:
+  version "0.1.0"
+  resolved "https://registry.yarnpkg.com/karma-chai/-/karma-chai-0.1.0.tgz#bee5ad40400517811ae34bb945f762909108b79a"
+
+karma-commonjs-require@~0.0.1:
+  version "0.0.3"
+  resolved "https://registry.yarnpkg.com/karma-commonjs-require/-/karma-commonjs-require-0.0.3.tgz#3b8426535b84b4635ec11fc238f5fe4b24ec0241"
+  dependencies:
+    commonjs-require-definition "~0.1.0"
+
+karma-coverage@~0.2.0:
+  version "0.2.7"
+  resolved "https://registry.yarnpkg.com/karma-coverage/-/karma-coverage-0.2.7.tgz#f76740b275bbf30a0ab9f41d8cf56843a0994576"
+  dependencies:
+    dateformat "~1.0.6"
+    ibrik "~2.0.0"
+    istanbul "~0.3.0"
+    minimatch "~0.3.0"
+
+karma-ember-precompiler-brunch@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/karma-ember-precompiler-brunch/-/karma-ember-precompiler-brunch-1.0.0.tgz#d7753a71c47d2de5f930f10da1262b602

<TRUNCATED>

[19/50] [abbrv] ambari git commit: AMBARI-20334 - Yarn Queue manager capacity field allows entering invalid values. (Anita Jebaraj via sangeetar)

Posted by ao...@apache.org.
AMBARI-20334 - Yarn Queue manager capacity field allows entering invalid values. (Anita Jebaraj via sangeetar)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/ae5c17ed
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/ae5c17ed
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/ae5c17ed

Branch: refs/heads/branch-3.0-perf
Commit: ae5c17eddac8f0ac9f14ef76704c790712383bb1
Parents: c1f9aba
Author: Sangeeta Ravindran <sa...@apache.org>
Authored: Thu Apr 20 12:28:30 2017 -0700
Committer: Andrew Onishuk <ao...@hortonworks.com>
Committed: Mon Apr 24 14:59:56 2017 +0300

----------------------------------------------------------------------
 .../src/main/resources/ui/app/components/capacityInput.js          | 2 ++
 1 file changed, 2 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/ae5c17ed/contrib/views/capacity-scheduler/src/main/resources/ui/app/components/capacityInput.js
----------------------------------------------------------------------
diff --git a/contrib/views/capacity-scheduler/src/main/resources/ui/app/components/capacityInput.js b/contrib/views/capacity-scheduler/src/main/resources/ui/app/components/capacityInput.js
index b6c419a..0646b79 100644
--- a/contrib/views/capacity-scheduler/src/main/resources/ui/app/components/capacityInput.js
+++ b/contrib/views/capacity-scheduler/src/main/resources/ui/app/components/capacityInput.js
@@ -257,6 +257,8 @@ App.DecimalCapacityInputComponent = Ember.TextField.extend({
       } else {
         this.set('value', (parseFloat(val) > maxVal)? parseFloat(maxVal) : parseFloat(val));
       }
+    } else {
+      this.set('value', (!Em.isBlank(this.get('value')) && !isNaN(parseFloat(this.get('value')))) ? parseFloat(val) : null);
     }
   }.observes('value').on('change')
 });
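
For context, the observer in this patch does two things: it clamps numeric input to the queue's configured maximum, and, with the added else branch, it now clears input that does not parse as a number instead of leaving stale text in the field. A simplified Python sketch of that sanitizing rule, for illustration only (sanitize_capacity and max_val are hypothetical names, not part of the patch; note Python's float() is stricter than JavaScript's parseFloat, which keeps the numeric prefix of strings like "12abc"):

    def sanitize_capacity(raw, max_val=100.0):
        # Clamp numeric capacity input to max_val; clear anything non-numeric.
        if raw is None or str(raw).strip() == "":
            return None               # blank input is cleared
        try:
            val = float(raw)
        except ValueError:
            return None               # invalid text is discarded rather than kept
        return min(val, max_val)      # values above the ceiling are clamped

    print(sanitize_capacity("250"))   # 100.0, clamped to the ceiling
    print(sanitize_capacity("abc"))   # None, invalid text is discarded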


[03/50] [abbrv] ambari git commit: AMBARI-20753. HDP 3.0 TP - temporarily install RPMs hive2 and remove Pig dependency from Hive (alejandro)

Posted by ao...@apache.org.
AMBARI-20753. HDP 3.0 TP - temporarily install RPMs hive2 and remove Pig dependency from Hive (alejandro)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/5ab5de4b
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/5ab5de4b
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/5ab5de4b

Branch: refs/heads/branch-3.0-perf
Commit: 5ab5de4ba46a25c309540524a1881d08385e3623
Parents: 422c30f
Author: Alejandro Fernandez <af...@hortonworks.com>
Authored: Wed Apr 12 17:35:29 2017 -0700
Committer: Andrew Onishuk <ao...@hortonworks.com>
Committed: Mon Apr 24 14:59:56 2017 +0300

----------------------------------------------------------------------
 .../common-services/HIVE/2.1.0.3.0/metainfo.xml          | 11 +++++++++--
 1 file changed, 9 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/5ab5de4b/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/metainfo.xml b/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/metainfo.xml
index 6d9098f..4225f19 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/metainfo.xml
+++ b/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/metainfo.xml
@@ -424,6 +424,9 @@
         <osSpecific>
           <osFamily>debian7,ubuntu12,ubuntu14,ubuntu16</osFamily>
           <packages>
+            <!--
+            TODO AMBARI-20753
+            Re-add after Hive has all of its packages using the correct RPM name.
             <package>
               <name>hive-${stack_version}</name>
             </package>
@@ -438,10 +441,11 @@
               <condition>should_install_hive_atlas</condition>
             </package>
             <package>
-              <name>hive2-${stack_version}</name>
+              <name>tez-hive2-${stack_version}</name>
             </package>
+            -->
             <package>
-              <name>tez-hive2-${stack_version}</name>
+              <name>hive2-${stack_version}</name>
             </package>
           </packages>
         </osSpecific>
@@ -491,7 +495,10 @@
         <service>HDFS</service>
         <service>YARN</service>
         <service>TEZ</service>
+        <!-- TODO AMBARI-20753
+        Re-add after Pig service is being packaged.
         <service>PIG</service>
+        -->
         <service>SLIDER</service>
       </requiredServices>
 


[17/50] [abbrv] ambari git commit: AMBARI-20790. Miscellaneous fixes to support HDP 3.0 Tech Preview (alejandro)

Posted by ao...@apache.org.
AMBARI-20790. Miscellaneous fixes to support HDP 3.0 Tech Preview (alejandro)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/c1f9aba7
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/c1f9aba7
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/c1f9aba7

Branch: refs/heads/branch-3.0-perf
Commit: c1f9aba75829c7a877988ab7894b1880513fa10d
Parents: 1cd9d1e
Author: Alejandro Fernandez <af...@hortonworks.com>
Authored: Wed Apr 19 19:29:11 2017 -0700
Committer: Andrew Onishuk <ao...@hortonworks.com>
Committed: Mon Apr 24 14:59:56 2017 +0300

----------------------------------------------------------------------
 .../libraries/script/script.py                  |   6 +-
 .../common-services/SPARK/2.2.0/metainfo.xml    |  13 +-
 .../SPARK/2.2.0/scripts/job_history_server.py   |   8 +-
 .../SPARK/2.2.0/scripts/livy_server.py          |   8 +-
 .../SPARK/2.2.0/scripts/params.py               |  18 +-
 .../SPARK/2.2.0/scripts/spark_client.py         |   8 +-
 .../SPARK/2.2.0/scripts/spark_service.py        |   9 +-
 .../SPARK/2.2.0/scripts/spark_thrift_server.py  |   8 +-
 .../YARN/2.1.0.2.0/package/scripts/yarn.py      |  36 ++--
 .../common-services/YARN/3.0.0.3.0/metainfo.xml |   4 +-
 .../YARN/3.0.0.3.0/package/scripts/yarn.py      |  36 ++--
 .../services/HDFS/configuration/hadoop-env.xml  |   2 +-
 .../YARN/configuration-mapred/mapred-env.xml    |   2 +-
 .../YARN/configuration-mapred/mapred-site.xml   |  12 +-
 .../stacks/2.0.6/YARN/test_historyserver.py     | 198 +++++++++----------
 .../stacks/2.0.6/YARN/test_nodemanager.py       | 196 +++++++++---------
 .../stacks/2.0.6/YARN/test_resourcemanager.py   |  52 ++---
 .../stacks/2.1/YARN/test_apptimelineserver.py   |  40 ++--
 .../test/python/stacks/2.3/YARN/test_ats_1_5.py |  98 ++++-----
 19 files changed, 385 insertions(+), 369 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/c1f9aba7/ambari-common/src/main/python/resource_management/libraries/script/script.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/script/script.py b/ambari-common/src/main/python/resource_management/libraries/script/script.py
index 2c441ad..d0e5678 100644
--- a/ambari-common/src/main/python/resource_management/libraries/script/script.py
+++ b/ambari-common/src/main/python/resource_management/libraries/script/script.py
@@ -370,7 +370,7 @@ class Script(object):
       
       show_logs(log_folder, user, lines_count=COUNT_OF_LAST_LINES_OF_OUT_FILES_LOGGED, mask=OUT_FILES_MASK)
 
-  def post_start(self, env):
+  def post_start(self, env=None):
     pid_files = self.get_pid_files()
     if pid_files == []:
       Logger.logger.warning("Pid files for current script are not defined")
@@ -827,7 +827,7 @@ class Script(object):
 
       # To remain backward compatible with older stacks, only pass upgrade_type if available.
       # TODO, remove checking the argspec for "upgrade_type" once all of the services support that optional param.
-      self.pre_start()
+      self.pre_start(env)
       if "upgrade_type" in inspect.getargspec(self.start).args:
         self.start(env, upgrade_type=upgrade_type)
       else:
@@ -835,7 +835,7 @@ class Script(object):
           self.start(env, rolling_restart=(upgrade_type == UPGRADE_TYPE_ROLLING))
         else:
           self.start(env)
-      self.post_start()
+      self.post_start(env)
 
       if is_stack_upgrade:
         # Remain backward compatible with the rest of the services that haven't switched to using
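
The two hunks above are the backward-compatibility mechanics: post_start() gains a default for env so it can be called with or without the environment, env is now threaded through the pre_start/post_start calls, and the inspect.getargspec check forwards the optional upgrade_type keyword only when a subclass's start() actually declares it. A standalone sketch of that argspec pattern, using the same Python 2-era inspect API as the patch (OldService and NewService are hypothetical examples, not Ambari classes):

    import inspect

    class OldService(object):
        def start(self, env):                     # legacy signature
            print("old start, env=%r" % (env,))

    class NewService(object):
        def start(self, env, upgrade_type=None):  # newer optional parameter
            print("new start, env=%r, upgrade_type=%r" % (env, upgrade_type))

    def restart(service, env, upgrade_type):
        # Forward upgrade_type only if the service's start() declares it,
        # mirroring the getargspec check in script.py above.
        if "upgrade_type" in inspect.getargspec(service.start).args:
            service.start(env, upgrade_type=upgrade_type)
        else:
            service.start(env)

    restart(OldService(), {}, "ROLLING")   # falls back to the legacy call
    restart(NewService(), {}, "ROLLING")   # forwards the optional keyword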

http://git-wip-us.apache.org/repos/asf/ambari/blob/c1f9aba7/ambari-server/src/main/resources/common-services/SPARK/2.2.0/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/SPARK/2.2.0/metainfo.xml b/ambari-server/src/main/resources/common-services/SPARK/2.2.0/metainfo.xml
index 25a6855..d2e2d8b 100644
--- a/ambari-server/src/main/resources/common-services/SPARK/2.2.0/metainfo.xml
+++ b/ambari-server/src/main/resources/common-services/SPARK/2.2.0/metainfo.xml
@@ -240,18 +240,19 @@
         <service>HIVE</service>
       </requiredServices>
 
+      <!-- TODO, change these to "spark" and "livy" after RPM switches the name. -->
       <osSpecifics>
         <osSpecific>
           <osFamily>redhat7,amazon2015,redhat6,suse11,suse12</osFamily>
           <packages>
             <package>
-              <name>spark_${stack_version}</name>
+              <name>spark2_${stack_version}</name>
             </package>
             <package>
-              <name>spark_${stack_version}-python</name>
+              <name>spark2_${stack_version}-python</name>
             </package>
             <package>
-              <name>livy_${stack_version}</name>
+              <name>livy2_${stack_version}</name>
             </package>
           </packages>
         </osSpecific>
@@ -259,13 +260,13 @@
           <osFamily>debian7,ubuntu12,ubuntu14,ubuntu16</osFamily>
           <packages>
             <package>
-              <name>spark-${stack_version}</name>
+              <name>spark2-${stack_version}</name>
             </package>
             <package>
-              <name>spark-${stack_version}-python</name>
+              <name>spark2-${stack_version}-python</name>
             </package>
             <package>
-              <name>livy-${stack_version}</name>
+              <name>livy2-${stack_version}</name>
             </package>
           </packages>
         </osSpecific>

http://git-wip-us.apache.org/repos/asf/ambari/blob/c1f9aba7/ambari-server/src/main/resources/common-services/SPARK/2.2.0/scripts/job_history_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/SPARK/2.2.0/scripts/job_history_server.py b/ambari-server/src/main/resources/common-services/SPARK/2.2.0/scripts/job_history_server.py
index 16a2224..3937c88 100644
--- a/ambari-server/src/main/resources/common-services/SPARK/2.2.0/scripts/job_history_server.py
+++ b/ambari-server/src/main/resources/common-services/SPARK/2.2.0/scripts/job_history_server.py
@@ -68,7 +68,8 @@ class JobHistoryServer(Script):
     
 
   def get_component_name(self):
-    return "spark-historyserver"
+    # TODO, change to "spark" after RPM switches the name
+    return "spark2-historyserver"
 
   def pre_upgrade_restart(self, env, upgrade_type=None):
     import params
@@ -76,8 +77,9 @@ class JobHistoryServer(Script):
     env.set_params(params)
     if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version):
       Logger.info("Executing Spark Job History Server Stack Upgrade pre-restart")
-      conf_select.select(params.stack_name, "spark", params.version)
-      stack_select.select("spark-historyserver", params.version)
+      # TODO, change to "spark" after RPM switches the name
+      conf_select.select(params.stack_name, "spark2", params.version)
+      stack_select.select("spark2-historyserver", params.version)
 
       # Spark 1.3.1.2.3, and higher, which was included in HDP 2.3, does not have a dependency on Tez, so it does not
       # need to copy the tarball, otherwise, copy it.

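The conf_select/stack_select pairing shown here recurs in the Livy, Spark client, and Thrift server scripts below, only with different component names. A hedged sketch of the pattern, assuming the resource_management helpers these scripts already import:

    from resource_management.libraries.functions import conf_select, stack_select

    def pre_upgrade_restart(self, env, upgrade_type=None):
      import params
      env.set_params(params)
      # Repoint /etc/spark2/conf and <stack-root>/current/spark2-historyserver
      # at the target stack version before the component restarts.
      conf_select.select(params.stack_name, "spark2", params.version)
      stack_select.select("spark2-historyserver", params.version)
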
http://git-wip-us.apache.org/repos/asf/ambari/blob/c1f9aba7/ambari-server/src/main/resources/common-services/SPARK/2.2.0/scripts/livy_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/SPARK/2.2.0/scripts/livy_server.py b/ambari-server/src/main/resources/common-services/SPARK/2.2.0/scripts/livy_server.py
index b09d9a9..269c97d 100644
--- a/ambari-server/src/main/resources/common-services/SPARK/2.2.0/scripts/livy_server.py
+++ b/ambari-server/src/main/resources/common-services/SPARK/2.2.0/scripts/livy_server.py
@@ -126,7 +126,8 @@ class LivyServer(Script):
         Logger.info("DFS directory '" + dir_path + "' exists.")
 
   def get_component_name(self):
-    return "livy-server"
+    # TODO, change to "livy" after RPM switches the name
+    return "livy2-server"
 
   def pre_upgrade_restart(self, env, upgrade_type=None):
     import params
@@ -134,8 +135,9 @@ class LivyServer(Script):
     env.set_params(params)
     if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version):
       Logger.info("Executing Livy Server Stack Upgrade pre-restart")
-      conf_select.select(params.stack_name, "spark", params.version)
-      stack_select.select("livy-server", params.version)
+      # TODO, change to "spark" and "livy" after RPM switches the name
+      conf_select.select(params.stack_name, "spark2", params.version)
+      stack_select.select("livy2-server", params.version)
 
   def get_log_folder(self):
     import params

http://git-wip-us.apache.org/repos/asf/ambari/blob/c1f9aba7/ambari-server/src/main/resources/common-services/SPARK/2.2.0/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/SPARK/2.2.0/scripts/params.py b/ambari-server/src/main/resources/common-services/SPARK/2.2.0/scripts/params.py
index ab58cb6..e60cab5 100644
--- a/ambari-server/src/main/resources/common-services/SPARK/2.2.0/scripts/params.py
+++ b/ambari-server/src/main/resources/common-services/SPARK/2.2.0/scripts/params.py
@@ -35,12 +35,13 @@ from resource_management.libraries.script.script import Script
 
 # a map of the Ambari role to the component name
 # for use with <stack-root>/current/<component>
+# TODO, change to "spark" and "livy" after RPM switches the name
 SERVER_ROLE_DIRECTORY_MAP = {
-  'SPARK_JOBHISTORYSERVER' : 'spark-historyserver',
-  'SPARK_CLIENT' : 'spark-client',
-  'SPARK_THRIFTSERVER' : 'spark-thriftserver',
-  'LIVY_SERVER' : 'livy-server',
-  'LIVY_CLIENT' : 'livy-client'
+  'SPARK_JOBHISTORYSERVER' : 'spark2-historyserver',
+  'SPARK_CLIENT' : 'spark2-client',
+  'SPARK_THRIFTSERVER' : 'spark2-thriftserver',
+  'LIVY_SERVER' : 'livy2-server',
+  'LIVY_CLIENT' : 'livy2-client'
 
 }
 
@@ -59,7 +60,8 @@ sysprep_skip_copy_tarballs_hdfs = get_sysprep_skip_copy_tarballs_hdfs()
 # New Cluster Stack Version that is defined during the RESTART of a Stack Upgrade
 version = default("/commandParams/version", None)
 
-spark_conf = '/etc/spark/conf'
+# TODO, change to "spark" after RPM switches the name
+spark_conf = '/etc/spark2/conf'
 hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
 hadoop_bin_dir = stack_select.get_hadoop_dir("bin")
 
@@ -139,7 +141,7 @@ has_spark_thriftserver = not len(spark_thriftserver_hosts) == 0
 
 # hive-site params
 spark_hive_properties = {
-  'hive.metastore.uris': config['configurations']['hive-site']['hive.metastore.uris']
+  'hive.metastore.uris': default('/configurations/hive-site/hive.metastore.uris', '')
 }
 
 # security settings
@@ -195,7 +197,7 @@ dfs_type = default("/commandParams/dfs_type", "")
 
 # livy related config
 
-# livy for spark is only supported from HDP 2.6
+# livy for spark2 is only supported from HDP 2.6
 has_livyserver = False
 
 if stack_version_formatted and check_stack_feature(StackFeature.SPARK_LIVY, stack_version_formatted):

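The hive.metastore.uris change above is more than a rename: indexing config directly raises a KeyError whenever the cluster has no hive-site configuration, while the default() helper returns a fallback. A minimal sketch, assuming the usual resource_management import path:

    from resource_management.libraries.functions.default import default

    # Direct indexing fails hard if hive-site was never rendered:
    #   config['configurations']['hive-site']['hive.metastore.uris']
    # The defaulted lookup degrades to an empty string instead:
    spark_hive_properties = {
      'hive.metastore.uris': default('/configurations/hive-site/hive.metastore.uris', '')
    }
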
http://git-wip-us.apache.org/repos/asf/ambari/blob/c1f9aba7/ambari-server/src/main/resources/common-services/SPARK/2.2.0/scripts/spark_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/SPARK/2.2.0/scripts/spark_client.py b/ambari-server/src/main/resources/common-services/SPARK/2.2.0/scripts/spark_client.py
index a2e53cd..3acde4e 100644
--- a/ambari-server/src/main/resources/common-services/SPARK/2.2.0/scripts/spark_client.py
+++ b/ambari-server/src/main/resources/common-services/SPARK/2.2.0/scripts/spark_client.py
@@ -44,7 +44,8 @@ class SparkClient(Script):
     raise ClientComponentHasNoStatus()
   
   def get_component_name(self):
-    return "spark-client"
+    # TODO, change to "spark" after RPM switches the name
+    return "spark2-client"
 
   def pre_upgrade_restart(self, env, upgrade_type=None):
     import params
@@ -52,8 +53,9 @@ class SparkClient(Script):
     env.set_params(params)
     if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version):
       Logger.info("Executing Spark Client Stack Upgrade pre-restart")
-      conf_select.select(params.stack_name, "spark", params.version)
-      stack_select.select("spark-client", params.version)
+      # TODO, change to "spark" after RPM switches the name
+      conf_select.select(params.stack_name, "spark2", params.version)
+      stack_select.select("spark2-client", params.version)
 
 if __name__ == "__main__":
   SparkClient().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/c1f9aba7/ambari-server/src/main/resources/common-services/SPARK/2.2.0/scripts/spark_service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/SPARK/2.2.0/scripts/spark_service.py b/ambari-server/src/main/resources/common-services/SPARK/2.2.0/scripts/spark_service.py
index c6619e4..536d798 100644
--- a/ambari-server/src/main/resources/common-services/SPARK/2.2.0/scripts/spark_service.py
+++ b/ambari-server/src/main/resources/common-services/SPARK/2.2.0/scripts/spark_service.py
@@ -41,7 +41,7 @@ def make_tarfile(output_filename, source_dir):
   parent_dir=os.path.dirname(output_filename)
   if not os.path.exists(parent_dir):
     os.makedirs(parent_dir)
-    os.chmod(parent_dir, 0711)
+  os.chmod(parent_dir, 0711)
   with closing(tarfile.open(output_filename, "w:gz")) as tar:
     for file in os.listdir(source_dir):
       tar.add(os.path.join(source_dir,file),arcname=file)
@@ -58,12 +58,13 @@ def spark_service(name, upgrade_type=None, action=None):
       effective_version = format_stack_version(effective_version)
 
     if name == 'jobhistoryserver' and effective_version and check_stack_feature(StackFeature.SPARK_16PLUS, effective_version):
-      # create & copy spark-hdp-yarn-archive.tar.gz to hdfs
+      # TODO, change to "spark" after RPM switches the name
+      # create & copy spark2-hdp-yarn-archive.tar.gz to hdfs
       if not params.sysprep_skip_copy_tarballs_hdfs:
           source_dir=params.spark_home+"/jars"
-          tmp_archive_file=get_tarball_paths("spark")[1]
+          tmp_archive_file=get_tarball_paths("spark2")[1]
           make_tarfile(tmp_archive_file, source_dir)
-          copy_to_hdfs("spark", params.user_group, params.hdfs_user, skip=params.sysprep_skip_copy_tarballs_hdfs, replace_existing_files=True)
+          copy_to_hdfs("spark2", params.user_group, params.hdfs_user, skip=params.sysprep_skip_copy_tarballs_hdfs, replace_existing_files=True)
       # create spark history directory
       params.HdfsResource(params.spark_history_dir,
                           type="directory",

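The make_tarfile() change above is a genuine bug fix, not a style tweak: with the old indentation, os.chmod only ran when the parent directory had just been created, so a pre-existing directory kept whatever mode it had. A condensed illustration:

    import os

    def ensure_parent_dir(parent_dir):
      if not os.path.exists(parent_dir):
        os.makedirs(parent_dir)
      # Now runs unconditionally, so an already-existing directory is
      # also forced to 0711 (Python 2 octal literal, as in the script).
      os.chmod(parent_dir, 0711)
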
http://git-wip-us.apache.org/repos/asf/ambari/blob/c1f9aba7/ambari-server/src/main/resources/common-services/SPARK/2.2.0/scripts/spark_thrift_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/SPARK/2.2.0/scripts/spark_thrift_server.py b/ambari-server/src/main/resources/common-services/SPARK/2.2.0/scripts/spark_thrift_server.py
index de82c16..8953b35 100644
--- a/ambari-server/src/main/resources/common-services/SPARK/2.2.0/scripts/spark_thrift_server.py
+++ b/ambari-server/src/main/resources/common-services/SPARK/2.2.0/scripts/spark_thrift_server.py
@@ -63,15 +63,17 @@ class SparkThriftServer(Script):
     check_process_status(status_params.spark_thrift_server_pid_file)
 
   def get_component_name(self):
-    return "spark-thriftserver"
+    # TODO, change to "spark" after RPM switches the name
+    return "spark2-thriftserver"
 
   def pre_upgrade_restart(self, env, upgrade_type=None):
     import params
 
     env.set_params(params)
     Logger.info("Executing Spark Thrift Server Stack Upgrade pre-restart")
-    conf_select.select(params.stack_name, "spark", params.version)
-    stack_select.select("spark-thriftserver", params.version)
+    # TODO, change to "spark" after RPM switches the name
+    conf_select.select(params.stack_name, "spark2", params.version)
+    stack_select.select("spark2-thriftserver", params.version)
       
   def get_log_folder(self):
     import params

http://git-wip-us.apache.org/repos/asf/ambari/blob/c1f9aba7/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/yarn.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/yarn.py b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/yarn.py
index 52338df..2e1b208 100644
--- a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/yarn.py
+++ b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/yarn.py
@@ -45,15 +45,6 @@ def yarn(name=None, config_dir=None):
   """
   import params
 
-  if name == 'resourcemanager':
-    setup_resourcemanager()
-  elif name == 'nodemanager':
-    setup_nodemanager()
-  elif name == 'apptimelineserver':
-    setup_ats()
-  elif name == 'historyserver':
-    setup_historyserver()
-
   if config_dir is None:
     config_dir = params.hadoop_conf_dir
 
@@ -61,32 +52,41 @@ def yarn(name=None, config_dir=None):
     Directory(InlineTemplate(params.yarn_nodemanager_recovery_dir).get_content(),
               owner=params.yarn_user,
               group=params.user_group,
-              create_parents = True,
+              create_parents=True,
               mode=0755,
-              cd_access = 'a',
+              cd_access='a',
     )
 
   Directory([params.yarn_pid_dir_prefix, params.yarn_pid_dir, params.yarn_log_dir],
             owner=params.yarn_user,
             group=params.user_group,
-            create_parents = True,
-            cd_access = 'a',
+            create_parents=True,
+            cd_access='a',
   )
-
   Directory([params.mapred_pid_dir_prefix, params.mapred_pid_dir, params.mapred_log_dir_prefix, params.mapred_log_dir],
             owner=params.mapred_user,
             group=params.user_group,
-            create_parents = True,
-            cd_access = 'a',
+            create_parents=True,
+            cd_access='a',
   )
   Directory([params.yarn_log_dir_prefix],
             owner=params.yarn_user,
             group=params.user_group,
-            create_parents = True,
+            create_parents=True,
             ignore_failures=True,
-            cd_access = 'a',
+            cd_access='a',
   )
 
+  # Some of these function calls depend on the directories above being created first.
+  if name == 'resourcemanager':
+    setup_resourcemanager()
+  elif name == 'nodemanager':
+    setup_nodemanager()
+  elif name == 'apptimelineserver':
+    setup_ats()
+  elif name == 'historyserver':
+    setup_historyserver()
+
   XmlConfig("core-site.xml",
             conf_dir=config_dir,
             configurations=params.config['configurations']['core-site'],

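The reordering above matters because setup_resourcemanager() and its siblings create files inside the directories that the Directory resources declare, so invoking them first can fail on a clean host. A hedged sketch of the dependency, using the resource_management resources these scripts already import (the paths mirror the defaults asserted in the YARN unit tests below; resources like these only execute inside a script's environment context):

    from resource_management.core.resources.system import Directory, File

    # The directory must exist before the file inside it is declared.
    Directory('/var/log/hadoop-yarn/yarn',
              owner='yarn',
              group='hadoop',
              create_parents=True)
    File('/var/log/hadoop-yarn/yarn/hadoop-mapreduce.jobsummary.log',
         owner='yarn',
         group='hadoop')
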
http://git-wip-us.apache.org/repos/asf/ambari/blob/c1f9aba7/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/metainfo.xml b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/metainfo.xml
index 18186bd..a802795 100644
--- a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/metainfo.xml
+++ b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/metainfo.xml
@@ -267,8 +267,7 @@
             <enabled>true</enabled>
             <co-locate>YARN/RESOURCEMANAGER</co-locate>
           </auto-deploy>
-
-          <!-- TODO HDP 3.0, add later after UI is fixed,
+
           <dependencies>
             <dependency>
               <name>HDFS/HDFS_CLIENT</name>
@@ -292,7 +291,6 @@
               </auto-deploy>
             </dependency>
           </dependencies>
-          -->
 
           <commandScript>
             <script>scripts/historyserver.py</script>

http://git-wip-us.apache.org/repos/asf/ambari/blob/c1f9aba7/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/yarn.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/yarn.py b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/yarn.py
index d601f8f..c6b4c18 100644
--- a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/yarn.py
+++ b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/yarn.py
@@ -45,15 +45,6 @@ def yarn(name=None, config_dir=None):
   """
   import params
 
-  if name == 'resourcemanager':
-    setup_resourcemanager()
-  elif name == 'nodemanager':
-    setup_nodemanager()
-  elif name == 'apptimelineserver':
-    setup_ats()
-  elif name == 'historyserver':
-    setup_historyserver()
-
   if config_dir is None:
     config_dir = params.hadoop_conf_dir
 
@@ -61,32 +52,42 @@ def yarn(name=None, config_dir=None):
     Directory(InlineTemplate(params.yarn_nodemanager_recovery_dir).get_content(),
               owner=params.yarn_user,
               group=params.user_group,
-              create_parents = True,
+              create_parents=True,
               mode=0755,
-              cd_access = 'a',
+              cd_access='a',
     )
 
   Directory([params.yarn_pid_dir_prefix, params.yarn_pid_dir, params.yarn_log_dir],
             owner=params.yarn_user,
             group=params.user_group,
-            create_parents = True,
-            cd_access = 'a',
+            create_parents=True,
+            cd_access='a',
   )
 
   Directory([params.mapred_pid_dir_prefix, params.mapred_pid_dir, params.mapred_log_dir_prefix, params.mapred_log_dir],
             owner=params.mapred_user,
             group=params.user_group,
-            create_parents = True,
-            cd_access = 'a',
+            create_parents=True,
+            cd_access='a',
   )
   Directory([params.yarn_log_dir_prefix],
             owner=params.yarn_user,
             group=params.user_group,
-            create_parents = True,
+            create_parents=True,
             ignore_failures=True,
-            cd_access = 'a',
+            cd_access='a',
   )
 
+  # Some of these function calls depend on the directories above being created first.
+  if name == 'resourcemanager':
+    setup_resourcemanager()
+  elif name == 'nodemanager':
+    setup_nodemanager()
+  elif name == 'apptimelineserver':
+    setup_ats()
+  elif name == 'historyserver':
+    setup_historyserver()
+
   XmlConfig("core-site.xml",
             conf_dir=config_dir,
             configurations=params.config['configurations']['core-site'],
@@ -377,6 +378,7 @@ def setup_resourcemanager():
        owner=params.yarn_user,
        group=params.user_group
   )
+  # This depends on the parent directory already existing.
   File(params.yarn_job_summary_log,
      owner=params.yarn_user,
      group=params.user_group

http://git-wip-us.apache.org/repos/asf/ambari/blob/c1f9aba7/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/configuration/hadoop-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/configuration/hadoop-env.xml b/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/configuration/hadoop-env.xml
index fbda35a..9d504db 100644
--- a/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/configuration/hadoop-env.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/configuration/hadoop-env.xml
@@ -165,6 +165,6 @@
     <value-attributes>
       <type>content</type>
     </value-attributes>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/c1f9aba7/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration-mapred/mapred-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration-mapred/mapred-env.xml b/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration-mapred/mapred-env.xml
index deb4ef7..a143660 100644
--- a/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration-mapred/mapred-env.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration-mapred/mapred-env.xml
@@ -44,6 +44,6 @@
     <value-attributes>
       <type>content</type>
     </value-attributes>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/c1f9aba7/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration-mapred/mapred-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration-mapred/mapred-site.xml b/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration-mapred/mapred-site.xml
index cbeb08b..6ce4d72 100644
--- a/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration-mapred/mapred-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration-mapred/mapred-site.xml
@@ -25,7 +25,7 @@
       CLASSPATH for MR applications. A comma-separated list of CLASSPATH
       entries.
     </description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
 
   <!-- These configs were inherited from HDP 2.2 -->
@@ -37,13 +37,13 @@
       This is not an additive property. You must preserve the original value if
       you want your map and reduce tasks to have access to native libraries (compression, etc)
     </description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>mapreduce.application.framework.path</name>
     <value>/hdp/apps/${hdp.version}/mapreduce/mapreduce.tar.gz#mr-framework</value>
     <description/>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>yarn.app.mapreduce.am.admin-command-opts</name>
@@ -61,18 +61,18 @@
       of LD_LIBRARY_PATH in the map / reduce JVM env using the mapreduce.map.env and
       mapreduce.reduce.env config settings.
     </description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>mapreduce.admin.map.child.java.opts</name>
     <value>-server -XX:NewRatio=8 -Djava.net.preferIPv4Stack=true -Dhdp.version=${hdp.version}</value>
     <description/>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>mapreduce.admin.reduce.child.java.opts</name>
     <value>-server -XX:NewRatio=8 -Djava.net.preferIPv4Stack=true -Dhdp.version=${hdp.version}</value>
     <description/>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/c1f9aba7/ambari-server/src/test/python/stacks/2.0.6/YARN/test_historyserver.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/YARN/test_historyserver.py b/ambari-server/src/test/python/stacks/2.0.6/YARN/test_historyserver.py
index 7a0514a..fc7fcd8 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/YARN/test_historyserver.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/YARN/test_historyserver.py
@@ -197,7 +197,55 @@ class TestHistoryServer(RMFTestCase):
     self.assertNoMoreResources()
 
   def assert_configure_default(self):
-
+    self.assertResourceCalled('Directory', '/var/run/hadoop-yarn',
+      owner = 'yarn',
+      group = 'hadoop',
+      create_parents = True,
+      cd_access = 'a',
+    )
+    self.assertResourceCalled('Directory', '/var/run/hadoop-yarn/yarn',
+      owner = 'yarn',
+      group = 'hadoop',
+      create_parents = True,
+      cd_access = 'a',
+    )
+    self.assertResourceCalled('Directory', '/var/log/hadoop-yarn/yarn',
+      owner = 'yarn',
+      group = 'hadoop',
+      create_parents = True,
+      cd_access = 'a',
+    )
+    self.assertResourceCalled('Directory', '/var/run/hadoop-mapreduce',
+      owner = 'mapred',
+      group = 'hadoop',
+      create_parents = True,
+      cd_access = 'a',
+    )
+    self.assertResourceCalled('Directory', '/var/run/hadoop-mapreduce/mapred',
+      owner = 'mapred',
+      group = 'hadoop',
+      create_parents = True,
+      cd_access = 'a',
+    )
+    self.assertResourceCalled('Directory', '/var/log/hadoop-mapreduce',
+      owner = 'mapred',
+      group = 'hadoop',
+      create_parents = True,
+      cd_access = 'a',
+    )
+    self.assertResourceCalled('Directory', '/var/log/hadoop-mapreduce/mapred',
+      owner = 'mapred',
+      group = 'hadoop',
+      create_parents = True,
+      cd_access = 'a',
+    )
+    self.assertResourceCalled('Directory', '/var/log/hadoop-yarn',
+      owner = 'yarn',
+      group = 'hadoop',
+      create_parents = True,
+      ignore_failures = True,
+      cd_access = 'a',
+    )
     self.assertResourceCalled('HdfsResource', '/app-logs',
         immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
         security_enabled = False,
@@ -309,55 +357,6 @@ class TestHistoryServer(RMFTestCase):
       cd_access = 'a',
       recursive_ownership = True,
     )
-    self.assertResourceCalled('Directory', '/var/run/hadoop-yarn',
-      owner = 'yarn',
-      group = 'hadoop',
-      create_parents = True,
-      cd_access = 'a',
-    )
-    self.assertResourceCalled('Directory', '/var/run/hadoop-yarn/yarn',
-      owner = 'yarn',
-      group = 'hadoop',
-      create_parents = True,
-      cd_access = 'a',
-    )
-    self.assertResourceCalled('Directory', '/var/log/hadoop-yarn/yarn',
-      owner = 'yarn',
-      group = 'hadoop',
-      create_parents = True,
-      cd_access = 'a',
-    )
-    self.assertResourceCalled('Directory', '/var/run/hadoop-mapreduce',
-      owner = 'mapred',
-      group = 'hadoop',
-      create_parents = True,
-      cd_access = 'a',
-    )
-    self.assertResourceCalled('Directory', '/var/run/hadoop-mapreduce/mapred',
-      owner = 'mapred',
-      group = 'hadoop',
-      create_parents = True,
-      cd_access = 'a',
-    )
-    self.assertResourceCalled('Directory', '/var/log/hadoop-mapreduce',
-      owner = 'mapred',
-      group = 'hadoop',
-      create_parents = True,
-      cd_access = 'a',
-    )
-    self.assertResourceCalled('Directory', '/var/log/hadoop-mapreduce/mapred',
-      owner = 'mapred',
-      group = 'hadoop',
-      create_parents = True,
-      cd_access = 'a',
-    )
-    self.assertResourceCalled('Directory', '/var/log/hadoop-yarn',
-      owner = 'yarn',
-      group = 'hadoop',
-      create_parents = True,
-      ignore_failures = True,
-      cd_access = 'a',
-    )
     self.assertResourceCalled('XmlConfig', 'core-site.xml',
       owner = 'hdfs',
       group = 'hadoop',
@@ -464,6 +463,56 @@ class TestHistoryServer(RMFTestCase):
                               )
 
   def assert_configure_secured(self):
+    self.assertResourceCalled('Directory', '/var/run/hadoop-yarn',
+      owner = 'yarn',
+      group = 'hadoop',
+      create_parents = True,
+      cd_access = 'a',
+    )
+    self.assertResourceCalled('Directory', '/var/run/hadoop-yarn/yarn',
+      owner = 'yarn',
+      group = 'hadoop',
+      create_parents = True,
+      cd_access = 'a',
+    )
+    self.assertResourceCalled('Directory', '/var/log/hadoop-yarn/yarn',
+      owner = 'yarn',
+      group = 'hadoop',
+      create_parents = True,
+      cd_access = 'a',
+    )
+    self.assertResourceCalled('Directory', '/var/run/hadoop-mapreduce',
+      owner = 'mapred',
+      group = 'hadoop',
+      create_parents = True,
+      cd_access = 'a',
+    )
+    self.assertResourceCalled('Directory', '/var/run/hadoop-mapreduce/mapred',
+      owner = 'mapred',
+      group = 'hadoop',
+      create_parents = True,
+      cd_access = 'a',
+    )
+    self.assertResourceCalled('Directory', '/var/log/hadoop-mapreduce',
+      owner = 'mapred',
+      group = 'hadoop',
+      create_parents = True,
+      cd_access = 'a',
+    )
+    self.assertResourceCalled('Directory', '/var/log/hadoop-mapreduce/mapred',
+      owner = 'mapred',
+      group = 'hadoop',
+      create_parents = True,
+      cd_access = 'a',
+    )
+    self.assertResourceCalled('Directory', '/var/log/hadoop-yarn',
+      owner = 'yarn',
+      group = 'hadoop',
+      create_parents = True,
+      ignore_failures = True,
+      cd_access = 'a',
+    )
+
     self.assertResourceCalled('HdfsResource', '/app-logs',
         immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
         security_enabled = True,
@@ -575,55 +624,6 @@ class TestHistoryServer(RMFTestCase):
       cd_access = 'a',
       recursive_ownership = True,
     )
-    self.assertResourceCalled('Directory', '/var/run/hadoop-yarn',
-      owner = 'yarn',
-      group = 'hadoop',
-      create_parents = True,
-      cd_access = 'a',
-    )
-    self.assertResourceCalled('Directory', '/var/run/hadoop-yarn/yarn',
-      owner = 'yarn',
-      group = 'hadoop',
-      create_parents = True,
-      cd_access = 'a',
-    )
-    self.assertResourceCalled('Directory', '/var/log/hadoop-yarn/yarn',
-      owner = 'yarn',
-      group = 'hadoop',
-      create_parents = True,
-      cd_access = 'a',
-    )
-    self.assertResourceCalled('Directory', '/var/run/hadoop-mapreduce',
-      owner = 'mapred',
-      group = 'hadoop',
-      create_parents = True,
-      cd_access = 'a',
-    )
-    self.assertResourceCalled('Directory', '/var/run/hadoop-mapreduce/mapred',
-      owner = 'mapred',
-      group = 'hadoop',
-      create_parents = True,
-      cd_access = 'a',
-    )
-    self.assertResourceCalled('Directory', '/var/log/hadoop-mapreduce',
-      owner = 'mapred',
-      group = 'hadoop',
-      create_parents = True,
-      cd_access = 'a',
-    )
-    self.assertResourceCalled('Directory', '/var/log/hadoop-mapreduce/mapred',
-      owner = 'mapred',
-      group = 'hadoop',
-      create_parents = True,
-      cd_access = 'a',
-    )
-    self.assertResourceCalled('Directory', '/var/log/hadoop-yarn',
-      owner = 'yarn',
-      group = 'hadoop',
-      create_parents = True,
-      ignore_failures = True,
-      cd_access = 'a',
-    )
     self.assertResourceCalled('XmlConfig', 'core-site.xml',
       owner = 'hdfs',
       group = 'hadoop',

http://git-wip-us.apache.org/repos/asf/ambari/blob/c1f9aba7/ambari-server/src/test/python/stacks/2.0.6/YARN/test_nodemanager.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/YARN/test_nodemanager.py b/ambari-server/src/test/python/stacks/2.0.6/YARN/test_nodemanager.py
index ab5e2cd..fbc1b85 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/YARN/test_nodemanager.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/YARN/test_nodemanager.py
@@ -133,6 +133,56 @@ class TestNodeManager(RMFTestCase):
     self.assertNoMoreResources()
 
   def assert_configure_default(self):
+    self.assertResourceCalled('Directory', '/var/run/hadoop-yarn',
+      owner = 'yarn',
+      group = 'hadoop',
+      create_parents = True,
+      cd_access = 'a',
+    )
+    self.assertResourceCalled('Directory', '/var/run/hadoop-yarn/yarn',
+      owner = 'yarn',
+      group = 'hadoop',
+      create_parents = True,
+      cd_access = 'a',
+    )
+    self.assertResourceCalled('Directory', '/var/log/hadoop-yarn/yarn',
+      owner = 'yarn',
+      group = 'hadoop',
+      create_parents = True,
+      cd_access = 'a',
+    )
+    self.assertResourceCalled('Directory', '/var/run/hadoop-mapreduce',
+      owner = 'mapred',
+      group = 'hadoop',
+      create_parents = True,
+      cd_access = 'a',
+    )
+    self.assertResourceCalled('Directory', '/var/run/hadoop-mapreduce/mapred',
+      owner = 'mapred',
+      group = 'hadoop',
+      create_parents = True,
+      cd_access = 'a',
+    )
+    self.assertResourceCalled('Directory', '/var/log/hadoop-mapreduce',
+      owner = 'mapred',
+      group = 'hadoop',
+      create_parents = True,
+      cd_access = 'a',
+    )
+    self.assertResourceCalled('Directory', '/var/log/hadoop-mapreduce/mapred',
+      owner = 'mapred',
+      group = 'hadoop',
+      create_parents = True,
+      cd_access = 'a',
+    )
+    self.assertResourceCalled('Directory', '/var/log/hadoop-yarn',
+      owner = 'yarn',
+      group = 'hadoop',
+      create_parents = True,
+      ignore_failures = True,
+      cd_access = 'a',
+    )
+
     self.assertResourceCalled('Directory', '/var/lib/ambari-agent/data/yarn',
         create_parents = True,
         mode = 0755,
@@ -185,55 +235,7 @@ class TestNodeManager(RMFTestCase):
         group = 'hadoop',
         mode = 0644,
     )
-    self.assertResourceCalled('Directory', '/var/run/hadoop-yarn',
-      owner = 'yarn',
-      group = 'hadoop',
-      create_parents = True,
-      cd_access = 'a',
-    )
-    self.assertResourceCalled('Directory', '/var/run/hadoop-yarn/yarn',
-      owner = 'yarn',
-      group = 'hadoop',
-      create_parents = True,
-      cd_access = 'a',
-    )
-    self.assertResourceCalled('Directory', '/var/log/hadoop-yarn/yarn',
-      owner = 'yarn',
-      group = 'hadoop',
-      create_parents = True,
-      cd_access = 'a',
-    )
-    self.assertResourceCalled('Directory', '/var/run/hadoop-mapreduce',
-      owner = 'mapred',
-      group = 'hadoop',
-      create_parents = True,
-      cd_access = 'a',
-    )
-    self.assertResourceCalled('Directory', '/var/run/hadoop-mapreduce/mapred',
-      owner = 'mapred',
-      group = 'hadoop',
-      create_parents = True,
-      cd_access = 'a',
-    )
-    self.assertResourceCalled('Directory', '/var/log/hadoop-mapreduce',
-      owner = 'mapred',
-      group = 'hadoop',
-      create_parents = True,
-      cd_access = 'a',
-    )
-    self.assertResourceCalled('Directory', '/var/log/hadoop-mapreduce/mapred',
-      owner = 'mapred',
-      group = 'hadoop',
-      create_parents = True,
-      cd_access = 'a',
-    )
-    self.assertResourceCalled('Directory', '/var/log/hadoop-yarn',
-      owner = 'yarn',
-      group = 'hadoop',
-      create_parents = True,
-      ignore_failures = True,
-      cd_access = 'a',
-    )
+
     self.assertResourceCalled('XmlConfig', 'core-site.xml',
       owner = 'hdfs',
       group = 'hadoop',
@@ -340,53 +342,6 @@ class TestNodeManager(RMFTestCase):
                               )
 
   def assert_configure_secured(self):
-    self.assertResourceCalled('Directory', '/hadoop/yarn/local',
-                              action = ['delete']
-    )
-    self.assertResourceCalled('Directory', '/hadoop/yarn/log',
-                              action = ['delete']
-    )
-    self.assertResourceCalled('Directory', '/var/lib/hadoop-yarn',)
-    self.assertResourceCalled('File', '/var/lib/hadoop-yarn/nm_security_enabled',
-                              content= 'Marker file to track first start after enabling/disabling security. During first start yarn local, log dirs are removed and recreated'
-    )
-    self.assertResourceCalled('Directory', '/var/lib/ambari-agent/data/yarn',
-        create_parents = True,
-        mode = 0755,
-    )
-    self.assertResourceCalled('Directory', '/hadoop/yarn/log',
-                              owner = 'yarn',
-                              group = 'hadoop',
-                              create_parents = True,
-                              ignore_failures = True,
-                              mode = 0775,
-                              cd_access='a',
-                              )
-    self.assertResourceCalled('File', '/var/lib/ambari-agent/data/yarn/yarn_log_dir_mount.hist',
-        content = '\n# This file keeps track of the last known mount-point for each dir.\n# It is safe to delete, since it will get regenerated the next time that the component of the service starts.\n# However, it is not advised to delete this file since Ambari may\n# re-create a dir that used to be mounted on a drive but is now mounted on the root.\n# Comments begin with a hash (#) symbol\n# dir,mount_point\n',
-        owner = 'hdfs',
-        group = 'hadoop',
-        mode = 0644,
-    )
-    self.assertResourceCalled('Directory', '/var/lib/ambari-agent/data/yarn',
-        create_parents = True,
-        mode = 0755,
-    )
-    self.assertResourceCalled('Directory', '/hadoop/yarn/local',
-                              owner = 'yarn',
-                              group = 'hadoop',
-                              create_parents = True,
-                              ignore_failures = True,
-                              mode = 0755,
-                              cd_access='a',
-                              recursive_mode_flags = {'d': 'a+rwx', 'f': 'a+rw'},
-                              )
-    self.assertResourceCalled('File', '/var/lib/ambari-agent/data/yarn/yarn_local_dir_mount.hist',
-        content = '\n# This file keeps track of the last known mount-point for each dir.\n# It is safe to delete, since it will get regenerated the next time that the component of the service starts.\n# However, it is not advised to delete this file since Ambari may\n# re-create a dir that used to be mounted on a drive but is now mounted on the root.\n# Comments begin with a hash (#) symbol\n# dir,mount_point\n',
-        owner = 'hdfs',
-        group = 'hadoop',
-        mode = 0644,
-    )
     self.assertResourceCalled('Directory', '/var/run/hadoop-yarn',
       owner = 'yarn',
       group = 'hadoop',
@@ -436,6 +391,55 @@ class TestNodeManager(RMFTestCase):
       ignore_failures = True,
       cd_access = 'a',
     )
+
+    self.assertResourceCalled('Directory', '/hadoop/yarn/local',
+                              action = ['delete']
+    )
+    self.assertResourceCalled('Directory', '/hadoop/yarn/log',
+                              action = ['delete']
+    )
+    self.assertResourceCalled('Directory', '/var/lib/hadoop-yarn',)
+    self.assertResourceCalled('File', '/var/lib/hadoop-yarn/nm_security_enabled',
+                              content= 'Marker file to track first start after enabling/disabling security. During first start yarn local, log dirs are removed and recreated'
+    )
+    self.assertResourceCalled('Directory', '/var/lib/ambari-agent/data/yarn',
+        create_parents = True,
+        mode = 0755,
+    )
+    self.assertResourceCalled('Directory', '/hadoop/yarn/log',
+                              owner = 'yarn',
+                              group = 'hadoop',
+                              create_parents = True,
+                              ignore_failures = True,
+                              mode = 0775,
+                              cd_access='a',
+                              )
+    self.assertResourceCalled('File', '/var/lib/ambari-agent/data/yarn/yarn_log_dir_mount.hist',
+        content = '\n# This file keeps track of the last known mount-point for each dir.\n# It is safe to delete, since it will get regenerated the next time that the component of the service starts.\n# However, it is not advised to delete this file since Ambari may\n# re-create a dir that used to be mounted on a drive but is now mounted on the root.\n# Comments begin with a hash (#) symbol\n# dir,mount_point\n',
+        owner = 'hdfs',
+        group = 'hadoop',
+        mode = 0644,
+    )
+    self.assertResourceCalled('Directory', '/var/lib/ambari-agent/data/yarn',
+        create_parents = True,
+        mode = 0755,
+    )
+    self.assertResourceCalled('Directory', '/hadoop/yarn/local',
+                              owner = 'yarn',
+                              group = 'hadoop',
+                              create_parents = True,
+                              ignore_failures = True,
+                              mode = 0755,
+                              cd_access='a',
+                              recursive_mode_flags = {'d': 'a+rwx', 'f': 'a+rw'},
+                              )
+    self.assertResourceCalled('File', '/var/lib/ambari-agent/data/yarn/yarn_local_dir_mount.hist',
+        content = '\n# This file keeps track of the last known mount-point for each dir.\n# It is safe to delete, since it will get regenerated the next time that the component of the service starts.\n# However, it is not advised to delete this file since Ambari may\n# re-create a dir that used to be mounted on a drive but is now mounted on the root.\n# Comments begin with a hash (#) symbol\n# dir,mount_point\n',
+        owner = 'hdfs',
+        group = 'hadoop',
+        mode = 0644,
+    )
+
     self.assertResourceCalled('XmlConfig', 'core-site.xml',
       owner = 'hdfs',
       group = 'hadoop',

http://git-wip-us.apache.org/repos/asf/ambari/blob/c1f9aba7/ambari-server/src/test/python/stacks/2.0.6/YARN/test_resourcemanager.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/YARN/test_resourcemanager.py b/ambari-server/src/test/python/stacks/2.0.6/YARN/test_resourcemanager.py
index b7f90c8..a63caeb 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/YARN/test_resourcemanager.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/YARN/test_resourcemanager.py
@@ -178,19 +178,6 @@ class TestResourceManager(RMFTestCase):
     self.assertNoMoreResources()
 
   def assert_configure_default(self):
-    self.assertResourceCalled('Directory', '/etc/hadoop/conf',
-        mode = 0755,
-        create_parents = True,
-        cd_access = 'a',
-    )
-    self.assertResourceCalled('File', '/etc/hadoop/conf/yarn.exclude',
-        owner = 'yarn',
-        group = 'hadoop',
-    )
-    self.assertResourceCalled('File', '/var/log/hadoop-yarn/yarn/hadoop-mapreduce.jobsummary.log',
-      owner = 'yarn',
-      group = 'hadoop',
-    )
     self.assertResourceCalled('Directory', '/var/run/hadoop-yarn',
       owner = 'yarn',
       group = 'hadoop',
@@ -240,6 +227,19 @@ class TestResourceManager(RMFTestCase):
       ignore_failures = True,
       cd_access = 'a',
     )
+    self.assertResourceCalled('Directory', '/etc/hadoop/conf',
+        mode = 0755,
+        create_parents = True,
+        cd_access = 'a',
+    )
+    self.assertResourceCalled('File', '/etc/hadoop/conf/yarn.exclude',
+        owner = 'yarn',
+        group = 'hadoop',
+    )
+    self.assertResourceCalled('File', '/var/log/hadoop-yarn/yarn/hadoop-mapreduce.jobsummary.log',
+      owner = 'yarn',
+      group = 'hadoop',
+    )
     self.assertResourceCalled('XmlConfig', 'core-site.xml',
       owner = 'hdfs',
       group = 'hadoop',
@@ -346,19 +346,6 @@ class TestResourceManager(RMFTestCase):
                               )
 
   def assert_configure_secured(self):
-    self.assertResourceCalled('Directory', '/etc/hadoop/conf',
-        mode = 0755,
-        create_parents = True,
-        cd_access = 'a',
-    )
-    self.assertResourceCalled('File', '/etc/hadoop/conf/yarn.exclude',
-        owner = 'yarn',
-        group = 'hadoop',
-    )
-    self.assertResourceCalled('File', '/var/log/hadoop-yarn/yarn/hadoop-mapreduce.jobsummary.log',
-      owner = 'yarn',
-      group = 'hadoop',
-    )
     self.assertResourceCalled('Directory', '/var/run/hadoop-yarn',
       owner = 'yarn',
       group = 'hadoop',
@@ -408,6 +395,19 @@ class TestResourceManager(RMFTestCase):
       ignore_failures = True,
       cd_access = 'a',
     )
+    self.assertResourceCalled('Directory', '/etc/hadoop/conf',
+        mode = 0755,
+        create_parents = True,
+        cd_access = 'a',
+    )
+    self.assertResourceCalled('File', '/etc/hadoop/conf/yarn.exclude',
+        owner = 'yarn',
+        group = 'hadoop',
+    )
+    self.assertResourceCalled('File', '/var/log/hadoop-yarn/yarn/hadoop-mapreduce.jobsummary.log',
+      owner = 'yarn',
+      group = 'hadoop',
+    )
     self.assertResourceCalled('XmlConfig', 'core-site.xml',
       owner = 'hdfs',
       group = 'hadoop',

http://git-wip-us.apache.org/repos/asf/ambari/blob/c1f9aba7/ambari-server/src/test/python/stacks/2.1/YARN/test_apptimelineserver.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.1/YARN/test_apptimelineserver.py b/ambari-server/src/test/python/stacks/2.1/YARN/test_apptimelineserver.py
index 40db813..5730783 100644
--- a/ambari-server/src/test/python/stacks/2.1/YARN/test_apptimelineserver.py
+++ b/ambari-server/src/test/python/stacks/2.1/YARN/test_apptimelineserver.py
@@ -92,26 +92,6 @@ class TestAppTimelineServer(RMFTestCase):
     self.assertNoMoreResources()
 
   def assert_configure_default(self):
-    self.assertResourceCalled('Directory', '/var/log/hadoop-yarn/timeline',
-                              owner = 'yarn',
-                              group = 'hadoop',
-                              create_parents = True,
-                              cd_access='a'
-                              )
-    self.assertResourceCalled('HdfsResource', None,
-                              immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
-                              security_enabled = False,
-                              hadoop_bin_dir = '/usr/bin',
-                              keytab = UnknownConfigurationMock(),
-                              default_fs = 'hdfs://c6401.ambari.apache.org:8020',
-                              dfs_type = '',
-                              hdfs_site = self.getConfig()['configurations']['hdfs-site'],
-                              kinit_path_local = '/usr/bin/kinit',
-                              principal_name = UnknownConfigurationMock(),
-                              user = 'hdfs',
-                              action = ['execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore',
-                              hadoop_conf_dir = '/etc/hadoop/conf',
-                              )
     self.assertResourceCalled('Directory', '/var/run/hadoop-yarn',
                               owner = 'yarn',
                               group = 'hadoop',
@@ -161,6 +141,26 @@ class TestAppTimelineServer(RMFTestCase):
                               ignore_failures = True,
                               cd_access = 'a',
                               )
+    self.assertResourceCalled('Directory', '/var/log/hadoop-yarn/timeline',
+                              owner = 'yarn',
+                              group = 'hadoop',
+                              create_parents = True,
+                              cd_access='a'
+                              )
+    self.assertResourceCalled('HdfsResource', None,
+                              immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
+                              security_enabled = False,
+                              hadoop_bin_dir = '/usr/bin',
+                              keytab = UnknownConfigurationMock(),
+                              default_fs = 'hdfs://c6401.ambari.apache.org:8020',
+                              dfs_type = '',
+                              hdfs_site = self.getConfig()['configurations']['hdfs-site'],
+                              kinit_path_local = '/usr/bin/kinit',
+                              principal_name = UnknownConfigurationMock(),
+                              user = 'hdfs',
+                              action = ['execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore',
+                              hadoop_conf_dir = '/etc/hadoop/conf',
+                              )
     self.assertResourceCalled('XmlConfig', 'core-site.xml',
                               owner = 'hdfs',
                               group = 'hadoop',

http://git-wip-us.apache.org/repos/asf/ambari/blob/c1f9aba7/ambari-server/src/test/python/stacks/2.3/YARN/test_ats_1_5.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.3/YARN/test_ats_1_5.py b/ambari-server/src/test/python/stacks/2.3/YARN/test_ats_1_5.py
index b523412..b46a0ed 100644
--- a/ambari-server/src/test/python/stacks/2.3/YARN/test_ats_1_5.py
+++ b/ambari-server/src/test/python/stacks/2.3/YARN/test_ats_1_5.py
@@ -50,6 +50,55 @@ class TestAts(RMFTestCase):
     self.assertNoMoreResources()
 
   def assert_configure_default(self):
+    self.assertResourceCalled('Directory', '/var/run/hadoop-yarn',
+                              owner = 'yarn',
+                              group = 'hadoop',
+                              create_parents = True,
+                              cd_access = 'a',
+                              )
+    self.assertResourceCalled('Directory', '/var/run/hadoop-yarn/yarn',
+                              owner = 'yarn',
+                              group = 'hadoop',
+                              create_parents = True,
+                              cd_access = 'a',
+                              )
+    self.assertResourceCalled('Directory', '/var/log/hadoop-yarn/yarn',
+                              owner = 'yarn',
+                              group = 'hadoop',
+                              create_parents = True,
+                              cd_access = 'a',
+                              )
+    self.assertResourceCalled('Directory', '/var/run/hadoop-mapreduce',
+                              owner = 'mapred',
+                              group = 'hadoop',
+                              create_parents = True,
+                              cd_access = 'a',
+                              )
+    self.assertResourceCalled('Directory', '/var/run/hadoop-mapreduce/mapred',
+                              owner = 'mapred',
+                              group = 'hadoop',
+                              create_parents = True,
+                              cd_access = 'a',
+                              )
+    self.assertResourceCalled('Directory', '/var/log/hadoop-mapreduce',
+                              owner = 'mapred',
+                              group = 'hadoop',
+                              create_parents = True,
+                              cd_access = 'a',
+                              )
+    self.assertResourceCalled('Directory', '/var/log/hadoop-mapreduce/mapred',
+                              owner = 'mapred',
+                              group = 'hadoop',
+                              create_parents = True,
+                              cd_access = 'a',
+                              )
+    self.assertResourceCalled('Directory', '/var/log/hadoop-yarn',
+                              owner = 'yarn',
+                              group = 'hadoop',
+                              ignore_failures = True,
+                              create_parents = True,
+                              cd_access = 'a',
+                              )
     self.assertResourceCalled('Directory', '/var/log/hadoop-yarn/timeline',
                               owner = 'yarn',
                               group = 'hadoop',
@@ -144,55 +193,6 @@ class TestAts(RMFTestCase):
                               action = ['execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore',
                               hadoop_conf_dir = '/etc/hadoop/conf',
                               )
-    self.assertResourceCalled('Directory', '/var/run/hadoop-yarn',
-                              owner = 'yarn',
-                              group = 'hadoop',
-                              create_parents = True,
-                              cd_access = 'a',
-                              )
-    self.assertResourceCalled('Directory', '/var/run/hadoop-yarn/yarn',
-                              owner = 'yarn',
-                              group = 'hadoop',
-                              create_parents = True,
-                              cd_access = 'a',
-                              )
-    self.assertResourceCalled('Directory', '/var/log/hadoop-yarn/yarn',
-                              owner = 'yarn',
-                              group = 'hadoop',
-                              create_parents = True,
-                              cd_access = 'a',
-                              )
-    self.assertResourceCalled('Directory', '/var/run/hadoop-mapreduce',
-                              owner = 'mapred',
-                              group = 'hadoop',
-                              create_parents = True,
-                              cd_access = 'a',
-                              )
-    self.assertResourceCalled('Directory', '/var/run/hadoop-mapreduce/mapred',
-                              owner = 'mapred',
-                              group = 'hadoop',
-                              create_parents = True,
-                              cd_access = 'a',
-                              )
-    self.assertResourceCalled('Directory', '/var/log/hadoop-mapreduce',
-                              owner = 'mapred',
-                              group = 'hadoop',
-                              create_parents = True,
-                              cd_access = 'a',
-                              )
-    self.assertResourceCalled('Directory', '/var/log/hadoop-mapreduce/mapred',
-                              owner = 'mapred',
-                              group = 'hadoop',
-                              create_parents = True,
-                              cd_access = 'a',
-                              )
-    self.assertResourceCalled('Directory', '/var/log/hadoop-yarn',
-                              owner = 'yarn',
-                              group = 'hadoop',
-                              ignore_failures = True,
-                              create_parents = True,
-                              cd_access = 'a',
-                              )
     self.assertResourceCalled('XmlConfig', 'core-site.xml',
                               group = 'hadoop',
                               conf_dir = '/etc/hadoop/conf',


[24/50] [abbrv] ambari git commit: AMBARI-20787. Ambari agent should log how it determines its FQDN. (stoader)

Posted by ao...@apache.org.
AMBARI-20787. Ambari agent should log how it determines its FQDN. (stoader)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/53dcaa8d
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/53dcaa8d
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/53dcaa8d

Branch: refs/heads/branch-3.0-perf
Commit: 53dcaa8d2c3b401f47a258c73398ef8746a230f6
Parents: 8ead189
Author: Toader, Sebastian <st...@hortonworks.com>
Authored: Fri Apr 21 08:11:31 2017 +0200
Committer: Andrew Onishuk <ao...@hortonworks.com>
Committed: Mon Apr 24 14:59:56 2017 +0300

----------------------------------------------------------------------
 ambari-agent/src/main/python/ambari_agent/Hardware.py | 2 ++
 ambari-agent/src/main/python/ambari_agent/hostname.py | 8 ++++++++
 2 files changed, 10 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/53dcaa8d/ambari-agent/src/main/python/ambari_agent/Hardware.py
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/python/ambari_agent/Hardware.py b/ambari-agent/src/main/python/ambari_agent/Hardware.py
index 8cb8a28..696438e 100644
--- a/ambari-agent/src/main/python/ambari_agent/Hardware.py
+++ b/ambari-agent/src/main/python/ambari_agent/Hardware.py
@@ -45,11 +45,13 @@ class Hardware:
   LINUX_PATH_SEP = "/"
 
   def __init__(self, config):
+    logger.info("Initializing host system information.")
     self.hardware = {
       'mounts': Hardware.osdisks()
     }
     self.config = config
     self.hardware.update(Facter(self.config).facterInfo())
+    logger.info("Host system information: %s", self.hardware)
 
   @classmethod
   def _parse_df_line(cls, line):

http://git-wip-us.apache.org/repos/asf/ambari/blob/53dcaa8d/ambari-agent/src/main/python/ambari_agent/hostname.py
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/python/ambari_agent/hostname.py b/ambari-agent/src/main/python/ambari_agent/hostname.py
index 0f5f069..357c6b0 100644
--- a/ambari-agent/src/main/python/ambari_agent/hostname.py
+++ b/ambari-agent/src/main/python/ambari_agent/hostname.py
@@ -23,6 +23,7 @@ import subprocess
 import urllib2
 import logging
 import traceback
+import sys
 
 logger = logging.getLogger(__name__)
 
@@ -52,12 +53,19 @@ def hostname(config):
       out, err = osStat.communicate()
       if (0 == osStat.returncode and 0 != len(out.strip())):
         cached_hostname = out.strip()
+        logger.info("Read hostname '{0}' using agent:hostname_script '{1}'".format(cached_hostname, scriptname))
       else:
+        logger.warn("Execution of '{0}' failed with exit code {1}. err='{2}'\nout='{3}'".format(scriptname, osStat.returncode, err.strip(), out.strip()))
         cached_hostname = socket.getfqdn()
+        logger.info("Read hostname '{0}' using socket.getfqdn() as '{1}' failed".format(cached_hostname, scriptname))
     except:
       cached_hostname = socket.getfqdn()
+      logger.warn("Unexpected error while retrieving hostname: '{0}', defaulting to socket.getfqdn()".format(sys.exc_info()))
+      logger.info("Read hostname '{0}' using socket.getfqdn().".format(cached_hostname))
   except:
     cached_hostname = socket.getfqdn()
+    logger.info("agent:hostname_script configuration not defined thus read hostname '{0}' using socket.getfqdn().".format(cached_hostname))
+
   cached_hostname = cached_hostname.lower()
   return cached_hostname
 

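The logging added above covers each branch of the agent's hostname resolution: the script output when agent:hostname_script is configured and succeeds, socket.getfqdn() otherwise. A condensed, hedged sketch of that order (function and variable names here are illustrative, not the agent's exact API):

    import logging
    import socket
    import subprocess

    logger = logging.getLogger(__name__)

    def resolve_hostname(hostname_script):
      try:
        p = subprocess.Popen(hostname_script, stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE, shell=True)
        out, err = p.communicate()
        if p.returncode == 0 and out.strip():
          logger.info("Read hostname '%s' using hostname script '%s'", out.strip(), hostname_script)
          return out.strip().lower()
        logger.warn("Hostname script '%s' failed with exit code %s; falling back to socket.getfqdn()",
                    hostname_script, p.returncode)
      except Exception:
        logger.warn("Error running hostname script; falling back to socket.getfqdn()", exc_info=True)
      fqdn = socket.getfqdn()
      logger.info("Read hostname '%s' using socket.getfqdn()", fqdn)
      return fqdn.lower()
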

[50/50] [abbrv] ambari git commit: AMBARI-20743. Prototype server websocket endpoint with STOMP sub-protocol. (mpapirkovskyy)

Posted by ao...@apache.org.
AMBARI-20743. Prototype server websocket endpoint with STOMP sub-protocol. (mpapirkovskyy)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/8cc384ca
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/8cc384ca
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/8cc384ca

Branch: refs/heads/branch-3.0-perf
Commit: 8cc384cae0eba1705a7de77eb5747b96c0bc0b21
Parents: fb2ba49
Author: Myroslav Papirkovskyi <mp...@hortonworks.com>
Authored: Mon Apr 10 18:48:54 2017 +0300
Committer: Andrew Onishuk <ao...@hortonworks.com>
Committed: Mon Apr 24 15:00:48 2017 +0300

----------------------------------------------------------------------
 .../java/org/apache/ambari/server/controller/AmbariServer.java    | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/8cc384ca/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariServer.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariServer.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariServer.java
index e10a6e7..3a3908d 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariServer.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariServer.java
@@ -311,7 +311,7 @@ public class AmbariServer {
     final SessionIdManager sessionIdManager = new DefaultSessionIdManager(server);
     sessionHandler.setSessionIdManager(sessionIdManager);
     server.setSessionIdManager(sessionIdManager);
-    
+
     // Agent Jetty thread pool - widen the thread pool if needed !
     Integer agentAcceptors = configs.getAgentApiAcceptors() != null ?
       configs.getAgentApiAcceptors() : DEFAULT_ACCEPTORS_COUNT;
@@ -319,7 +319,6 @@ public class AmbariServer {
     Server serverForAgent = configureJettyThreadPool(agentAcceptors * 2,
       AGENT_THREAD_POOL_NAME, configs.getAgentThreadPoolSize());
 
-
     setSystemProperties(configs);
 
     runDatabaseConsistencyCheck();


[23/50] [abbrv] ambari git commit: AMBARI-20813. Convert ambari-web build process to use yarn package manager to fix dependencies and to make the process faster. (yusaku)

Posted by ao...@apache.org.
AMBARI-20813. Convert ambari-web build process to use yarn package manager to fix dependencies and to make the process faster. (yusaku)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/8ead1899
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/8ead1899
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/8ead1899

Branch: refs/heads/branch-3.0-perf
Commit: 8ead1899099b9125133ee1d33921be14382ca73a
Parents: 78048fc
Author: Yusaku Sako <yu...@hortonworks.com>
Authored: Thu Apr 20 17:57:30 2017 -0700
Committer: Andrew Onishuk <ao...@hortonworks.com>
Committed: Mon Apr 24 14:59:56 2017 +0300

----------------------------------------------------------------------
 ambari-web/yarn.lock | 4170 +++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 4170 insertions(+)
----------------------------------------------------------------------



[18/50] [abbrv] ambari git commit: AMBARI-20781. Hadoop QA test results link is broken

Posted by ao...@apache.org.
AMBARI-20781. Hadoop QA test results link is broken


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/1cd9d1e0
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/1cd9d1e0
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/1cd9d1e0

Branch: refs/heads/branch-3.0-perf
Commit: 1cd9d1e0fa4357c2c073d6430809095344d5480e
Parents: c77e4d2
Author: Attila Doroszlai <ad...@hortonworks.com>
Authored: Thu Apr 20 18:27:31 2017 +0200
Committer: Andrew Onishuk <ao...@hortonworks.com>
Committed: Mon Apr 24 14:59:56 2017 +0300

----------------------------------------------------------------------
 dev-support/test-patch.sh | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/1cd9d1e0/dev-support/test-patch.sh
----------------------------------------------------------------------
diff --git a/dev-support/test-patch.sh b/dev-support/test-patch.sh
index 934e092..6ed8322 100755
--- a/dev-support/test-patch.sh
+++ b/dev-support/test-patch.sh
@@ -665,6 +665,7 @@ runTests () {
     done
 
     test_logfile=$PATCH_DIR/testrun_${module_suffix}.txt
+    test_logfile_url=$BUILD_URL/artifact/patch-work/testrun_${module_suffix}.txt
     echo "  Running tests in $module"
 
     # Skip java tests if this module did not have changes to java files
@@ -691,7 +692,7 @@ $module_test_timeouts"
 ${module_failed_tests}"
     fi
     if [[ $test_build_result != 0 && -z "$module_failed_tests" && -z "$module_test_timeouts" ]] ; then
-      failed_test_builds="$module $failed_test_builds"
+      failed_test_builds="[$module|$test_logfile_url] $failed_test_builds"
     fi
     cd -
   done
@@ -995,8 +996,6 @@ if [[ $JENKINS == "true" || $RUN_TESTS == "true" ]] ; then
 fi
 checkInjectSystemFaults
 (( RESULT = RESULT + $? ))
-JIRA_COMMENT_FOOTER="Test results: $BUILD_URL/testReport/
-$JIRA_COMMENT_FOOTER"
 
 submitJiraComment $RESULT
 cleanupAndExit $RESULT

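The new failure entry uses JIRA wiki-link markup, [text|url], so the submitted
comment renders each failed module as a link to its test log artifact. Roughly,
as an illustrative sketch (the URLs here are hypothetical):

    def failed_build_entry(module, build_url, module_suffix):
        # Mirrors: failed_test_builds="[$module|$test_logfile_url] $failed_test_builds"
        log_url = "%s/artifact/patch-work/testrun_%s.txt" % (build_url, module_suffix)
        return "[%s|%s]" % (module, log_url)

    print(failed_build_entry("ambari-server", "https://builds.example/job/42",
                             "ambari_server"))
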

[20/50] [abbrv] ambari git commit: AMBARI-20808. Create schema requests to upgrade solr schema (do not change it externally) (oleewere)

Posted by ao...@apache.org.
AMBARI-20808. Create schema requests to upgrade solr schema (do not change it externally) (oleewere)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/dfb34498
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/dfb34498
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/dfb34498

Branch: refs/heads/branch-3.0-perf
Commit: dfb34498bdda159b6cbc86064a612439d49a75ea
Parents: ae5c17e
Author: oleewere <ol...@gmail.com>
Authored: Thu Apr 20 21:32:07 2017 +0200
Committer: Andrew Onishuk <ao...@hortonworks.com>
Committed: Mon Apr 24 14:59:56 2017 +0300

----------------------------------------------------------------------
 .../configurer/SolrCollectionConfigurer.java    |   4 +-
 .../handler/AbstractSolrConfigHandler.java      | 116 ++++++++++++++++
 .../logsearch/handler/UpgradeSchemaHandler.java | 139 +++++++++++++++++++
 .../handler/UploadConfigurationHandler.java     | 136 +++++-------------
 4 files changed, 293 insertions(+), 102 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/dfb34498/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/configurer/SolrCollectionConfigurer.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/configurer/SolrCollectionConfigurer.java b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/configurer/SolrCollectionConfigurer.java
index 2a4c790..2f56812 100644
--- a/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/configurer/SolrCollectionConfigurer.java
+++ b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/configurer/SolrCollectionConfigurer.java
@@ -25,6 +25,7 @@ import org.apache.ambari.logsearch.handler.ACLHandler;
 import org.apache.ambari.logsearch.handler.CreateCollectionHandler;
 import org.apache.ambari.logsearch.handler.ListCollectionHandler;
 import org.apache.ambari.logsearch.handler.ReloadCollectionHandler;
+import org.apache.ambari.logsearch.handler.UpgradeSchemaHandler;
 import org.apache.ambari.logsearch.handler.UploadConfigurationHandler;
 import org.apache.commons.lang.StringUtils;
 import org.apache.solr.client.solrj.impl.CloudSolrClient;
@@ -103,7 +104,8 @@ public class SolrCollectionConfigurer implements Configurer {
     if (!state.isConfigurationUploaded()) {
       state.setConfigurationUploaded(true);
     }
-    return reloadCollectionNeeded;
+    boolean upgradeSchema = new UpgradeSchemaHandler(cloudSolrClient, configSetFolder).handle(cloudSolrClient, solrPropsConfig);
+    return reloadCollectionNeeded || upgradeSchema;
   }
 
   public boolean stopSetupCondition(SolrCollectionState state) {

http://git-wip-us.apache.org/repos/asf/ambari/blob/dfb34498/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/handler/AbstractSolrConfigHandler.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/handler/AbstractSolrConfigHandler.java b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/handler/AbstractSolrConfigHandler.java
new file mode 100644
index 0000000..8693157
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/handler/AbstractSolrConfigHandler.java
@@ -0,0 +1,116 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.ambari.logsearch.handler;
+
+import org.apache.ambari.logsearch.conf.SolrPropsConfig;
+import org.apache.commons.io.FileUtils;
+import org.apache.solr.client.solrj.impl.CloudSolrClient;
+import org.apache.solr.common.cloud.SolrZkClient;
+import org.apache.solr.common.cloud.ZkConfigManager;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.File;
+import java.io.IOException;
+import java.nio.file.FileSystems;
+import java.nio.file.Paths;
+import java.util.UUID;
+
+public abstract class AbstractSolrConfigHandler implements SolrZkRequestHandler<Boolean> {
+
+  private static final Logger LOG = LoggerFactory.getLogger(AbstractSolrConfigHandler.class);
+
+  private File configSetFolder;
+
+  public AbstractSolrConfigHandler(File configSetFolder) {
+    this.configSetFolder = configSetFolder;
+  }
+
+  @Override
+  public Boolean handle(CloudSolrClient solrClient, SolrPropsConfig solrPropsConfig) throws Exception {
+    boolean reloadCollectionNeeded = false;
+    String separator = FileSystems.getDefault().getSeparator();
+    String downloadFolderLocation = String.format("%s%s%s%s%s", System.getProperty("java.io.tmpdir"), separator,
+      UUID.randomUUID().toString(), separator, solrPropsConfig.getConfigName());
+    solrClient.connect();
+    SolrZkClient zkClient = solrClient.getZkStateReader().getZkClient();
+    File tmpDir = new File(downloadFolderLocation);
+    try {
+      ZkConfigManager zkConfigManager = new ZkConfigManager(zkClient);
+      boolean configExists = zkConfigManager.configExists(solrPropsConfig.getConfigName());
+      if (configExists) {
+        reloadCollectionNeeded = doIfConfigExists(solrPropsConfig, zkClient, separator, downloadFolderLocation, tmpDir);
+      } else {
+        doIfConfigNotExist(solrPropsConfig, zkConfigManager);
+      }
+    } catch (Exception e) {
+      throw new RuntimeException(String.format("Cannot upload configurations to zk. (collection: %s, config set folder: %s)",
+        solrPropsConfig.getCollection(), solrPropsConfig.getConfigSetFolder()), e);
+    } finally {
+      if (tmpDir.exists()) {
+        try {
+          FileUtils.deleteDirectory(tmpDir);
+        } catch (IOException e){
+          LOG.error("Cannot delete temp directory.", e);
+        }
+      }
+    }
+    return reloadCollectionNeeded;
+  }
+
+  /**
+   * Update a config file (e.g. solrconfig.xml) in Solr's ZooKeeper znode. The download folder location passed in
+   * points at the copy pulled down from ZooKeeper and can be used to decide whether an update is needed at all.
+   */
+  public abstract boolean updateConfigIfNeeded(SolrPropsConfig solrPropsConfig, SolrZkClient zkClient, File file,
+                                               String separator, String downloadFolderLocation) throws IOException;
+
+  /**
+   * Config file name which should be uploaded to zookeeper
+   */
+  public abstract String getConfigFileName();
+
+  public void doIfConfigNotExist(SolrPropsConfig solrPropsConfig, ZkConfigManager zkConfigManager) throws IOException {
+    // Do nothing
+  }
+
+  public boolean doIfConfigExists(SolrPropsConfig solrPropsConfig, SolrZkClient zkClient, String separator, String downloadFolderLocation, File tmpDir) throws IOException {
+    boolean result = false;
+    LOG.info("Config set exists for '{}' collection. Refreshing it if needed...", solrPropsConfig.getCollection());
+    if (!tmpDir.mkdirs()) {
+      LOG.error("Cannot create directories for '{}'", tmpDir.getAbsolutePath());
+    }
+    ZkConfigManager zkConfigManager = new ZkConfigManager(zkClient);
+    zkConfigManager.downloadConfigDir(solrPropsConfig.getConfigName(), Paths.get(downloadFolderLocation));
+    File[] listOfFiles = getConfigSetFolder().listFiles();
+    if (listOfFiles != null) {
+      for (File file : listOfFiles) {
+        if (file.getName().equals(getConfigFileName()) && updateConfigIfNeeded(solrPropsConfig, zkClient, file, separator, downloadFolderLocation)) {
+          result = true;
+          break;
+        }
+      }
+    }
+    return result;
+  }
+
+  protected File getConfigSetFolder() {
+    return configSetFolder;
+  }
+}

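The handle() method above is a classic template method: connect, check whether the
config set exists in ZooKeeper, dispatch to doIfConfigExists()/doIfConfigNotExist(),
and always clean up the temp download directory; subclasses only supply
getConfigFileName() and updateConfigIfNeeded(). The same shape, as a rough Python
sketch (the zk client here is hypothetical, not the SolrZkClient API):

    import shutil
    import tempfile
    from abc import ABC, abstractmethod

    class SolrConfigHandler(ABC):
        def handle(self, zk, config_name):
            tmp_dir = tempfile.mkdtemp()              # scratch copy of the ZK config
            try:
                if zk.config_exists(config_name):
                    zk.download_config(config_name, tmp_dir)
                    return self.update_config_if_needed(zk, tmp_dir)
                self.do_if_config_not_exist(zk, config_name)
                return False                          # nothing to reload
            finally:
                shutil.rmtree(tmp_dir, ignore_errors=True)

        @abstractmethod
        def update_config_if_needed(self, zk, downloaded_dir):
            ...

        def do_if_config_not_exist(self, zk, config_name):
            pass                                      # default: no-op, as above
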
http://git-wip-us.apache.org/repos/asf/ambari/blob/dfb34498/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/handler/UpgradeSchemaHandler.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/handler/UpgradeSchemaHandler.java b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/handler/UpgradeSchemaHandler.java
new file mode 100644
index 0000000..0da2125
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/handler/UpgradeSchemaHandler.java
@@ -0,0 +1,139 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.ambari.logsearch.handler;
+
+import org.apache.ambari.logsearch.conf.SolrPropsConfig;
+import org.apache.commons.collections.CollectionUtils;
+import org.apache.commons.configuration.HierarchicalConfiguration.Node;
+import org.apache.commons.configuration.XMLConfiguration;
+import org.apache.solr.client.solrj.SolrServerException;
+import org.apache.solr.client.solrj.impl.CloudSolrClient;
+import org.apache.solr.client.solrj.request.schema.SchemaRequest;
+import org.apache.solr.common.cloud.SolrZkClient;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+public class UpgradeSchemaHandler extends AbstractSolrConfigHandler {
+
+  private static final Logger LOG = LoggerFactory.getLogger(UpgradeSchemaHandler.class);
+
+  private static final String SCHEMA_FILE = "managed-schema";
+  private static final String FIELD_NAME_PATH = "field[@name]";
+  private static final String FIELD_TYPE_NAME_PATH = "fieldType[@name]";
+  private static final String DYNAMIC_FIELD = "dynamicField";
+  private static final String DYNAMIC_FIELD_NAME_PATH = DYNAMIC_FIELD + "[@name]";
+
+  private CloudSolrClient cloudSolrClient;
+
+  private XMLConfiguration localFileXml;
+  private List<String> localDynamicFields;
+
+  public UpgradeSchemaHandler(CloudSolrClient cloudSolrClient, File configSetFolder) {
+    super(configSetFolder);
+    this.cloudSolrClient = cloudSolrClient;
+  }
+
+  @Override
+  public boolean updateConfigIfNeeded(SolrPropsConfig solrPropsConfig, SolrZkClient zkClient, File file, String separator, String downloadFolderLocation) throws IOException {
+    boolean result = false;
+    if (localSchemaFileHasMoreFields(file, new File(String.format("%s%s%s", downloadFolderLocation, separator, file.getName())))) {
+      LOG.info("Solr schema file differs ('{}'), update config schema...", file.getName());
+      try {
+        upgradeDynamicFields();
+      } catch (Exception e) {
+        throw new RuntimeException(e);
+      }
+      result = true;
+    }
+    return result;
+  }
+
+  // For now we only upgrade dynamic fields; this can be extended later if needed.
+  private void upgradeDynamicFields() throws IOException, SolrServerException {
+    if (localFileXml.getRoot() != null && CollectionUtils.isNotEmpty(localDynamicFields)) {
+      List<Node> children = localFileXml.getRoot().getChildren(DYNAMIC_FIELD);
+      for (Node dynamicFieldNode : children) {
+        List<Node> attributes = dynamicFieldNode.getAttributes();
+        Map<String, Object> attributesMap = new HashMap<>();
+        for (Node attribute : attributes) {
+          attributesMap.put(attribute.getName(), attribute.getValue());
+        }
+        if (attributesMap.get("name") != null && localDynamicFields.contains(attributesMap.get("name").toString())) {
+          SchemaRequest.AddDynamicField addDynamicFieldRequest = new SchemaRequest.AddDynamicField(attributesMap);
+          addDynamicFieldRequest.process(cloudSolrClient);
+          LOG.info("Added dynamic field request sent. (field name: {})", attributesMap.get("name"));
+        }
+      }
+    }
+  }
+
+  @Override
+  public String getConfigFileName() {
+    return SCHEMA_FILE;
+  }
+
+  private boolean localSchemaFileHasMoreFields(File localFile, File downloadedFile) {
+    try {
+      localFileXml = new XMLConfiguration(localFile);
+      XMLConfiguration downloadedFileXml = new XMLConfiguration(downloadedFile);
+
+      List<String> localFieldNames = (ArrayList<String>) localFileXml.getProperty(FIELD_NAME_PATH);
+      List<String> localFieldTypes = (ArrayList<String>) localFileXml.getProperty(FIELD_TYPE_NAME_PATH);
+      localDynamicFields = (ArrayList<String>) localFileXml.getProperty(DYNAMIC_FIELD_NAME_PATH);
+
+      List<String> fieldNames = (ArrayList<String>) downloadedFileXml.getProperty(FIELD_NAME_PATH);
+      List<String> fieldTypes = (ArrayList<String>) downloadedFileXml.getProperty(FIELD_TYPE_NAME_PATH);
+      List<String> dynamicFields = (ArrayList<String>) downloadedFileXml.getProperty(DYNAMIC_FIELD_NAME_PATH);
+
+      boolean fieldNameHasDiff = hasMoreFields(localFieldNames, fieldNames, FIELD_NAME_PATH);
+      boolean fieldTypeHasDiff = hasMoreFields(localFieldTypes, fieldTypes, FIELD_TYPE_NAME_PATH);
+      boolean dynamicFieldNameHasDiff = hasMoreFields(localDynamicFields, dynamicFields, DYNAMIC_FIELD_NAME_PATH);
+
+      return fieldNameHasDiff || fieldTypeHasDiff || dynamicFieldNameHasDiff;
+    } catch (Exception e) {
+      throw new RuntimeException("Exception during schema xml parsing.", e);
+    }
+  }
+
+  private boolean hasMoreFields(List<String> localFields, List<String> fields, String tag) {
+    boolean result = false;
+    if (localFields != null) {
+      if (fields == null) {
+        result = true;
+      } else {
+        localFields.removeAll(fields);
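+        // Note: removeAll() mutates the caller's list in place so that only the
+        // local-only names remain; upgradeDynamicFields() relies on this side effect.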
+        if (!localFields.isEmpty()) {
+          result = true;
+        }
+      }
+    }
+    if (result) {
+      LOG.info("Found new fields or field types in local schema file.: {} ({})", localFields.toString(), tag);
+    }
+    return result;
+  }
+
+}

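The "has more fields" check is plain name-set arithmetic over the two
managed-schema files. Roughly, in Python (illustrative only, assuming the
field/fieldType/dynamicField elements carry name attributes as in the code above):

    import xml.etree.ElementTree as ET

    def new_names(local_path, downloaded_path, tag):
        # Names of one element type present locally but missing from the ZK copy.
        local = {e.get("name") for e in ET.parse(local_path).getroot().iter(tag)}
        remote = {e.get("name") for e in ET.parse(downloaded_path).getroot().iter(tag)}
        return local - remote

    def local_schema_has_more_fields(local_path, downloaded_path):
        return any(new_names(local_path, downloaded_path, tag)
                   for tag in ("field", "fieldType", "dynamicField"))
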
http://git-wip-us.apache.org/repos/asf/ambari/blob/dfb34498/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/handler/UploadConfigurationHandler.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/handler/UploadConfigurationHandler.java b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/handler/UploadConfigurationHandler.java
index 27a6705..6964951 100644
--- a/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/handler/UploadConfigurationHandler.java
+++ b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/handler/UploadConfigurationHandler.java
@@ -19,133 +19,67 @@
 package org.apache.ambari.logsearch.handler;
 
 import org.apache.ambari.logsearch.conf.SolrPropsConfig;
-import org.apache.commons.configuration.XMLConfiguration;
 import org.apache.commons.io.FileUtils;
-import org.apache.solr.client.solrj.impl.CloudSolrClient;
+import org.apache.commons.io.IOUtils;
 import org.apache.solr.common.cloud.SolrZkClient;
 import org.apache.solr.common.cloud.ZkConfigManager;
+import org.apache.zookeeper.CreateMode;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import java.io.File;
+import java.io.FileInputStream;
 import java.io.IOException;
-import java.nio.file.FileSystems;
-import java.nio.file.Paths;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.UUID;
+import java.io.InputStream;
 
-public class UploadConfigurationHandler implements SolrZkRequestHandler<Boolean> {
+public class UploadConfigurationHandler extends AbstractSolrConfigHandler {
 
   private static final Logger LOG = LoggerFactory.getLogger(UploadConfigurationHandler.class);
 
-  private static final String SCHEMA_FILE = "managed-schema";
   private static final String SOLR_CONFIG_FILE = "solrconfig.xml";
-  private static final String FIELD_NAME_PATH = "field[@name]";
-  private static final String FIELD_TYPE_NAME_PATH = "fieldType[@name]";
-  private static final String DYNAMIC_FIELD_NAME_PATH = "dynamicField[@name]";
-
-  private File configSetFolder;
 
   public UploadConfigurationHandler(File configSetFolder) {
-    this.configSetFolder = configSetFolder;
+    super(configSetFolder);
   }
 
   @Override
-  public Boolean handle(CloudSolrClient solrClient, SolrPropsConfig solrPropsConfig) throws Exception {
-    boolean reloadCollectionNeeded = false;
-    String separator = FileSystems.getDefault().getSeparator();
-    String downloadFolderLocation = String.format("%s%s%s%s%s", System.getProperty("java.io.tmpdir"), separator,
-      UUID.randomUUID().toString(), separator, solrPropsConfig.getConfigName());
-    solrClient.connect();
-    SolrZkClient zkClient = solrClient.getZkStateReader().getZkClient();
-    File tmpDir = new File(downloadFolderLocation);
-    try {
+  public boolean updateConfigIfNeeded(SolrPropsConfig solrPropsConfig, SolrZkClient zkClient, File file,
+                                      String separator, String downloadFolderLocation) throws IOException {
+    boolean result = false;
+    if (!FileUtils.contentEquals(file, new File(String.format("%s%s%s", downloadFolderLocation, separator, file.getName())))) {
+      LOG.info("Solr config file differs ('{}'), upload config set to zookeeper", file.getName());
       ZkConfigManager zkConfigManager = new ZkConfigManager(zkClient);
-      boolean configExists = zkConfigManager.configExists(solrPropsConfig.getConfigName());
-      if (configExists) {
-        LOG.info("Config set exists for '{}' collection. Refreshing it if needed...", solrPropsConfig.getCollection());
-        if (!tmpDir.mkdirs()) {
-          LOG.error("Cannot create directories for '{}'", tmpDir.getAbsolutePath());
-        }
-        zkConfigManager.downloadConfigDir(solrPropsConfig.getConfigName(), Paths.get(downloadFolderLocation));
-        File[] listOfFiles = configSetFolder.listFiles();
-        if (listOfFiles != null) {
-          for (File file : listOfFiles) {
-            if (file.getName().equals(SOLR_CONFIG_FILE) && !FileUtils.contentEquals(file, new File(String.format("%s%s%s", downloadFolderLocation, separator, file.getName())))) {
-              LOG.info("Solr config file differs ('{}'), upload config set to zookeeper", file.getName());
-              zkConfigManager.uploadConfigDir(configSetFolder.toPath(), solrPropsConfig.getConfigName());
-              reloadCollectionNeeded = true;
-              break;
-            }
-            if (file.getName().equals(SCHEMA_FILE) && localSchemaFileHasMoreFields(file, new File(String.format("%s%s%s", downloadFolderLocation, separator, file.getName())))) {
-              LOG.info("Solr schema file differs ('{}'), upload config set to zookeeper", file.getName());
-              zkConfigManager.uploadConfigDir(configSetFolder.toPath(), solrPropsConfig.getConfigName());
-              reloadCollectionNeeded = true;
-              break;
-            }
-          }
-        }
-      } else {
-        LOG.info("Config set does not exist for '{}' collection. Uploading it to zookeeper...", solrPropsConfig.getCollection());
-        File[] listOfFiles = configSetFolder.listFiles();
-        if (listOfFiles != null) {
-          zkConfigManager.uploadConfigDir(configSetFolder.toPath(), solrPropsConfig.getConfigName());
+      zkConfigManager.uploadConfigDir(getConfigSetFolder().toPath(), solrPropsConfig.getConfigName());
+      String filePath = String.format("%s%s%s", getConfigSetFolder(), separator, getConfigFileName());
+      String configsPath = String.format("/%s/%s/%s", "configs", solrPropsConfig.getConfigName(), getConfigFileName());
+      InputStream is = new FileInputStream(filePath);
+      try {
+        if (zkClient.exists(configsPath, true)) {
+          zkClient.setData(configsPath, IOUtils.toByteArray(is), true);
+        } else {
+          zkClient.create(configsPath, IOUtils.toByteArray(is), CreateMode.PERSISTENT, true);
         }
+      } catch (Exception e) {
+        throw new IllegalStateException(e);
       }
-    } catch (Exception e) {
-      throw new RuntimeException(String.format("Cannot upload configurations to zk. (collection: %s, config set folder: %s)",
-        solrPropsConfig.getCollection(), solrPropsConfig.getConfigSetFolder()), e);
-    } finally {
-      if (tmpDir.exists()) {
-        try {
-          FileUtils.deleteDirectory(tmpDir);
-        } catch (IOException e){
-          LOG.error("Cannot delete temp directory.", e);
-        }
+      finally {
+        IOUtils.closeQuietly(is);
       }
+      result = true;
     }
-    return reloadCollectionNeeded;
+    return result;
   }
 
-  private boolean localSchemaFileHasMoreFields(File localFile, File downloadedFile) {
-    try {
-      XMLConfiguration localFileXml = new XMLConfiguration(localFile);
-      XMLConfiguration downloadedFileXml = new XMLConfiguration(downloadedFile);
-
-      List<String> localFieldNames = (ArrayList<String>) localFileXml.getProperty(FIELD_NAME_PATH);
-      List<String> localFieldTypes = (ArrayList<String>) localFileXml.getProperty(FIELD_TYPE_NAME_PATH);
-      List<String> localDynamicFields = (ArrayList<String>) localFileXml.getProperty(DYNAMIC_FIELD_NAME_PATH);
-
-      List<String> fieldNames = (ArrayList<String>) downloadedFileXml.getProperty(FIELD_NAME_PATH);
-      List<String> fieldTypes = (ArrayList<String>) downloadedFileXml.getProperty(FIELD_TYPE_NAME_PATH);
-      List<String> dynamicFields = (ArrayList<String>) downloadedFileXml.getProperty(DYNAMIC_FIELD_NAME_PATH);
-
-      boolean fieldNameHasDiff = hasMoreFields(localFieldNames, fieldNames, FIELD_NAME_PATH);
-      boolean fieldTypeHasDiff = hasMoreFields(localFieldTypes, fieldTypes, FIELD_TYPE_NAME_PATH);
-      boolean dynamicFieldNameHasDiff = hasMoreFields(localDynamicFields, dynamicFields, DYNAMIC_FIELD_NAME_PATH);
-
-      return fieldNameHasDiff || fieldTypeHasDiff || dynamicFieldNameHasDiff;
-    } catch (Exception e) {
-      throw new RuntimeException("Exception during schema xml parsing.", e);
+  @Override
+  public void doIfConfigNotExist(SolrPropsConfig solrPropsConfig, ZkConfigManager zkConfigManager) throws IOException {
+    LOG.info("Config set does not exist for '{}' collection. Uploading it to zookeeper...", solrPropsConfig.getCollection());
+    File[] listOfFiles = getConfigSetFolder().listFiles();
+    if (listOfFiles != null) {
+      zkConfigManager.uploadConfigDir(getConfigSetFolder().toPath(), solrPropsConfig.getConfigName());
     }
   }
 
-  private boolean hasMoreFields(List<String> localFields, List<String> fields, String tag) {
-    boolean result = false;
-    if (localFields != null) {
-      if (fields == null) {
-        result = true;
-      } else {
-        localFields.removeAll(fields);
-        if (!localFields.isEmpty()) {
-          result = true;
-        }
-      }
-    }
-    if (result) {
-      LOG.info("Found new fields or field types in local schema file.: {} ({})", localFields.toString(), tag);
-    }
-    return result;
+  @Override
+  public String getConfigFileName() {
+    return SOLR_CONFIG_FILE;
   }
-
 }

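The exists-then-set-or-create sequence on the configs znode is the usual
ZooKeeper upsert. For illustration, the same pattern with the Python kazoo
client (hypothetical host string, path, and payload; the patch itself uses
SolrZkClient):

    from kazoo.client import KazooClient

    def upsert_znode(hosts, path, payload):
        zk = KazooClient(hosts=hosts)
        zk.start()
        try:
            if zk.exists(path):
                zk.set(path, payload)                     # update in place
            else:
                zk.create(path, payload, makepath=True)   # create, with parents
        finally:
            zk.stop()

    # e.g. upsert_znode("zk1:2181", "/configs/hadoop_logs/solrconfig.xml", xml_bytes)
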

[08/50] [abbrv] ambari git commit: AMBARI-20782. Add config changes from AMBARI-20537 to hive component upgrade path. (Swapan Shridhar via Prasanth Jayachandran).

Posted by ao...@apache.org.
AMBARI-20782. Add config changes from AMBARI-20537 to hive component upgrade path. (Swapan Shridhar via Prasanth Jayachandran).


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/611aaa51
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/611aaa51
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/611aaa51

Branch: refs/heads/branch-3.0-perf
Commit: 611aaa51dd0697f675f1dfbb7b555e9336c96ba9
Parents: 6326589
Author: Swapan Shridhar <ss...@hortonworks.com>
Authored: Tue Apr 18 18:01:35 2017 -0700
Committer: Andrew Onishuk <ao...@hortonworks.com>
Committed: Mon Apr 24 14:59:56 2017 +0300

----------------------------------------------------------------------
 .../stacks/HDP/2.5/upgrades/config-upgrade.xml  | 10 ++++++
 .../HDP/2.5/upgrades/nonrolling-upgrade-2.6.xml | 37 +++++++++++++-------
 .../stacks/HDP/2.5/upgrades/upgrade-2.6.xml     |  2 ++
 3 files changed, 36 insertions(+), 13 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/611aaa51/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/config-upgrade.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/config-upgrade.xml b/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/config-upgrade.xml
index b70b9df..ad936c2 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/config-upgrade.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/config-upgrade.xml
@@ -487,6 +487,16 @@
             <transfer operation="delete" delete-key="atlas.cluster.name"/>
           </definition>
 
+          <definition xsi:type="configure" id="llap_append_stack_size_java_opts" summary="Update JVM stack size for LLAP">
+            <type>hive-interactive-env</type>
+            <insert key="llap_java_opts" value="-Xss512k" insert-type="append" newline-before="false" newline-after="false" />
+          </definition>
+
+          <definition xsi:type="configure" id="llap_update_shuffle_parallel_copies" summary="Update tez shuffle parallel copies for LLAP">
+            <type>hive-interactive-site</type>
+            <set key="tez.runtime.shuffle.parallel.copies" value="8"/>
+          </definition>
+
         </changes>
 
       </component>

http://git-wip-us.apache.org/repos/asf/ambari/blob/611aaa51/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/nonrolling-upgrade-2.6.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/nonrolling-upgrade-2.6.xml b/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/nonrolling-upgrade-2.6.xml
index a6bc4fc..d46353e 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/nonrolling-upgrade-2.6.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/nonrolling-upgrade-2.6.xml
@@ -540,6 +540,30 @@
         <task xsi:type="configure" id="hdp_2_6_0_0_tez_append_heap_dump_options_for_tez_am"/>
       </execute-stage>
 
+      <execute-stage service="HIVE" component="HIVE_SERVER_INTERACTIVE" title="Update headroom for LLAP">
+        <task xsi:type="configure" id="llap_update_headroom">
+          <summary>Updating headroom for LLAP</summary>
+        </task>
+      </execute-stage>
+
+      <execute-stage service="HIVE" component="HIVE_SERVER_INTERACTIVE" title="Update Hash Aggregation settings for LLAP">
+        <task xsi:type="configure" id="llap_update_hashaggregation">
+          <summary>Updating Hash Aggregation settings for LLAP</summary>
+        </task>
+      </execute-stage>
+
+      <execute-stage service="HIVE" component="HIVE_SERVER_INTERACTIVE" title="Update JVM stack size for LLAP">
+        <task xsi:type="configure" id="llap_append_stack_size_java_opts">
+          <summary>Updating JVM stack size for LLAP</summary>
+        </task>
+      </execute-stage>
+
+      <execute-stage service="HIVE" component="HIVE_SERVER_INTERACTIVE" title="Update tez shuffle parallel copies for LLAP">
+        <task xsi:type="configure" id="llap_update_shuffle_parallel_copies">
+          <summary>Updating tez shuffle parallel copies for LLAP</summary>
+        </task>
+      </execute-stage>
+
       <execute-stage service="HIVE" component="HIVE_SERVER" title="Copying hive.tez.container.size to HIVE SERVER INTERACTIVE's hive-site">
         <task xsi:type="configure" id="hdp_2_6_0_0_copy_hive_tez_container_size_to_hiveInteractive"/>
       </execute-stage>
@@ -696,19 +720,6 @@
           <function>wait_for_safemode_off</function>
         </task>
       </execute-stage>
-
-      <execute-stage service="HIVE" component="HIVE_SERVER_INTERACTIVE" title="Update headroom for LLAP">
-        <task xsi:type="configure" id="llap_update_headroom">
-          <summary>Updating the Hive Log4J2 properties to include parameterizations</summary>
-        </task>
-      </execute-stage>
-
-      <execute-stage service="HIVE" component="HIVE_SERVER_INTERACTIVE" title="Update Hash Aggregation settings for LLAP">
-        <task xsi:type="configure" id="llap_update_hashaggregation">
-          <summary>Updating Hash Aggregation settings for LLAP</summary>
-        </task>
-      </execute-stage>
-
     </group>
 
     <group xsi:type="restart" name="KAFKA" title="Kafka">

http://git-wip-us.apache.org/repos/asf/ambari/blob/611aaa51/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/upgrade-2.6.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/upgrade-2.6.xml b/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/upgrade-2.6.xml
index 53d4579..94aab48 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/upgrade-2.6.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/upgrade-2.6.xml
@@ -842,6 +842,8 @@
           <task xsi:type="configure" id="llap_cli_log4j2_parameterize"/>
           <task xsi:type="configure" id="llap_update_headroom"/>
           <task xsi:type="configure" id="llap_update_hashaggregation"/>
+          <task xsi:type="configure" id="llap_append_stack_size_java_opts"/>
+          <task xsi:type="configure" id="llap_update_shuffle_parallel_copies"/>
         </pre-upgrade>
         
         <pre-downgrade />


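The two new definitions exercise the two basic config-upgrade operations:
insert-type="append" concatenates a token onto an existing value, while <set>
overwrites it. In dict terms (an illustrative sketch, not Ambari's upgrade
engine; the starting value and separator are assumptions):

    def apply_append(config, key, token, separator=" "):
        # insert-type="append": keep what is there, add the new token.
        config[key] = (config.get(key, "") + separator + token).strip()

    def apply_set(config, key, value):
        # <set>: unconditional overwrite.
        config[key] = value

    env = {"llap_java_opts": "-XX:+UseG1GC"}
    apply_append(env, "llap_java_opts", "-Xss512k")   # "-XX:+UseG1GC -Xss512k"

    site = {}
    apply_set(site, "tez.runtime.shuffle.parallel.copies", "8")
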
[49/50] [abbrv] ambari git commit: AMBARI-20326. HDP 3.0 TP - support for HBase with configs, kerberos, widgets, metrics, quicklinks, and themes (alejandro)

Posted by ao...@apache.org.
AMBARI-20326. HDP 3.0 TP - support for HBase with configs, kerberos, widgets, metrics, quicklinks, and themes (alejandro)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/41006ebe
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/41006ebe
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/41006ebe

Branch: refs/heads/branch-3.0-perf
Commit: 41006ebebb81d6e2aefda4efb51401ecb810fa2e
Parents: e879440
Author: Alejandro Fernandez <af...@hortonworks.com>
Authored: Thu Apr 20 15:29:46 2017 -0700
Committer: Andrew Onishuk <ao...@hortonworks.com>
Committed: Mon Apr 24 14:59:56 2017 +0300

----------------------------------------------------------------------
 .../common-services/HBASE/2.0.0.3.0/alerts.json |  127 +
 .../HBASE/2.0.0.3.0/configuration/hbase-env.xml |  279 ++
 .../2.0.0.3.0/configuration/hbase-log4j.xml     |  188 +
 .../2.0.0.3.0/configuration/hbase-policy.xml    |   53 +
 .../2.0.0.3.0/configuration/hbase-site.xml      |  774 +++
 .../configuration/ranger-hbase-audit.xml        |  132 +
 .../ranger-hbase-plugin-properties.xml          |  135 +
 .../ranger-hbase-policymgr-ssl.xml              |   66 +
 .../configuration/ranger-hbase-security.xml     |   74 +
 .../HBASE/2.0.0.3.0/kerberos.json               |  160 +
 .../HBASE/2.0.0.3.0/metainfo.xml                |  232 +
 .../HBASE/2.0.0.3.0/metrics.json                | 4733 ++++++++++++++++++
 .../2.0.0.3.0/package/files/draining_servers.rb |  164 +
 .../package/files/hbase-smoke-cleanup.sh        |   23 +
 .../2.0.0.3.0/package/files/hbaseSmokeVerify.sh |   34 +
 .../HBASE/2.0.0.3.0/package/scripts/__init__.py |   19 +
 .../2.0.0.3.0/package/scripts/functions.py      |   54 +
 .../HBASE/2.0.0.3.0/package/scripts/hbase.py    |  230 +
 .../2.0.0.3.0/package/scripts/hbase_client.py   |   81 +
 .../package/scripts/hbase_decommission.py       |   94 +
 .../2.0.0.3.0/package/scripts/hbase_master.py   |  163 +
 .../package/scripts/hbase_regionserver.py       |  174 +
 .../2.0.0.3.0/package/scripts/hbase_service.py  |   66 +
 .../2.0.0.3.0/package/scripts/hbase_upgrade.py  |   42 +
 .../HBASE/2.0.0.3.0/package/scripts/params.py   |   28 +
 .../2.0.0.3.0/package/scripts/params_linux.py   |  426 ++
 .../2.0.0.3.0/package/scripts/params_windows.py |   43 +
 .../package/scripts/phoenix_queryserver.py      |   92 +
 .../package/scripts/phoenix_service.py          |   56 +
 .../2.0.0.3.0/package/scripts/service_check.py  |   99 +
 .../package/scripts/setup_ranger_hbase.py       |  106 +
 .../2.0.0.3.0/package/scripts/status_params.py  |   68 +
 .../HBASE/2.0.0.3.0/package/scripts/upgrade.py  |  106 +
 .../package/templates/hbase-smoke.sh.j2         |   44 +
 .../2.0.0.3.0/package/templates/hbase.conf.j2   |   35 +
 .../package/templates/hbase_client_jaas.conf.j2 |   23 +
 .../templates/hbase_grant_permissions.j2        |   39 +
 .../package/templates/hbase_master_jaas.conf.j2 |   26 +
 .../templates/hbase_queryserver_jaas.conf.j2    |   26 +
 .../templates/hbase_regionserver_jaas.conf.j2   |   26 +
 .../templates/input.config-hbase.json.j2        |   79 +
 .../package/templates/regionservers.j2          |   20 +
 .../HBASE/2.0.0.3.0/quicklinks/quicklinks.json  |   97 +
 .../HBASE/2.0.0.3.0/role_command_order.json     |   10 +
 .../HBASE/2.0.0.3.0/themes/theme.json           |  407 ++
 .../HBASE/2.0.0.3.0/widgets.json                |  510 ++
 .../stacks/HDP/3.0/services/HBASE/metainfo.xml  |   26 +
 47 files changed, 10489 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/41006ebe/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/alerts.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/alerts.json b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/alerts.json
new file mode 100644
index 0000000..6fcb4dc
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/alerts.json
@@ -0,0 +1,127 @@
+{
+  "HBASE": {
+    "service": [
+      {
+        "name": "hbase_regionserver_process_percent",
+        "label": "Percent RegionServers Available",
+        "description": "This service-level alert is triggered if the configured percentage of RegionServer processes cannot be determined to be up and listening on the network for the configured warning and critical thresholds. It aggregates the results of RegionServer process down checks.",
+        "interval": 1,
+        "scope": "SERVICE",
+        "enabled": true,
+        "source": {
+          "type": "AGGREGATE",
+          "alert_name": "hbase_regionserver_process",
+          "reporting": {
+            "ok": {
+              "text": "affected: [{1}], total: [{0}]"
+            },
+            "warning": {
+              "text": "affected: [{1}], total: [{0}]",
+              "value": 10
+            },
+            "critical": {
+              "text": "affected: [{1}], total: [{0}]",
+              "value": 30
+            },
+            "units" : "%",
+            "type": "PERCENT"
+          }
+        }
+      }    
+    ],
+    "HBASE_MASTER": [
+      {
+        "name": "hbase_master_process",
+        "label": "HBase Master Process",
+        "description": "This alert is triggered if the HBase master processes cannot be confirmed to be up and listening on the network for the configured critical threshold, given in seconds.",
+        "interval": 1,
+        "scope": "ANY",
+        "source": {
+          "type": "PORT",
+          "uri": "{{hbase-site/hbase.master.port}}",
+          "default_port": 60000,
+          "reporting": {
+            "ok": {
+              "text": "TCP OK - {0:.3f}s response on port {1}"
+            },
+            "warning": {
+              "text": "TCP OK - {0:.3f}s response on port {1}",
+              "value": 1.5
+            },
+            "critical": {
+              "text": "Connection failed: {0} to {1}:{2}",
+              "value": 5.0
+            }
+          }
+        }
+      },
+      {
+        "name": "hbase_master_cpu",
+        "label": "HBase Master CPU Utilization",
+        "description": "This host-level alert is triggered if CPU utilization of the HBase Master exceeds certain warning and critical thresholds. It checks the HBase Master JMX Servlet for the SystemCPULoad property. The threshold values are in percent.",
+        "interval": 5,
+        "scope": "ANY",
+        "enabled": true,
+        "source": {
+          "type": "METRIC",
+          "uri": {
+            "http": "{{hbase-site/hbase.master.info.port}}",
+            "default_port": 60010,
+            "connection_timeout": 5.0,
+            "kerberos_principal": "{{hbase-site/hbase.security.authentication.spnego.kerberos.principal}}",
+            "kerberos_keytab": "{{hbase-site/hbase.security.authentication.spnego.kerberos.keytab}}"
+          },
+          "reporting": {
+            "ok": {
+              "text": "{1} CPU, load {0:.1%}"
+            },
+            "warning": {
+              "text": "{1} CPU, load {0:.1%}",
+              "value": 200
+            },
+            "critical": {
+              "text": "{1} CPU, load {0:.1%}",
+              "value": 250
+            },
+            "units" : "%",
+            "type": "PERCENT"
+          },
+          "jmx": {
+            "property_list": [
+              "java.lang:type=OperatingSystem/SystemCpuLoad",
+              "java.lang:type=OperatingSystem/AvailableProcessors"
+            ],
+            "value": "{0} * 100"
+          }
+        }
+      }
+    ],
+    "HBASE_REGIONSERVER": [
+      {
+        "name": "hbase_regionserver_process",
+        "label": "HBase RegionServer Process",
+        "description": "This host-level alert is triggered if the RegionServer processes cannot be confirmed to be up and listening on the network for the configured critical threshold, given in seconds.",
+        "interval": 1,
+        "scope": "HOST",
+        "source": {
+          "type": "PORT",
+          "uri": "{{hbase-site/hbase.regionserver.info.port}}",
+          "default_port": 60030,
+          "reporting": {
+            "ok": {
+              "text": "TCP OK - {0:.3f}s response on port {1}"
+            },
+            "warning": {
+              "text": "TCP OK - {0:.3f}s response on port {1}",
+              "value": 1.5
+            },
+            "critical": {
+              "text": "Connection failed: {0} to {1}:{2}",
+              "value": 5.0
+            }
+          }
+        }
+      }
+    ]
+  }
+}

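The AGGREGATE/PERCENT source above rolls the per-host hbase_regionserver_process
results into one service-level state. The thresholds imply roughly this
evaluation (an illustrative sketch, not Ambari's alert engine):

    def aggregate_state(affected, total, warning=10, critical=30):
        # Thresholds are percentages of affected hosts, per the reporting block.
        percent = 100.0 * affected / total if total else 0.0
        if percent >= critical:
            return "CRITICAL"
        if percent >= warning:
            return "WARNING"
        return "OK"

    print(aggregate_state(affected=2, total=10))   # 20.0% -> WARNING
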
http://git-wip-us.apache.org/repos/asf/ambari/blob/41006ebe/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/configuration/hbase-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/configuration/hbase-env.xml b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/configuration/hbase-env.xml
new file mode 100644
index 0000000..da12706
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/configuration/hbase-env.xml
@@ -0,0 +1,279 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration supports_adding_forbidden="true">
+  <!-- Inherited from HBase in HDP 2.0.6. -->
+  <property>
+    <name>hbase_log_dir</name>
+    <value>/var/log/hbase</value>
+    <display-name>HBase Log Dir Prefix</display-name>
+    <description>Log Directories for HBase.</description>
+    <value-attributes>
+      <type>directory</type>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>hbase_pid_dir</name>
+    <value>/var/run/hbase</value>
+    <display-name>HBase PID Dir</display-name>
+    <description>Pid Directory for HBase.</description>
+    <value-attributes>
+      <type>directory</type>
+      <overridable>false</overridable>
+      <editable-only-at-install>true</editable-only-at-install>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>hbase_regionserver_heapsize</name>
+    <value>4096</value>
+    <description>Maximum amount of memory each HBase RegionServer can use.</description>
+    <display-name>HBase RegionServer Maximum Memory</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>0</minimum>
+      <maximum>6554</maximum>
+      <unit>MB</unit>
+      <increment-step>256</increment-step>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>hbase_regionserver_xmn_max</name>
+    <value>4000</value>
+    <description>
+Sets the upper bound on HBase RegionServers' young generation size.
+This value is used in case the young generation size (-Xmn) calculated based on the max heapsize (hbase_regionserver_heapsize)
+and the -Xmn ratio (hbase_regionserver_xmn_ratio) exceeds this value.
+    </description>
+    <display-name>RegionServers maximum value for -Xmn</display-name>
+    <value-attributes>
+      <type>int</type>
+      <unit>MB</unit>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>hbase_regionserver_xmn_ratio</name>
+    <value>0.2</value>
+    <display-name>RegionServers -Xmn in -Xmx ratio</display-name>
+    <description>Percentage of max heap size (-Xmx) which used for young generation heap (-Xmn).</description>
+    <value-attributes>
+      <type>float</type>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>hbase_master_heapsize</name>
+    <value>4096</value>
+    <description>Maximum amount of memory each HBase Master can use.</description>
+    <display-name>HBase Master Maximum Memory</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>0</minimum>
+      <maximum>16384</maximum>
+      <unit>MB</unit>
+      <increment-step>256</increment-step>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>hbase_user</name>
+    <display-name>HBase User</display-name>
+    <value>hbase</value>
+    <property-type>USER</property-type>
+    <description>HBase User Name.</description>
+    <value-attributes>
+      <type>user</type>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>hbase_user_nofile_limit</name>
+    <value>32000</value>
+    <description>Max open files limit setting for HBASE user.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>hbase_user_nproc_limit</name>
+    <value>16000</value>
+    <description>Max number of processes limit setting for HBASE user.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>hbase_java_io_tmpdir</name>
+    <value>/tmp</value>
+    <description>Used in hbase-env.sh as HBASE_OPTS=-Djava.io.tmpdir=java_io_tmpdir</description>
+    <value-attributes>
+      <type>directory</type>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>hbase_principal_name</name>
+    <description>HBase principal name</description>
+    <property-type>KERBEROS_PRINCIPAL</property-type>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>hbase_user_keytab</name>
+    <description>HBase keytab path</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>hbase_regionserver_shutdown_timeout</name>
+    <value>30</value>
+    <display-name>HBase RegionServer shutdown timeout</display-name>
+    <description>
+After this number of seconds waiting for a graceful stop of the HBase RegionServer, it is forced to exit with SIGKILL.
+The timeout is needed because of a known bug where the HBase RegionServer can occasionally hang forever on stop if NameNode safemode is on.
+    </description>
+    <value-attributes>
+      <type>int</type>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <!-- hbase-env.sh -->
+  <property>
+    <name>content</name>
+    <display-name>hbase-env template</display-name>
+    <description>This is the jinja template for hbase-env.sh file</description>
+    <value>
+# Set environment variables here.
+
+# The java implementation to use. Java 1.6 required.
+export JAVA_HOME={{java64_home}}
+
+# HBase Configuration directory
+export HBASE_CONF_DIR=${HBASE_CONF_DIR:-{{hbase_conf_dir}}}
+
+# Extra Java CLASSPATH elements. Optional.
+export HBASE_CLASSPATH=${HBASE_CLASSPATH}
+
+
+# The maximum amount of heap to use, in MB. Default is 1000.
+# export HBASE_HEAPSIZE=1000
+
+# Extra Java runtime options.
+# Below are what we set by default. May only work with SUN JVM.
+# For more on why as well as other possible settings,
+# see http://wiki.apache.org/hadoop/PerformanceTuning
+export SERVER_GC_OPTS="-verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:{{log_dir}}/gc.log-`date +'%Y%m%d%H%M'`"
+# Uncomment below to enable java garbage collection logging.
+# export HBASE_OPTS="$HBASE_OPTS -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:$HBASE_HOME/logs/gc-hbase.log"
+
+# Uncomment and adjust to enable JMX exporting
+# See jmxremote.password and jmxremote.access in $JRE_HOME/lib/management to configure remote password access.
+# More details at: http://java.sun.com/javase/6/docs/technotes/guides/management/agent.html
+#
+# export HBASE_JMX_BASE="-Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false"
+# If you want to configure BucketCache, specify '-XX:MaxDirectMemorySize=' with a proper direct memory size
+# export HBASE_THRIFT_OPTS="$HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10103"
+# export HBASE_ZOOKEEPER_OPTS="$HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10104"
+
+# File naming hosts on which HRegionServers will run. $HBASE_HOME/conf/regionservers by default.
+export HBASE_REGIONSERVERS=${HBASE_CONF_DIR}/regionservers
+
+# Extra ssh options. Empty by default.
+# export HBASE_SSH_OPTS="-o ConnectTimeout=1 -o SendEnv=HBASE_CONF_DIR"
+
+# Where log files are stored. $HBASE_HOME/logs by default.
+export HBASE_LOG_DIR={{log_dir}}
+
+# A string representing this instance of hbase. $USER by default.
+# export HBASE_IDENT_STRING=$USER
+
+# The scheduling priority for daemon processes. See 'man nice'.
+# export HBASE_NICENESS=10
+
+# The directory where pid files are stored. /tmp by default.
+export HBASE_PID_DIR={{pid_dir}}
+
+# Seconds to sleep between slave commands. Unset by default. This
+# can be useful in large clusters, where, e.g., slave rsyncs can
+# otherwise arrive faster than the master can service them.
+# export HBASE_SLAVE_SLEEP=0.1
+
+# Tell HBase whether it should manage its own instance of ZooKeeper or not.
+export HBASE_MANAGES_ZK=false
+
+{% if java_version &lt; 8 %}
+JDK_DEPENDED_OPTS="-XX:PermSize=128m -XX:MaxPermSize=128m"
+{% endif %}
+
+{% if security_enabled %}
+export HBASE_OPTS="$HBASE_OPTS -XX:+UseConcMarkSweepGC -XX:ErrorFile={{log_dir}}/hs_err_pid%p.log -Djava.security.auth.login.config={{client_jaas_config_file}} -Djava.io.tmpdir={{java_io_tmpdir}}"
+export HBASE_MASTER_OPTS="$HBASE_MASTER_OPTS -Xmx{{master_heapsize}} -Djava.security.auth.login.config={{master_jaas_config_file}} $JDK_DEPENDED_OPTS"
+export HBASE_REGIONSERVER_OPTS="$HBASE_REGIONSERVER_OPTS -Xmn{{regionserver_xmn_size}} -XX:CMSInitiatingOccupancyFraction=70  -Xms{{regionserver_heapsize}} -Xmx{{regionserver_heapsize}} -Djava.security.auth.login.config={{regionserver_jaas_config_file}} $JDK_DEPENDED_OPTS"
+export PHOENIX_QUERYSERVER_OPTS="$PHOENIX_QUERYSERVER_OPTS -Djava.security.auth.login.config={{queryserver_jaas_config_file}}"
+{% else %}
+export HBASE_OPTS="$HBASE_OPTS -XX:+UseConcMarkSweepGC -XX:ErrorFile={{log_dir}}/hs_err_pid%p.log -Djava.io.tmpdir={{java_io_tmpdir}}"
+export HBASE_MASTER_OPTS="$HBASE_MASTER_OPTS -Xmx{{master_heapsize}} $JDK_DEPENDED_OPTS"
+export HBASE_REGIONSERVER_OPTS="$HBASE_REGIONSERVER_OPTS -Xmn{{regionserver_xmn_size}} -XX:CMSInitiatingOccupancyFraction=70  -Xms{{regionserver_heapsize}} -Xmx{{regionserver_heapsize}} $JDK_DEPENDED_OPTS"
+{% endif %}
+
+# HBase off-heap MaxDirectMemorySize
+export HBASE_REGIONSERVER_OPTS="$HBASE_REGIONSERVER_OPTS {% if hbase_max_direct_memory_size %} -XX:MaxDirectMemorySize={{hbase_max_direct_memory_size}}m {% endif %}"
+export HBASE_MASTER_OPTS="$HBASE_MASTER_OPTS {% if hbase_max_direct_memory_size %} -XX:MaxDirectMemorySize={{hbase_max_direct_memory_size}}m {% endif %}"
+</value>
+    <value-attributes>
+      <type>content</type>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <!-- Inherited from HBase in HDP 2.2 -->
+  <property>
+    <name>hbase_max_direct_memory_size</name>
+    <value/>
+    <display-name>HBase off-heap MaxDirectMemorySize</display-name>
+    <description>If not empty, adds '-XX:MaxDirectMemorySize={{hbase_max_direct_memory_size}}m' to HBASE_REGIONSERVER_OPTS.</description>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>phoenix_sql_enabled</name>
+    <value>false</value>
+    <description>Enable Phoenix SQL</description>
+    <display-name>Enable Phoenix</display-name>
+    <value-attributes>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>true</value>
+          <label>Enabled</label>
+        </entry>
+        <entry>
+          <value>false</value>
+          <label>Disabled</label>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+</configuration>
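
Note: the hbase-env template above is rendered by Ambari before being written to
hbase-env.sh. As a rough illustration only, assuming hypothetical template values
(regionserver_heapsize=4096m, regionserver_xmn_size=512m,
hbase_max_direct_memory_size=2048, security_enabled=false, JDK 8), the
RegionServer lines would render to something like:

    export HBASE_REGIONSERVER_OPTS="$HBASE_REGIONSERVER_OPTS -Xmn512m -XX:CMSInitiatingOccupancyFraction=70  -Xms4096m -Xmx4096m "
    export HBASE_REGIONSERVER_OPTS="$HBASE_REGIONSERVER_OPTS  -XX:MaxDirectMemorySize=2048m "

(On JDK 8 the java_version < 8 block is skipped, so JDK_DEPENDED_OPTS expands
to an empty string.)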

http://git-wip-us.apache.org/repos/asf/ambari/blob/41006ebe/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/configuration/hbase-log4j.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/configuration/hbase-log4j.xml b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/configuration/hbase-log4j.xml
new file mode 100644
index 0000000..10e2237
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/configuration/hbase-log4j.xml
@@ -0,0 +1,188 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration supports_final="false" supports_adding_forbidden="false">
+ <property>
+    <name>hbase_log_maxfilesize</name>
+    <value>256</value>
+    <description>The maximum size of the backup file before the log is rotated</description>
+    <display-name>HBase Log: backup file size</display-name>
+    <value-attributes>
+        <unit>MB</unit>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+ </property>
+ <property>
+      <name>hbase_log_maxbackupindex</name>
+      <value>20</value>
+      <description>The number of backup files</description>
+      <display-name>HBase Log: # of backup files</display-name>
+      <value-attributes>
+        <type>int</type>
+        <minimum>0</minimum>
+      </value-attributes>
+      <on-ambari-upgrade add="false"/>
+ </property>
+ <property>
+    <name>hbase_security_log_maxfilesize</name>
+    <value>256</value>
+    <description>The maximum size of the security backup file before the log is rotated</description>
+    <display-name>HBase Security Log: backup file size</display-name>
+    <value-attributes>
+        <unit>MB</unit>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+ </property>
+ <property>
+      <name>hbase_security_log_maxbackupindex</name>
+      <value>20</value>
+      <description>The number of security backup files</description>
+      <display-name>HBase Security Log: # of backup files</display-name>
+      <value-attributes>
+        <type>int</type>
+        <minimum>0</minimum>
+      </value-attributes>
+      <on-ambari-upgrade add="false"/>
+ </property>
+  <property>
+    <name>content</name>
+    <display-name>hbase-log4j template</display-name>
+    <description>Custom log4j.properties</description>
+    <value>
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# Define some default values that can be overridden by system properties
+hbase.root.logger=INFO,console
+hbase.security.logger=INFO,console
+hbase.log.dir=.
+hbase.log.file=hbase.log
+
+# Define the root logger to the system property "hbase.root.logger".
+log4j.rootLogger=${hbase.root.logger}
+
+# Logging Threshold
+log4j.threshold=ALL
+
+#
+# Daily Rolling File Appender
+#
+log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.DRFA.File=${hbase.log.dir}/${hbase.log.file}
+
+# Rollover at midnight
+log4j.appender.DRFA.DatePattern=.yyyy-MM-dd
+
+# 30-day backup
+#log4j.appender.DRFA.MaxBackupIndex=30
+log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout
+
+# Pattern format: Date LogLevel LoggerName LogMessage
+log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p [%t] %c{2}: %m%n
+
+# Rolling File Appender properties
+hbase.log.maxfilesize={{hbase_log_maxfilesize}}MB
+hbase.log.maxbackupindex={{hbase_log_maxbackupindex}}
+
+# Rolling File Appender
+log4j.appender.RFA=org.apache.log4j.RollingFileAppender
+log4j.appender.RFA.File=${hbase.log.dir}/${hbase.log.file}
+
+log4j.appender.RFA.MaxFileSize=${hbase.log.maxfilesize}
+log4j.appender.RFA.MaxBackupIndex=${hbase.log.maxbackupindex}
+
+log4j.appender.RFA.layout=org.apache.log4j.PatternLayout
+log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p [%t] %c{2}: %m%n
+
+#
+# Security audit appender
+#
+hbase.security.log.file=SecurityAuth.audit
+hbase.security.log.maxfilesize={{hbase_security_log_maxfilesize}}MB
+hbase.security.log.maxbackupindex={{hbase_security_log_maxbackupindex}}
+log4j.appender.RFAS=org.apache.log4j.RollingFileAppender
+log4j.appender.RFAS.File=${hbase.log.dir}/${hbase.security.log.file}
+log4j.appender.RFAS.MaxFileSize=${hbase.security.log.maxfilesize}
+log4j.appender.RFAS.MaxBackupIndex=${hbase.security.log.maxbackupindex}
+log4j.appender.RFAS.layout=org.apache.log4j.PatternLayout
+log4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+log4j.category.SecurityLogger=${hbase.security.logger}
+log4j.additivity.SecurityLogger=false
+#log4j.logger.SecurityLogger.org.apache.hadoop.hbase.security.access.AccessController=TRACE
+
+#
+# Null Appender
+#
+log4j.appender.NullAppender=org.apache.log4j.varia.NullAppender
+
+#
+# console
+# Add "console" to rootlogger above if you want to use this
+#
+log4j.appender.console=org.apache.log4j.ConsoleAppender
+log4j.appender.console.target=System.err
+log4j.appender.console.layout=org.apache.log4j.PatternLayout
+log4j.appender.console.layout.ConversionPattern=%d{ISO8601} %-5p [%t] %c{2}: %m%n
+
+# Custom Logging levels
+
+log4j.logger.org.apache.zookeeper=INFO
+#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG
+log4j.logger.org.apache.hadoop.hbase=INFO
+# Make these two classes INFO-level. Make them DEBUG to see more zk debug.
+log4j.logger.org.apache.hadoop.hbase.zookeeper.ZKUtil=INFO
+log4j.logger.org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher=INFO
+#log4j.logger.org.apache.hadoop.dfs=DEBUG
+# Set this class to log INFO only, otherwise it's OTT
+# Enable this to get detailed connection error/retry logging.
+# log4j.logger.org.apache.hadoop.hbase.client.HConnectionManager$HConnectionImplementation=TRACE
+
+
+# Uncomment this line to enable tracing on _every_ RPC call (this can be a lot of output)
+#log4j.logger.org.apache.hadoop.ipc.HBaseServer.trace=DEBUG
+
+# Uncomment the below if you want to remove logging of client region caching
+# and scan of .META. messages
+# log4j.logger.org.apache.hadoop.hbase.client.HConnectionManager$HConnectionImplementation=INFO
+# log4j.logger.org.apache.hadoop.hbase.client.MetaScanner=INFO
+
+    </value>
+    <value-attributes>
+      <type>content</type>
+      <show-property-name>false</show-property-name>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+</configuration>
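
Note: with the default values defined above (hbase_log_maxfilesize=256,
hbase_log_maxbackupindex=20), the rendered log4j.properties would contain,
for example:

    hbase.log.maxfilesize=256MB
    hbase.log.maxbackupindex=20
    log4j.appender.RFA.MaxFileSize=${hbase.log.maxfilesize}
    log4j.appender.RFA.MaxBackupIndex=${hbase.log.maxbackupindex}

so the RollingFileAppender rotates hbase.log at 256 MB and keeps up to 20
rotated files.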

http://git-wip-us.apache.org/repos/asf/ambari/blob/41006ebe/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/configuration/hbase-policy.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/configuration/hbase-policy.xml b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/configuration/hbase-policy.xml
new file mode 100644
index 0000000..8500ee0
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/configuration/hbase-policy.xml
@@ -0,0 +1,53 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration supports_final="true">
+  <property>
+    <name>security.client.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for HRegionInterface protocol implementations (i.e.,
+    clients talking to HRegionServers).
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank, e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>security.admin.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for HMasterInterface protocol implementation (i.e.,
+    clients talking to HMaster for admin operations).
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank, e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>security.masterregion.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for HMasterRegionInterface protocol implementations
+    (for HRegionServers communicating with HMaster).
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank, e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+</configuration>
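
Note: as the descriptions above state, each ACL value is a user list and a
group list separated by a blank. A hypothetical restricted ACL (users alice
and bob plus the group admins; all names illustrative only) would look like:

    <property>
      <name>security.admin.protocol.acl</name>
      <value>alice,bob admins</value>
    </property>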

http://git-wip-us.apache.org/repos/asf/ambari/blob/41006ebe/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/configuration/hbase-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/configuration/hbase-site.xml b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/configuration/hbase-site.xml
new file mode 100644
index 0000000..0df616e
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/configuration/hbase-site.xml
@@ -0,0 +1,774 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration supports_final="true">
+  <!-- Inherited from HBase in HDP 2.0.6 -->
+  <property>
+    <name>hbase.rootdir</name>
+    <value>hdfs://localhost:8020/apps/hbase/data</value>
+    <description>The directory shared by region servers and into
+    which HBase persists.  The URL should be 'fully-qualified'
+    to include the filesystem scheme.  For example, to specify the
+    HDFS directory '/hbase' where the HDFS instance's namenode is
+    running at namenode.example.org on port 9000, set this value to:
+    hdfs://namenode.example.org:9000/hbase.  By default HBase writes
+    into /tmp.  Change this configuration else all data will be lost
+    on machine restart.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>hbase.cluster.distributed</name>
+    <value>true</value>
+    <description>The mode the cluster will be in. Possible values are
+      false for standalone mode and true for distributed mode.  If
+      false, startup will run all HBase and ZooKeeper daemons together
+      in the one JVM.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>hbase.master.port</name>
+    <value>16000</value>
+    <display-name>HBase Master Port</display-name>
+    <description>The port the HBase Master should bind to.</description>
+    <value-attributes>
+      <type>int</type>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>hbase.tmp.dir</name>
+    <value>/tmp/hbase-${user.name}</value>
+    <display-name>HBase tmp directory</display-name>
+    <description>Temporary directory on the local filesystem.
+    Change this setting to point to a location more permanent
+    than '/tmp' (The '/tmp' directory is often cleared on
+    machine restart).
+    </description>
+    <value-attributes>
+      <type>directory</type>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>hbase.local.dir</name>
+    <value>${hbase.tmp.dir}/local</value>
+    <description>Directory on the local filesystem to be used as a local storage
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>hbase.master.info.bindAddress</name>
+    <value>0.0.0.0</value>
+    <description>The bind address for the HBase Master web UI
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>hbase.master.info.port</name>
+    <value>16010</value>
+    <description>The port for the HBase Master web UI.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>hbase.regionserver.info.port</name>
+    <value>16030</value>
+    <description>The port for the HBase RegionServer web UI.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>hbase.regionserver.handler.count</name>
+    <value>30</value>
+    <description>
+      Count of RPC Listener instances spun up on RegionServers.
+      Same property is used by the Master for count of master handlers.
+    </description>
+    <display-name>Number of Handlers per RegionServer</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>5</minimum>
+      <maximum>240</maximum>
+      <increment-step>1</increment-step>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>hbase.hregion.majorcompaction</name>
+    <value>604800000</value>
+    <description>Time between major compactions, expressed in milliseconds. Set to 0 to disable
+      time-based automatic major compactions. User-requested and size-based major compactions will
+      still run. This value is multiplied by hbase.hregion.majorcompaction.jitter to cause
+      compaction to start at a somewhat-random time during a given window of time. The default value
+      is 7 days, expressed in milliseconds. If major compactions are causing disruption in your
+      environment, you can configure them to run at off-peak times for your deployment, or disable
+      time-based major compactions by setting this parameter to 0, and run major compactions in a
+      cron job or by another external mechanism.</description>
+    <display-name>Major Compaction Interval</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>0</minimum>
+      <maximum>2592000000</maximum>
+      <unit>milliseconds</unit>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>hbase.hregion.memstore.block.multiplier</name>
+    <value>4</value>
+    <description>
+      Block updates if the memstore has hbase.hregion.memstore.block.multiplier
+      times hbase.hregion.memstore.flush.size bytes.  Useful for preventing
+      runaway memstore growth during spikes in update traffic.  Without an
+      upper bound, the memstore fills such that when it flushes, the
+      resultant flush files take a long time to compact or split, or,
+      worse, we OOME.
+    </description>
+    <display-name>HBase Region Block Multiplier</display-name>
+    <value-attributes>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>2</value>
+        </entry>
+        <entry>
+          <value>4</value>
+        </entry>
+        <entry>
+          <value>8</value>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>hbase.hregion.memstore.flush.size</name>
+    <value>134217728</value>
+    <description>
+      The size of an individual memstore. Each column family within each region is allocated its own memstore.
+    </description>
+    <display-name>Memstore Flush Size</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>33554432</minimum>
+      <maximum>268435456</maximum>
+      <increment-step>1048576</increment-step>
+      <unit>B</unit>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
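+  <!--
+    Worked example, assuming the two defaults above: with
+    hbase.hregion.memstore.flush.size = 134217728 (128 MB) and
+    hbase.hregion.memstore.block.multiplier = 4, updates to a region are
+    blocked once its memstore reaches 4 * 128 MB = 512 MB.
+  -->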
+  <property>
+    <name>hbase.hregion.memstore.mslab.enabled</name>
+    <value>true</value>
+    <description>
+      Enables the MemStore-Local Allocation Buffer,
+      a feature which works to prevent heap fragmentation under
+      heavy write loads. This can reduce the frequency of stop-the-world
+      GC pauses on large heaps.
+    </description>
+    <value-attributes>
+      <type>boolean</type>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>hbase.hregion.max.filesize</name>
+    <value>10737418240</value>
+    <description>
+      Maximum HFile size. If the sum of the sizes of a region's HFiles has grown to exceed this
+      value, the region is split in two.
+    </description>
+    <display-name>Maximum Region File Size</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>1073741824</minimum>
+      <maximum>107374182400</maximum>
+      <unit>B</unit>
+      <increment-step>1073741824</increment-step>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>hbase.client.scanner.caching</name>
+    <value>100</value>
+    <description>Number of rows that will be fetched when calling next
+    on a scanner if it is not served from (local, client) memory. Higher
+    caching values will enable faster scanners but will eat up more memory,
+    and some calls of next may take longer when the cache is empty.
+    Do not set this value such that the time between invocations is greater
+    than the scanner timeout; i.e. hbase.regionserver.lease.period
+    </description>
+    <display-name>Number of Fetched Rows when Scanning from Disk</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>100</minimum>
+      <maximum>10000</maximum>
+      <increment-step>100</increment-step>
+      <unit>rows</unit>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>zookeeper.session.timeout</name>
+    <value>90000</value>
+    <description>ZooKeeper session timeout.
+      ZooKeeper session timeout in milliseconds. It is used in two different ways.
+      First, this value is used in the ZK client that HBase uses to connect to the ensemble.
+      It is also used by HBase when it starts a ZK server and it is passed as the 'maxSessionTimeout'. See
+      http://hadoop.apache.org/zookeeper/docs/current/zookeeperProgrammers.html#ch_zkSessions.
+      For example, if an HBase region server connects to a ZK ensemble that's also managed by HBase, then the
+      session timeout will be the one specified by this configuration. But a region server that connects
+      to an ensemble managed with a different configuration will be subject to that ensemble's maxSessionTimeout. So,
+      even though HBase might propose using 90 seconds, the ensemble can have a max timeout lower than this, and
+      it will take precedence.
+    </description>
+    <display-name>Zookeeper Session Timeout</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>10000</minimum>
+      <maximum>180000</maximum>
+      <unit>milliseconds</unit>
+      <increment-step>10000</increment-step>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>hbase.client.keyvalue.maxsize</name>
+    <value>1048576</value>
+    <description>
+      Specifies the combined maximum allowed size of a KeyValue
+      instance. This is to set an upper boundary for a single entry saved in a
+      storage file. Since entries cannot be split, this helps avoid a situation where a region
+      can no longer be split because its data is too large. It seems wise
+      to set this to a fraction of the maximum region size. Setting it to zero
+      or less disables the check.
+    </description>
+    <display-name>Maximum Record Size</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>1048576</minimum>
+      <maximum>31457280</maximum>
+      <unit>B</unit>
+      <increment-step>262144</increment-step>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>hbase.hstore.compactionThreshold</name>
+    <value>3</value>
+    <description>
+      If more than this number of StoreFiles exist in any one Store
+      (one StoreFile is written per flush of the MemStore), a compaction is run
+      to rewrite all StoreFiles into one larger StoreFile. Larger values delay
+      compaction, but when compaction does occur, it takes longer to complete.
+    </description>
+    <display-name>Maximum Store Files before Minor Compaction</display-name>
+    <value-attributes>
+      <type>int</type>
+      <entries>
+        <entry>
+          <value>2</value>
+        </entry>
+        <entry>
+          <value>3</value>
+        </entry>
+        <entry>
+          <value>4</value>
+        </entry>
+      </entries>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>hbase.hstore.blockingStoreFiles</name>
+    <display-name>hstore blocking storefiles</display-name>
+    <value>10</value>
+    <description>
+    If more than this number of StoreFiles in any one Store
+    (one StoreFile is written per flush of MemStore) then updates are
+    blocked for this HRegion until a compaction is completed, or
+    until hbase.hstore.blockingWaitTime has been exceeded.
+    </description>
+    <value-attributes>
+      <type>int</type>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>hfile.block.cache.size</name>
+    <value>0.40</value>
+    <description>Percentage of RegionServer memory to allocate to read buffers.</description>
+    <display-name>% of RegionServer Allocated to Read Buffers</display-name>
+    <value-attributes>
+      <type>float</type>
+      <minimum>0</minimum>
+      <maximum>0.8</maximum>
+      <increment-step>0.01</increment-step>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <!-- Additional configuration specific to HBase security -->
+  <property>
+    <name>hbase.superuser</name>
+    <value>hbase</value>
+    <description>List of users or groups (comma-separated) who are allowed
+    full privileges, regardless of stored ACLs, across the cluster.
+    Only used when HBase security is enabled.
+    </description>
+    <depends-on>
+      <property>
+        <type>hbase-env</type>
+        <name>hbase_user</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>hbase.security.authentication</name>
+    <value>simple</value>
+    <description>
+      Select Simple or Kerberos authentication. Note: Kerberos must be set up before the Kerberos option will take effect.
+    </description>
+    <display-name>Enable Authentication</display-name>
+    <value-attributes>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <label>Simple</label>
+          <value>simple</value>
+        </entry>
+        <entry>
+          <label>Kerberos</label>
+          <value>kerberos</value>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>hbase.security.authorization</name>
+    <value>false</value>
+    <description>Set the authorization method.</description>
+    <display-name>Enable Authorization</display-name>
+    <value-attributes>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>true</value>
+          <label>Native</label>
+        </entry>
+        <entry>
+          <value>false</value>
+          <label>Off</label>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>hbase.coprocessor.region.classes</name>
+    <value>org.apache.hadoop.hbase.security.access.SecureBulkLoadEndpoint</value>
+    <description>A comma-separated list of Coprocessors that are loaded by
+      default on all tables. For any override coprocessor method, these classes
+      will be called in order. After implementing your own Coprocessor, just put
+      it in HBase's classpath and add the fully qualified class name here.
+      A coprocessor can also be loaded on demand by setting it on the HTableDescriptor.
+    </description>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>hbase-site</type>
+        <name>hbase.security.authorization</name>
+      </property>
+      <property>
+        <type>hbase-site</type>
+        <name>hbase.security.authentication</name>
+      </property>
+      <property>
+        <type>ranger-hbase-plugin-properties</type>
+        <name>ranger-hbase-plugin-enabled</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>hbase.coprocessor.master.classes</name>
+    <value/>
+    <description>A comma-separated list of
+      org.apache.hadoop.hbase.coprocessor.MasterObserver coprocessors that are
+      loaded by default on the active HMaster process. For any implemented
+      coprocessor methods, the listed classes will be called in order. After
+      implementing your own MasterObserver, just put it in HBase's classpath
+      and add the fully qualified class name here.
+    </description>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>hbase-site</type>
+        <name>hbase.security.authorization</name>
+      </property>
+      <property>
+        <type>ranger-hbase-plugin-properties</type>
+        <name>ranger-hbase-plugin-enabled</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>hbase.zookeeper.property.clientPort</name>
+    <value>2181</value>
+    <description>Property from ZooKeeper's config zoo.cfg.
+    The port at which the clients will connect.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <!--
+  The following three properties are used together to create the list of
+  host:peer_port:leader_port quorum servers for ZooKeeper.
+  -->
+  <property>
+    <name>hbase.zookeeper.quorum</name>
+    <value>localhost</value>
+    <description>Comma-separated list of servers in the ZooKeeper Quorum.
+    For example, "host1.mydomain.com,host2.mydomain.com,host3.mydomain.com".
+    By default this is set to localhost for local and pseudo-distributed modes
+    of operation. For a fully-distributed setup, this should be set to a full
+    list of ZooKeeper quorum servers. If HBASE_MANAGES_ZK is set in hbase-env.sh
+    this is the list of servers which we will start/stop ZooKeeper on.
+    </description>
+    <value-attributes>
+      <type>multiLine</type>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <!-- End of properties used to generate ZooKeeper host:port quorum list. -->
+  <property>
+    <name>hbase.zookeeper.useMulti</name>
+    <value>true</value>
+    <description>Instructs HBase to make use of ZooKeeper's multi-update functionality.
+    This allows certain ZooKeeper operations to complete more quickly and prevents some issues
+    with rare Replication failure scenarios (see the release note of HBASE-2611 for an example).
+    IMPORTANT: only set this to true if all ZooKeeper servers in the cluster are on version 3.4+
+    and will not be downgraded.  ZooKeeper versions before 3.4 do not support multi-update and will
+    not fail gracefully if multi-update is invoked (see ZOOKEEPER-1495).
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>zookeeper.znode.parent</name>
+    <value>/hbase-unsecure</value>
+    <description>Root ZNode for HBase in ZooKeeper. All of HBase's ZooKeeper
+      files that are configured with a relative path will go under this node.
+      By default, all of HBase's ZooKeeper file paths are configured with a
+      relative path, so they will all go under this directory unless changed.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>hbase.client.retries.number</name>
+    <value>35</value>
+    <description>Maximum retries.  Used as maximum for all retryable
+    operations such as the getting of a cell's value, starting a row update,
+    etc.  Retry interval is a rough function based on hbase.client.pause.  At
+    first we retry at this interval but then with backoff, we pretty quickly reach
+    retrying every ten seconds.  See HConstants#RETRY_BACKOFF for how the backoff
+    ramps up.  Change this setting and hbase.client.pause to suit your workload.</description>
+    <display-name>Maximum Client Retries</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>5</minimum>
+      <maximum>50</maximum>
+      <increment-step>1</increment-step>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>hbase.rpc.timeout</name>
+    <value>90000</value>
+    <description>
+      This is for the RPC layer to define how long an HBase client application
+      waits for a remote call to time out. It uses pings to check connections
+      but will eventually throw a TimeoutException.
+    </description>
+    <display-name>HBase RPC Timeout</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>10000</minimum>
+      <maximum>180000</maximum>
+      <unit>milliseconds</unit>
+      <increment-step>10000</increment-step>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>hbase.defaults.for.version.skip</name>
+    <value>true</value>
+    <description>Disables version verification.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>phoenix.query.timeoutMs</name>
+    <value>60000</value>
+    <description>Number of milliseconds after which a Phoenix query will timeout on the client.</description>
+    <display-name>Phoenix Query Timeout</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>30000</minimum>
+      <maximum>180000</maximum>
+      <unit>milliseconds</unit>
+      <increment-step>10000</increment-step>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>dfs.domain.socket.path</name>
+    <value>/var/lib/hadoop-hdfs/dn_socket</value>
+    <description>Path to domain socket.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>hbase.rpc.protection</name>
+    <value>authentication</value>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <!-- Inherited from HBase in HDP 2.2 -->
+  <property>
+    <name>hbase.bulkload.staging.dir</name>
+    <value>/apps/hbase/staging</value>
+    <description>A staging directory in the default file system (HDFS)
+      for bulk loading.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>hbase.hregion.majorcompaction.jitter</name>
+    <value>0.50</value>
+    <description>A multiplier applied to hbase.hregion.majorcompaction to cause compaction to occur
+      a given amount of time either side of hbase.hregion.majorcompaction. The smaller the number,
+      the closer the compactions will happen to the hbase.hregion.majorcompaction
+      interval.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>hbase.bucketcache.ioengine</name>
+    <value/>
+    <description>Where to store the contents of the bucketcache. One of: onheap,
+      offheap, or file. If a file, set it to file:PATH_TO_FILE.</description>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>hbase.bucketcache.size</name>
+    <value/>
+    <description>The size of the buckets for the bucketcache if you only use a single size.</description>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>hbase.bucketcache.percentage.in.combinedcache</name>
+    <value/>
+    <description>A value between 0.0 and 1.0</description>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>hbase.regionserver.wal.codec</name>
+    <display-name>RegionServer WAL Codec</display-name>
+    <value>org.apache.hadoop.hbase.regionserver.wal.WALCellCodec</value>
+    <depends-on>
+      <property>
+        <type>hbase-env</type>
+        <name>phoenix_sql_enabled</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>hbase.region.server.rpc.scheduler.factory.class</name>
+    <value/>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>hbase-env</type>
+        <name>phoenix_sql_enabled</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>hbase.rpc.controllerfactory.class</name>
+    <value/>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>hbase-env</type>
+        <name>phoenix_sql_enabled</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>phoenix.functions.allowUserDefinedFunctions</name>
+    <value> </value>
+    <depends-on>
+      <property>
+        <type>hbase-env</type>
+        <name>phoenix_sql_enabled</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>hbase.coprocessor.regionserver.classes</name>
+    <value/>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>hbase-site</type>
+        <name>hbase.security.authorization</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>hbase.hstore.compaction.max</name>
+    <value>10</value>
+    <description>The maximum number of StoreFiles which will be selected for a single minor
+      compaction, regardless of the number of eligible StoreFiles. Effectively, the value of
+      hbase.hstore.compaction.max controls the length of time it takes a single compaction to
+      complete. Setting it larger means that more StoreFiles are included in a compaction. For most
+      cases, the default value is appropriate.
+    </description>
+    <display-name>Maximum Files for Compaction</display-name>
+    <value-attributes>
+      <type>int</type>
+      <entries>
+        <entry>
+          <value>8</value>
+        </entry>
+        <entry>
+          <value>9</value>
+        </entry>
+        <entry>
+          <value>10</value>
+        </entry>
+        <entry>
+          <value>11</value>
+        </entry>
+        <entry>
+          <value>12</value>
+        </entry>
+        <entry>
+          <value>13</value>
+        </entry>
+        <entry>
+          <value>14</value>
+        </entry>
+        <entry>
+          <value>15</value>
+        </entry>
+      </entries>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>hbase.regionserver.global.memstore.size</name>
+    <value>0.4</value>
+    <description>Percentage of RegionServer memory to allocate to write buffers.
+      Each column family within each region is allocated a smaller pool (the memstore) within this shared write pool.
+      If this buffer is full, updates are blocked and data is flushed from memstores until a global low watermark
+      (hbase.regionserver.global.memstore.size.lower.limit) is reached.
+    </description>
+    <display-name>% of RegionServer Allocated to Write Buffers</display-name>
+    <value-attributes>
+      <type>float</type>
+      <minimum>0</minimum>
+      <maximum>0.8</maximum>
+      <increment-step>0.01</increment-step>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <!-- Inherited from HBase in HDP 2.3 -->
+  <property>
+    <name>hbase.regionserver.port</name>
+    <value>16020</value>
+    <description>The port the HBase RegionServer binds to.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <!-- Inherited from HBase in HDP 2.5 -->
+  <property>
+    <name>hbase.master.ui.readonly</name>
+    <value>false</value>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>zookeeper.recovery.retry</name>
+    <value>6</value>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <!-- Inherited from HBase in HDP 2.6 -->
+  <property>
+    <name>hbase.regionserver.executor.openregion.threads</name>
+    <value>20</value>
+    <description>The number of threads the region server uses to open regions
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>hbase.master.namespace.init.timeout</name>
+    <value>2400000</value>
+    <description>The number of milliseconds the master waits for the hbase:namespace table to be initialized
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>hbase.master.wait.on.regionservers.timeout</name>
+    <value>30000</value>
+    <description>The number of milliseconds the master waits for region servers to report in
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+</configuration>
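
Note: the hbase.bucketcache.* properties above ship empty. A minimal off-heap
BucketCache sketch, assuming the size is given in MB and all values are
hypothetical:

    <property>
      <name>hbase.bucketcache.ioengine</name>
      <value>offheap</value>
    </property>
    <property>
      <name>hbase.bucketcache.size</name>
      <value>4096</value>
    </property>

with hbase_max_direct_memory_size in hbase-env set to something larger than
4096 (e.g. 5120) so that -XX:MaxDirectMemorySize can accommodate the cache.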

http://git-wip-us.apache.org/repos/asf/ambari/blob/41006ebe/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/configuration/ranger-hbase-audit.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/configuration/ranger-hbase-audit.xml b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/configuration/ranger-hbase-audit.xml
new file mode 100644
index 0000000..9d4e7d0
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/configuration/ranger-hbase-audit.xml
@@ -0,0 +1,132 @@
+<?xml version="1.0"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration>
+  <property>
+    <name>xasecure.audit.is.enabled</name>
+    <value>true</value>
+    <description>Is Audit enabled?</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.destination.hdfs</name>
+    <value>true</value>
+    <display-name>Audit to HDFS</display-name>
+    <description>Is Audit to HDFS enabled?</description>
+    <value-attributes>
+      <type>boolean</type>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>ranger-env</type>
+        <name>xasecure.audit.destination.hdfs</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.destination.hdfs.dir</name>
+    <value>hdfs://NAMENODE_HOSTNAME:8020/ranger/audit</value>
+    <description>HDFS folder to write audit to; make sure the service user has the required permissions</description>
+    <depends-on>
+      <property>
+        <type>ranger-env</type>
+        <name>xasecure.audit.destination.hdfs.dir</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.destination.hdfs.batch.filespool.dir</name>
+    <value>/var/log/hbase/audit/hdfs/spool</value>
+    <description>/var/log/hbase/audit/hdfs/spool</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.destination.solr</name>
+    <value>false</value>
+    <display-name>Audit to SOLR</display-name>
+    <description>Is Solr audit enabled?</description>
+    <value-attributes>
+      <type>boolean</type>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>ranger-env</type>
+        <name>xasecure.audit.destination.solr</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.destination.solr.urls</name>
+    <value/>
+    <description>Solr URL</description>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>ranger-admin-site</type>
+        <name>ranger.audit.solr.urls</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.destination.solr.zookeepers</name>
+    <value>NONE</value>
+    <description>Solr Zookeeper string</description>
+    <depends-on>
+      <property>
+        <type>ranger-admin-site</type>
+        <name>ranger.audit.solr.zookeepers</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.destination.solr.batch.filespool.dir</name>
+    <value>/var/log/hbase/audit/solr/spool</value>
+    <description>/var/log/hbase/audit/solr/spool</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.provider.summary.enabled</name>
+    <value>true</value>
+    <display-name>Audit provider summary enabled</display-name>
+    <description>Enable Summary audit?</description>
+    <value-attributes>
+      <type>boolean</type>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <!-- Inherited from HBase in HDP 2.6 -->
+  <property>
+    <name>ranger.plugin.hbase.ambari.cluster.name</name>
+    <value>{{cluster_name}}</value>
+    <description>Captures the cluster name from where the Ranger HBase plugin is enabled.</description>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+</configuration>
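
Note: the default xasecure.audit.destination.hdfs.dir value contains the
placeholder NAMENODE_HOSTNAME, which must be replaced with a real NameNode
address. With a hypothetical NameNode at nn1.example.com, the effective value
would be:

    xasecure.audit.destination.hdfs.dir=hdfs://nn1.example.com:8020/ranger/audit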

http://git-wip-us.apache.org/repos/asf/ambari/blob/41006ebe/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/configuration/ranger-hbase-plugin-properties.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/configuration/ranger-hbase-plugin-properties.xml b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/configuration/ranger-hbase-plugin-properties.xml
new file mode 100644
index 0000000..7d7f1fa
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/configuration/ranger-hbase-plugin-properties.xml
@@ -0,0 +1,135 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration supports_final="true">
+  <property>
+    <name>common.name.for.certificate</name>
+    <value/>
+    <description>Common name for the certificate; this value should match what is specified in the repo within Ranger admin</description>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>policy_user</name>
+    <value>ambari-qa</value>
+    <display-name>Policy user for HBASE</display-name>
+    <depends-on>
+      <property>
+        <type>ranger-env</type>
+        <name>ranger_user</name>
+      </property>
+    </depends-on>
+    <description>This user must be a system user and must also be present in the Ranger admin portal</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger-hbase-plugin-enabled</name>
+    <value>No</value>
+    <display-name>Enable Ranger for HBASE</display-name>
+    <description>Enable the Ranger HBase plugin?</description>
+    <value-attributes>
+      <type>boolean</type>
+      <overridable>false</overridable>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>ranger-env</type>
+        <name>ranger-hbase-plugin-enabled</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>REPOSITORY_CONFIG_USERNAME</name>
+    <value>hbase</value>
+    <display-name>Ranger repository config user</display-name>
+    <description>Used for repository creation on Ranger admin</description>
+    <depends-on>
+      <property>
+        <type>ranger-hbase-plugin-properties</type>
+        <name>ranger-hbase-plugin-enabled</name>
+      </property>
+      <property>
+        <type>hbase-env</type>
+        <name>hbase_user</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>REPOSITORY_CONFIG_PASSWORD</name>
+    <value>hbase</value>
+    <display-name>Ranger repository config password</display-name>
+    <property-type>PASSWORD</property-type>
+    <description>Used for repository creation on Ranger admin</description>
+    <value-attributes>
+      <type>password</type>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <!-- Inherited from HBase in HDP 2.6 -->
+  <property>
+    <name>external_admin_username</name>
+    <value></value>
+    <display-name>External Ranger admin username</display-name>
+    <description>The Ranger default admin username; required if you want to communicate with an external Ranger</description>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>external_admin_password</name>
+    <value></value>
+    <display-name>External Ranger admin password</display-name>
+    <property-type>PASSWORD</property-type>
+    <description>The Ranger default admin password; required if you want to communicate with an external Ranger</description>
+    <value-attributes>
+      <type>password</type>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>external_ranger_admin_username</name>
+    <value></value>
+    <display-name>External Ranger Ambari admin username</display-name>
+    <description>The Ranger default Ambari admin username; required if you want to communicate with an external Ranger</description>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>external_ranger_admin_password</name>
+    <value></value>
+    <display-name>External Ranger Ambari admin password</display-name>
+    <property-type>PASSWORD</property-type>
+    <description>The Ranger default Ambari admin password; required if you want to communicate with an external Ranger</description>
+    <value-attributes>
+      <type>password</type>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+</configuration>
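
Note: per the depends-on declarations in hbase-site.xml above, flipping
ranger-hbase-plugin-enabled to Yes is expected to drive recommendations for
hbase.security.authorization and the hbase.coprocessor.* lists; typically the
Ranger authorization coprocessor
(org.apache.ranger.authorization.hbase.RangerAuthorizationCoprocessor) ends up
in those lists, though the exact wiring is decided by the stack advisor, not
by this file.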

http://git-wip-us.apache.org/repos/asf/ambari/blob/41006ebe/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/configuration/ranger-hbase-policymgr-ssl.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/configuration/ranger-hbase-policymgr-ssl.xml b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/configuration/ranger-hbase-policymgr-ssl.xml
new file mode 100644
index 0000000..c761b26
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/configuration/ranger-hbase-policymgr-ssl.xml
@@ -0,0 +1,66 @@
+<?xml version="1.0"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration>
+  <property>
+    <name>xasecure.policymgr.clientssl.keystore</name>
+    <value>/usr/hdp/current/hbase-client/conf/ranger-plugin-keystore.jks</value>
+    <description>Java keystore file</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.policymgr.clientssl.keystore.password</name>
+    <value>myKeyFilePassword</value>
+    <property-type>PASSWORD</property-type>
+    <description>Password for the keystore</description>
+    <value-attributes>
+      <type>password</type>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.policymgr.clientssl.truststore</name>
+    <value>/usr/hdp/current/hbase-client/conf/ranger-plugin-truststore.jks</value>
+    <description>Java truststore file</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.policymgr.clientssl.truststore.password</name>
+    <value>changeit</value>
+    <property-type>PASSWORD</property-type>
+    <description>Java truststore password</description>
+    <value-attributes>
+      <type>password</type>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.policymgr.clientssl.keystore.credential.file</name>
+    <value>jceks://file{{credential_file}}</value>
+    <description>Java keystore credential file</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.policymgr.clientssl.truststore.credential.file</name>
+    <value>jceks://file{{credential_file}}</value>
+    <description>Java truststore credential file</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+</configuration>
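
Note: jceks://file{{credential_file}} is a template; {{credential_file}} is
filled in by Ambari at runtime. As an illustration only, with a hypothetical
credential file at /etc/ranger/hbase/cred.jceks, a password could be stored
with the Hadoop credential CLI (the alias name shown is illustrative):

    hadoop credential create sslKeyStore -provider jceks://file/etc/ranger/hbase/cred.jceks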

http://git-wip-us.apache.org/repos/asf/ambari/blob/41006ebe/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/configuration/ranger-hbase-security.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/configuration/ranger-hbase-security.xml b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/configuration/ranger-hbase-security.xml
new file mode 100644
index 0000000..4a0909a
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/configuration/ranger-hbase-security.xml
@@ -0,0 +1,74 @@
+<?xml version="1.0"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration>
+  <property>
+    <name>ranger.plugin.hbase.service.name</name>
+    <value>{{repo_name}}</value>
+    <description>Name of the Ranger service containing HBase policies</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.plugin.hbase.policy.source.impl</name>
+    <value>org.apache.ranger.admin.client.RangerAdminRESTClient</value>
+    <description>Class to retrieve policies from the source</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.plugin.hbase.policy.rest.url</name>
+    <value>{{policymgr_mgr_url}}</value>
+    <description>URL to Ranger Admin</description>
+    <on-ambari-upgrade add="false"/>
+    <depends-on>
+      <property>
+        <type>admin-properties</type>
+        <name>policymgr_external_url</name>
+      </property>
+    </depends-on>
+  </property>
+  <property>
+    <name>ranger.plugin.hbase.policy.rest.ssl.config.file</name>
+    <value>/etc/hbase/conf/ranger-policymgr-ssl.xml</value>
+    <description>Path to the file containing SSL details to contact Ranger Admin</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.plugin.hbase.policy.pollIntervalMs</name>
+    <value>30000</value>
+    <description>How often, in milliseconds, to poll for changes in policies</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.plugin.hbase.policy.cache.dir</name>
+    <value>/etc/ranger/{{repo_name}}/policycache</value>
+    <description>Directory where Ranger policies are cached after successful retrieval from the source</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.hbase.update.xapolicies.on.grant.revoke</name>
+    <value>true</value>
+    <display-name>Should HBase GRANT/REVOKE update XA policies</display-name>
+    <description>Should HBase plugin update Ranger policies for updates to permissions done using GRANT/REVOKE?</description>
+    <value-attributes>
+      <type>boolean</type>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+</configuration>
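
The {{repo_name}} and {{policymgr_mgr_url}} tokens above are filled in from cluster
configuration when the plugin is set up. A hypothetical resolution, with an illustrative
cluster name and host (6080 is the usual Ranger Admin port):

  ranger.plugin.hbase.service.name     = c1_hbase
  ranger.plugin.hbase.policy.rest.url  = http://ranger-admin.example.com:6080
  ranger.plugin.hbase.policy.cache.dir = /etc/ranger/c1_hbase/policycache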

http://git-wip-us.apache.org/repos/asf/ambari/blob/41006ebe/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/kerberos.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/kerberos.json b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/kerberos.json
new file mode 100644
index 0000000..011921b
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/kerberos.json
@@ -0,0 +1,160 @@
+{
+  "services": [
+    {
+      "name": "HBASE",
+      "identities": [
+        {
+          "name": "/spnego"
+        },
+        {
+          "name": "hbase",
+          "principal": {
+            "value": "${hbase-env/hbase_user}${principal_suffix}@${realm}",
+            "type" : "user",
+            "configuration": "hbase-env/hbase_principal_name",
+            "local_username": "${hbase-env/hbase_user}"
+          },
+          "keytab": {
+            "file": "${keytab_dir}/hbase.headless.keytab",
+            "owner": {
+              "name": "${hbase-env/hbase_user}",
+              "access": "r"
+            },
+            "group": {
+              "name": "${cluster-env/user_group}",
+              "access": "r"
+            },
+            "configuration": "hbase-env/hbase_user_keytab"
+          }
+        },
+        {
+          "name": "/smokeuser"
+        }
+      ],
+      "configurations": [
+        {
+          "hbase-site": {
+            "hbase.security.authentication": "kerberos",
+            "hbase.security.authorization": "true",
+            "zookeeper.znode.parent": "/hbase-secure",
+            "hbase.coprocessor.master.classes": "{{hbase_coprocessor_master_classes}}",
+            "hbase.coprocessor.region.classes": "{{hbase_coprocessor_region_classes}}",
+            "hbase.coprocessor.regionserver.classes": "{{hbase_coprocessor_regionserver_classes}}",
+            "hbase.bulkload.staging.dir": "/apps/hbase/staging",
+            "hbase.master.ui.readonly": "true"
+          }
+        },
+        {
+          "ranger-hbase-audit": {
+            "xasecure.audit.jaas.Client.loginModuleName": "com.sun.security.auth.module.Krb5LoginModule",
+            "xasecure.audit.jaas.Client.loginModuleControlFlag": "required",
+            "xasecure.audit.jaas.Client.option.useKeyTab": "true",
+            "xasecure.audit.jaas.Client.option.storeKey": "false",
+            "xasecure.audit.jaas.Client.option.serviceName": "solr",
+            "xasecure.audit.destination.solr.force.use.inmemory.jaas.config": "true"
+          }
+        }
+      ],
+      "components": [
+        {
+          "name": "HBASE_MASTER",
+          "identities": [
+            {
+              "name": "/HDFS/NAMENODE/hdfs"
+            },
+            {
+              "name": "hbase_master_hbase",
+              "principal": {
+                "value": "hbase/_HOST@${realm}",
+                "type" : "service",
+                "configuration": "hbase-site/hbase.master.kerberos.principal",
+                "local_username": "${hbase-env/hbase_user}"
+              },
+              "keytab": {
+                "file": "${keytab_dir}/hbase.service.keytab",
+                "owner": {
+                  "name": "${hbase-env/hbase_user}",
+                  "access": "r"
+                },
+                "group": {
+                  "name": "${cluster-env/user_group}",
+                  "access": ""
+                },
+                "configuration": "hbase-site/hbase.master.keytab.file"
+              }
+            },
+            {
+              "name": "/spnego",
+              "principal": {
+                "configuration": "hbase-site/hbase.security.authentication.spnego.kerberos.principal"
+              },
+              "keytab": {
+                "configuration": "hbase-site/hbase.security.authentication.spnego.kerberos.keytab"
+              }
+            },
+            {
+              "name" : "ranger_hbase_audit",
+              "reference": "/HBASE/HBASE_MASTER/hbase_master_hbase",
+              "principal": {
+                "configuration": "ranger-hbase-audit/xasecure.audit.jaas.Client.option.principal"
+              },
+              "keytab": {
+                "configuration": "ranger-hbase-audit/xasecure.audit.jaas.Client.option.keyTab"
+              }
+            }
+          ]
+        },
+        {
+          "name": "HBASE_REGIONSERVER",
+          "identities": [
+            {
+              "name": "hbase_regionserver_hbase",
+              "principal": {
+                "value": "hbase/_HOST@${realm}",
+                "type" : "service",
+                "configuration": "hbase-site/hbase.regionserver.kerberos.principal",
+                "local_username": "${hbase-env/hbase_user}"
+              },
+              "keytab": {
+                "file": "${keytab_dir}/hbase.service.keytab",
+                "owner": {
+                  "name": "${hbase-env/hbase_user}",
+                  "access": "r"
+                },
+                "group": {
+                  "name": "${cluster-env/user_group}",
+                  "access": ""
+                },
+                "configuration": "hbase-site/hbase.regionserver.keytab.file"
+              }
+            },
+            {
+              "name": "/spnego",
+              "principal": {
+                "configuration": "hbase-site/hbase.security.authentication.spnego.kerberos.principal"
+              },
+              "keytab": {
+                "configuration": "hbase-site/hbase.security.authentication.spnego.kerberos.keytab"
+              }
+            }
+          ]
+        },
+        {
+          "name": "PHOENIX_QUERY_SERVER",
+          "identities": [
+            {
+              "name": "phoenix_spnego",
+              "reference": "/spnego",
+              "principal": {
+                "configuration": "hbase-site/phoenix.queryserver.kerberos.principal"
+              },
+              "keytab": {
+                "configuration": "hbase-site/phoenix.queryserver.keytab.file"
+              }
+            }
+          ]
+        }
+      ]
+    }
+  ]
+}


[07/50] [abbrv] ambari git commit: AMBARI-20777 : AMS changes to use instanceId for cluster based segregation of data. (avijayan)

Posted by ao...@apache.org.
AMBARI-20777 : AMS changes to use instanceId for cluster based segregation of data. (avijayan)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/6326589b
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/6326589b
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/6326589b

Branch: refs/heads/branch-3.0-perf
Commit: 6326589b5af88fc72eddbfc6c33e1898e986b938
Parents: 893f97e
Author: Aravindan Vijayan <av...@hortonworks.com>
Authored: Tue Apr 18 13:16:09 2017 -0700
Committer: Andrew Onishuk <ao...@hortonworks.com>
Committed: Mon Apr 24 14:59:56 2017 +0300

----------------------------------------------------------------------
 .../timeline/HadoopTimelineMetricsSink.java     |   4 +
 .../timeline/HBaseTimelineMetricStore.java      |   5 +
 .../metrics/timeline/PhoenixHBaseAccessor.java  | 106 +++++++++++++++++++
 .../metrics/timeline/TimelineMetricStore.java   |   8 ++
 .../TimelineMetricClusterAggregatorSecond.java  |   1 +
 .../TimelineMetricMetadataManager.java          |  38 +++++++
 .../discovery/TimelineMetricMetadataSync.java   |  57 ++++++++++
 .../timeline/query/PhoenixTransactSQL.java      |  12 +++
 .../webapp/TimelineWebServices.java             |  16 +++
 .../timeline/TestTimelineMetricStore.java       |   5 +
 .../timeline/discovery/TestMetadataManager.java |   8 ++
 .../timeline/discovery/TestMetadataSync.java    |  12 +++
 12 files changed, 272 insertions(+)
----------------------------------------------------------------------
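
The new /metrics/instances endpoint in TimelineWebServices exposes the instance-to-hosts
mapping built by the metadata manager. A hypothetical query against a collector (host and
port are illustrative; the response shape follows TimelineMetricStore.getInstanceHostsMetadata()):

  curl -s 'http://ams-collector.example.com:6188/ws/v1/timeline/metrics/instances'
  {"instance1":["host1.example.com","host2.example.com"],"instance2":["host3.example.com"]}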


http://git-wip-us.apache.org/repos/asf/ambari/blob/6326589b/ambari-metrics/ambari-metrics-hadoop-sink/src/main/java/org/apache/hadoop/metrics2/sink/timeline/HadoopTimelineMetricsSink.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-hadoop-sink/src/main/java/org/apache/hadoop/metrics2/sink/timeline/HadoopTimelineMetricsSink.java b/ambari-metrics/ambari-metrics-hadoop-sink/src/main/java/org/apache/hadoop/metrics2/sink/timeline/HadoopTimelineMetricsSink.java
index a112ef2..8e0de03 100644
--- a/ambari-metrics/ambari-metrics-hadoop-sink/src/main/java/org/apache/hadoop/metrics2/sink/timeline/HadoopTimelineMetricsSink.java
+++ b/ambari-metrics/ambari-metrics-hadoop-sink/src/main/java/org/apache/hadoop/metrics2/sink/timeline/HadoopTimelineMetricsSink.java
@@ -51,6 +51,7 @@ public class HadoopTimelineMetricsSink extends AbstractTimelineMetricsSink imple
   private Map<String, Set<String>> useTagsMap = new HashMap<String, Set<String>>();
   private TimelineMetricsCache metricsCache;
   private String hostName = "UNKNOWN.example.com";
+  private String instanceId = null;
   private String serviceName = "";
   private Collection<String> collectorHosts;
   private String collectorUri;
@@ -94,6 +95,8 @@ public class HadoopTimelineMetricsSink extends AbstractTimelineMetricsSink imple
     }
 
     serviceName = getServiceName(conf);
+    String inst = conf.getString("instanceId", "");
+    instanceId = StringUtils.isEmpty(inst) ? null : inst;
 
     LOG.info("Identified hostname = " + hostName + ", serviceName = " + serviceName);
     // Initialize the collector write strategy
@@ -318,6 +321,7 @@ public class HadoopTimelineMetricsSink extends AbstractTimelineMetricsSink imple
         timelineMetric.setMetricName(name);
         timelineMetric.setHostName(hostName);
         timelineMetric.setAppId(serviceName);
+        timelineMetric.setInstanceId(instanceId);
         timelineMetric.setStartTime(startTime);
         timelineMetric.setType(metric.type() != null ? metric.type().name() : null);
         timelineMetric.getMetricValues().put(startTime, value.doubleValue());

http://git-wip-us.apache.org/repos/asf/ambari/blob/6326589b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/HBaseTimelineMetricStore.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/HBaseTimelineMetricStore.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/HBaseTimelineMetricStore.java
index 17c58f0..fa095a0 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/HBaseTimelineMetricStore.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/HBaseTimelineMetricStore.java
@@ -388,6 +388,11 @@ public class HBaseTimelineMetricStore extends AbstractService implements Timelin
   }
 
   @Override
+  public Map<String, Set<String>> getInstanceHostsMetadata() throws SQLException, IOException {
+    return metricMetadataManager.getHostedInstanceCache();
+  }
+
+  @Override
   public List<String> getLiveInstances() {
 
     List<String> instances = null;

http://git-wip-us.apache.org/repos/asf/ambari/blob/6326589b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/PhoenixHBaseAccessor.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/PhoenixHBaseAccessor.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/PhoenixHBaseAccessor.java
index 8b0d84b..65bbc4c 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/PhoenixHBaseAccessor.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/PhoenixHBaseAccessor.java
@@ -115,6 +115,7 @@ import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.ti
 import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.query.PhoenixTransactSQL.CONTAINER_METRICS_TABLE_NAME;
 import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.query.PhoenixTransactSQL.CREATE_CONTAINER_METRICS_TABLE_SQL;
 import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.query.PhoenixTransactSQL.CREATE_HOSTED_APPS_METADATA_TABLE_SQL;
+import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.query.PhoenixTransactSQL.CREATE_INSTANCE_HOST_TABLE_SQL;
 import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.query.PhoenixTransactSQL.CREATE_METRICS_AGGREGATE_TABLE_SQL;
 import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.query.PhoenixTransactSQL.CREATE_METRICS_CLUSTER_AGGREGATE_GROUPED_TABLE_SQL;
 import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.query.PhoenixTransactSQL.CREATE_METRICS_CLUSTER_AGGREGATE_TABLE_SQL;
@@ -124,6 +125,7 @@ import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.ti
 import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.query.PhoenixTransactSQL.DEFAULT_ENCODING;
 import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.query.PhoenixTransactSQL.DEFAULT_TABLE_COMPRESSION;
 import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.query.PhoenixTransactSQL.GET_HOSTED_APPS_METADATA_SQL;
+import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.query.PhoenixTransactSQL.GET_INSTANCE_HOST_METADATA_SQL;
 import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.query.PhoenixTransactSQL.GET_METRIC_METADATA_SQL;
 import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.query.PhoenixTransactSQL.METRICS_AGGREGATE_DAILY_TABLE_NAME;
 import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.query.PhoenixTransactSQL.METRICS_AGGREGATE_HOURLY_TABLE_NAME;
@@ -138,6 +140,7 @@ import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.ti
 import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.query.PhoenixTransactSQL.UPSERT_CLUSTER_AGGREGATE_SQL;
 import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.query.PhoenixTransactSQL.UPSERT_CLUSTER_AGGREGATE_TIME_SQL;
 import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.query.PhoenixTransactSQL.UPSERT_HOSTED_APPS_METADATA_SQL;
+import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.query.PhoenixTransactSQL.UPSERT_INSTANCE_HOST_METADATA_SQL;
 import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.query.PhoenixTransactSQL.UPSERT_METADATA_SQL;
 import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.query.PhoenixTransactSQL.UPSERT_METRICS_SQL;
 import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.query.PhoenixTransactSQL.UPSERT_CONTAINER_METRICS_SQL;
@@ -430,6 +433,11 @@ public class PhoenixHBaseAccessor {
         encoding, compression);
       stmt.executeUpdate(hostedAppSql);
 
+      //Host Instances table
+      String hostedInstancesSql = String.format(CREATE_INSTANCE_HOST_TABLE_SQL,
+        encoding, compression);
+      stmt.executeUpdate(hostedInstancesSql);
+
       // Container Metrics
       stmt.executeUpdate( String.format(CREATE_CONTAINER_METRICS_TABLE_SQL,
         encoding, tableTTL.get(CONTAINER_METRICS_TABLE_NAME), compression));
@@ -778,6 +786,8 @@ public class PhoenixHBaseAccessor {
 
         metadataManager.putIfModifiedHostedAppsMetadata(
                 tm.getHostName(), tm.getAppId());
+
+        metadataManager.putIfModifiedHostedInstanceMetadata(tm.getInstanceId(), tm.getHostName());
       }
       if (!acceptMetric) {
         iterator.remove();
@@ -1552,6 +1562,55 @@ public class PhoenixHBaseAccessor {
     }
   }
 
+  public void saveInstanceHostsMetadata(Map<String, Set<String>> instanceHostsMap) throws SQLException {
+    Connection conn = getConnection();
+    PreparedStatement stmt = null;
+    try {
+      stmt = conn.prepareStatement(UPSERT_INSTANCE_HOST_METADATA_SQL);
+      int rowCount = 0;
+
+      for (Map.Entry<String, Set<String>> hostInstancesEntry : instanceHostsMap.entrySet()) {
+        if (LOG.isTraceEnabled()) {
+          LOG.trace("Host Instances Entry: " + hostInstancesEntry);
+        }
+
+        String instanceId = hostInstancesEntry.getKey();
+
+        for(String hostname : hostInstancesEntry.getValue()) {
+          stmt.clearParameters();
+          stmt.setString(1, instanceId);
+          stmt.setString(2, hostname);
+          try {
+            stmt.executeUpdate();
+            rowCount++;
+          } catch (SQLException sql) {
+            LOG.error("Error saving host instances metadata.", sql);
+          }
+        }
+
+      }
+
+      conn.commit();
+      LOG.info("Saved " + rowCount + " host instances metadata records.");
+
+    } finally {
+      if (stmt != null) {
+        try {
+          stmt.close();
+        } catch (SQLException e) {
+          // Ignore
+        }
+      }
+      if (conn != null) {
+        try {
+          conn.close();
+        } catch (SQLException sql) {
+          // Ignore
+        }
+      }
+    }
+  }
+
   /**
    * Save metadata on updates.
    * @param metricMetadata Collection<TimelineMetricMetadata>
@@ -1658,6 +1717,53 @@ public class PhoenixHBaseAccessor {
     return hostedAppMap;
   }
 
+  public Map<String, Set<String>> getInstanceHostsMetdata() throws SQLException {
+    Map<String, Set<String>> instanceHostsMap = new HashMap<>();
+    Connection conn = getConnection();
+    PreparedStatement stmt = null;
+    ResultSet rs = null;
+
+    try {
+      stmt = conn.prepareStatement(GET_INSTANCE_HOST_METADATA_SQL);
+      rs = stmt.executeQuery();
+
+      while (rs.next()) {
+        String instanceId = rs.getString("INSTANCE_ID");
+        String hostname = rs.getString("HOSTNAME");
+
+        if (!instanceHostsMap.containsKey(instanceId)) {
+          instanceHostsMap.put(instanceId, new HashSet<String>());
+        }
+        instanceHostsMap.get(instanceId).add(hostname);
+      }
+
+    } finally {
+      if (rs != null) {
+        try {
+          rs.close();
+        } catch (SQLException e) {
+          // Ignore
+        }
+      }
+      if (stmt != null) {
+        try {
+          stmt.close();
+        } catch (SQLException e) {
+          // Ignore
+        }
+      }
+      if (conn != null) {
+        try {
+          conn.close();
+        } catch (SQLException sql) {
+          // Ignore
+        }
+      }
+    }
+
+    return instanceHostsMap;
+  }
+
   // No filter criteria support for now.
   public Map<TimelineMetricMetadataKey, TimelineMetricMetadata> getTimelineMetricMetadata() throws SQLException {
     Map<TimelineMetricMetadataKey, TimelineMetricMetadata> metadataMap = new HashMap<>();

http://git-wip-us.apache.org/repos/asf/ambari/blob/6326589b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TimelineMetricStore.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TimelineMetricStore.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TimelineMetricStore.java
index d049e33..121a8ae 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TimelineMetricStore.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TimelineMetricStore.java
@@ -89,6 +89,14 @@ public interface TimelineMetricStore {
   Map<String, Set<String>> getHostAppsMetadata() throws SQLException, IOException;
 
   /**
+   * Returns all instances and the set of hosts each instance is present on
+   * @return { instanceId : [ hosts ] }
+   * @throws SQLException
+   * @throws IOException
+   */
+  Map<String, Set<String>> getInstanceHostsMetadata() throws SQLException, IOException;
+
+  /**
    * Return a list of known live collector nodes
    * @return [ hostname ]
    */

http://git-wip-us.apache.org/repos/asf/ambari/blob/6326589b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/TimelineMetricClusterAggregatorSecond.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/TimelineMetricClusterAggregatorSecond.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/TimelineMetricClusterAggregatorSecond.java
index 5310906..a5a3499 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/TimelineMetricClusterAggregatorSecond.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/TimelineMetricClusterAggregatorSecond.java
@@ -129,6 +129,7 @@ public class TimelineMetricClusterAggregatorSecond extends AbstractTimelineAggre
     condition.addOrderByColumn("METRIC_NAME");
     condition.addOrderByColumn("HOSTNAME");
     condition.addOrderByColumn("APP_ID");
+    condition.addOrderByColumn("INSTANCE_ID");
     condition.addOrderByColumn("SERVER_TIME");
     return condition;
   }

http://git-wip-us.apache.org/repos/asf/ambari/blob/6326589b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/discovery/TimelineMetricMetadataManager.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/discovery/TimelineMetricMetadataManager.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/discovery/TimelineMetricMetadataManager.java
index 7eb2457..f904ebe 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/discovery/TimelineMetricMetadataManager.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/discovery/TimelineMetricMetadataManager.java
@@ -54,8 +54,10 @@ public class TimelineMetricMetadataManager {
   private final Map<TimelineMetricMetadataKey, TimelineMetricMetadata> METADATA_CACHE = new ConcurrentHashMap<>();
   // Map to lookup apps on a host
   private final Map<String, Set<String>> HOSTED_APPS_MAP = new ConcurrentHashMap<>();
+  private final Map<String, Set<String>> INSTANCE_HOST_MAP = new ConcurrentHashMap<>();
   // Sync only when needed
   AtomicBoolean SYNC_HOSTED_APPS_METADATA = new AtomicBoolean(false);
+  AtomicBoolean SYNC_HOSTED_INSTANCES_METADATA = new AtomicBoolean(false);
 
   // Single thread to sync back new writes to the store
   private final ScheduledExecutorService executorService = Executors.newSingleThreadScheduledExecutor();
@@ -122,14 +124,25 @@ public class TimelineMetricMetadataManager {
     return HOSTED_APPS_MAP;
   }
 
+  public Map<String, Set<String>> getHostedInstanceCache() {
+    return INSTANCE_HOST_MAP;
+  }
+
   public boolean syncHostedAppsMetadata() {
     return SYNC_HOSTED_APPS_METADATA.get();
   }
 
+  public boolean syncHostedInstanceMetadata() {
+    return SYNC_HOSTED_INSTANCES_METADATA.get();
+  }
+
   public void markSuccessOnSyncHostedAppsMetadata() {
     SYNC_HOSTED_APPS_METADATA.set(false);
   }
 
+  public void markSuccessOnSyncHostedInstanceMetadata() {
+    SYNC_HOSTED_INSTANCES_METADATA.set(false);
+  }
   /**
    * Test metric name for valid patterns and return true/false
    */
@@ -189,6 +202,23 @@ public class TimelineMetricMetadataManager {
     }
   }
 
+  public void putIfModifiedHostedInstanceMetadata(String instanceId, String hostname) {
+    if (StringUtils.isEmpty(instanceId)) {
+      return;
+    }
+
+    Set<String> hosts = INSTANCE_HOST_MAP.get(instanceId);
+    if (hosts == null) {
+      hosts = new HashSet<>();
+      INSTANCE_HOST_MAP.put(instanceId, hosts);
+    }
+
+    if (!hosts.contains(hostname)) {
+      hosts.add(hostname);
+      SYNC_HOSTED_INSTANCES_METADATA.set(true);
+    }
+  }
+
   public void persistMetadata(Collection<TimelineMetricMetadata> metadata) throws SQLException {
     hBaseAccessor.saveMetricMetadata(metadata);
   }
@@ -197,6 +227,10 @@ public class TimelineMetricMetadataManager {
     hBaseAccessor.saveHostAppsMetadata(hostedApps);
   }
 
+  public void persistHostedInstanceMetadata(Map<String, Set<String>> hostedInstancesMetadata) throws SQLException {
+    hBaseAccessor.saveInstanceHostsMetadata(hostedInstancesMetadata);
+  }
+
   public TimelineMetricMetadata getTimelineMetricMetadata(TimelineMetric timelineMetric, boolean isWhitelisted) {
     return new TimelineMetricMetadata(
       timelineMetric.getMetricName(),
@@ -233,6 +267,10 @@ public class TimelineMetricMetadataManager {
     return hBaseAccessor.getHostedAppsMetadata();
   }
 
+  Map<String, Set<String>> getHostedInstancesFromStore() throws SQLException {
+    return hBaseAccessor.getInstanceHostsMetdata();
+  }
+
   private boolean supportAggregates(TimelineMetric metric) {
     return MapUtils.isEmpty(metric.getMetadata()) ||
       !(String.valueOf(true).equals(metric.getMetadata().get("skipAggregation")));

http://git-wip-us.apache.org/repos/asf/ambari/blob/6326589b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/discovery/TimelineMetricMetadataSync.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/discovery/TimelineMetricMetadataSync.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/discovery/TimelineMetricMetadataSync.java
index 25b525a..6d519f6 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/discovery/TimelineMetricMetadataSync.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/discovery/TimelineMetricMetadataSync.java
@@ -45,11 +45,15 @@ public class TimelineMetricMetadataSync implements Runnable {
     persistMetricMetadata();
     LOG.debug("Persisting hosted apps metadata...");
     persistHostAppsMetadata();
+    LOG.debug("Persisting hosted instance metadata...");
+    persistHostInstancesMetadata();
     if (cacheManager.isDistributedModeEnabled()) {
       LOG.debug("Refreshing metric metadata...");
       refreshMetricMetadata();
       LOG.debug("Refreshing hosted apps metadata...");
       refreshHostAppsMetadata();
+      LOG.debug("Refreshing hosted instances metadata...");
+      refreshHostedInstancesMetadata();
     }
   }
 
@@ -147,6 +151,41 @@ public class TimelineMetricMetadataSync implements Runnable {
   }
 
   /**
+   * Sync hosted instance metadata if needed
+   */
+  private void persistHostInstancesMetadata() {
+    if (cacheManager.syncHostedInstanceMetadata()) {
+      Map<String, Set<String>> persistedData = null;
+      try {
+        persistedData = cacheManager.getHostedInstancesFromStore();
+      } catch (SQLException e) {
+        LOG.warn("Failed on fetching hosted instances data from store.", e);
+        return; // Something wrong with store
+      }
+
+      Map<String, Set<String>> cachedData = cacheManager.getHostedInstanceCache();
+      Map<String, Set<String>> dataToSync = new HashMap<>();
+      if (cachedData != null && !cachedData.isEmpty()) {
+        for (Map.Entry<String, Set<String>> cacheEntry : cachedData.entrySet()) {
+          // No persistence / stale data in store
+          if (persistedData == null || persistedData.isEmpty() ||
+            !persistedData.containsKey(cacheEntry.getKey()) ||
+            !persistedData.get(cacheEntry.getKey()).containsAll(cacheEntry.getValue())) {
+            dataToSync.put(cacheEntry.getKey(), cacheEntry.getValue());
+          }
+        }
+        try {
+          cacheManager.persistHostedInstanceMetadata(dataToSync);
+          cacheManager.markSuccessOnSyncHostedInstanceMetadata();
+
+        } catch (SQLException e) {
+          LOG.warn("Error persisting hosted apps metadata.", e);
+        }
+      }
+
+    }
+  }
+  /**
    * Read all hosted apps metadata and update cached values - HA
    */
   private void refreshHostAppsMetadata() {
@@ -166,4 +205,22 @@ public class TimelineMetricMetadataSync implements Runnable {
       }
     }
   }
+
+  private void refreshHostedInstancesMetadata() {
+    Map<String, Set<String>> hostedInstancesFromStore = null;
+    try {
+      hostedInstancesFromStore = cacheManager.getHostedInstancesFromStore();
+    } catch (SQLException e) {
+      LOG.warn("Error refreshing metadata from store.", e);
+    }
+    if (hostedInstancesFromStore != null) {
+      Map<String, Set<String>> cachedData = cacheManager.getHostedInstanceCache();
+
+      for (Map.Entry<String, Set<String>> storeEntry : hostedInstancesFromStore.entrySet()) {
+        if (!cachedData.containsKey(storeEntry.getKey())) {
+          cachedData.put(storeEntry.getKey(), storeEntry.getValue());
+        }
+      }
+    }
+  }
 }

http://git-wip-us.apache.org/repos/asf/ambari/blob/6326589b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/query/PhoenixTransactSQL.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/query/PhoenixTransactSQL.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/query/PhoenixTransactSQL.java
index 0c8e5a7..d39230d 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/query/PhoenixTransactSQL.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/query/PhoenixTransactSQL.java
@@ -148,6 +148,12 @@ public class PhoenixTransactSQL {
       "CONSTRAINT pk PRIMARY KEY (HOSTNAME))" +
       "DATA_BLOCK_ENCODING='%s', COMPRESSION='%s'";
 
+  public static final String CREATE_INSTANCE_HOST_TABLE_SQL =
+    "CREATE TABLE IF NOT EXISTS INSTANCE_HOST_METADATA " +
+      "(INSTANCE_ID VARCHAR, HOSTNAME VARCHAR, " +
+      "CONSTRAINT pk PRIMARY KEY (INSTANCE_ID, HOSTNAME))" +
+      "DATA_BLOCK_ENCODING='%s', COMPRESSION='%s'";
+
   public static final String ALTER_METRICS_METADATA_TABLE =
     "ALTER TABLE METRICS_METADATA ADD IF NOT EXISTS IS_WHITELISTED BOOLEAN";
 
@@ -230,6 +236,9 @@ public class PhoenixTransactSQL {
   public static final String UPSERT_HOSTED_APPS_METADATA_SQL =
     "UPSERT INTO HOSTED_APPS_METADATA (HOSTNAME, APP_IDS) VALUES (?, ?)";
 
+  public static final String UPSERT_INSTANCE_HOST_METADATA_SQL =
+    "UPSERT INTO INSTANCE_HOST_METADATA (INSTANCE_ID, HOSTNAME) VALUES (?, ?)";
+
   /**
    * Retrieve a set of rows from metrics records table.
    */
@@ -309,6 +318,9 @@ public class PhoenixTransactSQL {
   public static final String GET_HOSTED_APPS_METADATA_SQL = "SELECT " +
     "HOSTNAME, APP_IDS FROM HOSTED_APPS_METADATA";
 
+  public static final String GET_INSTANCE_HOST_METADATA_SQL = "SELECT " +
+    "INSTANCE_ID, HOSTNAME FROM INSTANCE_HOST_METADATA";
+
   /**
    * Aggregate host metrics using a GROUP BY clause to take advantage of
    * N - way parallel scan where N = number of regions.
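
With the encoding and compression placeholders substituted from DEFAULT_ENCODING and
DEFAULT_TABLE_COMPRESSION, the new table's DDL would come out roughly as below (FAST_DIFF
and SNAPPY are assumed defaults for illustration, not confirmed by this diff):

  CREATE TABLE IF NOT EXISTS INSTANCE_HOST_METADATA
    (INSTANCE_ID VARCHAR, HOSTNAME VARCHAR,
    CONSTRAINT pk PRIMARY KEY (INSTANCE_ID, HOSTNAME))
    DATA_BLOCK_ENCODING='FAST_DIFF', COMPRESSION='SNAPPY'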

http://git-wip-us.apache.org/repos/asf/ambari/blob/6326589b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/TimelineWebServices.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/TimelineWebServices.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/TimelineWebServices.java
index 304a8e0..6278c59 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/TimelineWebServices.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/TimelineWebServices.java
@@ -412,6 +412,22 @@ public class TimelineWebServices {
     }
   }
 
+  @GET
+  @Path("/metrics/instances")
+  @Produces({ MediaType.APPLICATION_JSON })
+  public Map<String, Set<String>> getClusterHostsMetadata(
+    @Context HttpServletRequest req,
+    @Context HttpServletResponse res
+  ) {
+    init(res);
+
+    try {
+      return timelineMetricStore.getInstanceHostsMetadata();
+    } catch (Exception e) {
+      throw new WebApplicationException(e, Response.Status.INTERNAL_SERVER_ERROR);
+    }
+  }
+
   /**
    * This is a discovery endpoint that advertises known live collector
    * instances. Note: It will always answer with current instance as live.

http://git-wip-us.apache.org/repos/asf/ambari/blob/6326589b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TestTimelineMetricStore.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TestTimelineMetricStore.java b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TestTimelineMetricStore.java
index b2e8cac..b40481d 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TestTimelineMetricStore.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TestTimelineMetricStore.java
@@ -97,6 +97,11 @@ public class TestTimelineMetricStore implements TimelineMetricStore {
   }
 
   @Override
+  public Map<String, Set<String>> getInstanceHostsMetadata() throws SQLException, IOException {
+    return Collections.emptyMap();
+  }
+
+  @Override
   public List<String> getLiveInstances() {
     return Collections.emptyList();
   }

http://git-wip-us.apache.org/repos/asf/ambari/blob/6326589b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/discovery/TestMetadataManager.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/discovery/TestMetadataManager.java b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/discovery/TestMetadataManager.java
index b243e0b..c62fd34 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/discovery/TestMetadataManager.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/discovery/TestMetadataManager.java
@@ -69,6 +69,7 @@ public class TestMetadataManager extends AbstractMiniHBaseClusterTest {
     metric2.setStartTime(now - 1000);
     metric2.setAppId("dummy_app2");
     metric2.setType("Integer");
+    metric2.setInstanceId("instance2");
     metric2.setMetricValues(new TreeMap<Long, Double>() {{
       put(now - 100, 1.0);
       put(now - 200, 2.0);
@@ -144,5 +145,12 @@ public class TestMetadataManager extends AbstractMiniHBaseClusterTest {
     Assert.assertEquals("dummy_app1", savedHostData.get("dummy_host1").iterator().next());
     Assert.assertEquals("dummy_app2", savedHostData.get("dummy_host2").iterator().next());
     Assert.assertEquals("dummy_app3", cachedHostData.get("dummy_host3").iterator().next());
+
+
+    Map<String, Set<String>> cachedHostInstanceData = metadataManager.getHostedInstanceCache();
+    Map<String, Set<String>> savedHostInstanceData = metadataManager.getHostedInstancesFromStore();
+    Assert.assertEquals(cachedHostInstanceData.size(), savedHostInstanceData.size());
+    Assert.assertEquals("dummy_host2", cachedHostInstanceData.get("instance2").iterator().next());
+
   }
 }

http://git-wip-us.apache.org/repos/asf/ambari/blob/6326589b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/discovery/TestMetadataSync.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/discovery/TestMetadataSync.java b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/discovery/TestMetadataSync.java
index 5eab903..181abca 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/discovery/TestMetadataSync.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/discovery/TestMetadataSync.java
@@ -56,9 +56,15 @@ public class TestMetadataSync {
       put("h2", new HashSet<>(Arrays.asList("a1", "a2")));
     }};
 
+    Map<String, Set<String>> hostedInstances = new HashMap<String, Set<String>>() {{
+      put("i1", new HashSet<>(Arrays.asList("h1")));
+      put("i2", new HashSet<>(Arrays.asList("h1", "h2")));
+    }};
+
     expect(configuration.get("timeline.metrics.service.operation.mode", "")).andReturn("distributed");
     expect(hBaseAccessor.getTimelineMetricMetadata()).andReturn(metadata);
     expect(hBaseAccessor.getHostedAppsMetadata()).andReturn(hostedApps);
+    expect(hBaseAccessor.getInstanceHostsMetdata()).andReturn(hostedInstances);
 
     replay(configuration, hBaseAccessor);
 
@@ -80,6 +86,12 @@ public class TestMetadataSync {
     Assert.assertEquals(2, hostedApps.size());
     Assert.assertEquals(1, hostedApps.get("h1").size());
     Assert.assertEquals(2, hostedApps.get("h2").size());
+
+    hostedInstances = metadataManager.getHostedInstanceCache();
+    Assert.assertEquals(2, hostedInstances.size());
+    Assert.assertEquals(1, hostedInstances.get("i1").size());
+    Assert.assertEquals(2, hostedInstances.get("i2").size());
+
   }
 
   @Test


[32/50] [abbrv] ambari git commit: AMBARI-20824. Add two new preemption properties to RU/EE (ncole)

Posted by ao...@apache.org.
AMBARI-20824. Add two new preemption properties to RU/EE (ncole)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/e8794409
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/e8794409
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/e8794409

Branch: refs/heads/branch-3.0-perf
Commit: e87944093260a9463b1db539c14b2e8bf0a08229
Parents: 6b399e4
Author: Nate Cole <nc...@hortonworks.com>
Authored: Fri Apr 21 16:52:34 2017 -0400
Committer: Andrew Onishuk <ao...@hortonworks.com>
Committed: Mon Apr 24 14:59:56 2017 +0300

----------------------------------------------------------------------
 .../resources/stacks/HDP/2.6/upgrades/config-upgrade.xml | 11 +++++++++++
 .../stacks/HDP/2.6/upgrades/nonrolling-upgrade-2.6.xml   |  4 ++++
 .../resources/stacks/HDP/2.6/upgrades/upgrade-2.6.xml    |  1 +
 3 files changed, 16 insertions(+)
----------------------------------------------------------------------
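
Net effect of the hdp_2_6_yarn_preemption definition: during upgrade, each preemption key
is added to yarn-site only if it is absent, with the defaults from the diff, i.e. roughly:

  <property>
    <name>yarn.resourcemanager.monitor.capacity.preemption.total_preemption_per_round</name>
    <value>0.1</value>
  </property>
  <property>
    <name>yarn.resourcemanager.monitor.capacity.preemption.natural_termination_factor</name>
    <value>1</value>
  </property>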


http://git-wip-us.apache.org/repos/asf/ambari/blob/e8794409/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/config-upgrade.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/config-upgrade.xml b/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/config-upgrade.xml
index 87646fa..f5d87e9 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/config-upgrade.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/config-upgrade.xml
@@ -104,6 +104,17 @@
             <set key="ranger.plugin.yarn.ambari.cluster.name" value="{{cluster_name}}"
               if-type="ranger-yarn-plugin-properties" if-key="ranger-yarn-plugin-enabled" if-key-state="present"/>
           </definition>
+          <definition xsi:type="configure" id="hdp_2_6_yarn_preemption" summary="Resource Manager Preemption Settings">
+            <type>yarn-site</type>
+            <set key="yarn.resourcemanager.monitor.capacity.preemption.total_preemption_per_round" value="0.1"
+              if-type="yarn-site"
+              if-key="yarn.resourcemanager.monitor.capacity.preemption.total_preemption_per_round"
+              if-key-state="absent" />
+            <set key="yarn.resourcemanager.monitor.capacity.preemption.natural_termination_factor" value="1"
+              if-type="yarn-site"
+              if-key="yarn.resourcemanager.monitor.capacity.preemption.natural_termination_factor"
+              if-key-state="absent" />
+          </definition>
         </changes>
       </component>
     </service>

http://git-wip-us.apache.org/repos/asf/ambari/blob/e8794409/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/nonrolling-upgrade-2.6.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/nonrolling-upgrade-2.6.xml b/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/nonrolling-upgrade-2.6.xml
index 7f0c818..f7c276b 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/nonrolling-upgrade-2.6.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/nonrolling-upgrade-2.6.xml
@@ -322,6 +322,10 @@
       <execute-stage service="YARN" component="RESOURCEMANAGER" title="Apply config changes for Ranger Yarn plugin">
         <task xsi:type="configure" id="hdp_2_6_maint_ranger_yarn_plugin_cluster_name"/>
       </execute-stage>
+      
+      <execute-stage service="YARN" component="RESOURCEMANAGER" title="Apply config changes for YARN Preemption">
+        <task xsi:type="configure" id="hdp_2_6_yarn_preemption"/>
+      </execute-stage>
 
       <!-- KAFKA -->
       <execute-stage service="KAFKA" component="KAFKA_BROKER" title="Apply config changes for Ranger Kafka plugin">

http://git-wip-us.apache.org/repos/asf/ambari/blob/e8794409/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/upgrade-2.6.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/upgrade-2.6.xml b/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/upgrade-2.6.xml
index 7279870..81757c1 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/upgrade-2.6.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/upgrade-2.6.xml
@@ -686,6 +686,7 @@
       <component name="RESOURCEMANAGER">
         <pre-upgrade>
           <task xsi:type="configure" id="hdp_2_6_maint_ranger_yarn_plugin_cluster_name"/>
+          <task xsi:type="configure" id="hdp_2_6_yarn_preemption" />
         </pre-upgrade>
         <pre-downgrade/> <!--  no-op to prevent config changes on downgrade -->
         <upgrade>


[09/50] [abbrv] ambari git commit: Revert "AMBARI-20696: Skip calling stack selector, conf selector tools for Nifi, Streamline, Registry custom services in HDP cluster (Madhuvanthi Radhakrishnan via jluniya)"

Posted by ao...@apache.org.
Revert "AMBARI-20696: Skip calling stack selector, conf selector tools for Nifi, Streamline, Registry custom services in HDP cluster (Madhuvanthi Radhakrishnan via jluniya)"

This reverts commit 4f2523e7f08fd81859831353a18ffb52ab0af8e9.


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/a6bba692
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/a6bba692
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/a6bba692

Branch: refs/heads/branch-3.0-perf
Commit: a6bba6929556b8138099270bcc40f309ee4cd0dd
Parents: 611aaa5
Author: Jayush Luniya <jl...@hortonworks.com>
Authored: Wed Apr 19 08:55:58 2017 -0700
Committer: Andrew Onishuk <ao...@hortonworks.com>
Committed: Mon Apr 24 14:59:56 2017 +0300

----------------------------------------------------------------------
 .../libraries/functions/stack_select.py         |  5 ---
 .../libraries/functions/stack_tools.py          | 10 -----
 .../libraries/functions/version_select_util.py  | 42 --------------------
 .../ambari/server/agent/ExecutionCommand.java   | 11 -----
 .../AmbariCustomCommandExecutionHelper.java     |  8 ++--
 .../AmbariManagementControllerImpl.java         |  1 -
 .../internal/ClientConfigResourceProvider.java  |  1 -
 .../scripts/shared_initialization.py            |  5 +--
 .../AmbariCustomCommandExecutionHelperTest.java |  1 -
 .../AmbariManagementControllerTest.java         |  4 +-
 10 files changed, 7 insertions(+), 81 deletions(-)
----------------------------------------------------------------------
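
For context, the reverted get_component_version_with_stack_selector helper parsed the
selector's "status" output with the regex visible in the diff below. A minimal sketch of
that parsing (the sample output line is hypothetical):

  import re

  out = "NIFI - 3.0.2.0-76"  # hypothetical `hdf-select status NIFI` output
  matches = re.findall(r"([\d\.]+\-\d+)", out)
  version = matches[0] if matches else None  # -> "3.0.2.0-76"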


http://git-wip-us.apache.org/repos/asf/ambari/blob/a6bba692/ambari-common/src/main/python/resource_management/libraries/functions/stack_select.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/functions/stack_select.py b/ambari-common/src/main/python/resource_management/libraries/functions/stack_select.py
index 00127b1..79393b9 100644
--- a/ambari-common/src/main/python/resource_management/libraries/functions/stack_select.py
+++ b/ambari-common/src/main/python/resource_management/libraries/functions/stack_select.py
@@ -123,11 +123,6 @@ def select_all(version_to_select):
   """
   stack_root = Script.get_stack_root()
   (stack_selector_name, stack_selector_path, stack_selector_package) = stack_tools.get_stack_tool(stack_tools.STACK_SELECTOR_NAME)
-  if stack_selector_path is None:
-    Logger.warning(format("Skipping executing \"stack select all\ as stack selector path is None"))
-    return
-
-
   # it's an error, but it shouldn't really stop anything from working
   if version_to_select is None:
     Logger.error(format("Unable to execute {stack_selector_name} after installing because there was no version specified"))

http://git-wip-us.apache.org/repos/asf/ambari/blob/a6bba692/ambari-common/src/main/python/resource_management/libraries/functions/stack_tools.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/functions/stack_tools.py b/ambari-common/src/main/python/resource_management/libraries/functions/stack_tools.py
index 93ec0b7..02ae62d 100644
--- a/ambari-common/src/main/python/resource_management/libraries/functions/stack_tools.py
+++ b/ambari-common/src/main/python/resource_management/libraries/functions/stack_tools.py
@@ -32,7 +32,6 @@ from resource_management.core.utils import pad
 STACK_SELECTOR_NAME = "stack_selector"
 CONF_SELECTOR_NAME = "conf_selector"
 
-
 def get_stack_tool(name):
   """
   Give a tool selector name get the stack-specific tool name, tool path, tool package
@@ -42,17 +41,8 @@ def get_stack_tool(name):
   from resource_management.libraries.functions.default import default
   stack_tools = None
   stack_tools_config = default("/configurations/cluster-env/stack_tools", None)
-  stack_name = default("/hostLevelParams/stack_name", None)
-  service_name = default("/serviceName", None)
-
-  #Get version Advertised tag to decide whether or not to call the selector tools
-  is_version_advertised = default("/versionAdvertised", True)
   if stack_tools_config:
     stack_tools = json.loads(stack_tools_config)
-  if service_name is not None:
-    if not is_version_advertised:
-      Logger.warning(format("No \"stack selector tool\" returned as the component does not advertise a version"))
-      return (None, None, None)
 
   if not stack_tools or not name or name.lower() not in stack_tools:
     Logger.warning("Cannot find config for {0} stack tool in {1}".format(str(name), str(stack_tools)))

http://git-wip-us.apache.org/repos/asf/ambari/blob/a6bba692/ambari-common/src/main/python/resource_management/libraries/functions/version_select_util.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/functions/version_select_util.py b/ambari-common/src/main/python/resource_management/libraries/functions/version_select_util.py
index 85fe807..ff00a1f 100644
--- a/ambari-common/src/main/python/resource_management/libraries/functions/version_select_util.py
+++ b/ambari-common/src/main/python/resource_management/libraries/functions/version_select_util.py
@@ -79,48 +79,6 @@ def get_component_version(stack_name, component_name):
   return version
 
 
-def get_component_version_with_stack_selector(stack_selector_path, component_name):
-  """
-   For specific cases where we deal with HDP add on services from a management pack, the version
-   needs to be determined by using the specific stack selector itself.
-   :param stack_selector_path: /usr/bin/hdf-select
-   Comes from the service which calls for this function.
-   :param component_name: Component name as a string necessary to get the version
-   :return: Returns a string if found, e.g., 2.2.1.0-2175, otherwise, returns None
-   This function can be called by custom services, hence should not be removed
-  """
-  version = None
-  out = None
-  code = -1
-  if not stack_selector_path:
-    Logger.error("Stack selector path not provided")
-  elif not os.path.exists(stack_selector_path):
-    Logger.error("Stack selector path does not exist")
-  elif not component_name:
-    Logger.error("Component name not provided")
-  else:
-    tmpfile = tempfile.NamedTemporaryFile()
-
-    get_stack_comp_version_cmd = ""
-    try:
-      # This is necessary because Ubuntu returns "stdin: is not a tty", see AMBARI-8088
-      with open(tmpfile.name, 'r') as file:
-        get_stack_comp_version_cmd = '{0} status {1} > {2}' .format(stack_selector_path, component_name, tmpfile.name)
-        code, stdoutdata = shell.call(get_stack_comp_version_cmd, quiet=True)
-        out = file.read()
-
-      if code != 0 or out is None:
-        raise Exception("Code is nonzero or output is empty")
-
-      Logger.debug("Command: %s\nOutput: %s" % (get_stack_comp_version_cmd, str(out)))
-      matches = re.findall(r"([\d\.]+\-\d+)", out)
-      version = matches[0] if matches and len(matches) > 0 else None
-    except Exception, e:
-      Logger.error("Could not determine stack version for component %s by calling '%s'. Return Code: %s, Output: %s." %
-                   (component_name, get_stack_comp_version_cmd, str(code), str(out)))
-  return version
-
-
 def get_versions_from_stack_root(stack_root):
   """
   Given a stack install root, returns a list of stack versions currently installed.

http://git-wip-us.apache.org/repos/asf/ambari/blob/a6bba692/ambari-server/src/main/java/org/apache/ambari/server/agent/ExecutionCommand.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/agent/ExecutionCommand.java b/ambari-server/src/main/java/org/apache/ambari/server/agent/ExecutionCommand.java
index 95da25e..8c726a0 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/agent/ExecutionCommand.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/agent/ExecutionCommand.java
@@ -97,9 +97,6 @@ public class ExecutionCommand extends AgentCommand {
   @SerializedName("serviceName")
   private String serviceName;
 
-  @SerializedName("versionAdvertised")
-  private boolean versionAdvertised;
-
   @SerializedName("serviceType")
   private String serviceType;
 
@@ -330,14 +327,6 @@ public class ExecutionCommand extends AgentCommand {
     this.serviceName = serviceName;
   }
 
-  public boolean getVersionAdvertised() {
-    return versionAdvertised;
-  }
-
-  public void setVersionAdvertised(boolean versionAdvertised) {
-    this.versionAdvertised = versionAdvertised;
-  }
-
   public String getServiceType() {
 	return serviceType;
   }

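The versionAdvertised field deleted above was serialized into the command
JSON sent to agents (note the @SerializedName annotation), so after this
commit the key simply disappears from the payload. A rough sketch of the
affected slice of that JSON -- the values are placeholders, only the key
names come from the diff:

    import json

    command = {
        "serviceName": "HDFS",       # placeholder value
        "serviceType": "HDFS",       # placeholder value
        "versionAdvertised": False,  # key removed by this commit
    }
    command.pop("versionAdvertised")
    print(json.dumps(command, sort_keys=True))
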
http://git-wip-us.apache.org/repos/asf/ambari/blob/a6bba692/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelper.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelper.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelper.java
index a493b94..17bc718 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelper.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelper.java
@@ -326,9 +326,6 @@ public class AmbariCustomCommandExecutionHelper {
     AmbariMetaInfo ambariMetaInfo = managementController.getAmbariMetaInfo();
     ServiceInfo serviceInfo = ambariMetaInfo.getService(
         stackId.getStackName(), stackId.getStackVersion(), serviceName);
-    ComponentInfo componentInfo = ambariMetaInfo.getComponent(
-            stackId.getStackName(), stackId.getStackVersion(),
-            serviceName, componentName);
     StackInfo stackInfo = ambariMetaInfo.getStack
        (stackId.getStackName(), stackId.getStackVersion());
 
@@ -435,7 +432,6 @@ public class AmbariCustomCommandExecutionHelper {
       hostLevelParams.put(NOT_MANAGED_HDFS_PATH_LIST, notManagedHdfsPathList);
 
       execCmd.setHostLevelParams(hostLevelParams);
-      execCmd.setVersionAdvertised(componentInfo.isVersionAdvertised());
 
       Map<String, String> commandParams = new TreeMap<>();
       if (additionalCommandParams != null) {
@@ -447,6 +443,10 @@ public class AmbariCustomCommandExecutionHelper {
       boolean isInstallCommand = commandName.equals(RoleCommand.INSTALL.toString());
       int commandTimeout = Short.valueOf(configs.getDefaultAgentTaskTimeout(isInstallCommand)).intValue();
 
+      ComponentInfo componentInfo = ambariMetaInfo.getComponent(
+          stackId.getStackName(), stackId.getStackVersion(),
+          serviceName, componentName);
+
       if (serviceInfo.getSchemaVersion().equals(AmbariMetaInfo.SCHEMA_VERSION_2)) {
         // Service check command is not custom command
         CommandScriptDefinition script = componentInfo.getCommandScript();

http://git-wip-us.apache.org/repos/asf/ambari/blob/a6bba692/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
index 76c1167..751ce08 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
@@ -2457,7 +2457,6 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
     hostParams.put(UNLIMITED_KEY_JCE_REQUIRED, (unlimitedKeyJCEPolicyRequired) ? "true" : "false");
 
     execCmd.setHostLevelParams(hostParams);
-    execCmd.setVersionAdvertised(componentInfo.isVersionAdvertised());
 
     Map<String, String> roleParams = new TreeMap<>();
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/a6bba692/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClientConfigResourceProvider.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClientConfigResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClientConfigResourceProvider.java
index 77b027c..e42bd45 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClientConfigResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClientConfigResourceProvider.java
@@ -443,7 +443,6 @@ public class ClientConfigResourceProvider extends AbstractControllerResourceProv
         jsonContent.put("hostLevelParams", hostLevelParams);
         jsonContent.put("hostname", hostName);
         jsonContent.put("clusterName", cluster.getClusterName());
-        jsonContent.put("versionAdvertised", componentInfo.isVersionAdvertised());
         jsonConfigurations = gson.toJson(jsonContent);
 
         File tmpDirectory = new File(TMP_PATH);

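The same flag is dropped from the client-config JSON assembled here. A
minimal sketch of the structure this provider writes out, with placeholder
values (only the key names are taken from the diff):

    import json

    json_content = {
        "hostLevelParams": {},            # populated earlier in the method
        "hostname": "c6401.example.com",  # placeholder
        "clusterName": "c1",              # placeholder
        # "versionAdvertised" is no longer emitted after this commit
    }
    print(json.dumps(json_content))
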
http://git-wip-us.apache.org/repos/asf/ambari/blob/a6bba692/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-INSTALL/scripts/shared_initialization.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-INSTALL/scripts/shared_initialization.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-INSTALL/scripts/shared_initialization.py
index a0f58cb..1609050 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-INSTALL/scripts/shared_initialization.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-INSTALL/scripts/shared_initialization.py
@@ -22,7 +22,6 @@ import os
 from resource_management.libraries.functions import stack_tools
 from resource_management.libraries.functions.version import compare_versions
 from resource_management.core.resources.packaging import Package
-from resource_management.core.logger import Logger
 
 def install_packages():
   import params
@@ -32,9 +31,7 @@ def install_packages():
   packages = ['unzip', 'curl']
   if params.stack_version_formatted != "" and compare_versions(params.stack_version_formatted, '2.2') >= 0:
     stack_selector_package = stack_tools.get_stack_tool_package(stack_tools.STACK_SELECTOR_NAME)
-    if stack_selector_package:
-      packages.append(stack_selector_package)
-
+    packages.append(stack_selector_package)
   Package(packages,
           retry_on_repo_unavailability=params.agent_stack_retry_on_unavailability,
           retry_count=params.agent_stack_retry_count)

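One behavioral edge in the hook change above: with the
"if stack_selector_package:" guard gone, whatever
get_stack_tool_package() returns is appended as-is. A minimal sketch of
the list assembly (the selector package name is hypothetical):

    packages = ['unzip', 'curl']

    # When the stack defines a selector tool:
    stack_selector_package = 'hdp-select'  # hypothetical name
    packages.append(stack_selector_package)
    print(packages)  # ['unzip', 'curl', 'hdp-select']

    # If get_stack_tool_package() returned None, the old guard skipped the
    # append; after this change None would land in the package list instead.
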
http://git-wip-us.apache.org/repos/asf/ambari/blob/a6bba692/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelperTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelperTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelperTest.java
index 4eca710..71a02f5 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelperTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelperTest.java
@@ -225,7 +225,6 @@ public class AmbariCustomCommandExecutionHelperTest {
     Assert.assertTrue(command.getHostLevelParams().containsKey(ExecutionCommand.KeyNames.USER_GROUPS));
     Assert.assertEquals("{\"zookeeperUser\":[\"zookeeperGroup\"]}", command.getHostLevelParams().get(ExecutionCommand.KeyNames.USER_GROUPS));
     Assert.assertEquals(true, command.getForceRefreshConfigTagsBeforeExecution());
-    Assert.assertFalse(command.getVersionAdvertised());
   }
 
   @Test

http://git-wip-us.apache.org/repos/asf/ambari/blob/a6bba692/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java
index dfb8cb7..554e089 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java
@@ -275,7 +275,6 @@ public class AmbariManagementControllerTest {
     hostDAO = injector.getInstance(HostDAO.class);
     topologyHostInfoDAO = injector.getInstance(TopologyHostInfoDAO.class);
     hostRoleCommandDAO = injector.getInstance(HostRoleCommandDAO.class);
-
     stackManagerMock = (StackManagerMock) ambariMetaInfo.getStackManager();
     EasyMock.replay(injector.getInstance(AuditLogger.class));
   }
@@ -1272,6 +1271,8 @@ public class AmbariManagementControllerTest {
     crReq.setDesiredConfig(Collections.singletonList(cr3));
     controller.updateClusters(Collections.singleton(crReq), null);
 
+
+
     // Install
     installService(cluster1, serviceName, false, false);
     ExecutionCommand ec =
@@ -1311,7 +1312,6 @@ public class AmbariManagementControllerTest {
     assertEquals("[\"myhdfsgroup\"]", ec.getHostLevelParams().get(ExecutionCommand.KeyNames.GROUP_LIST));
     assertTrue(ec.getHostLevelParams().containsKey(ExecutionCommand.KeyNames.USER_GROUPS));
     assertEquals("{\"myhdfsuser\":[\"myhdfsgroup\"]}", ec.getHostLevelParams().get(ExecutionCommand.KeyNames.USER_GROUPS));
-    assertEquals(ec.getVersionAdvertised(), false);
   }
 
   @Test