Posted to commits@ambari.apache.org by jl...@apache.org on 2017/10/11 05:24:57 UTC
[01/31] ambari git commit: AMBARI-22153. On Zeppelin restart sometimes interpreter settings get reset (Prabhjyot Singh via Venkata Sairam)
Repository: ambari
Updated Branches:
refs/heads/branch-feature-AMBARI-14714 eb6b21c00 -> 045d9bfe3
AMBARI-22153. On Zeppelin restart sometimes interpreter settings get reset (Prabhjyot Singh via Venkata Sairam)
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/84e616da
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/84e616da
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/84e616da
Branch: refs/heads/branch-feature-AMBARI-14714
Commit: 84e616da753224c43d62ddaeb8f1ef935c62d876
Parents: e61556c
Author: Venkata Sairam <ve...@gmail.com>
Authored: Mon Oct 9 12:04:32 2017 +0530
Committer: Venkata Sairam <ve...@gmail.com>
Committed: Mon Oct 9 12:04:32 2017 +0530
----------------------------------------------------------------------
.../ZEPPELIN/0.7.0/package/scripts/master.py | 33 +++---
.../stacks/2.6/ZEPPELIN/test_zeppelin_070.py | 101 +++++--------------
2 files changed, 45 insertions(+), 89 deletions(-)
----------------------------------------------------------------------
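
The patch factors the inline shell probe into a reusable is_path_exists_in_HDFS helper. Below is a minimal standalone sketch of the same pattern; plain subprocess and sudo stand in for Ambari's resource_management shell.call(user=...), and kinit_cmd is a placeholder for the kinit_if_needed prefix built in the diff, so this is an approximation rather than the patch's actual code.

import subprocess

def hdfs_path_exists(path, user, kinit_cmd=""):
    """Return True if `path` exists in HDFS, checked as `user`."""
    cmd = "{0} hdfs dfs -test -e {1}; echo $?".format(kinit_cmd, path)
    out = subprocess.check_output(
        ["sudo", "-u", user, "bash", "-c", cmd]).decode()
    # With no Kerberos configured, the first output line is the shell's
    # "kinit: command not found" complaint, so take the last line: the
    # echoed exit status of `hdfs dfs -test -e`.
    return out.strip().split("\n")[-1].strip() == "0"  # 0 means it exists

For example, hdfs_path_exists("/user/zeppelin/notebook", "zeppelin") answers the same question the new helper answers before the notebook directory is created.
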
http://git-wip-us.apache.org/repos/asf/ambari/blob/84e616da/ambari-server/src/main/resources/common-services/ZEPPELIN/0.7.0/package/scripts/master.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/ZEPPELIN/0.7.0/package/scripts/master.py b/ambari-server/src/main/resources/common-services/ZEPPELIN/0.7.0/package/scripts/master.py
index a450fb6..d615d06 100644
--- a/ambari-server/src/main/resources/common-services/ZEPPELIN/0.7.0/package/scripts/master.py
+++ b/ambari-server/src/main/resources/common-services/ZEPPELIN/0.7.0/package/scripts/master.py
@@ -192,18 +192,8 @@ class Master(Script):
notebook_directory = "/user/" + format("{zeppelin_user}") + "/" + \
params.config['configurations']['zeppelin-config']['zeppelin.notebook.dir']
- kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
- kinit_if_needed = format("{kinit_path_local} -kt {zeppelin_kerberos_keytab} {zeppelin_kerberos_principal};")
-
- notebook_directory_exists = shell.call(format("{kinit_if_needed} hdfs --config {hadoop_conf_dir} dfs -test -e {notebook_directory};echo $?"),
- user=params.zeppelin_user)[1]
-
- #if there is no kerberos setup then the string will contain "-bash: kinit: command not found"
- if "\n" in notebook_directory_exists:
- notebook_directory_exists = notebook_directory_exists.split("\n")[1]
- # '1' means it does not exists
- if notebook_directory_exists == '1':
+ if self.is_path_exists_in_HDFS(notebook_directory, params.zeppelin_user):
# hdfs dfs -mkdir {notebook_directory}
params.HdfsResource(format("{notebook_directory}"),
type="directory",
@@ -310,6 +300,22 @@ class Master(Script):
return hdfs_interpreter_config
+ def is_path_exists_in_HDFS(self, path, as_user):
+ kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
+ kinit_if_needed = format("{kinit_path_local} -kt {zeppelin_kerberos_keytab} {zeppelin_kerberos_principal};")
+ path_exists = shell.call(format("{kinit_if_needed} hdfs --config {hadoop_conf_dir} dfs -test -e {path};echo $?"),
+ user=as_user)[1]
+
+ # if there is no kerberos setup then the string will contain "-bash: kinit: command not found"
+ if "\n" in path_exists:
+ path_exists = path_exists.split("\n")[1]
+
+ # '1' means it does not exists
+ if path_exists == '0':
+ return True
+ else:
+ return False
+
def get_interpreter_settings(self):
import params
import json
@@ -320,12 +326,14 @@ class Master(Script):
if 'zeppelin.config.fs.dir' in params.config['configurations']['zeppelin-config']:
zeppelin_conf_fs = self.getZeppelinConfFS(params)
- if os.path.exists(zeppelin_conf_fs):
+
+ if self.is_path_exists_in_HDFS(zeppelin_conf_fs, params.zeppelin_user):
# copy from hdfs to /etc/zeppelin/conf/interpreter.json
params.HdfsResource(interpreter_config,
type="file",
action="download_on_execute",
source=zeppelin_conf_fs,
+ user=params.zeppelin_user,
group=params.zeppelin_group,
owner=params.zeppelin_user)
else:
@@ -353,6 +361,7 @@ class Master(Script):
type="file",
action="create_on_execute",
source=interpreter_config,
+ user=params.zeppelin_user,
group=params.zeppelin_group,
owner=params.zeppelin_user,
replace_existing_files=True)
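
Besides the new existence check, the hunks above add user=params.zeppelin_user to both HdfsResource transfers, and the download branch now keys off the helper instead of os.path.exists(zeppelin_conf_fs), a local-filesystem test that cannot see a copy stored in HDFS. A hedged restatement of the corrected flow, reusing hdfs_path_exists from the sketch above; download is a callable standing in for the HdfsResource download_on_execute action, not Ambari's actual API:

def sync_interpreter_config(local_path, hdfs_path, user, download):
    # Ask HDFS itself whether a saved interpreter.json exists; the old
    # os.path.exists(hdfs_path) checked the agent's local disk, missed
    # the saved copy, and let restarts regenerate (reset) the settings.
    if hdfs_path_exists(hdfs_path, user):
        # Restore the HDFS copy so a restart keeps user edits.
        download(source=hdfs_path, dest=local_path, user=user)
    # otherwise fall through to the script's existing default handling
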
http://git-wip-us.apache.org/repos/asf/ambari/blob/84e616da/ambari-server/src/test/python/stacks/2.6/ZEPPELIN/test_zeppelin_070.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.6/ZEPPELIN/test_zeppelin_070.py b/ambari-server/src/test/python/stacks/2.6/ZEPPELIN/test_zeppelin_070.py
index e8ef262..3064880 100644
--- a/ambari-server/src/test/python/stacks/2.6/ZEPPELIN/test_zeppelin_070.py
+++ b/ambari-server/src/test/python/stacks/2.6/ZEPPELIN/test_zeppelin_070.py
@@ -305,67 +305,32 @@ class TestZeppelin070(RMFTestCase):
security_enabled=False,
)
- self.assertResourceCalled('HdfsResource', '/etc/zeppelin/conf/interpreter.json',
- security_enabled = False,
- hadoop_bin_dir = '/usr/hdp/2.5.0.0-1235/hadoop/bin',
- keytab = UnknownConfigurationMock(),
- source = '/user/zeppelin/hdfs:///user/zeppelin/conf/interpreter.json',
- default_fs = 'hdfs://c6401.ambari.apache.org:8020',
- hdfs_resource_ignore_file = '/var/lib/ambari-agent/data/.hdfs_resource_ignore',
- hdfs_site = {u'a': u'b'},
- kinit_path_local = '/usr/bin/kinit',
- principal_name = UnknownConfigurationMock(),
- user = 'hdfs',
- owner = 'zeppelin',
- group = 'zeppelin',
- hadoop_conf_dir = '/usr/hdp/2.5.0.0-1235/hadoop/conf',
- type = 'file',
- action = ['download_on_execute'],
- )
-
self.assertResourceCalled('File', '/etc/zeppelin/conf/interpreter.json',
- content=interpreter_json_generated.template_after_base,
+ content=interpreter_json_generated.template_after_base,
+ owner='zeppelin',
+ group='zeppelin',
+ )
+
+ self.assertResourceCalled('HdfsResource',
+ '/user/zeppelin/hdfs:///user/zeppelin/conf/interpreter.json',
+ security_enabled=False,
+ hadoop_bin_dir='/usr/hdp/2.5.0.0-1235/hadoop/bin',
+ keytab=UnknownConfigurationMock(),
+ source='/etc/zeppelin/conf/interpreter.json',
+ default_fs='hdfs://c6401.ambari.apache.org:8020',
+ hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore',
+ hdfs_site={u'a': u'b'},
+ kinit_path_local='/usr/bin/kinit',
+ principal_name=UnknownConfigurationMock(),
+ user='zeppelin',
owner='zeppelin',
group='zeppelin',
+ replace_existing_files=True,
+ hadoop_conf_dir='/usr/hdp/2.5.0.0-1235/hadoop/conf',
+ type='file',
+ action=['create_on_execute'],
)
- self.assertResourceCalled('HdfsResource', '/user/zeppelin/hdfs:///user/zeppelin/conf/interpreter.json',
- security_enabled = False,
- hadoop_bin_dir = '/usr/hdp/2.5.0.0-1235/hadoop/bin',
- keytab = UnknownConfigurationMock(),
- source = '/etc/zeppelin/conf/interpreter.json',
- default_fs = 'hdfs://c6401.ambari.apache.org:8020',
- replace_existing_files = True,
- hdfs_resource_ignore_file = '/var/lib/ambari-agent/data/.hdfs_resource_ignore',
- hdfs_site = {u'a': u'b'},
- kinit_path_local = '/usr/bin/kinit',
- principal_name = UnknownConfigurationMock(),
- user = 'hdfs',
- owner = 'zeppelin',
- group = 'zeppelin',
- hadoop_conf_dir = '/usr/hdp/2.5.0.0-1235/hadoop/conf',
- type = 'file',
- action = ['create_on_execute'],
- )
-
- self.assertResourceCalled('HdfsResource', '/etc/zeppelin/conf/interpreter.json',
- security_enabled = False,
- hadoop_bin_dir = '/usr/hdp/2.5.0.0-1235/hadoop/bin',
- keytab = UnknownConfigurationMock(),
- source = '/user/zeppelin/hdfs:///user/zeppelin/conf/interpreter.json',
- default_fs = 'hdfs://c6401.ambari.apache.org:8020',
- hdfs_resource_ignore_file = '/var/lib/ambari-agent/data/.hdfs_resource_ignore',
- hdfs_site = {u'a': u'b'},
- kinit_path_local = '/usr/bin/kinit',
- principal_name = UnknownConfigurationMock(),
- user = 'hdfs',
- owner = 'zeppelin',
- group = 'zeppelin',
- hadoop_conf_dir = '/usr/hdp/2.5.0.0-1235/hadoop/conf',
- type = 'file',
- action = ['download_on_execute'],
- )
-
self.assertResourceCalled('File', '/etc/zeppelin/conf/interpreter.json',
content=interpreter_json_generated.template_after_without_spark_and_livy,
owner='zeppelin',
@@ -383,7 +348,7 @@ class TestZeppelin070(RMFTestCase):
hdfs_site = {u'a': u'b'},
kinit_path_local = '/usr/bin/kinit',
principal_name = UnknownConfigurationMock(),
- user = 'hdfs',
+ user = 'zeppelin',
owner = 'zeppelin',
group = 'zeppelin',
hadoop_conf_dir = '/usr/hdp/2.5.0.0-1235/hadoop/conf',
@@ -391,24 +356,6 @@ class TestZeppelin070(RMFTestCase):
action = ['create_on_execute'],
)
- self.assertResourceCalled('HdfsResource', '/etc/zeppelin/conf/interpreter.json',
- security_enabled = False,
- hadoop_bin_dir = '/usr/hdp/2.5.0.0-1235/hadoop/bin',
- keytab = UnknownConfigurationMock(),
- source = '/user/zeppelin/hdfs:///user/zeppelin/conf/interpreter.json',
- default_fs = 'hdfs://c6401.ambari.apache.org:8020',
- hdfs_resource_ignore_file = '/var/lib/ambari-agent/data/.hdfs_resource_ignore',
- hdfs_site = {u'a': u'b'},
- kinit_path_local = '/usr/bin/kinit',
- principal_name = UnknownConfigurationMock(),
- user = 'hdfs',
- owner = 'zeppelin',
- group = 'zeppelin',
- hadoop_conf_dir = '/usr/hdp/2.5.0.0-1235/hadoop/conf',
- type = 'file',
- action = ['download_on_execute'],
- )
-
self.assertResourceCalled('File', '/etc/zeppelin/conf/interpreter.json',
content=interpreter_json_generated.template_after_kerberos,
owner='zeppelin',
@@ -421,12 +368,12 @@ class TestZeppelin070(RMFTestCase):
keytab = UnknownConfigurationMock(),
source = '/etc/zeppelin/conf/interpreter.json',
default_fs = 'hdfs://c6401.ambari.apache.org:8020',
- replace_existing_files = True,
hdfs_resource_ignore_file = '/var/lib/ambari-agent/data/.hdfs_resource_ignore',
hdfs_site = {u'a': u'b'},
kinit_path_local = '/usr/bin/kinit',
principal_name = UnknownConfigurationMock(),
- user = 'hdfs',
+ replace_existing_files = True,
+ user = 'zeppelin',
owner = 'zeppelin',
group = 'zeppelin',
hadoop_conf_dir = '/usr/hdp/2.5.0.0-1235/hadoop/conf',
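
The test updates pin the new behavior: the duplicated download_on_execute expectations are removed, and the remaining HdfsResource calls assert user = 'zeppelin' rather than 'hdfs'. Outside Ambari's RMFTestCase harness, the exit-status parsing itself can be exercised with a plain mock; a sketch against hdfs_path_exists from the earlier sketch, not against Ambari's code:

import unittest
from unittest import mock

class HdfsPathExistsTest(unittest.TestCase):
    @mock.patch("subprocess.check_output")
    def test_kerberosless_output_is_handled(self, check_output):
        # First line mimics the no-Kerberos "kinit: command not found"
        # noise; the last line is the echoed exit status.
        check_output.return_value = b"-bash: kinit: command not found\n0\n"
        self.assertTrue(hdfs_path_exists("/user/zeppelin/notebook", "zeppelin"))

    @mock.patch("subprocess.check_output")
    def test_missing_path_returns_false(self, check_output):
        check_output.return_value = b"1\n"  # test -e exited 1: absent
        self.assertFalse(hdfs_path_exists("/user/zeppelin/notebook", "zeppelin"))

if __name__ == "__main__":
    unittest.main()
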
[05/31] ambari git commit: AMBARI-22168 Move service metrics to separate tab. (atkach)
Posted by jl...@apache.org.
AMBARI-22168 Move service metrics to separate tab. (atkach)
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/6eb273e1
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/6eb273e1
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/6eb273e1
Branch: refs/heads/branch-feature-AMBARI-14714
Commit: 6eb273e19a81773c27f235631c54a3e142277f08
Parents: e83c86d
Author: Andrii Tkach <at...@apache.org>
Authored: Mon Oct 9 14:11:36 2017 +0300
Committer: Andrii Tkach <at...@apache.org>
Committed: Mon Oct 9 15:01:28 2017 +0300
----------------------------------------------------------------------
ambari-web/app/assets/test/tests.js | 2 +
ambari-web/app/controllers.js | 1 +
.../app/controllers/main/service/info/metric.js | 468 +++++++++++++++++++
.../controllers/main/service/info/summary.js | 449 +-----------------
.../service/widgets/create/wizard_controller.js | 2 +-
ambari-web/app/messages.js | 1 +
.../app/styles/enhanced_service_dashboard.less | 26 +-
.../app/templates/main/service/info/metrics.hbs | 104 +++++
.../app/templates/main/service/info/summary.hbs | 84 ----
ambari-web/app/templates/main/service/item.hbs | 5 +-
ambari-web/app/views.js | 1 +
ambari-web/app/views/main/service/info/menu.js | 7 +
.../app/views/main/service/info/metrics_view.js | 290 ++++++++++++
.../app/views/main/service/info/summary.js | 315 ++-----------
ambari-web/app/views/main/service/item.js | 6 +
.../main/service/info/metric_test.js | 110 +++++
.../main/service/info/summary_test.js | 76 ---
.../main/service/info/metrics_view_test.js | 334 +++++++++++++
.../views/main/service/info/summary_test.js | 281 +----------
19 files changed, 1400 insertions(+), 1162 deletions(-)
----------------------------------------------------------------------
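
Most of this change is a mechanical move of the widget-browser logic from the summary controller into the new metrics controller. One detail worth noting in the moved code is the popup's content property, which merges the user's own widgets with all shared widgets while dropping duplicates by id ("no duplicated is allowed"). The same merge, restated as a hedged Python sketch purely for readability; the authoritative Ember version appears in the diff below:

def merge_widgets(shared, mine):
    # Concatenate shared widgets first, then the user's own, keeping
    # only the first occurrence of each widget id.
    seen, merged = set(), []
    for widget in shared + mine:
        if widget["id"] not in seen:
            seen.add(widget["id"])
            merged.append(widget)
    return merged

# merge_widgets([{"id": 1}], [{"id": 1}, {"id": 2}]) keeps ids 1 and 2 once each.
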
http://git-wip-us.apache.org/repos/asf/ambari/blob/6eb273e1/ambari-web/app/assets/test/tests.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/assets/test/tests.js b/ambari-web/app/assets/test/tests.js
index 03b4657..7c636d4 100644
--- a/ambari-web/app/assets/test/tests.js
+++ b/ambari-web/app/assets/test/tests.js
@@ -125,6 +125,7 @@ var files = [
'test/controllers/main/service/item_test',
'test/controllers/main/service/info/config_test',
'test/controllers/main/service/info/summary_test',
+ 'test/controllers/main/service/info/metric_test',
'test/controllers/main/service_test',
'test/controllers/main/admin_test',
'test/controllers/main/views_controller_test',
@@ -340,6 +341,7 @@ var files = [
'test/views/main/service/service_test',
'test/views/main/service/info/config_test',
'test/views/main/service/info/summary_test',
+ 'test/views/main/service/info/metrics_view_test',
'test/views/main/service/info/menu_test',
'test/views/main/service/info/component_list_view_test',
'test/views/main/service/info/metrics/ambari_metrics/regionserver_base_test',
http://git-wip-us.apache.org/repos/asf/ambari/blob/6eb273e1/ambari-web/app/controllers.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/controllers.js b/ambari-web/app/controllers.js
index 81e5eb7..f7d77be 100644
--- a/ambari-web/app/controllers.js
+++ b/ambari-web/app/controllers.js
@@ -142,6 +142,7 @@ require('controllers/main/charts');
require('controllers/main/charts/heatmap_metrics/heatmap_metric');
require('controllers/main/charts/heatmap');
require('controllers/main/service/info/heatmap');
+require('controllers/main/service/info/metric');
require('controllers/main/views_controller');
require('controllers/main/views/details_controller');
require('controllers/wizard/step0_controller');
http://git-wip-us.apache.org/repos/asf/ambari/blob/6eb273e1/ambari-web/app/controllers/main/service/info/metric.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/controllers/main/service/info/metric.js b/ambari-web/app/controllers/main/service/info/metric.js
new file mode 100644
index 0000000..9dfc32c
--- /dev/null
+++ b/ambari-web/app/controllers/main/service/info/metric.js
@@ -0,0 +1,468 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+var App = require('app');
+
+App.MainServiceInfoMetricsController = Em.Controller.extend(App.WidgetSectionMixin, {
+ name: 'mainServiceInfoMetricsController',
+
+ layoutNameSuffix: "_dashboard",
+
+ sectionNameSuffix: "_SUMMARY",
+
+ /**
+ * Some widget has type `GRAPH`
+ *
+ * @type {boolean}
+ */
+ someWidgetGraphExists: Em.computed.someBy('widgets', 'widgetType', 'GRAPH'),
+
+ /**
+ * @type {boolean}
+ */
+ showTimeRangeControl: Em.computed.or('!isServiceWithEnhancedWidgets', 'someWidgetGraphExists'),
+
+ /**
+ * @type {boolean}
+ */
+ isWidgetLayoutsLoaded: false,
+
+ /**
+ * @type {boolean}
+ */
+ isAllSharedWidgetsLoaded: false,
+
+ /**
+ * @type {boolean}
+ */
+ isMineWidgetsLoaded: false,
+
+ /**
+ * load widget layouts across all users in CLUSTER scope
+ * @returns {$.ajax}
+ */
+ loadWidgetLayouts: function () {
+ this.set('isWidgetLayoutsLoaded', false);
+ return App.ajax.send({
+ name: 'widgets.layouts.get',
+ sender: this,
+ data: {
+ sectionName: this.get('sectionName')
+ },
+ success: 'loadWidgetLayoutsSuccessCallback'
+ });
+ },
+
+ loadWidgetLayoutsSuccessCallback: function (data) {
+ App.widgetLayoutMapper.map(data);
+ this.set('isWidgetLayoutsLoaded', true);
+ },
+
+
+ /**
+ * load all shared widgets to show on widget browser
+ * @returns {$.ajax}
+ */
+ loadAllSharedWidgets: function () {
+ this.set('isAllSharedWidgetsLoaded', false);
+ return App.ajax.send({
+ name: 'widgets.all.shared.get',
+ sender: this,
+ success: 'loadAllSharedWidgetsSuccessCallback'
+ });
+ },
+
+ /**
+ * success callback of <code>loadAllSharedWidgets</code>
+ * @param {object|null} data
+ */
+ loadAllSharedWidgetsSuccessCallback: function (data) {
+ var widgetIds = this.get('widgets').mapProperty('id');
+ if (data.items[0] && data.items.length) {
+ this.set("allSharedWidgets",
+ data.items.filter(function (widget) {
+ return widget.WidgetInfo.widget_type != "HEATMAP";
+ }).map(function (widget) {
+ var widgetType = widget.WidgetInfo.widget_type;
+ var widgetName = widget.WidgetInfo.widget_name;
+ var widgetId = widget.WidgetInfo.id;
+ return Em.Object.create({
+ id: widgetId,
+ widgetName: widgetName,
+ description: widget.WidgetInfo.description,
+ widgetType: widgetType,
+ iconPath: "/img/widget-" + widgetType.toLowerCase() + ".png",
+ serviceName: JSON.parse(widget.WidgetInfo.metrics).mapProperty('service_name').uniq().join('-'),
+ added: widgetIds.contains(widgetId),
+ isShared: widget.WidgetInfo.scope == "CLUSTER"
+ });
+ })
+ );
+ }
+ this.set('isAllSharedWidgetsLoaded', true);
+ },
+
+ allSharedWidgets: [],
+ mineWidgets: [],
+
+ /**
+ * load all mine widgets of current user to show on widget browser
+ * @returns {$.ajax}
+ */
+ loadMineWidgets: function () {
+ this.set('isMineWidgetsLoaded', false);
+ return App.ajax.send({
+ name: 'widgets.all.mine.get',
+ sender: this,
+ data: {
+ loginName: App.router.get('loginName')
+ },
+ success: 'loadMineWidgetsSuccessCallback'
+ });
+ },
+
+ /**
+ * success callback of <code>loadMineWidgets</code>
+ * @param {object|null} data
+ */
+ loadMineWidgetsSuccessCallback: function (data) {
+ var widgetIds = this.get('widgets').mapProperty('id');
+ if (data.items[0] && data.items.length) {
+ this.set("mineWidgets",
+ data.items.filter(function (widget) {
+ return widget.WidgetInfo.widget_type != "HEATMAP";
+ }).map(function (widget) {
+ var widgetType = widget.WidgetInfo.widget_type;
+ var widgetName = widget.WidgetInfo.widget_name;
+ var widgetId = widget.WidgetInfo.id;
+ return Em.Object.create({
+ id: widget.WidgetInfo.id,
+ widgetName: widgetName,
+ description: widget.WidgetInfo.description,
+ widgetType: widgetType,
+ iconPath: "/img/widget-" + widgetType.toLowerCase() + ".png",
+ serviceName: JSON.parse(widget.WidgetInfo.metrics).mapProperty('service_name').uniq().join('-'),
+ added: widgetIds.contains(widgetId),
+ isShared: widget.WidgetInfo.scope == "CLUSTER"
+ });
+ })
+ );
+ } else {
+ this.set("mineWidgets", []);
+ }
+ this.set('isMineWidgetsLoaded', true);
+ },
+
+ /**
+ * add widgets, on click handler for "Add"
+ */
+ addWidget: function (event) {
+ var widgetToAdd = event.context;
+ var activeLayout = this.get('activeWidgetLayout');
+ var widgetIds = activeLayout.get('widgets').map(function(widget) {
+ return {
+ "id": widget.get("id")
+ }
+ });
+ widgetIds.pushObject({
+ "id": widgetToAdd.id
+ });
+ var data = {
+ "WidgetLayoutInfo": {
+ "display_name": activeLayout.get("displayName"),
+ "id": activeLayout.get("id"),
+ "layout_name": activeLayout.get("layoutName"),
+ "scope": activeLayout.get("scope"),
+ "section_name": activeLayout.get("sectionName"),
+ "widgets": widgetIds
+ }
+ };
+
+ widgetToAdd.set('added', !widgetToAdd.added);
+ return App.ajax.send({
+ name: 'widget.layout.edit',
+ sender: this,
+ data: {
+ layoutId: activeLayout.get("id"),
+ data: data
+ },
+ success: 'updateActiveLayout'
+ });
+ },
+
+ /**
+ * hide widgets, on click handler for "Added"
+ */
+ hideWidget: function (event) {
+ var widgetToHide = event.context;
+ var activeLayout = this.get('activeWidgetLayout');
+ var widgetIds = activeLayout.get('widgets').map(function (widget) {
+ return {
+ "id": widget.get("id")
+ }
+ });
+ var data = {
+ "WidgetLayoutInfo": {
+ "display_name": activeLayout.get("displayName"),
+ "id": activeLayout.get("id"),
+ "layout_name": activeLayout.get("layoutName"),
+ "scope": activeLayout.get("scope"),
+ "section_name": activeLayout.get("sectionName"),
+ "widgets": widgetIds.filter(function (widget) {
+ return widget.id !== widgetToHide.id;
+ })
+ }
+ };
+
+ widgetToHide.set('added', !widgetToHide.added);
+ return App.ajax.send({
+ name: 'widget.layout.edit',
+ sender: this,
+ data: {
+ layoutId: activeLayout.get("id"),
+ data: data
+ },
+ success: 'hideWidgetSuccessCallback'
+ });
+
+ },
+
+ /**
+ * @param {object|null} data
+ * @param {object} opt
+ * @param {object} params
+ */
+ hideWidgetSuccessCallback: function (data, opt, params) {
+ params.data.WidgetLayoutInfo.widgets = params.data.WidgetLayoutInfo.widgets.map(function (widget) {
+ return {
+ WidgetInfo: {
+ id: widget.id
+ }
+ }
+ });
+ App.widgetLayoutMapper.map({items: [params.data]});
+ this.propertyDidChange('widgets');
+ },
+
+ /**
+ * update current active widget layout
+ */
+ updateActiveLayout: function () {
+ this.getActiveWidgetLayout();
+ },
+
+ /**
+ * delete widgets, on click handler for "Delete"
+ */
+ deleteWidget: function (event) {
+ var widget = event.context;
+ var self = this;
+ var confirmMsg = widget.get('isShared') ? Em.I18n.t('dashboard.widgets.browser.action.delete.shared.bodyMsg').format(widget.widgetName) : Em.I18n.t('dashboard.widgets.browser.action.delete.mine.bodyMsg').format(widget.widgetName);
+ var bodyMessage = Em.Object.create({
+ confirmMsg: confirmMsg,
+ confirmButton: Em.I18n.t('dashboard.widgets.browser.action.delete.btnMsg')
+ });
+ return App.showConfirmationFeedBackPopup(function (query) {
+ return App.ajax.send({
+ name: 'widget.action.delete',
+ sender: self,
+ data: {
+ id: widget.id
+ },
+ success: 'updateWidgetBrowser'
+ });
+
+ }, bodyMessage);
+ },
+
+ /**
+ * update widget browser content after deleted some widget
+ */
+ updateWidgetBrowser: function () {
+ this.loadAllSharedWidgets();
+ this.loadMineWidgets();
+ },
+
+ /**
+ * Share widgets, on click handler for "Share"
+ */
+ shareWidget: function (event) {
+ var widget = event.context;
+ var self = this;
+ var bodyMessage = Em.Object.create({
+ confirmMsg: Em.I18n.t('dashboard.widgets.browser.action.share.confirmation'),
+ confirmButton: Em.I18n.t('dashboard.widgets.browser.action.share')
+ });
+ return App.showConfirmationFeedBackPopup(function (query) {
+ return App.ajax.send({
+ name: 'widgets.wizard.edit',
+ sender: self,
+ data: {
+ data: {
+ "WidgetInfo": {
+ "widget_name": widget.get("widgetName"),
+ "scope": "CLUSTER"
+ }
+ },
+ widgetId: widget.get("id")
+ },
+ success: 'updateWidgetBrowser'
+ });
+ }, bodyMessage);
+ },
+
+ /**
+ * create widget
+ */
+ createWidget: function () {
+ App.router.send('createServiceWidget', Em.Object.create({
+ layout: this.get('activeWidgetLayout'),
+ serviceName: this.get('content.serviceName')
+ }));
+ },
+
+ /**
+ * edit widget
+ * @param {App.Widget} content
+ */
+ editWidget: function (content) {
+ content.set('serviceName', this.get('content.serviceName'));
+ App.router.send('editServiceWidget', content);
+ },
+
+ /**
+ * launch Widgets Browser popup
+ * @method showPopup
+ * @return {App.ModalPopup}
+ */
+ goToWidgetsBrowser: function () {
+ var self = this;
+
+ return App.ModalPopup.show({
+ header: Em.I18n.t('dashboard.widgets.browser.header'),
+
+ classNames: ['common-modal-wrapper', 'widgets-browser-popup'],
+ modalDialogClasses: ['modal-lg'],
+ onPrimary: function () {
+ this.hide();
+ self.set('isAllSharedWidgetsLoaded', false);
+ self.set('allSharedWidgets', []);
+ self.set('isMineWidgetsLoaded', false);
+ self.set('mineWidgets', []);
+ },
+ autoHeight: false,
+ isHideBodyScroll: false,
+ footerClass: Ember.View.extend({
+ templateName: require('templates/common/modal_popups/widget_browser_footer'),
+ isShowMineOnly: false,
+ onPrimary: function() {
+ this.get('parentView').onPrimary();
+ }
+ }),
+ isShowMineOnly: false,
+ bodyClass: Ember.View.extend({
+ templateName: require('templates/common/modal_popups/widget_browser_popup'),
+ controller: self,
+ willInsertElement: function () {
+ this.get('controller').loadAllSharedWidgets();
+ this.get('controller').loadMineWidgets();
+ },
+
+ isLoaded: Em.computed.and('controller.isAllSharedWidgetsLoaded', 'controller.isMineWidgetsLoaded'),
+
+ isWidgetEmptyList: Em.computed.empty('filteredContent'),
+
+ activeService: '',
+ activeStatus: '',
+
+ content: function () {
+ if (this.get('parentView.isShowMineOnly')) {
+ return this.get('controller.mineWidgets');
+ } else {
+ // merge my widgets and all shared widgets, no duplicated is allowed
+ var content = [];
+ var widgetMap = {};
+ var allWidgets = this.get('controller.allSharedWidgets').concat(this.get('controller.mineWidgets'));
+ allWidgets.forEach(function(widget) {
+ if (!widgetMap[widget.get("id")]) {
+ content.pushObject(widget);
+ widgetMap[widget.get("id")] = true;
+ }
+ });
+ return content;
+ }
+ }.property('controller.allSharedWidgets.length', 'controller.isAllSharedWidgetsLoaded',
+ 'controller.mineWidgets.length', 'controller.isMineWidgetsLoaded', 'parentView.isShowMineOnly'),
+
+ /**
+ * displaying content filtered by service name and status.
+ */
+ filteredContent: function () {
+ var activeService = this.get('activeService') ? this.get('activeService') : this.get('controller.content.serviceName');
+ var result = [];
+ this.get('content').forEach(function (widget) {
+ if (widget.get('serviceName').indexOf(activeService) >= 0) {
+ result.pushObject(widget);
+ }
+ });
+ return result;
+ }.property('content', 'activeService', 'activeStatus'),
+
+ /**
+ * service name filter
+ */
+ services: function () {
+ var view = this;
+ var services = App.Service.find().filter(function(item){
+ var stackService = App.StackService.find().findProperty('serviceName', item.get('serviceName'));
+ return stackService.get('isServiceWithWidgets');
+ });
+ return services.map(function (service) {
+ return Em.Object.create({
+ value: service.get('serviceName'),
+ label: service.get('displayName'),
+ isActive: function () {
+ var activeService = view.get('activeService') ? view.get('activeService') : view.get('controller.content.serviceName');
+ return this.get('value') == activeService;
+ }.property('value', 'view.activeService')
+ })
+ });
+ }.property('activeService'),
+
+ filterByService: function (event) {
+ this.set('activeService', event.context);
+ },
+
+ createWidget: function () {
+ this.get('parentView').onPrimary();
+ this.get('controller').createWidget();
+ },
+
+ ensureTooltip: function () {
+ Em.run.later(this, function () {
+ App.tooltip($("[rel='shared-icon-tooltip']"));
+ }, 1000);
+ }.observes('activeService', 'parentView.isShowMineOnly'),
+
+ didInsertElement: function () {
+ this.ensureTooltip();
+ }
+ })
+ });
+ }
+
+});
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/ambari/blob/6eb273e1/ambari-web/app/controllers/main/service/info/summary.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/controllers/main/service/info/summary.js b/ambari-web/app/controllers/main/service/info/summary.js
index d696334..3d7483a 100644
--- a/ambari-web/app/controllers/main/service/info/summary.js
+++ b/ambari-web/app/controllers/main/service/info/summary.js
@@ -17,7 +17,7 @@
var App = require('app');
-App.MainServiceInfoSummaryController = Em.Controller.extend(App.WidgetSectionMixin, {
+App.MainServiceInfoSummaryController = Em.Controller.extend({
name: 'mainServiceInfoSummaryController',
selectedFlumeAgent: null,
@@ -40,10 +40,6 @@ App.MainServiceInfoSummaryController = Em.Controller.extend(App.WidgetSectionMix
*/
isPreviousRangerConfigsCallFailed: false,
- layoutNameSuffix: "_dashboard",
-
- sectionNameSuffix: "_SUMMARY",
-
/**
* HiveServer2 JDBC connection endpoint data
* @type {array}
@@ -112,18 +108,6 @@ App.MainServiceInfoSummaryController = Em.Controller.extend(App.WidgetSectionMix
],
/**
- * Some widget has type `GRAPH`
- *
- * @type {boolean}
- */
- someWidgetGraphExists: Em.computed.someBy('widgets', 'widgetType', 'GRAPH'),
-
- /**
- * @type {boolean}
- */
- showTimeRangeControl: Em.computed.or('!isServiceWithEnhancedWidgets', 'someWidgetGraphExists'),
-
- /**
* Set initial Ranger plugins data
* @method setRangerPlugins
*/
@@ -425,437 +409,6 @@ App.MainServiceInfoSummaryController = Em.Controller.extend(App.WidgetSectionMix
});
},
-
- /**
- * @type {boolean}
- */
- isWidgetLayoutsLoaded: false,
-
- /**
- * @type {boolean}
- */
- isAllSharedWidgetsLoaded: false,
-
- /**
- * @type {boolean}
- */
- isMineWidgetsLoaded: false,
-
-
- /**
- * load widget layouts across all users in CLUSTER scope
- * @returns {$.ajax}
- */
- loadWidgetLayouts: function () {
- this.set('isWidgetLayoutsLoaded', false);
- return App.ajax.send({
- name: 'widgets.layouts.get',
- sender: this,
- data: {
- sectionName: this.get('sectionName')
- },
- success: 'loadWidgetLayoutsSuccessCallback'
- });
- },
-
- loadWidgetLayoutsSuccessCallback: function (data) {
- App.widgetLayoutMapper.map(data);
- this.set('isWidgetLayoutsLoaded', true);
- },
-
-
- /**
- * load all shared widgets to show on widget browser
- * @returns {$.ajax}
- */
- loadAllSharedWidgets: function () {
- this.set('isAllSharedWidgetsLoaded', false);
- return App.ajax.send({
- name: 'widgets.all.shared.get',
- sender: this,
- success: 'loadAllSharedWidgetsSuccessCallback'
- });
- },
-
- /**
- * success callback of <code>loadAllSharedWidgets</code>
- * @param {object|null} data
- */
- loadAllSharedWidgetsSuccessCallback: function (data) {
- var widgetIds = this.get('widgets').mapProperty('id');
- if (data.items[0] && data.items.length) {
- this.set("allSharedWidgets",
- data.items.filter(function (widget) {
- return widget.WidgetInfo.widget_type != "HEATMAP";
- }).map(function (widget) {
- var widgetType = widget.WidgetInfo.widget_type;
- var widgetName = widget.WidgetInfo.widget_name;
- var widgetId = widget.WidgetInfo.id;
- return Em.Object.create({
- id: widgetId,
- widgetName: widgetName,
- description: widget.WidgetInfo.description,
- widgetType: widgetType,
- iconPath: "/img/widget-" + widgetType.toLowerCase() + ".png",
- serviceName: JSON.parse(widget.WidgetInfo.metrics).mapProperty('service_name').uniq().join('-'),
- added: widgetIds.contains(widgetId),
- isShared: widget.WidgetInfo.scope == "CLUSTER"
- });
- })
- );
- }
- this.set('isAllSharedWidgetsLoaded', true);
- },
-
- allSharedWidgets: [],
- mineWidgets: [],
-
- /**
- * load all mine widgets of current user to show on widget browser
- * @returns {$.ajax}
- */
- loadMineWidgets: function () {
- this.set('isMineWidgetsLoaded', false);
- return App.ajax.send({
- name: 'widgets.all.mine.get',
- sender: this,
- data: {
- loginName: App.router.get('loginName')
- },
- success: 'loadMineWidgetsSuccessCallback'
- });
- },
-
- /**
- * success callback of <code>loadMineWidgets</code>
- * @param {object|null} data
- */
- loadMineWidgetsSuccessCallback: function (data) {
- var widgetIds = this.get('widgets').mapProperty('id');
- if (data.items[0] && data.items.length) {
- this.set("mineWidgets",
- data.items.filter(function (widget) {
- return widget.WidgetInfo.widget_type != "HEATMAP";
- }).map(function (widget) {
- var widgetType = widget.WidgetInfo.widget_type;
- var widgetName = widget.WidgetInfo.widget_name;
- var widgetId = widget.WidgetInfo.id;
- return Em.Object.create({
- id: widget.WidgetInfo.id,
- widgetName: widgetName,
- description: widget.WidgetInfo.description,
- widgetType: widgetType,
- iconPath: "/img/widget-" + widgetType.toLowerCase() + ".png",
- serviceName: JSON.parse(widget.WidgetInfo.metrics).mapProperty('service_name').uniq().join('-'),
- added: widgetIds.contains(widgetId),
- isShared: widget.WidgetInfo.scope == "CLUSTER"
- });
- })
- );
- } else {
- this.set("mineWidgets", []);
- }
- this.set('isMineWidgetsLoaded', true);
- },
-
- /**
- * add widgets, on click handler for "Add"
- */
- addWidget: function (event) {
- var widgetToAdd = event.context;
- var activeLayout = this.get('activeWidgetLayout');
- var widgetIds = activeLayout.get('widgets').map(function(widget) {
- return {
- "id": widget.get("id")
- }
- });
- widgetIds.pushObject({
- "id": widgetToAdd.id
- });
- var data = {
- "WidgetLayoutInfo": {
- "display_name": activeLayout.get("displayName"),
- "id": activeLayout.get("id"),
- "layout_name": activeLayout.get("layoutName"),
- "scope": activeLayout.get("scope"),
- "section_name": activeLayout.get("sectionName"),
- "widgets": widgetIds
- }
- };
-
- widgetToAdd.set('added', !widgetToAdd.added);
- return App.ajax.send({
- name: 'widget.layout.edit',
- sender: this,
- data: {
- layoutId: activeLayout.get("id"),
- data: data
- },
- success: 'updateActiveLayout'
- });
- },
-
- /**
- * hide widgets, on click handler for "Added"
- */
- hideWidget: function (event) {
- var widgetToHide = event.context;
- var activeLayout = this.get('activeWidgetLayout');
- var widgetIds = activeLayout.get('widgets').map(function (widget) {
- return {
- "id": widget.get("id")
- }
- });
- var data = {
- "WidgetLayoutInfo": {
- "display_name": activeLayout.get("displayName"),
- "id": activeLayout.get("id"),
- "layout_name": activeLayout.get("layoutName"),
- "scope": activeLayout.get("scope"),
- "section_name": activeLayout.get("sectionName"),
- "widgets": widgetIds.filter(function (widget) {
- return widget.id !== widgetToHide.id;
- })
- }
- };
-
- widgetToHide.set('added', !widgetToHide.added);
- return App.ajax.send({
- name: 'widget.layout.edit',
- sender: this,
- data: {
- layoutId: activeLayout.get("id"),
- data: data
- },
- success: 'hideWidgetSuccessCallback'
- });
-
- },
-
- /**
- * @param {object|null} data
- * @param {object} opt
- * @param {object} params
- */
- hideWidgetSuccessCallback: function (data, opt, params) {
- params.data.WidgetLayoutInfo.widgets = params.data.WidgetLayoutInfo.widgets.map(function (widget) {
- return {
- WidgetInfo: {
- id: widget.id
- }
- }
- });
- App.widgetLayoutMapper.map({items: [params.data]});
- this.propertyDidChange('widgets');
- },
-
- /**
- * update current active widget layout
- */
- updateActiveLayout: function () {
- this.getActiveWidgetLayout();
- },
-
- /**
- * delete widgets, on click handler for "Delete"
- */
- deleteWidget: function (event) {
- var widget = event.context;
- var self = this;
- var confirmMsg = widget.get('isShared') ? Em.I18n.t('dashboard.widgets.browser.action.delete.shared.bodyMsg').format(widget.widgetName) : Em.I18n.t('dashboard.widgets.browser.action.delete.mine.bodyMsg').format(widget.widgetName);
- var bodyMessage = Em.Object.create({
- confirmMsg: confirmMsg,
- confirmButton: Em.I18n.t('dashboard.widgets.browser.action.delete.btnMsg')
- });
- return App.showConfirmationFeedBackPopup(function (query) {
- return App.ajax.send({
- name: 'widget.action.delete',
- sender: self,
- data: {
- id: widget.id
- },
- success: 'updateWidgetBrowser'
- });
-
- }, bodyMessage);
- },
-
- /**
- * update widget browser content after deleted some widget
- */
- updateWidgetBrowser: function () {
- this.loadAllSharedWidgets();
- this.loadMineWidgets();
- },
-
- /**
- * Share widgets, on click handler for "Share"
- */
- shareWidget: function (event) {
- var widget = event.context;
- var self = this;
- var bodyMessage = Em.Object.create({
- confirmMsg: Em.I18n.t('dashboard.widgets.browser.action.share.confirmation'),
- confirmButton: Em.I18n.t('dashboard.widgets.browser.action.share')
- });
- return App.showConfirmationFeedBackPopup(function (query) {
- return App.ajax.send({
- name: 'widgets.wizard.edit',
- sender: self,
- data: {
- data: {
- "WidgetInfo": {
- "widget_name": widget.get("widgetName"),
- "scope": "CLUSTER"
- }
- },
- widgetId: widget.get("id")
- },
- success: 'updateWidgetBrowser'
- });
- }, bodyMessage);
- },
-
- /**
- * create widget
- */
- createWidget: function () {
- App.router.send('createServiceWidget', Em.Object.create({
- layout: this.get('activeWidgetLayout'),
- serviceName: this.get('content.serviceName')
- }));
- },
-
- /**
- * edit widget
- * @param {App.Widget} content
- */
- editWidget: function (content) {
- content.set('serviceName', this.get('content.serviceName'));
- App.router.send('editServiceWidget', content);
- },
-
- /**
- * launch Widgets Browser popup
- * @method showPopup
- * @return {App.ModalPopup}
- */
- goToWidgetsBrowser: function () {
- var self = this;
-
- return App.ModalPopup.show({
- header: Em.I18n.t('dashboard.widgets.browser.header'),
-
- classNames: ['common-modal-wrapper', 'widgets-browser-popup'],
- modalDialogClasses: ['modal-lg'],
- onPrimary: function () {
- this.hide();
- self.set('isAllSharedWidgetsLoaded', false);
- self.set('allSharedWidgets', []);
- self.set('isMineWidgetsLoaded', false);
- self.set('mineWidgets', []);
- },
- autoHeight: false,
- isHideBodyScroll: false,
- footerClass: Ember.View.extend({
- templateName: require('templates/common/modal_popups/widget_browser_footer'),
- isShowMineOnly: false,
- onPrimary: function() {
- this.get('parentView').onPrimary();
- }
- }),
- isShowMineOnly: false,
- bodyClass: Ember.View.extend({
- templateName: require('templates/common/modal_popups/widget_browser_popup'),
- controller: self,
- willInsertElement: function () {
- this.get('controller').loadAllSharedWidgets();
- this.get('controller').loadMineWidgets();
- },
-
- isLoaded: Em.computed.and('controller.isAllSharedWidgetsLoaded', 'controller.isMineWidgetsLoaded'),
-
- isWidgetEmptyList: Em.computed.empty('filteredContent'),
-
- activeService: '',
- activeStatus: '',
-
- content: function () {
- if (this.get('parentView.isShowMineOnly')) {
- return this.get('controller.mineWidgets');
- } else {
- // merge my widgets and all shared widgets, no duplicated is allowed
- var content = [];
- var widgetMap = {};
- var allWidgets = this.get('controller.allSharedWidgets').concat(this.get('controller.mineWidgets'));
- allWidgets.forEach(function(widget) {
- if (!widgetMap[widget.get("id")]) {
- content.pushObject(widget);
- widgetMap[widget.get("id")] = true;
- }
- });
- return content;
- }
- }.property('controller.allSharedWidgets.length', 'controller.isAllSharedWidgetsLoaded',
- 'controller.mineWidgets.length', 'controller.isMineWidgetsLoaded', 'parentView.isShowMineOnly'),
-
- /**
- * displaying content filtered by service name and status.
- */
- filteredContent: function () {
- var activeService = this.get('activeService') ? this.get('activeService') : this.get('controller.content.serviceName');
- var result = [];
- this.get('content').forEach(function (widget) {
- if (widget.get('serviceName').indexOf(activeService) >= 0) {
- result.pushObject(widget);
- }
- });
- return result;
- }.property('content', 'activeService', 'activeStatus'),
-
- /**
- * service name filter
- */
- services: function () {
- var view = this;
- var services = App.Service.find().filter(function(item){
- var stackService = App.StackService.find().findProperty('serviceName', item.get('serviceName'));
- return stackService.get('isServiceWithWidgets');
- });
- return services.map(function (service) {
- return Em.Object.create({
- value: service.get('serviceName'),
- label: service.get('displayName'),
- isActive: function () {
- var activeService = view.get('activeService') ? view.get('activeService') : view.get('controller.content.serviceName');
- return this.get('value') == activeService;
- }.property('value', 'view.activeService')
- })
- });
- }.property('activeService'),
-
- filterByService: function (event) {
- this.set('activeService', event.context);
- },
-
- createWidget: function () {
- this.get('parentView').onPrimary();
- this.get('controller').createWidget();
- },
-
- ensureTooltip: function () {
- Em.run.later(this, function () {
- App.tooltip($("[rel='shared-icon-tooltip']"));
- }, 1000);
- }.observes('activeService', 'parentView.isShowMineOnly'),
-
- didInsertElement: function () {
- this.ensureTooltip();
- }
- })
- });
- },
-
goToView: function(event) {
App.router.route(event.context.get('internalAmbariUrl'));
}
http://git-wip-us.apache.org/repos/asf/ambari/blob/6eb273e1/ambari-web/app/controllers/main/service/widgets/create/wizard_controller.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/controllers/main/service/widgets/create/wizard_controller.js b/ambari-web/app/controllers/main/service/widgets/create/wizard_controller.js
index e833ead..a46c5e4 100644
--- a/ambari-web/app/controllers/main/service/widgets/create/wizard_controller.js
+++ b/ambari-web/app/controllers/main/service/widgets/create/wizard_controller.js
@@ -417,7 +417,7 @@ App.WidgetWizardController = App.WizardController.extend({
var self = this;
var successCallBack = function() {
self.get('popup').hide();
- App.router.transitionTo('main.services.service.summary', service);
+ App.router.transitionTo('main.services.service.metrics', service);
App.get('router.updateController').updateAll();
};
http://git-wip-us.apache.org/repos/asf/ambari/blob/6eb273e1/ambari-web/app/messages.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/messages.js b/ambari-web/app/messages.js
index 7cde3d1..3c4f038 100644
--- a/ambari-web/app/messages.js
+++ b/ambari-web/app/messages.js
@@ -2199,6 +2199,7 @@ Em.I18n.translations = {
'services.service.info.menu.summary':'Summary',
'services.service.info.menu.configs':'Configs',
'services.service.info.menu.heatmaps':'Heatmaps',
+ 'services.service.info.menu.metrics':'Metrics',
'services.service.info.summary.hostsRunningMonitor':'{0}/{1}',
'services.service.info.summary.serversHostCount':'{0} more',
http://git-wip-us.apache.org/repos/asf/ambari/blob/6eb273e1/ambari-web/app/styles/enhanced_service_dashboard.less
----------------------------------------------------------------------
diff --git a/ambari-web/app/styles/enhanced_service_dashboard.less b/ambari-web/app/styles/enhanced_service_dashboard.less
index 34a4763..00b46a8 100644
--- a/ambari-web/app/styles/enhanced_service_dashboard.less
+++ b/ambari-web/app/styles/enhanced_service_dashboard.less
@@ -26,6 +26,10 @@
clear: both;
+ .service-widgets-box {
+ padding: 10px 1.1% 10px 1.1%;
+ }
+
#add-widget-action-box {
background-color: @add-widget-btn-color;
width: 97%;
@@ -69,7 +73,7 @@
width: 93%;
}
.span2p4 {
- width: 22.7%;
+ width: 24.4%;
height: 100%;
background-color: white;
margin: 5px 0 5px 5px;
@@ -188,6 +192,26 @@
}
}
+@media (min-width: 1200px) {
+
+ .service-metrics-block .service-widgets-box {
+ padding: 10px 1.3% 10px 1.3%;
+ }
+
+ #widget_layout .span2p4 {
+ width: 24.5%;
+ *width: 24.5%;
+ }
+}
+
+@media (min-width: 1500px) {
+
+ #widget_layout .span2p4 {
+ width: 24.6%;
+ *width: 24.6%;
+ }
+}
+
#widget-preview {
max-width: 200px;
margin: auto;
http://git-wip-us.apache.org/repos/asf/ambari/blob/6eb273e1/ambari-web/app/templates/main/service/info/metrics.hbs
----------------------------------------------------------------------
diff --git a/ambari-web/app/templates/main/service/info/metrics.hbs b/ambari-web/app/templates/main/service/info/metrics.hbs
new file mode 100644
index 0000000..6834c06
--- /dev/null
+++ b/ambari-web/app/templates/main/service/info/metrics.hbs
@@ -0,0 +1,104 @@
+{{!
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+}}
+
+{{#if view.serviceHasMetrics}}
+ <div class="service-metrics-block">
+ <div class="panel panel-default">
+ <div class="panel-heading">
+ <div class="row">
+ <div class="col-md-7 col-lg-7">
+ <h4 class="panel-title">{{t services.service.metrics}}</h4>
+ </div>
+ <div class="col-md-5 col-lg-5">
+ {{#if showTimeRangeControl}}
+ {{view view.timeRangeListView}}
+ {{/if}}
+ {{#if isServiceWithEnhancedWidgets}}
+ {{#if isAmbariMetricsInstalled}}
+ <div class="btn-group pull-right actions">
+ <button type="button" class="btn btn-default dropdown-toggle" data-toggle="dropdown">
+ {{t common.actions}} <span class="caret"></span>
+ </button>
+ <ul class="dropdown-menu">
+ {{#each option in view.widgetActions}}
+ <li {{bindAttr class="option.layouts:dropdown-submenu"}}>
+ {{#if option.isAction}}
+ <a href="#" {{action doWidgetAction option.action target="view"}}>
+ <i {{bindAttr class="option.class"}}></i>
+ {{option.label}}
+ </a>
+ {{#if option.layouts}}
+ <ul class="dropdown-menu">
+ {{#each layout in option.layouts}}
+ <li>
+ <a href="javascript:void(0);">
+ {{layout.layoutName}}
+ </a>
+ </li>
+ {{/each}}
+ </ul>
+ {{/if}}
+ {{/if}}
+ </li>
+ {{/each}}
+ </ul>
+ </div>
+ {{/if}}
+ {{/if}}
+ </div>
+ </div>
+ </div>
+ <div class="panel-body service-widgets-box">
+ {{#if isServiceWithEnhancedWidgets}}
+ <div id="widget_layout" class="thumbnails">
+ {{#each widget in controller.widgets}}
+ <div class="widget span2p4" {{bindAttr id="widget.id"}}>
+ {{view widget.viewClass contentBinding="widget" idBinding="widget.id"}}
+ </div>
+ {{/each}}
+ {{#if isAmbariMetricsInstalled}}
+ <div class="span2p4">
+ <button id="add-widget-action-box" {{action "goToWidgetsBrowser" controller.content
+ target="controller"}}
+ rel="add-widget-tooltip" {{translateAttr
+ data-original-title="dashboard.widgets.addButton.tooltip"}}>
+ <i class="glyphicon glyphicon-plus"></i></button>
+ </div>
+ {{/if}}
+ </div>
+ {{/if}}
+ <table class="graphs">
+ {{#each graphs in view.serviceMetricGraphs}}
+ <tr>
+ {{#each graph in graphs}}
+ <td>
+ <div>
+ {{view graph}}
+ </div>
+ </td>
+ {{/each}}
+ </tr>
+ {{/each}}
+ </table>
+ </div>
+ </div>
+ </div>
+{{/if}}
+
+
+
http://git-wip-us.apache.org/repos/asf/ambari/blob/6eb273e1/ambari-web/app/templates/main/service/info/summary.hbs
----------------------------------------------------------------------
diff --git a/ambari-web/app/templates/main/service/info/summary.hbs b/ambari-web/app/templates/main/service/info/summary.hbs
index 075cae0..b0c9e7f 100644
--- a/ambari-web/app/templates/main/service/info/summary.hbs
+++ b/ambari-web/app/templates/main/service/info/summary.hbs
@@ -134,90 +134,6 @@
</div>
</div>
</div>
- {{! widgets in the metrics panel are loaded seperately from summary page text information
- and does not get block due to any global API poller information }}
- {{#if view.isServiceMetricLoaded}}
- <div class="service-metrics-block">
- <div class="panel panel-default">
- <div class="panel-heading">
- <div class="row">
- <div class="col-md-7 col-lg-7">
- <h4 class="panel-title">{{t services.service.metrics}}</h4>
- </div>
- <div class="col-md-5 col-lg-5">
- {{#if showTimeRangeControl}}
- {{view view.timeRangeListView}}
- {{/if}}
- {{#if isServiceWithEnhancedWidgets}}
- {{#if isAmbariMetricsInstalled}}
- <div class="btn-group pull-right actions">
- <button type="button" class="btn btn-default dropdown-toggle" data-toggle="dropdown">
- {{t common.actions}} <span class="caret"></span>
- </button>
- <ul class="dropdown-menu">
- {{#each option in view.widgetActions}}
- <li {{bindAttr class="option.layouts:dropdown-submenu"}}>
- {{#if option.isAction}}
- <a href="#" {{action doWidgetAction option.action target="view"}}>
- <i {{bindAttr class="option.class"}}></i>
- {{option.label}}
- </a>
- {{#if option.layouts}}
- <ul class="dropdown-menu">
- {{#each layout in option.layouts}}
- <li>
- <a href="javascript:void(0);">
- {{layout.layoutName}}
- </a>
- </li>
- {{/each}}
- </ul>
- {{/if}}
- {{/if}}
- </li>
- {{/each}}
- </ul>
- </div>
- {{/if}}
- {{/if}}
- </div>
- </div>
- </div>
- <div class="panel-body">
- {{#if isServiceWithEnhancedWidgets}}
- <div id="widget_layout" class="thumbnails">
- {{#each widget in controller.widgets}}
- <div class="widget span2p4" {{bindAttr id="widget.id"}}>
- {{view widget.viewClass contentBinding="widget" idBinding="widget.id"}}
- </div>
- {{/each}}
- {{#if isAmbariMetricsInstalled}}
- <div class="span2p4">
- <button id="add-widget-action-box"
- {{action "goToWidgetsBrowser" controller.content target="controller"}}
- rel="add-widget-tooltip" {{translateAttr data-original-title="dashboard.widgets.addButton.tooltip"}}>
- <i class="glyphicon glyphicon-plus"></i></button>
- </div>
- {{/if}}
- </div>
- {{/if}}
- <table class="graphs">
- {{#each graphs in view.serviceMetricGraphs}}
- <tr>
- {{#each graph in graphs}}
- <td>
- <div>
- {{view graph}}
- </div>
- </td>
- {{/each}}
- </tr>
- {{/each}}
- </table>
- </div>
- </div>
- </div>
- {{/if}}
</div>
{{#if view.collapsedSections}}
http://git-wip-us.apache.org/repos/asf/ambari/blob/6eb273e1/ambari-web/app/templates/main/service/item.hbs
----------------------------------------------------------------------
diff --git a/ambari-web/app/templates/main/service/item.hbs b/ambari-web/app/templates/main/service/item.hbs
index e942eb1..df26a9d 100644
--- a/ambari-web/app/templates/main/service/item.hbs
+++ b/ambari-web/app/templates/main/service/item.hbs
@@ -16,7 +16,10 @@
* limitations under the License.
}}
-{{view App.MainServiceInfoMenuView configTabBinding="view.hasConfigTab" heatmapTabBinding="view.hasHeatmapTab"}}
+{{view App.MainServiceInfoMenuView
+ configTabBinding="view.hasConfigTab"
+ heatmapTabBinding="view.hasHeatmapTab"
+ metricTabBinding="view.hasMetricTab"}}
{{#isAuthorized "SERVICE.RUN_CUSTOM_COMMAND, SERVICE.RUN_SERVICE_CHECK, SERVICE.START_STOP, SERVICE.TOGGLE_MAINTENANCE, SERVICE.ENABLE_HA"}}
<div class="service-button">
http://git-wip-us.apache.org/repos/asf/ambari/blob/6eb273e1/ambari-web/app/views.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/views.js b/ambari-web/app/views.js
index 8031434..50729a7 100644
--- a/ambari-web/app/views.js
+++ b/ambari-web/app/views.js
@@ -348,6 +348,7 @@ require('views/main/charts/heatmap/heatmap_rack');
require('views/main/charts/heatmap/heatmap_host');
require('views/main/charts/heatmap/heatmap_host_detail');
require('views/main/service/info/heatmap_view');
+require('views/main/service/info/metrics_view');
require('views/main/service/widgets/create/wizard_view');
require('views/main/service/widgets/create/step1_view');
http://git-wip-us.apache.org/repos/asf/ambari/blob/6eb273e1/ambari-web/app/views/main/service/info/menu.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/views/main/service/info/menu.js b/ambari-web/app/views/main/service/info/menu.js
index 3533a72..89d5401 100644
--- a/ambari-web/app/views/main/service/info/menu.js
+++ b/ambari-web/app/views/main/service/info/menu.js
@@ -45,6 +45,13 @@ App.MainServiceInfoMenuView = Em.CollectionView.extend({
routing: 'configs'
});
}
+ if (this.get('metricTab')) {
+ menuItems.push({
+ label: Em.I18n.t('services.service.info.menu.metrics'),
+ id: 'metrics-service-tab',
+ routing: 'metrics'
+ });
+ }
return menuItems;
}.property(),
http://git-wip-us.apache.org/repos/asf/ambari/blob/6eb273e1/ambari-web/app/views/main/service/info/metrics_view.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/views/main/service/info/metrics_view.js b/ambari-web/app/views/main/service/info/metrics_view.js
new file mode 100644
index 0000000..161dce1
--- /dev/null
+++ b/ambari-web/app/views/main/service/info/metrics_view.js
@@ -0,0 +1,290 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+var App = require('app');
+var misc = require('utils/misc');
+require('views/main/service/service');
+require('data/service_graph_config');
+
+App.MainServiceInfoMetricsView = Em.View.extend(App.Persist, App.TimeRangeMixin, {
+ templateName: require('templates/main/service/info/metrics'),
+ /**
+ * @property {Number} chunkSize - number of columns in Metrics section
+ */
+ chunkSize: 5,
+
+ service: null,
+
+ svc: function () {
+ var svc = this.get('controller.content');
+ var svcName = svc.get('serviceName');
+ if (svcName) {
+ switch (svcName.toLowerCase()) {
+ case 'hdfs':
+ svc = App.HDFSService.find().objectAt(0);
+ break;
+ case 'yarn':
+ svc = App.YARNService.find().objectAt(0);
+ break;
+ case 'hbase':
+ svc = App.HBaseService.find().objectAt(0);
+ break;
+ case 'flume':
+ svc = App.FlumeService.find().objectAt(0);
+ break;
+ default:
+ break;
+ }
+ }
+ return svc;
+ }.property('controller.content.serviceName').volatile(),
+
+ getServiceModel: function (serviceName) {
+ var extended = App.Service.extendedModel[serviceName];
+ if (extended) {
+ return App[extended].find().objectAt(0);
+ }
+ return App.Service.find(serviceName);
+ },
+
+ serviceName: Em.computed.alias('service.serviceName'),
+
+ /**
+ * Contains graphs for this particular service
+ */
+ serviceMetricGraphs: [],
+
+ /**
+ * @type {boolean}
+ * @default false
+ */
+ serviceHasMetrics: false,
+
+ /**
+ * Key-name to store time range in Persist
+ * @type {string}
+ */
+ persistKey: Em.computed.format('time-range-service-{0}', 'service.serviceName'),
+
+ didInsertElement: function () {
+ var svcName = this.get('controller.content.serviceName');
+ this.set('service', this.getServiceModel(svcName));
+ var isMetricsSupported = svcName !== 'STORM' || App.get('isStormMetricsSupported');
+
+ this.get('controller').getActiveWidgetLayout();
+ if (App.get('supports.customizedWidgetLayout')) {
+ this.get('controller').loadWidgetLayouts();
+ }
+
+ if (svcName && isMetricsSupported) {
+ var allServices = require('data/service_graph_config');
+ this.constructGraphObjects(allServices[svcName.toLowerCase()]);
+ }
+ this.makeSortable();
+ this.addWidgetTooltip();
+ },
+
+ addWidgetTooltip: function() {
+ Em.run.later(this, function () {
+ App.tooltip($("[rel='add-widget-tooltip']"));
+ // enable description show up on hover
+ $('.img-thumbnail').hoverIntent(function() {
+ if ($(this).is('hover')) {
+ $(this).find('.hidden-description').delay(1000).fadeIn(200).end();
+ }
+ }, function() {
+ $(this).find('.hidden-description').stop().hide().end();
+ });
+ }, 1000);
+ },
+
+ willDestroyElement: function() {
+ $("[rel='add-widget-tooltip']").tooltip('destroy');
+ $('.img-thumbnail').off();
+ $('#widget_layout').sortable('destroy');
+ $('.widget.span2p4').detach().remove();
+ this.get('serviceMetricGraphs').clear();
+ this.set('service', null);
+ },
+
+ /*
+ * Find the graph class associated with the graph name, and split
+ * the array into sections of 5 for displaying on the page
+ * (will only display rows with 5 items)
+ */
+ constructGraphObjects: function (graphNames) {
+ var self = this,
+ stackService = App.StackService.find(this.get('controller.content.serviceName'));
+
+ if (!graphNames && !stackService.get('isServiceWithWidgets')) {
+ this.get('serviceMetricGraphs').clear();
+ this.set('serviceHasMetrics', false);
+ return;
+ }
+
+ // load time range(currentTimeRangeIndex) for current service from server
+ this.getUserPref(self.get('persistKey')).complete(function () {
+ var result = [], graphObjects = [], chunkSize = self.get('chunkSize');
+ if (graphNames) {
+ graphNames.forEach(function (graphName) {
+ graphObjects.push(App["ChartServiceMetrics" + graphName].extend());
+ });
+ }
+ while (graphObjects.length) {
+ result.push(graphObjects.splice(0, chunkSize));
+ }
+ self.set('serviceMetricGraphs', result);
+ self.set('serviceHasMetrics', true);
+ });
+ },
+
+ getUserPrefSuccessCallback: function (response, request) {
+ if (response) {
+ this.set('currentTimeRangeIndex', response);
+ }
+ },
+
+ getUserPrefErrorCallback: function (request) {
+ if (request.status === 404) {
+ this.postUserPref(this.get('persistKey'), 0);
+ this.set('currentTimeRangeIndex', 0);
+ }
+ },
+
+ /**
+ * list of static actions of widget
+ * @type {Array}
+ */
+ staticGeneralWidgetActions: [
+ Em.Object.create({
+ label: Em.I18n.t('dashboard.widgets.actions.browse'),
+ class: 'glyphicon glyphicon-th',
+ action: 'goToWidgetsBrowser',
+ isAction: true
+ })
+ ],
+
+ /**
+ * list of static actions of widget accessible to Admin/Operator privilege
+ * @type {Array}
+ */
+
+ staticAdminPrivelegeWidgetActions: [
+ Em.Object.create({
+ label: Em.I18n.t('dashboard.widgets.create'),
+ class: 'glyphicon glyphicon-plus',
+ action: 'createWidget',
+ isAction: true
+ })
+ ],
+
+ /**
+ * List of static actions related to widget layout
+ */
+ staticWidgetLayoutActions: [
+ Em.Object.create({
+ label: Em.I18n.t('dashboard.widgets.layout.save'),
+ class: 'glyphicon glyphicon-download-alt',
+ action: 'saveLayout',
+ isAction: true
+ }),
+ Em.Object.create({
+ label: Em.I18n.t('dashboard.widgets.layout.import'),
+ class: 'glyphicon glyphicon-file',
+ isAction: true,
+ layouts: App.WidgetLayout.find()
+ })
+ ],
+
+ /**
+ * @type {Array}
+ */
+ widgetActions: function() {
+ var options = [];
+ if (App.isAuthorized('SERVICE.MODIFY_CONFIGS')) {
+ if (App.supports.customizedWidgetLayout) {
+ options.pushObjects(this.get('staticWidgetLayoutActions'));
+ }
+ options.pushObjects(this.get('staticAdminPrivelegeWidgetActions'));
+ }
+ options.pushObjects(this.get('staticGeneralWidgetActions'));
+ return options;
+ }.property(''),
+
+ /**
+ * call action function defined in controller
+ * @param event
+ */
+ doWidgetAction: function(event) {
+ if($.isFunction(this.get('controller')[event.context])) {
+ this.get('controller')[event.context].apply(this.get('controller'));
+ }
+ },
+
+ /**
+ * onclick handler for a time range option
+ * @param {object} event
+ */
+ setTimeRange: function (event) {
+ var graphs = this.get('controller.widgets').filterProperty('widgetType', 'GRAPH'),
+ callback = function () {
+ graphs.forEach(function (widget) {
+ widget.set('properties.time_range', event.context.value);
+ });
+ };
+ this._super(event, callback);
+
+ // Preset time range is specified by user
+ if (event.context.value !== '0') {
+ callback();
+ }
+ },
+
+ /**
+ * Define if some widget is currently moving
+ * @type {boolean}
+ */
+ isMoving: false,
+
+ /**
+ * Make widgets' list sortable on New Dashboard style
+ */
+ makeSortable: function () {
+ var self = this;
+ $('html').on('DOMNodeInserted', '#widget_layout', function () {
+ $(this).sortable({
+ items: "> div",
+ cursor: "move",
+ tolerance: "pointer",
+ scroll: false,
+ update: function () {
+ var widgets = misc.sortByOrder($("#widget_layout .widget").map(function () {
+ return this.id;
+ }), self.get('controller.widgets'));
+ self.get('controller').saveWidgetLayout(widgets);
+ },
+ activate: function () {
+ self.set('isMoving', true);
+ },
+ deactivate: function () {
+ self.set('isMoving', false);
+ }
+ }).disableSelection();
+ $('html').off('DOMNodeInserted', '#widget_layout');
+ });
+ }
+});
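For readers skimming the new view above: the row-splitting that constructGraphObjects performs is a plain splice loop, sketched here in isolation. This is an illustration only; chunkRows is a hypothetical name, and the real code operates on App.ChartServiceMetrics* classes rather than strings.

function chunkRows(graphObjects, chunkSize) {
  var result = [];
  while (graphObjects.length) {
    // splice mutates the input, removing up to chunkSize items per pass
    result.push(graphObjects.splice(0, chunkSize));
  }
  return result;
}

// chunkRows(['G1', 'G2', 'G3', 'G4', 'G5', 'G6', 'G7'], 5)
// => [['G1', 'G2', 'G3', 'G4', 'G5'], ['G6', 'G7']]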
http://git-wip-us.apache.org/repos/asf/ambari/blob/6eb273e1/ambari-web/app/views/main/service/info/summary.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/views/main/service/info/summary.js b/ambari-web/app/views/main/service/info/summary.js
index 91b2ca3..a4769e6 100644
--- a/ambari-web/app/views/main/service/info/summary.js
+++ b/ambari-web/app/views/main/service/info/summary.js
@@ -21,13 +21,10 @@ var misc = require('utils/misc');
require('views/main/service/service');
require('data/service_graph_config');
-App.MainServiceInfoSummaryView = Em.View.extend(App.Persist, App.TimeRangeMixin, {
+App.MainServiceInfoSummaryView = Em.View.extend({
templateName: require('templates/main/service/info/summary'),
- /**
- * @property {Number} chunkSize - number of columns in Metrics section
- */
- chunkSize: 5,
- attributes:null,
+
+ attributes: null,
/**
* Contain array with list of master components from <code>App.Service.hostComponets</code> which are
@@ -165,6 +162,47 @@ App.MainServiceInfoSummaryView = Em.View.extend(App.Persist, App.TimeRangeMixin,
Em.run.once(self, 'setComponentsContent');
}.observes('service.hostComponents.length', 'service.slaveComponents.@each.totalCount', 'service.clientComponents.@each.totalCount'),
+ loadServiceSummary: function () {
+ var serviceName = this.get('serviceName');
+ var serviceSummaryView = null;
+
+ if (!serviceName) {
+ return;
+ }
+
+ if (this.get('oldServiceName')) {
+ // do not delete it!
+ return;
+ }
+
+ var customServiceView = this.get('serviceCustomViewsMap')[serviceName];
+ if (customServiceView) {
+ serviceSummaryView = customServiceView.extend({
+ service: this.get('service')
+ });
+ } else {
+ serviceSummaryView = Em.View.extend(App.MainDashboardServiceViewWrapper, {
+ templateName: this.get('templatePathPrefix') + 'base'
+ });
+ }
+ this.set('serviceSummaryView', serviceSummaryView);
+ this.set('oldServiceName', serviceName);
+ }.observes('serviceName'),
+
+ didInsertElement: function () {
+ this._super();
+ var svcName = this.get('controller.content.serviceName');
+ this.set('service', this.getServiceModel(svcName));
+ App.loadTimer.finish('Service Summary Page');
+ },
+
+ willDestroyElement: function() {
+ this.set('service', null);
+ this.get('mastersObj').clear();
+ this.get('slavesObj').clear();
+ this.get('clientObj').clear();
+ },
+
setComponentsContent: function() {
Em.run.next(function() {
if (Em.isNone(this.get('service'))) {
@@ -372,270 +410,5 @@ App.MainServiceInfoSummaryView = Em.View.extend(App.Persist, App.TimeRangeMixin,
rollingRestartStaleConfigSlaveComponents: function (componentName) {
batchUtils.launchHostComponentRollingRestart(componentName.context, this.get('service.displayName'), this.get('service.passiveState') === "ON", true);
- },
-
- /*
- * Find the graph class associated with the graph name, and split
- * the array into sections of 5 for displaying on the page
- * (will only display rows with 5 items)
- */
- constructGraphObjects: function (graphNames) {
- var self = this,
- stackService = App.StackService.find(this.get('controller.content.serviceName'));
-
- if (!graphNames && !stackService.get('isServiceWithWidgets')) {
- this.get('serviceMetricGraphs').clear();
- this.set('isServiceMetricLoaded', false);
- return;
- }
-
- // load time range(currentTimeRangeIndex) for current service from server
- this.getUserPref(self.get('persistKey')).complete(function () {
- var result = [], graphObjects = [], chunkSize = self.get('chunkSize');
- if (graphNames) {
- graphNames.forEach(function (graphName) {
- graphObjects.push(App["ChartServiceMetrics" + graphName].extend());
- });
- }
- while (graphObjects.length) {
- result.push(graphObjects.splice(0, chunkSize));
- }
- self.set('serviceMetricGraphs', result);
- self.set('isServiceMetricLoaded', true);
- });
- },
-
- /**
- * Contains graphs for this particular service
- */
- serviceMetricGraphs: [],
-
- /**
- * @type {boolean}
- * @default false
- */
- isServiceMetricLoaded: false,
-
- /**
- * Key-name to store time range in Persist
- * @type {string}
- */
- persistKey: Em.computed.format('time-range-service-{0}', 'service.serviceName'),
-
- getUserPrefSuccessCallback: function (response, request) {
- if (response) {
- this.set('currentTimeRangeIndex', response);
- }
- },
-
- getUserPrefErrorCallback: function (request) {
- if (request.status === 404) {
- this.postUserPref(this.get('persistKey'), 0);
- this.set('currentTimeRangeIndex', 0);
- }
- },
-
- /**
- * list of static actions of widget
- * @type {Array}
- */
- staticGeneralWidgetActions: [
- Em.Object.create({
- label: Em.I18n.t('dashboard.widgets.actions.browse'),
- class: 'glyphicon glyphicon-th',
- action: 'goToWidgetsBrowser',
- isAction: true
- })
- ],
-
- /**
- *list of static actions of widget accessible to Admin/Operator privelege
- * @type {Array}
- */
-
- staticAdminPrivelegeWidgetActions: [
- Em.Object.create({
- label: Em.I18n.t('dashboard.widgets.create'),
- class: 'glyphicon glyphicon-plus',
- action: 'createWidget',
- isAction: true
- })
- ],
-
- /**
- * List of static actions related to widget layout
- */
- staticWidgetLayoutActions: [
- Em.Object.create({
- label: Em.I18n.t('dashboard.widgets.layout.save'),
- class: 'glyphicon glyphicon-download-alt',
- action: 'saveLayout',
- isAction: true
- }),
- Em.Object.create({
- label: Em.I18n.t('dashboard.widgets.layout.import'),
- class: 'glyphicon glyphicon-file',
- isAction: true,
- layouts: App.WidgetLayout.find()
- })
- ],
-
- /**
- * @type {Array}
- */
- widgetActions: function() {
- var options = [];
- if (App.isAuthorized('SERVICE.MODIFY_CONFIGS')) {
- if (App.supports.customizedWidgetLayout) {
- options.pushObjects(this.get('staticWidgetLayoutActions'));
- }
- options.pushObjects(this.get('staticAdminPrivelegeWidgetActions'));
- }
- options.pushObjects(this.get('staticGeneralWidgetActions'));
- return options;
- }.property(''),
-
- /**
- * call action function defined in controller
- * @param event
- */
- doWidgetAction: function(event) {
- if($.isFunction(this.get('controller')[event.context])) {
- this.get('controller')[event.context].apply(this.get('controller'));
- }
- },
-
- /**
- * onclick handler for a time range option
- * @param {object} event
- */
- setTimeRange: function (event) {
- var graphs = this.get('controller.widgets').filterProperty('widgetType', 'GRAPH'),
- callback = function () {
- graphs.forEach(function (widget) {
- widget.set('properties.time_range', event.context.value);
- });
- };
- this._super(event, callback);
-
- // Preset time range is specified by user
- if (event.context.value !== '0') {
- callback();
- }
- },
-
- loadServiceSummary: function () {
- var serviceName = this.get('serviceName');
- var serviceSummaryView = null;
-
- if (!serviceName) {
- return;
- }
-
- if (this.get('oldServiceName')) {
- // do not delete it!
- return;
- }
-
- var customServiceView = this.get('serviceCustomViewsMap')[serviceName];
- if (customServiceView) {
- serviceSummaryView = customServiceView.extend({
- service: this.get('service')
- });
- } else {
- serviceSummaryView = Em.View.extend(App.MainDashboardServiceViewWrapper, {
- templateName: this.get('templatePathPrefix') + 'base'
- });
- }
- this.set('serviceSummaryView', serviceSummaryView);
- this.set('oldServiceName', serviceName);
- }.observes('serviceName'),
-
-
- /**
- * Service metrics panel not displayed when metrics service (ex:Ganglia) is not in stack definition.
- *
- * @type {boolean}
- */
- isNoServiceMetricsService: Em.computed.equal('App.services.serviceMetrics.length', 0),
-
- didInsertElement: function () {
- this._super();
- var svcName = this.get('controller.content.serviceName');
- this.set('service', this.getServiceModel(svcName));
- var isMetricsSupported = svcName !== 'STORM' || App.get('isStormMetricsSupported');
-
- this.get('controller').getActiveWidgetLayout();
- if (App.get('supports.customizedWidgetLayout')) {
- this.get('controller').loadWidgetLayouts();
- }
-
- if (svcName && isMetricsSupported) {
- var allServices = require('data/service_graph_config');
- this.constructGraphObjects(allServices[svcName.toLowerCase()]);
- }
- this.makeSortable();
- this.addWidgetTooltip();
- App.loadTimer.finish('Service Summary Page');
- },
-
- addWidgetTooltip: function() {
- Em.run.later(this, function () {
- App.tooltip($("[rel='add-widget-tooltip']"));
- // enable description show up on hover
- $('.img-thumbnail').hoverIntent(function() {
- if ($(this).is('hover')) {
- $(this).find('.hidden-description').delay(1000).fadeIn(200).end();
- }
- }, function() {
- $(this).find('.hidden-description').stop().hide().end();
- });
- }, 1000);
- },
-
- willDestroyElement: function() {
- $("[rel='add-widget-tooltip']").tooltip('destroy');
- $('.img-thumbnail').off();
- $('#widget_layout').sortable('destroy');
- $('.widget.span2p4').detach().remove();
- this.get('serviceMetricGraphs').clear();
- this.set('service', null);
- this.get('mastersObj').clear();
- this.get('slavesObj').clear();
- this.get('clientObj').clear();
- },
-
- /**
- * Define if some widget is currently moving
- * @type {boolean}
- */
- isMoving: false,
-
- /**
- * Make widgets' list sortable on New Dashboard style
- */
- makeSortable: function () {
- var self = this;
- $('html').on('DOMNodeInserted', '#widget_layout', function () {
- $(this).sortable({
- items: "> div",
- cursor: "move",
- tolerance: "pointer",
- scroll: false,
- update: function () {
- var widgets = misc.sortByOrder($("#widget_layout .widget").map(function () {
- return this.id;
- }), self.get('controller.widgets'));
- self.get('controller').saveWidgetLayout(widgets);
- },
- activate: function () {
- self.set('isMoving', true);
- },
- deactivate: function () {
- self.set('isMoving', false);
- }
- }).disableSelection();
- $('html').off('DOMNodeInserted', '#widget_layout');
- });
}
});
http://git-wip-us.apache.org/repos/asf/ambari/blob/6eb273e1/ambari-web/app/views/main/service/item.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/views/main/service/item.js b/ambari-web/app/views/main/service/item.js
index 43d75e6..45c783b 100644
--- a/ambari-web/app/views/main/service/item.js
+++ b/ambari-web/app/views/main/service/item.js
@@ -326,6 +326,12 @@ App.MainServiceItemView = Em.View.extend({
return App.get('services.servicesWithHeatmapTab').contains(this.get('controller.content.serviceName'));
}.property('controller.content.serviceName', 'App.services.servicesWithHeatmapTab'),
+ hasMetricTab: function() {
+ let serviceName = this.get('controller.content.serviceName');
+ let graphs = require('data/service_graph_config')[serviceName.toLowerCase()];
+ return graphs || App.StackService.find(serviceName).get('isServiceWithWidgets');
+ }.property('controller.content.serviceName'),
+
didInsertElement: function () {
this.get('controller').setStartStopState();
},
http://git-wip-us.apache.org/repos/asf/ambari/blob/6eb273e1/ambari-web/test/controllers/main/service/info/metric_test.js
----------------------------------------------------------------------
diff --git a/ambari-web/test/controllers/main/service/info/metric_test.js b/ambari-web/test/controllers/main/service/info/metric_test.js
new file mode 100644
index 0000000..5ef6279
--- /dev/null
+++ b/ambari-web/test/controllers/main/service/info/metric_test.js
@@ -0,0 +1,110 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+var App = require('app');
+require('controllers/main/service/info/metric');
+var testHelpers = require('test/helpers');
+function getController() {
+ return App.MainServiceInfoMetricsController.create();
+}
+
+describe('App.MainServiceInfoMetricsController', function () {
+
+ var controller;
+
+ beforeEach(function () {
+ controller = App.MainServiceInfoMetricsController.create();
+ });
+
+ App.TestAliases.testAsComputedOr(getController(), 'showTimeRangeControl', ['!isServiceWithEnhancedWidgets', 'someWidgetGraphExists']);
+
+
+ describe("#getActiveWidgetLayout() for Enhanced Dashboard", function () {
+
+ it("make GET call", function () {
+ controller.reopen({
+ isServiceWithEnhancedWidgets: true,
+ content: Em.Object.create({serviceName: 'HDFS'})
+ });
+ controller.getActiveWidgetLayout();
+ expect(testHelpers.findAjaxRequest('name', 'widgets.layouts.active.get')).to.exists;
+ });
+ });
+
+ describe("#getActiveWidgetLayoutSuccessCallback()", function () {
+ beforeEach(function () {
+ sinon.stub( App.widgetLayoutMapper, 'map');
+ sinon.stub( App.widgetMapper, 'map');
+ });
+ afterEach(function () {
+ App.widgetLayoutMapper.map.restore();
+ App.widgetMapper.map.restore();
+ });
+ it("isWidgetLayoutsLoaded should be set to true", function () {
+ controller.reopen({
+ isServiceWithEnhancedWidgets: true,
+ content: Em.Object.create({serviceName: 'HDFS'})
+ });
+ controller.getActiveWidgetLayoutSuccessCallback({items:[{
+ WidgetLayoutInfo: {}
+ }]});
+ expect(controller.get('isWidgetsLoaded')).to.be.true;
+ });
+
+ });
+
+ describe("#hideWidgetSuccessCallback()", function () {
+ beforeEach(function () {
+ sinon.stub(App.widgetLayoutMapper, 'map');
+ sinon.stub(controller, 'propertyDidChange');
+ var params = {
+ data: {
+ WidgetLayoutInfo: {
+ widgets: [
+ {id: 1}
+ ]
+ }
+ }
+ };
+ controller.hideWidgetSuccessCallback({}, {}, params);
+ });
+ afterEach(function () {
+ App.widgetLayoutMapper.map.restore();
+ controller.propertyDidChange.restore();
+ });
+ it("mapper is called with valid data", function () {
+ expect(App.widgetLayoutMapper.map.calledWith({
+ items: [{
+ WidgetLayoutInfo: {
+ widgets: [
+ {
+ WidgetInfo: {
+ id: 1
+ }
+ }
+ ]
+ }
+ }]
+ })).to.be.true;
+ });
+ it('`widgets` is forced to be recalculated', function () {
+ expect(controller.propertyDidChange.calledWith('widgets')).to.be.true;
+ });
+ });
+
+});
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/ambari/blob/6eb273e1/ambari-web/test/controllers/main/service/info/summary_test.js
----------------------------------------------------------------------
diff --git a/ambari-web/test/controllers/main/service/info/summary_test.js b/ambari-web/test/controllers/main/service/info/summary_test.js
index 51dd595..e5cc32a 100644
--- a/ambari-web/test/controllers/main/service/info/summary_test.js
+++ b/ambari-web/test/controllers/main/service/info/summary_test.js
@@ -18,7 +18,6 @@
var App = require('app');
require('controllers/main/service/info/summary');
-var testHelpers = require('test/helpers');
function getController() {
return App.MainServiceInfoSummaryController.create();
}
@@ -31,8 +30,6 @@ describe('App.MainServiceInfoSummaryController', function () {
controller = App.MainServiceInfoSummaryController.create();
});
-App.TestAliases.testAsComputedOr(getController(), 'showTimeRangeControl', ['!isServiceWithEnhancedWidgets', 'someWidgetGraphExists']);
-
describe('#setRangerPlugins', function () {
var cases = [
@@ -184,77 +181,4 @@ App.TestAliases.testAsComputedOr(getController(), 'showTimeRangeControl', ['!isS
});
- describe("#getActiveWidgetLayout() for Enhanced Dashboard", function () {
-
- it("make GET call", function () {
- var _controller = App.MainServiceInfoSummaryController.create({
- isServiceWithEnhancedWidgets: true,
- content: Em.Object.create({serviceName: 'HDFS'})
- });
- _controller.getActiveWidgetLayout();
- expect(testHelpers.findAjaxRequest('name', 'widgets.layouts.active.get')).to.exists;
- });
- });
-
- describe("#getActiveWidgetLayoutSuccessCallback()", function () {
- beforeEach(function () {
- sinon.stub( App.widgetLayoutMapper, 'map');
- sinon.stub( App.widgetMapper, 'map');
- });
- afterEach(function () {
- App.widgetLayoutMapper.map.restore();
- App.widgetMapper.map.restore();
- });
- it("isWidgetLayoutsLoaded should be set to true", function () {
- var _controller = App.MainServiceInfoSummaryController.create({
- isServiceWithEnhancedWidgets: true,
- content: Em.Object.create({serviceName: 'HDFS'})
- });
- _controller.getActiveWidgetLayoutSuccessCallback({items:[{
- WidgetLayoutInfo: {}
- }]});
- expect(_controller.get('isWidgetsLoaded')).to.be.true;
- });
-
- });
-
- describe("#hideWidgetSuccessCallback()", function () {
- beforeEach(function () {
- sinon.stub(App.widgetLayoutMapper, 'map');
- sinon.stub(controller, 'propertyDidChange');
- var params = {
- data: {
- WidgetLayoutInfo: {
- widgets: [
- {id: 1}
- ]
- }
- }
- };
- controller.hideWidgetSuccessCallback({}, {}, params);
- });
- afterEach(function () {
- App.widgetLayoutMapper.map.restore();
- controller.propertyDidChange.restore();
- });
- it("mapper is called with valid data", function () {
- expect(App.widgetLayoutMapper.map.calledWith({
- items: [{
- WidgetLayoutInfo: {
- widgets: [
- {
- WidgetInfo: {
- id: 1
- }
- }
- ]
- }
- }]
- })).to.be.true;
- });
- it('`widgets` is forced to be recalculated', function () {
- expect(controller.propertyDidChange.calledWith('widgets')).to.be.true;
- });
- });
-
});
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/ambari/blob/6eb273e1/ambari-web/test/views/main/service/info/metrics_view_test.js
----------------------------------------------------------------------
diff --git a/ambari-web/test/views/main/service/info/metrics_view_test.js b/ambari-web/test/views/main/service/info/metrics_view_test.js
new file mode 100644
index 0000000..916d451
--- /dev/null
+++ b/ambari-web/test/views/main/service/info/metrics_view_test.js
@@ -0,0 +1,334 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+var App = require('app');
+require('views/main/service/info/metrics_view');
+
+describe('App.MainServiceInfoMetricsView', function() {
+
+ var view = App.MainServiceInfoMetricsView.create({
+ controller: Em.Object.create({
+ content: Em.Object.create({
+ id: 'HDFS',
+ serviceName: 'HDFS',
+ hostComponents: []
+ }),
+ getActiveWidgetLayout: Em.K,
+ loadWidgetLayouts: Em.K
+ }),
+ service: Em.Object.create()
+ });
+
+ describe("#getServiceModel()", function() {
+
+ beforeEach(function() {
+ sinon.stub(App.Service, 'find').returns({serviceName: 'S1'});
+ sinon.stub(App.HDFSService, 'find').returns([{serviceName: 'HDFS'}]);
+ });
+ afterEach(function() {
+ App.Service.find.restore();
+ App.HDFSService.find.restore();
+ });
+
+ it("HDFS service", function() {
+ expect(view.getServiceModel('HDFS')).to.eql({serviceName: 'HDFS'});
+ });
+
+ it("Simple model service", function() {
+ expect(view.getServiceModel('S1')).to.eql({serviceName: 'S1'});
+ });
+ });
+
+ describe("#constructGraphObjects()", function() {
+ var mock = Em.Object.create({
+ isServiceWithWidgets: false
+ });
+
+ beforeEach(function() {
+ sinon.stub(App.StackService, 'find').returns(mock);
+ sinon.stub(view, 'getUserPref').returns({
+ complete: function(callback){callback();}
+ });
+ });
+ afterEach(function() {
+ App.StackService.find.restore();
+ view.getUserPref.restore();
+ });
+
+ it("metrics not loaded", function() {
+ mock.set('isServiceWithWidgets', false);
+ view.constructGraphObjects(null);
+ expect(view.get('serviceHasMetrics')).to.be.false;
+ expect(view.getUserPref.called).to.be.false;
+ });
+
+ it("metrics loaded", function() {
+ App.ChartServiceMetricsG1 = Em.Object.extend();
+ mock.set('isServiceWithWidgets', true);
+ view.constructGraphObjects(['G1']);
+ expect(view.get('serviceHasMetrics')).to.be.true;
+ expect(view.getUserPref.calledOnce).to.be.true;
+ expect(view.get('serviceMetricGraphs')).to.not.be.empty;
+ });
+ });
+
+ describe("#getUserPrefSuccessCallback()", function() {
+
+ it("currentTimeRangeIndex should be set", function() {
+ view.getUserPrefSuccessCallback(1);
+ expect(view.get('currentTimeRangeIndex')).to.equal(1);
+ });
+ });
+
+ describe("#getUserPrefErrorCallback()", function() {
+
+ beforeEach(function() {
+ sinon.stub(view, 'postUserPref');
+ });
+ afterEach(function() {
+ view.postUserPref.restore();
+ });
+
+ it("request.status = 404", function() {
+ view.getUserPrefErrorCallback({status: 404});
+ expect(view.get('currentTimeRangeIndex')).to.equal(0);
+ expect(view.postUserPref.calledOnce).to.be.true;
+ });
+
+ it("request.status = 403", function() {
+ view.getUserPrefErrorCallback({status: 403});
+ expect(view.postUserPref.called).to.be.false;
+ });
+ });
+
+ describe("#widgetActions", function() {
+
+ beforeEach(function() {
+ this.mock = sinon.stub(App, 'isAuthorized');
+ view.setProperties({
+ staticWidgetLayoutActions: [{id: 1}],
+ staticAdminPrivelegeWidgetActions: [{id: 2}],
+ staticGeneralWidgetActions: [{id: 3}]
+ });
+ });
+ afterEach(function() {
+ this.mock.restore();
+ });
+
+ it("not authorized", function() {
+ this.mock.returns(false);
+ view.propertyDidChange('widgetActions');
+ expect(view.get('widgetActions').mapProperty('id')).to.eql([3]);
+ });
+
+ it("is authorized", function() {
+ this.mock.returns(true);
+ App.supports.customizedWidgetLayout = true;
+ view.propertyDidChange('widgetActions');
+ expect(view.get('widgetActions').mapProperty('id')).to.eql([1, 2, 3]);
+ });
+ });
+
+ describe("#doWidgetAction()", function() {
+
+ beforeEach(function() {
+ view.set('controller.action1', Em.K);
+ sinon.stub(view.get('controller'), 'action1');
+ });
+ afterEach(function() {
+ view.get('controller').action1.restore();
+ });
+
+ it("action exist", function() {
+ view.doWidgetAction({context: 'action1'});
+ expect(view.get('controller').action1.calledOnce).to.be.true;
+ });
+ });
+
+ describe("#setTimeRange", function() {
+
+ it("range = 0", function() {
+ var widget = Em.Object.create({
+ widgetType: 'GRAPH',
+ properties: {
+ time_range: '0'
+ }
+ });
+ view.set('controller.widgets', [widget]);
+ view.setTimeRange({context: {value: '0'}});
+ expect(widget.get('properties').time_range).to.be.equal('0');
+ });
+
+ it("range = 1", function() {
+ var widget = Em.Object.create({
+ widgetType: 'GRAPH',
+ properties: {
+ time_range: 0
+ }
+ });
+ view.set('controller.widgets', [widget]);
+ view.setTimeRange({context: {value: '1'}});
+ expect(widget.get('properties').time_range).to.be.equal('1');
+ });
+ });
+
+ describe("#makeSortable()", function() {
+ var mock = {
+ on: function(arg1, arg2, callback) {
+ callback();
+ },
+ off: Em.K,
+ sortable: function() {
+ return {
+ disableSelection: Em.K
+ }
+ }
+ };
+
+ beforeEach(function() {
+ sinon.stub(window, '$').returns(mock);
+ sinon.spy(mock, 'on');
+ sinon.spy(mock, 'off');
+ sinon.spy(mock, 'sortable');
+ view.makeSortable();
+ });
+ afterEach(function() {
+ window.$.restore();
+ mock.on.restore();
+ mock.off.restore();
+ mock.sortable.restore();
+ });
+
+ it("on() should be called", function() {
+ expect(mock.on.calledWith('DOMNodeInserted', '#widget_layout')).to.be.true;
+ });
+
+ it("sortable() should be called", function() {
+ expect(mock.sortable.calledOnce).to.be.true;
+ });
+
+ it("off() should be called", function() {
+ expect(mock.off.calledWith('DOMNodeInserted', '#widget_layout')).to.be.true;
+ });
+ });
+
+ describe('#didInsertElement', function () {
+
+ beforeEach(function () {
+ sinon.stub(view, 'constructGraphObjects', Em.K);
+ this.mock = sinon.stub(App, 'get');
+ sinon.stub(view, 'getServiceModel');
+ sinon.stub(view.get('controller'), 'getActiveWidgetLayout');
+ sinon.stub(view.get('controller'), 'loadWidgetLayouts');
+ sinon.stub(view, 'makeSortable');
+ sinon.stub(view, 'addWidgetTooltip');
+
+ });
+
+ afterEach(function () {
+ view.constructGraphObjects.restore();
+ this.mock.restore();
+ view.getServiceModel.restore();
+ view.get('controller').getActiveWidgetLayout.restore();
+ view.get('controller').loadWidgetLayouts.restore();
+ view.makeSortable.restore();
+ view.addWidgetTooltip.restore();
+ });
+
+ it("getServiceModel should be called", function() {
+ view.didInsertElement();
+ expect(view.getServiceModel.calledOnce).to.be.true;
+ });
+ it("addWidgetTooltip should be called", function() {
+ view.didInsertElement();
+ expect(view.addWidgetTooltip.calledOnce).to.be.true;
+ });
+ it("makeSortable should be called", function() {
+ view.didInsertElement();
+ expect(view.makeSortable.calledOnce).to.be.true;
+ });
+ it("getActiveWidgetLayout should be called", function() {
+ view.didInsertElement();
+ expect(view.get('controller').getActiveWidgetLayout.calledOnce).to.be.true;
+ });
+
+ describe("serviceName is null, metrics not supported, widgets not supported", function() {
+ beforeEach(function () {
+ view.set('controller.content.serviceName', null);
+ this.mock.returns(false);
+ view.didInsertElement();
+ });
+
+ it("loadWidgetLayouts should not be called", function() {
+ expect(view.get('controller').loadWidgetLayouts.called).to.be.false;
+ });
+ it("constructGraphObjects should not be called", function() {
+ expect(view.constructGraphObjects.called).to.be.false;
+ });
+ });
+
+ describe("serviceName is set, metrics is supported, widgets is supported", function() {
+ beforeEach(function () {
+ view.set('controller.content.serviceName', 'S1');
+ this.mock.returns(true);
+ view.didInsertElement();
+ });
+
+ it("loadWidgetLayouts should be called", function() {
+ expect(view.get('controller').loadWidgetLayouts.calledOnce).to.be.true;
+ });
+ it("constructGraphObjects should be called", function() {
+ expect(view.constructGraphObjects.calledOnce).to.be.true;
+ });
+ });
+ });
+
+ describe("#addWidgetTooltip()", function() {
+ var mock = {
+ hoverIntent: Em.K
+ };
+
+ beforeEach(function() {
+ sinon.stub(Em.run, 'later', function(arg1, callback) {
+ callback();
+ });
+ sinon.stub(App, 'tooltip');
+ sinon.stub(window, '$').returns(mock);
+ sinon.spy(mock, 'hoverIntent');
+ view.addWidgetTooltip();
+ });
+ afterEach(function() {
+ Em.run.later.restore();
+ App.tooltip.restore();
+ window.$.restore();
+ mock.hoverIntent.restore();
+ });
+
+ it("Em.run.later should be called", function() {
+ expect(Em.run.later.calledOnce).to.be.true;
+ });
+ it("App.tooltip should be called", function() {
+ expect(App.tooltip.calledOnce).to.be.true;
+ });
+ it("hoverIntent should be called", function() {
+ expect(mock.hoverIntent.calledOnce).to.be.true;
+ });
+ });
+
+});
\ No newline at end of file
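A note on the stubbing pattern the new test file relies on: getUserPref normally returns a jQuery jqXHR, so the suite swaps it for an object whose complete() invokes its callback synchronously, which lets assertions run immediately after constructGraphObjects. A condensed sketch, assuming the same view and sinon setup as above:

// fake the async persist call so the .complete() callback fires at once
sinon.stub(view, 'getUserPref').returns({
  complete: function (callback) { callback(); }
});
view.constructGraphObjects(['G1']);
// by this point the callback has already populated serviceMetricGraphs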
[10/31] ambari git commit: Revert "AMBARI-22160. hadooplzo package
installation failed on devdeploys (aonishuk)"
Posted by jl...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/f1c4626b/ambari-server/snippet/Snippet.java
----------------------------------------------------------------------
diff --git a/ambari-server/snippet/Snippet.java b/ambari-server/snippet/Snippet.java
deleted file mode 100644
index f13d533..0000000
--- a/ambari-server/snippet/Snippet.java
+++ /dev/null
@@ -1,8 +0,0 @@
-package snippet;
-
-public class Snippet {
- public static void main(String[] args) {
- /home/user/ambari/ambari-views/bin/.project
- }
-}
-
http://git-wip-us.apache.org/repos/asf/ambari/blob/f1c4626b/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_master.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_master.py b/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_master.py
index c32044c..2224d31 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_master.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_master.py
@@ -95,9 +95,7 @@ class TestHBaseMaster(RMFTestCase):
try_install=True,
os_type=('Redhat', '6.4', 'Final'),
checked_call_mocks = [(0, "OK.", "")],
- available_packages_in_repos = ['hbase_2_3_0_1_1234'],
)
-
# only assert that the correct package is trying to be installed
self.assertResourceCalled('Package', 'hbase_2_3_0_1_1234',
http://git-wip-us.apache.org/repos/asf/ambari/blob/f1c4626b/ambari-server/src/test/python/stacks/utils/RMFTestCase.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/utils/RMFTestCase.py b/ambari-server/src/test/python/stacks/utils/RMFTestCase.py
index ae33a2a..bff8642 100644
--- a/ambari-server/src/test/python/stacks/utils/RMFTestCase.py
+++ b/ambari-server/src/test/python/stacks/utils/RMFTestCase.py
@@ -80,8 +80,7 @@ class RMFTestCase(TestCase):
mocks_dict={},
try_install=False,
command_args=[],
- log_out_files=False,
- available_packages_in_repos = []):
+ log_out_files=False):
norm_path = os.path.normpath(path)
@@ -126,7 +125,6 @@ class RMFTestCase(TestCase):
Script.instance = None
script_class_inst = RMFTestCase._get_attr(script_module, classname)()
script_class_inst.log_out_files = log_out_files
- script_class_inst.available_packages_in_repos = available_packages_in_repos
method = RMFTestCase._get_attr(script_class_inst, command)
except IOError, err:
raise RuntimeError("Cannot load class %s from %s: %s" % (classname, norm_path, err.message))
[12/31] ambari git commit: Revert "AMBARI-22160. hadooplzo package
installation failed on devdeploys (aonishuk)"
Posted by jl...@apache.org.
Revert "AMBARI-22160. hadooplzo package installation failed on devdeploys (aonishuk)"
This reverts commit fc80a1837cc613160e3c60cc3290b7e517b5cd45.
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/f1c4626b
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/f1c4626b
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/f1c4626b
Branch: refs/heads/branch-feature-AMBARI-14714
Commit: f1c4626b9b4a6aafc48b71bd7d4e892362af1843
Parents: beef96d
Author: Jonathan Hurley <jh...@hortonworks.com>
Authored: Mon Oct 9 12:22:35 2017 -0400
Committer: Jonathan Hurley <jh...@hortonworks.com>
Committed: Mon Oct 9 12:22:35 2017 -0400
----------------------------------------------------------------------
.../libraries/script/script.py | 44 +-
.../resources/Ambari-DDL-AzureDB-CREATE.sql | 2147 ------------------
ambari-server/snippet/Snippet.java | 8 -
.../stacks/2.0.6/HBASE/test_hbase_master.py | 2 -
.../src/test/python/stacks/utils/RMFTestCase.py | 4 +-
5 files changed, 15 insertions(+), 2190 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ambari/blob/f1c4626b/ambari-common/src/main/python/resource_management/libraries/script/script.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/script/script.py b/ambari-common/src/main/python/resource_management/libraries/script/script.py
index cd8fce4..d5b4469 100644
--- a/ambari-common/src/main/python/resource_management/libraries/script/script.py
+++ b/ambari-common/src/main/python/resource_management/libraries/script/script.py
@@ -501,7 +501,6 @@ class Script(object):
Script.stack_version_from_distro_select = pkg_provider.get_installed_package_version(
stack_tools.get_stack_tool_package(stack_tools.STACK_SELECTOR_NAME))
-
return Script.stack_version_from_distro_select
@@ -526,20 +525,22 @@ class Script(object):
"""
This function replaces ${stack_version} placeholder with actual version. If the package
version is passed from the server, use that as an absolute truth.
-
+
:param name name of the package
:param repo_version actual version of the repo currently installing
"""
- if not STACK_VERSION_PLACEHOLDER in name:
- return name
-
stack_version_package_formatted = ""
+ if not repo_version:
+ repo_version = self.get_stack_version_before_packages_installed()
+
package_delimiter = '-' if OSCheck.is_ubuntu_family() else '_'
# repositoryFile is the truth
# package_version should be made to the form W_X_Y_Z_nnnn
package_version = default("repositoryFile/repoVersion", None)
+ if package_version is not None:
+ package_version = package_version.replace('.', package_delimiter).replace('-', package_delimiter)
# TODO remove legacy checks
if package_version is None:
@@ -549,16 +550,6 @@ class Script(object):
if package_version is None:
package_version = default("hostLevelParams/package_version", None)
- if package_version is None or '-' not in package_version:
- self.load_available_packages()
- package_name = self.get_package_from_available(name, self.available_packages_in_repos)
- if package_name is None:
- raise Fail("Cannot match package for regexp name {0}. Available packages: {1}".format(name, self.available_packages_in_repos))
- return package_name
-
- if package_version is not None:
- package_version = package_version.replace('.', package_delimiter).replace('-', package_delimiter)
-
# The cluster effective version comes down when the version is known after the initial
# install. In that case we should not be guessing which version when invoking INSTALL, but
# use the supplied version to build the package_version
@@ -577,7 +568,6 @@ class Script(object):
# Wildcards cause a lot of troubles with installing packages, if the version contains wildcards we try to specify it.
if not package_version or '*' in package_version:
- repo_version = self.get_stack_version_before_packages_installed()
stack_version_package_formatted = repo_version.replace('.', package_delimiter).replace('-', package_delimiter) if STACK_VERSION_PLACEHOLDER in name else name
package_name = name.replace(STACK_VERSION_PLACEHOLDER, stack_version_package_formatted)
@@ -770,17 +760,6 @@ class Script(object):
"""
self.install_packages(env)
- def load_available_packages(self):
- if self.available_packages_in_repos:
- return self.available_packages_in_repos
-
- pkg_provider = get_provider("Package")
- try:
- self.available_packages_in_repos = pkg_provider.get_available_packages_in_repos(Script.config['repositoryFile']['repositories'])
- except Exception as err:
- Logger.exception("Unable to load available packages")
- self.available_packages_in_repos = []
-
def install_packages(self, env):
"""
List of packages that are required by service is received from the server
@@ -800,14 +779,20 @@ class Script(object):
return
pass
try:
+ package_list_str = config['hostLevelParams']['package_list']
agent_stack_retry_on_unavailability = bool(config['hostLevelParams']['agent_stack_retry_on_unavailability'])
agent_stack_retry_count = int(config['hostLevelParams']['agent_stack_retry_count'])
- package_list_str = config['hostLevelParams']['package_list']
+ pkg_provider = get_provider("Package")
+ try:
+ available_packages_in_repos = pkg_provider.get_available_packages_in_repos(config['repositoryFile']['repositories'])
+ except Exception as err:
+ Logger.exception("Unable to load available packages")
+ available_packages_in_repos = []
if isinstance(package_list_str, basestring) and len(package_list_str) > 0:
package_list = json.loads(package_list_str)
for package in package_list:
if self.check_package_condition(package):
- name = self.format_package_name(package['name'])
+ name = self.get_package_from_available(package['name'], available_packages_in_repos)
# HACK: On Windows, only install ambari-metrics packages using Choco Package Installer
# TODO: Update this once choco packages for hadoop are created. This is because, service metainfo.xml support
# <osFamily>any<osFamily> which would cause installation failure on Windows.
@@ -1107,6 +1092,5 @@ class Script(object):
def __init__(self):
- self.available_packages_in_repos = []
if Script.instance is not None:
raise Fail("An instantiation already exists! Use, get_instance() method.")
[19/31] ambari git commit: AMBARI-22166 - Not able to perform revert
after deleting the upgraded service (jonathanhurley)
Posted by jl...@apache.org.
AMBARI-22166 - Not able to perform revert after deleting the upgraded service (jonathanhurley)
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/8cffd722
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/8cffd722
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/8cffd722
Branch: refs/heads/branch-feature-AMBARI-14714
Commit: 8cffd72227c22da2bc3bad30f1c3e877bd26cad4
Parents: 4242225
Author: Jonathan Hurley <jh...@hortonworks.com>
Authored: Mon Oct 9 13:21:40 2017 -0400
Committer: Jonathan Hurley <jh...@hortonworks.com>
Committed: Mon Oct 9 16:40:45 2017 -0400
----------------------------------------------------------------------
.../ambari/server/state/UpgradeContext.java | 24 ++++++--
.../ambari/server/state/UpgradeContextTest.java | 60 +++++++++++++++++++-
2 files changed, 76 insertions(+), 8 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ambari/blob/8cffd722/ambari-server/src/main/java/org/apache/ambari/server/state/UpgradeContext.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/UpgradeContext.java b/ambari-server/src/main/java/org/apache/ambari/server/state/UpgradeContext.java
index 67a8950..de0f868 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/UpgradeContext.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/UpgradeContext.java
@@ -304,8 +304,8 @@ public class UpgradeContext {
throw new AmbariException(
String.format("There are no upgrades for cluster %s which are marked as revertable",
cluster.getClusterName()));
- }
-
+ }
+
if (!revertUpgrade.getOrchestration().isRevertable()) {
throw new AmbariException(String.format("The %s repository type is not revertable",
revertUpgrade.getOrchestration()));
@@ -323,14 +323,26 @@ public class UpgradeContext {
revertableUpgrade.getRepositoryVersion().getVersion()));
}
+ // !!! build all service-specific reversions
Set<RepositoryVersionEntity> priors = new HashSet<>();
+ Map<String, Service> clusterServices = cluster.getServices();
for (UpgradeHistoryEntity history : revertUpgrade.getHistory()) {
+ String serviceName = history.getServiceName();
+ String componentName = history.getComponentName();
+
priors.add(history.getFromReposistoryVersion());
- // !!! build all service-specific
- m_services.add(history.getServiceName());
- m_sourceRepositoryMap.put(history.getServiceName(), history.getTargetRepositoryVersion());
- m_targetRepositoryMap.put(history.getServiceName(), history.getFromReposistoryVersion());
+ // if the service is no longer installed, do nothing
+ if (!clusterServices.containsKey(serviceName)) {
+ LOG.warn("{}/{} will not be reverted since it is no longer installed in the cluster",
+ serviceName, componentName);
+
+ continue;
+ }
+
+ m_services.add(serviceName);
+ m_sourceRepositoryMap.put(serviceName, history.getTargetRepositoryVersion());
+ m_targetRepositoryMap.put(serviceName, history.getFromReposistoryVersion());
}
if (priors.size() != 1) {
http://git-wip-us.apache.org/repos/asf/ambari/blob/8cffd722/ambari-server/src/test/java/org/apache/ambari/server/state/UpgradeContextTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/UpgradeContextTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/UpgradeContextTest.java
index dc77fa6..5176ffe 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/state/UpgradeContextTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/state/UpgradeContextTest.java
@@ -23,6 +23,7 @@ import static junit.framework.Assert.assertTrue;
import static org.easymock.EasyMock.eq;
import static org.easymock.EasyMock.expect;
+import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
@@ -111,6 +112,11 @@ public class UpgradeContextTest extends EasyMockSupport {
private VersionDefinitionXml m_vdfXml;
/**
+ * The upgrade history to return for the completed upgrade.
+ */
+ private List<UpgradeHistoryEntity> m_upgradeHistory = new ArrayList<>();
+
+ /**
* The cluster services.
*/
private Map<String, Service> m_services = new HashMap<>();
@@ -128,7 +134,7 @@ public class UpgradeContextTest extends EasyMockSupport {
expect(upgradeHistoryEntity.getServiceName()).andReturn(HDFS_SERVICE_NAME).anyTimes();
expect(upgradeHistoryEntity.getFromReposistoryVersion()).andReturn(m_sourceRepositoryVersion).anyTimes();
expect(upgradeHistoryEntity.getTargetRepositoryVersion()).andReturn(m_targetRepositoryVersion).anyTimes();
- List<UpgradeHistoryEntity> upgradeHistory = Lists.newArrayList(upgradeHistoryEntity);
+ m_upgradeHistory = Lists.newArrayList(upgradeHistoryEntity);
expect(m_repositoryVersionDAO.findByPK(1L)).andReturn(m_sourceRepositoryVersion).anyTimes();
expect(m_repositoryVersionDAO.findByPK(99L)).andReturn(m_targetRepositoryVersion).anyTimes();
@@ -143,12 +149,13 @@ public class UpgradeContextTest extends EasyMockSupport {
expect(m_completedRevertableUpgrade.getDirection()).andReturn(Direction.UPGRADE).anyTimes();
expect(m_completedRevertableUpgrade.getRepositoryVersion()).andReturn(m_targetRepositoryVersion).anyTimes();
expect(m_completedRevertableUpgrade.getOrchestration()).andReturn(RepositoryType.PATCH).anyTimes();
- expect(m_completedRevertableUpgrade.getHistory()).andReturn(upgradeHistory).anyTimes();
+ expect(m_completedRevertableUpgrade.getHistory()).andReturn(m_upgradeHistory).anyTimes();
expect(m_completedRevertableUpgrade.getUpgradePackage()).andReturn(null).anyTimes();
RepositoryVersionEntity hdfsRepositoryVersion = createNiceMock(RepositoryVersionEntity.class);
expect(m_hdfsService.getDesiredRepositoryVersion()).andReturn(hdfsRepositoryVersion).anyTimes();
+ expect(m_zookeeperService.getDesiredRepositoryVersion()).andReturn(hdfsRepositoryVersion).anyTimes();
expect(m_cluster.getService(HDFS_SERVICE_NAME)).andReturn(m_hdfsService).anyTimes();
m_services.put(HDFS_SERVICE_NAME, m_hdfsService);
@@ -331,6 +338,55 @@ public class UpgradeContextTest extends EasyMockSupport {
}
/**
+ * Tests that the {@link UpgradeContext} for a reversion has the correct
+ * services included in the reversion if one of the services in the original
+ * upgrade has since been deleted.
+ *
+ * @throws Exception
+ */
+ @Test
+ public void testRevertWithDeletedService() throws Exception {
+ UpgradeHelper upgradeHelper = createNiceMock(UpgradeHelper.class);
+ ConfigHelper configHelper = createNiceMock(ConfigHelper.class);
+ UpgradePack upgradePack = createNiceMock(UpgradePack.class);
+
+ // give the completed upgrade 2 services which can be reverted
+ UpgradeHistoryEntity upgradeHistoryEntity = createNiceMock(UpgradeHistoryEntity.class);
+ expect(upgradeHistoryEntity.getServiceName()).andReturn(ZOOKEEPER_SERVICE_NAME).anyTimes();
+ expect(upgradeHistoryEntity.getFromReposistoryVersion()).andReturn(m_sourceRepositoryVersion).anyTimes();
+ expect(upgradeHistoryEntity.getTargetRepositoryVersion()).andReturn(m_targetRepositoryVersion).anyTimes();
+ m_upgradeHistory.add(upgradeHistoryEntity);
+
+ expect(upgradeHelper.suggestUpgradePack(EasyMock.anyString(), EasyMock.anyObject(StackId.class),
+ EasyMock.anyObject(StackId.class), EasyMock.anyObject(Direction.class),
+ EasyMock.anyObject(UpgradeType.class), EasyMock.anyString())).andReturn(upgradePack).once();
+
+ expect(m_upgradeDAO.findRevertable(1L)).andReturn(m_completedRevertableUpgrade).once();
+
+ // remove HDFS, add ZK
+ m_services.remove(HDFS_SERVICE_NAME);
+ expect(m_cluster.getService(ZOOKEEPER_SERVICE_NAME)).andReturn(m_zookeeperService).anyTimes();
+ m_services.put(ZOOKEEPER_SERVICE_NAME, m_zookeeperService);
+ assertEquals(1, m_services.size());
+
+ Map<String, Object> requestMap = new HashMap<>();
+ requestMap.put(UpgradeResourceProvider.UPGRADE_TYPE, UpgradeType.ROLLING.name());
+ requestMap.put(UpgradeResourceProvider.UPGRADE_REVERT_UPGRADE_ID, "1");
+
+ replayAll();
+
+ UpgradeContext context = new UpgradeContext(m_cluster, requestMap, null, upgradeHelper,
+ m_upgradeDAO, m_repositoryVersionDAO, configHelper);
+
+ assertEquals(Direction.DOWNGRADE, context.getDirection());
+ assertEquals(RepositoryType.PATCH, context.getOrchestrationType());
+ assertEquals(1, context.getSupportedServices().size());
+ assertTrue(context.isPatchRevert());
+
+ verifyAll();
+ }
+
+ /**
* Tests that if a different {@link UpgradeEntity} is returned instead of the one
* specified by the
*
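The testRevertWithDeletedService case above exercises the core of the fix: when building a reversion, history entries for services that are no longer installed are skipped with a warning rather than failing the revert. A simplified JavaScript rendering of that loop (names are illustrative; the authoritative version is the Java in UpgradeContext above):

function buildRevertTargets(history, clusterServices) {
  var targets = {};
  history.forEach(function (entry) {
    // if the service was deleted after the upgrade, leave it out
    if (!(entry.serviceName in clusterServices)) {
      console.warn(entry.serviceName + ' will not be reverted since it is no longer installed');
      return;
    }
    // a revert swaps the roles of the recorded source and target repos
    targets[entry.serviceName] = {
      source: entry.targetRepositoryVersion,
      target: entry.fromRepositoryVersion
    };
  });
  return targets;
}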
[06/31] ambari git commit: AMBARI-22160. hadooplzo package
installation failed on devdeploys (aonishuk)
Posted by jl...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/fc80a183/ambari-server/snippet/Snippet.java
----------------------------------------------------------------------
diff --git a/ambari-server/snippet/Snippet.java b/ambari-server/snippet/Snippet.java
new file mode 100644
index 0000000..f13d533
--- /dev/null
+++ b/ambari-server/snippet/Snippet.java
@@ -0,0 +1,8 @@
+package snippet;
+
+public class Snippet {
+ public static void main(String[] args) {
+ /home/user/ambari/ambari-views/bin/.project
+ }
+}
+
http://git-wip-us.apache.org/repos/asf/ambari/blob/fc80a183/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_master.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_master.py b/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_master.py
index 2224d31..c32044c 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_master.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_master.py
@@ -95,7 +95,9 @@ class TestHBaseMaster(RMFTestCase):
try_install=True,
os_type=('Redhat', '6.4', 'Final'),
checked_call_mocks = [(0, "OK.", "")],
+ available_packages_in_repos = ['hbase_2_3_0_1_1234'],
)
+
# only assert that the correct package is trying to be installed
self.assertResourceCalled('Package', 'hbase_2_3_0_1_1234',
http://git-wip-us.apache.org/repos/asf/ambari/blob/fc80a183/ambari-server/src/test/python/stacks/utils/RMFTestCase.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/utils/RMFTestCase.py b/ambari-server/src/test/python/stacks/utils/RMFTestCase.py
index bff8642..ae33a2a 100644
--- a/ambari-server/src/test/python/stacks/utils/RMFTestCase.py
+++ b/ambari-server/src/test/python/stacks/utils/RMFTestCase.py
@@ -80,7 +80,8 @@ class RMFTestCase(TestCase):
mocks_dict={},
try_install=False,
command_args=[],
- log_out_files=False):
+ log_out_files=False,
+ available_packages_in_repos = []):
norm_path = os.path.normpath(path)
@@ -125,6 +126,7 @@ class RMFTestCase(TestCase):
Script.instance = None
script_class_inst = RMFTestCase._get_attr(script_module, classname)()
script_class_inst.log_out_files = log_out_files
+ script_class_inst.available_packages_in_repos = available_packages_in_repos
method = RMFTestCase._get_attr(script_class_inst, command)
except IOError, err:
raise RuntimeError("Cannot load class %s from %s: %s" % (classname, norm_path, err.message))
[09/31] ambari git commit: AMBARI-22089. Hive View 2.0 - Unable to
update existing saved queries,
view creates new records under saved queries tab (pallavkul)
Posted by jl...@apache.org.
AMBARI-22089. Hive View 2.0 - Unable to update existing saved queries, view creates new records under saved queries tab (pallavkul)
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/beef96d4
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/beef96d4
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/beef96d4
Branch: refs/heads/branch-feature-AMBARI-14714
Commit: beef96d413d4986cb0cc11c317d11d5a181da14c
Parents: fc80a18
Author: pallavkul <pa...@gmail.com>
Authored: Mon Oct 9 21:17:35 2017 +0530
Committer: pallavkul <pa...@gmail.com>
Committed: Mon Oct 9 21:17:35 2017 +0530
----------------------------------------------------------------------
.../savedQueries/SavedQueryResourceManager.java | 17 ++++--
.../savedQueries/SavedQueryService.java | 46 +++++++--------
.../resources/ui/app/routes/queries/query.js | 60 ++++++++++++++------
.../resources/ui/app/services/saved-queries.js | 21 +++++++
.../hive20/src/main/resources/ui/yarn.lock | 2 +-
5 files changed, 98 insertions(+), 48 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ambari/blob/beef96d4/contrib/views/hive20/src/main/java/org/apache/ambari/view/hive20/resources/savedQueries/SavedQueryResourceManager.java
----------------------------------------------------------------------
diff --git a/contrib/views/hive20/src/main/java/org/apache/ambari/view/hive20/resources/savedQueries/SavedQueryResourceManager.java b/contrib/views/hive20/src/main/java/org/apache/ambari/view/hive20/resources/savedQueries/SavedQueryResourceManager.java
index 3690683..fff202c 100644
--- a/contrib/views/hive20/src/main/java/org/apache/ambari/view/hive20/resources/savedQueries/SavedQueryResourceManager.java
+++ b/contrib/views/hive20/src/main/java/org/apache/ambari/view/hive20/resources/savedQueries/SavedQueryResourceManager.java
@@ -115,12 +115,17 @@ public class SavedQueryResourceManager extends PersonalCRUDResourceManager<Saved
}
@Override
- public SavedQuery update(SavedQuery newObject, String id) throws ItemNotFound {
- SavedQuery savedQuery = super.update(newObject, id);
- // Emptying short query so that in next read, this gets updated with proper value
- // from the queryFile
- emptyShortQueryField(savedQuery);
- return savedQuery;
+ public SavedQuery update(SavedQuery object, String id) throws ItemNotFound {
+ String query = object.getShortQuery();
+ object.setShortQuery(makeShortQuery(query));
+ object = super.update(object, id);
+ try {
+ createDefaultQueryFile(object, query);
+
+ } catch (ServiceFormattedException e) {
+ cleanupAfterErrorAndThrowAgain(object, e);
+ }
+ return object;
}
@Override
http://git-wip-us.apache.org/repos/asf/ambari/blob/beef96d4/contrib/views/hive20/src/main/java/org/apache/ambari/view/hive20/resources/savedQueries/SavedQueryService.java
----------------------------------------------------------------------
diff --git a/contrib/views/hive20/src/main/java/org/apache/ambari/view/hive20/resources/savedQueries/SavedQueryService.java b/contrib/views/hive20/src/main/java/org/apache/ambari/view/hive20/resources/savedQueries/SavedQueryService.java
index 7139ce8..35382f9 100644
--- a/contrib/views/hive20/src/main/java/org/apache/ambari/view/hive20/resources/savedQueries/SavedQueryService.java
+++ b/contrib/views/hive20/src/main/java/org/apache/ambari/view/hive20/resources/savedQueries/SavedQueryService.java
@@ -83,36 +83,36 @@ public class SavedQueryService extends BaseService {
@Path("{queryId}")
@Produces(MediaType.APPLICATION_JSON)
public Response getOne(@PathParam("queryId") String queryId,
- @QueryParam("op") String operation) {
+ @QueryParam("op") String operation) {
try {
- final SavedQuery savedQuery = getResourceManager().read(queryId);
- if(operation.equals("download")) {
- StreamingOutput stream = new StreamingOutput() {
- @Override
- public void write(OutputStream os) throws IOException, WebApplicationException {
- Writer writer = new BufferedWriter(new OutputStreamWriter(os));
- try {
+ final SavedQuery savedQuery = getResourceManager().read(queryId);
+ if (operation != null && operation.equals("download")) {
+ StreamingOutput stream = new StreamingOutput() {
+ @Override
+ public void write(OutputStream os) throws IOException, WebApplicationException {
+ Writer writer = new BufferedWriter(new OutputStreamWriter(os));
+ try {
BufferedReader br=new BufferedReader(new InputStreamReader(getSharedObjectsFactory().getHdfsApi().open(savedQuery.getQueryFile())));
- String line;
- line=br.readLine();
- while (line != null){
- writer.write(line+"\n");
- line = br.readLine();
+ String line;
+ line=br.readLine();
+ while (line != null){
+ writer.write(line+"\n");
+ line = br.readLine();
}
writer.flush();
- } catch (InterruptedException e) {
- e.printStackTrace();
- } finally {
- writer.close();
- }
- }
- };
- return Response.ok(stream).
+ } catch (InterruptedException e) {
+ e.printStackTrace();
+ } finally {
+ writer.close();
+ }
+ }
+ };
+ return Response.ok(stream).
type(MediaType.TEXT_PLAIN).
- build();
+ build();
}
else {
- JSONObject object = new JSONObject();
+ JSONObject object = new JSONObject();
object.put("savedQuery", savedQuery);
return Response.ok(object).build();
}
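Most of this hunk is re-indentation; the functional fix is the null guard on the op query parameter, which previously caused a NullPointerException whenever the parameter was absent. In Java a constant-first comparison makes the explicit guard unnecessary; a minimal illustration (not the committed form):

    if (operation.equals("download")) { ... }     // NPE when ?op= is missing
    if ("download".equals(operation)) { ... }     // null-safe equivalent, no guard needed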
http://git-wip-us.apache.org/repos/asf/ambari/blob/beef96d4/contrib/views/hive20/src/main/resources/ui/app/routes/queries/query.js
----------------------------------------------------------------------
diff --git a/contrib/views/hive20/src/main/resources/ui/app/routes/queries/query.js b/contrib/views/hive20/src/main/resources/ui/app/routes/queries/query.js
index 9e8b6db..9bb5c8f 100644
--- a/contrib/views/hive20/src/main/resources/ui/app/routes/queries/query.js
+++ b/contrib/views/hive20/src/main/resources/ui/app/routes/queries/query.js
@@ -631,6 +631,8 @@ export default Ember.Route.extend(UILoggerMixin, {
let owner = this.get('controller.model').get('owner');
let queryFile = this.get('controller.model').get('queryFile');
let logFile = this.get('controller.model').get('logFile');
+ let shortQuery = (currentQuery.length > 0) ? currentQuery : ";";
+ let savedQueryId = this.get('controller.model').get('id');
let payload = {"title" : newTitle,
"dataBase": selectedDb,
@@ -639,32 +641,54 @@ export default Ember.Route.extend(UILoggerMixin, {
"queryFile" : queryFile,
"logFile" : logFile};
- let newSaveQuery = this.get('store').createRecord('saved-query',
- { dataBase:selectedDb,
- title:newTitle,
- queryFile: queryFile,
- owner: owner,
- shortQuery: (currentQuery.length > 0) ? currentQuery : ";"
- });
+ let existingSavedQuery = this.get('store').peekRecord('saved-query', savedQueryId);
- newSaveQuery.save().then((data) => {
- console.log('saved query saved');
+ if(existingSavedQuery){
- this.get('controller.model').set('title', newTitle);
- this.get('controller.model').set('isQueryDirty', false);
- this.get('controller').set('worksheetModalSuccess', true);
+ this.get('savedQueries').updateSavedQuery(existingSavedQuery.get('id'), shortQuery, selectedDb, owner).then( data => {
+ console.log('saved query updated.');
+ this.get('controller.model').set('title', newTitle);
+ this.get('controller.model').set('isQueryDirty', false);
+ this.get('controller').set('worksheetModalSuccess', true);
- Ember.run.later(() => {
- this.get('controller').set('showWorksheetModal', false);
- this.closeWorksheetAfterSave();
- }, 2 * 1000);
+ Ember.run.later(() => {
+ this.get('controller').set('showWorksheetModal', false);
+ this.closeWorksheetAfterSave();
+ }, 2 * 1000);
- });
+ }).catch(function (response) {
+ console.log('error', response);
+ });
+
+ } else{
+
+ let newSaveQuery = this.get('store').createRecord('saved-query',
+ { dataBase:selectedDb,
+ title:newTitle,
+ queryFile: queryFile,
+ owner: owner,
+ shortQuery: (currentQuery.length > 0) ? currentQuery : ";"
+ });
+
+ newSaveQuery.save().then((data) => {
+ console.log('saved query saved');
+ this.get('controller.model').set('title', newTitle);
+ this.get('controller.model').set('isQueryDirty', false);
+ this.get('controller').set('worksheetModalSuccess', true);
+
+ Ember.run.later(() => {
+ this.get('controller').set('showWorksheetModal', false);
+ this.closeWorksheetAfterSave();
+ }, 2 * 1000);
+
+ });
+
+ }
},
- closeWorksheetModal(){
+ closeWorksheetModal(){
this.get('controller').set('showWorksheetModal', false);
this.closeWorksheetAfterSave();
this.get('controller.model').set('tabDataToClose', null);
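The update-or-create branch above hinges on peekRecord, which consults only Ember Data's in-memory store and returns null on a miss, without issuing a network request. A worksheet whose saved query was loaded earlier therefore takes the update path; anything else falls through to createRecord. The shape of the branch, reduced to its essentials (body comments are illustrative):

    // peekRecord: local cache only, no network round-trip
    let existing = this.get('store').peekRecord('saved-query', savedQueryId);
    if (existing) {
      // update the record the view already knows about
      this.get('savedQueries').updateSavedQuery(existing.get('id'), shortQuery, selectedDb, owner);
    } else {
      // nothing cached under this id: create a fresh saved query
      this.get('store').createRecord('saved-query', { /* ... */ }).save();
    }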
http://git-wip-us.apache.org/repos/asf/ambari/blob/beef96d4/contrib/views/hive20/src/main/resources/ui/app/services/saved-queries.js
----------------------------------------------------------------------
diff --git a/contrib/views/hive20/src/main/resources/ui/app/services/saved-queries.js b/contrib/views/hive20/src/main/resources/ui/app/services/saved-queries.js
index 4b4b29e..b1a28ce 100644
--- a/contrib/views/hive20/src/main/resources/ui/app/services/saved-queries.js
+++ b/contrib/views/hive20/src/main/resources/ui/app/services/saved-queries.js
@@ -50,6 +50,27 @@ export default Ember.Service.extend({
});
},
+ updateSavedQuery(savedQueryId, shortQuery, selectedDb, owner){
+ return new Ember.RSVP.Promise((resolve, reject) => {
+
+ this.get('store').findRecord('saved-query', savedQueryId, {async: true} ).then(savedQuery => {
+ savedQuery.set('shortQuery', shortQuery);
+ savedQuery.set('dataBase', selectedDb );
+ savedQuery.set('owner', owner );
+ savedQuery.save().then(() => {
+ return resolve("");
+ })
+
+
+
+
+ }).catch(function (response) {
+ console.log('error', response);
+ return reject(response);
+ });
+ });
+ },
+
fetchSavedQuery(path) {
let url = this.get('store').adapterFor('application').buildURL()+ '/files/' + encodeURIComponent(path);
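One caveat in the new updateSavedQuery: the explicit-promise wrapper only wires findRecord failures into reject. Because savedQuery.save() is not returned from the .then handler, a failed save never reaches the outer catch, leaving the returned promise pending and the rejection unhandled. Returning each promise flattens the chain and propagates both failure modes; a sketch of the equivalent, assuming the same record API:

    updateSavedQuery(savedQueryId, shortQuery, selectedDb, owner) {
      // returning the chain surfaces both find and save failures to the caller
      return this.get('store').findRecord('saved-query', savedQueryId).then(savedQuery => {
        savedQuery.set('shortQuery', shortQuery);
        savedQuery.set('dataBase', selectedDb);
        savedQuery.set('owner', owner);
        return savedQuery.save();
      });
    }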
http://git-wip-us.apache.org/repos/asf/ambari/blob/beef96d4/contrib/views/hive20/src/main/resources/ui/yarn.lock
----------------------------------------------------------------------
diff --git a/contrib/views/hive20/src/main/resources/ui/yarn.lock b/contrib/views/hive20/src/main/resources/ui/yarn.lock
index 477a15c..607cf81 100644
--- a/contrib/views/hive20/src/main/resources/ui/yarn.lock
+++ b/contrib/views/hive20/src/main/resources/ui/yarn.lock
@@ -569,7 +569,7 @@ babel-plugin-transform-es2015-block-scoped-functions@^6.22.0:
dependencies:
babel-runtime "^6.22.0"
-babel-plugin-transform-es2015-block-scoping@^6.23.0:
+babel-plugin-transform-es2015-block-scoping@^6.23.0, babel-plugin-transform-es2015-block-scoping@^6.24.1:
version "6.24.1"
resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-block-scoping/-/babel-plugin-transform-es2015-block-scoping-6.24.1.tgz#76c295dc3a4741b1665adfd3167215dcff32a576"
dependencies:
[21/31] ambari git commit: AMBARI-22160. hadooplzo package installation failed on devdeploys (aonishuk)
Posted by jl...@apache.org.
AMBARI-22160. hadooplzo package installation failed on devdeploys (aonishuk)
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/e037a8d7
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/e037a8d7
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/e037a8d7
Branch: refs/heads/branch-feature-AMBARI-14714
Commit: e037a8d7194ac97da9f746e52eb53cf15ba2415f
Parents: 3b8e807
Author: Andrew Onishuk <ao...@hortonworks.com>
Authored: Tue Oct 10 14:30:13 2017 +0300
Committer: Andrew Onishuk <ao...@hortonworks.com>
Committed: Tue Oct 10 14:30:13 2017 +0300
----------------------------------------------------------------------
.../libraries/script/script.py | 45 ++++++++++++++------
.../stacks/2.0.6/HBASE/test_hbase_master.py | 2 +
.../src/test/python/stacks/utils/RMFTestCase.py | 4 +-
3 files changed, 37 insertions(+), 14 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ambari/blob/e037a8d7/ambari-common/src/main/python/resource_management/libraries/script/script.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/script/script.py b/ambari-common/src/main/python/resource_management/libraries/script/script.py
index d5b4469..4282213 100644
--- a/ambari-common/src/main/python/resource_management/libraries/script/script.py
+++ b/ambari-common/src/main/python/resource_management/libraries/script/script.py
@@ -501,6 +501,7 @@ class Script(object):
Script.stack_version_from_distro_select = pkg_provider.get_installed_package_version(
stack_tools.get_stack_tool_package(stack_tools.STACK_SELECTOR_NAME))
+
return Script.stack_version_from_distro_select
@@ -525,22 +526,20 @@ class Script(object):
"""
This function replaces ${stack_version} placeholder with actual version. If the package
version is passed from the server, use that as an absolute truth.
-
+
:param name name of the package
:param repo_version actual version of the repo currently installing
"""
- stack_version_package_formatted = ""
+ if not STACK_VERSION_PLACEHOLDER in name:
+ return name
- if not repo_version:
- repo_version = self.get_stack_version_before_packages_installed()
+ stack_version_package_formatted = ""
package_delimiter = '-' if OSCheck.is_ubuntu_family() else '_'
# repositoryFile is the truth
# package_version should be made to the form W_X_Y_Z_nnnn
package_version = default("repositoryFile/repoVersion", None)
- if package_version is not None:
- package_version = package_version.replace('.', package_delimiter).replace('-', package_delimiter)
# TODO remove legacy checks
if package_version is None:
@@ -550,6 +549,17 @@ class Script(object):
if package_version is None:
package_version = default("hostLevelParams/package_version", None)
+ package_version = None
+ if (package_version is None or '-' not in package_version) and default('/repositoryFile', None):
+ self.load_available_packages()
+ package_name = self.get_package_from_available(name, self.available_packages_in_repos)
+ if package_name is None:
+ raise Fail("Cannot match package for regexp name {0}. Available packages: {1}".format(name, self.available_packages_in_repos))
+ return package_name
+
+ if package_version is not None:
+ package_version = package_version.replace('.', package_delimiter).replace('-', package_delimiter)
+
# The cluster effective version comes down when the version is known after the initial
# install. In that case we should not be guessing which version when invoking INSTALL, but
# use the supplied version to build the package_version
@@ -568,6 +578,7 @@ class Script(object):
# Wildcards cause a lot of troubles with installing packages, if the version contains wildcards we try to specify it.
if not package_version or '*' in package_version:
+ repo_version = self.get_stack_version_before_packages_installed()
stack_version_package_formatted = repo_version.replace('.', package_delimiter).replace('-', package_delimiter) if STACK_VERSION_PLACEHOLDER in name else name
package_name = name.replace(STACK_VERSION_PLACEHOLDER, stack_version_package_formatted)
@@ -760,6 +771,19 @@ class Script(object):
"""
self.install_packages(env)
+ def load_available_packages(self):
+ if self.available_packages_in_repos:
+ return self.available_packages_in_repos
+
+
+ pkg_provider = get_provider("Package")
+ try:
+ available_packages_in_repos = pkg_provider.get_available_packages_in_repos(self.get_config()['repositoryFile']['repositories'])
+ except Exception as err:
+ Logger.exception("Unable to load available packages")
+ available_packages_in_repos = []
+
+
def install_packages(self, env):
"""
List of packages that are required by service is received from the server
@@ -782,17 +806,11 @@ class Script(object):
package_list_str = config['hostLevelParams']['package_list']
agent_stack_retry_on_unavailability = bool(config['hostLevelParams']['agent_stack_retry_on_unavailability'])
agent_stack_retry_count = int(config['hostLevelParams']['agent_stack_retry_count'])
- pkg_provider = get_provider("Package")
- try:
- available_packages_in_repos = pkg_provider.get_available_packages_in_repos(config['repositoryFile']['repositories'])
- except Exception as err:
- Logger.exception("Unable to load available packages")
- available_packages_in_repos = []
if isinstance(package_list_str, basestring) and len(package_list_str) > 0:
package_list = json.loads(package_list_str)
for package in package_list:
if self.check_package_condition(package):
- name = self.get_package_from_available(package['name'], available_packages_in_repos)
+ name = self.format_package_name(package['name'])
# HACK: On Windows, only install ambari-metrics packages using Choco Package Installer
# TODO: Update this once choco packages for hadoop are created. This is because, service metainfo.xml support
# <osFamily>any<osFamily> which would cause installation failure on Windows.
@@ -1092,5 +1110,6 @@ class Script(object):
def __init__(self):
+ self.available_packages_in_repos = []
if Script.instance is not None:
raise Fail("An instantiation already exists! Use, get_instance() method.")
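Read together, these hunks split format_package_name into three paths: names without the ${stack_version} placeholder pass through untouched; when a repositoryFile is available, the name is matched against the packages actually visible in the configured repos; otherwise the placeholder is substituted with a normalized version string. A condensed sketch of the resulting flow (logging, the legacy hostLevelParams lookups, and the wildcard fallback omitted; note the committed hunk also nulls package_version just before the check, so the repo match is preferred whenever a repositoryFile exists):

    def format_package_name(self, name, repo_version):
        if STACK_VERSION_PLACEHOLDER not in name:    # plain names pass through
            return name
        package_delimiter = '-' if OSCheck.is_ubuntu_family() else '_'
        package_version = default("repositoryFile/repoVersion", None)
        if (package_version is None or '-' not in package_version) and default('/repositoryFile', None):
            # no usable server-side version: match the regexp-style name against
            # the packages actually present in the configured repos
            self.load_available_packages()
            package_name = self.get_package_from_available(name, self.available_packages_in_repos)
            if package_name is None:
                raise Fail("Cannot match package for regexp name {0}".format(name))
            return package_name
        # otherwise substitute the version, normalized to the W_X_Y_Z_nnnn form
        package_version = package_version.replace('.', package_delimiter).replace('-', package_delimiter)
        return name.replace(STACK_VERSION_PLACEHOLDER, package_version)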
http://git-wip-us.apache.org/repos/asf/ambari/blob/e037a8d7/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_master.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_master.py b/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_master.py
index 2224d31..e32393d 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_master.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_master.py
@@ -95,8 +95,10 @@ class TestHBaseMaster(RMFTestCase):
try_install=True,
os_type=('Redhat', '6.4', 'Final'),
checked_call_mocks = [(0, "OK.", "")],
+ available_packages_in_repos = ['hbase_2_3_0_1_1234'],
)
+
# only assert that the correct package is trying to be installed
self.assertResourceCalled('Package', 'hbase_2_3_0_1_1234',
retry_count=5,
http://git-wip-us.apache.org/repos/asf/ambari/blob/e037a8d7/ambari-server/src/test/python/stacks/utils/RMFTestCase.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/utils/RMFTestCase.py b/ambari-server/src/test/python/stacks/utils/RMFTestCase.py
index bff8642..ae33a2a 100644
--- a/ambari-server/src/test/python/stacks/utils/RMFTestCase.py
+++ b/ambari-server/src/test/python/stacks/utils/RMFTestCase.py
@@ -80,7 +80,8 @@ class RMFTestCase(TestCase):
mocks_dict={},
try_install=False,
command_args=[],
- log_out_files=False):
+ log_out_files=False,
+ available_packages_in_repos = []):
norm_path = os.path.normpath(path)
@@ -125,6 +126,7 @@ class RMFTestCase(TestCase):
Script.instance = None
script_class_inst = RMFTestCase._get_attr(script_module, classname)()
script_class_inst.log_out_files = log_out_files
+ script_class_inst.available_packages_in_repos = available_packages_in_repos
method = RMFTestCase._get_attr(script_class_inst, command)
except IOError, err:
raise RuntimeError("Cannot load class %s from %s: %s" % (classname, norm_path, err.message))
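The harness change threads a canned package list into the Script instance, so tests like the HBase one above no longer depend on a live package provider. A hypothetical invocation in the style of the existing tests (the path, classname, and config file name here are illustrative, not copied from the suite):

    self.executeScript("2.0.6/services/HBASE/package/scripts/hbase_master.py",
                       classname = "HbaseMaster",
                       command = "install",
                       config_file = "hbase_default.json",    # illustrative
                       try_install = True,
                       available_packages_in_repos = ['hbase_2_3_0_1_1234'])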
[22/31] ambari git commit: AMBARI-22160. hadooplzo package installation failed on devdeploys (aonishuk)
Posted by jl...@apache.org.
AMBARI-22160. hadooplzo package installation failed on devdeploys (aonishuk)
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/e19db403
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/e19db403
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/e19db403
Branch: refs/heads/branch-feature-AMBARI-14714
Commit: e19db403e8a3320a5b47503e43e2a35277149a3f
Parents: e037a8d
Author: Andrew Onishuk <ao...@hortonworks.com>
Authored: Tue Oct 10 15:32:10 2017 +0300
Committer: Andrew Onishuk <ao...@hortonworks.com>
Committed: Tue Oct 10 15:32:10 2017 +0300
----------------------------------------------------------------------
.../python/resource_management/libraries/script/script.py | 4 ++--
.../HDFS/2.1.0.2.0/package/scripts/hdfs.py | 10 ++++++----
.../HDFS/2.1.0.2.0/package/scripts/install_params.py | 6 ------
.../HDFS/2.1.0.2.0/package/scripts/params_linux.py | 2 --
.../HDFS/3.0.0.3.0/package/scripts/hdfs.py | 10 ++++++----
.../HDFS/3.0.0.3.0/package/scripts/install_params.py | 6 ------
.../HDFS/3.0.0.3.0/package/scripts/params_linux.py | 2 --
.../OOZIE/4.0.0.2.0/package/scripts/oozie.py | 6 ++++--
.../OOZIE/4.0.0.2.0/package/scripts/params_linux.py | 3 ---
.../OOZIE/4.2.0.3.0/package/scripts/oozie.py | 5 +++--
.../OOZIE/4.2.0.3.0/package/scripts/params_linux.py | 3 ---
11 files changed, 21 insertions(+), 36 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ambari/blob/e19db403/ambari-common/src/main/python/resource_management/libraries/script/script.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/script/script.py b/ambari-common/src/main/python/resource_management/libraries/script/script.py
index 4282213..bf8c0dc 100644
--- a/ambari-common/src/main/python/resource_management/libraries/script/script.py
+++ b/ambari-common/src/main/python/resource_management/libraries/script/script.py
@@ -778,10 +778,10 @@ class Script(object):
pkg_provider = get_provider("Package")
try:
- available_packages_in_repos = pkg_provider.get_available_packages_in_repos(self.get_config()['repositoryFile']['repositories'])
+ self.available_packages_in_repos = pkg_provider.get_available_packages_in_repos(self.get_config()['repositoryFile']['repositories'])
except Exception as err:
Logger.exception("Unable to load available packages")
- available_packages_in_repos = []
+ self.available_packages_in_repos = []
def install_packages(self, env):
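With the assignment moved onto self, load_available_packages now pairs correctly with its early-return guard and behaves as a memoized loader; it also lets tests pre-seed the attribute, as RMFTestCase does above. The combined idiom once both commits are applied (a consolidation of the two hunks, not new behavior):

    def load_available_packages(self):
        if self.available_packages_in_repos:    # cached, or seeded by a test harness
            return self.available_packages_in_repos
        pkg_provider = get_provider("Package")
        try:
            self.available_packages_in_repos = pkg_provider.get_available_packages_in_repos(
                self.get_config()['repositoryFile']['repositories'])
        except Exception:
            Logger.exception("Unable to load available packages")
            self.available_packages_in_repos = []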
http://git-wip-us.apache.org/repos/asf/ambari/blob/e19db403/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs.py
index e054209..07c7616 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs.py
@@ -25,6 +25,7 @@ from resource_management.core.resources import Package
from resource_management.core.source import Template
from resource_management.core.resources.service import ServiceConfig
from resource_management.libraries.resources.xml_config import XmlConfig
+from resource_management.libraries.functions.get_lzo_packages import get_lzo_packages
import os
from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
from ambari_commons import OSConst
@@ -138,10 +139,11 @@ def hdfs(name=None):
content=Template("slaves.j2")
)
- if params.lzo_enabled and len(params.lzo_packages) > 0:
- Package(params.lzo_packages,
- retry_on_repo_unavailability=params.agent_stack_retry_on_unavailability,
- retry_count=params.agent_stack_retry_count)
+ if params.lzo_enabled:
+ lzo_packages = get_lzo_packages(params.stack_version_unformatted)
+ Package(lzo_packages,
+ retry_on_repo_unavailability=params.agent_stack_retry_on_unavailability,
+ retry_count=params.agent_stack_retry_count)
def install_snappy():
import params
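This defers LZO package resolution from params import time to the moment the packages are actually installed, which is why install_params.py and params_linux.py below can drop their get_lzo_packages imports and the exclude_packages bookkeeping. The deferred-resolution idiom, annotated:

    if params.lzo_enabled:
        # resolved only when LZO is actually configured, instead of on every
        # command via params.py
        lzo_packages = get_lzo_packages(params.stack_version_unformatted)
        Package(lzo_packages,
                retry_on_repo_unavailability=params.agent_stack_retry_on_unavailability,
                retry_count=params.agent_stack_retry_count)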
http://git-wip-us.apache.org/repos/asf/ambari/blob/e19db403/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/install_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/install_params.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/install_params.py
index fe488c3..235f231 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/install_params.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/install_params.py
@@ -23,7 +23,6 @@ if OSCheck.is_windows_family():
exclude_packages = []
else:
from resource_management.libraries.functions.default import default
- from resource_management.libraries.functions.get_lzo_packages import get_lzo_packages
from resource_management.libraries.script.script import Script
_config = Script.get_config()
@@ -32,8 +31,3 @@ else:
# The logic for LZO also exists in OOZIE's params.py
io_compression_codecs = default("/configurations/core-site/io.compression.codecs", None)
lzo_enabled = io_compression_codecs is not None and "com.hadoop.compression.lzo" in io_compression_codecs.lower()
- lzo_packages = get_lzo_packages(stack_version_unformatted)
-
- exclude_packages = []
- if not lzo_enabled:
- exclude_packages += lzo_packages
http://git-wip-us.apache.org/repos/asf/ambari/blob/e19db403/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params_linux.py
index 76b430b..bb6349b 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params_linux.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params_linux.py
@@ -40,7 +40,6 @@ from resource_management.libraries.functions.get_not_managed_resources import ge
from resource_management.libraries.script.script import Script
from resource_management.libraries.resources.hdfs_resource import HdfsResource
from resource_management.libraries.functions.format_jvm_option import format_jvm_option
-from resource_management.libraries.functions.get_lzo_packages import get_lzo_packages
from resource_management.libraries.functions.hdfs_utils import is_https_enabled_in_hdfs
from resource_management.libraries.functions import is_empty
from resource_management.libraries.functions.get_architecture import get_architecture
@@ -389,7 +388,6 @@ HdfsResource = functools.partial(
# The logic for LZO also exists in OOZIE's params.py
io_compression_codecs = default("/configurations/core-site/io.compression.codecs", None)
lzo_enabled = io_compression_codecs is not None and "com.hadoop.compression.lzo" in io_compression_codecs.lower()
-lzo_packages = get_lzo_packages(stack_version_unformatted)
name_node_params = default("/commandParams/namenode", None)
http://git-wip-us.apache.org/repos/asf/ambari/blob/e19db403/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/hdfs.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/hdfs.py b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/hdfs.py
index e054209..07c7616 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/hdfs.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/hdfs.py
@@ -25,6 +25,7 @@ from resource_management.core.resources import Package
from resource_management.core.source import Template
from resource_management.core.resources.service import ServiceConfig
from resource_management.libraries.resources.xml_config import XmlConfig
+from resource_management.libraries.functions.get_lzo_packages import get_lzo_packages
import os
from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
from ambari_commons import OSConst
@@ -138,10 +139,11 @@ def hdfs(name=None):
content=Template("slaves.j2")
)
- if params.lzo_enabled and len(params.lzo_packages) > 0:
- Package(params.lzo_packages,
- retry_on_repo_unavailability=params.agent_stack_retry_on_unavailability,
- retry_count=params.agent_stack_retry_count)
+ if params.lzo_enabled:
+ lzo_packages = get_lzo_packages(params.stack_version_unformatted)
+ Package(lzo_packages,
+ retry_on_repo_unavailability=params.agent_stack_retry_on_unavailability,
+ retry_count=params.agent_stack_retry_count)
def install_snappy():
import params
http://git-wip-us.apache.org/repos/asf/ambari/blob/e19db403/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/install_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/install_params.py b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/install_params.py
index fe488c3..235f231 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/install_params.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/install_params.py
@@ -23,7 +23,6 @@ if OSCheck.is_windows_family():
exclude_packages = []
else:
from resource_management.libraries.functions.default import default
- from resource_management.libraries.functions.get_lzo_packages import get_lzo_packages
from resource_management.libraries.script.script import Script
_config = Script.get_config()
@@ -32,8 +31,3 @@ else:
# The logic for LZO also exists in OOZIE's params.py
io_compression_codecs = default("/configurations/core-site/io.compression.codecs", None)
lzo_enabled = io_compression_codecs is not None and "com.hadoop.compression.lzo" in io_compression_codecs.lower()
- lzo_packages = get_lzo_packages(stack_version_unformatted)
-
- exclude_packages = []
- if not lzo_enabled:
- exclude_packages += lzo_packages
http://git-wip-us.apache.org/repos/asf/ambari/blob/e19db403/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/params_linux.py
index de735f4..2fa6208 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/params_linux.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/params_linux.py
@@ -40,7 +40,6 @@ from resource_management.libraries.functions.get_not_managed_resources import ge
from resource_management.libraries.script.script import Script
from resource_management.libraries.resources.hdfs_resource import HdfsResource
from resource_management.libraries.functions.format_jvm_option import format_jvm_option
-from resource_management.libraries.functions.get_lzo_packages import get_lzo_packages
from resource_management.libraries.functions.hdfs_utils import is_https_enabled_in_hdfs
from resource_management.libraries.functions import is_empty
from resource_management.libraries.functions.setup_ranger_plugin_xml import get_audit_configs, generate_ranger_service_config
@@ -378,7 +377,6 @@ HdfsResource = functools.partial(
# The logic for LZO also exists in OOZIE's params.py
io_compression_codecs = default("/configurations/core-site/io.compression.codecs", None)
lzo_enabled = io_compression_codecs is not None and "com.hadoop.compression.lzo" in io_compression_codecs.lower()
-lzo_packages = get_lzo_packages(stack_version_unformatted)
name_node_params = default("/commandParams/namenode", None)
http://git-wip-us.apache.org/repos/asf/ambari/blob/e19db403/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie.py b/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie.py
index 64f9d54..f215a1e 100644
--- a/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie.py
+++ b/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie.py
@@ -37,6 +37,7 @@ from resource_management.libraries.functions.copy_tarball import get_current_ver
from resource_management.libraries.resources.xml_config import XmlConfig
from resource_management.libraries.script.script import Script
from resource_management.libraries.functions.security_commons import update_credential_provider_path
+from resource_management.libraries.functions.get_lzo_packages import get_lzo_packages
from resource_management.core.resources.packaging import Package
from resource_management.core.shell import as_user, as_sudo, call, checked_call
from resource_management.core.exceptions import Fail
@@ -305,8 +306,9 @@ def oozie_server_specific(upgrade_type):
Execute(format('{sudo} chown {oozie_user}:{user_group} {oozie_libext_dir}/falcon-oozie-el-extension-*.jar'),
not_if = no_op_test)
- if params.lzo_enabled and len(params.all_lzo_packages) > 0:
- Package(params.all_lzo_packages,
+ if params.lzo_enabled:
+ all_lzo_packages = get_lzo_packages(params.stack_version_unformatted)
+ Package(all_lzo_packages,
retry_on_repo_unavailability=params.agent_stack_retry_on_unavailability,
retry_count=params.agent_stack_retry_count)
Execute(format('{sudo} cp {hadoop_lib_home}/hadoop-lzo*.jar {oozie_lib_dir}'),
http://git-wip-us.apache.org/repos/asf/ambari/blob/e19db403/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/params_linux.py
index b66e157..a0f0672 100644
--- a/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/params_linux.py
+++ b/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/params_linux.py
@@ -30,7 +30,6 @@ from resource_management.libraries.functions import get_port_from_url
from resource_management.libraries.functions.get_not_managed_resources import get_not_managed_resources
from resource_management.libraries.functions.setup_atlas_hook import has_atlas_in_cluster
from resource_management.libraries.script.script import Script
-from resource_management.libraries.functions.get_lzo_packages import get_lzo_packages
from resource_management.libraries.functions.expect import expect
from resource_management.libraries.resources.hdfs_resource import HdfsResource
from resource_management.libraries.functions.get_architecture import get_architecture
@@ -388,5 +387,3 @@ is_webhdfs_enabled = config['configurations']['hdfs-site']['dfs.webhdfs.enabled'
# The logic for LZO also exists in HDFS' params.py
io_compression_codecs = default("/configurations/core-site/io.compression.codecs", None)
lzo_enabled = io_compression_codecs is not None and "com.hadoop.compression.lzo" in io_compression_codecs.lower()
-
-all_lzo_packages = get_lzo_packages(stack_version_unformatted)
http://git-wip-us.apache.org/repos/asf/ambari/blob/e19db403/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/scripts/oozie.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/scripts/oozie.py b/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/scripts/oozie.py
index d916d3b..0771e93 100644
--- a/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/scripts/oozie.py
+++ b/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/scripts/oozie.py
@@ -275,8 +275,9 @@ def oozie_server_specific():
Execute(format('{sudo} chown {oozie_user}:{user_group} {oozie_libext_dir}/falcon-oozie-el-extension-*.jar'),
not_if = no_op_test)
- if params.lzo_enabled and len(params.all_lzo_packages) > 0:
- Package(params.all_lzo_packages,
+ if params.lzo_enabled:
+ all_lzo_packages = get_lzo_packages(params.stack_version_unformatted)
+ Package(all_lzo_packages,
retry_on_repo_unavailability=params.agent_stack_retry_on_unavailability,
retry_count=params.agent_stack_retry_count)
Execute(format('{sudo} cp {hadoop_lib_home}/hadoop-lzo*.jar {oozie_lib_dir}'),
http://git-wip-us.apache.org/repos/asf/ambari/blob/e19db403/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/scripts/params_linux.py
index d30a465..70b89b7 100644
--- a/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/scripts/params_linux.py
+++ b/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/scripts/params_linux.py
@@ -28,7 +28,6 @@ from resource_management.libraries.functions import get_port_from_url
from resource_management.libraries.functions.get_not_managed_resources import get_not_managed_resources
from resource_management.libraries.functions.setup_atlas_hook import has_atlas_in_cluster
from resource_management.libraries.script.script import Script
-from resource_management.libraries.functions.get_lzo_packages import get_lzo_packages
from resource_management.libraries.functions.expect import expect
from resource_management.libraries.resources.hdfs_resource import HdfsResource
from resource_management.libraries.functions.get_architecture import get_architecture
@@ -370,5 +369,3 @@ is_webhdfs_enabled = config['configurations']['hdfs-site']['dfs.webhdfs.enabled'
# The logic for LZO also exists in HDFS' params.py
io_compression_codecs = default("/configurations/core-site/io.compression.codecs", None)
lzo_enabled = io_compression_codecs is not None and "com.hadoop.compression.lzo" in io_compression_codecs.lower()
-
-all_lzo_packages = get_lzo_packages(stack_version_unformatted)
[03/31] ambari git commit: Revert "AMBARI-22089. Hive View 2.0 - Unable to update existing saved queries, view creates new records under saved queries tab (pallavkul)"
Posted by jl...@apache.org.
Revert "AMBARI-22089. Hive View 2.0 - Unable to update existing saved queries, view creates new records under saved queries tab (pallavkul)"
This reverts commit d3b67eeab455b01f8f921039b50818a6ded32839.
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/e83c86dc
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/e83c86dc
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/e83c86dc
Branch: refs/heads/branch-feature-AMBARI-14714
Commit: e83c86dc310e4878f57f2cec9e32e8dc83394913
Parents: ce2a0a0
Author: pallavkul <pa...@gmail.com>
Authored: Mon Oct 9 16:47:14 2017 +0530
Committer: pallavkul <pa...@gmail.com>
Committed: Mon Oct 9 16:47:14 2017 +0530
----------------------------------------------------------------------
.../savedQueries/SavedQueryResourceManager.java | 17 ++----
.../resources/ui/app/routes/queries/query.js | 63 +++++---------------
.../hive20/src/main/resources/ui/yarn.lock | 2 +-
3 files changed, 21 insertions(+), 61 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ambari/blob/e83c86dc/contrib/views/hive20/src/main/java/org/apache/ambari/view/hive20/resources/savedQueries/SavedQueryResourceManager.java
----------------------------------------------------------------------
diff --git a/contrib/views/hive20/src/main/java/org/apache/ambari/view/hive20/resources/savedQueries/SavedQueryResourceManager.java b/contrib/views/hive20/src/main/java/org/apache/ambari/view/hive20/resources/savedQueries/SavedQueryResourceManager.java
index fff202c..3690683 100644
--- a/contrib/views/hive20/src/main/java/org/apache/ambari/view/hive20/resources/savedQueries/SavedQueryResourceManager.java
+++ b/contrib/views/hive20/src/main/java/org/apache/ambari/view/hive20/resources/savedQueries/SavedQueryResourceManager.java
@@ -115,17 +115,12 @@ public class SavedQueryResourceManager extends PersonalCRUDResourceManager<Saved
}
@Override
- public SavedQuery update(SavedQuery object, String id) throws ItemNotFound {
- String query = object.getShortQuery();
- object.setShortQuery(makeShortQuery(query));
- object = super.update(object, id);
- try {
- createDefaultQueryFile(object, query);
-
- } catch (ServiceFormattedException e) {
- cleanupAfterErrorAndThrowAgain(object, e);
- }
- return object;
+ public SavedQuery update(SavedQuery newObject, String id) throws ItemNotFound {
+ SavedQuery savedQuery = super.update(newObject, id);
+ // Emptying short query so that in next read, this gets updated with proper value
+ // from the queryFile
+ emptyShortQueryField(savedQuery);
+ return savedQuery;
}
@Override
http://git-wip-us.apache.org/repos/asf/ambari/blob/e83c86dc/contrib/views/hive20/src/main/resources/ui/app/routes/queries/query.js
----------------------------------------------------------------------
diff --git a/contrib/views/hive20/src/main/resources/ui/app/routes/queries/query.js b/contrib/views/hive20/src/main/resources/ui/app/routes/queries/query.js
index 3e5adc1..9e8b6db 100644
--- a/contrib/views/hive20/src/main/resources/ui/app/routes/queries/query.js
+++ b/contrib/views/hive20/src/main/resources/ui/app/routes/queries/query.js
@@ -631,71 +631,36 @@ export default Ember.Route.extend(UILoggerMixin, {
let owner = this.get('controller.model').get('owner');
let queryFile = this.get('controller.model').get('queryFile');
let logFile = this.get('controller.model').get('logFile');
- let shortQuery = (currentQuery.length > 0) ? currentQuery : ";";
- let savedQueryId = this.get('controller.model').get('id');
-
- this.store.findAll('savedQuery').then(savedQueries => {
- return savedQueries.toArray();
- }).then((existingSavedQueries) =>{
-
- var queryExist = existingSavedQueries.filterBy('id', savedQueryId).get('firstObject');
-
- if(queryExist){
- this.send('updateSavedQuery', queryExist.get('id'));
- } else{
- this.send('addSavedQuery', selectedDb, newTitle, owner, shortQuery );
- }
-
-
- });
-
- },
-
- addSavedQuery(selectedDb, newTitle, owner, shortQuery){
+ let payload = {"title" : newTitle,
+ "dataBase": selectedDb,
+ "owner" : owner,
+ "shortQuery" : (currentQuery.length > 0) ? currentQuery : ";",
+ "queryFile" : queryFile,
+ "logFile" : logFile};
let newSaveQuery = this.get('store').createRecord('saved-query',
{ dataBase:selectedDb,
title:newTitle,
+ queryFile: queryFile,
owner: owner,
- shortQuery: shortQuery
+ shortQuery: (currentQuery.length > 0) ? currentQuery : ";"
});
+
newSaveQuery.save().then((data) => {
+ console.log('saved query saved');
+
this.get('controller.model').set('title', newTitle);
this.get('controller.model').set('isQueryDirty', false);
this.get('controller').set('worksheetModalSuccess', true);
+
Ember.run.later(() => {
this.get('controller').set('showWorksheetModal', false);
this.closeWorksheetAfterSave();
}, 2 * 1000);
- });
-
- },
-
- updateSavedQuery(savedQueryId){
- let currentQuery = this.get('controller.model').get('query');
- let selectedDb = this.get('controller.model').get('selectedDb');
- let owner = this.get('controller.model').get('owner');
-
- this.get('store').findRecord('saved-query', savedQueryId ).then(savedQuery => {
- savedQuery.set('shortQuery', (currentQuery.length > 0) ? currentQuery : ";");
- savedQuery.set('dataBase', selectedDb );
- savedQuery.set('owner', owner );
-
- savedQuery.save().then(savedQuery => {
-
- this.get('controller.model').set('isQueryDirty', false);
- this.get('controller').set('worksheetModalSuccess', true);
-
- Ember.run.later(() => {
- this.get('controller').set('showWorksheetModal', false);
- this.closeWorksheetAfterSave();
- }, 2 * 1000);
- })
-
- });
+ });
},
@@ -703,7 +668,7 @@ export default Ember.Route.extend(UILoggerMixin, {
this.get('controller').set('showWorksheetModal', false);
this.closeWorksheetAfterSave();
this.get('controller.model').set('tabDataToClose', null);
- },
+ },
expandQueryEdidorPanel(){
if(!this.get('isQueryEdidorPaneExpanded')){
http://git-wip-us.apache.org/repos/asf/ambari/blob/e83c86dc/contrib/views/hive20/src/main/resources/ui/yarn.lock
----------------------------------------------------------------------
diff --git a/contrib/views/hive20/src/main/resources/ui/yarn.lock b/contrib/views/hive20/src/main/resources/ui/yarn.lock
index 607cf81..477a15c 100644
--- a/contrib/views/hive20/src/main/resources/ui/yarn.lock
+++ b/contrib/views/hive20/src/main/resources/ui/yarn.lock
@@ -569,7 +569,7 @@ babel-plugin-transform-es2015-block-scoped-functions@^6.22.0:
dependencies:
babel-runtime "^6.22.0"
-babel-plugin-transform-es2015-block-scoping@^6.23.0, babel-plugin-transform-es2015-block-scoping@^6.24.1:
+babel-plugin-transform-es2015-block-scoping@^6.23.0:
version "6.24.1"
resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-block-scoping/-/babel-plugin-transform-es2015-block-scoping-6.24.1.tgz#76c295dc3a4741b1665adfd3167215dcff32a576"
dependencies:
[11/31] ambari git commit: Revert "AMBARI-22160. hadooplzo package installation failed on devdeploys (aonishuk)"
Posted by jl...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/f1c4626b/ambari-server/bin/src/main/resources/Ambari-DDL-AzureDB-CREATE.sql
----------------------------------------------------------------------
diff --git a/ambari-server/bin/src/main/resources/Ambari-DDL-AzureDB-CREATE.sql b/ambari-server/bin/src/main/resources/Ambari-DDL-AzureDB-CREATE.sql
deleted file mode 100644
index b54132c..0000000
--- a/ambari-server/bin/src/main/resources/Ambari-DDL-AzureDB-CREATE.sql
+++ /dev/null
@@ -1,2147 +0,0 @@
-/*
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements. See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership. The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License") you may not use this file except in compliance
-with the License. You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-/*
-Schema population script for $(AMBARIDBNAME)
-
-Use this script in sqlcmd mode, setting the environment variables like this:
-set AMBARIDBNAME=ambari
-
-sqlcmd -S localhost\SQLEXPRESS -i C:\app\ambari-server-1.3.0-SNAPSHOT\resources\Ambari-DDL-SQLServer-CREATE.sql
-*/
-
-
-------create the database------
-
-------create tables and grant privileges to db user---------
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('stack') AND type = 'U')
-BEGIN
-CREATE TABLE stack(
- stack_id BIGINT NOT NULL,
- stack_name VARCHAR(255) NOT NULL,
- stack_version VARCHAR(255) NOT NULL,
- CONSTRAINT PK_stack PRIMARY KEY CLUSTERED (stack_id),
- CONSTRAINT UQ_stack UNIQUE (stack_name, stack_version))
-END
-
-
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('extension') AND type = 'U')
-BEGIN
-CREATE TABLE extension(
- extension_id BIGINT NOT NULL,
- extension_name VARCHAR(255) NOT NULL,
- extension_version VARCHAR(255) NOT NULL,
- CONSTRAINT PK_extension PRIMARY KEY CLUSTERED (extension_id),
- CONSTRAINT UQ_extension UNIQUE (extension_name, extension_version))
-END
-
-
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('extensionlink') AND type = 'U')
-BEGIN
-CREATE TABLE extensionlink(
- link_id BIGINT NOT NULL,
- stack_id BIGINT NOT NULL,
- extension_id BIGINT NOT NULL,
- CONSTRAINT PK_extensionlink PRIMARY KEY CLUSTERED (link_id),
- CONSTRAINT FK_extensionlink_stack_id FOREIGN KEY (stack_id) REFERENCES stack(stack_id),
- CONSTRAINT FK_extensionlink_extension_id FOREIGN KEY (extension_id) REFERENCES extension(extension_id),
- CONSTRAINT UQ_extension_link UNIQUE (stack_id, extension_id))
-END
-
-
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('adminresourcetype') AND type = 'U')
-BEGIN
-CREATE TABLE adminresourcetype (
- resource_type_id INTEGER NOT NULL,
- resource_type_name VARCHAR(255) NOT NULL,
- CONSTRAINT PK_adminresourcetype PRIMARY KEY CLUSTERED (resource_type_id)
- )
-END
-
-
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('adminresource') AND type = 'U')
-BEGIN
-CREATE TABLE adminresource (
- resource_id BIGINT NOT NULL,
- resource_type_id INTEGER NOT NULL,
- CONSTRAINT PK_adminresource PRIMARY KEY CLUSTERED (resource_id),
- CONSTRAINT FK_resource_resource_type_id FOREIGN KEY (resource_type_id) REFERENCES adminresourcetype(resource_type_id))
-END
-
-
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('clusters') AND type = 'U')
-BEGIN
-CREATE TABLE clusters (
- cluster_id BIGINT NOT NULL,
- resource_id BIGINT NOT NULL,
- upgrade_id BIGINT,
- cluster_info VARCHAR(255) NOT NULL,
- cluster_name VARCHAR(100) NOT NULL UNIQUE,
- provisioning_state VARCHAR(255) NOT NULL DEFAULT 'INIT',
- security_type VARCHAR(32) NOT NULL DEFAULT 'NONE',
- desired_cluster_state VARCHAR(255) NOT NULL,
- desired_stack_id BIGINT NOT NULL,
- CONSTRAINT PK_clusters PRIMARY KEY CLUSTERED (cluster_id),
- CONSTRAINT FK_clusters_desired_stack_id FOREIGN KEY (desired_stack_id) REFERENCES stack(stack_id),
- CONSTRAINT FK_clusters_resource_id FOREIGN KEY (resource_id) REFERENCES adminresource(resource_id))
-END
-
-
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('clusterconfig') AND type = 'U')
-BEGIN
-CREATE TABLE clusterconfig (
- config_id BIGINT NOT NULL,
- version_tag VARCHAR(255) NOT NULL,
- version BIGINT NOT NULL,
- type_name VARCHAR(255) NOT NULL,
- cluster_id BIGINT NOT NULL,
- stack_id BIGINT NOT NULL,
- selected SMALLINT NOT NULL DEFAULT 0,
- config_data VARCHAR(MAX) NOT NULL,
- config_attributes VARCHAR(MAX),
- create_timestamp BIGINT NOT NULL,
- unmapped SMALLINT NOT NULL DEFAULT 0,
- selected_timestamp BIGINT NOT NULL DEFAULT 0,
- CONSTRAINT PK_clusterconfig PRIMARY KEY CLUSTERED (config_id),
- CONSTRAINT FK_clusterconfig_cluster_id FOREIGN KEY (cluster_id) REFERENCES clusters (cluster_id),
- CONSTRAINT FK_clusterconfig_stack_id FOREIGN KEY (stack_id) REFERENCES stack(stack_id),
- CONSTRAINT UQ_config_type_tag UNIQUE (cluster_id, type_name, version_tag),
- CONSTRAINT UQ_config_type_version UNIQUE (cluster_id, type_name, version))
-END
-
-
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('serviceconfig') AND type = 'U')
-BEGIN
-CREATE TABLE serviceconfig (
- service_config_id BIGINT NOT NULL,
- cluster_id BIGINT NOT NULL,
- service_name VARCHAR(255) NOT NULL,
- version BIGINT NOT NULL,
- create_timestamp BIGINT NOT NULL,
- stack_id BIGINT NOT NULL,
- user_name VARCHAR(255) NOT NULL DEFAULT '_db',
- group_id BIGINT,
- note VARCHAR(MAX),
- CONSTRAINT PK_serviceconfig PRIMARY KEY CLUSTERED (service_config_id),
- CONSTRAINT FK_serviceconfig_stack_id FOREIGN KEY (stack_id) REFERENCES stack(stack_id),
- CONSTRAINT UQ_scv_service_version UNIQUE (cluster_id, service_name, version))
-END
-
-
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('hosts') AND type = 'U')
-BEGIN
-CREATE TABLE hosts (
- host_id BIGINT NOT NULL,
- host_name VARCHAR(255) NOT NULL,
- cpu_count INTEGER NOT NULL,
- ph_cpu_count INTEGER,
- cpu_info VARCHAR(255) NOT NULL,
- discovery_status VARCHAR(2000) NOT NULL,
- host_attributes VARCHAR(MAX) NOT NULL,
- ipv4 VARCHAR(255),
- ipv6 VARCHAR(255),
- public_host_name VARCHAR(255),
- last_registration_time BIGINT NOT NULL,
- os_arch VARCHAR(255) NOT NULL,
- os_info VARCHAR(1000) NOT NULL,
- os_type VARCHAR(255) NOT NULL,
- rack_info VARCHAR(255) NOT NULL,
- total_mem BIGINT NOT NULL,
- CONSTRAINT PK_hosts PRIMARY KEY CLUSTERED (host_id),
- CONSTRAINT UQ_hosts_host_name UNIQUE (host_name))
-END
-
-
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('serviceconfighosts') AND type = 'U')
-BEGIN
-CREATE TABLE serviceconfighosts (
- service_config_id BIGINT NOT NULL,
- host_id BIGINT NOT NULL,
- CONSTRAINT PK_serviceconfighosts PRIMARY KEY CLUSTERED (service_config_id, host_id),
- CONSTRAINT FK_scvhosts_host_id FOREIGN KEY (host_id) REFERENCES hosts(host_id),
- CONSTRAINT FK_scvhosts_scv FOREIGN KEY (service_config_id) REFERENCES serviceconfig(service_config_id))
-END
-
-
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('serviceconfigmapping') AND type = 'U')
-BEGIN
-CREATE TABLE serviceconfigmapping (
- service_config_id BIGINT NOT NULL,
- config_id BIGINT NOT NULL,
- CONSTRAINT PK_serviceconfigmapping PRIMARY KEY CLUSTERED (service_config_id, config_id),
- CONSTRAINT FK_scvm_config FOREIGN KEY (config_id) REFERENCES clusterconfig(config_id),
- CONSTRAINT FK_scvm_scv FOREIGN KEY (service_config_id) REFERENCES serviceconfig(service_config_id))
-END
-
-
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('clusterservices') AND type = 'U')
-BEGIN
-CREATE TABLE clusterservices (
- service_name VARCHAR(255) NOT NULL,
- cluster_id BIGINT NOT NULL,
- service_enabled INT NOT NULL,
- CONSTRAINT PK_clusterservices PRIMARY KEY CLUSTERED (service_name, cluster_id),
- CONSTRAINT FK_clusterservices_cluster_id FOREIGN KEY (cluster_id) REFERENCES clusters (cluster_id))
-END
-
-
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('clusterstate') AND type = 'U')
-BEGIN
-CREATE TABLE clusterstate (
- cluster_id BIGINT NOT NULL,
- current_cluster_state VARCHAR(255) NOT NULL,
- current_stack_id BIGINT NOT NULL,
- CONSTRAINT PK_clusterstate PRIMARY KEY CLUSTERED (cluster_id),
- CONSTRAINT FK_clusterstate_cluster_id FOREIGN KEY (cluster_id) REFERENCES clusters (cluster_id),
- CONSTRAINT FK_cs_current_stack_id FOREIGN KEY (current_stack_id) REFERENCES stack(stack_id))
-END
-
-
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('repo_version') AND type = 'U')
-BEGIN
-CREATE TABLE repo_version (
- repo_version_id BIGINT NOT NULL,
- stack_id BIGINT NOT NULL,
- version VARCHAR(255) NOT NULL,
- display_name VARCHAR(128) NOT NULL,
- repositories VARCHAR(MAX) NOT NULL,
- repo_type VARCHAR(255) DEFAULT 'STANDARD' NOT NULL,
- hidden SMALLINT NOT NULL DEFAULT 0,
- resolved BIT NOT NULL DEFAULT 0,
- version_url VARCHAR(1024),
- version_xml VARCHAR(MAX),
- version_xsd VARCHAR(512),
- parent_id BIGINT,
- CONSTRAINT PK_repo_version PRIMARY KEY CLUSTERED (repo_version_id),
- CONSTRAINT FK_repoversion_stack_id FOREIGN KEY (stack_id) REFERENCES stack(stack_id),
- CONSTRAINT UQ_repo_version_display_name UNIQUE (display_name),
- CONSTRAINT UQ_repo_version_stack_id UNIQUE (stack_id, version))
-END
-
-
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('servicecomponentdesiredstate') AND type = 'U')
-BEGIN
-CREATE TABLE servicecomponentdesiredstate (
- id BIGINT NOT NULL,
- component_name VARCHAR(255) NOT NULL,
- cluster_id BIGINT NOT NULL,
- desired_repo_version_id BIGINT NOT NULL,
- desired_state VARCHAR(255) NOT NULL,
- service_name VARCHAR(255) NOT NULL,
- recovery_enabled SMALLINT NOT NULL DEFAULT 0,
- repo_state VARCHAR(255) NOT NULL DEFAULT 'NOT_REQUIRED',
- CONSTRAINT pk_sc_desiredstate PRIMARY KEY (id),
- CONSTRAINT UQ_scdesiredstate_name UNIQUE(component_name, service_name, cluster_id),
- CONSTRAINT FK_scds_desired_repo_id FOREIGN KEY (desired_repo_version_id) REFERENCES repo_version (repo_version_id),
- CONSTRAINT srvccmponentdesiredstatesrvcnm FOREIGN KEY (service_name, cluster_id) REFERENCES clusterservices (service_name, cluster_id))
-END
-
-
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('hostcomponentdesiredstate') AND type = 'U')
-BEGIN
-CREATE TABLE hostcomponentdesiredstate (
- id BIGINT NOT NULL,
- cluster_id BIGINT NOT NULL,
- component_name VARCHAR(255) NOT NULL,
- desired_state VARCHAR(255) NOT NULL,
- host_id BIGINT NOT NULL,
- service_name VARCHAR(255) NOT NULL,
- admin_state VARCHAR(32),
- maintenance_state VARCHAR(32) NOT NULL,
- security_state VARCHAR(32) NOT NULL DEFAULT 'UNSECURED',
- restart_required BIT NOT NULL DEFAULT 0,
- CONSTRAINT PK_hostcomponentdesiredstate PRIMARY KEY CLUSTERED (id),
- CONSTRAINT UQ_hcdesiredstate_name UNIQUE (component_name, service_name, host_id, cluster_id),
- CONSTRAINT FK_hcdesiredstate_host_id FOREIGN KEY (host_id) REFERENCES hosts (host_id),
- CONSTRAINT hstcmpnntdesiredstatecmpnntnme FOREIGN KEY (component_name, service_name, cluster_id) REFERENCES servicecomponentdesiredstate (component_name, service_name, cluster_id))
-END
-
-
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('hostcomponentstate') AND type = 'U')
-BEGIN
-CREATE TABLE hostcomponentstate (
- id BIGINT NOT NULL,
- cluster_id BIGINT NOT NULL,
- component_name VARCHAR(255) NOT NULL,
- version VARCHAR(32) NOT NULL DEFAULT 'UNKNOWN',
- current_state VARCHAR(255) NOT NULL,
- host_id BIGINT NOT NULL,
- service_name VARCHAR(255) NOT NULL,
- upgrade_state VARCHAR(32) NOT NULL DEFAULT 'NONE',
- security_state VARCHAR(32) NOT NULL DEFAULT 'UNSECURED',
- CONSTRAINT PK_hostcomponentstate PRIMARY KEY CLUSTERED (id),
- CONSTRAINT FK_hostcomponentstate_host_id FOREIGN KEY (host_id) REFERENCES hosts (host_id),
- CONSTRAINT hstcomponentstatecomponentname FOREIGN KEY (component_name, service_name, cluster_id) REFERENCES servicecomponentdesiredstate (component_name, service_name, cluster_id))
-END
-
-
-IF NOT EXISTS (SELECT 1 FROM sys.indexes WHERE name = 'idx_host_component_state')
-BEGIN
-CREATE NONCLUSTERED INDEX idx_host_component_state on hostcomponentstate(host_id, component_name, service_name, cluster_id)
-END
-
-
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('hoststate') AND type = 'U')
-BEGIN
-CREATE TABLE hoststate (
- agent_version VARCHAR(255) NOT NULL,
- available_mem BIGINT NOT NULL,
- current_state VARCHAR(255) NOT NULL,
- health_status VARCHAR(255),
- host_id BIGINT NOT NULL,
- time_in_state BIGINT NOT NULL,
- maintenance_state VARCHAR(512),
- CONSTRAINT PK_hoststate PRIMARY KEY CLUSTERED (host_id),
- CONSTRAINT FK_hoststate_host_id FOREIGN KEY (host_id) REFERENCES hosts (host_id))
-END
-
-
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('servicedesiredstate') AND type = 'U')
-BEGIN
-CREATE TABLE servicedesiredstate (
- cluster_id BIGINT NOT NULL,
- desired_host_role_mapping INTEGER NOT NULL,
- desired_repo_version_id BIGINT NOT NULL,
- desired_state VARCHAR(255) NOT NULL,
- service_name VARCHAR(255) NOT NULL,
- maintenance_state VARCHAR(32) NOT NULL,
- security_state VARCHAR(32) NOT NULL DEFAULT 'UNSECURED',
- credential_store_enabled SMALLINT NOT NULL DEFAULT 0,
- CONSTRAINT PK_servicedesiredstate PRIMARY KEY CLUSTERED (cluster_id,service_name),
- CONSTRAINT FK_repo_version_id FOREIGN KEY (desired_repo_version_id) REFERENCES repo_version (repo_version_id),
- CONSTRAINT servicedesiredstateservicename FOREIGN KEY (service_name, cluster_id) REFERENCES clusterservices (service_name, cluster_id))
-END
-
-
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('adminprincipaltype') AND type = 'U')
-BEGIN
-CREATE TABLE adminprincipaltype (
- principal_type_id INTEGER NOT NULL,
- principal_type_name VARCHAR(255) NOT NULL,
- CONSTRAINT PK_adminprincipaltype PRIMARY KEY CLUSTERED (principal_type_id)
- )
-END
-
-
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('adminprincipal') AND type = 'U')
-BEGIN
-CREATE TABLE adminprincipal (
- principal_id BIGINT NOT NULL,
- principal_type_id INTEGER NOT NULL,
- CONSTRAINT PK_adminprincipal PRIMARY KEY CLUSTERED (principal_id),
- CONSTRAINT FK_principal_principal_type_id FOREIGN KEY (principal_type_id) REFERENCES adminprincipaltype(principal_type_id))
-END
-
-
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('users') AND type = 'U')
-BEGIN
-CREATE TABLE users (
- user_id INTEGER,
- principal_id BIGINT NOT NULL,
- ldap_user INTEGER NOT NULL DEFAULT 0,
- user_name VARCHAR(255) NOT NULL,
- user_type VARCHAR(255) NOT NULL DEFAULT 'LOCAL',
- create_time DATETIME DEFAULT GETDATE(),
- user_password VARCHAR(255),
- active INTEGER NOT NULL DEFAULT 1,
- active_widget_layouts VARCHAR(1024) DEFAULT NULL,
- CONSTRAINT PK_users PRIMARY KEY CLUSTERED (user_id),
- CONSTRAINT FK_users_principal_id FOREIGN KEY (principal_id) REFERENCES adminprincipal(principal_id),
- CONSTRAINT UNQ_users_0 UNIQUE (user_name, user_type))
-END
-
-
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('groups') AND type = 'U')
-BEGIN
-CREATE TABLE groups (
- group_id INTEGER,
- principal_id BIGINT NOT NULL,
- group_name VARCHAR(255) NOT NULL,
- ldap_group INTEGER NOT NULL DEFAULT 0,
- group_type VARCHAR(255) NOT NULL DEFAULT 'LOCAL',
- CONSTRAINT PK_groups PRIMARY KEY CLUSTERED (group_id),
- CONSTRAINT FK_groups_principal_id FOREIGN KEY (principal_id) REFERENCES adminprincipal(principal_id),
- CONSTRAINT UNQ_groups_0 UNIQUE (group_name, ldap_group))
-END
-
-
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('members') AND type = 'U')
-BEGIN
-CREATE TABLE members (
- member_id INTEGER,
- group_id INTEGER NOT NULL,
- user_id INTEGER NOT NULL,
- CONSTRAINT PK_members PRIMARY KEY CLUSTERED (member_id),
- CONSTRAINT FK_members_group_id FOREIGN KEY (group_id) REFERENCES groups (group_id),
- CONSTRAINT FK_members_user_id FOREIGN KEY (user_id) REFERENCES users (user_id),
- CONSTRAINT UNQ_members_0 UNIQUE (group_id, user_id))
-END
-
-
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('requestschedule') AND type = 'U')
-BEGIN
-CREATE TABLE requestschedule (
- schedule_id BIGINT,
- cluster_id BIGINT NOT NULL,
- description VARCHAR(255),
- STATUS VARCHAR(255),
- batch_separation_seconds SMALLINT,
- batch_toleration_limit SMALLINT,
- authenticated_user_id INTEGER,
- create_user VARCHAR(255),
- create_timestamp BIGINT,
- update_user VARCHAR(255),
- update_timestamp BIGINT,
- minutes VARCHAR(10),
- hours VARCHAR(10),
- days_of_month VARCHAR(10),
- month VARCHAR(10),
- day_of_week VARCHAR(10),
- yearToSchedule VARCHAR(10),
- startTime VARCHAR(50),
- endTime VARCHAR(50),
- last_execution_status VARCHAR(255),
- CONSTRAINT PK_requestschedule PRIMARY KEY CLUSTERED (schedule_id))
-END
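-
-- minutes, hours, days_of_month, month and day_of_week above hold the five
-- cron-style schedule fields as strings; yearToSchedule, startTime and
-- endTime extend the same convention.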
-
-
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('request') AND type = 'U')
-BEGIN
-CREATE TABLE request (
- request_id BIGINT NOT NULL,
- cluster_id BIGINT,
- command_name VARCHAR(255),
- create_time BIGINT NOT NULL,
- end_time BIGINT NOT NULL,
- exclusive_execution BIT NOT NULL DEFAULT 0,
- inputs VARBINARY(MAX),
- request_context VARCHAR(255),
- request_type VARCHAR(255),
- request_schedule_id BIGINT,
- start_time BIGINT NOT NULL,
- status VARCHAR(255),
- cluster_host_info VARBINARY(MAX) NOT NULL,
- CONSTRAINT PK_request PRIMARY KEY CLUSTERED (request_id),
- CONSTRAINT FK_request_schedule_id FOREIGN KEY (request_schedule_id) REFERENCES requestschedule (schedule_id))
-END
-
-
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('stage') AND type = 'U')
-BEGIN
-CREATE TABLE stage (
- stage_id BIGINT NOT NULL,
- request_id BIGINT NOT NULL,
- cluster_id BIGINT NOT NULL,
- skippable SMALLINT DEFAULT 0 NOT NULL,
- supports_auto_skip_failure SMALLINT DEFAULT 0 NOT NULL,
- log_info VARCHAR(255) NOT NULL,
- request_context VARCHAR(255),
- command_params VARBINARY(MAX),
- host_params VARBINARY(MAX),
- command_execution_type VARCHAR(32) NOT NULL DEFAULT 'STAGE',
- CONSTRAINT PK_stage PRIMARY KEY CLUSTERED (stage_id, request_id),
- CONSTRAINT FK_stage_request_id FOREIGN KEY (request_id) REFERENCES request (request_id))
-END
-
-
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('host_role_command') AND type = 'U')
-BEGIN
-CREATE TABLE host_role_command (
- task_id BIGINT NOT NULL,
- attempt_count SMALLINT NOT NULL,
- retry_allowed SMALLINT DEFAULT 0 NOT NULL,
- event VARCHAR(MAX) NOT NULL,
- exitcode INTEGER NOT NULL,
- host_id BIGINT,
- last_attempt_time BIGINT NOT NULL,
- request_id BIGINT NOT NULL,
- role VARCHAR(255),
- stage_id BIGINT NOT NULL,
- start_time BIGINT NOT NULL,
- original_start_time BIGINT NOT NULL,
- end_time BIGINT,
- status VARCHAR(255),
- auto_skip_on_failure SMALLINT DEFAULT 0 NOT NULL,
- std_error VARBINARY(max),
- std_out VARBINARY(max),
- output_log VARCHAR(255) NULL,
- error_log VARCHAR(255) NULL,
- structured_out VARBINARY(max),
- role_command VARCHAR(255),
- command_detail VARCHAR(255),
- custom_command_name VARCHAR(255),
- is_background SMALLINT DEFAULT 0 NOT NULL,
- CONSTRAINT PK_host_role_command PRIMARY KEY CLUSTERED (task_id),
- CONSTRAINT FK_host_role_command_host_id FOREIGN KEY (host_id) REFERENCES hosts (host_id),
- CONSTRAINT FK_host_role_command_stage_id FOREIGN KEY (stage_id, request_id) REFERENCES stage (stage_id, request_id))
-END
-
-
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('execution_command') AND type = 'U')
-BEGIN
-CREATE TABLE execution_command (
- command VARBINARY(MAX),
- task_id BIGINT NOT NULL,
- CONSTRAINT PK_execution_command PRIMARY KEY CLUSTERED (task_id),
- CONSTRAINT FK_execution_command_task_id FOREIGN KEY (task_id) REFERENCES host_role_command (task_id))
-END
-
-
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('role_success_criteria') AND type = 'U')
-BEGIN
-CREATE TABLE role_success_criteria (
- ROLE VARCHAR(255) NOT NULL,
- request_id BIGINT NOT NULL,
- stage_id BIGINT NOT NULL,
- success_factor FLOAT NOT NULL,
- CONSTRAINT PK_role_success_criteria PRIMARY KEY CLUSTERED (ROLE, request_id, stage_id),
- CONSTRAINT role_success_criteria_stage_id FOREIGN KEY (stage_id, request_id) REFERENCES stage (stage_id, request_id))
-END
-
-
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('requestresourcefilter') AND type = 'U')
-BEGIN
-CREATE TABLE requestresourcefilter (
- filter_id BIGINT NOT NULL,
- request_id BIGINT NOT NULL,
- service_name VARCHAR(255),
- component_name VARCHAR(255),
- hosts VARBINARY(MAX),
- CONSTRAINT PK_requestresourcefilter PRIMARY KEY CLUSTERED (filter_id),
- CONSTRAINT FK_reqresfilter_req_id FOREIGN KEY (request_id) REFERENCES request (request_id))
-END
-
-
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('requestoperationlevel') AND type = 'U')
-BEGIN
-CREATE TABLE requestoperationlevel (
- operation_level_id BIGINT NOT NULL,
- request_id BIGINT NOT NULL,
- level_name VARCHAR(255),
- cluster_name VARCHAR(255),
- service_name VARCHAR(255),
- host_component_name VARCHAR(255),
- host_id BIGINT NULL, -- unlike most host_id columns, this one allows NULLs because the request can be at the service level
- CONSTRAINT PK_requestoperationlevel PRIMARY KEY CLUSTERED (operation_level_id),
- CONSTRAINT FK_req_op_level_req_id FOREIGN KEY (request_id) REFERENCES request (request_id))
-END
-
-
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('ClusterHostMapping') AND type = 'U')
-BEGIN
-CREATE TABLE ClusterHostMapping (
- cluster_id BIGINT NOT NULL,
- host_id BIGINT NOT NULL,
- CONSTRAINT PK_ClusterHostMapping PRIMARY KEY CLUSTERED (cluster_id, host_id),
- CONSTRAINT FK_clhostmapping_cluster_id FOREIGN KEY (cluster_id) REFERENCES clusters (cluster_id),
- CONSTRAINT FK_clusterhostmapping_host_id FOREIGN KEY (host_id) REFERENCES hosts (host_id))
-END
-
-
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('key_value_store') AND type = 'U')
-BEGIN
-CREATE TABLE key_value_store (
- [key] VARCHAR(255),
- [value] VARCHAR(MAX),
- CONSTRAINT PK_key_value_store PRIMARY KEY CLUSTERED ([key])
- )
-END
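-
-- [key] is bracket-quoted because KEY is a reserved word in T-SQL ([value]
-- gets the same treatment for consistency), so queries against this table
-- need the same quoting. An illustrative lookup (the key literal is a
-- made-up example):
-- SELECT [value] FROM key_value_store WHERE [key] = 'example-key';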
-
-
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('hostconfigmapping') AND type = 'U')
-BEGIN
-CREATE TABLE hostconfigmapping (
- cluster_id BIGINT NOT NULL,
- host_id BIGINT NOT NULL,
- type_name VARCHAR(255) NOT NULL,
- version_tag VARCHAR(255) NOT NULL,
- service_name VARCHAR(255),
- create_timestamp BIGINT NOT NULL,
- selected INTEGER NOT NULL DEFAULT 0,
- user_name VARCHAR(255) NOT NULL DEFAULT '_db',
- CONSTRAINT PK_hostconfigmapping PRIMARY KEY CLUSTERED (cluster_id, host_id, type_name, create_timestamp),
- CONSTRAINT FK_hostconfmapping_cluster_id FOREIGN KEY (cluster_id) REFERENCES clusters (cluster_id),
- CONSTRAINT FK_hostconfmapping_host_id FOREIGN KEY (host_id) REFERENCES hosts (host_id))
-END
-
-
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('metainfo') AND type = 'U')
-BEGIN
-CREATE TABLE metainfo (
- [metainfo_key] VARCHAR(255),
- [metainfo_value] VARCHAR(255),
- CONSTRAINT PK_metainfo PRIMARY KEY CLUSTERED ([metainfo_key])
- )
-END
-
-
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('ambari_sequences') AND type = 'U')
-BEGIN
-CREATE TABLE ambari_sequences (
- sequence_name VARCHAR(255),
- [sequence_value] BIGINT NOT NULL,
- CONSTRAINT PK_ambari_sequences PRIMARY KEY (sequence_name))
-END
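-
-- How these rows are consumed is outside this script; the assumed pattern
-- is an atomic read-and-increment, sketched here for reference only:
-- UPDATE ambari_sequences
--   SET [sequence_value] = [sequence_value] + 1
--   OUTPUT deleted.[sequence_value]
--   WHERE sequence_name = 'host_id_seq';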
-
-
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('configgroup') AND type = 'U')
-BEGIN
-CREATE TABLE configgroup (
- group_id BIGINT,
- cluster_id BIGINT NOT NULL,
- group_name VARCHAR(255) NOT NULL,
- tag VARCHAR(1024) NOT NULL,
- description VARCHAR(1024),
- create_timestamp BIGINT NOT NULL,
- service_name VARCHAR(255),
- CONSTRAINT PK_configgroup PRIMARY KEY CLUSTERED (group_id),
- CONSTRAINT FK_configgroup_cluster_id FOREIGN KEY (cluster_id) REFERENCES clusters (cluster_id))
-END
-
-
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('confgroupclusterconfigmapping') AND type = 'U')
-BEGIN
-CREATE TABLE confgroupclusterconfigmapping (
- config_group_id BIGINT NOT NULL,
- cluster_id BIGINT NOT NULL,
- config_type VARCHAR(255) NOT NULL,
- version_tag VARCHAR(255) NOT NULL,
- user_name VARCHAR(255) DEFAULT '_db',
- create_timestamp BIGINT NOT NULL,
- CONSTRAINT PK_confgroupclustercfgmapping PRIMARY KEY CLUSTERED (config_group_id, cluster_id, config_type),
- CONSTRAINT FK_cgccm_gid FOREIGN KEY (config_group_id) REFERENCES configgroup (group_id),
- CONSTRAINT FK_confg FOREIGN KEY (cluster_id, config_type, version_tag) REFERENCES clusterconfig (cluster_id, type_name, version_tag))
-END
-
-
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('configgrouphostmapping') AND type = 'U')
-BEGIN
-CREATE TABLE configgrouphostmapping (
- config_group_id BIGINT NOT NULL,
- host_id BIGINT NOT NULL,
- CONSTRAINT PK_configgrouphostmapping PRIMARY KEY CLUSTERED (config_group_id, host_id),
- CONSTRAINT FK_cghm_cgid FOREIGN KEY (config_group_id) REFERENCES configgroup (group_id),
- CONSTRAINT FK_cghm_host_id FOREIGN KEY (host_id) REFERENCES hosts (host_id))
-END
-
-
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('requestschedulebatchrequest') AND type = 'U')
-BEGIN
-CREATE TABLE requestschedulebatchrequest (
- schedule_id BIGINT,
- batch_id BIGINT,
- request_id BIGINT,
- request_type VARCHAR(255),
- request_uri VARCHAR(1024),
- request_body VARBINARY(MAX),
- request_status VARCHAR(255),
- return_code SMALLINT,
- return_message TEXT,
- CONSTRAINT PK_requestschedulebatchrequest PRIMARY KEY CLUSTERED (schedule_id, batch_id),
- CONSTRAINT FK_rsbatchrequest_schedule_id FOREIGN KEY (schedule_id) REFERENCES requestschedule (schedule_id))
-END
-
-
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('blueprint') AND type = 'U')
-BEGIN
-CREATE TABLE blueprint (
- blueprint_name VARCHAR(255) NOT NULL,
- stack_id BIGINT NOT NULL,
- security_type VARCHAR(32) NOT NULL DEFAULT 'NONE',
- security_descriptor_reference VARCHAR(255),
- CONSTRAINT PK_blueprint PRIMARY KEY CLUSTERED (blueprint_name),
- CONSTRAINT FK_blueprint_stack_id FOREIGN KEY (stack_id) REFERENCES stack(stack_id))
-END
-
-
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('hostgroup') AND type = 'U')
-BEGIN
-CREATE TABLE hostgroup (
- blueprint_name VARCHAR(255) NOT NULL,
- NAME VARCHAR(255) NOT NULL,
- cardinality VARCHAR(255) NOT NULL,
- CONSTRAINT PK_hostgroup PRIMARY KEY CLUSTERED (blueprint_name, NAME),
- CONSTRAINT FK_hg_blueprint_name FOREIGN KEY (blueprint_name) REFERENCES blueprint(blueprint_name))
-END
-
-
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('hostgroup_component') AND type = 'U')
-BEGIN
-CREATE TABLE hostgroup_component (
- blueprint_name VARCHAR(255) NOT NULL,
- hostgroup_name VARCHAR(255) NOT NULL,
- NAME VARCHAR(255) NOT NULL,
- provision_action VARCHAR(255),
- CONSTRAINT PK_hostgroup_component PRIMARY KEY CLUSTERED (blueprint_name, hostgroup_name, NAME),
- CONSTRAINT FK_hgc_blueprint_name FOREIGN KEY (blueprint_name, hostgroup_name) REFERENCES hostgroup (blueprint_name, name))
-END
-
-
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('blueprint_configuration') AND type = 'U')
-BEGIN
-CREATE TABLE blueprint_configuration (
- blueprint_name VARCHAR(255) NOT NULL,
- type_name VARCHAR(255) NOT NULL,
- config_data VARCHAR(MAX) NOT NULL,
- config_attributes VARCHAR(MAX),
- CONSTRAINT PK_blueprint_configuration PRIMARY KEY CLUSTERED (blueprint_name, type_name),
- CONSTRAINT FK_cfg_blueprint_name FOREIGN KEY (blueprint_name) REFERENCES blueprint(blueprint_name))
-END
-
-
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('blueprint_setting') AND type = 'U')
-BEGIN
-CREATE TABLE blueprint_setting (
- id BIGINT NOT NULL,
- blueprint_name VARCHAR(255) NOT NULL,
- setting_name VARCHAR(255) NOT NULL,
- setting_data TEXT NOT NULL,
- CONSTRAINT PK_blueprint_setting PRIMARY KEY (id),
- CONSTRAINT UQ_blueprint_setting_name UNIQUE(blueprint_name,setting_name),
- CONSTRAINT FK_blueprint_setting_name FOREIGN KEY (blueprint_name) REFERENCES blueprint(blueprint_name)
- )
-END
-
-
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('hostgroup_configuration') AND type = 'U')
-BEGIN
-CREATE TABLE hostgroup_configuration (
- blueprint_name VARCHAR(255) NOT NULL,
- hostgroup_name VARCHAR(255) NOT NULL,
- type_name VARCHAR(255) NOT NULL,
- config_data VARCHAR(MAX) NOT NULL,
- config_attributes VARCHAR(MAX),
- CONSTRAINT PK_hostgroup_configuration PRIMARY KEY CLUSTERED (blueprint_name, hostgroup_name, type_name),
- CONSTRAINT FK_hg_cfg_bp_hg_name FOREIGN KEY (blueprint_name, hostgroup_name) REFERENCES hostgroup (blueprint_name, name))
-END
-
-
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('viewmain') AND type = 'U')
-BEGIN
-CREATE TABLE viewmain (
- view_name VARCHAR(255) NOT NULL,
- label VARCHAR(255),
- description VARCHAR(2048),
- version VARCHAR(255),
- build VARCHAR(128),
- resource_type_id INTEGER NOT NULL,
- icon VARCHAR(255),
- icon64 VARCHAR(255),
- archive VARCHAR(255),
- mask VARCHAR(255),
- system_view BIT NOT NULL DEFAULT 0,
- CONSTRAINT PK_viewmain PRIMARY KEY CLUSTERED (view_name),
- CONSTRAINT FK_view_resource_type_id FOREIGN KEY (resource_type_id) REFERENCES adminresourcetype(resource_type_id))
-END
-
-
-
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('viewurl') AND type = 'U')
-BEGIN
-CREATE table viewurl(
- url_id BIGINT ,
- url_name VARCHAR(255) NOT NULL ,
- url_suffix VARCHAR(255) NOT NULL,
- CONSTRAINT PK_viewurl PRIMARY KEY CLUSTERED (url_id)
-)
-END
-
-
-
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('viewinstance') AND type = 'U')
-BEGIN
-CREATE TABLE viewinstance (
- view_instance_id BIGINT,
- resource_id BIGINT NOT NULL,
- view_name VARCHAR(255) NOT NULL,
- NAME VARCHAR(255) NOT NULL,
- label VARCHAR(255),
- description VARCHAR(2048),
- visible CHAR(1),
- icon VARCHAR(255),
- icon64 VARCHAR(255),
- xml_driven CHAR(1),
- alter_names BIT NOT NULL DEFAULT 1,
- cluster_handle BIGINT,
- cluster_type VARCHAR(100) NOT NULL DEFAULT 'LOCAL_AMBARI',
- short_url BIGINT,
- CONSTRAINT PK_viewinstance PRIMARY KEY CLUSTERED (view_instance_id),
- CONSTRAINT FK_instance_url_id FOREIGN KEY (short_url) REFERENCES viewurl(url_id),
- CONSTRAINT FK_viewinst_view_name FOREIGN KEY (view_name) REFERENCES viewmain(view_name),
- CONSTRAINT FK_viewinstance_resource_id FOREIGN KEY (resource_id) REFERENCES adminresource(resource_id),
- CONSTRAINT UQ_viewinstance_name UNIQUE (view_name, name),
- CONSTRAINT UQ_viewinstance_name_id UNIQUE (view_instance_id, view_name, name))
-END
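-
-- UQ_viewinstance_name_id is not redundant with UQ_viewinstance_name: it
-- gives viewinstancedata (below) a valid target for its composite FK on
-- (view_instance_id, view_name, name).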
-
-
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('viewinstancedata') AND type = 'U')
-BEGIN
-CREATE TABLE viewinstancedata (
- view_instance_id BIGINT,
- view_name VARCHAR(255) NOT NULL,
- view_instance_name VARCHAR(255) NOT NULL,
- NAME VARCHAR(255) NOT NULL,
- user_name VARCHAR(255) NOT NULL,
- value VARCHAR(2000) NOT NULL,
- CONSTRAINT PK_viewinstancedata PRIMARY KEY CLUSTERED (view_instance_id, NAME, user_name),
- CONSTRAINT FK_viewinstdata_view_name FOREIGN KEY (view_instance_id, view_name, view_instance_name) REFERENCES viewinstance(view_instance_id, view_name, name))
-END
-
-
-
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('viewinstanceproperty') AND type = 'U')
-BEGIN
-CREATE TABLE viewinstanceproperty (
- view_name VARCHAR(255) NOT NULL,
- view_instance_name VARCHAR(255) NOT NULL,
- NAME VARCHAR(255) NOT NULL,
- value VARCHAR(2000),
- CONSTRAINT PK_viewinstanceproperty PRIMARY KEY CLUSTERED (view_name, view_instance_name, NAME),
- CONSTRAINT FK_viewinstprop_view_name FOREIGN KEY (view_name, view_instance_name) REFERENCES viewinstance(view_name, name))
-END
-
-
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('viewparameter') AND type = 'U')
-BEGIN
-CREATE TABLE viewparameter (
- view_name VARCHAR(255) NOT NULL,
- NAME VARCHAR(255) NOT NULL,
- description VARCHAR(2048),
- label VARCHAR(255),
- placeholder VARCHAR(255),
- default_value VARCHAR(2000),
- cluster_config VARCHAR(255),
- required CHAR(1),
- masked CHAR(1),
- CONSTRAINT PK_viewparameter PRIMARY KEY CLUSTERED (view_name, NAME),
- CONSTRAINT FK_viewparam_view_name FOREIGN KEY (view_name) REFERENCES viewmain(view_name))
-END
-
-
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('viewresource') AND type = 'U')
-BEGIN
-CREATE TABLE viewresource (
- view_name VARCHAR(255) NOT NULL,
- NAME VARCHAR(255) NOT NULL,
- plural_name VARCHAR(255),
- id_property VARCHAR(255),
- subResource_names VARCHAR(255),
- provider VARCHAR(255),
- service VARCHAR(255),
- resource VARCHAR(255),
- CONSTRAINT PK_viewresource PRIMARY KEY CLUSTERED (view_name, NAME),
- CONSTRAINT FK_viewres_view_name FOREIGN KEY (view_name) REFERENCES viewmain(view_name))
-END
-
-
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('viewentity') AND type = 'U')
-BEGIN
-CREATE TABLE viewentity (
- id BIGINT NOT NULL,
- view_name VARCHAR(255) NOT NULL,
- view_instance_name VARCHAR(255) NOT NULL,
- class_name VARCHAR(255) NOT NULL,
- id_property VARCHAR(255),
- CONSTRAINT PK_viewentity PRIMARY KEY CLUSTERED (id),
- CONSTRAINT FK_viewentity_view_name FOREIGN KEY (view_name, view_instance_name) REFERENCES viewinstance(view_name, name))
-END
-
-
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('adminpermission') AND type = 'U')
-BEGIN
-CREATE TABLE adminpermission (
- permission_id BIGINT NOT NULL,
- permission_name VARCHAR(255) NOT NULL,
- resource_type_id INTEGER NOT NULL,
- permission_label VARCHAR(255),
- principal_id BIGINT NOT NULL,
- sort_order SMALLINT NOT NULL DEFAULT 1,
- CONSTRAINT PK_adminpermission PRIMARY KEY CLUSTERED (permission_id),
- CONSTRAINT FK_permission_resource_type_id FOREIGN KEY (resource_type_id) REFERENCES adminresourcetype(resource_type_id),
- CONSTRAINT FK_permission_principal_id FOREIGN KEY (principal_id) REFERENCES adminprincipal(principal_id),
- CONSTRAINT UQ_perm_name_resource_type_id UNIQUE (permission_name, resource_type_id))
-END
-
-
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('roleauthorization') AND type = 'U')
-BEGIN
-CREATE TABLE roleauthorization (
- authorization_id VARCHAR(100) NOT NULL,
- authorization_name VARCHAR(255) NOT NULL,
- CONSTRAINT PK_roleauthorization PRIMARY KEY (authorization_id))
-END
-
-
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('permission_roleauthorization') AND type = 'U')
-BEGIN
-CREATE TABLE permission_roleauthorization (
- permission_id BIGINT NOT NULL,
- authorization_id VARCHAR(100) NOT NULL,
- CONSTRAINT PK_permsn_roleauthorization PRIMARY KEY (permission_id, authorization_id),
- CONSTRAINT FK_permission_roleauth_aid FOREIGN KEY (authorization_id) REFERENCES roleauthorization(authorization_id),
- CONSTRAINT FK_permission_roleauth_pid FOREIGN KEY (permission_id) REFERENCES adminpermission(permission_id))
-END
-
-
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('adminprivilege') AND type = 'U')
-BEGIN
-CREATE TABLE adminprivilege (
- privilege_id BIGINT,
- permission_id BIGINT NOT NULL,
- resource_id BIGINT NOT NULL,
- principal_id BIGINT NOT NULL,
- CONSTRAINT PK_adminprivilege PRIMARY KEY CLUSTERED (privilege_id),
- CONSTRAINT FK_privilege_permission_id FOREIGN KEY (permission_id) REFERENCES adminpermission(permission_id),
- CONSTRAINT FK_privilege_principal_id FOREIGN KEY (principal_id) REFERENCES adminprincipal(principal_id),
- CONSTRAINT FK_privilege_resource_id FOREIGN KEY (resource_id) REFERENCES adminresource(resource_id))
-END
-
-
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('host_version') AND type = 'U')
-BEGIN
-CREATE TABLE host_version (
- id BIGINT NOT NULL,
- repo_version_id BIGINT NOT NULL,
- host_id BIGINT NOT NULL,
- STATE VARCHAR(32) NOT NULL,
- CONSTRAINT PK_host_version PRIMARY KEY CLUSTERED (id),
- CONSTRAINT FK_host_version_host_id FOREIGN KEY (host_id) REFERENCES hosts (host_id),
- CONSTRAINT FK_host_version_repovers_id FOREIGN KEY (repo_version_id) REFERENCES repo_version (repo_version_id),
- CONSTRAINT UQ_host_repo UNIQUE(host_id, repo_version_id))
-END
-
-
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('artifact') AND type = 'U')
-BEGIN
-CREATE TABLE artifact (
- artifact_name VARCHAR(255) NOT NULL,
- artifact_data TEXT NOT NULL,
- foreign_keys VARCHAR(255) NOT NULL,
- CONSTRAINT PK_artifact PRIMARY KEY CLUSTERED (artifact_name, foreign_keys)
-)
-END
-
-
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('widget') AND type = 'U')
-BEGIN
-CREATE TABLE widget (
- id BIGINT NOT NULL,
- widget_name VARCHAR(255) NOT NULL,
- widget_type VARCHAR(255) NOT NULL,
- metrics TEXT,
- time_created BIGINT NOT NULL,
- author VARCHAR(255),
- description VARCHAR(2048),
- default_section_name VARCHAR(255),
- scope VARCHAR(255),
- widget_values VARCHAR(4000),
- properties VARCHAR(4000),
- cluster_id BIGINT NOT NULL,
- CONSTRAINT PK_widget PRIMARY KEY CLUSTERED (id)
-)
-END
-
-
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('widget_layout') AND type = 'U')
-BEGIN
-CREATE TABLE widget_layout (
- id BIGINT NOT NULL,
- layout_name VARCHAR(255) NOT NULL,
- section_name VARCHAR(255) NOT NULL,
- scope VARCHAR(255) NOT NULL,
- user_name VARCHAR(255) NOT NULL,
- display_name VARCHAR(255),
- cluster_id BIGINT NOT NULL,
- CONSTRAINT PK_widget_layout PRIMARY KEY CLUSTERED (id)
-)
-END
-
-
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('widget_layout_user_widget') AND type = 'U')
-BEGIN
-CREATE TABLE widget_layout_user_widget (
- widget_layout_id BIGINT NOT NULL,
- widget_id BIGINT NOT NULL,
- widget_order smallint,
- CONSTRAINT PK_widget_layout_user_widget PRIMARY KEY CLUSTERED (widget_layout_id, widget_id),
- CONSTRAINT FK_widget_id FOREIGN KEY (widget_id) REFERENCES widget(id),
- CONSTRAINT FK_widget_layout_id FOREIGN KEY (widget_layout_id) REFERENCES widget_layout(id))
-END
-
-
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('topology_request') AND type = 'U')
-BEGIN
-CREATE TABLE topology_request (
- id BIGINT NOT NULL,
- action VARCHAR(255) NOT NULL,
- cluster_id BIGINT NOT NULL,
- bp_name VARCHAR(100) NOT NULL,
- cluster_properties TEXT,
- cluster_attributes TEXT,
- description VARCHAR(1024),
- provision_action VARCHAR(255),
- CONSTRAINT PK_topology_request PRIMARY KEY CLUSTERED (id),
- CONSTRAINT FK_topology_request_cluster_id FOREIGN KEY (cluster_id) REFERENCES clusters(cluster_id))
-END
-
-
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('topology_hostgroup') AND type = 'U')
-BEGIN
-CREATE TABLE topology_hostgroup (
- id BIGINT NOT NULL,
- name VARCHAR(255) NOT NULL,
- group_properties TEXT,
- group_attributes TEXT,
- request_id BIGINT NOT NULL,
- CONSTRAINT PK_topology_hostgroup PRIMARY KEY CLUSTERED (id),
- CONSTRAINT FK_hostgroup_req_id FOREIGN KEY (request_id) REFERENCES topology_request(id))
-END
-
-
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('topology_host_info') AND type = 'U')
-BEGIN
-CREATE TABLE topology_host_info (
- id BIGINT NOT NULL,
- group_id BIGINT NOT NULL,
- fqdn VARCHAR(255),
- host_id BIGINT,
- host_count INTEGER,
- predicate VARCHAR(2048),
- rack_info VARCHAR(255),
- CONSTRAINT PK_topology_host_info PRIMARY KEY CLUSTERED (id),
- CONSTRAINT FK_hostinfo_group_id FOREIGN KEY (group_id) REFERENCES topology_hostgroup(id),
- CONSTRAINT FK_hostinfo_host_id FOREIGN KEY (host_id) REFERENCES hosts(host_id))
-END
-
-
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('topology_logical_request') AND type = 'U')
-BEGIN
-CREATE TABLE topology_logical_request (
- id BIGINT NOT NULL,
- request_id BIGINT NOT NULL,
- description VARCHAR(1024),
- CONSTRAINT PK_topology_logical_request PRIMARY KEY CLUSTERED (id),
- CONSTRAINT FK_logicalreq_req_id FOREIGN KEY (request_id) REFERENCES topology_request(id))
-END
-
-
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('topology_host_request') AND type = 'U')
-BEGIN
-CREATE TABLE topology_host_request (
- id BIGINT NOT NULL,
- logical_request_id BIGINT NOT NULL,
- group_id BIGINT NOT NULL,
- stage_id BIGINT NOT NULL,
- host_name VARCHAR(255),
- CONSTRAINT PK_topology_host_request PRIMARY KEY CLUSTERED (id),
- CONSTRAINT FK_hostreq_group_id FOREIGN KEY (group_id) REFERENCES topology_hostgroup(id),
- CONSTRAINT FK_hostreq_logicalreq_id FOREIGN KEY (logical_request_id) REFERENCES topology_logical_request(id))
-END
-
-
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('topology_host_task') AND type = 'U')
-BEGIN
-CREATE TABLE topology_host_task (
- id BIGINT NOT NULL,
- host_request_id BIGINT NOT NULL,
- type VARCHAR(255) NOT NULL,
- CONSTRAINT PK_topology_host_task PRIMARY KEY CLUSTERED (id),
- CONSTRAINT FK_hosttask_req_id FOREIGN KEY (host_request_id) REFERENCES topology_host_request (id))
-END
-
-
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('topology_logical_task') AND type = 'U')
-BEGIN
-CREATE TABLE topology_logical_task (
- id BIGINT NOT NULL,
- host_task_id BIGINT NOT NULL,
- physical_task_id BIGINT,
- component VARCHAR(255) NOT NULL,
- CONSTRAINT PK_topology_logical_task PRIMARY KEY CLUSTERED (id),
- CONSTRAINT FK_ltask_hosttask_id FOREIGN KEY (host_task_id) REFERENCES topology_host_task (id),
- CONSTRAINT FK_ltask_hrc_id FOREIGN KEY (physical_task_id) REFERENCES host_role_command (task_id))
-END
-
-
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('setting') AND type = 'U')
-BEGIN
-CREATE TABLE setting (
- id BIGINT NOT NULL,
- name VARCHAR(255) NOT NULL UNIQUE,
- setting_type VARCHAR(255) NOT NULL,
- content TEXT NOT NULL,
- updated_by VARCHAR(255) NOT NULL DEFAULT '_db',
- update_timestamp BIGINT NOT NULL,
- CONSTRAINT PK_setting PRIMARY KEY (id)
-)
-END
-
-
--- Remote Cluster table
-
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('remoteambaricluster') AND type = 'U')
-BEGIN
-CREATE TABLE remoteambaricluster(
- cluster_id BIGINT NOT NULL,
- name VARCHAR(255) NOT NULL,
- username VARCHAR(255) NOT NULL,
- url VARCHAR(255) NOT NULL,
- password VARCHAR(255) NOT NULL,
- CONSTRAINT PK_remote_ambari_cluster PRIMARY KEY (cluster_id),
- CONSTRAINT UQ_remote_ambari_cluster UNIQUE (name))
-END
-
-
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('remoteambariclusterservice') AND type = 'U')
-BEGIN
-CREATE TABLE remoteambariclusterservice(
- id BIGINT NOT NULL,
- cluster_id BIGINT NOT NULL,
- service_name VARCHAR(255) NOT NULL,
- CONSTRAINT PK_remote_ambari_service PRIMARY KEY (id),
- CONSTRAINT FK_remote_ambari_cluster_id FOREIGN KEY (cluster_id) REFERENCES remoteambaricluster(cluster_id)
-)
-END
-
-
--- Remote Cluster table ends
-
--- upgrade tables
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('upgrade') AND type = 'U')
-BEGIN
-CREATE TABLE upgrade (
- upgrade_id BIGINT NOT NULL,
- cluster_id BIGINT NOT NULL,
- request_id BIGINT NOT NULL,
- direction VARCHAR(255) DEFAULT 'UPGRADE' NOT NULL,
- orchestration VARCHAR(255) DEFAULT 'STANDARD' NOT NULL,
- upgrade_package VARCHAR(255) NOT NULL,
- upgrade_type VARCHAR(32) NOT NULL,
- repo_version_id BIGINT NOT NULL,
- skip_failures BIT NOT NULL DEFAULT 0,
- skip_sc_failures BIT NOT NULL DEFAULT 0,
- downgrade_allowed BIT NOT NULL DEFAULT 1,
- revert_allowed BIT NOT NULL DEFAULT 0,
- suspended BIT DEFAULT 0 NOT NULL,
- CONSTRAINT PK_upgrade PRIMARY KEY CLUSTERED (upgrade_id),
- FOREIGN KEY (cluster_id) REFERENCES clusters(cluster_id),
- FOREIGN KEY (request_id) REFERENCES request(request_id),
- FOREIGN KEY (repo_version_id) REFERENCES repo_version(repo_version_id)
-)
-END
-
-
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('upgrade_group') AND type = 'U')
-BEGIN
-CREATE TABLE upgrade_group (
- upgrade_group_id BIGINT NOT NULL,
- upgrade_id BIGINT NOT NULL,
- group_name VARCHAR(255) DEFAULT '' NOT NULL,
- group_title VARCHAR(1024) DEFAULT '' NOT NULL,
- CONSTRAINT PK_upgrade_group PRIMARY KEY CLUSTERED (upgrade_group_id),
- FOREIGN KEY (upgrade_id) REFERENCES upgrade(upgrade_id)
-)
-END
-
-
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('upgrade_item') AND type = 'U')
-BEGIN
-CREATE TABLE upgrade_item (
- upgrade_item_id BIGINT NOT NULL,
- upgrade_group_id BIGINT NOT NULL,
- stage_id BIGINT NOT NULL,
- state VARCHAR(255) DEFAULT 'NONE' NOT NULL,
- hosts TEXT,
- tasks TEXT,
- item_text TEXT,
- CONSTRAINT PK_upgrade_item PRIMARY KEY CLUSTERED (upgrade_item_id),
- FOREIGN KEY (upgrade_group_id) REFERENCES upgrade_group(upgrade_group_id)
-)
-END
-
-
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('upgrade_history') AND type = 'U')
-BEGIN
-CREATE TABLE upgrade_history(
- id BIGINT NOT NULL,
- upgrade_id BIGINT NOT NULL,
- service_name VARCHAR(255) NOT NULL,
- component_name VARCHAR(255) NOT NULL,
- from_repo_version_id BIGINT NOT NULL,
- target_repo_version_id BIGINT NOT NULL,
- CONSTRAINT PK_upgrade_hist PRIMARY KEY (id),
- CONSTRAINT FK_upgrade_hist_upgrade_id FOREIGN KEY (upgrade_id) REFERENCES upgrade (upgrade_id),
- CONSTRAINT FK_upgrade_hist_from_repo FOREIGN KEY (from_repo_version_id) REFERENCES repo_version (repo_version_id),
- CONSTRAINT FK_upgrade_hist_target_repo FOREIGN KEY (target_repo_version_id) REFERENCES repo_version (repo_version_id),
- CONSTRAINT UQ_upgrade_hist UNIQUE (upgrade_id, component_name, service_name)
-)
-END
-
-
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('servicecomponent_version') AND type = 'U')
-BEGIN
-CREATE TABLE servicecomponent_version(
- id BIGINT NOT NULL,
- component_id BIGINT NOT NULL,
- repo_version_id BIGINT NOT NULL,
- state VARCHAR(32) NOT NULL,
- user_name VARCHAR(255) NOT NULL,
- CONSTRAINT PK_sc_version PRIMARY KEY (id),
- CONSTRAINT FK_scv_component_id FOREIGN KEY (component_id) REFERENCES servicecomponentdesiredstate (id),
- CONSTRAINT FK_scv_repo_version_id FOREIGN KEY (repo_version_id) REFERENCES repo_version (repo_version_id)
-)
-END
-
-
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('ambari_operation_history') AND type = 'U')
-BEGIN
-CREATE TABLE ambari_operation_history(
- id BIGINT NOT NULL,
- from_version VARCHAR(255) NOT NULL,
- to_version VARCHAR(255) NOT NULL,
- start_time BIGINT NOT NULL,
- end_time BIGINT,
- operation_type VARCHAR(255) NOT NULL,
- comments TEXT,
- CONSTRAINT PK_ambari_operation_history PRIMARY KEY (id)
-)
-END
-
-
-
-- task indices --
-IF NOT EXISTS (SELECT 1 FROM sys.indexes WHERE name = 'idx_stage_request_id')
-BEGIN
-CREATE INDEX idx_stage_request_id ON stage (request_id)
-END
-
-IF NOT EXISTS (SELECT 1 FROM sys.indexes WHERE name = 'idx_hrc_request_id')
-BEGIN
-CREATE INDEX idx_hrc_request_id ON host_role_command (request_id)
-END
-
-IF NOT EXISTS (SELECT 1 FROM sys.indexes WHERE name = 'idx_hrc_status_role')
-BEGIN
-CREATE INDEX idx_hrc_status_role ON host_role_command (status, role)
-END
-
-IF NOT EXISTS (SELECT 1 FROM sys.indexes WHERE name = 'idx_rsc_request_id')
-BEGIN
-CREATE INDEX idx_rsc_request_id ON role_success_criteria (request_id)
-END
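-
-- These guards match on index name alone, which is assumed safe because
-- index names are unique within this script; a table-scoped guard would be:
-- IF NOT EXISTS (SELECT 1 FROM sys.indexes
--                WHERE name = 'idx_rsc_request_id'
--                  AND object_id = OBJECT_ID('role_success_criteria'))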
-
-
-
-- altering tables to add constraints ----------
-
--- altering tables by creating foreign keys----------
-- Note: Oracle limits constraint names to 30 characters, so the same short FK name should be used in all DB types.
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('FK_clusters_upgrade_id') AND type = 'F')
-BEGIN
-ALTER TABLE clusters ADD CONSTRAINT FK_clusters_upgrade_id FOREIGN KEY (upgrade_id) REFERENCES upgrade (upgrade_id)
-END
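-
-- clusters.upgrade_id and upgrade.cluster_id reference each other, so this
-- FK can only be added once both tables exist; declaring it inline in
-- either CREATE TABLE would fail.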
-
-
--- Kerberos
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('kerberos_principal') AND type = 'U')
-BEGIN
-CREATE TABLE kerberos_principal (
- principal_name VARCHAR(255) NOT NULL,
- is_service SMALLINT NOT NULL DEFAULT 1,
- cached_keytab_path VARCHAR(255),
- CONSTRAINT PK_kerberos_principal PRIMARY KEY CLUSTERED (principal_name)
-)
-END
-
-
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('kerberos_principal_host') AND type = 'U')
-BEGIN
-CREATE TABLE kerberos_principal_host (
- principal_name VARCHAR(255) NOT NULL,
- host_id BIGINT NOT NULL,
- CONSTRAINT PK_kerberos_principal_host PRIMARY KEY CLUSTERED (principal_name, host_id),
- CONSTRAINT FK_krb_pr_host_id FOREIGN KEY (host_id) REFERENCES hosts (host_id),
- CONSTRAINT FK_krb_pr_host_principalname FOREIGN KEY (principal_name) REFERENCES kerberos_principal (principal_name))
-END
-
-
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('kerberos_descriptor') AND type = 'U')
-BEGIN
-CREATE TABLE kerberos_descriptor
-(
- kerberos_descriptor_name VARCHAR(255) NOT NULL,
- kerberos_descriptor VARCHAR(MAX) NOT NULL,
- CONSTRAINT PK_kerberos_descriptor PRIMARY KEY (kerberos_descriptor_name)
-)
-END
-
-
--- Kerberos (end)
-
--- Alerting Framework
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('alert_definition') AND type = 'U')
-BEGIN
-CREATE TABLE alert_definition (
- definition_id BIGINT NOT NULL,
- cluster_id BIGINT NOT NULL,
- definition_name VARCHAR(255) NOT NULL,
- service_name VARCHAR(255) NOT NULL,
- component_name VARCHAR(255),
- scope VARCHAR(255) DEFAULT 'ANY' NOT NULL,
- label VARCHAR(255),
- help_url VARCHAR(512),
- description TEXT,
- enabled SMALLINT DEFAULT 1 NOT NULL,
- schedule_interval INTEGER NOT NULL,
- source_type VARCHAR(255) NOT NULL,
- alert_source TEXT NOT NULL,
- hash VARCHAR(64) NOT NULL,
- ignore_host SMALLINT DEFAULT 0 NOT NULL,
- repeat_tolerance INTEGER DEFAULT 1 NOT NULL,
- repeat_tolerance_enabled SMALLINT DEFAULT 0 NOT NULL,
- CONSTRAINT PK_alert_definition PRIMARY KEY CLUSTERED (definition_id),
- FOREIGN KEY (cluster_id) REFERENCES clusters(cluster_id),
- CONSTRAINT uni_alert_def_name UNIQUE(cluster_id,definition_name)
-)
-END
-
-
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('alert_history') AND type = 'U')
-BEGIN
-CREATE TABLE alert_history (
- alert_id BIGINT NOT NULL,
- cluster_id BIGINT NOT NULL,
- alert_definition_id BIGINT NOT NULL,
- service_name VARCHAR(255) NOT NULL,
- component_name VARCHAR(255),
- host_name VARCHAR(255),
- alert_instance VARCHAR(255),
- alert_timestamp BIGINT NOT NULL,
- alert_label VARCHAR(1024),
- alert_state VARCHAR(255) NOT NULL,
- alert_text TEXT,
- CONSTRAINT PK_alert_history PRIMARY KEY CLUSTERED (alert_id),
- FOREIGN KEY (alert_definition_id) REFERENCES alert_definition(definition_id),
- FOREIGN KEY (cluster_id) REFERENCES clusters(cluster_id)
-)
-END
-
-
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('alert_current') AND type = 'U')
-BEGIN
-CREATE TABLE alert_current (
- alert_id BIGINT NOT NULL,
- definition_id BIGINT NOT NULL,
- history_id BIGINT NOT NULL UNIQUE,
- maintenance_state VARCHAR(255) NOT NULL,
- original_timestamp BIGINT NOT NULL,
- latest_timestamp BIGINT NOT NULL,
- latest_text TEXT,
- occurrences BIGINT NOT NULL DEFAULT 1,
- firmness VARCHAR(255) NOT NULL DEFAULT 'HARD',
- CONSTRAINT PK_alert_current PRIMARY KEY CLUSTERED (alert_id),
- FOREIGN KEY (definition_id) REFERENCES alert_definition(definition_id),
- FOREIGN KEY (history_id) REFERENCES alert_history(alert_id)
-)
-END
-
-
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('alert_group') AND type = 'U')
-BEGIN
-CREATE TABLE alert_group (
- group_id BIGINT NOT NULL,
- cluster_id BIGINT NOT NULL,
- group_name VARCHAR(255) NOT NULL,
- is_default SMALLINT NOT NULL DEFAULT 0,
- service_name VARCHAR(255),
- CONSTRAINT PK_alert_group PRIMARY KEY CLUSTERED (group_id),
- CONSTRAINT uni_alert_group_name UNIQUE(cluster_id,group_name)
-)
-END
-
-
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('alert_target') AND type = 'U')
-BEGIN
-CREATE TABLE alert_target (
- target_id BIGINT NOT NULL,
- target_name VARCHAR(255) NOT NULL UNIQUE,
- notification_type VARCHAR(64) NOT NULL,
- properties TEXT,
- description VARCHAR(1024),
- is_global SMALLINT NOT NULL DEFAULT 0,
- is_enabled SMALLINT NOT NULL DEFAULT 1,
- CONSTRAINT PK_alert_target PRIMARY KEY CLUSTERED (target_id)
-)
-END
-
-
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('alert_target_states') AND type = 'U')
-BEGIN
-CREATE TABLE alert_target_states (
- target_id BIGINT NOT NULL,
- alert_state VARCHAR(255) NOT NULL,
- FOREIGN KEY (target_id) REFERENCES alert_target(target_id)
-)
-END
-
-
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('alert_group_target') AND type = 'U')
-BEGIN
-CREATE TABLE alert_group_target (
- group_id BIGINT NOT NULL,
- target_id BIGINT NOT NULL,
- CONSTRAINT PK_alert_group_target PRIMARY KEY CLUSTERED (group_id, target_id),
- FOREIGN KEY (group_id) REFERENCES alert_group(group_id),
- FOREIGN KEY (target_id) REFERENCES alert_target(target_id)
-)
-END
-
-
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('alert_grouping') AND type = 'U')
-BEGIN
-CREATE TABLE alert_grouping (
- definition_id BIGINT NOT NULL,
- group_id BIGINT NOT NULL,
- CONSTRAINT PK_alert_grouping PRIMARY KEY CLUSTERED (group_id, definition_id),
- FOREIGN KEY (definition_id) REFERENCES alert_definition(definition_id),
- FOREIGN KEY (group_id) REFERENCES alert_group(group_id)
-)
-END
-
-
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('alert_notice') AND type = 'U')
-BEGIN
-CREATE TABLE alert_notice (
- notification_id BIGINT NOT NULL,
- target_id BIGINT NOT NULL,
- history_id BIGINT NOT NULL,
- notify_state VARCHAR(255) NOT NULL,
- uuid VARCHAR(64) NOT NULL UNIQUE,
- CONSTRAINT PK_alert_notice PRIMARY KEY CLUSTERED (notification_id),
- FOREIGN KEY (target_id) REFERENCES alert_target(target_id),
- FOREIGN KEY (history_id) REFERENCES alert_history(alert_id)
-)
-END
-
-
-IF NOT EXISTS (SELECT 1 FROM sys.indexes WHERE name = 'idx_alert_history_def_id')
-BEGIN
-CREATE INDEX idx_alert_history_def_id on alert_history(alert_definition_id)
-END
-
-IF NOT EXISTS (SELECT 1 FROM sys.indexes WHERE name = 'idx_alert_history_service')
-BEGIN
-CREATE INDEX idx_alert_history_service on alert_history(service_name)
-END
-
-IF NOT EXISTS (SELECT 1 FROM sys.indexes WHERE name = 'idx_alert_history_host')
-BEGIN
-CREATE INDEX idx_alert_history_host on alert_history(host_name)
-END
-
-IF NOT EXISTS (SELECT 1 FROM sys.indexes WHERE name = 'idx_alert_history_time')
-BEGIN
-CREATE INDEX idx_alert_history_time on alert_history(alert_timestamp)
-END
-
-IF NOT EXISTS (SELECT 1 FROM sys.indexes WHERE name = 'idx_alert_history_state')
-BEGIN
-CREATE INDEX idx_alert_history_state on alert_history(alert_state)
-END
-
-IF NOT EXISTS (SELECT 1 FROM sys.indexes WHERE name = 'idx_alert_group_name')
-BEGIN
-CREATE INDEX idx_alert_group_name on alert_group(group_name)
-END
-
-IF NOT EXISTS (SELECT 1 FROM sys.indexes WHERE name = 'idx_alert_notice_state')
-BEGIN
-CREATE INDEX idx_alert_notice_state on alert_notice(notify_state)
-END
-
-
----------inserting some data-----------
-BEGIN TRANSACTION
- DELETE metainfo;
- DELETE adminprivilege;
- DELETE permission_roleauthorization;
- DELETE roleauthorization;
- DELETE adminpermission;
- DELETE users;
- DELETE adminprincipal;
- DELETE adminprincipaltype;
- DELETE adminresource;
- DELETE adminresourcetype;
- DELETE ambari_sequences;
- INSERT INTO ambari_sequences (sequence_name, [sequence_value])
- VALUES
- ('cluster_id_seq', 1),
- ('host_id_seq', 0),
- ('user_id_seq', 2),
- ('group_id_seq', 1),
- ('member_id_seq', 1),
- ('host_role_command_id_seq', 1),
- ('configgroup_id_seq', 1),
- ('requestschedule_id_seq', 1),
- ('resourcefilter_id_seq', 1),
- ('viewentity_id_seq', 0),
- ('operation_level_id_seq', 1),
- ('view_instance_id_seq', 1),
- ('resource_type_id_seq', 4),
- ('resource_id_seq', 2),
- ('principal_type_id_seq', 8),
- ('principal_id_seq', 13),
- ('permission_id_seq', 7),
- ('privilege_id_seq', 1),
- ('alert_definition_id_seq', 0),
- ('alert_group_id_seq', 0),
- ('alert_target_id_seq', 0),
- ('alert_history_id_seq', 0),
- ('alert_notice_id_seq', 0),
- ('alert_current_id_seq', 0),
- ('config_id_seq', 11),
- ('repo_version_id_seq', 0),
- ('host_version_id_seq', 0),
- ('service_config_id_seq', 1),
- ('upgrade_id_seq', 0),
- ('upgrade_group_id_seq', 0),
- ('widget_id_seq', 0),
- ('widget_layout_id_seq', 0),
- ('upgrade_item_id_seq', 0),
- ('stack_id_seq', 0),
- ('extension_id_seq', 0),
- ('link_id_seq', 0),
- ('topology_host_info_id_seq', 0),
- ('topology_host_request_id_seq', 0),
- ('topology_host_task_id_seq', 0),
- ('topology_logical_request_id_seq', 0),
- ('topology_logical_task_id_seq', 0),
- ('topology_request_id_seq', 0),
- ('topology_host_group_id_seq', 0),
- ('setting_id_seq', 0),
- ('hostcomponentstate_id_seq', 0),
- ('servicecomponentdesiredstate_id_seq', 0),
- ('upgrade_history_id_seq', 0),
- ('blueprint_setting_id_seq', 0),
- ('ambari_operation_history_id_seq', 0),
- ('remote_cluster_id_seq', 0),
- ('remote_cluster_service_id_seq', 0),
- ('servicecomponent_version_id_seq', 0),
- ('hostcomponentdesiredstate_id_seq', 0)
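-
- -- The non-zero seeds account for the fixed ids hand-inserted below (the
- -- admin user and the built-in principals, roles and permissions), so
- -- generated ids do not collide with them.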
-
- insert into adminresourcetype (resource_type_id, resource_type_name)
- values
- (1, 'AMBARI'),
- (2, 'CLUSTER'),
- (3, 'VIEW')
-
- insert into adminresource (resource_id, resource_type_id)
- select 1, 1
-
- insert into adminprincipaltype (principal_type_id, principal_type_name)
- values
- (1, 'USER'),
- (2, 'GROUP'),
- (8, 'ROLE')
-
- insert into adminprincipal (principal_id, principal_type_id)
- values
- (1, 1),
- (7, 8),
- (8, 8),
- (9, 8),
- (10, 8),
- (11, 8),
- (12, 8),
- (13, 8)
-
- insert into users(user_id, principal_id, user_name, user_password)
- select 1, 1, 'admin','538916f8943ec225d97a9a86a2c6ec0818c1cd400e09e03b660fdaaec4af29ddbb6f2b1033b81b00'
-
- insert into adminpermission(permission_id, permission_name, resource_type_id, permission_label, principal_id, sort_order)
- values
- (1, 'AMBARI.ADMINISTRATOR', 1, 'Ambari Administrator', 7, 1),
- (2, 'CLUSTER.USER', 2, 'Cluster User', 8, 6),
- (3, 'CLUSTER.ADMINISTRATOR', 2, 'Cluster Administrator', 9, 2),
- (4, 'VIEW.USER', 3, 'View User', 10, 7),
- (5, 'CLUSTER.OPERATOR', 2, 'Cluster Operator', 11, 3),
- (6, 'SERVICE.ADMINISTRATOR', 2, 'Service Administrator', 12, 4),
- (7, 'SERVICE.OPERATOR', 2, 'Service Operator', 13, 5)
-
- INSERT INTO roleauthorization(authorization_id, authorization_name)
- SELECT 'VIEW.USE', 'Use View' UNION ALL
- SELECT 'SERVICE.VIEW_METRICS', 'View metrics' UNION ALL
- SELECT 'SERVICE.VIEW_STATUS_INFO', 'View status information' UNION ALL
- SELECT 'SERVICE.VIEW_CONFIGS', 'View configurations' UNION ALL
- SELECT 'SERVICE.COMPARE_CONFIGS', 'Compare configurations' UNION ALL
- SELECT 'SERVICE.VIEW_ALERTS', 'View service-level alerts' UNION ALL
- SELECT 'SERVICE.START_STOP', 'Start/Stop/Restart Service' UNION ALL
- SELECT 'SERVICE.DECOMMISSION_RECOMMISSION', 'Decommission/recommission' UNION ALL
- SELECT 'SERVICE.RUN_SERVICE_CHECK', 'Run service checks' UNION ALL
- SELECT 'SERVICE.TOGGLE_MAINTENANCE', 'Turn on/off maintenance mode' UNION ALL
- SELECT 'SERVICE.RUN_CUSTOM_COMMAND', 'Perform service-specific tasks' UNION ALL
- SELECT 'SERVICE.MODIFY_CONFIGS', 'Modify configurations' UNION ALL
- SELECT 'SERVICE.MANAGE_ALERTS', 'Manage service-level alerts' UNION ALL
- SELECT 'SERVICE.MANAGE_CONFIG_GROUPS', 'Manage configuration groups' UNION ALL
- SELECT 'SERVICE.MOVE', 'Move service to another host' UNION ALL
- SELECT 'SERVICE.ENABLE_HA', 'Enable HA' UNION ALL
- SELECT 'SERVICE.TOGGLE_ALERTS', 'Enable/disable service-level alerts' UNION ALL
- SELECT 'SERVICE.ADD_DELETE_SERVICES', 'Add/delete services' UNION ALL
- SELECT 'SERVICE.VIEW_OPERATIONAL_LOGS', 'View service operational logs' UNION ALL
- SELECT 'SERVICE.SET_SERVICE_USERS_GROUPS', 'Set service users and groups' UNION ALL
- SELECT 'SERVICE.MANAGE_AUTO_START', 'Manage service auto-start' UNION ALL
- SELECT 'HOST.VIEW_METRICS', 'View metrics' UNION ALL
- SELECT 'HOST.VIEW_STATUS_INFO', 'View status information' UNION ALL
- SELECT 'HOST.VIEW_CONFIGS', 'View configuration' UNION ALL
- SELECT 'HOST.TOGGLE_MAINTENANCE', 'Turn on/off maintenance mode' UNION ALL
- SELECT 'HOST.ADD_DELETE_COMPONENTS', 'Install components' UNION ALL
- SELECT 'HOST.ADD_DELETE_HOSTS', 'Add/Delete hosts' UNION ALL
- SELECT 'CLUSTER.VIEW_METRICS', 'View metrics' UNION ALL
- SELECT 'CLUSTER.VIEW_STATUS_INFO', 'View status information' UNION ALL
- SELECT 'CLUSTER.VIEW_CONFIGS', 'View configuration' UNION ALL
- SELECT 'CLUSTER.VIEW_STACK_DETAILS', 'View stack version details' UNION ALL
- SELECT 'CLUSTER.VIEW_ALERTS', 'View cluster-level alerts' UNION ALL
- SELECT 'CLUSTER.MANAGE_CREDENTIALS', 'Manage external credentials' UNION ALL
- SELECT 'CLUSTER.MODIFY_CONFIGS', 'Modify cluster configurations' UNION ALL
- SELECT 'CLUSTER.MANAGE_ALERTS', 'Manage cluster-level alerts' UNION ALL
- SELECT 'CLUSTER.MANAGE_USER_PERSISTED_DATA', 'Manage cluster-level user persisted data' UNION ALL
- SELECT 'CLUSTER.TOGGLE_ALERTS', 'Enable/disable cluster-level alerts' UNION ALL
- SELECT 'CLUSTER.MANAGE_CONFIG_GROUPS', 'Manage cluster config groups' UNION ALL
- SELECT 'CLUSTER.TOGGLE_KERBEROS', 'Enable/disable Kerberos' UNION ALL
- SELECT 'CLUSTER.UPGRADE_DOWNGRADE_STACK', 'Upgrade/downgrade stack' UNION ALL
- SELECT 'CLUSTER.RUN_CUSTOM_COMMAND', 'Perform custom cluster-level actions' UNION ALL
- SELECT 'CLUSTER.MANAGE_AUTO_START', 'Manage service auto-start configuration' UNION ALL
- SELECT 'CLUSTER.MANAGE_ALERT_NOTIFICATIONS', 'Manage alert notifications configuration' UNION ALL
- SELECT 'AMBARI.ADD_DELETE_CLUSTERS', 'Create new clusters' UNION ALL
- SELECT 'AMBARI.RENAME_CLUSTER', 'Rename clusters' UNION ALL
- SELECT 'AMBARI.MANAGE_SETTINGS', 'Manage settings' UNION ALL
- SELECT 'AMBARI.MANAGE_USERS', 'Manage users' UNION ALL
- SELECT 'AMBARI.MANAGE_GROUPS', 'Manage groups' UNION ALL
- SELECT 'AMBARI.MANAGE_VIEWS', 'Manage Ambari Views' UNION ALL
- SELECT 'AMBARI.ASSIGN_ROLES', 'Assign roles' UNION ALL
- SELECT 'AMBARI.MANAGE_STACK_VERSIONS', 'Manage stack versions' UNION ALL
- SELECT 'AMBARI.EDIT_STACK_REPOS', 'Edit stack repository URLs' UNION ALL
- SELECT 'AMBARI.RUN_CUSTOM_COMMAND', 'Perform custom administrative actions'
-
- -- Set authorizations for View User role
- INSERT INTO permission_roleauthorization(permission_id, authorization_id)
- SELECT permission_id, 'VIEW.USE' FROM adminpermission WHERE permission_name='VIEW.USER'
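-
- -- In these mappings permission_id is resolved by permission_name rather
- -- than hard-coded, so the links survive any renumbering of the
- -- adminpermission rows above.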
-
- -- Set authorizations for Cluster User role
- INSERT INTO permission_roleauthorization(permission_id, authorization_id)
- SELECT permission_id, 'SERVICE.VIEW_METRICS' FROM adminpermission WHERE permission_name='CLUSTER.USER' UNION ALL
- SELECT permission_id, 'SERVICE.VIEW_STATUS_INFO' FROM adminpermission WHERE permission_name='CLUSTER.USER' UNION ALL
- SELECT permission_id, 'SERVICE.VIEW_CONFIGS' FROM adminpermission WHERE permission_name='CLUSTER.USER' UNION ALL
- SELECT permission_id, 'SERVICE.COMPARE_CONFIGS' FROM adminpermission WHERE permission_name='CLUSTER.USER' UNION ALL
- SELECT permission_id, 'SERVICE.VIEW_ALERTS' FROM adminpermission WHERE permission_name='CLUSTER.USER' UNION ALL
- SELECT permission_id, 'HOST.VIEW_METRICS' FROM adminpermission WHERE permission_name='CLUSTER.USER' UNION ALL
- SELECT permission_id, 'HOST.VIEW_STATUS_INFO' FROM adminpermission WHERE permission_name='CLUSTER.USER' UNION ALL
- SELECT permission_id, 'HOST.VIEW_CONFIGS' FROM adminpermission WHERE permission_name='CLUSTER.USER' UNION ALL
- SELECT permission_id, 'CLUSTER.VIEW_METRICS' FROM adminpermission WHERE permission_name='CLUSTER.USER' UNION ALL
- SELECT permission_id, 'CLUSTER.VIEW_STATUS_INFO' FROM adminpermission WHERE permission_name='CLUSTER.USER' UNION ALL
- SELECT permission_id, 'CLUSTER.VIEW_CONFIGS' FROM adminpermission WHERE permission_name='CLUSTER.USER' UNION ALL
- SELECT permission_id, 'CLUSTER.VIEW_STACK_DETAILS' FROM adminpermission WHERE permission_name='CLUSTER.USER' UNION ALL
- SELECT permission_id, 'CLUSTER.VIEW_ALERTS' FROM adminpermission WHERE permission_name='CLUSTER.USER' UNION ALL
- SELECT permission_id, 'CLUSTER.MANAGE_USER_PERSISTED_DATA' FROM adminpermission WHERE permission_name='CLUSTER.USER'
-
- -- Set authorizations for Service Operator role
- INSERT INTO permission_roleauthorization(permission_id, authorization_id)
- SELECT permission_id, 'SERVICE.VIEW_METRICS' FROM adminpermission WHERE permission_name='SERVICE.OPERATOR' UNION ALL
- SELECT permission_id, 'SERVICE.VIEW_STATUS_INFO' FROM adminpermission WHERE permission_name='SERVICE.OPERATOR' UNION ALL
- SELECT permission_id, 'SERVICE.VIEW_CONFIGS' FROM adminpermission WHERE permission_name='SERVICE.OPERATOR' UNION ALL
- SELECT permission_id, 'SERVICE.COMPARE_CONFIGS' FROM adminpermission WHERE permission_name='SERVICE.OPERATOR' UNION ALL
- SELECT permission_id, 'SERVICE.VIEW_ALERTS' FROM adminpermission WHERE permission_name='SERVICE.OPERATOR' UNION ALL
- SELECT permission_id, 'SERVICE.START_STOP' FROM adminpermission WHERE permission_name='SERVICE.OPERATOR' UNION ALL
- SELECT permission_id, 'SERVICE.DECOMMISSION_RECOMMISSION' FROM adminpermission WHERE permission_name='SERVICE.OPERATOR' UNION ALL
- SELECT permission_id, 'SERVICE.RUN_SERVICE_CHECK' FROM adminpermission WHERE permission_name='SERVICE.OPERATOR' UNION ALL
- SELECT permission_id, 'SERVICE.TOGGLE_MAINTENANCE' FROM adminpermission WHERE permission_name='SERVICE.OPERATOR' UNION ALL
- SELECT permission_id, 'SERVICE.RUN_CUSTOM_COMMAND' FROM adminpermission WHERE permission_name='SERVICE.OPERATOR' UNION ALL
- SELECT permission_id, 'HOST.VIEW_METRICS' FROM adminpermission WHERE permission_name='SERVICE.OPERATOR' UNION ALL
- SELECT permission_id, 'HOST.VIEW_STATUS_INFO' FROM adminpermission WHERE permission_name='SERVICE.OPERATOR' UNION ALL
- SELECT permission_id, 'HOST.VIEW_CONFIGS' FROM adminpermission WHERE permission_name='SERVICE.OPERATOR' UNION ALL
- SELECT permission_id, 'CLUSTER.VIEW_METRICS' FROM adminpermission WHERE permission_name='SERVICE.OPERATOR' UNION ALL
- SELECT permission_id, 'CLUSTER.VIEW_STATUS_INFO' FROM adminpermission WHERE permission_name='SERVICE.OPERATOR' UNION ALL
- SELECT permission_id, 'CLUSTER.VIEW_CONFIGS' FROM adminpermission WHERE permission_name='SERVICE.OPERATOR' UNION ALL
- SELECT permission_id, 'CLUSTER.VIEW_STACK_DETAILS' FROM adminpermission WHERE permission_name='SERVICE.OPERATOR' UNION ALL
- SELECT permission_id, 'CLUSTER.VIEW_ALERTS' FROM adminpermission WHERE permission_name='SERVICE.OPERATOR' UNION ALL
- SELECT permission_id, 'CLUSTER.MANAGE_USER_PERSISTED_DATA' FROM adminpermission WHERE permission_name='SERVICE.OPERATOR'
-
- -- Set authorizations for Service Administrator role
- INSERT INTO permission_roleauthorization(permission_id, authorization_id)
- SELECT permission_id, 'SERVICE.VIEW_METRICS' FROM adminpermission WHERE permission_name='SERVICE.ADMINISTRATOR' UNION ALL
- SELECT permission_id, 'SERVICE.VIEW_STATUS_INFO' FROM adminpermission WHERE permission_name='SERVICE.ADMINISTRATOR' UNION ALL
- SELECT permission_id, 'SERVICE.VIEW_CONFIGS' FROM adminpermission WHERE permission_name='SERVICE.ADMINISTRATOR' UNION ALL
- SELECT permission_id, 'SERVICE.COMPARE_CONFIGS' FROM adminpermission WHERE permission_name='SERVICE.ADMINISTRATOR' UNION ALL
- SELECT permission_id, 'SERVICE.VIEW_ALERTS' FROM adminpermission WHERE permission_name='SERVICE.ADMINISTRATOR' UNION ALL
- SELECT permission_id, 'SERVICE.START_STOP' FROM adminpermission WHERE permission_name='SERVICE.ADMINISTRATOR' UNION ALL
- SELECT permission_id, 'SERVICE.DECOMMISSION_RECOMMISSION' FROM adminpermission WHERE permission_name='SERVICE.ADMINISTRATOR' UNION ALL
- SELECT permission_id, 'SERVICE.RUN_SERVICE_CHECK' FROM adminpermission WHERE permission_name='SERVICE.ADMINISTRATOR' UNION ALL
- SELECT permission_id, 'SERVICE.TOGGLE_MAINTENANCE' FROM adminpermission WHERE permission_name='SERVICE.ADMINISTRATOR' UNION ALL
- SELECT permission_id, 'SERVICE.RUN_CUSTOM_COMMAND' FROM adminpermission WHERE permission_name='SERVICE.ADMINISTRATOR' UNION ALL
- SELECT permission_id, 'SERVICE.MODIFY_CONFIGS' FROM adminpermission WHERE permission_name='SERVICE.ADMINISTRATOR' UNION ALL
- SELECT permission_id, 'SERVICE.MANAGE_CONFIG_GROUPS' FROM adminpermission WHERE permission_name='SERVICE.ADMINISTRATOR' UNION ALL
- SELECT permission_id, 'SERVICE.VIEW_OPERATIONAL_LOGS' FROM adminpermission WHERE permission_name='SERVICE.ADMINISTRATOR' UNION ALL
- SELECT permission_id, 'SERVICE.MANAGE_AUTO_START' FROM adminpermission WHERE permission_name='SERVICE.ADMINISTRATOR' UNION ALL
- SELECT permission_id, 'HOST.VIEW_METRICS' FROM adminpermission WHERE permission_name='SERVICE.ADMINISTRATOR' UNION ALL
- SELECT permission_id, 'HOST.VIEW_STATUS_INFO' FROM adminpermission WHERE permission_name='SERVICE.ADMINISTRATOR' UNION ALL
- SELECT permission_id, 'HOST.VIEW_CONFIGS' FROM adminpermission WHERE permission_name='SERVICE.ADMINISTRATOR' UNION ALL
- SELECT permission_id, 'CLUSTER.VIEW_METRICS' FROM adminpermission WHERE permission_name='SERVICE.ADMINISTRATOR' UNION ALL
- SELECT permission_id, 'CLUSTER.VIEW_STATUS_INFO' FROM adminpermission WHERE permission_name='SERVICE.ADMINISTRATOR' UNION ALL
- SELECT permission_id, 'CLUSTER.VIEW_CONFIGS' FROM adminpermission WHERE permission_name='SERVICE.ADMINISTRATOR' UNION ALL
- SELECT permission_id, 'CLUSTER.VIEW_STACK_DETAILS' FROM adminpermission WHERE permission_name='SERVICE.ADMINISTRATOR' UNION ALL
- SELECT permission_id, 'CLUSTER.MANAGE_CONFIG_GROUPS' FROM adminpermission WHERE permission_name='SERVICE.ADMINISTRATOR' UNION ALL
- SELECT permission_id, 'CLUSTER.VIEW_ALERTS' FROM adminpermission WHERE permission_name='SERVICE.ADMINISTRATOR' UNION ALL
- SELECT permission_id, 'CLUSTER.MANAGE_USER_PERSISTED_DATA' FROM adminpermission WHERE permission_name='SERVICE.ADMINISTRATOR'
-
- -- Set authorizations for Cluster Operator role
- INSERT INTO permission_roleauthorization(permission_id, authorization_id)
- SELECT permission_id, 'SERVICE.VIEW_METRICS' FROM adminpermission WHERE permission_name='CLUSTER.OPERATOR' UNION ALL
- SELECT permission_id, 'SERVICE.VIEW_STATUS_INFO' FROM adminpermission WHERE permission_name='CLUSTER.OPERATOR' UNION ALL
- SELECT permission_id, 'SERVICE.VIEW_CONFIGS' FROM adminpermission WHERE permission_name='CLUSTER.OPERATOR' UNION ALL
- SELECT permission_id, 'SERVICE.COMPARE_CONFIGS' FROM adminpermission WHERE permission_name='CLUSTER.OPERATOR' UNION ALL
- SELECT permission_id, 'SERVICE.VIEW_ALERTS' FROM adminpermission WHERE permission_name='CLUSTER.OPERATOR' UNION ALL
- SELECT permission_id, 'SERVICE.START_STOP' FROM adminpermission WHERE permission_name='CLUSTER.OPERATOR' UNION ALL
- SELECT permission_id, 'SERVICE.DECOMMISSION_RECOMMISSION' FROM adminpermission WHERE permission_name='CLUSTER.OPERATOR' UNION ALL
- SELECT permission_id, 'SERVICE.RUN_SERVICE_CHECK' FROM adminpermission WHERE permission_name='CLUSTER.OPERATOR' UNION ALL
- SELECT permission_id, 'SERVICE.TOGGLE_MAINTENANCE' FROM adminpermission WHERE permission_name='CLUSTER.OPERATOR' UNION ALL
- SELECT permission_id, 'SERVICE.RUN_CUSTOM_COMMAND' FROM adminpermission WHERE permission_name='CLUSTER.OPERATOR' UNION ALL
- SELECT permission_id, 'SERVICE.MODIFY_CONFIGS' FROM adminpermission WHERE permission_name='CLUSTER.OPERATOR' UNION ALL
- SELECT permission_id, 'SERVICE.MANAGE_CONFIG_GROUPS' FROM adminpermission WHERE permission_name='CLUSTER.OPERATOR' UNION ALL
- SELECT permission_id, 'SERVICE.MOVE' FROM adminpermission WHERE permission_name='CLUSTER.OPERATOR' UNION ALL
- SELECT permission_id, 'SERVICE.ENABLE_HA' FROM adminpermission WHERE permission_name='CLUSTER.OPERATOR' UNION ALL
- SELECT permission_id, 'SERVICE.VIEW_OPERATIONAL_LOGS' FROM adminpermission WHERE permission_name='CLUSTER.OPERATOR' UNION ALL
- SELECT permission_id, 'SERVICE.MANAGE_AUTO_START' FROM adminpermission WHERE permission_name='CLUSTER.OPERATOR' UNION ALL
- SELECT permission_id, 'HOST.VIEW_METRICS' FROM adminpermission WHERE permission_name='CLUSTER.OPERATOR' UNION ALL
- SELECT permission_id, 'HOST.VIEW_STATUS_INFO' FROM adminpermission WHERE permission_name='CLUSTER.OPERATOR' UNION ALL
- SELECT permission_id, 'HOST.VIEW_CONFIGS' FROM adminpermission WHERE permission_name='CLUSTER.OPERATOR' UNION ALL
- SELECT permission_id, 'HOST.TOGGLE_MAINTENANCE' FROM adminpermission WHERE permission_name='CLUSTER.OPERATOR' UNION ALL
- SELECT permission_id, 'HOST.ADD_DELETE_COMPONENTS' FROM adminpermission WHERE permission_name='CLUSTER.OPERATOR' UNION ALL
- SELECT permission_id, 'HOST.ADD_DELETE_HOSTS' FROM adminpermission WHERE permission_name='CLUSTER.OPERATOR' UNION ALL
- SELECT permission_id, 'CLUSTER.VIEW_METRICS' FROM adminpermission WHERE permission_name='CLUSTER.OPERATOR' UNION ALL
- SELECT permission_id, 'CLUSTER.VIEW_STATUS_INFO' FROM adminpermission WHERE permission_name='CLUSTER.OPERATOR' UNION ALL
- SELECT permission_id, 'CLUSTER.VIEW_CONFIGS' FROM adminpermission WHERE permission_name='CLUSTER.OPERATOR' UNION ALL
- SELECT permission_id, 'CLUSTER.VIEW_STACK_DETAILS' FROM adminpermission WHERE permission_name='CLUSTER.OPERATOR' UNION ALL
- SELECT permission_id, 'CLUSTER.MANAGE_CONFIG_GROUPS' FROM adminpermission WHERE permission_name='CLUSTER.OPERATOR' UNION ALL
- SELECT permission_id, 'CLUSTER.VIEW_ALERTS' FROM adminpermission WHERE permission_name='CLUSTER.OPERATOR' UNION ALL
- SELECT permission_id, 'CLUSTER.MANAGE_CREDENTIALS' FROM adminpermission WHERE permission_name='CLUSTER.OPERATOR' UNION ALL
- SELECT permission_id, 'CLUSTER.MANAGE_AUTO_START' FROM adminpermission WHERE permission_name='CLUSTER.OPERATOR' UNION ALL
- SELECT permission_id, 'CLUSTER.MANAGE_USER_PERSISTED_DATA' FROM adminpermission WHERE permission_name='CLUSTER.OPERATOR'
-
- -- Set authorizations for Cluster Administrator role
- INSERT INTO permission_roleauthorization(permission_id, authorization_id)
- SELECT permission_id, 'SERVICE.VIEW_METRICS' FROM adminpermission WHERE permission_name='CLUSTER.ADMINISTRATOR' UNION ALL
- SELECT permission_id, 'SERVICE.VIEW_STATUS_INFO' FROM adminpermission WHERE permission_name='CLUSTER.ADMINISTRATOR' UNION ALL
- SELECT permission_id, 'SERVICE.VIEW_CONFIGS' FROM adminpermission WHERE permission_name='CLUSTER.ADMINISTRATOR' UNION ALL
- SELECT permission_id, 'SERVICE.COMPARE_CONFIGS' FROM adminpermission WHERE permission_name='CLUSTER.ADMINISTRATOR' UNION ALL
- SELECT permission_id, 'SERVICE.VIEW_ALERTS' FROM adminpermission WHERE permission_name='CLUSTER.ADMINISTRATOR' UNION ALL
- SELECT permission_id, 'SERVICE.START_STOP' FROM adminpermission WHERE permission_name='CLUSTER.ADMINISTRATOR' UNION ALL
- SELECT permission_id, 'SERVICE.DECOMMISSION_RECOMMISSION' FROM adminpermission WHERE permission_name='CLUSTER.ADMINISTRATOR' UNION ALL
- SELECT permission_id, 'SERVICE.RUN_SERVICE_CHECK' FROM adminpermission WHERE permission_name='CLUSTER.ADMINISTRATOR' UNION ALL
- SELECT permission_id, 'SERVICE.TOGGLE_MAINTENANCE' FROM adminpermission WHERE permission_name='CLUSTER.ADMINISTRATOR' UNION ALL
- SELECT permission_id, 'SERVICE.RUN_CUSTOM_COMMAND' FROM adminpermission WHERE permission_name='CLUSTER.ADMINISTRATOR' UNION ALL
- SELECT permission_id, 'SERVICE.MODIFY_CONFIGS' FROM adminpermission WHERE permission_name='CLUSTER.ADMINISTRATOR' UNION ALL
- SELECT permission_id, 'SERVICE.MANAGE_CONFIG_GROUPS' FROM adminpermission WHERE permission_name='CLUSTER.ADMINISTRATOR' UNION ALL
- SELECT permission_id, 'SERVICE.MANAGE_ALERTS' FROM adminpermission WHERE permission_name='CLUSTER.ADMINISTRATOR' UNION ALL
- SELECT permission_id, 'SERVICE.MOVE' FROM adminpermission WHERE permission_name='CLUSTER.ADMINISTRATOR' UNION ALL
- SELECT permission_id, 'SERVICE.ENABLE_HA' FROM adminpermission WHERE permission_name='CLUSTER.ADMINISTRATOR' UNION ALL
- SELECT permission_id, 'SERVICE.TOGGLE_ALERTS' FROM adminpermission WHERE permission_name='CLUSTER.ADMINISTRATOR' UNION ALL
- SELECT permission_id, 'SERVICE.ADD_DELETE_SERVICES' FROM adminpermission WHERE permission_name='CLUSTER.ADMINISTRATOR' UNION ALL
- SELECT permission_id, 'SERVICE.VIEW_OPERATIONAL_LOGS' FROM adminpermission WHERE permission_name='CLUSTER.ADMINISTRATOR' UNION ALL
- SELECT permission_id, 'SERVICE.SET_SERVICE_USERS_GROUPS' FROM adminpermission WHERE permission_name='CLUSTER.ADMINISTRATOR' UNION ALL
- SELECT permission_id, 'SERVICE.MANAGE_AUTO_START' FROM adminpermission WHERE permission_name='CLUSTER.ADMINISTRATOR' UNION ALL
- SELECT permission_id, 'HOST.VIEW_METRICS' FROM adminpermission WHERE permission_name='CLUSTER.ADMINISTRATOR' UNION ALL
- SELECT permission_id, 'HOST.VIEW_STATUS_INFO' FROM adminpermission WHERE permission_name='CLUSTER.ADMINISTRATOR' UNION ALL
- SELECT permission_id, 'HOST.VIEW_CONFIGS' FROM adminpermission WHERE permission_name='CLUSTER.ADMINISTRATOR' UNION ALL
- SELECT permission_id, 'HOST.TOGGLE_MAINTENANCE' FROM adminpermission WHERE permission_name='CLUSTER.ADMINISTRATOR' UNION ALL
- SELECT permission_id, 'HOST.ADD_DELETE_COMPONENTS' FROM adminpermission WHERE permission_name='CLUSTER.ADMINISTRATOR' UNION ALL
- SELECT permission_id, 'HOST.ADD_DELETE_HOSTS' FROM adminpermission WHERE permission_name='CLUSTER.ADMINISTRATOR' UNION ALL
- SELECT permission_id, 'CLUSTER.VIEW_METRICS' FROM adminpermission WHERE permission_name='CLUSTER.ADMINISTRATOR' UNION ALL
- SELECT permission_id, 'CLUSTER.VIEW_STATUS_INFO' FROM adminpermission WHERE permission_name='CLUSTER.ADMINISTRATOR' UNION ALL
- SELECT permission_id, 'CLUSTER.VIEW_CONFIGS' FROM adminpermission WHERE permission_name='CLUSTER.ADMINISTRATOR' UNION ALL
- SELECT permission_id, 'CLUSTER.VIEW_STACK_DETAILS' FROM adminpermission WHERE permission_name='CLUSTER.ADMINISTRATOR' UNION ALL
- SELECT permission_id, 'CLUSTER.VIEW_ALERTS' FROM adminpermission WHERE permission_name='CLUSTER.ADMINISTRATOR' UNION ALL
- SELECT permission_id, 'CLUSTER.MANAGE_CREDENTIALS' FROM adminpermission WHERE permission_name='CLUSTER.ADMINISTRATOR' UNION ALL
- SELECT permission_id, 'CLUSTER.MODIFY_CONFIGS' FROM adminpermission WHERE permission_name='CLUSTER.ADMINISTRATOR' UNION ALL
- SELECT permission_id, 'CLUSTER.MANAGE_ALERTS' FROM adminpermission WHERE permission_name='CLUSTER.ADMINISTRATOR' UNION ALL
- SELECT permission_id, 'CLUSTER.TOGGLE_ALERTS' FROM adminpermission WHERE permission_name='CLUSTER.ADMINISTRATOR' UNION ALL
- SELECT permission_id, 'CLUSTER.TOGGLE_KERBEROS' FROM adminpermission WHERE permission_name='CLUSTER.ADMINISTRATOR' UNION ALL
- SELECT permission_id, 'CLUSTER.MANAGE_CONFIG_GROUPS' FROM adminpermission WHERE permission_name='CLUSTER.ADMINISTRATOR' UNION ALL
- SELECT permission_id, 'CLUSTER.UPGRADE_DOWNGRADE_STACK' FROM adminpermission WHERE permission_name='CLUSTER.ADMINISTRATOR' UNION ALL
- SELECT permission_id, 'CLUSTER.MANAGE_USER_PERSISTED_DATA' FROM adminpermission WHERE permission_name='CLUSTER.ADMINISTRATOR' UNION ALL
- SELECT permission_id, 'CLUSTER.MANAGE_AUTO_START' FROM adminpermission WHERE permission_name='CLUSTER.ADMINISTRATOR' UNION ALL
- SELECT permission_id, 'CLUSTER.MANAGE_ALERT_NOTIFICATIONS' FROM adminpermission WHERE permission_name='CLUSTER.ADMINISTRATOR' UNION ALL
- SELECT permission_id, 'CLUSTER.RUN_CUSTOM_COMMAND' FROM adminpermission WHERE permission_name='CLUSTER.ADMINISTRATOR'
-
- -- Set authorizations for Administrator role
- INSERT INTO permission_roleauthorization(permission_id, authorization_id)
- SELECT permission_id, 'VIEW.USE' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
- SELECT permission_id, 'SERVICE.VIEW_METRICS' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
- SELECT permission_id, 'SERVICE.VIEW_STATUS_INFO' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
- SELECT permission_id, 'SERVICE.VIEW_CONFIGS' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
- SELECT permission_id, 'SERVICE.COMPARE_CONFIGS' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
- SELECT permission_id, 'SERVICE.VIEW_ALERTS' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
- SELECT permission_id, 'SERVICE.START_STOP' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
- SELECT permission_id, 'SERVICE.DECOMMISSION_RECOMMISSION' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
- SELECT permission_id, 'SERVICE.RUN_SERVICE_CHECK' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
- SELECT permission_id, 'SERVICE.TOGGLE_MAINTENANCE' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
- SELECT permission_id, 'SERVICE.RUN_CUSTOM_COMMAND' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
- SELECT permission_id, 'SERVICE.MODIFY_CONFIGS' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
- SELECT permission_id, 'SERVICE.MANAGE_CONFIG_GROUPS' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
- SELECT permission_id, 'SERVICE.MANAGE_ALERTS' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
- SELECT permission_id, 'SERVICE.MOVE' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
- SELECT permission_id, 'SERVICE.ENABLE_HA' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
- SELECT permission_id, 'SERVICE.TOGGLE_ALERTS' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
- SELECT permission_id, 'SERVICE.ADD_DELETE_SERVICES' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
- SELECT permission_id, 'SERVICE.VIEW_OPERATIONAL_LOGS' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
- SELECT permission_id, 'SERVICE.SET_SERVICE_USERS_GROUPS' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
- SELECT permission_id, 'SERVICE.MANAGE_AUTO_START' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
- SELECT permission_id, 'HOST.VIEW_METRICS' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
- SELECT permission_id, 'HOST.VIEW_STATUS_INFO' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
- SELECT permission_id, 'HOST.VIEW_CONFIGS' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
- SELECT permission_id, 'HOST.TOGGLE_MAINTENANCE' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
- SELECT permission_id, 'HOST.ADD_DELETE_COMPONENTS' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
- SELECT permission_id, 'HOST.ADD_DELETE_HOSTS' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
- SELECT permission_id, 'CLUSTER.VIEW_METRICS' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
- SELECT permission_id, 'CLUSTER.VIEW_STATUS_INFO' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
- SELECT permission_id, 'CLUSTER.VIEW_CONFIGS' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
- SELECT permission_id, 'CLUSTER.VIEW_STACK_DETAILS' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
- SELECT permission_id, 'CLUSTER.VIEW_ALERTS' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
- SELECT permission_id, 'CLUSTER.MANAGE_CREDENTIALS' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
- SELECT permission_id, 'CLUSTER.MODIFY_CONFIGS' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
- SELECT permission_id, 'CLUSTER.MANAGE_ALERTS' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
- SELECT permission_id, 'CLUSTER.MANAGE_CONFIG_GROUPS' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
- SELECT permission_id, 'CLUSTER.TOGGLE_ALERTS' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
- SELECT permission_id, 'CLUSTER.TOGGLE_KERBEROS' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
- SELECT permission_id, 'CLUSTER.UPGRADE_DOWNGRADE_STACK' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
- SELECT permission_id, 'CLUSTER.MANAGE_USER_PERSISTED_DATA' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
- SELECT permission_id, 'CLUSTER.MANAGE_AUTO_START' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
- SELECT permission_id, 'CLUSTER.MANAGE_ALERT_NOTIFICATIONS' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
- SELECT permission_id, 'CLUSTER.RUN_CUSTOM_COMMAND' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
- SELECT permission_id, 'AMBARI.ADD_DELETE_CLUSTERS' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
- SELECT permission_id, 'AMBARI.RENAME_CLUSTER' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
- SELECT permission_id, 'AMBARI.MANAGE_SETTINGS' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
- SELECT permission_id, 'AMBARI.MANAGE_USERS' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
- SELECT permission_id, 'AMBARI.MANAGE_GROUPS' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
- SELECT permission_id, 'AMBARI.MANAGE_VIEWS' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
- SELECT permission_id, 'AMBARI.ASSIGN_ROLES' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
- SELECT permission_id, 'AMBARI.MANAGE_STACK_VERSIONS' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
- SELECT permission_id, 'AMBARI.EDIT_STACK_REPOS' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
- SELECT permission_id, 'AMBARI.RUN_CUSTOM_COMMAND' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR'
-
- insert into adminprivilege (privilege_id, permission_id, resource_id, principal_id)
- select 1, 1, 1, 1
-
- insert into metainfo(metainfo_key, metainfo_value)
- select 'version','${ambariSchemaVersion}'
-COMMIT TRANSACTION
-
--- Quartz tables
-
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('qrtz_job_details') AND type = 'U')
-BEGIN
-CREATE TABLE qrtz_job_details
- (
- SCHED_NAME VARCHAR(120) NOT NULL,
- JOB_NAME VARCHAR(200) NOT NULL,
- JOB_GROUP VARCHAR(200) NOT NULL,
- DESCRIPTION VARCHAR(250) NULL,
- JOB_CLASS_NAME VARCHAR(250) NOT NULL,
- IS_DURABLE BIT NOT NULL,
- IS_NONCONCURRENT BIT NOT NULL,
- IS_UPDATE_DATA BIT NOT NULL,
- REQUESTS_RECOVERY BIT NOT NULL,
- JOB_DATA VARBINARY(MAX) NULL,
- PRIMARY KEY CLUSTERED (SCHED_NAME,JOB_NAME,JOB_GROUP)
-)
-END
-
-
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('qrtz_triggers') AND type = 'U')
-BEGIN
-CREATE TABLE qrtz_triggers
- (
- SCHED_NAME VARCHAR(120) NOT NULL,
- TRIGGER_NAME VARCHAR(200) NOT NULL,
- TRIGGER_GROUP VARCHAR(200) NOT NULL,
- JOB_NAME VARCHAR(200) NOT NULL,
- JOB_GROUP VARCHAR(200) NOT NULL,
- DESCRIPTION VARCHAR(
<TRUNCATED>
[04/31] ambari git commit: AMBARI-22168 Move service metrics to separate tab. (atkach)
Posted by jl...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/6eb273e1/ambari-web/test/views/main/service/info/summary_test.js
----------------------------------------------------------------------
diff --git a/ambari-web/test/views/main/service/info/summary_test.js b/ambari-web/test/views/main/service/info/summary_test.js
index 41f2992..9498230 100644
--- a/ambari-web/test/views/main/service/info/summary_test.js
+++ b/ambari-web/test/views/main/service/info/summary_test.js
@@ -30,9 +30,7 @@ describe('App.MainServiceInfoSummaryView', function() {
id: 'HDFS',
serviceName: 'HDFS',
hostComponents: []
- }),
- getActiveWidgetLayout: Em.K,
- loadWidgetLayouts: Em.K
+ })
}),
alertsController: Em.Object.create(),
service: Em.Object.create()
@@ -523,281 +521,4 @@ describe('App.MainServiceInfoSummaryView', function() {
)).to.be.true;
});
});
-
- describe("#constructGraphObjects()", function() {
- var mock = Em.Object.create({
- isServiceWithWidgets: false
- });
-
- beforeEach(function() {
- sinon.stub(App.StackService, 'find').returns(mock);
- sinon.stub(view, 'getUserPref').returns({
- complete: function(callback){callback();}
- })
- });
- afterEach(function() {
- App.StackService.find.restore();
- view.getUserPref.restore();
- });
-
- it("metrics not loaded", function() {
- mock.set('isServiceWithWidgets', false);
- view.constructGraphObjects(null);
- expect(view.get('isServiceMetricLoaded')).to.be.false;
- expect(view.getUserPref.called).to.be.false;
- });
-
- it("metrics loaded", function() {
- App.ChartServiceMetricsG1 = Em.Object.extend();
- mock.set('isServiceWithWidgets', true);
- view.constructGraphObjects(['G1']);
- expect(view.get('isServiceMetricLoaded')).to.be.true;
- expect(view.getUserPref.calledOnce).to.be.true;
- expect(view.get('serviceMetricGraphs')).to.not.be.empty;
- });
- });
-
- describe("#getUserPrefSuccessCallback()", function() {
-
- it("currentTimeRangeIndex should be set", function() {
- view.getUserPrefSuccessCallback(1);
- expect(view.get('currentTimeRangeIndex')).to.equal(1);
- });
- });
-
- describe("#getUserPrefErrorCallback()", function() {
-
- beforeEach(function() {
- sinon.stub(view, 'postUserPref');
- });
- afterEach(function() {
- view.postUserPref.restore();
- });
-
- it("request.status = 404", function() {
- view.getUserPrefErrorCallback({status: 404});
- expect(view.get('currentTimeRangeIndex')).to.equal(0);
- expect(view.postUserPref.calledOnce).to.be.true;
- });
-
- it("request.status = 403", function() {
- view.getUserPrefErrorCallback({status: 403});
- expect(view.postUserPref.called).to.be.false;
- });
- });
-
- describe("#widgetActions", function() {
-
- beforeEach(function() {
- this.mock = sinon.stub(App, 'isAuthorized');
- view.setProperties({
- staticWidgetLayoutActions: [{id: 1}],
- staticAdminPrivelegeWidgetActions: [{id: 2}],
- staticGeneralWidgetActions: [{id: 3}]
- });
- });
- afterEach(function() {
- this.mock.restore();
- });
-
- it("not authorized", function() {
- this.mock.returns(false);
- view.propertyDidChange('widgetActions');
- expect(view.get('widgetActions').mapProperty('id')).to.eql([3]);
- });
-
- it("is authorized", function() {
- this.mock.returns(true);
- App.supports.customizedWidgetLayout = true;
- view.propertyDidChange('widgetActions');
- expect(view.get('widgetActions').mapProperty('id')).to.eql([1, 2, 3]);
- });
- });
-
- describe("#doWidgetAction()", function() {
-
- beforeEach(function() {
- view.set('controller.action1', Em.K);
- sinon.stub(view.get('controller'), 'action1');
- });
- afterEach(function() {
- view.get('controller').action1.restore();
- });
-
- it("action exist", function() {
- view.doWidgetAction({context: 'action1'});
- expect(view.get('controller').action1.calledOnce).to.be.true;
- });
- });
-
- describe("#setTimeRange", function() {
-
- it("range = 0", function() {
- var widget = Em.Object.create({
- widgetType: 'GRAPH',
- properties: {
- time_range: '0'
- }
- });
- view.set('controller.widgets', [widget]);
- view.setTimeRange({context: {value: '0'}});
- expect(widget.get('properties').time_range).to.be.equal('0')
- });
-
- it("range = 1", function() {
- var widget = Em.Object.create({
- widgetType: 'GRAPH',
- properties: {
- time_range: 0
- }
- });
- view.set('controller.widgets', [widget]);
- view.setTimeRange({context: {value: '1'}});
- expect(widget.get('properties').time_range).to.be.equal('1')
- });
- });
-
- describe("#makeSortable()", function() {
- var mock = {
- on: function(arg1, arg2, callback) {
- callback();
- },
- off: Em.K,
- sortable: function() {
- return {
- disableSelection: Em.K
- }
- }
- };
-
- beforeEach(function() {
- sinon.stub(window, '$').returns(mock);
- sinon.spy(mock, 'on');
- sinon.spy(mock, 'off');
- sinon.spy(mock, 'sortable');
- view.makeSortable();
- });
- afterEach(function() {
- window.$.restore();
- mock.on.restore();
- mock.off.restore();
- mock.sortable.restore();
- });
-
- it("on() should be called", function() {
- expect(mock.on.calledWith('DOMNodeInserted', '#widget_layout')).to.be.true;
- });
-
- it("sortable() should be called", function() {
- expect(mock.sortable.calledOnce).to.be.true;
- });
-
- it("off() should be called", function() {
- expect(mock.off.calledWith('DOMNodeInserted', '#widget_layout')).to.be.true;
- });
- });
-
- describe('#didInsertElement', function () {
-
- beforeEach(function () {
- sinon.stub(view, 'constructGraphObjects', Em.K);
- this.mock = sinon.stub(App, 'get');
- sinon.stub(view, 'getServiceModel');
- sinon.stub(view.get('controller'), 'getActiveWidgetLayout');
- sinon.stub(view.get('controller'), 'loadWidgetLayouts');
- sinon.stub(view, 'makeSortable');
- sinon.stub(view, 'addWidgetTooltip');
-
- });
-
- afterEach(function () {
- view.constructGraphObjects.restore();
- this.mock.restore();
- view.getServiceModel.restore();
- view.get('controller').getActiveWidgetLayout.restore();
- view.get('controller').loadWidgetLayouts.restore();
- view.makeSortable.restore();
- view.addWidgetTooltip.restore();
- });
-
- it("getServiceModel should be called", function() {
- view.didInsertElement();
- expect(view.getServiceModel.calledOnce).to.be.true;
- });
- it("addWidgetTooltip should be called", function() {
- view.didInsertElement();
- expect(view.addWidgetTooltip.calledOnce).to.be.true;
- });
- it("makeSortable should be called", function() {
- view.didInsertElement();
- expect(view.makeSortable.calledOnce).to.be.true;
- });
- it("getActiveWidgetLayout should be called", function() {
- view.didInsertElement();
- expect(view.get('controller').getActiveWidgetLayout.calledOnce).to.be.true;
- });
-
- describe("serviceName is null, metrics not supported, widgets not supported", function() {
- beforeEach(function () {
- view.set('controller.content.serviceName', null);
- this.mock.returns(false);
- view.didInsertElement();
- });
-
- it("loadWidgetLayouts should not be called", function() {
- expect(view.get('controller').loadWidgetLayouts.called).to.be.false;
- });
- it("constructGraphObjects should not be called", function() {
- expect(view.constructGraphObjects.called).to.be.false;
- });
- });
-
- describe("serviceName is set, metrics is supported, widgets is supported", function() {
- beforeEach(function () {
- view.set('controller.content.serviceName', 'S1');
- this.mock.returns(true);
- view.didInsertElement();
- });
-
- it("loadWidgetLayouts should be called", function() {
- expect(view.get('controller').loadWidgetLayouts.calledOnce).to.be.true;
- });
- it("constructGraphObjects should be called", function() {
- expect(view.constructGraphObjects.calledOnce).to.be.true;
- });
- });
- });
-
- describe("#addWidgetTooltip()", function() {
- var mock = {
- hoverIntent: Em.K
- };
-
- beforeEach(function() {
- sinon.stub(Em.run, 'later', function(arg1, callback) {
- callback();
- });
- sinon.stub(App, 'tooltip');
- sinon.stub(window, '$').returns(mock);
- sinon.spy(mock, 'hoverIntent');
- view.addWidgetTooltip();
- });
- afterEach(function() {
- Em.run.later.restore();
- App.tooltip.restore();
- window.$.restore();
- mock.hoverIntent.restore();
- });
-
- it("Em.run.later should be called", function() {
- expect(Em.run.later.calledOnce).to.be.true;
- });
- it("App.tooltip should be called", function() {
- expect(App.tooltip.calledOnce).to.be.true;
- });
- it("hoverIntent should be called", function() {
- expect(mock.hoverIntent.calledOnce).to.be.true;
- });
- });
-
});
\ No newline at end of file
[24/31] ambari git commit: AMBARI-22160. hadooplzo package installation failed on devdeploys (aonishuk)
Posted by jl...@apache.org.
AMBARI-22160. hadooplzo package installation failed on devdeploys (aonishuk)
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/b1295362
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/b1295362
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/b1295362
Branch: refs/heads/branch-feature-AMBARI-14714
Commit: b1295362b9e702dd37bbb3995437d0c4e311ce9f
Parents: cec9f73
Author: Andrew Onishuk <ao...@hortonworks.com>
Authored: Tue Oct 10 16:09:28 2017 +0300
Committer: Andrew Onishuk <ao...@hortonworks.com>
Committed: Tue Oct 10 16:09:28 2017 +0300
----------------------------------------------------------------------
.../libraries/script/script.py | 45 ++++++++++++++------
.../HDFS/2.1.0.2.0/package/scripts/hdfs.py | 10 +++--
.../2.1.0.2.0/package/scripts/install_params.py | 6 ---
.../2.1.0.2.0/package/scripts/params_linux.py | 2 -
.../HDFS/3.0.0.3.0/package/scripts/hdfs.py | 10 +++--
.../3.0.0.3.0/package/scripts/install_params.py | 6 ---
.../3.0.0.3.0/package/scripts/params_linux.py | 2 -
.../OOZIE/4.0.0.2.0/package/scripts/oozie.py | 6 ++-
.../4.0.0.2.0/package/scripts/params_linux.py | 3 --
.../OOZIE/4.2.0.3.0/package/scripts/oozie.py | 5 ++-
.../4.2.0.3.0/package/scripts/params_linux.py | 3 --
.../stacks/2.0.6/HBASE/test_hbase_master.py | 2 +
.../src/test/python/stacks/utils/RMFTestCase.py | 4 +-
13 files changed, 56 insertions(+), 48 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ambari/blob/b1295362/ambari-common/src/main/python/resource_management/libraries/script/script.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/script/script.py b/ambari-common/src/main/python/resource_management/libraries/script/script.py
index d5b4469..bf8c0dc 100644
--- a/ambari-common/src/main/python/resource_management/libraries/script/script.py
+++ b/ambari-common/src/main/python/resource_management/libraries/script/script.py
@@ -501,6 +501,7 @@ class Script(object):
Script.stack_version_from_distro_select = pkg_provider.get_installed_package_version(
stack_tools.get_stack_tool_package(stack_tools.STACK_SELECTOR_NAME))
+
return Script.stack_version_from_distro_select
@@ -525,22 +526,20 @@ class Script(object):
"""
This function replaces ${stack_version} placeholder with actual version. If the package
version is passed from the server, use that as an absolute truth.
-
+
:param name: name of the package
:param repo_version: actual version of the repo currently being installed
"""
- stack_version_package_formatted = ""
+ if not STACK_VERSION_PLACEHOLDER in name:
+ return name
- if not repo_version:
- repo_version = self.get_stack_version_before_packages_installed()
+ stack_version_package_formatted = ""
package_delimiter = '-' if OSCheck.is_ubuntu_family() else '_'
# repositoryFile is the truth
# package_version should be made to the form W_X_Y_Z_nnnn
package_version = default("repositoryFile/repoVersion", None)
- if package_version is not None:
- package_version = package_version.replace('.', package_delimiter).replace('-', package_delimiter)
# TODO remove legacy checks
if package_version is None:
@@ -550,6 +549,17 @@ class Script(object):
if package_version is None:
package_version = default("hostLevelParams/package_version", None)
+ package_version = None
+ if (package_version is None or '-' not in package_version) and default('/repositoryFile', None):
+ self.load_available_packages()
+ package_name = self.get_package_from_available(name, self.available_packages_in_repos)
+ if package_name is None:
+ raise Fail("Cannot match package for regexp name {0}. Available packages: {1}".format(name, self.available_packages_in_repos))
+ return package_name
+
+ if package_version is not None:
+ package_version = package_version.replace('.', package_delimiter).replace('-', package_delimiter)
+
# The cluster effective version comes down when the version is known after the initial
# install. In that case we should not be guessing which version when invoking INSTALL, but
# use the supplied version to build the package_version
@@ -568,6 +578,7 @@ class Script(object):
# Wildcards cause a lot of troubles with installing packages, if the version contains wildcards we try to specify it.
if not package_version or '*' in package_version:
+ repo_version = self.get_stack_version_before_packages_installed()
stack_version_package_formatted = repo_version.replace('.', package_delimiter).replace('-', package_delimiter) if STACK_VERSION_PLACEHOLDER in name else name
package_name = name.replace(STACK_VERSION_PLACEHOLDER, stack_version_package_formatted)
@@ -760,6 +771,19 @@ class Script(object):
"""
self.install_packages(env)
+ def load_available_packages(self):
+ if self.available_packages_in_repos:
+ return self.available_packages_in_repos
+
+
+ pkg_provider = get_provider("Package")
+ try:
+ self.available_packages_in_repos = pkg_provider.get_available_packages_in_repos(self.get_config()['repositoryFile']['repositories'])
+ except Exception as err:
+ Logger.exception("Unable to load available packages")
+ self.available_packages_in_repos = []
+
+
def install_packages(self, env):
"""
List of packages that are required by the service is received from the server
@@ -782,17 +806,11 @@ class Script(object):
package_list_str = config['hostLevelParams']['package_list']
agent_stack_retry_on_unavailability = bool(config['hostLevelParams']['agent_stack_retry_on_unavailability'])
agent_stack_retry_count = int(config['hostLevelParams']['agent_stack_retry_count'])
- pkg_provider = get_provider("Package")
- try:
- available_packages_in_repos = pkg_provider.get_available_packages_in_repos(config['repositoryFile']['repositories'])
- except Exception as err:
- Logger.exception("Unable to load available packages")
- available_packages_in_repos = []
if isinstance(package_list_str, basestring) and len(package_list_str) > 0:
package_list = json.loads(package_list_str)
for package in package_list:
if self.check_package_condition(package):
- name = self.get_package_from_available(package['name'], available_packages_in_repos)
+ name = self.format_package_name(package['name'])
# HACK: On Windows, only install ambari-metrics packages using Choco Package Installer
# TODO: Update this once choco packages for hadoop are created. This is because, service metainfo.xml support
# <osFamily>any<osFamily> which would cause installation failure on Windows.
@@ -1092,5 +1110,6 @@ class Script(object):
def __init__(self):
+ self.available_packages_in_repos = []
if Script.instance is not None:
raise Fail("An instantiation already exists! Use, get_instance() method.")
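As a gloss on the format_package_name() rework above: the new resolution order is (1) return names without the ${stack_version} placeholder untouched, (2) trust a server-supplied repository version when one is present, and (3) otherwise match the templated name against the packages the configured repos actually advertise. Below is a minimal standalone sketch of that order, assuming hypothetical helper names and a simple prefix match in place of the real get_package_from_available() regexp matching:

    # Sketch only: illustrates the resolution order of the reworked
    # format_package_name(); names and the prefix match are simplifications.
    STACK_VERSION_PLACEHOLDER = "${stack_version}"

    def resolve_package_name(name, repo_version, available_packages, is_ubuntu=False):
        if STACK_VERSION_PLACEHOLDER not in name:
            return name  # plain names pass through untouched

        delim = '-' if is_ubuntu else '_'
        if repo_version:
            # server-supplied version is the absolute truth: 2.6.3.0-235 -> 2_6_3_0_235
            formatted = repo_version.replace('.', delim).replace('-', delim)
            return name.replace(STACK_VERSION_PLACEHOLDER, formatted)

        # no usable version: fall back to whatever the repos actually offer
        prefix = name.split(STACK_VERSION_PLACEHOLDER)[0]
        for candidate in available_packages:
            if candidate.startswith(prefix):
                return candidate
        raise ValueError("Cannot match package for %s. Available: %s"
                         % (name, available_packages))

    print(resolve_package_name("hadooplzo_${stack_version}", "2.6.3.0-235", []))
    # -> hadooplzo_2_6_3_0_235

Deferring get_stack_version_before_packages_installed() to the wildcard fallback, as the commit does, also avoids shelling out to the distro selector when the repository file already names an exact version.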
http://git-wip-us.apache.org/repos/asf/ambari/blob/b1295362/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs.py
index e054209..07c7616 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs.py
@@ -25,6 +25,7 @@ from resource_management.core.resources import Package
from resource_management.core.source import Template
from resource_management.core.resources.service import ServiceConfig
from resource_management.libraries.resources.xml_config import XmlConfig
+from resource_management.libraries.functions.get_lzo_packages import get_lzo_packages
import os
from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
from ambari_commons import OSConst
@@ -138,10 +139,11 @@ def hdfs(name=None):
content=Template("slaves.j2")
)
- if params.lzo_enabled and len(params.lzo_packages) > 0:
- Package(params.lzo_packages,
- retry_on_repo_unavailability=params.agent_stack_retry_on_unavailability,
- retry_count=params.agent_stack_retry_count)
+ if params.lzo_enabled:
+ lzo_packages = get_lzo_packages(params.stack_version_unformatted)
+ Package(lzo_packages,
+ retry_on_repo_unavailability=params.agent_stack_retry_on_unavailability,
+ retry_count=params.agent_stack_retry_count)
def install_snappy():
import params
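The hdfs.py change above (and the matching OOZIE changes later in this digest) moves LZO package-name resolution from import-time params into the function body, so names are computed only when core-site actually enables the LZO codec. A hedged sketch of that gating, with illustrative names rather than the real resource_management API:

    # Sketch: decide from core-site whether LZO is wanted, and only then
    # compute the stack-versioned package names (mirroring the lazy call to
    # get_lzo_packages(params.stack_version_unformatted) in the diff).
    def lzo_packages_to_install(core_site, stack_version):
        codecs = core_site.get("io.compression.codecs") or ""
        if "com.hadoop.compression.lzo" not in codecs.lower():
            return []  # LZO disabled: nothing to resolve, nothing to install
        delim = "_"
        versioned = stack_version.replace(".", delim).replace("-", delim)
        return ["hadooplzo" + delim + versioned]  # illustrative package name

    print(lzo_packages_to_install(
        {"io.compression.codecs":
         "org.apache.hadoop.io.compress.GzipCodec,com.hadoop.compression.lzo.LzoCodec"},
        "2.6.3.0-235"))
    # -> ['hadooplzo_2_6_3_0_235']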
http://git-wip-us.apache.org/repos/asf/ambari/blob/b1295362/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/install_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/install_params.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/install_params.py
index fe488c3..235f231 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/install_params.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/install_params.py
@@ -23,7 +23,6 @@ if OSCheck.is_windows_family():
exclude_packages = []
else:
from resource_management.libraries.functions.default import default
- from resource_management.libraries.functions.get_lzo_packages import get_lzo_packages
from resource_management.libraries.script.script import Script
_config = Script.get_config()
@@ -32,8 +31,3 @@ else:
# The logic for LZO also exists in OOZIE's params.py
io_compression_codecs = default("/configurations/core-site/io.compression.codecs", None)
lzo_enabled = io_compression_codecs is not None and "com.hadoop.compression.lzo" in io_compression_codecs.lower()
- lzo_packages = get_lzo_packages(stack_version_unformatted)
-
- exclude_packages = []
- if not lzo_enabled:
- exclude_packages += lzo_packages
http://git-wip-us.apache.org/repos/asf/ambari/blob/b1295362/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params_linux.py
index 76b430b..bb6349b 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params_linux.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params_linux.py
@@ -40,7 +40,6 @@ from resource_management.libraries.functions.get_not_managed_resources import ge
from resource_management.libraries.script.script import Script
from resource_management.libraries.resources.hdfs_resource import HdfsResource
from resource_management.libraries.functions.format_jvm_option import format_jvm_option
-from resource_management.libraries.functions.get_lzo_packages import get_lzo_packages
from resource_management.libraries.functions.hdfs_utils import is_https_enabled_in_hdfs
from resource_management.libraries.functions import is_empty
from resource_management.libraries.functions.get_architecture import get_architecture
@@ -389,7 +388,6 @@ HdfsResource = functools.partial(
# The logic for LZO also exists in OOZIE's params.py
io_compression_codecs = default("/configurations/core-site/io.compression.codecs", None)
lzo_enabled = io_compression_codecs is not None and "com.hadoop.compression.lzo" in io_compression_codecs.lower()
-lzo_packages = get_lzo_packages(stack_version_unformatted)
name_node_params = default("/commandParams/namenode", None)
http://git-wip-us.apache.org/repos/asf/ambari/blob/b1295362/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/hdfs.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/hdfs.py b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/hdfs.py
index e054209..07c7616 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/hdfs.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/hdfs.py
@@ -25,6 +25,7 @@ from resource_management.core.resources import Package
from resource_management.core.source import Template
from resource_management.core.resources.service import ServiceConfig
from resource_management.libraries.resources.xml_config import XmlConfig
+from resource_management.libraries.functions.get_lzo_packages import get_lzo_packages
import os
from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
from ambari_commons import OSConst
@@ -138,10 +139,11 @@ def hdfs(name=None):
content=Template("slaves.j2")
)
- if params.lzo_enabled and len(params.lzo_packages) > 0:
- Package(params.lzo_packages,
- retry_on_repo_unavailability=params.agent_stack_retry_on_unavailability,
- retry_count=params.agent_stack_retry_count)
+ if params.lzo_enabled:
+ lzo_packages = get_lzo_packages(params.stack_version_unformatted)
+ Package(lzo_packages,
+ retry_on_repo_unavailability=params.agent_stack_retry_on_unavailability,
+ retry_count=params.agent_stack_retry_count)
def install_snappy():
import params
http://git-wip-us.apache.org/repos/asf/ambari/blob/b1295362/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/install_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/install_params.py b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/install_params.py
index fe488c3..235f231 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/install_params.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/install_params.py
@@ -23,7 +23,6 @@ if OSCheck.is_windows_family():
exclude_packages = []
else:
from resource_management.libraries.functions.default import default
- from resource_management.libraries.functions.get_lzo_packages import get_lzo_packages
from resource_management.libraries.script.script import Script
_config = Script.get_config()
@@ -32,8 +31,3 @@ else:
# The logic for LZO also exists in OOZIE's params.py
io_compression_codecs = default("/configurations/core-site/io.compression.codecs", None)
lzo_enabled = io_compression_codecs is not None and "com.hadoop.compression.lzo" in io_compression_codecs.lower()
- lzo_packages = get_lzo_packages(stack_version_unformatted)
-
- exclude_packages = []
- if not lzo_enabled:
- exclude_packages += lzo_packages
http://git-wip-us.apache.org/repos/asf/ambari/blob/b1295362/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/params_linux.py
index de735f4..2fa6208 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/params_linux.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/params_linux.py
@@ -40,7 +40,6 @@ from resource_management.libraries.functions.get_not_managed_resources import ge
from resource_management.libraries.script.script import Script
from resource_management.libraries.resources.hdfs_resource import HdfsResource
from resource_management.libraries.functions.format_jvm_option import format_jvm_option
-from resource_management.libraries.functions.get_lzo_packages import get_lzo_packages
from resource_management.libraries.functions.hdfs_utils import is_https_enabled_in_hdfs
from resource_management.libraries.functions import is_empty
from resource_management.libraries.functions.setup_ranger_plugin_xml import get_audit_configs, generate_ranger_service_config
@@ -378,7 +377,6 @@ HdfsResource = functools.partial(
# The logic for LZO also exists in OOZIE's params.py
io_compression_codecs = default("/configurations/core-site/io.compression.codecs", None)
lzo_enabled = io_compression_codecs is not None and "com.hadoop.compression.lzo" in io_compression_codecs.lower()
-lzo_packages = get_lzo_packages(stack_version_unformatted)
name_node_params = default("/commandParams/namenode", None)
http://git-wip-us.apache.org/repos/asf/ambari/blob/b1295362/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie.py b/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie.py
index 64f9d54..f215a1e 100644
--- a/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie.py
+++ b/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie.py
@@ -37,6 +37,7 @@ from resource_management.libraries.functions.copy_tarball import get_current_ver
from resource_management.libraries.resources.xml_config import XmlConfig
from resource_management.libraries.script.script import Script
from resource_management.libraries.functions.security_commons import update_credential_provider_path
+from resource_management.libraries.functions.get_lzo_packages import get_lzo_packages
from resource_management.core.resources.packaging import Package
from resource_management.core.shell import as_user, as_sudo, call, checked_call
from resource_management.core.exceptions import Fail
@@ -305,8 +306,9 @@ def oozie_server_specific(upgrade_type):
Execute(format('{sudo} chown {oozie_user}:{user_group} {oozie_libext_dir}/falcon-oozie-el-extension-*.jar'),
not_if = no_op_test)
- if params.lzo_enabled and len(params.all_lzo_packages) > 0:
- Package(params.all_lzo_packages,
+ if params.lzo_enabled:
+ all_lzo_packages = get_lzo_packages(params.stack_version_unformatted)
+ Package(all_lzo_packages,
retry_on_repo_unavailability=params.agent_stack_retry_on_unavailability,
retry_count=params.agent_stack_retry_count)
Execute(format('{sudo} cp {hadoop_lib_home}/hadoop-lzo*.jar {oozie_lib_dir}'),
http://git-wip-us.apache.org/repos/asf/ambari/blob/b1295362/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/params_linux.py
index b66e157..a0f0672 100644
--- a/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/params_linux.py
+++ b/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/params_linux.py
@@ -30,7 +30,6 @@ from resource_management.libraries.functions import get_port_from_url
from resource_management.libraries.functions.get_not_managed_resources import get_not_managed_resources
from resource_management.libraries.functions.setup_atlas_hook import has_atlas_in_cluster
from resource_management.libraries.script.script import Script
-from resource_management.libraries.functions.get_lzo_packages import get_lzo_packages
from resource_management.libraries.functions.expect import expect
from resource_management.libraries.resources.hdfs_resource import HdfsResource
from resource_management.libraries.functions.get_architecture import get_architecture
@@ -388,5 +387,3 @@ is_webhdfs_enabled = config['configurations']['hdfs-site']['dfs.webhdfs.enabled'
# The logic for LZO also exists in HDFS' params.py
io_compression_codecs = default("/configurations/core-site/io.compression.codecs", None)
lzo_enabled = io_compression_codecs is not None and "com.hadoop.compression.lzo" in io_compression_codecs.lower()
-
-all_lzo_packages = get_lzo_packages(stack_version_unformatted)
http://git-wip-us.apache.org/repos/asf/ambari/blob/b1295362/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/scripts/oozie.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/scripts/oozie.py b/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/scripts/oozie.py
index d916d3b..0771e93 100644
--- a/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/scripts/oozie.py
+++ b/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/scripts/oozie.py
@@ -275,8 +275,9 @@ def oozie_server_specific():
Execute(format('{sudo} chown {oozie_user}:{user_group} {oozie_libext_dir}/falcon-oozie-el-extension-*.jar'),
not_if = no_op_test)
- if params.lzo_enabled and len(params.all_lzo_packages) > 0:
- Package(params.all_lzo_packages,
+ if params.lzo_enabled:
+ all_lzo_packages = get_lzo_packages(params.stack_version_unformatted)
+ Package(all_lzo_packages,
retry_on_repo_unavailability=params.agent_stack_retry_on_unavailability,
retry_count=params.agent_stack_retry_count)
Execute(format('{sudo} cp {hadoop_lib_home}/hadoop-lzo*.jar {oozie_lib_dir}'),
http://git-wip-us.apache.org/repos/asf/ambari/blob/b1295362/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/scripts/params_linux.py
index d30a465..70b89b7 100644
--- a/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/scripts/params_linux.py
+++ b/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/scripts/params_linux.py
@@ -28,7 +28,6 @@ from resource_management.libraries.functions import get_port_from_url
from resource_management.libraries.functions.get_not_managed_resources import get_not_managed_resources
from resource_management.libraries.functions.setup_atlas_hook import has_atlas_in_cluster
from resource_management.libraries.script.script import Script
-from resource_management.libraries.functions.get_lzo_packages import get_lzo_packages
from resource_management.libraries.functions.expect import expect
from resource_management.libraries.resources.hdfs_resource import HdfsResource
from resource_management.libraries.functions.get_architecture import get_architecture
@@ -370,5 +369,3 @@ is_webhdfs_enabled = config['configurations']['hdfs-site']['dfs.webhdfs.enabled'
# The logic for LZO also exists in HDFS' params.py
io_compression_codecs = default("/configurations/core-site/io.compression.codecs", None)
lzo_enabled = io_compression_codecs is not None and "com.hadoop.compression.lzo" in io_compression_codecs.lower()
-
-all_lzo_packages = get_lzo_packages(stack_version_unformatted)
http://git-wip-us.apache.org/repos/asf/ambari/blob/b1295362/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_master.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_master.py b/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_master.py
index 2224d31..e32393d 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_master.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_master.py
@@ -95,8 +95,10 @@ class TestHBaseMaster(RMFTestCase):
try_install=True,
os_type=('Redhat', '6.4', 'Final'),
checked_call_mocks = [(0, "OK.", "")],
+ available_packages_in_repos = ['hbase_2_3_0_1_1234'],
)
+
# only assert that the correct package is trying to be installed
self.assertResourceCalled('Package', 'hbase_2_3_0_1_1234',
retry_count=5,
http://git-wip-us.apache.org/repos/asf/ambari/blob/b1295362/ambari-server/src/test/python/stacks/utils/RMFTestCase.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/utils/RMFTestCase.py b/ambari-server/src/test/python/stacks/utils/RMFTestCase.py
index bff8642..ae33a2a 100644
--- a/ambari-server/src/test/python/stacks/utils/RMFTestCase.py
+++ b/ambari-server/src/test/python/stacks/utils/RMFTestCase.py
@@ -80,7 +80,8 @@ class RMFTestCase(TestCase):
mocks_dict={},
try_install=False,
command_args=[],
- log_out_files=False):
+ log_out_files=False,
+ available_packages_in_repos = []):
norm_path = os.path.normpath(path)
@@ -125,6 +126,7 @@ class RMFTestCase(TestCase):
Script.instance = None
script_class_inst = RMFTestCase._get_attr(script_module, classname)()
script_class_inst.log_out_files = log_out_files
+ script_class_inst.available_packages_in_repos = available_packages_in_repos
method = RMFTestCase._get_attr(script_class_inst, command)
except IOError, err:
raise RuntimeError("Cannot load class %s from %s: %s" % (classname, norm_path, err.message))
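The new available_packages_in_repos hook lets a test pre-seed the package list (as the hbase_master test above does with ['hbase_2_3_0_1_1234']), so load_available_packages() never touches a real package manager and format_package_name() resolves deterministically. A tiny self-contained sketch of the injection pattern, with illustrative class names:

    # Sketch of what the RMFTestCase change enables: the harness instantiates
    # the script class and seeds available_packages_in_repos up front.
    class FakeScript(object):
        def __init__(self):
            self.available_packages_in_repos = []

        def load_available_packages(self):
            if self.available_packages_in_repos:
                return self.available_packages_in_repos  # seeded by the test
            raise RuntimeError("would query the real repos here")

    script = FakeScript()
    script.available_packages_in_repos = ['hbase_2_3_0_1_1234']  # injected
    print(script.load_available_packages())  # -> ['hbase_2_3_0_1_1234']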
[07/31] ambari git commit: AMBARI-22160. hadooplzo package installation failed on devdeploys (aonishuk)
Posted by jl...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/fc80a183/ambari-server/bin/src/main/resources/Ambari-DDL-AzureDB-CREATE.sql
----------------------------------------------------------------------
diff --git a/ambari-server/bin/src/main/resources/Ambari-DDL-AzureDB-CREATE.sql b/ambari-server/bin/src/main/resources/Ambari-DDL-AzureDB-CREATE.sql
new file mode 100644
index 0000000..b54132c
--- /dev/null
+++ b/ambari-server/bin/src/main/resources/Ambari-DDL-AzureDB-CREATE.sql
@@ -0,0 +1,2147 @@
+/*
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+/*
+Schema population script for $(AMBARIDBNAME)
+
+Use this script in sqlcmd mode, setting the environment variables like this:
+set AMBARIDBNAME=ambari
+
+sqlcmd -S localhost\SQLEXPRESS -i C:\app\ambari-server-1.3.0-SNAPSHOT\resources\Ambari-DDL-SQLServer-CREATE.sql
+*/
+
+
+------create the database------
+
+------create tables and grant privileges to db user---------
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('stack') AND type = 'U')
+BEGIN
+CREATE TABLE stack(
+ stack_id BIGINT NOT NULL,
+ stack_name VARCHAR(255) NOT NULL,
+ stack_version VARCHAR(255) NOT NULL,
+ CONSTRAINT PK_stack PRIMARY KEY CLUSTERED (stack_id),
+ CONSTRAINT UQ_stack UNIQUE (stack_name, stack_version))
+END
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('extension') AND type = 'U')
+BEGIN
+CREATE TABLE extension(
+ extension_id BIGINT NOT NULL,
+ extension_name VARCHAR(255) NOT NULL,
+ extension_version VARCHAR(255) NOT NULL,
+ CONSTRAINT PK_extension PRIMARY KEY CLUSTERED (extension_id),
+ CONSTRAINT UQ_extension UNIQUE (extension_name, extension_version))
+END
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('extensionlink') AND type = 'U')
+BEGIN
+CREATE TABLE extensionlink(
+ link_id BIGINT NOT NULL,
+ stack_id BIGINT NOT NULL,
+ extension_id BIGINT NOT NULL,
+ CONSTRAINT PK_extensionlink PRIMARY KEY CLUSTERED (link_id),
+ CONSTRAINT FK_extensionlink_stack_id FOREIGN KEY (stack_id) REFERENCES stack(stack_id),
+ CONSTRAINT FK_extensionlink_extension_id FOREIGN KEY (extension_id) REFERENCES extension(extension_id),
+ CONSTRAINT UQ_extension_link UNIQUE (stack_id, extension_id))
+END
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('adminresourcetype') AND type = 'U')
+BEGIN
+CREATE TABLE adminresourcetype (
+ resource_type_id INTEGER NOT NULL,
+ resource_type_name VARCHAR(255) NOT NULL,
+ CONSTRAINT PK_adminresourcetype PRIMARY KEY CLUSTERED (resource_type_id)
+ )
+END
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('adminresource') AND type = 'U')
+BEGIN
+CREATE TABLE adminresource (
+ resource_id BIGINT NOT NULL,
+ resource_type_id INTEGER NOT NULL,
+ CONSTRAINT PK_adminresource PRIMARY KEY CLUSTERED (resource_id),
+ CONSTRAINT FK_resource_resource_type_id FOREIGN KEY (resource_type_id) REFERENCES adminresourcetype(resource_type_id))
+END
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('clusters') AND type = 'U')
+BEGIN
+CREATE TABLE clusters (
+ cluster_id BIGINT NOT NULL,
+ resource_id BIGINT NOT NULL,
+ upgrade_id BIGINT,
+ cluster_info VARCHAR(255) NOT NULL,
+ cluster_name VARCHAR(100) NOT NULL UNIQUE,
+ provisioning_state VARCHAR(255) NOT NULL DEFAULT 'INIT',
+ security_type VARCHAR(32) NOT NULL DEFAULT 'NONE',
+ desired_cluster_state VARCHAR(255) NOT NULL,
+ desired_stack_id BIGINT NOT NULL,
+ CONSTRAINT PK_clusters PRIMARY KEY CLUSTERED (cluster_id),
+ CONSTRAINT FK_clusters_desired_stack_id FOREIGN KEY (desired_stack_id) REFERENCES stack(stack_id),
+ CONSTRAINT FK_clusters_resource_id FOREIGN KEY (resource_id) REFERENCES adminresource(resource_id))
+END
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('clusterconfig') AND type = 'U')
+BEGIN
+CREATE TABLE clusterconfig (
+ config_id BIGINT NOT NULL,
+ version_tag VARCHAR(255) NOT NULL,
+ version BIGINT NOT NULL,
+ type_name VARCHAR(255) NOT NULL,
+ cluster_id BIGINT NOT NULL,
+ stack_id BIGINT NOT NULL,
+ selected SMALLINT NOT NULL DEFAULT 0,
+ config_data VARCHAR(MAX) NOT NULL,
+ config_attributes VARCHAR(MAX),
+ create_timestamp BIGINT NOT NULL,
+ unmapped SMALLINT NOT NULL DEFAULT 0,
+ selected_timestamp BIGINT NOT NULL DEFAULT 0,
+ CONSTRAINT PK_clusterconfig PRIMARY KEY CLUSTERED (config_id),
+ CONSTRAINT FK_clusterconfig_cluster_id FOREIGN KEY (cluster_id) REFERENCES clusters (cluster_id),
+ CONSTRAINT FK_clusterconfig_stack_id FOREIGN KEY (stack_id) REFERENCES stack(stack_id),
+ CONSTRAINT UQ_config_type_tag UNIQUE (cluster_id, type_name, version_tag),
+ CONSTRAINT UQ_config_type_version UNIQUE (cluster_id, type_name, version))
+END
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('serviceconfig') AND type = 'U')
+BEGIN
+CREATE TABLE serviceconfig (
+ service_config_id BIGINT NOT NULL,
+ cluster_id BIGINT NOT NULL,
+ service_name VARCHAR(255) NOT NULL,
+ version BIGINT NOT NULL,
+ create_timestamp BIGINT NOT NULL,
+ stack_id BIGINT NOT NULL,
+ user_name VARCHAR(255) NOT NULL DEFAULT '_db',
+ group_id BIGINT,
+ note VARCHAR(MAX),
+ CONSTRAINT PK_serviceconfig PRIMARY KEY CLUSTERED (service_config_id),
+ CONSTRAINT FK_serviceconfig_stack_id FOREIGN KEY (stack_id) REFERENCES stack(stack_id),
+ CONSTRAINT UQ_scv_service_version UNIQUE (cluster_id, service_name, version))
+END
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('hosts') AND type = 'U')
+BEGIN
+CREATE TABLE hosts (
+ host_id BIGINT NOT NULL,
+ host_name VARCHAR(255) NOT NULL,
+ cpu_count INTEGER NOT NULL,
+ ph_cpu_count INTEGER,
+ cpu_info VARCHAR(255) NOT NULL,
+ discovery_status VARCHAR(2000) NOT NULL,
+ host_attributes VARCHAR(MAX) NOT NULL,
+ ipv4 VARCHAR(255),
+ ipv6 VARCHAR(255),
+ public_host_name VARCHAR(255),
+ last_registration_time BIGINT NOT NULL,
+ os_arch VARCHAR(255) NOT NULL,
+ os_info VARCHAR(1000) NOT NULL,
+ os_type VARCHAR(255) NOT NULL,
+ rack_info VARCHAR(255) NOT NULL,
+ total_mem BIGINT NOT NULL,
+ CONSTRAINT PK_hosts PRIMARY KEY CLUSTERED (host_id),
+ CONSTRAINT UQ_hosts_host_name UNIQUE (host_name))
+END
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('serviceconfighosts') AND type = 'U')
+BEGIN
+CREATE TABLE serviceconfighosts (
+ service_config_id BIGINT NOT NULL,
+ host_id BIGINT NOT NULL,
+ CONSTRAINT PK_serviceconfighosts PRIMARY KEY CLUSTERED (service_config_id, host_id),
+ CONSTRAINT FK_scvhosts_host_id FOREIGN KEY (host_id) REFERENCES hosts(host_id),
+ CONSTRAINT FK_scvhosts_scv FOREIGN KEY (service_config_id) REFERENCES serviceconfig(service_config_id))
+END
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('serviceconfigmapping') AND type = 'U')
+BEGIN
+CREATE TABLE serviceconfigmapping (
+ service_config_id BIGINT NOT NULL,
+ config_id BIGINT NOT NULL,
+ CONSTRAINT PK_serviceconfigmapping PRIMARY KEY CLUSTERED (service_config_id, config_id),
+ CONSTRAINT FK_scvm_config FOREIGN KEY (config_id) REFERENCES clusterconfig(config_id),
+ CONSTRAINT FK_scvm_scv FOREIGN KEY (service_config_id) REFERENCES serviceconfig(service_config_id))
+END
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('clusterservices') AND type = 'U')
+BEGIN
+CREATE TABLE clusterservices (
+ service_name VARCHAR(255) NOT NULL,
+ cluster_id BIGINT NOT NULL,
+ service_enabled INT NOT NULL,
+ CONSTRAINT PK_clusterservices PRIMARY KEY CLUSTERED (service_name, cluster_id),
+ CONSTRAINT FK_clusterservices_cluster_id FOREIGN KEY (cluster_id) REFERENCES clusters (cluster_id))
+END
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('clusterstate') AND type = 'U')
+BEGIN
+CREATE TABLE clusterstate (
+ cluster_id BIGINT NOT NULL,
+ current_cluster_state VARCHAR(255) NOT NULL,
+ current_stack_id BIGINT NOT NULL,
+ CONSTRAINT PK_clusterstate PRIMARY KEY CLUSTERED (cluster_id),
+ CONSTRAINT FK_clusterstate_cluster_id FOREIGN KEY (cluster_id) REFERENCES clusters (cluster_id),
+ CONSTRAINT FK_cs_current_stack_id FOREIGN KEY (current_stack_id) REFERENCES stack(stack_id))
+END
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('repo_version') AND type = 'U')
+BEGIN
+CREATE TABLE repo_version (
+ repo_version_id BIGINT NOT NULL,
+ stack_id BIGINT NOT NULL,
+ version VARCHAR(255) NOT NULL,
+ display_name VARCHAR(128) NOT NULL,
+ repositories VARCHAR(MAX) NOT NULL,
+ repo_type VARCHAR(255) DEFAULT 'STANDARD' NOT NULL,
+ hidden SMALLINT NOT NULL DEFAULT 0,
+ resolved BIT NOT NULL DEFAULT 0,
+ version_url VARCHAR(1024),
+ version_xml VARCHAR(MAX),
+ version_xsd VARCHAR(512),
+ parent_id BIGINT,
+ CONSTRAINT PK_repo_version PRIMARY KEY CLUSTERED (repo_version_id),
+ CONSTRAINT FK_repoversion_stack_id FOREIGN KEY (stack_id) REFERENCES stack(stack_id),
+ CONSTRAINT UQ_repo_version_display_name UNIQUE (display_name),
+ CONSTRAINT UQ_repo_version_stack_id UNIQUE (stack_id, version))
+END
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('servicecomponentdesiredstate') AND type = 'U')
+BEGIN
+CREATE TABLE servicecomponentdesiredstate (
+ id BIGINT NOT NULL,
+ component_name VARCHAR(255) NOT NULL,
+ cluster_id BIGINT NOT NULL,
+ desired_repo_version_id BIGINT NOT NULL,
+ desired_state VARCHAR(255) NOT NULL,
+ service_name VARCHAR(255) NOT NULL,
+ recovery_enabled SMALLINT NOT NULL DEFAULT 0,
+ repo_state VARCHAR(255) NOT NULL DEFAULT 'NOT_REQUIRED',
+ CONSTRAINT pk_sc_desiredstate PRIMARY KEY (id),
+ CONSTRAINT UQ_scdesiredstate_name UNIQUE(component_name, service_name, cluster_id),
+ CONSTRAINT FK_scds_desired_repo_id FOREIGN KEY (desired_repo_version_id) REFERENCES repo_version (repo_version_id),
+ CONSTRAINT srvccmponentdesiredstatesrvcnm FOREIGN KEY (service_name, cluster_id) REFERENCES clusterservices (service_name, cluster_id))
+END
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('hostcomponentdesiredstate') AND type = 'U')
+BEGIN
+CREATE TABLE hostcomponentdesiredstate (
+ id BIGINT NOT NULL,
+ cluster_id BIGINT NOT NULL,
+ component_name VARCHAR(255) NOT NULL,
+ desired_state VARCHAR(255) NOT NULL,
+ host_id BIGINT NOT NULL,
+ service_name VARCHAR(255) NOT NULL,
+ admin_state VARCHAR(32),
+ maintenance_state VARCHAR(32) NOT NULL,
+ security_state VARCHAR(32) NOT NULL DEFAULT 'UNSECURED',
+ restart_required BIT NOT NULL DEFAULT 0,
+ CONSTRAINT PK_hostcomponentdesiredstate PRIMARY KEY CLUSTERED (id),
+ CONSTRAINT UQ_hcdesiredstate_name UNIQUE (component_name, service_name, host_id, cluster_id),
+ CONSTRAINT FK_hcdesiredstate_host_id FOREIGN KEY (host_id) REFERENCES hosts (host_id),
+ CONSTRAINT hstcmpnntdesiredstatecmpnntnme FOREIGN KEY (component_name, service_name, cluster_id) REFERENCES servicecomponentdesiredstate (component_name, service_name, cluster_id))
+END
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('hostcomponentstate') AND type = 'U')
+BEGIN
+CREATE TABLE hostcomponentstate (
+ id BIGINT NOT NULL,
+ cluster_id BIGINT NOT NULL,
+ component_name VARCHAR(255) NOT NULL,
+ version VARCHAR(32) NOT NULL DEFAULT 'UNKNOWN',
+ current_state VARCHAR(255) NOT NULL,
+ host_id BIGINT NOT NULL,
+ service_name VARCHAR(255) NOT NULL,
+ upgrade_state VARCHAR(32) NOT NULL DEFAULT 'NONE',
+ security_state VARCHAR(32) NOT NULL DEFAULT 'UNSECURED',
+ CONSTRAINT PK_hostcomponentstate PRIMARY KEY CLUSTERED (id),
+ CONSTRAINT FK_hostcomponentstate_host_id FOREIGN KEY (host_id) REFERENCES hosts (host_id),
+ CONSTRAINT hstcomponentstatecomponentname FOREIGN KEY (component_name, service_name, cluster_id) REFERENCES servicecomponentdesiredstate (component_name, service_name, cluster_id))
+END
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.indexes WHERE name = 'idx_host_component_state')
+BEGIN
+CREATE NONCLUSTERED INDEX idx_host_component_state ON hostcomponentstate (host_id, component_name, service_name, cluster_id)
+END
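+
+-- (The index guards check only the index name in sys.indexes, so they assume
+-- index names are unique database-wide rather than per table.)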
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('hoststate') AND type = 'U')
+BEGIN
+CREATE TABLE hoststate (
+ agent_version VARCHAR(255) NOT NULL,
+ available_mem BIGINT NOT NULL,
+ current_state VARCHAR(255) NOT NULL,
+ health_status VARCHAR(255),
+ host_id BIGINT NOT NULL,
+ time_in_state BIGINT NOT NULL,
+ maintenance_state VARCHAR(512),
+ CONSTRAINT PK_hoststate PRIMARY KEY CLUSTERED (host_id),
+ CONSTRAINT FK_hoststate_host_id FOREIGN KEY (host_id) REFERENCES hosts (host_id))
+END
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('servicedesiredstate') AND type = 'U')
+BEGIN
+CREATE TABLE servicedesiredstate (
+ cluster_id BIGINT NOT NULL,
+ desired_host_role_mapping INTEGER NOT NULL,
+ desired_repo_version_id BIGINT NOT NULL,
+ desired_state VARCHAR(255) NOT NULL,
+ service_name VARCHAR(255) NOT NULL,
+ maintenance_state VARCHAR(32) NOT NULL,
+ security_state VARCHAR(32) NOT NULL DEFAULT 'UNSECURED',
+ credential_store_enabled SMALLINT NOT NULL DEFAULT 0,
+ CONSTRAINT PK_servicedesiredstate PRIMARY KEY CLUSTERED (cluster_id,service_name),
+ CONSTRAINT FK_repo_version_id FOREIGN KEY (desired_repo_version_id) REFERENCES repo_version (repo_version_id),
+ CONSTRAINT servicedesiredstateservicename FOREIGN KEY (service_name, cluster_id) REFERENCES clusterservices (service_name, cluster_id))
+END
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('adminprincipaltype') AND type = 'U')
+BEGIN
+CREATE TABLE adminprincipaltype (
+ principal_type_id INTEGER NOT NULL,
+ principal_type_name VARCHAR(255) NOT NULL,
+ CONSTRAINT PK_adminprincipaltype PRIMARY KEY CLUSTERED (principal_type_id)
+ )
+END
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('adminprincipal') AND type = 'U')
+BEGIN
+CREATE TABLE adminprincipal (
+ principal_id BIGINT NOT NULL,
+ principal_type_id INTEGER NOT NULL,
+ CONSTRAINT PK_adminprincipal PRIMARY KEY CLUSTERED (principal_id),
+ CONSTRAINT FK_principal_principal_type_id FOREIGN KEY (principal_type_id) REFERENCES adminprincipaltype(principal_type_id))
+END
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('users') AND type = 'U')
+BEGIN
+CREATE TABLE users (
+ user_id INTEGER,
+ principal_id BIGINT NOT NULL,
+ ldap_user INTEGER NOT NULL DEFAULT 0,
+ user_name VARCHAR(255) NOT NULL,
+ user_type VARCHAR(255) NOT NULL DEFAULT 'LOCAL',
+ create_time DATETIME DEFAULT GETDATE(),
+ user_password VARCHAR(255),
+ active INTEGER NOT NULL DEFAULT 1,
+ active_widget_layouts VARCHAR(1024) DEFAULT NULL,
+ CONSTRAINT PK_users PRIMARY KEY CLUSTERED (user_id),
+ CONSTRAINT FK_users_principal_id FOREIGN KEY (principal_id) REFERENCES adminprincipal(principal_id),
+ CONSTRAINT UNQ_users_0 UNIQUE (user_name, user_type))
+END
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('groups') AND type = 'U')
+BEGIN
+CREATE TABLE groups (
+ group_id INTEGER,
+ principal_id BIGINT NOT NULL,
+ group_name VARCHAR(255) NOT NULL,
+ ldap_group INTEGER NOT NULL DEFAULT 0,
+ group_type VARCHAR(255) NOT NULL DEFAULT 'LOCAL',
+ CONSTRAINT PK_groups PRIMARY KEY CLUSTERED (group_id),
+ CONSTRAINT FK_groups_principal_id FOREIGN KEY (principal_id) REFERENCES adminprincipal(principal_id),
+ CONSTRAINT UNQ_groups_0 UNIQUE (group_name, ldap_group))
+END
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('members') AND type = 'U')
+BEGIN
+CREATE TABLE members (
+ member_id INTEGER,
+ group_id INTEGER NOT NULL,
+ user_id INTEGER NOT NULL,
+ CONSTRAINT PK_members PRIMARY KEY CLUSTERED (member_id),
+ CONSTRAINT FK_members_group_id FOREIGN KEY (group_id) REFERENCES groups (group_id),
+ CONSTRAINT FK_members_user_id FOREIGN KEY (user_id) REFERENCES users (user_id),
+ CONSTRAINT UNQ_members_0 UNIQUE (group_id, user_id))
+END
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('requestschedule') AND type = 'U')
+BEGIN
+CREATE TABLE requestschedule (
+ schedule_id BIGINT,
+ cluster_id BIGINT NOT NULL,
+ description VARCHAR(255),
+ status VARCHAR(255),
+ batch_separation_seconds SMALLINT,
+ batch_toleration_limit SMALLINT,
+ authenticated_user_id INTEGER,
+ create_user VARCHAR(255),
+ create_timestamp BIGINT,
+ update_user VARCHAR(255),
+ update_timestamp BIGINT,
+ minutes VARCHAR(10),
+ hours VARCHAR(10),
+ days_of_month VARCHAR(10),
+ month VARCHAR(10),
+ day_of_week VARCHAR(10),
+ yearToSchedule VARCHAR(10),
+ startTime VARCHAR(50),
+ endTime VARCHAR(50),
+ last_execution_status VARCHAR(255),
+ CONSTRAINT PK_requestschedule PRIMARY KEY CLUSTERED (schedule_id))
+END
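+
+-- The minutes/hours/days_of_month/month/day_of_week columns above hold the
+-- cron-style trigger fields of a schedule; yearToSchedule/startTime/endTime
+-- keep their legacy camelCase names.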
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('request') AND type = 'U')
+BEGIN
+CREATE TABLE request (
+ request_id BIGINT NOT NULL,
+ cluster_id BIGINT,
+ command_name VARCHAR(255),
+ create_time BIGINT NOT NULL,
+ end_time BIGINT NOT NULL,
+ exclusive_execution BIT NOT NULL DEFAULT 0,
+ inputs VARBINARY(MAX),
+ request_context VARCHAR(255),
+ request_type VARCHAR(255),
+ request_schedule_id BIGINT,
+ start_time BIGINT NOT NULL,
+ status VARCHAR(255),
+ cluster_host_info VARBINARY(MAX) NOT NULL,
+ CONSTRAINT PK_request PRIMARY KEY CLUSTERED (request_id),
+ CONSTRAINT FK_request_schedule_id FOREIGN KEY (request_schedule_id) REFERENCES requestschedule (schedule_id))
+END
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('stage') AND type = 'U')
+BEGIN
+CREATE TABLE stage (
+ stage_id BIGINT NOT NULL,
+ request_id BIGINT NOT NULL,
+ cluster_id BIGINT NOT NULL,
+ skippable SMALLINT DEFAULT 0 NOT NULL,
+ supports_auto_skip_failure SMALLINT DEFAULT 0 NOT NULL,
+ log_info VARCHAR(255) NOT NULL,
+ request_context VARCHAR(255),
+ command_params VARBINARY(MAX),
+ host_params VARBINARY(MAX),
+ command_execution_type VARCHAR(32) NOT NULL DEFAULT 'STAGE',
+ CONSTRAINT PK_stage PRIMARY KEY CLUSTERED (stage_id, request_id),
+ CONSTRAINT FK_stage_request_id FOREIGN KEY (request_id) REFERENCES request (request_id))
+END
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('host_role_command') AND type = 'U')
+BEGIN
+CREATE TABLE host_role_command (
+ task_id BIGINT NOT NULL,
+ attempt_count SMALLINT NOT NULL,
+ retry_allowed SMALLINT DEFAULT 0 NOT NULL,
+ event VARCHAR(MAX) NOT NULL,
+ exitcode INTEGER NOT NULL,
+ host_id BIGINT,
+ last_attempt_time BIGINT NOT NULL,
+ request_id BIGINT NOT NULL,
+ role VARCHAR(255),
+ stage_id BIGINT NOT NULL,
+ start_time BIGINT NOT NULL,
+ original_start_time BIGINT NOT NULL,
+ end_time BIGINT,
+ status VARCHAR(255),
+ auto_skip_on_failure SMALLINT DEFAULT 0 NOT NULL,
+ std_error VARBINARY(MAX),
+ std_out VARBINARY(MAX),
+ output_log VARCHAR(255) NULL,
+ error_log VARCHAR(255) NULL,
+ structured_out VARBINARY(MAX),
+ role_command VARCHAR(255),
+ command_detail VARCHAR(255),
+ custom_command_name VARCHAR(255),
+ is_background SMALLINT DEFAULT 0 NOT NULL,
+ CONSTRAINT PK_host_role_command PRIMARY KEY CLUSTERED (task_id),
+ CONSTRAINT FK_host_role_command_host_id FOREIGN KEY (host_id) REFERENCES hosts (host_id),
+ CONSTRAINT FK_host_role_command_stage_id FOREIGN KEY (stage_id, request_id) REFERENCES stage (stage_id, request_id))
+END
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('execution_command') AND type = 'U')
+BEGIN
+CREATE TABLE execution_command (
+ command VARBINARY(MAX),
+ task_id BIGINT NOT NULL,
+ CONSTRAINT PK_execution_command PRIMARY KEY CLUSTERED (task_id),
+ CONSTRAINT FK_execution_command_task_id FOREIGN KEY (task_id) REFERENCES host_role_command (task_id))
+END
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('role_success_criteria') AND type = 'U')
+BEGIN
+CREATE TABLE role_success_criteria (
+ role VARCHAR(255) NOT NULL,
+ request_id BIGINT NOT NULL,
+ stage_id BIGINT NOT NULL,
+ success_factor FLOAT NOT NULL,
+ CONSTRAINT PK_role_success_criteria PRIMARY KEY CLUSTERED (role, request_id, stage_id),
+ CONSTRAINT role_success_criteria_stage_id FOREIGN KEY (stage_id, request_id) REFERENCES stage (stage_id, request_id))
+END
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('requestresourcefilter') AND type = 'U')
+BEGIN
+CREATE TABLE requestresourcefilter (
+ filter_id BIGINT NOT NULL,
+ request_id BIGINT NOT NULL,
+ service_name VARCHAR(255),
+ component_name VARCHAR(255),
+ hosts VARBINARY(MAX),
+ CONSTRAINT PK_requestresourcefilter PRIMARY KEY CLUSTERED (filter_id),
+ CONSTRAINT FK_reqresfilter_req_id FOREIGN KEY (request_id) REFERENCES request (request_id))
+END
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('requestoperationlevel') AND type = 'U')
+BEGIN
+CREATE TABLE requestoperationlevel (
+ operation_level_id BIGINT NOT NULL,
+ request_id BIGINT NOT NULL,
+ level_name VARCHAR(255),
+ cluster_name VARCHAR(255),
+ service_name VARCHAR(255),
+ host_component_name VARCHAR(255),
+ host_id BIGINT NULL, -- unlike most host_id columns, this one allows NULLs because the request can be at the service level
+ CONSTRAINT PK_requestoperationlevel PRIMARY KEY CLUSTERED (operation_level_id),
+ CONSTRAINT FK_req_op_level_req_id FOREIGN KEY (request_id) REFERENCES request (request_id))
+END
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('ClusterHostMapping') AND type = 'U')
+BEGIN
+CREATE TABLE ClusterHostMapping (
+ cluster_id BIGINT NOT NULL,
+ host_id BIGINT NOT NULL,
+ CONSTRAINT PK_ClusterHostMapping PRIMARY KEY CLUSTERED (cluster_id, host_id),
+ CONSTRAINT FK_clhostmapping_cluster_id FOREIGN KEY (cluster_id) REFERENCES clusters (cluster_id),
+ CONSTRAINT FK_clusterhostmapping_host_id FOREIGN KEY (host_id) REFERENCES hosts (host_id))
+END
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('key_value_store') AND type = 'U')
+BEGIN
+CREATE TABLE key_value_store (
+ [key] VARCHAR(255),
+ [value] VARCHAR(MAX),
+ CONSTRAINT PK_key_value_store PRIMARY KEY CLUSTERED ([key])
+ )
+END
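+
+-- ([key] and [value] are bracket-quoted above because KEY is a reserved
+-- keyword in T-SQL; the brackets escape the identifiers.)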
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('hostconfigmapping') AND type = 'U')
+BEGIN
+CREATE TABLE hostconfigmapping (
+ cluster_id BIGINT NOT NULL,
+ host_id BIGINT NOT NULL,
+ type_name VARCHAR(255) NOT NULL,
+ version_tag VARCHAR(255) NOT NULL,
+ service_name VARCHAR(255),
+ create_timestamp BIGINT NOT NULL,
+ selected INTEGER NOT NULL DEFAULT 0,
+ user_name VARCHAR(255) NOT NULL DEFAULT '_db',
+ CONSTRAINT PK_hostconfigmapping PRIMARY KEY CLUSTERED (cluster_id, host_id, type_name, create_timestamp),
+ CONSTRAINT FK_hostconfmapping_cluster_id FOREIGN KEY (cluster_id) REFERENCES clusters (cluster_id),
+ CONSTRAINT FK_hostconfmapping_host_id FOREIGN KEY (host_id) REFERENCES hosts (host_id))
+END
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('metainfo') AND type = 'U')
+BEGIN
+CREATE TABLE metainfo (
+ [metainfo_key] VARCHAR(255),
+ [metainfo_value] VARCHAR(255),
+ CONSTRAINT PK_metainfo PRIMARY KEY CLUSTERED ([metainfo_key])
+ )
+END
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('ambari_sequences') AND type = 'U')
+BEGIN
+CREATE TABLE ambari_sequences (
+ sequence_name VARCHAR(255),
+ [sequence_value] BIGINT NOT NULL,
+ CONSTRAINT PK_ambari_sequences PRIMARY KEY (sequence_name))
+END
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('configgroup') AND type = 'U')
+BEGIN
+CREATE TABLE configgroup (
+ group_id BIGINT,
+ cluster_id BIGINT NOT NULL,
+ group_name VARCHAR(255) NOT NULL,
+ tag VARCHAR(1024) NOT NULL,
+ description VARCHAR(1024),
+ create_timestamp BIGINT NOT NULL,
+ service_name VARCHAR(255),
+ CONSTRAINT PK_configgroup PRIMARY KEY CLUSTERED (group_id),
+ CONSTRAINT FK_configgroup_cluster_id FOREIGN KEY (cluster_id) REFERENCES clusters (cluster_id))
+END
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('confgroupclusterconfigmapping') AND type = 'U')
+BEGIN
+CREATE TABLE confgroupclusterconfigmapping (
+ config_group_id BIGINT NOT NULL,
+ cluster_id BIGINT NOT NULL,
+ config_type VARCHAR(255) NOT NULL,
+ version_tag VARCHAR(255) NOT NULL,
+ user_name VARCHAR(255) DEFAULT '_db',
+ create_timestamp BIGINT NOT NULL,
+ CONSTRAINT PK_confgroupclustercfgmapping PRIMARY KEY CLUSTERED (config_group_id, cluster_id, config_type),
+ CONSTRAINT FK_cgccm_gid FOREIGN KEY (config_group_id) REFERENCES configgroup (group_id),
+ CONSTRAINT FK_confg FOREIGN KEY (cluster_id, config_type, version_tag) REFERENCES clusterconfig (cluster_id, type_name, version_tag))
+END
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('configgrouphostmapping') AND type = 'U')
+BEGIN
+CREATE TABLE configgrouphostmapping (
+ config_group_id BIGINT NOT NULL,
+ host_id BIGINT NOT NULL,
+ CONSTRAINT PK_configgrouphostmapping PRIMARY KEY CLUSTERED (config_group_id, host_id),
+ CONSTRAINT FK_cghm_cgid FOREIGN KEY (config_group_id) REFERENCES configgroup (group_id),
+ CONSTRAINT FK_cghm_host_id FOREIGN KEY (host_id) REFERENCES hosts (host_id))
+END
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('requestschedulebatchrequest') AND type = 'U')
+BEGIN
+CREATE TABLE requestschedulebatchrequest (
+ schedule_id BIGINT,
+ batch_id BIGINT,
+ request_id BIGINT,
+ request_type VARCHAR(255),
+ request_uri VARCHAR(1024),
+ request_body VARBINARY(MAX),
+ request_status VARCHAR(255),
+ return_code SMALLINT,
+ return_message TEXT,
+ CONSTRAINT PK_requestschedulebatchrequest PRIMARY KEY CLUSTERED (schedule_id, batch_id),
+ CONSTRAINT FK_rsbatchrequest_schedule_id FOREIGN KEY (schedule_id) REFERENCES requestschedule (schedule_id))
+END
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('blueprint') AND type = 'U')
+BEGIN
+CREATE TABLE blueprint (
+ blueprint_name VARCHAR(255) NOT NULL,
+ stack_id BIGINT NOT NULL,
+ security_type VARCHAR(32) NOT NULL DEFAULT 'NONE',
+ security_descriptor_reference VARCHAR(255),
+ CONSTRAINT PK_blueprint PRIMARY KEY CLUSTERED (blueprint_name),
+ CONSTRAINT FK_blueprint_stack_id FOREIGN KEY (stack_id) REFERENCES stack(stack_id))
+END
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('hostgroup') AND type = 'U')
+BEGIN
+CREATE TABLE hostgroup (
+ blueprint_name VARCHAR(255) NOT NULL,
+ name VARCHAR(255) NOT NULL,
+ cardinality VARCHAR(255) NOT NULL,
+ CONSTRAINT PK_hostgroup PRIMARY KEY CLUSTERED (blueprint_name, name),
+ CONSTRAINT FK_hg_blueprint_name FOREIGN KEY (blueprint_name) REFERENCES blueprint(blueprint_name))
+END
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('hostgroup_component') AND type = 'U')
+BEGIN
+CREATE TABLE hostgroup_component (
+ blueprint_name VARCHAR(255) NOT NULL,
+ hostgroup_name VARCHAR(255) NOT NULL,
+ name VARCHAR(255) NOT NULL,
+ provision_action VARCHAR(255),
+ CONSTRAINT PK_hostgroup_component PRIMARY KEY CLUSTERED (blueprint_name, hostgroup_name, name),
+ CONSTRAINT FK_hgc_blueprint_name FOREIGN KEY (blueprint_name, hostgroup_name) REFERENCES hostgroup (blueprint_name, name))
+END
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('blueprint_configuration') AND type = 'U')
+BEGIN
+CREATE TABLE blueprint_configuration (
+ blueprint_name VARCHAR(255) NOT NULL,
+ type_name VARCHAR(255) NOT NULL,
+ config_data VARCHAR(MAX) NOT NULL,
+ config_attributes VARCHAR(MAX),
+ CONSTRAINT PK_blueprint_configuration PRIMARY KEY CLUSTERED (blueprint_name, type_name),
+ CONSTRAINT FK_cfg_blueprint_name FOREIGN KEY (blueprint_name) REFERENCES blueprint(blueprint_name))
+END
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('blueprint_setting') AND type = 'U')
+BEGIN
+CREATE TABLE blueprint_setting (
+ id BIGINT NOT NULL,
+ blueprint_name VARCHAR(255) NOT NULL,
+ setting_name VARCHAR(255) NOT NULL,
+ setting_data TEXT NOT NULL,
+ CONSTRAINT PK_blueprint_setting PRIMARY KEY (id),
+ CONSTRAINT UQ_blueprint_setting_name UNIQUE(blueprint_name,setting_name),
+ CONSTRAINT FK_blueprint_setting_name FOREIGN KEY (blueprint_name) REFERENCES blueprint(blueprint_name)
+ )
+END
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('hostgroup_configuration') AND type = 'U')
+BEGIN
+CREATE TABLE hostgroup_configuration (
+ blueprint_name VARCHAR(255) NOT NULL,
+ hostgroup_name VARCHAR(255) NOT NULL,
+ type_name VARCHAR(255) NOT NULL,
+ config_data VARCHAR(MAX) NOT NULL,
+ config_attributes VARCHAR(MAX),
+ CONSTRAINT PK_hostgroup_configuration PRIMARY KEY CLUSTERED (blueprint_name, hostgroup_name, type_name),
+ CONSTRAINT FK_hg_cfg_bp_hg_name FOREIGN KEY (blueprint_name, hostgroup_name) REFERENCES hostgroup (blueprint_name, name))
+END
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('viewmain') AND type = 'U')
+BEGIN
+CREATE TABLE viewmain (
+ view_name VARCHAR(255) NOT NULL,
+ label VARCHAR(255),
+ description VARCHAR(2048),
+ version VARCHAR(255),
+ build VARCHAR(128),
+ resource_type_id INTEGER NOT NULL,
+ icon VARCHAR(255),
+ icon64 VARCHAR(255),
+ archive VARCHAR(255),
+ mask VARCHAR(255),
+ system_view BIT NOT NULL DEFAULT 0,
+ CONSTRAINT PK_viewmain PRIMARY KEY CLUSTERED (view_name),
+ CONSTRAINT FK_view_resource_type_id FOREIGN KEY (resource_type_id) REFERENCES adminresourcetype(resource_type_id))
+END
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('viewurl') AND type = 'U')
+BEGIN
+CREATE TABLE viewurl (
+ url_id BIGINT,
+ url_name VARCHAR(255) NOT NULL,
+ url_suffix VARCHAR(255) NOT NULL,
+ CONSTRAINT PK_viewurl PRIMARY KEY CLUSTERED (url_id)
+)
+END
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('viewinstance') AND type = 'U')
+BEGIN
+CREATE TABLE viewinstance (
+ view_instance_id BIGINT,
+ resource_id BIGINT NOT NULL,
+ view_name VARCHAR(255) NOT NULL,
+ name VARCHAR(255) NOT NULL,
+ label VARCHAR(255),
+ description VARCHAR(2048),
+ visible CHAR(1),
+ icon VARCHAR(255),
+ icon64 VARCHAR(255),
+ xml_driven CHAR(1),
+ alter_names BIT NOT NULL DEFAULT 1,
+ cluster_handle BIGINT,
+ cluster_type VARCHAR(100) NOT NULL DEFAULT 'LOCAL_AMBARI',
+ short_url BIGINT,
+ CONSTRAINT PK_viewinstance PRIMARY KEY CLUSTERED (view_instance_id),
+ CONSTRAINT FK_instance_url_id FOREIGN KEY (short_url) REFERENCES viewurl(url_id),
+ CONSTRAINT FK_viewinst_view_name FOREIGN KEY (view_name) REFERENCES viewmain(view_name),
+ CONSTRAINT FK_viewinstance_resource_id FOREIGN KEY (resource_id) REFERENCES adminresource(resource_id),
+ CONSTRAINT UQ_viewinstance_name UNIQUE (view_name, name),
+ CONSTRAINT UQ_viewinstance_name_id UNIQUE (view_instance_id, view_name, name))
+END
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('viewinstancedata') AND type = 'U')
+BEGIN
+CREATE TABLE viewinstancedata (
+ view_instance_id BIGINT,
+ view_name VARCHAR(255) NOT NULL,
+ view_instance_name VARCHAR(255) NOT NULL,
+ name VARCHAR(255) NOT NULL,
+ user_name VARCHAR(255) NOT NULL,
+ value VARCHAR(2000) NOT NULL,
+ CONSTRAINT PK_viewinstancedata PRIMARY KEY CLUSTERED (view_instance_id, name, user_name),
+ CONSTRAINT FK_viewinstdata_view_name FOREIGN KEY (view_instance_id, view_name, view_instance_name) REFERENCES viewinstance(view_instance_id, view_name, name))
+END
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('viewinstanceproperty') AND type = 'U')
+BEGIN
+CREATE TABLE viewinstanceproperty (
+ view_name VARCHAR(255) NOT NULL,
+ view_instance_name VARCHAR(255) NOT NULL,
+ name VARCHAR(255) NOT NULL,
+ value VARCHAR(2000),
+ CONSTRAINT PK_viewinstanceproperty PRIMARY KEY CLUSTERED (view_name, view_instance_name, name),
+ CONSTRAINT FK_viewinstprop_view_name FOREIGN KEY (view_name, view_instance_name) REFERENCES viewinstance(view_name, name))
+END
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('viewparameter') AND type = 'U')
+BEGIN
+CREATE TABLE viewparameter (
+ view_name VARCHAR(255) NOT NULL,
+ name VARCHAR(255) NOT NULL,
+ description VARCHAR(2048),
+ label VARCHAR(255),
+ placeholder VARCHAR(255),
+ default_value VARCHAR(2000),
+ cluster_config VARCHAR(255),
+ required CHAR(1),
+ masked CHAR(1),
+ CONSTRAINT PK_viewparameter PRIMARY KEY CLUSTERED (view_name, name),
+ CONSTRAINT FK_viewparam_view_name FOREIGN KEY (view_name) REFERENCES viewmain(view_name))
+END
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('viewresource') AND type = 'U')
+BEGIN
+CREATE TABLE viewresource (
+ view_name VARCHAR(255) NOT NULL,
+ name VARCHAR(255) NOT NULL,
+ plural_name VARCHAR(255),
+ id_property VARCHAR(255),
+ subResource_names VARCHAR(255),
+ provider VARCHAR(255),
+ service VARCHAR(255),
+ resource VARCHAR(255),
+ CONSTRAINT PK_viewresource PRIMARY KEY CLUSTERED (view_name, name),
+ CONSTRAINT FK_viewres_view_name FOREIGN KEY (view_name) REFERENCES viewmain(view_name))
+END
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('viewentity') AND type = 'U')
+BEGIN
+CREATE TABLE viewentity (
+ id BIGINT NOT NULL,
+ view_name VARCHAR(255) NOT NULL,
+ view_instance_name VARCHAR(255) NOT NULL,
+ class_name VARCHAR(255) NOT NULL,
+ id_property VARCHAR(255),
+ CONSTRAINT PK_viewentity PRIMARY KEY CLUSTERED (id),
+ CONSTRAINT FK_viewentity_view_name FOREIGN KEY (view_name, view_instance_name) REFERENCES viewinstance(view_name, name))
+END
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('adminpermission') AND type = 'U')
+BEGIN
+CREATE TABLE adminpermission (
+ permission_id BIGINT NOT NULL,
+ permission_name VARCHAR(255) NOT NULL,
+ resource_type_id INTEGER NOT NULL,
+ permission_label VARCHAR(255),
+ principal_id BIGINT NOT NULL,
+ sort_order SMALLINT NOT NULL DEFAULT 1,
+ CONSTRAINT PK_adminpermission PRIMARY KEY CLUSTERED (permission_id),
+ CONSTRAINT FK_permission_resource_type_id FOREIGN KEY (resource_type_id) REFERENCES adminresourcetype(resource_type_id),
+ CONSTRAINT FK_permission_principal_id FOREIGN KEY (principal_id) REFERENCES adminprincipal(principal_id),
+ CONSTRAINT UQ_perm_name_resource_type_id UNIQUE (permission_name, resource_type_id))
+END
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('roleauthorization') AND type = 'U')
+BEGIN
+CREATE TABLE roleauthorization (
+ authorization_id VARCHAR(100) NOT NULL,
+ authorization_name VARCHAR(255) NOT NULL,
+ CONSTRAINT PK_roleauthorization PRIMARY KEY (authorization_id))
+END
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('permission_roleauthorization') AND type = 'U')
+BEGIN
+CREATE TABLE permission_roleauthorization (
+ permission_id BIGINT NOT NULL,
+ authorization_id VARCHAR(100) NOT NULL,
+ CONSTRAINT PK_permsn_roleauthorization PRIMARY KEY (permission_id, authorization_id),
+ CONSTRAINT FK_permission_roleauth_aid FOREIGN KEY (authorization_id) REFERENCES roleauthorization(authorization_id),
+ CONSTRAINT FK_permission_roleauth_pid FOREIGN KEY (permission_id) REFERENCES adminpermission(permission_id))
+END
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('adminprivilege') AND type = 'U')
+BEGIN
+CREATE TABLE adminprivilege (
+ privilege_id BIGINT,
+ permission_id BIGINT NOT NULL,
+ resource_id BIGINT NOT NULL,
+ principal_id BIGINT NOT NULL,
+ CONSTRAINT PK_adminprivilege PRIMARY KEY CLUSTERED (privilege_id),
+ CONSTRAINT FK_privilege_permission_id FOREIGN KEY (permission_id) REFERENCES adminpermission(permission_id),
+ CONSTRAINT FK_privilege_principal_id FOREIGN KEY (principal_id) REFERENCES adminprincipal(principal_id),
+ CONSTRAINT FK_privilege_resource_id FOREIGN KEY (resource_id) REFERENCES adminresource(resource_id))
+END
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('host_version') AND type = 'U')
+BEGIN
+CREATE TABLE host_version (
+ id BIGINT NOT NULL,
+ repo_version_id BIGINT NOT NULL,
+ host_id BIGINT NOT NULL,
+ state VARCHAR(32) NOT NULL,
+ CONSTRAINT PK_host_version PRIMARY KEY CLUSTERED (id),
+ CONSTRAINT FK_host_version_host_id FOREIGN KEY (host_id) REFERENCES hosts (host_id),
+ CONSTRAINT FK_host_version_repovers_id FOREIGN KEY (repo_version_id) REFERENCES repo_version (repo_version_id),
+ CONSTRAINT UQ_host_repo UNIQUE(host_id, repo_version_id))
+END
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('artifact') AND type = 'U')
+BEGIN
+CREATE TABLE artifact (
+ artifact_name VARCHAR(255) NOT NULL,
+ artifact_data TEXT NOT NULL,
+ foreign_keys VARCHAR(255) NOT NULL,
+ CONSTRAINT PK_artifact PRIMARY KEY CLUSTERED (artifact_name, foreign_keys)
+)
+END
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('widget') AND type = 'U')
+BEGIN
+CREATE TABLE widget (
+ id BIGINT NOT NULL,
+ widget_name VARCHAR(255) NOT NULL,
+ widget_type VARCHAR(255) NOT NULL,
+ metrics TEXT,
+ time_created BIGINT NOT NULL,
+ author VARCHAR(255),
+ description VARCHAR(2048),
+ default_section_name VARCHAR(255),
+ scope VARCHAR(255),
+ widget_values VARCHAR(4000),
+ properties VARCHAR(4000),
+ cluster_id BIGINT NOT NULL,
+ CONSTRAINT PK_widget PRIMARY KEY CLUSTERED (id)
+)
+END
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('widget_layout') AND type = 'U')
+BEGIN
+CREATE TABLE widget_layout (
+ id BIGINT NOT NULL,
+ layout_name VARCHAR(255) NOT NULL,
+ section_name VARCHAR(255) NOT NULL,
+ scope VARCHAR(255) NOT NULL,
+ user_name VARCHAR(255) NOT NULL,
+ display_name VARCHAR(255),
+ cluster_id BIGINT NOT NULL,
+ CONSTRAINT PK_widget_layout PRIMARY KEY CLUSTERED (id)
+)
+END
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('widget_layout_user_widget') AND type = 'U')
+BEGIN
+CREATE TABLE widget_layout_user_widget (
+ widget_layout_id BIGINT NOT NULL,
+ widget_id BIGINT NOT NULL,
+ widget_order SMALLINT,
+ CONSTRAINT PK_widget_layout_user_widget PRIMARY KEY CLUSTERED (widget_layout_id, widget_id),
+ CONSTRAINT FK_widget_id FOREIGN KEY (widget_id) REFERENCES widget(id),
+ CONSTRAINT FK_widget_layout_id FOREIGN KEY (widget_layout_id) REFERENCES widget_layout(id))
+END
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('topology_request') AND type = 'U')
+BEGIN
+CREATE TABLE topology_request (
+ id BIGINT NOT NULL,
+ action VARCHAR(255) NOT NULL,
+ cluster_id BIGINT NOT NULL,
+ bp_name VARCHAR(100) NOT NULL,
+ cluster_properties TEXT,
+ cluster_attributes TEXT,
+ description VARCHAR(1024),
+ provision_action VARCHAR(255),
+ CONSTRAINT PK_topology_request PRIMARY KEY CLUSTERED (id),
+ CONSTRAINT FK_topology_request_cluster_id FOREIGN KEY (cluster_id) REFERENCES clusters(cluster_id))
+END
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('topology_hostgroup') AND type = 'U')
+BEGIN
+CREATE TABLE topology_hostgroup (
+ id BIGINT NOT NULL,
+ name VARCHAR(255) NOT NULL,
+ group_properties TEXT,
+ group_attributes TEXT,
+ request_id BIGINT NOT NULL,
+ CONSTRAINT PK_topology_hostgroup PRIMARY KEY CLUSTERED (id),
+ CONSTRAINT FK_hostgroup_req_id FOREIGN KEY (request_id) REFERENCES topology_request(id))
+END
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('topology_host_info') AND type = 'U')
+BEGIN
+CREATE TABLE topology_host_info (
+ id BIGINT NOT NULL,
+ group_id BIGINT NOT NULL,
+ fqdn VARCHAR(255),
+ host_id BIGINT,
+ host_count INTEGER,
+ predicate VARCHAR(2048),
+ rack_info VARCHAR(255),
+ CONSTRAINT PK_topology_host_info PRIMARY KEY CLUSTERED (id),
+ CONSTRAINT FK_hostinfo_group_id FOREIGN KEY (group_id) REFERENCES topology_hostgroup(id),
+ CONSTRAINT FK_hostinfo_host_id FOREIGN KEY (host_id) REFERENCES hosts(host_id))
+END
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('topology_logical_request') AND type = 'U')
+BEGIN
+CREATE TABLE topology_logical_request (
+ id BIGINT NOT NULL,
+ request_id BIGINT NOT NULL,
+ description VARCHAR(1024),
+ CONSTRAINT PK_topology_logical_request PRIMARY KEY CLUSTERED (id),
+ CONSTRAINT FK_logicalreq_req_id FOREIGN KEY (request_id) REFERENCES topology_request(id))
+END
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('topology_host_request') AND type = 'U')
+BEGIN
+CREATE TABLE topology_host_request (
+ id BIGINT NOT NULL,
+ logical_request_id BIGINT NOT NULL,
+ group_id BIGINT NOT NULL,
+ stage_id BIGINT NOT NULL,
+ host_name VARCHAR(255),
+ CONSTRAINT PK_topology_host_request PRIMARY KEY CLUSTERED (id),
+ CONSTRAINT FK_hostreq_group_id FOREIGN KEY (group_id) REFERENCES topology_hostgroup(id),
+ CONSTRAINT FK_hostreq_logicalreq_id FOREIGN KEY (logical_request_id) REFERENCES topology_logical_request(id))
+END
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('topology_host_task') AND type = 'U')
+BEGIN
+CREATE TABLE topology_host_task (
+ id BIGINT NOT NULL,
+ host_request_id BIGINT NOT NULL,
+ type VARCHAR(255) NOT NULL,
+ CONSTRAINT PK_topology_host_task PRIMARY KEY CLUSTERED (id),
+ CONSTRAINT FK_hosttask_req_id FOREIGN KEY (host_request_id) REFERENCES topology_host_request (id))
+END
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('topology_logical_task') AND type = 'U')
+BEGIN
+CREATE TABLE topology_logical_task (
+ id BIGINT NOT NULL,
+ host_task_id BIGINT NOT NULL,
+ physical_task_id BIGINT,
+ component VARCHAR(255) NOT NULL,
+ CONSTRAINT PK_topology_logical_task PRIMARY KEY CLUSTERED (id),
+ CONSTRAINT FK_ltask_hosttask_id FOREIGN KEY (host_task_id) REFERENCES topology_host_task (id),
+ CONSTRAINT FK_ltask_hrc_id FOREIGN KEY (physical_task_id) REFERENCES host_role_command (task_id))
+END
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('setting') AND type = 'U')
+BEGIN
+CREATE TABLE setting (
+ id BIGINT NOT NULL,
+ name VARCHAR(255) NOT NULL UNIQUE,
+ setting_type VARCHAR(255) NOT NULL,
+ content TEXT NOT NULL,
+ updated_by VARCHAR(255) NOT NULL DEFAULT '_db',
+ update_timestamp BIGINT NOT NULL,
+ CONSTRAINT PK_setting PRIMARY KEY (id)
+)
+END
+
+
+-- Remote Cluster table
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('remoteambaricluster') AND type = 'U')
+BEGIN
+CREATE TABLE remoteambaricluster(
+ cluster_id BIGINT NOT NULL,
+ name VARCHAR(255) NOT NULL,
+ username VARCHAR(255) NOT NULL,
+ url VARCHAR(255) NOT NULL,
+ password VARCHAR(255) NOT NULL,
+ CONSTRAINT PK_remote_ambari_cluster PRIMARY KEY (cluster_id),
+ CONSTRAINT UQ_remote_ambari_cluster UNIQUE (name))
+END
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('remoteambariclusterservice') AND type = 'U')
+BEGIN
+CREATE TABLE remoteambariclusterservice(
+ id BIGINT NOT NULL,
+ cluster_id BIGINT NOT NULL,
+ service_name VARCHAR(255) NOT NULL,
+ CONSTRAINT PK_remote_ambari_service PRIMARY KEY (id),
+ CONSTRAINT FK_remote_ambari_cluster_id FOREIGN KEY (cluster_id) REFERENCES remoteambaricluster(cluster_id)
+)
+END
+
+
+-- Remote Cluster table ends
+
+-- upgrade tables
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('upgrade') AND type = 'U')
+BEGIN
+CREATE TABLE upgrade (
+ upgrade_id BIGINT NOT NULL,
+ cluster_id BIGINT NOT NULL,
+ request_id BIGINT NOT NULL,
+ direction VARCHAR(255) DEFAULT 'UPGRADE' NOT NULL,
+ orchestration VARCHAR(255) DEFAULT 'STANDARD' NOT NULL,
+ upgrade_package VARCHAR(255) NOT NULL,
+ upgrade_type VARCHAR(32) NOT NULL,
+ repo_version_id BIGINT NOT NULL,
+ skip_failures BIT NOT NULL DEFAULT 0,
+ skip_sc_failures BIT NOT NULL DEFAULT 0,
+ downgrade_allowed BIT NOT NULL DEFAULT 1,
+ revert_allowed BIT NOT NULL DEFAULT 0,
+ suspended BIT DEFAULT 0 NOT NULL,
+ CONSTRAINT PK_upgrade PRIMARY KEY CLUSTERED (upgrade_id),
+ FOREIGN KEY (cluster_id) REFERENCES clusters(cluster_id),
+ FOREIGN KEY (request_id) REFERENCES request(request_id),
+ FOREIGN KEY (repo_version_id) REFERENCES repo_version(repo_version_id)
+)
+END
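+
+-- clusters.upgrade_id points back at this table; that foreign key
+-- (FK_clusters_upgrade_id) is added by an ALTER TABLE further down to break
+-- the circular dependency between clusters and upgrade.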
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('upgrade_group') AND type = 'U')
+BEGIN
+CREATE TABLE upgrade_group (
+ upgrade_group_id BIGINT NOT NULL,
+ upgrade_id BIGINT NOT NULL,
+ group_name VARCHAR(255) DEFAULT '' NOT NULL,
+ group_title VARCHAR(1024) DEFAULT '' NOT NULL,
+ CONSTRAINT PK_upgrade_group PRIMARY KEY CLUSTERED (upgrade_group_id),
+ FOREIGN KEY (upgrade_id) REFERENCES upgrade(upgrade_id)
+)
+END
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('upgrade_item') AND type = 'U')
+BEGIN
+CREATE TABLE upgrade_item (
+ upgrade_item_id BIGINT NOT NULL,
+ upgrade_group_id BIGINT NOT NULL,
+ stage_id BIGINT NOT NULL,
+ state VARCHAR(255) DEFAULT 'NONE' NOT NULL,
+ hosts TEXT,
+ tasks TEXT,
+ item_text TEXT,
+ CONSTRAINT PK_upgrade_item PRIMARY KEY CLUSTERED (upgrade_item_id),
+ FOREIGN KEY (upgrade_group_id) REFERENCES upgrade_group(upgrade_group_id)
+)
+END
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('upgrade_history') AND type = 'U')
+BEGIN
+CREATE TABLE upgrade_history(
+ id BIGINT NOT NULL,
+ upgrade_id BIGINT NOT NULL,
+ service_name VARCHAR(255) NOT NULL,
+ component_name VARCHAR(255) NOT NULL,
+ from_repo_version_id BIGINT NOT NULL,
+ target_repo_version_id BIGINT NOT NULL,
+ CONSTRAINT PK_upgrade_hist PRIMARY KEY (id),
+ CONSTRAINT FK_upgrade_hist_upgrade_id FOREIGN KEY (upgrade_id) REFERENCES upgrade (upgrade_id),
+ CONSTRAINT FK_upgrade_hist_from_repo FOREIGN KEY (from_repo_version_id) REFERENCES repo_version (repo_version_id),
+ CONSTRAINT FK_upgrade_hist_target_repo FOREIGN KEY (target_repo_version_id) REFERENCES repo_version (repo_version_id),
+ CONSTRAINT UQ_upgrade_hist UNIQUE (upgrade_id, component_name, service_name)
+)
+END
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('servicecomponent_version') AND type = 'U')
+BEGIN
+CREATE TABLE servicecomponent_version(
+ id BIGINT NOT NULL,
+ component_id BIGINT NOT NULL,
+ repo_version_id BIGINT NOT NULL,
+ state VARCHAR(32) NOT NULL,
+ user_name VARCHAR(255) NOT NULL,
+ CONSTRAINT PK_sc_version PRIMARY KEY (id),
+ CONSTRAINT FK_scv_component_id FOREIGN KEY (component_id) REFERENCES servicecomponentdesiredstate (id),
+ CONSTRAINT FK_scv_repo_version_id FOREIGN KEY (repo_version_id) REFERENCES repo_version (repo_version_id)
+)
+END
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('ambari_operation_history') AND type = 'U')
+BEGIN
+CREATE TABLE ambari_operation_history(
+ id BIGINT NOT NULL,
+ from_version VARCHAR(255) NOT NULL,
+ to_version VARCHAR(255) NOT NULL,
+ start_time BIGINT NOT NULL,
+ end_time BIGINT,
+ operation_type VARCHAR(255) NOT NULL,
+ comments TEXT,
+ CONSTRAINT PK_ambari_operation_history PRIMARY KEY (id)
+)
+END
+
+
+-- tasks indices --
+IF NOT EXISTS (SELECT 1 FROM sys.indexes WHERE name = 'idx_stage_request_id')
+BEGIN
+CREATE INDEX idx_stage_request_id ON stage (request_id)
+END
+
+IF NOT EXISTS (SELECT 1 FROM sys.indexes WHERE name = 'idx_hrc_request_id')
+BEGIN
+CREATE INDEX idx_hrc_request_id ON host_role_command (request_id)
+END
+
+IF NOT EXISTS (SELECT 1 FROM sys.indexes WHERE name = 'idx_hrc_status_role')
+BEGIN
+CREATE INDEX idx_hrc_status_role ON host_role_command (status, role)
+END
+
+IF NOT EXISTS (SELECT 1 FROM sys.indexes WHERE name = 'idx_rsc_request_id')
+BEGIN
+CREATE INDEX idx_rsc_request_id ON role_success_criteria (request_id)
+END
+
+
+-- altering tables to add constraints ----------
+
+-- altering tables by creating foreign keys----------
+-- Note: Oracle limits identifier names (including FK names) to 30 characters, and we should use the same FK name in all DB types.
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('FK_clusters_upgrade_id') AND type = 'F')
+BEGIN
+ALTER TABLE clusters ADD CONSTRAINT FK_clusters_upgrade_id FOREIGN KEY (upgrade_id) REFERENCES upgrade (upgrade_id)
+END
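+
+-- Illustrative only (not executed): a catalog query for spotting constraint
+-- names that would overflow that 30-character budget on Oracle:
+--
+--   SELECT name, LEN(name) AS name_len
+--   FROM sys.objects
+--   WHERE type IN ('F', 'UQ', 'PK') AND LEN(name) > 30;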
+
+
+-- Kerberos
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('kerberos_principal') AND type = 'U')
+BEGIN
+CREATE TABLE kerberos_principal (
+ principal_name VARCHAR(255) NOT NULL,
+ is_service SMALLINT NOT NULL DEFAULT 1,
+ cached_keytab_path VARCHAR(255),
+ CONSTRAINT PK_kerberos_principal PRIMARY KEY CLUSTERED (principal_name)
+)
+END
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('kerberos_principal_host') AND type = 'U')
+BEGIN
+CREATE TABLE kerberos_principal_host (
+ principal_name VARCHAR(255) NOT NULL,
+ host_id BIGINT NOT NULL,
+ CONSTRAINT PK_kerberos_principal_host PRIMARY KEY CLUSTERED (principal_name, host_id),
+ CONSTRAINT FK_krb_pr_host_id FOREIGN KEY (host_id) REFERENCES hosts (host_id),
+ CONSTRAINT FK_krb_pr_host_principalname FOREIGN KEY (principal_name) REFERENCES kerberos_principal (principal_name))
+END
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('kerberos_descriptor') AND type = 'U')
+BEGIN
+CREATE TABLE kerberos_descriptor
+(
+ kerberos_descriptor_name VARCHAR(255) NOT NULL,
+ kerberos_descriptor VARCHAR(MAX) NOT NULL,
+ CONSTRAINT PK_kerberos_descriptor PRIMARY KEY (kerberos_descriptor_name)
+)
+END
+
+
+-- Kerberos (end)
+
+-- Alerting Framework
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('alert_definition') AND type = 'U')
+BEGIN
+CREATE TABLE alert_definition (
+ definition_id BIGINT NOT NULL,
+ cluster_id BIGINT NOT NULL,
+ definition_name VARCHAR(255) NOT NULL,
+ service_name VARCHAR(255) NOT NULL,
+ component_name VARCHAR(255),
+ scope VARCHAR(255) DEFAULT 'ANY' NOT NULL,
+ label VARCHAR(255),
+ help_url VARCHAR(512),
+ description TEXT,
+ enabled SMALLINT DEFAULT 1 NOT NULL,
+ schedule_interval INTEGER NOT NULL,
+ source_type VARCHAR(255) NOT NULL,
+ alert_source TEXT NOT NULL,
+ hash VARCHAR(64) NOT NULL,
+ ignore_host SMALLINT DEFAULT 0 NOT NULL,
+ repeat_tolerance INTEGER DEFAULT 1 NOT NULL,
+ repeat_tolerance_enabled SMALLINT DEFAULT 0 NOT NULL,
+ CONSTRAINT PK_alert_definition PRIMARY KEY CLUSTERED (definition_id),
+ FOREIGN KEY (cluster_id) REFERENCES clusters(cluster_id),
+ CONSTRAINT uni_alert_def_name UNIQUE(cluster_id,definition_name)
+)
+END
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('alert_history') AND type = 'U')
+BEGIN
+CREATE TABLE alert_history (
+ alert_id BIGINT NOT NULL,
+ cluster_id BIGINT NOT NULL,
+ alert_definition_id BIGINT NOT NULL,
+ service_name VARCHAR(255) NOT NULL,
+ component_name VARCHAR(255),
+ host_name VARCHAR(255),
+ alert_instance VARCHAR(255),
+ alert_timestamp BIGINT NOT NULL,
+ alert_label VARCHAR(1024),
+ alert_state VARCHAR(255) NOT NULL,
+ alert_text TEXT,
+ CONSTRAINT PK_alert_history PRIMARY KEY CLUSTERED (alert_id),
+ FOREIGN KEY (alert_definition_id) REFERENCES alert_definition(definition_id),
+ FOREIGN KEY (cluster_id) REFERENCES clusters(cluster_id)
+)
+END
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('alert_current') AND type = 'U')
+BEGIN
+CREATE TABLE alert_current (
+ alert_id BIGINT NOT NULL,
+ definition_id BIGINT NOT NULL,
+ history_id BIGINT NOT NULL UNIQUE,
+ maintenance_state VARCHAR(255) NOT NULL,
+ original_timestamp BIGINT NOT NULL,
+ latest_timestamp BIGINT NOT NULL,
+ latest_text TEXT,
+ occurrences BIGINT NOT NULL DEFAULT 1,
+ firmness VARCHAR(255) NOT NULL DEFAULT 'HARD',
+ CONSTRAINT PK_alert_current PRIMARY KEY CLUSTERED (alert_id),
+ FOREIGN KEY (definition_id) REFERENCES alert_definition(definition_id),
+ FOREIGN KEY (history_id) REFERENCES alert_history(alert_id)
+)
+END
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('alert_group') AND type = 'U')
+BEGIN
+CREATE TABLE alert_group (
+ group_id BIGINT NOT NULL,
+ cluster_id BIGINT NOT NULL,
+ group_name VARCHAR(255) NOT NULL,
+ is_default SMALLINT NOT NULL DEFAULT 0,
+ service_name VARCHAR(255),
+ CONSTRAINT PK_alert_group PRIMARY KEY CLUSTERED (group_id),
+ CONSTRAINT uni_alert_group_name UNIQUE(cluster_id,group_name)
+)
+END
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('alert_target') AND type = 'U')
+BEGIN
+CREATE TABLE alert_target (
+ target_id BIGINT NOT NULL,
+ target_name VARCHAR(255) NOT NULL UNIQUE,
+ notification_type VARCHAR(64) NOT NULL,
+ properties TEXT,
+ description VARCHAR(1024),
+ is_global SMALLINT NOT NULL DEFAULT 0,
+ is_enabled SMALLINT NOT NULL DEFAULT 1,
+ CONSTRAINT PK_alert_target PRIMARY KEY CLUSTERED (target_id)
+)
+END
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('alert_target_states') AND type = 'U')
+BEGIN
+CREATE TABLE alert_target_states (
+ target_id BIGINT NOT NULL,
+ alert_state VARCHAR(255) NOT NULL,
+ FOREIGN KEY (target_id) REFERENCES alert_target(target_id)
+)
+END
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('alert_group_target') AND type = 'U')
+BEGIN
+CREATE TABLE alert_group_target (
+ group_id BIGINT NOT NULL,
+ target_id BIGINT NOT NULL,
+ CONSTRAINT PK_alert_group_target PRIMARY KEY CLUSTERED (group_id, target_id),
+ FOREIGN KEY (group_id) REFERENCES alert_group(group_id),
+ FOREIGN KEY (target_id) REFERENCES alert_target(target_id)
+)
+END
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('alert_grouping') AND type = 'U')
+BEGIN
+CREATE TABLE alert_grouping (
+ definition_id BIGINT NOT NULL,
+ group_id BIGINT NOT NULL,
+ CONSTRAINT PK_alert_grouping PRIMARY KEY CLUSTERED (group_id, definition_id),
+ FOREIGN KEY (definition_id) REFERENCES alert_definition(definition_id),
+ FOREIGN KEY (group_id) REFERENCES alert_group(group_id)
+)
+END
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('alert_notice') AND type = 'U')
+BEGIN
+CREATE TABLE alert_notice (
+ notification_id BIGINT NOT NULL,
+ target_id BIGINT NOT NULL,
+ history_id BIGINT NOT NULL,
+ notify_state VARCHAR(255) NOT NULL,
+ uuid VARCHAR(64) NOT NULL UNIQUE,
+ CONSTRAINT PK_alert_notice PRIMARY KEY CLUSTERED (notification_id),
+ FOREIGN KEY (target_id) REFERENCES alert_target(target_id),
+ FOREIGN KEY (history_id) REFERENCES alert_history(alert_id)
+)
+END
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.indexes WHERE name = 'idx_alert_history_def_id')
+BEGIN
+CREATE INDEX idx_alert_history_def_id ON alert_history (alert_definition_id)
+END
+
+IF NOT EXISTS (SELECT 1 FROM sys.indexes WHERE name = 'idx_alert_history_service')
+BEGIN
+CREATE INDEX idx_alert_history_service ON alert_history (service_name)
+END
+
+IF NOT EXISTS (SELECT 1 FROM sys.indexes WHERE name = 'idx_alert_history_host')
+BEGIN
+CREATE INDEX idx_alert_history_host ON alert_history (host_name)
+END
+
+IF NOT EXISTS (SELECT 1 FROM sys.indexes WHERE name = 'idx_alert_history_time')
+BEGIN
+CREATE INDEX idx_alert_history_time ON alert_history (alert_timestamp)
+END
+
+IF NOT EXISTS (SELECT 1 FROM sys.indexes WHERE name = 'idx_alert_history_state')
+BEGIN
+CREATE INDEX idx_alert_history_state ON alert_history (alert_state)
+END
+
+IF NOT EXISTS (SELECT 1 FROM sys.indexes WHERE name = 'idx_alert_group_name')
+BEGIN
+CREATE INDEX idx_alert_group_name ON alert_group (group_name)
+END
+
+IF NOT EXISTS (SELECT 1 FROM sys.indexes WHERE name = 'idx_alert_notice_state')
+BEGIN
+CREATE INDEX idx_alert_notice_state ON alert_notice (notify_state)
+END
+
+
+-- inserting some data ----------
+BEGIN TRANSACTION
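+ -- Reset the bootstrap data: child tables are cleared before their parents so
+ -- the deletes below do not violate foreign key constraints.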
+ DELETE metainfo;
+ DELETE adminprivilege;
+ DELETE permission_roleauthorization;
+ DELETE roleauthorization;
+ DELETE adminpermission;
+ DELETE users;
+ DELETE adminprincipal;
+ DELETE adminprincipaltype;
+ DELETE adminresource;
+ DELETE adminresourcetype;
+ DELETE ambari_sequences;
+ INSERT INTO ambari_sequences (sequence_name, [sequence_value])
+ VALUES
+ ('cluster_id_seq', 1),
+ ('host_id_seq', 0),
+ ('user_id_seq', 2),
+ ('group_id_seq', 1),
+ ('member_id_seq', 1),
+ ('host_role_command_id_seq', 1),
+ ('configgroup_id_seq', 1),
+ ('requestschedule_id_seq', 1),
+ ('resourcefilter_id_seq', 1),
+ ('viewentity_id_seq', 0),
+ ('operation_level_id_seq', 1),
+ ('view_instance_id_seq', 1),
+ ('resource_type_id_seq', 4),
+ ('resource_id_seq', 2),
+ ('principal_type_id_seq', 8),
+ ('principal_id_seq', 13),
+ ('permission_id_seq', 7),
+ ('privilege_id_seq', 1),
+ ('alert_definition_id_seq', 0),
+ ('alert_group_id_seq', 0),
+ ('alert_target_id_seq', 0),
+ ('alert_history_id_seq', 0),
+ ('alert_notice_id_seq', 0),
+ ('alert_current_id_seq', 0),
+ ('config_id_seq', 11),
+ ('repo_version_id_seq', 0),
+ ('host_version_id_seq', 0),
+ ('service_config_id_seq', 1),
+ ('upgrade_id_seq', 0),
+ ('upgrade_group_id_seq', 0),
+ ('widget_id_seq', 0),
+ ('widget_layout_id_seq', 0),
+ ('upgrade_item_id_seq', 0),
+ ('stack_id_seq', 0),
+ ('extension_id_seq', 0),
+ ('link_id_seq', 0),
+ ('topology_host_info_id_seq', 0),
+ ('topology_host_request_id_seq', 0),
+ ('topology_host_task_id_seq', 0),
+ ('topology_logical_request_id_seq', 0),
+ ('topology_logical_task_id_seq', 0),
+ ('topology_request_id_seq', 0),
+ ('topology_host_group_id_seq', 0),
+ ('setting_id_seq', 0),
+ ('hostcomponentstate_id_seq', 0),
+ ('servicecomponentdesiredstate_id_seq', 0),
+ ('upgrade_history_id_seq', 0),
+ ('blueprint_setting_id_seq', 0),
+ ('ambari_operation_history_id_seq', 0),
+ ('remote_cluster_id_seq', 0),
+ ('remote_cluster_service_id_seq', 0),
+ ('servicecomponent_version_id_seq', 0),
+ ('hostcomponentdesiredstate_id_seq', 0)
+
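+ -- ambari_sequences emulates database sequences portably: each row holds the
+ -- next value to hand out for one id namespace. The server claims ids with an
+ -- atomic read-and-increment; a minimal T-SQL sketch of the idea (illustrative
+ -- only; the real increments happen through JPA):
+ --
+ --   DECLARE @next BIGINT;
+ --   UPDATE ambari_sequences
+ --   SET @next = [sequence_value] = [sequence_value] + 1
+ --   WHERE sequence_name = 'host_id_seq';
+ --   -- @next is now a fresh, unique id for a hosts row
+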
+ INSERT INTO adminresourcetype (resource_type_id, resource_type_name)
+ VALUES
+ (1, 'AMBARI'),
+ (2, 'CLUSTER'),
+ (3, 'VIEW')
+
+ INSERT INTO adminresource (resource_id, resource_type_id)
+ SELECT 1, 1
+
+ INSERT INTO adminprincipaltype (principal_type_id, principal_type_name)
+ VALUES
+ (1, 'USER'),
+ (2, 'GROUP'),
+ (8, 'ROLE')
+
+ INSERT INTO adminprincipal (principal_id, principal_type_id)
+ VALUES
+ (1, 1),
+ (7, 8),
+ (8, 8),
+ (9, 8),
+ (10, 8),
+ (11, 8),
+ (12, 8),
+ (13, 8)
+
+ INSERT INTO users (user_id, principal_id, user_name, user_password)
+ SELECT 1, 1, 'admin', '538916f8943ec225d97a9a86a2c6ec0818c1cd400e09e03b660fdaaec4af29ddbb6f2b1033b81b00'
+
+ INSERT INTO adminpermission (permission_id, permission_name, resource_type_id, permission_label, principal_id, sort_order)
+ VALUES
+ (1, 'AMBARI.ADMINISTRATOR', 1, 'Ambari Administrator', 7, 1),
+ (2, 'CLUSTER.USER', 2, 'Cluster User', 8, 6),
+ (3, 'CLUSTER.ADMINISTRATOR', 2, 'Cluster Administrator', 9, 2),
+ (4, 'VIEW.USER', 3, 'View User', 10, 7),
+ (5, 'CLUSTER.OPERATOR', 2, 'Cluster Operator', 11, 3),
+ (6, 'SERVICE.ADMINISTRATOR', 2, 'Service Administrator', 12, 4),
+ (7, 'SERVICE.OPERATOR', 2, 'Service Operator', 13, 5)
+
+ INSERT INTO roleauthorization(authorization_id, authorization_name)
+ SELECT 'VIEW.USE', 'Use View' UNION ALL
+ SELECT 'SERVICE.VIEW_METRICS', 'View metrics' UNION ALL
+ SELECT 'SERVICE.VIEW_STATUS_INFO', 'View status information' UNION ALL
+ SELECT 'SERVICE.VIEW_CONFIGS', 'View configurations' UNION ALL
+ SELECT 'SERVICE.COMPARE_CONFIGS', 'Compare configurations' UNION ALL
+ SELECT 'SERVICE.VIEW_ALERTS', 'View service-level alerts' UNION ALL
+ SELECT 'SERVICE.START_STOP', 'Start/Stop/Restart Service' UNION ALL
+ SELECT 'SERVICE.DECOMMISSION_RECOMMISSION', 'Decommission/recommission' UNION ALL
+ SELECT 'SERVICE.RUN_SERVICE_CHECK', 'Run service checks' UNION ALL
+ SELECT 'SERVICE.TOGGLE_MAINTENANCE', 'Turn on/off maintenance mode' UNION ALL
+ SELECT 'SERVICE.RUN_CUSTOM_COMMAND', 'Perform service-specific tasks' UNION ALL
+ SELECT 'SERVICE.MODIFY_CONFIGS', 'Modify configurations' UNION ALL
+ SELECT 'SERVICE.MANAGE_ALERTS', 'Manage service-level alerts' UNION ALL
+ SELECT 'SERVICE.MANAGE_CONFIG_GROUPS', 'Manage configuration groups' UNION ALL
+ SELECT 'SERVICE.MOVE', 'Move service to another host' UNION ALL
+ SELECT 'SERVICE.ENABLE_HA', 'Enable HA' UNION ALL
+ SELECT 'SERVICE.TOGGLE_ALERTS', 'Enable/disable service-level alerts' UNION ALL
+ SELECT 'SERVICE.ADD_DELETE_SERVICES', 'Add/delete services' UNION ALL
+ SELECT 'SERVICE.VIEW_OPERATIONAL_LOGS', 'View service operational logs' UNION ALL
+ SELECT 'SERVICE.SET_SERVICE_USERS_GROUPS', 'Set service users and groups' UNION ALL
+ SELECT 'SERVICE.MANAGE_AUTO_START', 'Manage service auto-start' UNION ALL
+ SELECT 'HOST.VIEW_METRICS', 'View metrics' UNION ALL
+ SELECT 'HOST.VIEW_STATUS_INFO', 'View status information' UNION ALL
+ SELECT 'HOST.VIEW_CONFIGS', 'View configuration' UNION ALL
+ SELECT 'HOST.TOGGLE_MAINTENANCE', 'Turn on/off maintenance mode' UNION ALL
+ SELECT 'HOST.ADD_DELETE_COMPONENTS', 'Install components' UNION ALL
+ SELECT 'HOST.ADD_DELETE_HOSTS', 'Add/Delete hosts' UNION ALL
+ SELECT 'CLUSTER.VIEW_METRICS', 'View metrics' UNION ALL
+ SELECT 'CLUSTER.VIEW_STATUS_INFO', 'View status information' UNION ALL
+ SELECT 'CLUSTER.VIEW_CONFIGS', 'View configuration' UNION ALL
+ SELECT 'CLUSTER.VIEW_STACK_DETAILS', 'View stack version details' UNION ALL
+ SELECT 'CLUSTER.VIEW_ALERTS', 'View cluster-level alerts' UNION ALL
+ SELECT 'CLUSTER.MANAGE_CREDENTIALS', 'Manage external credentials' UNION ALL
+ SELECT 'CLUSTER.MODIFY_CONFIGS', 'Modify cluster configurations' UNION ALL
+ SELECT 'CLUSTER.MANAGE_ALERTS', 'Manage cluster-level alerts' UNION ALL
+ SELECT 'CLUSTER.MANAGE_USER_PERSISTED_DATA', 'Manage cluster-level user persisted data' UNION ALL
+ SELECT 'CLUSTER.TOGGLE_ALERTS', 'Enable/disable cluster-level alerts' UNION ALL
+ SELECT 'CLUSTER.MANAGE_CONFIG_GROUPS', 'Manage cluster config groups' UNION ALL
+ SELECT 'CLUSTER.TOGGLE_KERBEROS', 'Enable/disable Kerberos' UNION ALL
+ SELECT 'CLUSTER.UPGRADE_DOWNGRADE_STACK', 'Upgrade/downgrade stack' UNION ALL
+ SELECT 'CLUSTER.RUN_CUSTOM_COMMAND', 'Perform custom cluster-level actions' UNION ALL
+ SELECT 'CLUSTER.MANAGE_AUTO_START', 'Manage service auto-start configuration' UNION ALL
+ SELECT 'CLUSTER.MANAGE_ALERT_NOTIFICATIONS', 'Manage alert notifications configuration' UNION ALL
+ SELECT 'AMBARI.ADD_DELETE_CLUSTERS', 'Create new clusters' UNION ALL
+ SELECT 'AMBARI.RENAME_CLUSTER', 'Rename clusters' UNION ALL
+ SELECT 'AMBARI.MANAGE_SETTINGS', 'Manage settings' UNION ALL
+ SELECT 'AMBARI.MANAGE_USERS', 'Manage users' UNION ALL
+ SELECT 'AMBARI.MANAGE_GROUPS', 'Manage groups' UNION ALL
+ SELECT 'AMBARI.MANAGE_VIEWS', 'Manage Ambari Views' UNION ALL
+ SELECT 'AMBARI.ASSIGN_ROLES', 'Assign roles' UNION ALL
+ SELECT 'AMBARI.MANAGE_STACK_VERSIONS', 'Manage stack versions' UNION ALL
+ SELECT 'AMBARI.EDIT_STACK_REPOS', 'Edit stack repository URLs' UNION ALL
+ SELECT 'AMBARI.RUN_CUSTOM_COMMAND', 'Perform custom administrative actions'
+
+ -- Set authorizations for View User role
+ INSERT INTO permission_roleauthorization(permission_id, authorization_id)
+ SELECT permission_id, 'VIEW.USE' FROM adminpermission WHERE permission_name='VIEW.USER'
+
+ -- Set authorizations for Cluster User role
+ INSERT INTO permission_roleauthorization(permission_id, authorization_id)
+ SELECT permission_id, 'SERVICE.VIEW_METRICS' FROM adminpermission WHERE permission_name='CLUSTER.USER' UNION ALL
+ SELECT permission_id, 'SERVICE.VIEW_STATUS_INFO' FROM adminpermission WHERE permission_name='CLUSTER.USER' UNION ALL
+ SELECT permission_id, 'SERVICE.VIEW_CONFIGS' FROM adminpermission WHERE permission_name='CLUSTER.USER' UNION ALL
+ SELECT permission_id, 'SERVICE.COMPARE_CONFIGS' FROM adminpermission WHERE permission_name='CLUSTER.USER' UNION ALL
+ SELECT permission_id, 'SERVICE.VIEW_ALERTS' FROM adminpermission WHERE permission_name='CLUSTER.USER' UNION ALL
+ SELECT permission_id, 'HOST.VIEW_METRICS' FROM adminpermission WHERE permission_name='CLUSTER.USER' UNION ALL
+ SELECT permission_id, 'HOST.VIEW_STATUS_INFO' FROM adminpermission WHERE permission_name='CLUSTER.USER' UNION ALL
+ SELECT permission_id, 'HOST.VIEW_CONFIGS' FROM adminpermission WHERE permission_name='CLUSTER.USER' UNION ALL
+ SELECT permission_id, 'CLUSTER.VIEW_METRICS' FROM adminpermission WHERE permission_name='CLUSTER.USER' UNION ALL
+ SELECT permission_id, 'CLUSTER.VIEW_STATUS_INFO' FROM adminpermission WHERE permission_name='CLUSTER.USER' UNION ALL
+ SELECT permission_id, 'CLUSTER.VIEW_CONFIGS' FROM adminpermission WHERE permission_name='CLUSTER.USER' UNION ALL
+ SELECT permission_id, 'CLUSTER.VIEW_STACK_DETAILS' FROM adminpermission WHERE permission_name='CLUSTER.USER' UNION ALL
+ SELECT permission_id, 'CLUSTER.VIEW_ALERTS' FROM adminpermission WHERE permission_name='CLUSTER.USER' UNION ALL
+ SELECT permission_id, 'CLUSTER.MANAGE_USER_PERSISTED_DATA' FROM adminpermission WHERE permission_name='CLUSTER.USER'
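+
+ -- Illustrative only (not executed): a role's effective authorizations can be
+ -- read back by joining the two mapping tables:
+ --   SELECT p.permission_name, ra.authorization_name
+ --   FROM permission_roleauthorization pra
+ --   JOIN adminpermission p ON p.permission_id = pra.permission_id
+ --   JOIN roleauthorization ra ON ra.authorization_id = pra.authorization_id
+ --   WHERE p.permission_name = 'CLUSTER.USER';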
+
+ -- Set authorizations for Service Operator role
+ INSERT INTO permission_roleauthorization(permission_id, authorization_id)
+ SELECT permission_id, 'SERVICE.VIEW_METRICS' FROM adminpermission WHERE permission_name='SERVICE.OPERATOR' UNION ALL
+ SELECT permission_id, 'SERVICE.VIEW_STATUS_INFO' FROM adminpermission WHERE permission_name='SERVICE.OPERATOR' UNION ALL
+ SELECT permission_id, 'SERVICE.VIEW_CONFIGS' FROM adminpermission WHERE permission_name='SERVICE.OPERATOR' UNION ALL
+ SELECT permission_id, 'SERVICE.COMPARE_CONFIGS' FROM adminpermission WHERE permission_name='SERVICE.OPERATOR' UNION ALL
+ SELECT permission_id, 'SERVICE.VIEW_ALERTS' FROM adminpermission WHERE permission_name='SERVICE.OPERATOR' UNION ALL
+ SELECT permission_id, 'SERVICE.START_STOP' FROM adminpermission WHERE permission_name='SERVICE.OPERATOR' UNION ALL
+ SELECT permission_id, 'SERVICE.DECOMMISSION_RECOMMISSION' FROM adminpermission WHERE permission_name='SERVICE.OPERATOR' UNION ALL
+ SELECT permission_id, 'SERVICE.RUN_SERVICE_CHECK' FROM adminpermission WHERE permission_name='SERVICE.OPERATOR' UNION ALL
+ SELECT permission_id, 'SERVICE.TOGGLE_MAINTENANCE' FROM adminpermission WHERE permission_name='SERVICE.OPERATOR' UNION ALL
+ SELECT permission_id, 'SERVICE.RUN_CUSTOM_COMMAND' FROM adminpermission WHERE permission_name='SERVICE.OPERATOR' UNION ALL
+ SELECT permission_id, 'HOST.VIEW_METRICS' FROM adminpermission WHERE permission_name='SERVICE.OPERATOR' UNION ALL
+ SELECT permission_id, 'HOST.VIEW_STATUS_INFO' FROM adminpermission WHERE permission_name='SERVICE.OPERATOR' UNION ALL
+ SELECT permission_id, 'HOST.VIEW_CONFIGS' FROM adminpermission WHERE permission_name='SERVICE.OPERATOR' UNION ALL
+ SELECT permission_id, 'CLUSTER.VIEW_METRICS' FROM adminpermission WHERE permission_name='SERVICE.OPERATOR' UNION ALL
+ SELECT permission_id, 'CLUSTER.VIEW_STATUS_INFO' FROM adminpermission WHERE permission_name='SERVICE.OPERATOR' UNION ALL
+ SELECT permission_id, 'CLUSTER.VIEW_CONFIGS' FROM adminpermission WHERE permission_name='SERVICE.OPERATOR' UNION ALL
+ SELECT permission_id, 'CLUSTER.VIEW_STACK_DETAILS' FROM adminpermission WHERE permission_name='SERVICE.OPERATOR' UNION ALL
+ SELECT permission_id, 'CLUSTER.VIEW_ALERTS' FROM adminpermission WHERE permission_name='SERVICE.OPERATOR' UNION ALL
+ SELECT permission_id, 'CLUSTER.MANAGE_USER_PERSISTED_DATA' FROM adminpermission WHERE permission_name='SERVICE.OPERATOR'
+
+ -- Set authorizations for Service Administrator role
+ INSERT INTO permission_roleauthorization(permission_id, authorization_id)
+ SELECT permission_id, 'SERVICE.VIEW_METRICS' FROM adminpermission WHERE permission_name='SERVICE.ADMINISTRATOR' UNION ALL
+ SELECT permission_id, 'SERVICE.VIEW_STATUS_INFO' FROM adminpermission WHERE permission_name='SERVICE.ADMINISTRATOR' UNION ALL
+ SELECT permission_id, 'SERVICE.VIEW_CONFIGS' FROM adminpermission WHERE permission_name='SERVICE.ADMINISTRATOR' UNION ALL
+ SELECT permission_id, 'SERVICE.COMPARE_CONFIGS' FROM adminpermission WHERE permission_name='SERVICE.ADMINISTRATOR' UNION ALL
+ SELECT permission_id, 'SERVICE.VIEW_ALERTS' FROM adminpermission WHERE permission_name='SERVICE.ADMINISTRATOR' UNION ALL
+ SELECT permission_id, 'SERVICE.START_STOP' FROM adminpermission WHERE permission_name='SERVICE.ADMINISTRATOR' UNION ALL
+ SELECT permission_id, 'SERVICE.DECOMMISSION_RECOMMISSION' FROM adminpermission WHERE permission_name='SERVICE.ADMINISTRATOR' UNION ALL
+ SELECT permission_id, 'SERVICE.RUN_SERVICE_CHECK' FROM adminpermission WHERE permission_name='SERVICE.ADMINISTRATOR' UNION ALL
+ SELECT permission_id, 'SERVICE.TOGGLE_MAINTENANCE' FROM adminpermission WHERE permission_name='SERVICE.ADMINISTRATOR' UNION ALL
+ SELECT permission_id, 'SERVICE.RUN_CUSTOM_COMMAND' FROM adminpermission WHERE permission_name='SERVICE.ADMINISTRATOR' UNION ALL
+ SELECT permission_id, 'SERVICE.MODIFY_CONFIGS' FROM adminpermission WHERE permission_name='SERVICE.ADMINISTRATOR' UNION ALL
+ SELECT permission_id, 'SERVICE.MANAGE_CONFIG_GROUPS' FROM adminpermission WHERE permission_name='SERVICE.ADMINISTRATOR' UNION ALL
+ SELECT permission_id, 'SERVICE.VIEW_OPERATIONAL_LOGS' FROM adminpermission WHERE permission_name='SERVICE.ADMINISTRATOR' UNION ALL
+ SELECT permission_id, 'SERVICE.MANAGE_AUTO_START' FROM adminpermission WHERE permission_name='SERVICE.ADMINISTRATOR' UNION ALL
+ SELECT permission_id, 'HOST.VIEW_METRICS' FROM adminpermission WHERE permission_name='SERVICE.ADMINISTRATOR' UNION ALL
+ SELECT permission_id, 'HOST.VIEW_STATUS_INFO' FROM adminpermission WHERE permission_name='SERVICE.ADMINISTRATOR' UNION ALL
+ SELECT permission_id, 'HOST.VIEW_CONFIGS' FROM adminpermission WHERE permission_name='SERVICE.ADMINISTRATOR' UNION ALL
+ SELECT permission_id, 'CLUSTER.VIEW_METRICS' FROM adminpermission WHERE permission_name='SERVICE.ADMINISTRATOR' UNION ALL
+ SELECT permission_id, 'CLUSTER.VIEW_STATUS_INFO' FROM adminpermission WHERE permission_name='SERVICE.ADMINISTRATOR' UNION ALL
+ SELECT permission_id, 'CLUSTER.VIEW_CONFIGS' FROM adminpermission WHERE permission_name='SERVICE.ADMINISTRATOR' UNION ALL
+ SELECT permission_id, 'CLUSTER.VIEW_STACK_DETAILS' FROM adminpermission WHERE permission_name='SERVICE.ADMINISTRATOR' UNION ALL
+ SELECT permission_id, 'CLUSTER.MANAGE_CONFIG_GROUPS' FROM adminpermission WHERE permission_name='SERVICE.ADMINISTRATOR' UNION ALL
+ SELECT permission_id, 'CLUSTER.VIEW_ALERTS' FROM adminpermission WHERE permission_name='SERVICE.ADMINISTRATOR' UNION ALL
+ SELECT permission_id, 'CLUSTER.MANAGE_USER_PERSISTED_DATA' FROM adminpermission WHERE permission_name='SERVICE.ADMINISTRATOR'
+
+ -- Set authorizations for Cluster Operator role
+ INSERT INTO permission_roleauthorization(permission_id, authorization_id)
+ SELECT permission_id, 'SERVICE.VIEW_METRICS' FROM adminpermission WHERE permission_name='CLUSTER.OPERATOR' UNION ALL
+ SELECT permission_id, 'SERVICE.VIEW_STATUS_INFO' FROM adminpermission WHERE permission_name='CLUSTER.OPERATOR' UNION ALL
+ SELECT permission_id, 'SERVICE.VIEW_CONFIGS' FROM adminpermission WHERE permission_name='CLUSTER.OPERATOR' UNION ALL
+ SELECT permission_id, 'SERVICE.COMPARE_CONFIGS' FROM adminpermission WHERE permission_name='CLUSTER.OPERATOR' UNION ALL
+ SELECT permission_id, 'SERVICE.VIEW_ALERTS' FROM adminpermission WHERE permission_name='CLUSTER.OPERATOR' UNION ALL
+ SELECT permission_id, 'SERVICE.START_STOP' FROM adminpermission WHERE permission_name='CLUSTER.OPERATOR' UNION ALL
+ SELECT permission_id, 'SERVICE.DECOMMISSION_RECOMMISSION' FROM adminpermission WHERE permission_name='CLUSTER.OPERATOR' UNION ALL
+ SELECT permission_id, 'SERVICE.RUN_SERVICE_CHECK' FROM adminpermission WHERE permission_name='CLUSTER.OPERATOR' UNION ALL
+ SELECT permission_id, 'SERVICE.TOGGLE_MAINTENANCE' FROM adminpermission WHERE permission_name='CLUSTER.OPERATOR' UNION ALL
+ SELECT permission_id, 'SERVICE.RUN_CUSTOM_COMMAND' FROM adminpermission WHERE permission_name='CLUSTER.OPERATOR' UNION ALL
+ SELECT permission_id, 'SERVICE.MODIFY_CONFIGS' FROM adminpermission WHERE permission_name='CLUSTER.OPERATOR' UNION ALL
+ SELECT permission_id, 'SERVICE.MANAGE_CONFIG_GROUPS' FROM adminpermission WHERE permission_name='CLUSTER.OPERATOR' UNION ALL
+ SELECT permission_id, 'SERVICE.MOVE' FROM adminpermission WHERE permission_name='CLUSTER.OPERATOR' UNION ALL
+ SELECT permission_id, 'SERVICE.ENABLE_HA' FROM adminpermission WHERE permission_name='CLUSTER.OPERATOR' UNION ALL
+ SELECT permission_id, 'SERVICE.VIEW_OPERATIONAL_LOGS' FROM adminpermission WHERE permission_name='CLUSTER.OPERATOR' UNION ALL
+ SELECT permission_id, 'SERVICE.MANAGE_AUTO_START' FROM adminpermission WHERE permission_name='CLUSTER.OPERATOR' UNION ALL
+ SELECT permission_id, 'HOST.VIEW_METRICS' FROM adminpermission WHERE permission_name='CLUSTER.OPERATOR' UNION ALL
+ SELECT permission_id, 'HOST.VIEW_STATUS_INFO' FROM adminpermission WHERE permission_name='CLUSTER.OPERATOR' UNION ALL
+ SELECT permission_id, 'HOST.VIEW_CONFIGS' FROM adminpermission WHERE permission_name='CLUSTER.OPERATOR' UNION ALL
+ SELECT permission_id, 'HOST.TOGGLE_MAINTENANCE' FROM adminpermission WHERE permission_name='CLUSTER.OPERATOR' UNION ALL
+ SELECT permission_id, 'HOST.ADD_DELETE_COMPONENTS' FROM adminpermission WHERE permission_name='CLUSTER.OPERATOR' UNION ALL
+ SELECT permission_id, 'HOST.ADD_DELETE_HOSTS' FROM adminpermission WHERE permission_name='CLUSTER.OPERATOR' UNION ALL
+ SELECT permission_id, 'CLUSTER.VIEW_METRICS' FROM adminpermission WHERE permission_name='CLUSTER.OPERATOR' UNION ALL
+ SELECT permission_id, 'CLUSTER.VIEW_STATUS_INFO' FROM adminpermission WHERE permission_name='CLUSTER.OPERATOR' UNION ALL
+ SELECT permission_id, 'CLUSTER.VIEW_CONFIGS' FROM adminpermission WHERE permission_name='CLUSTER.OPERATOR' UNION ALL
+ SELECT permission_id, 'CLUSTER.VIEW_STACK_DETAILS' FROM adminpermission WHERE permission_name='CLUSTER.OPERATOR' UNION ALL
+ SELECT permission_id, 'CLUSTER.MANAGE_CONFIG_GROUPS' FROM adminpermission WHERE permission_name='CLUSTER.OPERATOR' UNION ALL
+ SELECT permission_id, 'CLUSTER.VIEW_ALERTS' FROM adminpermission WHERE permission_name='CLUSTER.OPERATOR' UNION ALL
+ SELECT permission_id, 'CLUSTER.MANAGE_CREDENTIALS' FROM adminpermission WHERE permission_name='CLUSTER.OPERATOR' UNION ALL
+ SELECT permission_id, 'CLUSTER.MANAGE_AUTO_START' FROM adminpermission WHERE permission_name='CLUSTER.OPERATOR' UNION ALL
+ SELECT permission_id, 'CLUSTER.MANAGE_USER_PERSISTED_DATA' FROM adminpermission WHERE permission_name='CLUSTER.OPERATOR'
+
+ -- Set authorizations for Cluster Administrator role
+ INSERT INTO permission_roleauthorization(permission_id, authorization_id)
+ SELECT permission_id, 'SERVICE.VIEW_METRICS' FROM adminpermission WHERE permission_name='CLUSTER.ADMINISTRATOR' UNION ALL
+ SELECT permission_id, 'SERVICE.VIEW_STATUS_INFO' FROM adminpermission WHERE permission_name='CLUSTER.ADMINISTRATOR' UNION ALL
+ SELECT permission_id, 'SERVICE.VIEW_CONFIGS' FROM adminpermission WHERE permission_name='CLUSTER.ADMINISTRATOR' UNION ALL
+ SELECT permission_id, 'SERVICE.COMPARE_CONFIGS' FROM adminpermission WHERE permission_name='CLUSTER.ADMINISTRATOR' UNION ALL
+ SELECT permission_id, 'SERVICE.VIEW_ALERTS' FROM adminpermission WHERE permission_name='CLUSTER.ADMINISTRATOR' UNION ALL
+ SELECT permission_id, 'SERVICE.START_STOP' FROM adminpermission WHERE permission_name='CLUSTER.ADMINISTRATOR' UNION ALL
+ SELECT permission_id, 'SERVICE.DECOMMISSION_RECOMMISSION' FROM adminpermission WHERE permission_name='CLUSTER.ADMINISTRATOR' UNION ALL
+ SELECT permission_id, 'SERVICE.RUN_SERVICE_CHECK' FROM adminpermission WHERE permission_name='CLUSTER.ADMINISTRATOR' UNION ALL
+ SELECT permission_id, 'SERVICE.TOGGLE_MAINTENANCE' FROM adminpermission WHERE permission_name='CLUSTER.ADMINISTRATOR' UNION ALL
+ SELECT permission_id, 'SERVICE.RUN_CUSTOM_COMMAND' FROM adminpermission WHERE permission_name='CLUSTER.ADMINISTRATOR' UNION ALL
+ SELECT permission_id, 'SERVICE.MODIFY_CONFIGS' FROM adminpermission WHERE permission_name='CLUSTER.ADMINISTRATOR' UNION ALL
+ SELECT permission_id, 'SERVICE.MANAGE_CONFIG_GROUPS' FROM adminpermission WHERE permission_name='CLUSTER.ADMINISTRATOR' UNION ALL
+ SELECT permission_id, 'SERVICE.MANAGE_ALERTS' FROM adminpermission WHERE permission_name='CLUSTER.ADMINISTRATOR' UNION ALL
+ SELECT permission_id, 'SERVICE.MOVE' FROM adminpermission WHERE permission_name='CLUSTER.ADMINISTRATOR' UNION ALL
+ SELECT permission_id, 'SERVICE.ENABLE_HA' FROM adminpermission WHERE permission_name='CLUSTER.ADMINISTRATOR' UNION ALL
+ SELECT permission_id, 'SERVICE.TOGGLE_ALERTS' FROM adminpermission WHERE permission_name='CLUSTER.ADMINISTRATOR' UNION ALL
+ SELECT permission_id, 'SERVICE.ADD_DELETE_SERVICES' FROM adminpermission WHERE permission_name='CLUSTER.ADMINISTRATOR' UNION ALL
+ SELECT permission_id, 'SERVICE.VIEW_OPERATIONAL_LOGS' FROM adminpermission WHERE permission_name='CLUSTER.ADMINISTRATOR' UNION ALL
+ SELECT permission_id, 'SERVICE.SET_SERVICE_USERS_GROUPS' FROM adminpermission WHERE permission_name='CLUSTER.ADMINISTRATOR' UNION ALL
+ SELECT permission_id, 'SERVICE.MANAGE_AUTO_START' FROM adminpermission WHERE permission_name='CLUSTER.ADMINISTRATOR' UNION ALL
+ SELECT permission_id, 'HOST.VIEW_METRICS' FROM adminpermission WHERE permission_name='CLUSTER.ADMINISTRATOR' UNION ALL
+ SELECT permission_id, 'HOST.VIEW_STATUS_INFO' FROM adminpermission WHERE permission_name='CLUSTER.ADMINISTRATOR' UNION ALL
+ SELECT permission_id, 'HOST.VIEW_CONFIGS' FROM adminpermission WHERE permission_name='CLUSTER.ADMINISTRATOR' UNION ALL
+ SELECT permission_id, 'HOST.TOGGLE_MAINTENANCE' FROM adminpermission WHERE permission_name='CLUSTER.ADMINISTRATOR' UNION ALL
+ SELECT permission_id, 'HOST.ADD_DELETE_COMPONENTS' FROM adminpermission WHERE permission_name='CLUSTER.ADMINISTRATOR' UNION ALL
+ SELECT permission_id, 'HOST.ADD_DELETE_HOSTS' FROM adminpermission WHERE permission_name='CLUSTER.ADMINISTRATOR' UNION ALL
+ SELECT permission_id, 'CLUSTER.VIEW_METRICS' FROM adminpermission WHERE permission_name='CLUSTER.ADMINISTRATOR' UNION ALL
+ SELECT permission_id, 'CLUSTER.VIEW_STATUS_INFO' FROM adminpermission WHERE permission_name='CLUSTER.ADMINISTRATOR' UNION ALL
+ SELECT permission_id, 'CLUSTER.VIEW_CONFIGS' FROM adminpermission WHERE permission_name='CLUSTER.ADMINISTRATOR' UNION ALL
+ SELECT permission_id, 'CLUSTER.VIEW_STACK_DETAILS' FROM adminpermission WHERE permission_name='CLUSTER.ADMINISTRATOR' UNION ALL
+ SELECT permission_id, 'CLUSTER.VIEW_ALERTS' FROM adminpermission WHERE permission_name='CLUSTER.ADMINISTRATOR' UNION ALL
+ SELECT permission_id, 'CLUSTER.MANAGE_CREDENTIALS' FROM adminpermission WHERE permission_name='CLUSTER.ADMINISTRATOR' UNION ALL
+ SELECT permission_id, 'CLUSTER.MODIFY_CONFIGS' FROM adminpermission WHERE permission_name='CLUSTER.ADMINISTRATOR' UNION ALL
+ SELECT permission_id, 'CLUSTER.MANAGE_ALERTS' FROM adminpermission WHERE permission_name='CLUSTER.ADMINISTRATOR' UNION ALL
+ SELECT permission_id, 'CLUSTER.TOGGLE_ALERTS' FROM adminpermission WHERE permission_name='CLUSTER.ADMINISTRATOR' UNION ALL
+ SELECT permission_id, 'CLUSTER.TOGGLE_KERBEROS' FROM adminpermission WHERE permission_name='CLUSTER.ADMINISTRATOR' UNION ALL
+ SELECT permission_id, 'CLUSTER.MANAGE_CONFIG_GROUPS' FROM adminpermission WHERE permission_name='CLUSTER.ADMINISTRATOR' UNION ALL
+ SELECT permission_id, 'CLUSTER.UPGRADE_DOWNGRADE_STACK' FROM adminpermission WHERE permission_name='CLUSTER.ADMINISTRATOR' UNION ALL
+ SELECT permission_id, 'CLUSTER.MANAGE_USER_PERSISTED_DATA' FROM adminpermission WHERE permission_name='CLUSTER.ADMINISTRATOR' UNION ALL
+ SELECT permission_id, 'CLUSTER.MANAGE_AUTO_START' FROM adminpermission WHERE permission_name='CLUSTER.ADMINISTRATOR' UNION ALL
+ SELECT permission_id, 'CLUSTER.MANAGE_ALERT_NOTIFICATIONS' FROM adminpermission WHERE permission_name='CLUSTER.ADMINISTRATOR' UNION ALL
+ SELECT permission_id, 'CLUSTER.RUN_CUSTOM_COMMAND' FROM adminpermission WHERE permission_name='CLUSTER.ADMINISTRATOR'
+
+ -- Set authorizations for Administrator role
+ INSERT INTO permission_roleauthorization(permission_id, authorization_id)
+ SELECT permission_id, 'VIEW.USE' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
+ SELECT permission_id, 'SERVICE.VIEW_METRICS' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
+ SELECT permission_id, 'SERVICE.VIEW_STATUS_INFO' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
+ SELECT permission_id, 'SERVICE.VIEW_CONFIGS' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
+ SELECT permission_id, 'SERVICE.COMPARE_CONFIGS' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
+ SELECT permission_id, 'SERVICE.VIEW_ALERTS' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
+ SELECT permission_id, 'SERVICE.START_STOP' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
+ SELECT permission_id, 'SERVICE.DECOMMISSION_RECOMMISSION' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
+ SELECT permission_id, 'SERVICE.RUN_SERVICE_CHECK' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
+ SELECT permission_id, 'SERVICE.TOGGLE_MAINTENANCE' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
+ SELECT permission_id, 'SERVICE.RUN_CUSTOM_COMMAND' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
+ SELECT permission_id, 'SERVICE.MODIFY_CONFIGS' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
+ SELECT permission_id, 'SERVICE.MANAGE_CONFIG_GROUPS' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
+ SELECT permission_id, 'SERVICE.MANAGE_ALERTS' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
+ SELECT permission_id, 'SERVICE.MOVE' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
+ SELECT permission_id, 'SERVICE.ENABLE_HA' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
+ SELECT permission_id, 'SERVICE.TOGGLE_ALERTS' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
+ SELECT permission_id, 'SERVICE.ADD_DELETE_SERVICES' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
+ SELECT permission_id, 'SERVICE.VIEW_OPERATIONAL_LOGS' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
+ SELECT permission_id, 'SERVICE.SET_SERVICE_USERS_GROUPS' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
+ SELECT permission_id, 'SERVICE.MANAGE_AUTO_START' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
+ SELECT permission_id, 'HOST.VIEW_METRICS' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
+ SELECT permission_id, 'HOST.VIEW_STATUS_INFO' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
+ SELECT permission_id, 'HOST.VIEW_CONFIGS' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
+ SELECT permission_id, 'HOST.TOGGLE_MAINTENANCE' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
+ SELECT permission_id, 'HOST.ADD_DELETE_COMPONENTS' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
+ SELECT permission_id, 'HOST.ADD_DELETE_HOSTS' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
+ SELECT permission_id, 'CLUSTER.VIEW_METRICS' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
+ SELECT permission_id, 'CLUSTER.VIEW_STATUS_INFO' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
+ SELECT permission_id, 'CLUSTER.VIEW_CONFIGS' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
+ SELECT permission_id, 'CLUSTER.VIEW_STACK_DETAILS' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
+ SELECT permission_id, 'CLUSTER.VIEW_ALERTS' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
+ SELECT permission_id, 'CLUSTER.MANAGE_CREDENTIALS' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
+ SELECT permission_id, 'CLUSTER.MODIFY_CONFIGS' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
+ SELECT permission_id, 'CLUSTER.MANAGE_ALERTS' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
+ SELECT permission_id, 'CLUSTER.MANAGE_CONFIG_GROUPS' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
+ SELECT permission_id, 'CLUSTER.TOGGLE_ALERTS' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
+ SELECT permission_id, 'CLUSTER.TOGGLE_KERBEROS' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
+ SELECT permission_id, 'CLUSTER.UPGRADE_DOWNGRADE_STACK' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
+ SELECT permission_id, 'CLUSTER.MANAGE_USER_PERSISTED_DATA' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
+ SELECT permission_id, 'CLUSTER.MANAGE_AUTO_START' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
+ SELECT permission_id, 'CLUSTER.MANAGE_ALERT_NOTIFICATIONS' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
+ SELECT permission_id, 'CLUSTER.RUN_CUSTOM_COMMAND' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
+ SELECT permission_id, 'AMBARI.ADD_DELETE_CLUSTERS' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
+ SELECT permission_id, 'AMBARI.RENAME_CLUSTER' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
+ SELECT permission_id, 'AMBARI.MANAGE_SETTINGS' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
+ SELECT permission_id, 'AMBARI.MANAGE_USERS' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
+ SELECT permission_id, 'AMBARI.MANAGE_GROUPS' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
+ SELECT permission_id, 'AMBARI.MANAGE_VIEWS' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
+ SELECT permission_id, 'AMBARI.ASSIGN_ROLES' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
+ SELECT permission_id, 'AMBARI.MANAGE_STACK_VERSIONS' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
+ SELECT permission_id, 'AMBARI.EDIT_STACK_REPOS' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
+ SELECT permission_id, 'AMBARI.RUN_CUSTOM_COMMAND' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR'
+
+ INSERT INTO adminprivilege (privilege_id, permission_id, resource_id, principal_id)
+ SELECT 1, 1, 1, 1
+
+ INSERT INTO metainfo(metainfo_key, metainfo_value)
+ SELECT 'version','${ambariSchemaVersion}'
+COMMIT TRANSACTION
+
+-- Quartz tables
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('qrtz_job_details') AND type = 'U')
+BEGIN
+CREATE TABLE qrtz_job_details
+ (
+ SCHED_NAME VARCHAR(120) NOT NULL,
+ JOB_NAME VARCHAR(200) NOT NULL,
+ JOB_GROUP VARCHAR(200) NOT NULL,
+ DESCRIPTION VARCHAR(250) NULL,
+ JOB_CLASS_NAME VARCHAR(250) NOT NULL,
+ IS_DURABLE BIT NOT NULL,
+ IS_NONCONCURRENT BIT NOT NULL,
+ IS_UPDATE_DATA BIT NOT NULL,
+ REQUESTS_RECOVERY BIT NOT NULL,
+ JOB_DATA VARBINARY(MAX) NULL,
+ PRIMARY KEY CLUSTERED (SCHED_NAME,JOB_NAME,JOB_GROUP)
+)
+END
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('qrtz_triggers') AND type = 'U')
+BEGIN
+CREATE TABLE qrtz_triggers
+ (
+ SCHED_NAME VARCHAR(120) NOT NULL,
+ TRIGGER_NAME VARCHAR(200) NOT NULL,
+ TRIGGER_GROUP VARCHAR(200) NOT NULL,
+ JOB_NAME VARCHAR(200) NOT NULL,
+ JOB_GROUP VARCHAR(200) NOT NULL,
+ DESCRIPTION VARCHAR(250)
<TRUNCATED>
[15/31] ambari git commit: AMBARI-22138. When regenerating keytab
files for a service, non-service-specific principals are affected (rlevas)
Posted by jl...@apache.org.
AMBARI-22138. When regenerating keytab files for a service, non-service-specific principals are affected (rlevas)
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/5af1e539
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/5af1e539
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/5af1e539
Branch: refs/heads/branch-feature-AMBARI-14714
Commit: 5af1e539cce928b32fc5aca67c7bf8dbc2bd3c2e
Parents: b0c24a5
Author: Robert Levas <rl...@hortonworks.com>
Authored: Mon Oct 9 13:06:13 2017 -0400
Committer: Robert Levas <rl...@hortonworks.com>
Committed: Mon Oct 9 13:06:19 2017 -0400
----------------------------------------------------------------------
.../server/controller/KerberosHelper.java | 9 +-
.../server/controller/KerberosHelperImpl.java | 244 ++++++++++++-------
.../utilities/RemovableIdentities.java | 2 +-
.../kerberos/CreateKeytabFilesServerAction.java | 2 +-
.../kerberos/CreatePrincipalsServerAction.java | 2 +-
.../kerberos/KerberosServerAction.java | 71 ++++--
.../PrepareDisableKerberosServerAction.java | 3 +-
.../PrepareEnableKerberosServerAction.java | 6 +-
.../PrepareKerberosIdentitiesServerAction.java | 142 ++++++++---
.../kerberos/AbstractKerberosDescriptor.java | 25 ++
.../AbstractKerberosDescriptorContainer.java | 18 +-
.../kerberos/KerberosIdentityDescriptor.java | 160 ++++++++++++
.../server/controller/KerberosHelperTest.java | 5 -
.../utilities/KerberosIdentityCleanerTest.java | 8 +-
.../state/kerberos/KerberosDescriptorTest.java | 150 +++++++++++-
15 files changed, 665 insertions(+), 182 deletions(-)
----------------------------------------------------------------------
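The heart of this change shows up in the KerberosHelperImpl hunks below: the old regenerate-all boolean becomes a three-valued KerberosServerAction.OperationType, and a forceAllHosts flag is derived from the host filter. A minimal sketch of the new handler selection, assuming value, hostFilter, updateConfigurations and handler are already in scope as in the diff (all of the names come from this commit):

boolean forceAllHosts = (hostFilter == null) || hostFilter.contains("*");
KerberosServerAction.OperationType operationType = null;
if ("true".equalsIgnoreCase(value) || "all".equalsIgnoreCase(value)) {
  operationType = KerberosServerAction.OperationType.RECREATE_ALL;   // regenerate everything
} else if ("missing".equalsIgnoreCase(value)) {
  operationType = KerberosServerAction.OperationType.CREATE_MISSING; // only fill in what is absent
}
if (operationType != null) {
  handler = new CreatePrincipalsAndKeytabsHandler(operationType, updateConfigurations,
      forceAllHosts, true);
}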
http://git-wip-us.apache.org/repos/asf/ambari/blob/5af1e539/ambari-server/src/main/java/org/apache/ambari/server/controller/KerberosHelper.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/KerberosHelper.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/KerberosHelper.java
index 20c5708..b8e1be1 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/KerberosHelper.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/KerberosHelper.java
@@ -442,12 +442,6 @@ public interface KerberosHelper {
* @param hostFilter a set of host names indicating the set of hosts to process -
* if null, no filter is relevant; if empty, the filter
* indicates no relevant hosts
- * @param identityFilter a Collection of identity names indicating the relevant
- * identities - if null, no filter is relevant; if empty,
- * the filter indicates no relevant identities
- * @param shouldProcessCommand a Command implementation to determine if the relevant component
- * is in a state in which is should be process for the current
- * Kerberos operation.
* @return a list of ServiceComponentHost instances that should be processed during the relevant
* Kerberos operation.
* @throws AmbariException
@@ -455,8 +449,7 @@ public interface KerberosHelper {
List<ServiceComponentHost> getServiceComponentHostsToProcess(Cluster cluster,
KerberosDescriptor kerberosDescriptor,
Map<String, ? extends Collection<String>> serviceComponentFilter,
- Collection<String> hostFilter, Collection<String> identityFilter,
- Command<Boolean, ServiceComponentHost> shouldProcessCommand)
+ Collection<String> hostFilter)
throws AmbariException;
Set<String> getHostsWithValidKerberosClient(Cluster cluster) throws AmbariException;
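The interface drops the identityFilter and shouldProcessCommand parameters; as the KerberosHelperImpl hunks below show, selection is now driven entirely by the service/component and host filters, where null means "no restriction" and "*" is an explicit wildcard (a bare service in the regenerate directive is now parsed to the set {"*"} instead of null). A self-contained restatement of that predicate, for reference only; the serviceDescriptor existence check from the real code is omitted:

import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.Map;

public class KerberosFilterSketch {
  static boolean matches(Map<String, ? extends Collection<String>> serviceComponentFilter,
                         Collection<String> hostFilter,
                         String host, String service, String component) {
    // Host filter: null means every host; "*" is an explicit wildcard.
    if (hostFilter != null && !hostFilter.contains("*") && !hostFilter.contains(host)) {
      return false;
    }
    // Service filter: null, or a "*" key, means every service.
    if (serviceComponentFilter == null || serviceComponentFilter.containsKey("*")) {
      return true;
    }
    if (!serviceComponentFilter.containsKey(service)) {
      return false;
    }
    // Component filter: null or "*" means every component of the service.
    Collection<String> componentFilter = serviceComponentFilter.get(service);
    return componentFilter == null || componentFilter.contains("*")
        || componentFilter.contains(component);
  }

  public static void main(String[] args) {
    Map<String, Collection<String>> filter =
        Collections.singletonMap("HDFS", Arrays.asList("*")); // e.g. parsed from a bare "HDFS"
    System.out.println(matches(filter, null, "host1", "HDFS", "NAMENODE"));         // true
    System.out.println(matches(filter, null, "host1", "YARN", "RESOURCEMANAGER")); // false
  }
}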
http://git-wip-us.apache.org/repos/asf/ambari/blob/5af1e539/ambari-server/src/main/java/org/apache/ambari/server/controller/KerberosHelperImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/KerberosHelperImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/KerberosHelperImpl.java
index b691968..f8fe31a 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/KerberosHelperImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/KerberosHelperImpl.java
@@ -122,6 +122,7 @@ import org.apache.ambari.server.state.kerberos.KerberosServiceDescriptor;
import org.apache.ambari.server.state.kerberos.VariableReplacementHelper;
import org.apache.ambari.server.state.svccomphost.ServiceComponentHostServerActionEvent;
import org.apache.ambari.server.utils.StageUtils;
+import org.apache.commons.collections.CollectionUtils;
import org.apache.commons.io.FileUtils;
import org.apache.commons.lang.StringUtils;
import org.apache.directory.server.kerberos.shared.keytab.Keytab;
@@ -268,10 +269,12 @@ public class KerberosHelperImpl implements KerberosHelper {
boolean updateConfigurations = !requestProperties.containsKey(DIRECTIVE_IGNORE_CONFIGS)
|| !"true".equalsIgnoreCase(requestProperties.get(DIRECTIVE_IGNORE_CONFIGS));
+ boolean forceAllHosts = (hostFilter == null) || (hostFilter.contains("*"));
+
if ("true".equalsIgnoreCase(value) || "all".equalsIgnoreCase(value)) {
- handler = new CreatePrincipalsAndKeytabsHandler(true, updateConfigurations, true);
+ handler = new CreatePrincipalsAndKeytabsHandler(KerberosServerAction.OperationType.RECREATE_ALL, updateConfigurations, forceAllHosts, true);
} else if ("missing".equalsIgnoreCase(value)) {
- handler = new CreatePrincipalsAndKeytabsHandler(false, updateConfigurations, true);
+ handler = new CreatePrincipalsAndKeytabsHandler(KerberosServerAction.OperationType.CREATE_MISSING, updateConfigurations, forceAllHosts, true);
}
if (handler != null) {
@@ -326,7 +329,7 @@ public class KerberosHelperImpl implements KerberosHelper {
if (serviceComponentsArray.length == 2) {
serviceComponentFilter.put(serviceName, ImmutableSet.copyOf(serviceComponentsArray[1].split(";")));
} else {
- serviceComponentFilter.put(serviceName, null);
+ serviceComponentFilter.put(serviceName, ImmutableSet.of("*"));
}
}
return serviceComponentFilter.build();
@@ -340,7 +343,7 @@ public class KerberosHelperImpl implements KerberosHelper {
RequestStageContainer requestStageContainer, Boolean manageIdentities)
throws AmbariException, KerberosOperationException {
return handle(cluster, getKerberosDetails(cluster, manageIdentities), serviceComponentFilter, hostFilter, identityFilter,
- hostsToForceKerberosOperations, requestStageContainer, new CreatePrincipalsAndKeytabsHandler(false, false,
+ hostsToForceKerberosOperations, requestStageContainer, new CreatePrincipalsAndKeytabsHandler(KerberosServerAction.OperationType.DEFAULT, false, false,
false));
}
@@ -1061,7 +1064,7 @@ public class KerberosHelperImpl implements KerberosHelper {
RequestStageContainer requestStageContainer)
throws KerberosOperationException, AmbariException {
return handleTestIdentity(cluster, getKerberosDetails(cluster, null), commandParamsStage, requestStageContainer,
- new CreatePrincipalsAndKeytabsHandler(false, false, false));
+ new CreatePrincipalsAndKeytabsHandler(KerberosServerAction.OperationType.DEFAULT, false, false, false));
}
@Override
@@ -1230,27 +1233,25 @@ public class KerberosHelperImpl implements KerberosHelper {
public List<ServiceComponentHost> getServiceComponentHostsToProcess(final Cluster cluster,
final KerberosDescriptor kerberosDescriptor,
final Map<String, ? extends Collection<String>> serviceComponentFilter,
- final Collection<String> hostFilter, Collection<String> identityFilter,
- final Command<Boolean, ServiceComponentHost> shouldProcessCommand)
+ final Collection<String> hostFilter)
throws AmbariException {
return getServiceComponentHosts(cluster, new Command<Boolean, ServiceComponentHost>() {
@Override
public Boolean invoke(ServiceComponentHost sch) throws AmbariException {
if (sch != null) {
// Check the host filter
- if ((hostFilter == null) || hostFilter.contains(sch.getHostName())) {
+ if ((hostFilter == null) || hostFilter.contains("*") || hostFilter.contains(sch.getHostName())) {
String serviceName = sch.getServiceName();
// Check the service filter
- if ((serviceComponentFilter == null) || serviceComponentFilter.containsKey(serviceName)) {
+ if ((serviceComponentFilter == null) || serviceComponentFilter.containsKey("*") || serviceComponentFilter.containsKey(serviceName)) {
KerberosServiceDescriptor serviceDescriptor = kerberosDescriptor.getService(serviceName);
if (serviceDescriptor != null) {
- Collection<String> componentFilter = (serviceComponentFilter == null) ? null : serviceComponentFilter.get(serviceName);
+ Collection<String> componentFilter = ((serviceComponentFilter == null) || serviceComponentFilter.containsKey("*")) ? null : serviceComponentFilter.get(serviceName);
- // Check the service/component filter and the shouldProcessCommand
- return (((componentFilter == null) || componentFilter.contains(sch.getServiceComponentName())) &&
- ((shouldProcessCommand == null) || shouldProcessCommand.invoke(sch)));
+ // Check the service/component filter
+ return (((componentFilter == null) || componentFilter.contains("*") || componentFilter.contains(sch.getServiceComponentName())));
}
}
}
@@ -1491,8 +1492,9 @@ public class KerberosHelperImpl implements KerberosHelper {
if (identities != null) {
for (KerberosIdentityDescriptor identity : identities) {
- // If there is no filter or the filter contains the current identity's name...
- if ((identityFilter == null) || identityFilter.contains(identity.getName())) {
+ // If there is no filter or the filter contains the current identity's path...
+ if ((identityFilter == null) || identityFilter.contains(identity.getPath())) {
+
KerberosPrincipalDescriptor principalDescriptor = identity.getPrincipalDescriptor();
String principal = null;
String principalType = null;
@@ -2030,10 +2032,7 @@ public class KerberosHelperImpl implements KerberosHelper {
cluster,
kerberosDescriptor,
serviceComponentFilter,
- hostFilter,
- identityFilter,
- arg -> true);
-
+ hostFilter);
// While iterating over all the ServiceComponentHosts find hosts that have KERBEROS_CLIENT
// components in the INSTALLED state and add them to the hostsWithValidKerberosClient Set.
@@ -3378,12 +3377,11 @@ public class KerberosHelperImpl implements KerberosHelper {
requestStageContainer.addStages(roleGraph.getStages());
}
- public void addDistributeKeytabFilesStage(Cluster cluster, List<ServiceComponentHost> serviceComponentHosts,
- String clusterHostInfoJson, String hostParamsJson,
- Map<String, String> commandParameters,
- RoleCommandOrder roleCommandOrder,
- RequestStageContainer requestStageContainer,
- Set<String> hostsWithValidKerberosClient)
+ void addDistributeKeytabFilesStage(Cluster cluster, String clusterHostInfoJson,
+ String hostParamsJson, Map<String, String> commandParameters,
+ RoleCommandOrder roleCommandOrder,
+ RequestStageContainer requestStageContainer,
+ List<String> hosts)
throws AmbariException {
Stage stage = createNewStage(requestStageContainer.getLastStageId(),
@@ -3393,20 +3391,13 @@ public class KerberosHelperImpl implements KerberosHelper {
StageUtils.getGson().toJson(commandParameters),
hostParamsJson);
- Collection<ServiceComponentHost> filteredComponents = filterServiceComponentHostsForHosts(
- new ArrayList<>(serviceComponentHosts), hostsWithValidKerberosClient);
-
- if (!filteredComponents.isEmpty()) {
- List<String> hostsToUpdate = createUniqueHostList(filteredComponents, Collections.singleton(HostState.HEALTHY));
+ if (!hosts.isEmpty()) {
Map<String, String> requestParams = new HashMap<>();
- List<RequestResourceFilter> requestResourceFilters = new ArrayList<>();
- RequestResourceFilter reqResFilter = new RequestResourceFilter(Service.Type.KERBEROS.name(), Role.KERBEROS_CLIENT.name(), hostsToUpdate);
- requestResourceFilters.add(reqResFilter);
ActionExecutionContext actionExecContext = new ActionExecutionContext(
cluster.getClusterName(),
SET_KEYTAB,
- requestResourceFilters,
+ createRequestResourceFilters(hosts),
requestParams);
customCommandExecutionHelper.addExecutionCommandsToStage(actionExecContext, stage,
requestParams, null);
@@ -3422,7 +3413,12 @@ public class KerberosHelperImpl implements KerberosHelper {
/**
* Send a custom command to the KERBEROS_CLIENT to check if there are missing keytabs on each host.
*/
- public void addCheckMissingKeytabsStage(Cluster cluster, String clusterHostInfoJson, String hostParamsJson, ServiceComponentHostServerActionEvent event, Map<String, String> commandParameters, RoleCommandOrder roleCommandOrder, RequestStageContainer requestStageContainer, List<ServiceComponentHost> serviceComponentHosts) throws AmbariException {
+ void addCheckMissingKeytabsStage(Cluster cluster, String clusterHostInfoJson,
+ String hostParamsJson, Map<String, String> commandParameters,
+ RoleCommandOrder roleCommandOrder,
+ RequestStageContainer requestStageContainer,
+ List<String> hostsToInclude)
+ throws AmbariException {
Stage stage = createNewStage(requestStageContainer.getLastStageId(),
cluster,
requestStageContainer.getId(),
@@ -3430,20 +3426,13 @@ public class KerberosHelperImpl implements KerberosHelper {
StageUtils.getGson().toJson(commandParameters),
hostParamsJson);
- Collection<ServiceComponentHost> filteredComponents = filterServiceComponentHostsForHosts(
- new ArrayList<>(serviceComponentHosts), getHostsWithValidKerberosClient(cluster));
-
- if (!filteredComponents.isEmpty()) {
- List<String> hostsToUpdate = createUniqueHostList(filteredComponents, Collections.singleton(HostState.HEALTHY));
+ if (!hostsToInclude.isEmpty()) {
Map<String, String> requestParams = new HashMap<>();
- List<RequestResourceFilter> requestResourceFilters = new ArrayList<>();
- RequestResourceFilter reqResFilter = new RequestResourceFilter(Service.Type.KERBEROS.name(), Role.KERBEROS_CLIENT.name(), hostsToUpdate);
- requestResourceFilters.add(reqResFilter);
ActionExecutionContext actionExecContext = new ActionExecutionContext(
cluster.getClusterName(),
CHECK_KEYTABS,
- requestResourceFilters,
+ createRequestResourceFilters(hostsToInclude),
requestParams);
customCommandExecutionHelper.addExecutionCommandsToStage(actionExecContext, stage, requestParams, null);
}
@@ -3454,32 +3443,6 @@ public class KerberosHelperImpl implements KerberosHelper {
requestStageContainer.addStages(roleGraph.getStages());
}
- /**
- * Filter out ServiceComponentHosts that are on on hosts in the specified set of host names.
- * <p/>
- * It is expected that the supplied collection is modifiable. It will be modified inplace.
- *
- * @param serviceComponentHosts a collection of ServiceComponentHost items to test
- * @param hosts a set of host names indicating valid hosts
- * @return a collection of filtered ServiceComponentHost items
- */
- private Collection<ServiceComponentHost> filterServiceComponentHostsForHosts(Collection<ServiceComponentHost> serviceComponentHosts,
- Set<String> hosts) {
-
- if ((serviceComponentHosts != null) && (hosts != null)) {
- Iterator<ServiceComponentHost> iterator = serviceComponentHosts.iterator();
- while (iterator.hasNext()) {
- ServiceComponentHost sch = iterator.next();
-
- if (!hosts.contains(sch.getHostName())) {
- iterator.remove();
- }
- }
- }
-
- return serviceComponentHosts;
- }
-
void addDisableSecurityHookStage(Cluster cluster,
String clusterHostInfoJson,
String hostParamsJson,
@@ -3677,6 +3640,13 @@ public class KerberosHelperImpl implements KerberosHelper {
requestStageContainer.setClusterHostInfo(clusterHostInfoJson);
requestStageContainer.addStages(roleGraph.getStages());
}
+
+ private List<RequestResourceFilter> createRequestResourceFilters(List<String> hostsToInclude) {
+ List<RequestResourceFilter> requestResourceFilters = new ArrayList<>();
+ RequestResourceFilter reqResFilter = new RequestResourceFilter(Service.Type.KERBEROS.name(), Role.KERBEROS_CLIENT.name(), hostsToInclude);
+ requestResourceFilters.add(reqResFilter);
+ return requestResourceFilters;
+ }
}
/**
@@ -3746,6 +3716,8 @@ public class KerberosHelperImpl implements KerberosHelper {
roleCommandOrder, requestStageContainer);
if (kerberosDetails.manageIdentities()) {
+ List<String> hostsToInclude = calculateHosts(cluster, serviceComponentHosts, hostsWithValidKerberosClient, false);
+
commandParameters.put(KerberosServerAction.KDC_TYPE, kerberosDetails.getKdcType().name());
// *****************************************************************
@@ -3767,8 +3739,8 @@ public class KerberosHelperImpl implements KerberosHelper {
// *****************************************************************
// Create stage to distribute keytabs
- addDistributeKeytabFilesStage(cluster, serviceComponentHosts, clusterHostInfoJson, hostParamsJson,
- commandParameters, roleCommandOrder, requestStageContainer, hostsWithValidKerberosClient);
+ addDistributeKeytabFilesStage(cluster, clusterHostInfoJson, hostParamsJson, commandParameters,
+ roleCommandOrder, requestStageContainer, hostsToInclude);
}
// *****************************************************************
@@ -3885,10 +3857,11 @@ public class KerberosHelperImpl implements KerberosHelper {
*/
private class CreatePrincipalsAndKeytabsHandler extends Handler {
/**
- * A boolean value indicating whether to create keytabs for all principals (<code>true</code>)
- * or only the ones that are missing (<code>false</code>).
+ * The type of Kerberos operation being performed.
+ *
+ * @see org.apache.ambari.server.serveraction.kerberos.KerberosServerAction.OperationType
*/
- private boolean regenerateAllKeytabs;
+ private KerberosServerAction.OperationType operationType;
/**
* A boolean value indicating whether to update service configurations (<code>true</code>)
@@ -3897,6 +3870,14 @@ public class KerberosHelperImpl implements KerberosHelper {
private boolean updateConfigurations;
/**
+ * A boolean value indicating whether to include all hosts (<code>true</code>) when setting up
+ * agent-side tasks or to select only the hosts found to be relevant (<code>false</code>).
+ * <p>
+ * This is useful if we do not know beforehand which hosts need to be involved in the operation.
+ */
+ private boolean forceAllHosts;
+
+ /**
* A boolean value indicating whether to include Ambari server identity (<code>true</code>)
* or ignore it (<code>false</code>).
*/
@@ -3906,17 +3887,20 @@ public class KerberosHelperImpl implements KerberosHelper {
* CreatePrincipalsAndKeytabsHandler constructor to set whether this instance should be used to
* regenerate all keytabs or just the ones that have not been distributed
*
- * @param regenerateAllKeytabs A boolean value indicating whether to create keytabs for all
- * principals (<code>true</code> or only the ones that are missing
- * (<code>false</code>)
- * @param updateConfigurations A boolean value indicating whether to update service configurations
- * (<code>true</code>) or ignore any potential configuration changes
- * (<code>false</code>)
+ * @param operationType The type of Kerberos operation being performed
+ * @param updateConfigurations A boolean value indicating whether to update service configurations
+ * (<code>true</code>) or ignore any potential configuration changes (<code>false</code>)
+ * @param forceAllHosts A boolean value indicating whether to include all hosts (<code>true</code>)
+ * when setting up agent-side tasks or to select only the hosts found to be
+ * relevant (<code>false</code>)
+ * @param includeAmbariIdentity A boolean value indicating whether to include Ambari server
+ * identity (<code>true</code>) or ignore it (<code>false</code>)
*/
- public CreatePrincipalsAndKeytabsHandler(boolean regenerateAllKeytabs, boolean updateConfigurations,
- boolean includeAmbariIdentity) {
- this.regenerateAllKeytabs = regenerateAllKeytabs;
+ CreatePrincipalsAndKeytabsHandler(KerberosServerAction.OperationType operationType, boolean updateConfigurations,
+ boolean forceAllHosts, boolean includeAmbariIdentity) {
+ this.operationType = operationType;
this.updateConfigurations = updateConfigurations;
+ this.forceAllHosts = forceAllHosts;
this.includeAmbariIdentity = includeAmbariIdentity;
}
@@ -3947,6 +3931,7 @@ public class KerberosHelperImpl implements KerberosHelper {
}
+ boolean processAmbariIdentity = includeAmbariIdentity;
Map<String, String> commandParameters = new HashMap<>();
commandParameters.put(KerberosServerAction.AUTHENTICATED_USER_NAME, ambariManagementController.getAuthName());
commandParameters.put(KerberosServerAction.DEFAULT_REALM, kerberosDetails.getDefaultRealm());
@@ -3955,22 +3940,29 @@ public class KerberosHelperImpl implements KerberosHelper {
}
if (serviceComponentFilter != null) {
commandParameters.put(KerberosServerAction.SERVICE_COMPONENT_FILTER, StageUtils.getGson().toJson(serviceComponentFilter));
+
+ processAmbariIdentity = serviceComponentFilter.containsKey("AMBARI") &&
+ ((serviceComponentFilter.get("AMBARI") == null) || serviceComponentFilter.get("AMBARI").contains("*") || serviceComponentFilter.get("AMBARI").contains("AMBARI_SERVER"));
}
if (hostFilter != null) {
commandParameters.put(KerberosServerAction.HOST_FILTER, StageUtils.getGson().toJson(hostFilter));
+
+ processAmbariIdentity = hostFilter.contains("*") || hostFilter.contains(StageUtils.getHostName());
}
if (identityFilter != null) {
commandParameters.put(KerberosServerAction.IDENTITY_FILTER, StageUtils.getGson().toJson(identityFilter));
}
- commandParameters.put(KerberosServerAction.REGENERATE_ALL, (regenerateAllKeytabs) ? "true" : "false");
- commandParameters.put(KerberosServerAction.INCLUDE_AMBARI_IDENTITY, (includeAmbariIdentity) ? "true" : "false");
+ commandParameters.put(KerberosServerAction.OPERATION_TYPE, (operationType == null) ? KerberosServerAction.OperationType.DEFAULT.name() : operationType.name());
+ commandParameters.put(KerberosServerAction.INCLUDE_AMBARI_IDENTITY, (processAmbariIdentity) ? "true" : "false");
if (updateConfigurations) {
commandParameters.put(KerberosServerAction.UPDATE_CONFIGURATION_NOTE, "Updated Kerberos-related configurations");
commandParameters.put(KerberosServerAction.UPDATE_CONFIGURATIONS, "true");
}
+ List<String> hostsToInclude = calculateHosts(cluster, serviceComponentHosts, hostsWithValidKerberosClient, forceAllHosts);
+
// *****************************************************************
// Create stage to create principals
addPrepareKerberosIdentitiesStage(cluster, clusterHostInfoJson, hostParamsJson, event,
@@ -3979,9 +3971,9 @@ public class KerberosHelperImpl implements KerberosHelper {
if (kerberosDetails.manageIdentities()) {
commandParameters.put(KerberosServerAction.KDC_TYPE, kerberosDetails.getKdcType().name());
- if (!regenerateAllKeytabs) {
- addCheckMissingKeytabsStage(cluster, clusterHostInfoJson, hostParamsJson, event,
- commandParameters, roleCommandOrder, requestStageContainer, serviceComponentHosts);
+ if (operationType != KerberosServerAction.OperationType.RECREATE_ALL) {
+ addCheckMissingKeytabsStage(cluster, clusterHostInfoJson, hostParamsJson,
+ commandParameters, roleCommandOrder, requestStageContainer, hostsToInclude);
}
// *****************************************************************
@@ -3996,15 +3988,15 @@ public class KerberosHelperImpl implements KerberosHelper {
// *****************************************************************
// Create stage to distribute and configure keytab for Ambari server and configure JAAS
- if (includeAmbariIdentity && kerberosDetails.createAmbariPrincipal()) {
+ if (processAmbariIdentity && kerberosDetails.createAmbariPrincipal()) {
addConfigureAmbariIdentityStage(cluster, clusterHostInfoJson, hostParamsJson, event, commandParameters,
roleCommandOrder, requestStageContainer);
}
// *****************************************************************
// Create stage to distribute keytabs
- addDistributeKeytabFilesStage(cluster, serviceComponentHosts, clusterHostInfoJson,
- hostParamsJson, commandParameters, roleCommandOrder, requestStageContainer, hostsWithValidKerberosClient);
+ addDistributeKeytabFilesStage(cluster, clusterHostInfoJson, hostParamsJson, commandParameters,
+ roleCommandOrder, requestStageContainer, hostsToInclude);
}
if (updateConfigurations) {
@@ -4019,6 +4011,74 @@ public class KerberosHelperImpl implements KerberosHelper {
}
/**
+ * Filter out ServiceComponentHosts that are not on hosts in the specified set of host names.
+ * <p/>
+ * It is expected that the supplied collection is modifiable. It will be modified in place.
+ *
+ * @param serviceComponentHosts a collection of ServiceComponentHost items to test
+ * @param hosts a set of host names indicating valid hosts
+ * @return a collection of filtered ServiceComponentHost items
+ */
+ private Collection<ServiceComponentHost> filterServiceComponentHostsForHosts(Collection<ServiceComponentHost> serviceComponentHosts,
+ Set<String> hosts) {
+
+ if ((serviceComponentHosts != null) && (hosts != null)) {
+ Iterator<ServiceComponentHost> iterator = serviceComponentHosts.iterator();
+ while (iterator.hasNext()) {
+ ServiceComponentHost sch = iterator.next();
+
+ if (!hosts.contains(sch.getHostName())) {
+ iterator.remove();
+ }
+ }
+ }
+
+ return serviceComponentHosts;
+ }
+
+ /**
+ * Calculate the hosts to include when issuing agent-side commands.
+ * <p>
+ * If forcing all hosts, select only the healthy hosts in the cluster; otherwise, select only the healthy
+ * hosts from the set of hosts specified in the collection of relevant {@link ServiceComponentHost}.
+ *
+ * @param cluster the cluster
+ * @param serviceComponentHosts a collection of {@link ServiceComponentHost}s that are
+ * relevant to the current operation
+ * @param hostsWithValidKerberosClient the collection of hosts known to have the Kerberos client
+ * component installed
+ * @param forceAllHosts true to process all hosts from the cluster rather than use
+ * the hosts parsed from the set of {@link ServiceComponentHost}s
+ * @return a filtered list of host names
+ * @throws AmbariException
+ */
+ private List<String> calculateHosts(Cluster cluster, List<ServiceComponentHost> serviceComponentHosts, Set<String> hostsWithValidKerberosClient, boolean forceAllHosts) throws AmbariException {
+ if(forceAllHosts) {
+ List<String> hosts = new ArrayList<>();
+ Collection<Host> clusterHosts = cluster.getHosts();
+ if(!CollectionUtils.isEmpty(clusterHosts)) {
+ for(Host host: clusterHosts) {
+ if(host.getState() == HostState.HEALTHY) {
+ hosts.add(host.getHostName());
+ }
+ }
+ }
+
+ return hosts;
+ }
+ else {
+ Collection<ServiceComponentHost> filteredComponents = filterServiceComponentHostsForHosts(
+ new ArrayList<>(serviceComponentHosts), hostsWithValidKerberosClient);
+
+ if (filteredComponents.isEmpty()) {
+ return Collections.emptyList();
+ } else {
+ return createUniqueHostList(filteredComponents, Collections.singleton(HostState.HEALTHY));
+ }
+ }
+ }
+
+ /**
* DeletePrincipalsAndKeytabsHandler is an implementation of the Handler interface used to delete
* principals and keytabs throughout the cluster.
* <p/>
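The new calculateHosts helper replaces the per-stage filtering that was previously repeated at each stage-creation site. In rough terms: when forceAllHosts is set, every healthy cluster host is targeted; otherwise only healthy hosts that carry a relevant component and a working Kerberos client remain. A hedged, self-contained sketch of that selection (the H type is a hypothetical stand-in for Ambari's Host/ServiceComponentHost):

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Set;

public class CalculateHostsSketch {
  // Hypothetical stand-in: a host name plus its health state.
  static final class H {
    final String name; final boolean healthy;
    H(String name, boolean healthy) { this.name = name; this.healthy = healthy; }
  }

  // With forceAllHosts, every healthy cluster host is used; otherwise only healthy
  // hosts that carry a relevant component and a valid Kerberos client remain.
  static List<String> calculateHosts(Collection<H> clusterHosts,
                                     Collection<H> componentHosts,
                                     Set<String> hostsWithValidKerberosClient,
                                     boolean forceAllHosts) {
    Collection<H> candidates = forceAllHosts ? clusterHosts : componentHosts;
    Set<String> result = new LinkedHashSet<>(); // dedupe, like createUniqueHostList
    for (H host : candidates) {
      if (!host.healthy) continue;
      if (!forceAllHosts && !hostsWithValidKerberosClient.contains(host.name)) continue;
      result.add(host.name);
    }
    return new ArrayList<>(result);
  }

  public static void main(String[] args) {
    List<H> cluster = Arrays.asList(new H("a", true), new H("b", false), new H("c", true));
    List<H> comps = Arrays.asList(new H("a", true), new H("c", true));
    System.out.println(calculateHosts(cluster, comps, Collections.singleton("a"), false)); // [a]
    System.out.println(calculateHosts(cluster, comps, Collections.singleton("a"), true));  // [a, c]
  }
}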
http://git-wip-us.apache.org/repos/asf/ambari/blob/5af1e539/ambari-server/src/main/java/org/apache/ambari/server/controller/utilities/RemovableIdentities.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/utilities/RemovableIdentities.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/utilities/RemovableIdentities.java
index 66bf7b3..cd23e83 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/utilities/RemovableIdentities.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/utilities/RemovableIdentities.java
@@ -133,7 +133,7 @@ public class RemovableIdentities {
* Remove all identities which are not used by other services or components
*/
public void remove(KerberosHelper kerberosHelper) throws AmbariException, KerberosOperationException {
- Set<String> identitiesToRemove = skipUsed().stream().map(KerberosIdentityDescriptor::getName).collect(toSet());
+ Set<String> identitiesToRemove = skipUsed().stream().map(KerberosIdentityDescriptor::getPath).collect(toSet());
if (!identitiesToRemove.isEmpty()) {
kerberosHelper.deleteIdentities(cluster, components, identitiesToRemove);
}
http://git-wip-us.apache.org/repos/asf/ambari/blob/5af1e539/ambari-server/src/main/java/org/apache/ambari/server/serveraction/kerberos/CreateKeytabFilesServerAction.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/kerberos/CreateKeytabFilesServerAction.java b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/kerberos/CreateKeytabFilesServerAction.java
index 4396a2b..355f515 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/kerberos/CreateKeytabFilesServerAction.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/kerberos/CreateKeytabFilesServerAction.java
@@ -217,7 +217,7 @@ public class CreateKeytabFilesServerAction extends KerberosServerAction {
return commandReport;
}
- boolean regenerateKeytabs = "true".equalsIgnoreCase(getCommandParameterValue(getCommandParameters(), REGENERATE_ALL));
+ boolean regenerateKeytabs = getOperationType(getCommandParameters()) == OperationType.RECREATE_ALL;
boolean onlyKeytabWrite = "true".equalsIgnoreCase(identityRecord.get(KerberosIdentityDataFileReader.ONLY_KEYTAB_WRITE));
boolean grabKeytabFromCache = regenerateKeytabs && onlyKeytabWrite;
// if grabKeytabFromCache=true we will try to get keytab from cache and send to agent, it will be true for
http://git-wip-us.apache.org/repos/asf/ambari/blob/5af1e539/ambari-server/src/main/java/org/apache/ambari/server/serveraction/kerberos/CreatePrincipalsServerAction.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/kerberos/CreatePrincipalsServerAction.java b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/kerberos/CreatePrincipalsServerAction.java
index 069c821..1c0853b9 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/kerberos/CreatePrincipalsServerAction.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/kerberos/CreatePrincipalsServerAction.java
@@ -128,7 +128,7 @@ public class CreatePrincipalsServerAction extends KerberosServerAction {
seenPrincipals.add(evaluatedPrincipal);
boolean processPrincipal;
- boolean regenerateKeytabs = "true".equalsIgnoreCase(getCommandParameterValue(getCommandParameters(), REGENERATE_ALL));
+ boolean regenerateKeytabs = getOperationType(getCommandParameters()) == OperationType.RECREATE_ALL;
if (regenerateKeytabs) {
// do not process cached identities that can be passed as is(headless identities)
http://git-wip-us.apache.org/repos/asf/ambari/blob/5af1e539/ambari-server/src/main/java/org/apache/ambari/server/serveraction/kerberos/KerberosServerAction.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/kerberos/KerberosServerAction.java b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/kerberos/KerberosServerAction.java
index c86ffa3..1b0f4fb 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/kerberos/KerberosServerAction.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/kerberos/KerberosServerAction.java
@@ -36,6 +36,7 @@ import org.apache.ambari.server.state.Cluster;
import org.apache.ambari.server.state.Clusters;
import org.apache.ambari.server.utils.StageUtils;
import org.apache.commons.io.FileUtils;
+import org.apache.commons.lang.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -108,31 +109,32 @@ public abstract class KerberosServerAction extends AbstractServerAction {
*/
public static final String DATA_DIRECTORY_PREFIX = ".ambari_";
- /*
+ /**
* Kerberos action shared data entry name for the principal-to-password map
*/
private static final String PRINCIPAL_PASSWORD_MAP = "principal_password_map";
- /*
+ /**
* Kerberos action shared data entry name for the principal-to-key_number map
*/
private static final String PRINCIPAL_KEY_NUMBER_MAP = "principal_key_number_map";
- /*
- * Key used in kerberosCommandParams in ExecutionCommand for base64 encoded keytab content
- */
+ /**
+ * Key used in kerberosCommandParams in ExecutionCommand for base64 encoded keytab content
+ */
public static final String KEYTAB_CONTENT_BASE64 = "keytab_content_base64";
- /*
- * Key used in kerberosCommandParams in ExecutionCommand to indicate whether to generate key keytabs
- * for all principals ("true") or only those that are missing ("false")
- */
- public static final String REGENERATE_ALL = "regenerate_all";
+ /**
+ * Key used in kerberosCommandParams in ExecutionCommand to indicate which type of creation operation to perform.
+ *
+ * @see OperationType
+ */
+ public static final String OPERATION_TYPE = "operation_type";
- /*
- * Key used in kerberosCommandParams in ExecutionCommand to indicate whether to include Ambari server indetity
- * ("true") or ignore it ("false")
- */
+ /**
+ * Key used in kerberosCommandParams in ExecutionCommand to indicate whether to include Ambari server identity
+ * ("true") or ignore it ("false")
+ */
public static final String INCLUDE_AMBARI_IDENTITY = "include_ambari_identity";
/**
@@ -219,6 +221,22 @@ public abstract class KerberosServerAction extends AbstractServerAction {
}
/**
+ * Given a (command parameter) Map, attempts to safely retrieve the "operation_type" property.
+ *
+ * @param commandParameters a Map containing the dictionary of data to interrogate
+ * @return an OperationType
+ */
+ protected static OperationType getOperationType(Map<String, String> commandParameters) {
+ String value = getCommandParameterValue(commandParameters, OPERATION_TYPE);
+ if(StringUtils.isEmpty(value)) {
+ return OperationType.DEFAULT;
+ }
+ else {
+ return OperationType.valueOf(value.toUpperCase());
+ }
+ }
+
+ /**
* Sets the shared principal-to-password Map used to store principals and generated password for
* use within the current request context.
*
@@ -569,4 +587,29 @@ public abstract class KerberosServerAction extends AbstractServerAction {
}
}
}
+
+ /**
+ * A Kerberos operation type
+ * <ul>
+ * <li>RECREATE_ALL - regenerate keytabs for all principals</li>
+ * <li>CREATE_MISSING - generate keytabs for only those that are missing</li>
+ * <li>DEFAULT - generate needed keytabs for new components</li>
+ * </ul>
+ */
+ public enum OperationType {
+ /**
+ * Regenerate keytabs for all principals
+ */
+ RECREATE_ALL,
+
+ /**
+ * Generate keytabs for only those that are missing
+ */
+ CREATE_MISSING,
+
+ /**
+ * Generate needed keytabs for new components
+ */
+ DEFAULT
+ }
}
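Tying the pieces together: CreatePrincipalsAndKeytabsHandler writes the enum's name under the operation_type key, and getOperationType above parses it back, treating a missing or empty value as DEFAULT; the server actions then reduce it to the old regenerate-all boolean. A minimal, self-contained sketch of that round trip (a plain Map stands in for the real command-parameter plumbing):

import java.util.HashMap;
import java.util.Map;

public class OperationTypeRoundTrip {
  // Mirrors KerberosServerAction.OperationType from the hunk above.
  enum OperationType { RECREATE_ALL, CREATE_MISSING, DEFAULT }

  public static void main(String[] args) {
    Map<String, String> commandParameters = new HashMap<>();

    // Producer side (CreatePrincipalsAndKeytabsHandler): store the name, defaulting to DEFAULT.
    OperationType operationType = OperationType.CREATE_MISSING;
    commandParameters.put("operation_type",
        (operationType == null) ? OperationType.DEFAULT.name() : operationType.name());

    // Consumer side (KerberosServerAction.getOperationType): empty or missing means DEFAULT.
    String value = commandParameters.get("operation_type");
    OperationType parsed = (value == null || value.isEmpty())
        ? OperationType.DEFAULT
        : OperationType.valueOf(value.toUpperCase());

    // CreateKeytabFilesServerAction / CreatePrincipalsServerAction derive the old boolean:
    boolean regenerateKeytabs = (parsed == OperationType.RECREATE_ALL);
    System.out.println(parsed + " regenerateKeytabs=" + regenerateKeytabs);
  }
}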
http://git-wip-us.apache.org/repos/asf/ambari/blob/5af1e539/ambari-server/src/main/java/org/apache/ambari/server/serveraction/kerberos/PrepareDisableKerberosServerAction.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/kerberos/PrepareDisableKerberosServerAction.java b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/kerberos/PrepareDisableKerberosServerAction.java
index f56e946..e1f8419 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/kerberos/PrepareDisableKerberosServerAction.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/kerberos/PrepareDisableKerberosServerAction.java
@@ -83,8 +83,7 @@ public class PrepareDisableKerberosServerAction extends AbstractPrepareKerberosS
List<ServiceComponentHost> schToProcess = kerberosHelper.getServiceComponentHostsToProcess(cluster,
kerberosDescriptor,
getServiceComponentFilter(),
- null, identityFilter,
- sch -> true);
+ null);
Map<String, Map<String, String>> kerberosConfigurations = new HashMap<>();
Map<String, String> commandParameters = getCommandParameters();
http://git-wip-us.apache.org/repos/asf/ambari/blob/5af1e539/ambari-server/src/main/java/org/apache/ambari/server/serveraction/kerberos/PrepareEnableKerberosServerAction.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/kerberos/PrepareEnableKerberosServerAction.java b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/kerberos/PrepareEnableKerberosServerAction.java
index 3ec84fa..335451f 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/kerberos/PrepareEnableKerberosServerAction.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/kerberos/PrepareEnableKerberosServerAction.java
@@ -92,8 +92,11 @@ public class PrepareEnableKerberosServerAction extends PrepareKerberosIdentities
}
}
+ KerberosHelper kerberosHelper = getKerberosHelper();
+ Map<String, ? extends Collection<String>> serviceComponentFilter = getServiceComponentFilter();
+ Collection<String> hostFilter = getHostFilter();
Collection<String> identityFilter = getIdentityFilter();
- List<ServiceComponentHost> schToProcess = getServiceComponentHostsToProcess(cluster, kerberosDescriptor, identityFilter);
+ List<ServiceComponentHost> schToProcess = kerberosHelper.getServiceComponentHostsToProcess(cluster, kerberosDescriptor, serviceComponentFilter, hostFilter);
String dataDirectory = getCommandParameterValue(commandParameters, DATA_DIRECTORY);
Map<String, Map<String, String>> kerberosConfigurations = new HashMap<>();
@@ -107,7 +110,6 @@ public class PrepareEnableKerberosServerAction extends PrepareKerberosIdentities
actionLog.writeStdOut(String.format("Processing %d components", schCount));
}
- KerberosHelper kerberosHelper = getKerberosHelper();
Map<String, Set<String>> propertiesToRemove = new HashMap<>();
Map<String, Set<String>> propertiesToIgnore = new HashMap<>();
Set<String> services = cluster.getServices().keySet();
http://git-wip-us.apache.org/repos/asf/ambari/blob/5af1e539/ambari-server/src/main/java/org/apache/ambari/server/serveraction/kerberos/PrepareKerberosIdentitiesServerAction.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/kerberos/PrepareKerberosIdentitiesServerAction.java b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/kerberos/PrepareKerberosIdentitiesServerAction.java
index 49828cb..038d1b5 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/kerberos/PrepareKerberosIdentitiesServerAction.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/kerberos/PrepareKerberosIdentitiesServerAction.java
@@ -32,8 +32,12 @@ import org.apache.ambari.server.agent.CommandReport;
import org.apache.ambari.server.controller.KerberosHelper;
import org.apache.ambari.server.state.Cluster;
import org.apache.ambari.server.state.ServiceComponentHost;
+import org.apache.ambari.server.state.kerberos.KerberosComponentDescriptor;
import org.apache.ambari.server.state.kerberos.KerberosDescriptor;
+import org.apache.ambari.server.state.kerberos.KerberosIdentityDescriptor;
+import org.apache.ambari.server.state.kerberos.KerberosServiceDescriptor;
import org.apache.commons.collections.CollectionUtils;
+import org.apache.commons.lang.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -67,11 +71,22 @@ public class PrepareKerberosIdentitiesServerAction extends AbstractPrepareKerber
throw new AmbariException("Missing cluster object");
}
+ KerberosHelper kerberosHelper = getKerberosHelper();
+
KerberosDescriptor kerberosDescriptor = getKerberosDescriptor(cluster, false);
+ Map<String, String> commandParameters = getCommandParameters();
+ OperationType operationType = getOperationType(commandParameters);
+
+ Map<String, ? extends Collection<String>> serviceComponentFilter = getServiceComponentFilter();
+ Collection<String> hostFilter = getHostFilter();
Collection<String> identityFilter = getIdentityFilter();
- List<ServiceComponentHost> schToProcess = getServiceComponentHostsToProcess(cluster, kerberosDescriptor, identityFilter);
+ // If the operationType is DEFAULT, let getServiceComponentHostsToProcess determine which
+ // ServiceComponentHosts to process based on the filters. However, if we are regenerating
+ // keytabs for a specific set of components, build the identity filter below so we can
+ // customize what needs to be done.
+ List<ServiceComponentHost> schToProcess = kerberosHelper.getServiceComponentHostsToProcess(cluster, kerberosDescriptor,
+ (operationType == OperationType.DEFAULT) ? serviceComponentFilter : null, hostFilter);
- Map<String, String> commandParameters = getCommandParameters();
String dataDirectory = getCommandParameterValue(commandParameters, DATA_DIRECTORY);
Map<String, Map<String, String>> kerberosConfigurations = new HashMap<>();
@@ -84,18 +99,32 @@ public class PrepareKerberosIdentitiesServerAction extends AbstractPrepareKerber
actionLog.writeStdOut(String.format("Processing %d components", schCount));
}
- KerberosHelper kerberosHelper = getKerberosHelper();
Set<String> services = cluster.getServices().keySet();
Map<String, Set<String>> propertiesToRemove = new HashMap<>();
Map<String, Set<String>> propertiesToIgnore = new HashMap<>();
boolean includeAmbariIdentity = "true".equalsIgnoreCase(getCommandParameterValue(commandParameters, KerberosServerAction.INCLUDE_AMBARI_IDENTITY));
+ // If a host filter is set, do not include the Ambari service identity.
+ includeAmbariIdentity &= (hostFilter == null);
+
+ if (serviceComponentFilter != null) {
+ // If we are including the Ambari identity, ensure that the service/component filter
+ // contains the AMBARI/AMBARI_SERVER component; else do not include the Ambari service identity.
+ includeAmbariIdentity &= (serviceComponentFilter.get("AMBARI") != null) && serviceComponentFilter.get("AMBARI").contains("AMBARI_SERVER");
+
+ if (operationType != OperationType.DEFAULT) {
+ // Update the identity filter, if necessary
+ identityFilter = updateIdentityFilter(kerberosDescriptor, identityFilter, serviceComponentFilter);
+ }
+ }
+
// Calculate the current host-specific configurations. These will be used to replace
// variables within the Kerberos descriptor data
Map<String, Map<String, String>> configurations = kerberosHelper.calculateConfigurations(cluster, null, kerberosDescriptor, false, false);
processServiceComponentHosts(cluster, kerberosDescriptor, schToProcess, identityFilter, dataDirectory,
- configurations, kerberosConfigurations, includeAmbariIdentity, propertiesToIgnore, !CollectionUtils.isEmpty(getHostFilter()));
+ configurations, kerberosConfigurations, includeAmbariIdentity, propertiesToIgnore,
+ hostFilter != null);
kerberosHelper.applyStackAdvisorUpdates(cluster, services, configurations, kerberosConfigurations,
propertiesToIgnore, propertiesToRemove, true);
@@ -119,35 +148,6 @@ public class PrepareKerberosIdentitiesServerAction extends AbstractPrepareKerber
}
/**
- * Calls {@link KerberosHelper#getServiceComponentHostsToProcess(Cluster, KerberosDescriptor, Map, Collection, Collection, KerberosHelper.Command)}
- * with no filter on ServiceComponentHosts
- * <p/>
- * The <code>shouldProcessCommand</code> implementation passed to KerberosHelper#getServiceComponentHostsToProcess
- * always returns true, indicating to process all ServiceComponentHosts.
- *
- * @param cluster the cluster
- * @param kerberosDescriptor the current Kerberos descriptor
- * @param identityFilter a list of identities to include, or all if null @return the list of ServiceComponentHosts to process
- * @throws AmbariException
- * @see KerberosHelper#getServiceComponentHostsToProcess(Cluster, KerberosDescriptor, Map, Collection, Collection, KerberosHelper.Command)
- */
- protected List<ServiceComponentHost> getServiceComponentHostsToProcess(Cluster cluster,
- KerberosDescriptor kerberosDescriptor,
- Collection<String> identityFilter)
- throws AmbariException {
- return getKerberosHelper().getServiceComponentHostsToProcess(cluster,
- kerberosDescriptor,
- getServiceComponentFilter(),
- getHostFilter(), identityFilter,
- new KerberosHelper.Command<Boolean, ServiceComponentHost>() {
- @Override
- public Boolean invoke(ServiceComponentHost sch) throws AmbariException {
- return true;
- }
- });
- }
-
- /**
* Calls {@link KerberosHelper#getKerberosDescriptor(Cluster, boolean)}
*
* @param cluster cluster instance
@@ -200,5 +200,81 @@ public class PrepareKerberosIdentitiesServerAction extends AbstractPrepareKerber
calculatedConfiguration, kerberosConfigurations, includePreconfiguredData);
}
}
+
+ /**
+ * Iterate through the identities in the Kerberos descriptor to find the relevant identities to
+ * add to the identity filter.
+ * <p>
+ * The set of identities to include in the filter is determined by whether they are explicit
+ * identities set in a component or service in the supplied service/component filter.
+ *
+ * @param kerberosDescriptor the Kerberos descriptor
+ * @param identityFilter the existing identity filter
+ * @param serviceComponentFilter the service/component filter
+ * @return a new collection of paths (including any existing paths) to act as the updated identity filter
+ */
+ private Collection<String> updateIdentityFilter(KerberosDescriptor kerberosDescriptor,
+ Collection<String> identityFilter,
+ Map<String, ? extends Collection<String>> serviceComponentFilter) {
+
+ Set<String> updatedFilter = (identityFilter == null) ? new HashSet<>() : new HashSet<>(identityFilter);
+
+ Map<String, KerberosServiceDescriptor> serviceDescriptors = kerberosDescriptor.getServices();
+
+ if (serviceDescriptors != null) {
+ for (KerberosServiceDescriptor serviceDescriptor : serviceDescriptors.values()) {
+ String serviceName = serviceDescriptor.getName();
+
+ if (serviceComponentFilter.containsKey("*") || serviceComponentFilter.containsKey(serviceName)) {
+ Collection<String> componentFilter = serviceComponentFilter.get(serviceName);
+ boolean anyComponent = ((componentFilter == null) || componentFilter.contains("*"));
+
+ // Only include the service-wide identities if the component filter is null or contains "*",
+ // which indicates that all components for the given service are to be processed.
+ if (anyComponent) {
+ addIdentitiesToFilter(serviceDescriptor.getIdentities(), updatedFilter, true);
+ }
+
+ Map<String, KerberosComponentDescriptor> componentDescriptors = serviceDescriptor.getComponents();
+ if (componentDescriptors != null) {
+ for (KerberosComponentDescriptor componentDescriptor : componentDescriptors.values()) {
+ String componentName = componentDescriptor.getName();
+ if (anyComponent || (componentFilter.contains(componentName))) {
+ addIdentitiesToFilter(componentDescriptor.getIdentities(), updatedFilter, true);
+ }
+ }
+ }
+ }
+ }
+ }
+
+ return updatedFilter;
+ }
+
+ /**
+ * Add the path of each identity in the collection of identities to the supplied identity filter
+ * if that identity is not a reference to another identity or if references are allowed.
+ * @param identityDescriptors the collection of identity descriptors to process
+ * @param identityFilter the identity filter to modify
+ * @param skipReferences      true to skip identities that are references to other identities; false otherwise
+ */
+ private void addIdentitiesToFilter(List<KerberosIdentityDescriptor> identityDescriptors,
+ Collection<String> identityFilter, boolean skipReferences) {
+ if (!CollectionUtils.isEmpty(identityDescriptors)) {
+ for (KerberosIdentityDescriptor identityDescriptor : identityDescriptors) {
+ if (!skipReferences || !identityDescriptor.isReference()) {
+ String identityPath = identityDescriptor.getPath();
+
+ if (!StringUtils.isEmpty(identityPath)) {
+ identityFilter.add(identityPath);
+
+ // Find and add the references TO this identity to ensure the new/updated keytab file is
+ // sent to the appropriate host(s)
+ addIdentitiesToFilter(identityDescriptor.findReferences(), identityFilter, false);
+ }
+ }
+ }
+ }
+ }
}
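To make the filter semantics concrete, a small hypothetical sketch (not part of the commit): a null component collection, or one containing "*", selects a service's service-wide identities plus all of its components, while an explicit component list narrows the selection:

import java.util.Arrays;
import java.util.Collection;
import java.util.HashMap;
import java.util.Map;

public class ServiceComponentFilterSketch {
  public static void main(String[] args) {
    Map<String, Collection<String>> serviceComponentFilter = new HashMap<>();
    serviceComponentFilter.put("SERVICE1", null);                          // all of SERVICE1 (service-wide + every component)
    serviceComponentFilter.put("SERVICE2", Arrays.asList("COMPONENT21"));  // only COMPONENT21's identities
    // With OperationType != DEFAULT, updateIdentityFilter would add the explicit identity paths
    // for these selections (e.g. "/SERVICE1/...", "/SERVICE2/COMPONENT21/...") plus any identities
    // that reference them, so regenerated keytabs reach every dependent host.
    System.out.println(serviceComponentFilter);
  }
}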
http://git-wip-us.apache.org/repos/asf/ambari/blob/5af1e539/ambari-server/src/main/java/org/apache/ambari/server/state/kerberos/AbstractKerberosDescriptor.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/kerberos/AbstractKerberosDescriptor.java b/ambari-server/src/main/java/org/apache/ambari/server/state/kerberos/AbstractKerberosDescriptor.java
index b496942..3a1eb4a 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/kerberos/AbstractKerberosDescriptor.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/kerberos/AbstractKerberosDescriptor.java
@@ -259,6 +259,31 @@ public abstract class AbstractKerberosDescriptor {
}
/**
+ * Calculate the path to this identity descriptor for logging purposes.
+ * Examples:
+ * <ul>
+ * <li>/</li>
+ * <li>/SERVICE</li>
+ * <li>/SERVICE/COMPONENT</li>
+ * <li>/SERVICE/COMPONENT/identity_name</li>
+ * </ul>
+ *
+ * @return a path
+ */
+ public String getPath() {
+ StringBuilder path = new StringBuilder();
+ AbstractKerberosDescriptor current = this;
+ while (current != null && (current.getName() != null)) {
+ path.insert(0, current.getName());
+ path.insert(0, '/');
+ current = current.getParent();
+ }
+
+ return path.toString();
+ }
+
+ /**
* An enumeration of the different Kerberos (sub)descriptors for internal use.
*/
public enum Type {
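A self-contained sketch of the path calculation introduced above; Node is a stand-in for AbstractKerberosDescriptor, and the tree below is hypothetical:

public class PathSketch {
  static final class Node {
    final String name;
    final Node parent;
    Node(String name, Node parent) { this.name = name; this.parent = parent; }

    // Walk up the tree, prepending "/<name>" until the nameless root is reached.
    String getPath() {
      StringBuilder path = new StringBuilder();
      for (Node current = this; current != null && current.name != null; current = current.parent) {
        path.insert(0, current.name);
        path.insert(0, '/');
      }
      return path.toString();
    }
  }

  public static void main(String[] args) {
    Node root = new Node(null, null);
    Node service = new Node("SERVICE", root);
    Node component = new Node("COMPONENT", service);
    Node identity = new Node("identity_name", component);
    System.out.println(identity.getPath()); // /SERVICE/COMPONENT/identity_name
  }
}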
http://git-wip-us.apache.org/repos/asf/ambari/blob/5af1e539/ambari-server/src/main/java/org/apache/ambari/server/state/kerberos/AbstractKerberosDescriptorContainer.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/kerberos/AbstractKerberosDescriptorContainer.java b/ambari-server/src/main/java/org/apache/ambari/server/state/kerberos/AbstractKerberosDescriptorContainer.java
index 9ddb941..73550f4 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/kerberos/AbstractKerberosDescriptorContainer.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/kerberos/AbstractKerberosDescriptorContainer.java
@@ -862,22 +862,9 @@ public abstract class AbstractKerberosDescriptorContainer extends AbstractKerber
referencedIdentity = getReferencedIdentityDescriptor(identity.getName());
if(referencedIdentity != null) {
- // Calculate the path to this identity descriptor for logging purposes.
- // Examples:
- // /
- // /SERVICE
- // /SERVICE/COMPONENT
- StringBuilder path = new StringBuilder();
- AbstractKerberosDescriptor parent = identity.getParent();
- while(parent != null && (parent.getName() != null)) {
- path.insert(0, parent.getName());
- path.insert(0, '/');
- parent = parent.getParent();
- }
-
// Log this since it is deprecated...
LOG.warn("Referenced identities should be declared using the identity's \"reference\" attribute, not the identity's \"name\" attribute." +
- " This is a deprecated feature. Problems may occur in the future unless this is corrected: {}:{}", path, identity.getName());
+ " This is a deprecated feature. Problems may occur in the future unless this is corrected: {}:{}", identity.getPath(), identity.getName());
}
}
} catch (AmbariException e) {
@@ -896,6 +883,9 @@ public abstract class AbstractKerberosDescriptorContainer extends AbstractKerber
} else {
dereferencedIdentity = new KerberosIdentityDescriptor(identity.toMap());
}
+
+ // Force the path for this identity descriptor to be the same as the original identity descriptor's.
+ dereferencedIdentity.setPath(identity.getPath());
}
return dereferencedIdentity;
http://git-wip-us.apache.org/repos/asf/ambari/blob/5af1e539/ambari-server/src/main/java/org/apache/ambari/server/state/kerberos/KerberosIdentityDescriptor.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/kerberos/KerberosIdentityDescriptor.java b/ambari-server/src/main/java/org/apache/ambari/server/state/kerberos/KerberosIdentityDescriptor.java
index ef45343..200a069 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/kerberos/KerberosIdentityDescriptor.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/kerberos/KerberosIdentityDescriptor.java
@@ -17,10 +17,15 @@
*/
package org.apache.ambari.server.state.kerberos;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.List;
import java.util.Map;
import org.apache.ambari.server.collections.Predicate;
import org.apache.ambari.server.collections.PredicateUtils;
+import org.apache.commons.collections.CollectionUtils;
+import org.apache.commons.lang.StringUtils;
import com.google.common.base.Optional;
@@ -94,6 +99,8 @@ public class KerberosIdentityDescriptor extends AbstractKerberosDescriptor {
*/
private Predicate when = null;
+ private String path = null;
+
/**
* Creates a new KerberosIdentityDescriptor
*
@@ -157,6 +164,47 @@ public class KerberosIdentityDescriptor extends AbstractKerberosDescriptor {
}
/**
+ * Gets the absolute path to the referenced Kerberos identity definition
+ *
+ * @return the path to the referenced Kerberos identity definition or <code>null</code> if not set
+ */
+ public String getReferenceAbsolutePath() {
+ String absolutePath;
+ if(StringUtils.isEmpty(reference)) {
+ absolutePath = getName();
+ }
+ else {
+ absolutePath = reference;
+ }
+
+ if(!StringUtils.isEmpty(absolutePath) && !absolutePath.startsWith("/")) {
+ if(absolutePath.startsWith("..")) {
+ AbstractKerberosDescriptor parent = getParent();
+ if(parent != null) {
+ parent = parent.getParent();
+
+ if(parent != null) {
+ absolutePath = absolutePath.replace("..", parent.getPath());
+ }
+ }
+ }
+ else if(absolutePath.startsWith(".")) {
+ AbstractKerberosDescriptor parent = getParent();
+ if (parent != null) {
+ absolutePath = absolutePath.replace(".", parent.getPath());
+ }
+ }
+ }
+
+ return absolutePath;
+ }
+
+ /**
* Sets the path to the referenced Kerberos identity definition
*
* @param reference the path to the referenced Kerberos identity definition or <code>null</code>
@@ -356,6 +404,59 @@ public class KerberosIdentityDescriptor extends AbstractKerberosDescriptor {
}
}
+ /**
+ * Determines whether this {@link KerberosIdentityDescriptor} indicates it is a reference to some
+ * other {@link KerberosIdentityDescriptor}.
+ * <p>
+ * A KerberosIdentityDescriptor is a reference if its <code>reference</code> attribute is set
+ * or if (for backwards compatibility) its name indicates a path. For example:
+ * <ul>
+ * <li><code>/SERVICE/COMPONENT/identity_name</code></li>
+ * <li><code>/identity_name</code></li>
+ * <li><code>./identity_name</code></li>
+ * </ul>
+ *
+ * @return true if this {@link KerberosIdentityDescriptor} indicates a reference; otherwise false
+ */
+ public boolean isReference() {
+ String name = getName();
+ return !StringUtils.isEmpty(reference) ||
+ (!StringUtils.isEmpty(name) && (name.startsWith("/") || name.startsWith("./")));
+ }
+
+ /**
+ * Calculate the path to this identity descriptor for logging purposes.
+ * Examples:
+ * <ul>
+ * <li>/</li>
+ * <li>/SERVICE</li>
+ * <li>/SERVICE/COMPONENT</li>
+ * <li>/SERVICE/COMPONENT/identity_name</li>
+ * </ul>
+ * <p>
+ * This implementation calculates and caches the path if the path has not been previously set.
+ *
+ * @return a path
+ */
+ @Override
+ public String getPath() {
+ if (path == null) {
+ path = super.getPath();
+ }
+
+ return path;
+ }
+
+ /**
+ * Explicitly set the path to this {@link KerberosIdentityDescriptor}.
+ * <p>
+ * This is useful when creating detached identity descriptors while dereferencing identity references
+ * so that the path information is not lost.
+ *
+ * @param path a path
+ */
+ void setPath(String path) {
+ this.path = path;
+ }
+
@Override
public int hashCode() {
return super.hashCode() +
@@ -406,4 +507,63 @@ public class KerberosIdentityDescriptor extends AbstractKerberosDescriptor {
return false;
}
}
+
+ /**
+ * Find all of the {@link KerberosIdentityDescriptor}s that reference this {@link KerberosIdentityDescriptor}
+ *
+ * @return a list of {@link KerberosIdentityDescriptor}s
+ */
+ public List<KerberosIdentityDescriptor> findReferences() {
+ AbstractKerberosDescriptor root = getRoot();
+ if(root instanceof AbstractKerberosDescriptorContainer) {
+ return findIdentityReferences((AbstractKerberosDescriptorContainer)root, getPath());
+ }
+ else {
+ return null;
+ }
+ }
+
+ /**
+ * Given a root, recursively traverse the tree of {@link AbstractKerberosDescriptorContainer}s looking for
+ * {@link KerberosIdentityDescriptor}s that declare the given path as the referenced Kerberos identity.
+ *
+ * @param root the starting point
+ * @param path the path to the referenced {@link KerberosIdentityDescriptor} in the {@link KerberosDescriptor}
+ * @return a list of {@link KerberosIdentityDescriptor}s
+ */
+ private List<KerberosIdentityDescriptor> findIdentityReferences(AbstractKerberosDescriptorContainer root, String path) {
+ if (root == null) {
+ return null;
+ }
+
+ List<KerberosIdentityDescriptor> references = new ArrayList<>();
+
+ // Process the KerberosIdentityDescriptors found in this node.
+ List<KerberosIdentityDescriptor> identityDescriptors = root.getIdentities();
+ if (identityDescriptors != null) {
+ for (KerberosIdentityDescriptor identityDescriptor : identityDescriptors) {
+ if (identityDescriptor.isReference()) {
+ String reference = identityDescriptor.getReferenceAbsolutePath();
+
+ if (!StringUtils.isEmpty(reference) && path.equals(reference)) {
+ references.add(identityDescriptor);
+ }
+ }
+ }
+ }
+
+ // Process the children of the node
+ Collection<? extends AbstractKerberosDescriptorContainer> children = root.getChildContainers();
+ if(!CollectionUtils.isEmpty(children)) {
+ for (AbstractKerberosDescriptorContainer child : children) {
+ Collection<KerberosIdentityDescriptor> childReferences = findIdentityReferences(child, path);
+ if (!CollectionUtils.isEmpty(childReferences)) {
+ // If references were found in the current child, add them to this node's list of references.
+ references.addAll(childReferences);
+ }
+ }
+ }
+
+ return references;
+ }
}
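A simplified sketch of the relative-reference resolution in getReferenceAbsolutePath above; the paths are hypothetical, and the real method derives parent paths from the descriptor tree rather than taking them as arguments:

public class ReferenceResolutionSketch {
  // parentPath is the identity's containing component/service; grandparentPath is one level above it.
  static String resolve(String reference, String parentPath, String grandparentPath) {
    if (reference.startsWith("..")) {
      return reference.replace("..", grandparentPath);
    } else if (reference.startsWith(".")) {
      return reference.replace(".", parentPath);
    }
    return reference; // already absolute (or a plain name)
  }

  public static void main(String[] args) {
    // "./x" resolves against the identity's own container...
    System.out.println(resolve("./component1_identity", "/SERVICE2/COMPONENT21", "/SERVICE2"));
    // -> /SERVICE2/COMPONENT21/component1_identity
    // ...while "../x" resolves against the container's parent.
    System.out.println(resolve("../service2_identity", "/SERVICE2/COMPONENT21", "/SERVICE2"));
    // -> /SERVICE2/service2_identity
  }
}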
http://git-wip-us.apache.org/repos/asf/ambari/blob/5af1e539/ambari-server/src/test/java/org/apache/ambari/server/controller/KerberosHelperTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/KerberosHelperTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/KerberosHelperTest.java
index 60d7fd9..7ed52d2 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/KerberosHelperTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/KerberosHelperTest.java
@@ -1441,11 +1441,6 @@ public class KerberosHelperTest extends EasyMockSupport {
.andReturn(Collections.singletonList(schKerberosClient))
.once();
- final Clusters clusters = injector.getInstance(Clusters.class);
- expect(clusters.getHost("host1"))
- .andReturn(host)
- .once();
-
final AmbariManagementController ambariManagementController = injector.getInstance(AmbariManagementController.class);
expect(ambariManagementController.findConfigurationTagsWithOverrides(cluster, null))
.andReturn(Collections.emptyMap())
http://git-wip-us.apache.org/repos/asf/ambari/blob/5af1e539/ambari-server/src/test/java/org/apache/ambari/server/controller/utilities/KerberosIdentityCleanerTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/utilities/KerberosIdentityCleanerTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/utilities/KerberosIdentityCleanerTest.java
index 663934f..2518da9 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/utilities/KerberosIdentityCleanerTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/utilities/KerberosIdentityCleanerTest.java
@@ -77,7 +77,7 @@ public class KerberosIdentityCleanerTest extends EasyMockSupport {
@Test
public void removesAllKerberosIdentitesOfComponentAfterComponentWasUninstalled() throws Exception {
installComponent(OOZIE, OOZIE_SERVER, HOST);
- kerberosHelper.deleteIdentities(cluster, singletonList(new Component(HOST, OOZIE, OOZIE_SERVER)), newHashSet("oozie_server1", "oozie_server2"));
+ kerberosHelper.deleteIdentities(cluster, singletonList(new Component(HOST, OOZIE, OOZIE_SERVER)), newHashSet("/OOZIE/OOZIE_SERVER/oozie_server1", "/OOZIE/OOZIE_SERVER/oozie_server2"));
expectLastCall().once();
replayAll();
uninstallComponent(OOZIE, OOZIE_SERVER, HOST);
@@ -95,7 +95,7 @@ public class KerberosIdentityCleanerTest extends EasyMockSupport {
public void skipsRemovingIdentityThatIsSharedByPrincipalName() throws Exception {
installComponent(OOZIE, OOZIE_SERVER, HOST);
installComponent(OOZIE_2, OOZIE_SERVER_2, HOST);
- kerberosHelper.deleteIdentities(cluster, singletonList(new Component(HOST, OOZIE, OOZIE_SERVER)), newHashSet("oozie_server1"));
+ kerberosHelper.deleteIdentities(cluster, singletonList(new Component(HOST, OOZIE, OOZIE_SERVER)), newHashSet("/OOZIE/OOZIE_SERVER/oozie_server1"));
expectLastCall().once();
replayAll();
uninstallComponent(OOZIE, OOZIE_SERVER, HOST);
@@ -106,7 +106,7 @@ public class KerberosIdentityCleanerTest extends EasyMockSupport {
public void skipsRemovingIdentityThatIsSharedByKeyTabFilePath() throws Exception {
installComponent(YARN, RESOURCE_MANAGER, HOST);
installComponent(YARN_2, RESOURCE_MANAGER_2, HOST);
- kerberosHelper.deleteIdentities(cluster, singletonList(new Component(HOST, YARN, RESOURCE_MANAGER)), newHashSet("rm_unique"));
+ kerberosHelper.deleteIdentities(cluster, singletonList(new Component(HOST, YARN, RESOURCE_MANAGER)), newHashSet("/YARN/RESOURCE_MANAGER/rm_unique"));
expectLastCall().once();
replayAll();
uninstallComponent(YARN, RESOURCE_MANAGER, HOST);
@@ -133,7 +133,7 @@ public class KerberosIdentityCleanerTest extends EasyMockSupport {
@Test
public void removesServiceIdentitiesSkipComponentIdentitiesAfterServiceWasUninstalled() throws Exception {
installComponent(OOZIE, OOZIE_SERVER, HOST);
- kerberosHelper.deleteIdentities(cluster, hdfsComponents(), newHashSet("hdfs-service"));
+ kerberosHelper.deleteIdentities(cluster, hdfsComponents(), newHashSet("/HDFS/hdfs-service"));
expectLastCall().once();
replayAll();
uninstallService(HDFS, hdfsComponents());
http://git-wip-us.apache.org/repos/asf/ambari/blob/5af1e539/ambari-server/src/test/java/org/apache/ambari/server/state/kerberos/KerberosDescriptorTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/kerberos/KerberosDescriptorTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/kerberos/KerberosDescriptorTest.java
index d6bef02..079096d 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/state/kerberos/KerberosDescriptorTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/state/kerberos/KerberosDescriptorTest.java
@@ -24,6 +24,7 @@ import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
+import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
@@ -79,6 +80,9 @@ public class KerberosDescriptorTest {
" {" +
" \"name\": \"service1_spnego\"," +
" \"reference\": \"/spnego\"" +
+ " }," +
+ " {" +
+ " \"name\": \"service1_identity\"" +
" }" +
" ]," +
" \"name\": \"SERVICE1\"" +
@@ -87,6 +91,39 @@ public class KerberosDescriptorTest {
" \"identities\": [" +
" {" +
" \"name\": \"/spnego\"" +
+ " }," +
+ " {" +
+ " \"name\": \"service2_identity\"" +
+ " }" +
+ " ]," +
+ " \"components\": [" +
+ " {" +
+ " \"identities\": [" +
+ " {" +
+ " \"name\": \"component1_identity\"" +
+ " }," +
+ " {" +
+ " \"name\": \"service2_component1_service1_identity\"," +
+ " \"reference\": \"/SERVICE1/service1_identity\"" +
+ " }," +
+ " {" +
+ " \"name\": \"service2_component1_component1_identity\"," +
+ " \"reference\": \"./component1_identity\"" +
+ " }," +
+ " {" +
+ " \"name\": \"service2_component1_service2_identity\"," +
+ " \"reference\": \"../service2_identity\"" +
+ " }" +
+ " ]," +
+ " \"name\": \"COMPONENT21\"" +
+ " }," +
+ " {" +
+ " \"identities\": [" +
+ " {" +
+ " \"name\": \"component2_identity\"" +
+ " }" +
+ " ]," +
+ " \"name\": \"COMPONENT22\"" +
" }" +
" ]," +
" \"name\": \"SERVICE2\"" +
@@ -547,15 +584,118 @@ public class KerberosDescriptorTest {
// Reference is determined using the "reference" attribute
serviceDescriptor = kerberosDescriptor.getService("SERVICE1");
identities = serviceDescriptor.getIdentities(true, null);
- Assert.assertEquals(1, identities.size());
- Assert.assertEquals("service1_spnego", identities.get(0).getName());
- Assert.assertEquals("/spnego", identities.get(0).getReference());
+ Assert.assertEquals(2, identities.size());
+ for (KerberosIdentityDescriptor identity : identities) {
+ if (identity.isReference()) {
+ Assert.assertEquals("service1_spnego", identity.getName());
+ Assert.assertEquals("/spnego", identity.getReference());
+ } else {
+ Assert.assertEquals("service1_identity", identity.getName());
+ Assert.assertNull(identity.getReference());
+ }
+ }
+
+ Assert.assertEquals("service1_identity", identities.get(1).getName());
+ Assert.assertNull(identities.get(1).getReference());
// Reference is determined using the "name" attribute
serviceDescriptor = kerberosDescriptor.getService("SERVICE2");
identities = serviceDescriptor.getIdentities(true, null);
+ Assert.assertEquals(2, identities.size());
+ for (KerberosIdentityDescriptor identity : identities) {
+ if (identity.isReference()) {
+ Assert.assertEquals("/spnego", identity.getName());
+ Assert.assertNull(identity.getReference());
+ } else {
+ Assert.assertEquals("service2_identity", identity.getName());
+ Assert.assertNull(identity.getReference());
+ }
+ }
+ }
+
+ @Test
+ public void testGetPath() throws Exception {
+ KerberosDescriptor kerberosDescriptor;
+ KerberosServiceDescriptor serviceDescriptor;
+ List<KerberosIdentityDescriptor> identities;
+
+ kerberosDescriptor = KERBEROS_DESCRIPTOR_FACTORY.createInstance(JSON_VALUE);
+
+ serviceDescriptor = kerberosDescriptor.getService("SERVICE_NAME");
+ identities = serviceDescriptor.getIdentities(false, null);
+ Assert.assertEquals(1, identities.size());
+ Assert.assertEquals("/SERVICE_NAME/identity_1", identities.get(0).getPath());
+
+ KerberosComponentDescriptor componentDescriptor = serviceDescriptor.getComponent("COMPONENT_NAME");
+ identities = componentDescriptor.getIdentities(false, null);
Assert.assertEquals(1, identities.size());
- Assert.assertEquals("/spnego", identities.get(0).getName());
- Assert.assertNull(identities.get(0).getReference());
+ Assert.assertEquals("/SERVICE_NAME/COMPONENT_NAME/identity_1", identities.get(0).getPath());
+
+
+ kerberosDescriptor = KERBEROS_DESCRIPTOR_FACTORY.createInstance(JSON_VALUE_IDENTITY_REFERENCES);
+
+ serviceDescriptor = kerberosDescriptor.getService("SERVICE1");
+ identities = serviceDescriptor.getIdentities(true, null);
+ Assert.assertEquals(2, identities.size());
+ Assert.assertEquals("/SERVICE1/service1_spnego", identities.get(0).getPath());
+ Assert.assertEquals("/SERVICE1/service1_identity", identities.get(1).getPath());
+ }
+
+ @Test
+ public void testGetReferences() throws Exception {
+ KerberosDescriptor kerberosDescriptor = KERBEROS_DESCRIPTOR_FACTORY.createInstance(JSON_VALUE_IDENTITY_REFERENCES);
+ KerberosIdentityDescriptor identity;
+ List<KerberosIdentityDescriptor> references;
+ Set<String> paths;
+
+ // Find all references to /spnego
+ identity = kerberosDescriptor.getIdentity("spnego");
+ references = identity.findReferences();
+
+ Assert.assertNotNull(references);
+ Assert.assertEquals(2, references.size());
+
+ paths = collectPaths(references);
+ Assert.assertTrue(paths.contains("/SERVICE1/service1_spnego"));
+ Assert.assertTrue(paths.contains("/SERVICE2//spnego"));
+
+ // Find all references to /SERVICE1/service1_identity
+ identity = kerberosDescriptor.getService("SERVICE1").getIdentity("service1_identity");
+ references = identity.findReferences();
+
+ Assert.assertNotNull(references);
+ Assert.assertEquals(1, references.size());
+
+ paths = collectPaths(references);
+ Assert.assertTrue(paths.contains("/SERVICE2/COMPONENT21/service2_component1_service1_identity"));
+
+ // Find all references to /SERVICE2/COMPONENT21/component1_identity (testing ./)
+ identity = kerberosDescriptor.getService("SERVICE2").getComponent("COMPONENT21").getIdentity("component1_identity");
+ references = identity.findReferences();
+
+ Assert.assertNotNull(references);
+ Assert.assertEquals(1, references.size());
+
+ paths = collectPaths(references);
+ Assert.assertTrue(paths.contains("/SERVICE2/COMPONENT21/service2_component1_component1_identity"));
+
+ // Find all references to /SERVICE2/component2_identity (testing ../)
+ identity = kerberosDescriptor.getService("SERVICE2").getIdentity("service2_identity");
+ references = identity.findReferences();
+
+ Assert.assertNotNull(references);
+ Assert.assertEquals(1, references.size());
+
+ paths = collectPaths(references);
+ Assert.assertTrue(paths.contains("/SERVICE2/COMPONENT21/service2_component1_service2_identity"));
+ }
+
+ private Set<String> collectPaths(List<KerberosIdentityDescriptor> identityDescriptors) {
+ Set<String> paths = new HashSet<>();
+ for (KerberosIdentityDescriptor identityDescriptor : identityDescriptors) {
+ paths.add(identityDescriptor.getPath());
+ }
+ return paths;
}
+
}
\ No newline at end of file
[13/31] ambari git commit: AMBARI-22159. Replace hostgroup vars for Druid
Posted by jl...@apache.org.
AMBARI-22159. Replace hostgroup vars for Druid
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/75465a83
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/75465a83
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/75465a83
Branch: refs/heads/branch-feature-AMBARI-14714
Commit: 75465a83bd743bb3a2fa74acf30cfca4d0a2287c
Parents: f1c4626
Author: Attila Doroszlai <ad...@hortonworks.com>
Authored: Mon Oct 9 14:40:02 2017 +0200
Committer: Attila Doroszlai <ad...@hortonworks.com>
Committed: Mon Oct 9 18:39:22 2017 +0200
----------------------------------------------------------------------
.../BlueprintConfigurationProcessor.java | 81 +++++++++++++++-----
.../BlueprintConfigurationProcessorTest.java | 32 ++++++++
2 files changed, 92 insertions(+), 21 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ambari/blob/75465a83/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessor.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessor.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessor.java
index 5a6e2cc..03f84a5 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessor.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessor.java
@@ -32,6 +32,7 @@ import java.util.LinkedHashSet;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
+import java.util.Objects;
import java.util.Set;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
@@ -1367,11 +1368,56 @@ public class BlueprintConfigurationProcessor {
ClusterTopology topology);
}
+ private static class HostGroupUpdater implements PropertyUpdater {
+
+ public static final PropertyUpdater INSTANCE = new HostGroupUpdater();
+
+ @Override
+ public String updateForClusterCreate(String propertyName,
+ String origValue,
+ Map<String, Map<String, String>> properties,
+ ClusterTopology topology) {
+
+ //todo: getHostStrings
+ Matcher m = HostGroup.HOSTGROUP_REGEX.matcher(origValue);
+ if (m.find()) {
+ String hostGroupName = m.group(1);
+
+ HostGroupInfo groupInfo = topology.getHostGroupInfo().get(hostGroupName);
+ if (groupInfo == null) {
+ //todo: this should be validated in configuration validation
+ throw new RuntimeException(
+ "Encountered a host group token in configuration which couldn't be matched to a host group: "
+ + hostGroupName);
+ }
+
+ //todo: warn if > hosts
+ return origValue.replace(m.group(0), groupInfo.getHostNames().iterator().next());
+ }
+
+ return origValue;
+ }
+
+ @Override
+ public Collection<String> getRequiredHostGroups(String propertyName,
+ String origValue,
+ Map<String, Map<String, String>> properties,
+ ClusterTopology topology) {
+ //todo: getHostStrings
+ Matcher m = HostGroup.HOSTGROUP_REGEX.matcher(origValue);
+ if (m.find()) {
+ String hostGroupName = m.group(1);
+ return Collections.singleton(hostGroupName);
+ }
+ return Collections.emptySet();
+ }
+ }
+
/**
* Topology based updater which replaces the original host name of a property with the host name
* which runs the associated (master) component in the new cluster.
*/
- private static class SingleHostTopologyUpdater implements PropertyUpdater {
+ private static class SingleHostTopologyUpdater extends HostGroupUpdater {
/**
* Component name
*/
@@ -1402,21 +1448,9 @@ public class BlueprintConfigurationProcessor {
Map<String, Map<String, String>> properties,
ClusterTopology topology) {
- //todo: getHostStrings
- Matcher m = HostGroup.HOSTGROUP_REGEX.matcher(origValue);
- if (m.find()) {
- String hostGroupName = m.group(1);
-
- HostGroupInfo groupInfo = topology.getHostGroupInfo().get(hostGroupName);
- if (groupInfo == null) {
- //todo: this should be validated in configuration validation
- throw new RuntimeException(
- "Encountered a host group token in configuration which couldn't be matched to a host group: "
- + hostGroupName);
- }
-
- //todo: warn if > hosts
- return origValue.replace(m.group(0), groupInfo.getHostNames().iterator().next());
+ String replacedValue = super.updateForClusterCreate(propertyName, origValue, properties, topology);
+ if (!Objects.equals(origValue, replacedValue)) {
+ return replacedValue;
} else {
int matchingGroupCount = topology.getHostGroupsForComponent(component).size();
if (matchingGroupCount == 1) {
@@ -1525,11 +1559,9 @@ public class BlueprintConfigurationProcessor {
String origValue,
Map<String, Map<String, String>> properties,
ClusterTopology topology) {
- //todo: getHostStrings
- Matcher m = HostGroup.HOSTGROUP_REGEX.matcher(origValue);
- if (m.find()) {
- String hostGroupName = m.group(1);
- return Collections.singleton(hostGroupName);
+ Collection<String> result = super.getRequiredHostGroups(propertyName, origValue, properties, topology);
+ if (!result.isEmpty()) {
+ return result;
} else {
Collection<String> matchingGroups = topology.getHostGroupsForComponent(component);
int matchingGroupCount = matchingGroups.size();
@@ -2351,6 +2383,7 @@ public class BlueprintConfigurationProcessor {
allUpdaters.add(nonTopologyUpdaters);
Map<String, PropertyUpdater> amsSiteMap = new HashMap<>();
+ Map<String, PropertyUpdater> druidCommon = new HashMap<>();
Map<String, PropertyUpdater> hdfsSiteMap = new HashMap<>();
Map<String, PropertyUpdater> mapredSiteMap = new HashMap<>();
Map<String, PropertyUpdater> coreSiteMap = new HashMap<>();
@@ -2404,6 +2437,7 @@ public class BlueprintConfigurationProcessor {
Map<String, PropertyUpdater> zookeeperEnvMap = new HashMap<>();
singleHostTopologyUpdaters.put("ams-site", amsSiteMap);
+ singleHostTopologyUpdaters.put("druid-common", druidCommon);
singleHostTopologyUpdaters.put("hdfs-site", hdfsSiteMap);
singleHostTopologyUpdaters.put("mapred-site", mapredSiteMap);
singleHostTopologyUpdaters.put("core-site", coreSiteMap);
@@ -2775,6 +2809,11 @@ public class BlueprintConfigurationProcessor {
}
}
});
+
+ // DRUID
+ druidCommon.put("metastore_hostname", HostGroupUpdater.INSTANCE);
+ druidCommon.put("druid.metadata.storage.connector.connectURI", HostGroupUpdater.INSTANCE);
+ druidCommon.put("druid.zk.service.host", new MultipleHostTopologyUpdater("ZOOKEEPER_SERVER"));
}
private static void addUnitPropertyUpdaters() {
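To make the substitution concrete, a hedged sketch of what HostGroupUpdater.updateForClusterCreate does for a single token; the regex is a stand-in for HostGroup.HOSTGROUP_REGEX, and the topology lookup is faked with a plain map:

import java.util.Collections;
import java.util.Map;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class HostGroupTokenSketch {
  // Stand-in for HostGroup.HOSTGROUP_REGEX; the real pattern lives in the HostGroup class.
  static final Pattern HOSTGROUP_REGEX = Pattern.compile("%HOSTGROUP::(\\w+)%");

  static String updateForClusterCreate(String origValue, Map<String, String> firstHostOfGroup) {
    Matcher m = HOSTGROUP_REGEX.matcher(origValue);
    if (m.find()) {
      String host = firstHostOfGroup.get(m.group(1));
      if (host == null) {
        throw new RuntimeException("Unmatched host group token: " + m.group(1));
      }
      return origValue.replace(m.group(0), host); // only the first token, as in the updater above
    }
    return origValue;
  }

  public static void main(String[] args) {
    String value = "jdbc:mysql://%HOSTGROUP::group1%:3306/druid?createDatabaseIfNotExist=true";
    System.out.println(updateForClusterCreate(value, Collections.singletonMap("group1", "host1")));
    // -> jdbc:mysql://host1:3306/druid?createDatabaseIfNotExist=true
  }
}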
http://git-wip-us.apache.org/repos/asf/ambari/blob/75465a83/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessorTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessorTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessorTest.java
index 68d6349..d137f2c 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessorTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessorTest.java
@@ -88,6 +88,7 @@ import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
+import com.google.common.collect.Sets;
/**
* BlueprintConfigurationProcessor unit tests.
@@ -7933,6 +7934,37 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
assertEquals(someString, metricsReporterRegister);
}
+ @Test
+ public void druidProperties() throws Exception {
+ Map<String, Map<String, String>> properties = new HashMap<>();
+ Map<String, String> druidCommon = new HashMap<>();
+ String connectUriKey = "druid.metadata.storage.connector.connectURI";
+ String metastoreHostnameKey = "metastore_hostname";
+ String connectUriTemplate = "jdbc:mysql://%s:3306/druid?createDatabaseIfNotExist=true";
+ druidCommon.put(connectUriKey, String.format(connectUriTemplate, "%HOSTGROUP::group1%"));
+ druidCommon.put(metastoreHostnameKey, "%HOSTGROUP::group1%");
+ properties.put("druid-common", druidCommon);
+
+ Map<String, Map<String, String>> parentProperties = new HashMap<>();
+ Configuration parentClusterConfig = new Configuration(parentProperties, Collections.<String, Map<String, Map<String, String>>>emptyMap());
+ Configuration clusterConfig = new Configuration(properties, Collections.<String, Map<String, Map<String, String>>>emptyMap(), parentClusterConfig);
+
+ Collection<String> hgComponents1 = Sets.newHashSet("DRUID_COORDINATOR");
+ TestHostGroup group1 = new TestHostGroup("group1", hgComponents1, Collections.singleton("host1"));
+
+ Collection<String> hgComponents2 = Sets.newHashSet("DRUID_BROKER", "DRUID_OVERLORD", "DRUID_ROUTER");
+ TestHostGroup group2 = new TestHostGroup("group2", hgComponents2, Collections.singleton("host2"));
+
+ Collection<TestHostGroup> hostGroups = Arrays.asList(group1, group2);
+
+ ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups);
+ BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology);
+
+ configProcessor.doUpdateForClusterCreate();
+
+ assertEquals(String.format(connectUriTemplate, "host1"), clusterConfig.getPropertyValue("druid-common", connectUriKey));
+ assertEquals("host1", clusterConfig.getPropertyValue("druid-common", metastoreHostnameKey));
+ }
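+
+ // Note: druid.zk.service.host is registered with MultipleHostTopologyUpdater("ZOOKEEPER_SERVER")
+ // rather than HostGroupUpdater, so it resolves against the hosts running ZOOKEEPER_SERVER;
+ // that path is not exercised by this test.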
@Test
public void testAmsPropertiesDefault() throws Exception {
[31/31] ambari git commit: Merge remote-tracking branch 'origin/trunk' into branch-feature-AMBARI-14714
Posted by jl...@apache.org.
Merge remote-tracking branch 'origin/trunk' into branch-feature-AMBARI-14714
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/045d9bfe
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/045d9bfe
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/045d9bfe
Branch: refs/heads/branch-feature-AMBARI-14714
Commit: 045d9bfe3e62fc68c3879ea2b874fabe11bf1622
Parents: eb6b21c 03273bd
Author: Jayush Luniya <jl...@hortonworks.com>
Authored: Tue Oct 10 22:24:45 2017 -0700
Committer: Jayush Luniya <jl...@hortonworks.com>
Committed: Tue Oct 10 22:24:45 2017 -0700
----------------------------------------------------------------------
.../libraries/script/script.py | 68 ++-
ambari-logsearch/ambari-logsearch-web/pom.xml | 11 +-
.../flume/FlumeTimelineMetricsSinkTest.java | 27 +-
.../AmbariCustomCommandExecutionHelper.java | 10 +
.../AmbariManagementControllerImpl.java | 6 +-
.../controller/AmbariManagementHelper.java | 2 +-
.../server/controller/KerberosHelper.java | 9 +-
.../server/controller/KerberosHelperImpl.java | 244 ++++++----
.../ServiceComponentHostResponse.java | 15 +
.../BlueprintConfigurationProcessor.java | 81 +++-
.../internal/HostComponentResourceProvider.java | 2 +
.../utilities/RemovableIdentities.java | 2 +-
.../upgrade/HostVersionOutOfSyncListener.java | 11 +
.../ambari/server/metadata/ActionMetadata.java | 1 +
.../kerberos/CreateKeytabFilesServerAction.java | 2 +-
.../kerberos/CreatePrincipalsServerAction.java | 2 +-
.../kerberos/KerberosServerAction.java | 71 ++-
.../PrepareDisableKerberosServerAction.java | 3 +-
.../PrepareEnableKerberosServerAction.java | 6 +-
.../PrepareKerberosIdentitiesServerAction.java | 142 ++++--
.../ambari/server/stack/ExtensionHelper.java | 57 ++-
.../apache/ambari/server/stack/StackModule.java | 33 +-
.../ambari/server/state/ConfigHelper.java | 164 ++++++-
.../ambari/server/state/PropertyInfo.java | 29 ++
.../ambari/server/state/RefreshCommand.java | 52 +++
.../state/RefreshCommandConfiguration.java | 71 +++
.../apache/ambari/server/state/StackInfo.java | 10 +
.../ambari/server/state/UpgradeContext.java | 24 +-
.../kerberos/AbstractKerberosDescriptor.java | 25 +
.../AbstractKerberosDescriptorContainer.java | 18 +-
.../kerberos/KerberosIdentityDescriptor.java | 160 +++++++
.../svccomphost/ServiceComponentHostImpl.java | 10 +
.../HDFS/2.1.0.2.0/configuration/core-site.xml | 12 +
.../HDFS/2.1.0.2.0/configuration/hdfs-site.xml | 3 +
.../HDFS/2.1.0.2.0/package/scripts/datanode.py | 13 +-
.../HDFS/2.1.0.2.0/package/scripts/hdfs.py | 62 ++-
.../2.1.0.2.0/package/scripts/hdfs_client.py | 5 +
.../2.1.0.2.0/package/scripts/hdfs_namenode.py | 21 +
.../2.1.0.2.0/package/scripts/install_params.py | 6 -
.../HDFS/2.1.0.2.0/package/scripts/namenode.py | 21 +-
.../2.1.0.2.0/package/scripts/params_linux.py | 2 -
.../HDFS/2.1.0.2.0/package/scripts/snamenode.py | 10 +
.../HDFS/3.0.0.3.0/configuration/hdfs-site.xml | 6 +
.../HDFS/3.0.0.3.0/package/scripts/datanode.py | 13 +-
.../HDFS/3.0.0.3.0/package/scripts/hdfs.py | 62 ++-
.../3.0.0.3.0/package/scripts/hdfs_client.py | 5 +
.../3.0.0.3.0/package/scripts/hdfs_namenode.py | 20 +
.../3.0.0.3.0/package/scripts/install_params.py | 6 -
.../HDFS/3.0.0.3.0/package/scripts/namenode.py | 21 +-
.../3.0.0.3.0/package/scripts/params_linux.py | 2 -
.../HDFS/3.0.0.3.0/package/scripts/snamenode.py | 10 +
.../OOZIE/4.0.0.2.0/package/scripts/oozie.py | 6 +-
.../4.0.0.2.0/package/scripts/params_linux.py | 3 -
.../OOZIE/4.2.0.3.0/package/scripts/oozie.py | 5 +-
.../4.2.0.3.0/package/scripts/params_linux.py | 3 -
.../ZEPPELIN/0.7.0/package/scripts/master.py | 33 +-
.../src/main/resources/configuration-schema.xsd | 12 +
.../src/main/resources/properties.json | 1 +
.../services/HDFS/configuration/hdfs-site.xml | 3 +
.../stacks/HDP/2.6/services/DRUID/kerberos.json | 30 --
.../server/controller/KerberosHelperTest.java | 5 -
.../BlueprintConfigurationProcessorTest.java | 32 ++
.../utilities/KerberosIdentityCleanerTest.java | 8 +-
.../HostVersionOutOfSyncListenerTest.java | 14 +-
.../server/stack/StackManagerExtensionTest.java | 31 +-
.../ambari/server/state/ConfigHelperTest.java | 76 ++-
.../ambari/server/state/PropertyInfoTest.java | 20 +
.../ambari/server/state/UpgradeContextTest.java | 60 ++-
.../state/kerberos/KerberosDescriptorTest.java | 150 +++++-
.../stacks/2.0.6/HBASE/test_hbase_master.py | 2 +
.../python/stacks/2.0.6/HDFS/test_datanode.py | 17 +
.../python/stacks/2.0.6/HDFS/test_namenode.py | 33 ++
.../stacks/2.6/ZEPPELIN/test_zeppelin_070.py | 101 +---
.../src/test/python/stacks/utils/RMFTestCase.py | 4 +-
.../resources/extensions/EXT/0.2/metainfo.xml | 2 +-
.../resources/extensions/EXT/0.3/metainfo.xml | 2 +-
.../services/HDFS/configuration/hdfs-site.xml | 8 +
.../stacks_with_extensions/HDP/0.4/metainfo.xml | 22 +
.../HDP/0.4/repos/repoinfo.xml | 63 +++
.../HDP/0.4/services/HBASE/metainfo.xml | 26 ++
.../0.4/services/HDFS/configuration/global.xml | 145 ++++++
.../services/HDFS/configuration/hadoop-env.xml | 223 +++++++++
.../services/HDFS/configuration/hbase-site.xml | 137 ++++++
.../services/HDFS/configuration/hdfs-log4j.xml | 199 ++++++++
.../services/HDFS/configuration/hdfs-site.xml | 396 ++++++++++++++++
.../HDP/0.4/services/HDFS/metainfo.xml | 30 ++
.../0.4/services/HDFS/package/dummy-script.py | 20 +
.../HDP/0.4/services/HIVE/metainfo.xml | 26 ++
.../HDP/0.4/services/MAPREDUCE/metainfo.xml | 23 +
.../HDP/0.4/services/ZOOKEEPER/metainfo.xml | 26 ++
ambari-web/app/assets/test/tests.js | 2 +
ambari-web/app/controllers.js | 1 +
.../app/controllers/main/service/info/metric.js | 468 +++++++++++++++++++
.../controllers/main/service/info/summary.js | 449 +-----------------
.../service/widgets/create/wizard_controller.js | 2 +-
ambari-web/app/messages.js | 1 +
ambari-web/app/styles/common.less | 1 +
.../app/styles/enhanced_service_dashboard.less | 26 +-
.../app/styles/theme/bootstrap-ambari.css | 64 ++-
ambari-web/app/styles/top-nav.less | 22 +
ambari-web/app/templates/application.hbs | 39 +-
.../app/templates/main/service/info/metrics.hbs | 104 +++++
.../app/templates/main/service/info/summary.hbs | 84 ----
ambari-web/app/templates/main/service/item.hbs | 7 +-
ambari-web/app/views.js | 1 +
ambari-web/app/views/main/service/info/menu.js | 7 +
.../app/views/main/service/info/metrics_view.js | 290 ++++++++++++
.../app/views/main/service/info/summary.js | 315 ++-----------
ambari-web/app/views/main/service/item.js | 6 +
.../main/service/info/metric_test.js | 110 +++++
.../main/service/info/summary_test.js | 76 ---
.../main/service/info/metrics_view_test.js | 334 +++++++++++++
.../views/main/service/info/summary_test.js | 281 +----------
.../savedQueries/SavedQueryService.java | 46 +-
.../resources/ui/app/routes/queries/query.js | 95 ++--
.../resources/ui/app/services/saved-queries.js | 21 +
116 files changed, 4817 insertions(+), 1700 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ambari/blob/045d9bfe/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ambari/blob/045d9bfe/ambari-server/src/main/java/org/apache/ambari/server/controller/ServiceComponentHostResponse.java
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ambari/blob/045d9bfe/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/HostComponentResourceProvider.java
----------------------------------------------------------------------
diff --cc ambari-server/src/main/java/org/apache/ambari/server/controller/internal/HostComponentResourceProvider.java
index 8f1dc7c,6708560..696d395
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/HostComponentResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/HostComponentResourceProvider.java
@@@ -83,28 -80,39 +83,29 @@@ public class HostComponentResourceProvi
// ----- Property ID constants ---------------------------------------------
// Host Components
- public static final String HOST_COMPONENT_CLUSTER_NAME_PROPERTY_ID
- = PropertyHelper.getPropertyId("HostRoles", "cluster_name");
- public static final String HOST_COMPONENT_SERVICE_NAME_PROPERTY_ID
- = PropertyHelper.getPropertyId("HostRoles", "service_name");
- public static final String HOST_COMPONENT_COMPONENT_NAME_PROPERTY_ID
- = PropertyHelper.getPropertyId("HostRoles", "component_name");
- public static final String HOST_COMPONENT_DISPLAY_NAME_PROPERTY_ID
- = PropertyHelper.getPropertyId("HostRoles", "display_name");
- public static final String HOST_COMPONENT_HOST_NAME_PROPERTY_ID
- = PropertyHelper.getPropertyId("HostRoles", "host_name");
- public static final String HOST_COMPONENT_PUBLIC_HOST_NAME_PROPERTY_ID
- = PropertyHelper.getPropertyId("HostRoles", "public_host_name");
- public static final String HOST_COMPONENT_STATE_PROPERTY_ID
- = PropertyHelper.getPropertyId("HostRoles", "state");
- public static final String HOST_COMPONENT_DESIRED_STATE_PROPERTY_ID
- = PropertyHelper.getPropertyId("HostRoles", "desired_state");
- public static final String HOST_COMPONENT_VERSION_PROPERTY_ID
- = PropertyHelper.getPropertyId("HostRoles", "version");
- public static final String HOST_COMPONENT_DESIRED_STACK_ID_PROPERTY_ID
- = PropertyHelper.getPropertyId("HostRoles", "desired_stack_id");
- public static final String HOST_COMPONENT_DESIRED_REPOSITORY_VERSION
- = PropertyHelper.getPropertyId("HostRoles", "desired_repository_version");
- public static final String HOST_COMPONENT_ACTUAL_CONFIGS_PROPERTY_ID
- = PropertyHelper.getPropertyId("HostRoles", "actual_configs");
- public static final String HOST_COMPONENT_STALE_CONFIGS_PROPERTY_ID
- = PropertyHelper.getPropertyId("HostRoles", "stale_configs");
- public static final String HOST_COMPONENT_RELOAD_CONFIGS_PROPERTY_ID
- = PropertyHelper.getPropertyId("HostRoles", "reload_configs");
- public static final String HOST_COMPONENT_DESIRED_ADMIN_STATE_PROPERTY_ID
- = PropertyHelper.getPropertyId("HostRoles", "desired_admin_state");
- public static final String HOST_COMPONENT_MAINTENANCE_STATE_PROPERTY_ID
- = "HostRoles/maintenance_state";
- public static final String HOST_COMPONENT_UPGRADE_STATE_PROPERTY_ID = "HostRoles/upgrade_state";
+ public static final String HOST_COMPONENT_CLUSTER_ID_PROPERTY_ID = RESPONSE_KEY + PropertyHelper.EXTERNAL_PATH_SEP + "cluster_id";
+ public static final String HOST_COMPONENT_CLUSTER_NAME_PROPERTY_ID = RESPONSE_KEY + PropertyHelper.EXTERNAL_PATH_SEP + "cluster_name";
+ public static final String HOST_COMPONENT_SERVICE_GROUP_ID_PROPERTY_ID = RESPONSE_KEY + PropertyHelper.EXTERNAL_PATH_SEP + "service_group_id";
+ public static final String HOST_COMPONENT_SERVICE_GROUP_NAME_PROPERTY_ID = RESPONSE_KEY + PropertyHelper.EXTERNAL_PATH_SEP + "service_group_name";
+ public static final String HOST_COMPONENT_SERVICE_ID_PROPERTY_ID = RESPONSE_KEY + PropertyHelper.EXTERNAL_PATH_SEP + "service_id";
+ public static final String HOST_COMPONENT_SERVICE_NAME_PROPERTY_ID = RESPONSE_KEY + PropertyHelper.EXTERNAL_PATH_SEP + "service_name";
+ public static final String HOST_COMPONENT_SERVICE_TYPE_PROPERTY_ID = RESPONSE_KEY + PropertyHelper.EXTERNAL_PATH_SEP + "service_type";
+ public static final String HOST_COMPONENT_HOST_COMPONENT_ID_PROPERTY_ID = RESPONSE_KEY + PropertyHelper.EXTERNAL_PATH_SEP + "id";
+ public static final String HOST_COMPONENT_COMPONENT_NAME_PROPERTY_ID = RESPONSE_KEY + PropertyHelper.EXTERNAL_PATH_SEP + "component_name";
+ public static final String HOST_COMPONENT_DISPLAY_NAME_PROPERTY_ID = RESPONSE_KEY + PropertyHelper.EXTERNAL_PATH_SEP + "display_name";
+ public static final String HOST_COMPONENT_HOST_NAME_PROPERTY_ID = RESPONSE_KEY + PropertyHelper.EXTERNAL_PATH_SEP + "host_name";
+ public static final String HOST_COMPONENT_PUBLIC_HOST_NAME_PROPERTY_ID = RESPONSE_KEY + PropertyHelper.EXTERNAL_PATH_SEP + "public_host_name";
+ public static final String HOST_COMPONENT_STATE_PROPERTY_ID = RESPONSE_KEY + PropertyHelper.EXTERNAL_PATH_SEP + "state";
+ public static final String HOST_COMPONENT_DESIRED_STATE_PROPERTY_ID = RESPONSE_KEY + PropertyHelper.EXTERNAL_PATH_SEP + "desired_state";
+ public static final String HOST_COMPONENT_VERSION_PROPERTY_ID = RESPONSE_KEY + PropertyHelper.EXTERNAL_PATH_SEP + "version";
+ public static final String HOST_COMPONENT_DESIRED_STACK_ID_PROPERTY_ID = RESPONSE_KEY + PropertyHelper.EXTERNAL_PATH_SEP + "desired_stack_id";
+ public static final String HOST_COMPONENT_DESIRED_REPOSITORY_VERSION = RESPONSE_KEY + PropertyHelper.EXTERNAL_PATH_SEP + "desired_repository_version";
+ public static final String HOST_COMPONENT_ACTUAL_CONFIGS_PROPERTY_ID = RESPONSE_KEY + PropertyHelper.EXTERNAL_PATH_SEP + "actual_configs";
+ public static final String HOST_COMPONENT_STALE_CONFIGS_PROPERTY_ID = RESPONSE_KEY + PropertyHelper.EXTERNAL_PATH_SEP + "stale_configs";
++ public static final String HOST_COMPONENT_RELOAD_CONFIGS_PROPERTY_ID = PropertyHelper.getPropertyId("HostRoles", "reload_configs");
+ public static final String HOST_COMPONENT_DESIRED_ADMIN_STATE_PROPERTY_ID = RESPONSE_KEY + PropertyHelper.EXTERNAL_PATH_SEP + "desired_admin_state";
+ public static final String HOST_COMPONENT_MAINTENANCE_STATE_PROPERTY_ID = RESPONSE_KEY + PropertyHelper.EXTERNAL_PATH_SEP + "maintenance_state";
+ public static final String HOST_COMPONENT_UPGRADE_STATE_PROPERTY_ID = RESPONSE_KEY + PropertyHelper.EXTERNAL_PATH_SEP + "upgrade_state";
//Parameters from the predicate
private static final String QUERY_PARAMETERS_RUN_SMOKE_TEST_ID = "params/run_smoke_test";
@@@ -246,26 -222,36 +247,27 @@@
for (ServiceComponentHostResponse response : responses) {
Resource resource = new ResourceImpl(Resource.Type.HostComponent);
- setResourceProperty(resource, HOST_COMPONENT_CLUSTER_NAME_PROPERTY_ID,
- response.getClusterName(), requestedIds);
- setResourceProperty(resource, HOST_COMPONENT_SERVICE_NAME_PROPERTY_ID,
- response.getServiceName(), requestedIds);
- setResourceProperty(resource, HOST_COMPONENT_COMPONENT_NAME_PROPERTY_ID,
- response.getComponentName(), requestedIds);
- setResourceProperty(resource, HOST_COMPONENT_DISPLAY_NAME_PROPERTY_ID,
- response.getDisplayName(), requestedIds);
- setResourceProperty(resource, HOST_COMPONENT_HOST_NAME_PROPERTY_ID,
- response.getHostname(), requestedIds);
- setResourceProperty(resource, HOST_COMPONENT_PUBLIC_HOST_NAME_PROPERTY_ID,
- response.getPublicHostname(), requestedIds);
- setResourceProperty(resource, HOST_COMPONENT_STATE_PROPERTY_ID,
- response.getLiveState(), requestedIds);
- setResourceProperty(resource, HOST_COMPONENT_DESIRED_STATE_PROPERTY_ID,
- response.getDesiredState(), requestedIds);
- setResourceProperty(resource, HOST_COMPONENT_VERSION_PROPERTY_ID, response.getVersion(),
- requestedIds);
- setResourceProperty(resource, HOST_COMPONENT_DESIRED_STACK_ID_PROPERTY_ID,
- response.getDesiredStackVersion(), requestedIds);
- setResourceProperty(resource, HOST_COMPONENT_ACTUAL_CONFIGS_PROPERTY_ID,
- response.getActualConfigs(), requestedIds);
- setResourceProperty(resource, HOST_COMPONENT_STALE_CONFIGS_PROPERTY_ID,
- response.isStaleConfig(), requestedIds);
- setResourceProperty(resource, HOST_COMPONENT_RELOAD_CONFIGS_PROPERTY_ID,
- response.isReloadConfig(), requestedIds);
- setResourceProperty(resource, HOST_COMPONENT_UPGRADE_STATE_PROPERTY_ID,
- response.getUpgradeState(), requestedIds);
- setResourceProperty(resource, HOST_COMPONENT_DESIRED_REPOSITORY_VERSION,
- response.getDesiredRepositoryVersion(), requestedIds);
+ setResourceProperty(resource, HOST_COMPONENT_CLUSTER_NAME_PROPERTY_ID, response.getClusterName(), requestedIds);
+ setResourceProperty(resource, HOST_COMPONENT_CLUSTER_ID_PROPERTY_ID, response.getClusterId(), requestedIds);
+ setResourceProperty(resource, HOST_COMPONENT_SERVICE_GROUP_ID_PROPERTY_ID, response.getServiceGroupId(), requestedIds);
+ setResourceProperty(resource, HOST_COMPONENT_SERVICE_GROUP_NAME_PROPERTY_ID, response.getServiceGroupName(), requestedIds);
+ setResourceProperty(resource, HOST_COMPONENT_SERVICE_ID_PROPERTY_ID, response.getServiceId(), requestedIds);
+ setResourceProperty(resource, HOST_COMPONENT_SERVICE_NAME_PROPERTY_ID, response.getServiceName(), requestedIds);
+ setResourceProperty(resource, HOST_COMPONENT_SERVICE_TYPE_PROPERTY_ID, response.getServiceType(), requestedIds);
+ setResourceProperty(resource, HOST_COMPONENT_HOST_COMPONENT_ID_PROPERTY_ID, response.getHostComponentId(), requestedIds);
+ setResourceProperty(resource, HOST_COMPONENT_COMPONENT_NAME_PROPERTY_ID, response.getComponentName(), requestedIds);
+ setResourceProperty(resource, HOST_COMPONENT_DISPLAY_NAME_PROPERTY_ID, response.getDisplayName(), requestedIds);
+ setResourceProperty(resource, HOST_COMPONENT_HOST_NAME_PROPERTY_ID, response.getHostname(), requestedIds);
+ setResourceProperty(resource, HOST_COMPONENT_PUBLIC_HOST_NAME_PROPERTY_ID, response.getPublicHostname(), requestedIds);
+ setResourceProperty(resource, HOST_COMPONENT_STATE_PROPERTY_ID, response.getLiveState(), requestedIds);
+ setResourceProperty(resource, HOST_COMPONENT_DESIRED_STATE_PROPERTY_ID, response.getDesiredState(), requestedIds);
+ setResourceProperty(resource, HOST_COMPONENT_VERSION_PROPERTY_ID, response.getVersion(), requestedIds);
+ setResourceProperty(resource, HOST_COMPONENT_DESIRED_STACK_ID_PROPERTY_ID, response.getDesiredStackVersion(), requestedIds);
+ setResourceProperty(resource, HOST_COMPONENT_ACTUAL_CONFIGS_PROPERTY_ID, response.getActualConfigs(), requestedIds);
+ setResourceProperty(resource, HOST_COMPONENT_STALE_CONFIGS_PROPERTY_ID, response.isStaleConfig(), requestedIds);
++ setResourceProperty(resource, HOST_COMPONENT_RELOAD_CONFIGS_PROPERTY_ID, response.isReloadConfig(), requestedIds);
+ setResourceProperty(resource, HOST_COMPONENT_UPGRADE_STATE_PROPERTY_ID, response.getUpgradeState(), requestedIds);
+ setResourceProperty(resource, HOST_COMPONENT_DESIRED_REPOSITORY_VERSION, response.getDesiredRepositoryVersion(), requestedIds);
if (response.getAdminState() != null) {
setResourceProperty(resource, HOST_COMPONENT_DESIRED_ADMIN_STATE_PROPERTY_ID,
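Worth noting in the hunk above: every externally visible field is addressed by a "Category/name" path (now assembled from RESPONSE_KEY and PropertyHelper.EXTERNAL_PATH_SEP instead of PropertyHelper.getPropertyId), and setResourceProperty only copies a value into the resource when that path was actually requested. A rough Python paraphrase of that contract, using made-up names rather than the Ambari API:

# Hypothetical sketch of the "Category/name" property-path convention;
# none of these names come from the Ambari code base.
PATH_SEP = "/"
RESPONSE_KEY = "HostRoles"

def property_id(category, name):
    # e.g. property_id("HostRoles", "component_name") -> "HostRoles/component_name"
    return category + PATH_SEP + name

def set_resource_property(resource, prop_id, value, requested_ids):
    # Only populate fields the caller asked for, mirroring setResourceProperty().
    if prop_id in requested_ids:
        resource[prop_id] = value

resource = {}
requested = {property_id(RESPONSE_KEY, "component_name")}
set_resource_property(resource, property_id(RESPONSE_KEY, "component_name"),
                      "ZEPPELIN_MASTER", requested)
set_resource_property(resource, property_id(RESPONSE_KEY, "state"),
                      "STARTED", requested)  # dropped: not requested
assert resource == {"HostRoles/component_name": "ZEPPELIN_MASTER"}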
http://git-wip-us.apache.org/repos/asf/ambari/blob/045d9bfe/ambari-server/src/main/java/org/apache/ambari/server/events/listeners/upgrade/HostVersionOutOfSyncListener.java
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ambari/blob/045d9bfe/ambari-server/src/main/java/org/apache/ambari/server/stack/StackModule.java
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ambari/blob/045d9bfe/ambari-server/src/main/java/org/apache/ambari/server/state/StackInfo.java
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ambari/blob/045d9bfe/ambari-server/src/main/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostImpl.java
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ambari/blob/045d9bfe/ambari-server/src/main/resources/properties.json
----------------------------------------------------------------------
diff --cc ambari-server/src/main/resources/properties.json
index 3e73217,1d12f83..a995049
--- a/ambari-server/src/main/resources/properties.json
+++ b/ambari-server/src/main/resources/properties.json
@@@ -57,11 -53,10 +57,12 @@@
"HostRoles/actual_configs",
"params/run_smoke_test",
"HostRoles/stale_configs",
+ "HostRoles/reload_configs",
"HostRoles/desired_admin_state",
"HostRoles/maintenance_state",
+ "HostRoles/service_id",
"HostRoles/service_name",
+ "HostRoles/service_type",
"HostRoles/upgrade_state",
"_"
],
http://git-wip-us.apache.org/repos/asf/ambari/blob/045d9bfe/ambari-server/src/test/java/org/apache/ambari/server/controller/KerberosHelperTest.java
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ambari/blob/045d9bfe/ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerExtensionTest.java
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ambari/blob/045d9bfe/ambari-server/src/test/java/org/apache/ambari/server/state/ConfigHelperTest.java
----------------------------------------------------------------------
[08/31] ambari git commit: AMBARI-22160. hadooplzo package
installation failed on devdeploys (aonishuk)
Posted by jl...@apache.org.
AMBARI-22160. hadooplzo package installation failed on devdeploys (aonishuk)
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/fc80a183
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/fc80a183
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/fc80a183
Branch: refs/heads/branch-feature-AMBARI-14714
Commit: fc80a1837cc613160e3c60cc3290b7e517b5cd45
Parents: 6eb273e
Author: Andrew Onishuk <ao...@hortonworks.com>
Authored: Fri Oct 6 16:22:08 2017 +0300
Committer: Andrew Onishuk <ao...@hortonworks.com>
Committed: Mon Oct 9 16:06:47 2017 +0300
----------------------------------------------------------------------
.../libraries/script/script.py | 44 +-
.../resources/Ambari-DDL-AzureDB-CREATE.sql | 2147 ++++++++++++++++++
ambari-server/snippet/Snippet.java | 8 +
.../stacks/2.0.6/HBASE/test_hbase_master.py | 2 +
.../src/test/python/stacks/utils/RMFTestCase.py | 4 +-
5 files changed, 2190 insertions(+), 15 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ambari/blob/fc80a183/ambari-common/src/main/python/resource_management/libraries/script/script.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/script/script.py b/ambari-common/src/main/python/resource_management/libraries/script/script.py
index d5b4469..cd8fce4 100644
--- a/ambari-common/src/main/python/resource_management/libraries/script/script.py
+++ b/ambari-common/src/main/python/resource_management/libraries/script/script.py
@@ -501,6 +501,7 @@ class Script(object):
Script.stack_version_from_distro_select = pkg_provider.get_installed_package_version(
stack_tools.get_stack_tool_package(stack_tools.STACK_SELECTOR_NAME))
+
return Script.stack_version_from_distro_select
@@ -525,22 +526,20 @@ class Script(object):
"""
This function replaces ${stack_version} placeholder with actual version. If the package
version is passed from the server, use that as an absolute truth.
-
+
:param name name of the package
:param repo_version actual version of the repo currently installing
"""
- stack_version_package_formatted = ""
+ if not STACK_VERSION_PLACEHOLDER in name:
+ return name
- if not repo_version:
- repo_version = self.get_stack_version_before_packages_installed()
+ stack_version_package_formatted = ""
package_delimiter = '-' if OSCheck.is_ubuntu_family() else '_'
# repositoryFile is the truth
# package_version should be made to the form W_X_Y_Z_nnnn
package_version = default("repositoryFile/repoVersion", None)
- if package_version is not None:
- package_version = package_version.replace('.', package_delimiter).replace('-', package_delimiter)
# TODO remove legacy checks
if package_version is None:
@@ -550,6 +549,16 @@ class Script(object):
if package_version is None:
package_version = default("hostLevelParams/package_version", None)
+ if package_version is None or '-' not in package_version:
+ self.load_available_packages()
+ package_name = self.get_package_from_available(name, self.available_packages_in_repos)
+ if package_name is None:
+ raise Fail("Cannot match package for regexp name {0}. Available packages: {1}".format(name, self.available_packages_in_repos))
+ return package_name
+
+ if package_version is not None:
+ package_version = package_version.replace('.', package_delimiter).replace('-', package_delimiter)
+
# The cluster effective version comes down when the version is known after the initial
# install. In that case we should not be guessing which version when invoking INSTALL, but
# use the supplied version to build the package_version
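When no concrete version is known, the code above falls back to get_package_from_available(), matching the placeholder name as a pattern against everything the repos offer and raising Fail when nothing matches. A toy illustration of that fallback (the pattern semantics are assumed here, not taken from the Ambari implementation):

import re

# Toy stand-in for get_package_from_available(); the real matching rules
# live in the Ambari agent and may differ.
def get_package_from_available(name, available):
    pattern = re.compile(name.replace("${stack_version}", r"[\d_]+") + "$")
    for candidate in available:
        if pattern.match(candidate):
            return candidate
    return None

available = ["hadooplzo_2_6_3_0_235", "hbase_2_6_3_0_235"]
assert get_package_from_available("hadooplzo_${stack_version}", available) \
       == "hadooplzo_2_6_3_0_235"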
@@ -568,6 +577,7 @@ class Script(object):
# Wildcards cause a lot of troubles with installing packages, if the version contains wildcards we try to specify it.
if not package_version or '*' in package_version:
+ repo_version = self.get_stack_version_before_packages_installed()
stack_version_package_formatted = repo_version.replace('.', package_delimiter).replace('-', package_delimiter) if STACK_VERSION_PLACEHOLDER in name else name
package_name = name.replace(STACK_VERSION_PLACEHOLDER, stack_version_package_formatted)
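The net effect of the reordered logic: names without the ${stack_version} placeholder are returned untouched, the repository file's version is preferred when present, and delimiters are normalized per OS family before substitution. A simplified, hypothetical sketch of that substitution step (not the real Script.format_package_name):

# Hypothetical condensed version of the substitution flow shown in the diff.
STACK_VERSION_PLACEHOLDER = "${stack_version}"

def format_package_name(name, repo_version, is_ubuntu):
    if STACK_VERSION_PLACEHOLDER not in name:
        return name                      # nothing to substitute
    delimiter = '-' if is_ubuntu else '_'
    # e.g. "2.6.3.0-235" -> "2_6_3_0_235" on RHEL-like systems
    formatted = repo_version.replace('.', delimiter).replace('-', delimiter)
    return name.replace(STACK_VERSION_PLACEHOLDER, formatted)

assert format_package_name("hadooplzo_${stack_version}", "2.6.3.0-235", False) \
       == "hadooplzo_2_6_3_0_235"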
@@ -760,6 +770,17 @@ class Script(object):
"""
self.install_packages(env)
+ def load_available_packages(self):
+ if self.available_packages_in_repos:
+ return self.available_packages_in_repos
+
+ pkg_provider = get_provider("Package")
+ try:
+ self.available_packages_in_repos = pkg_provider.get_available_packages_in_repos(Script.config['repositoryFile']['repositories'])
+ except Exception as err:
+ Logger.exception("Unable to load available packages")
+ self.available_packages_in_repos = []
+
def install_packages(self, env):
"""
List of packages that are required by service is received from the server
@@ -779,20 +800,14 @@ class Script(object):
return
pass
try:
- package_list_str = config['hostLevelParams']['package_list']
agent_stack_retry_on_unavailability = bool(config['hostLevelParams']['agent_stack_retry_on_unavailability'])
agent_stack_retry_count = int(config['hostLevelParams']['agent_stack_retry_count'])
- pkg_provider = get_provider("Package")
- try:
- available_packages_in_repos = pkg_provider.get_available_packages_in_repos(config['repositoryFile']['repositories'])
- except Exception as err:
- Logger.exception("Unable to load available packages")
- available_packages_in_repos = []
+ package_list_str = config['hostLevelParams']['package_list']
if isinstance(package_list_str, basestring) and len(package_list_str) > 0:
package_list = json.loads(package_list_str)
for package in package_list:
if self.check_package_condition(package):
- name = self.get_package_from_available(package['name'], available_packages_in_repos)
+ name = self.format_package_name(package['name'])
# HACK: On Windows, only install ambari-metrics packages using Choco Package Installer
# TODO: Update this once choco packages for hadoop are created. This is because, service metainfo.xml support
# <osFamily>any<osFamily> which would cause installation failure on Windows.
@@ -1092,5 +1107,6 @@ class Script(object):
def __init__(self):
+ self.available_packages_in_repos = []
if Script.instance is not None:
raise Fail("An instantiation already exists! Use, get_instance() method.")
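The new load_available_packages() helper is a small memoization: the repository scan is expensive, so the result is cached on the instance and reused, and on failure it degrades to an empty list instead of aborting the install. A minimal sketch of the same pattern, with a stand-in provider:

# Minimal sketch of the caching pattern; FakeProvider is a stand-in, not
# the real resource_management package provider.
class FakeProvider(object):
    def get_available_packages_in_repos(self, repos):
        return ["hadooplzo_2_6_3_0_235", "hbase_2_6_3_0_235"]

class Agent(object):
    def __init__(self):
        self.available_packages_in_repos = []

    def load_available_packages(self, repos):
        if self.available_packages_in_repos:
            return self.available_packages_in_repos   # cached result
        try:
            self.available_packages_in_repos = \
                FakeProvider().get_available_packages_in_repos(repos)
        except Exception:
            # Degrade gracefully: an empty list lets installation proceed.
            self.available_packages_in_repos = []
        return self.available_packages_in_repos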
[27/31] ambari git commit: AMBARI-22186 Navigation style changes.
(atkach)
Posted by jl...@apache.org.
AMBARI-22186 Navigation style changes. (atkach)
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/2d23e123
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/2d23e123
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/2d23e123
Branch: refs/heads/branch-feature-AMBARI-14714
Commit: 2d23e123fd9b514b8c21d973cb0237e8c3dd6b42
Parents: 5768294
Author: Andrii Tkach <at...@apache.org>
Authored: Tue Oct 10 16:44:35 2017 +0300
Committer: Andrii Tkach <at...@apache.org>
Committed: Tue Oct 10 18:00:24 2017 +0300
----------------------------------------------------------------------
ambari-web/app/styles/common.less | 1 +
.../app/styles/theme/bootstrap-ambari.css | 64 ++++++++++++++------
ambari-web/app/styles/top-nav.less | 22 +++++++
ambari-web/app/templates/application.hbs | 39 ++++++++----
ambari-web/app/templates/main/service/item.hbs | 2 +-
5 files changed, 96 insertions(+), 32 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ambari/blob/2d23e123/ambari-web/app/styles/common.less
----------------------------------------------------------------------
diff --git a/ambari-web/app/styles/common.less b/ambari-web/app/styles/common.less
index ddd4f3a..7753a59 100644
--- a/ambari-web/app/styles/common.less
+++ b/ambari-web/app/styles/common.less
@@ -71,6 +71,7 @@
@top-nav-menu-dropdown-border-color: #c3c3c3;
@top-nav-menu-dropdown-bg-color: #fff;
@top-nav-menu-dropdown-text-color: #333;
+@top-nav-menu-views-menu-color: #1491c1;
@-webkit-keyframes orangePulse {
from { background-color: @restart-indicator-color; }
http://git-wip-us.apache.org/repos/asf/ambari/blob/2d23e123/ambari-web/app/styles/theme/bootstrap-ambari.css
----------------------------------------------------------------------
diff --git a/ambari-web/app/styles/theme/bootstrap-ambari.css b/ambari-web/app/styles/theme/bootstrap-ambari.css
index a223949..70579e7 100644
--- a/ambari-web/app/styles/theme/bootstrap-ambari.css
+++ b/ambari-web/app/styles/theme/bootstrap-ambari.css
@@ -464,7 +464,7 @@ h2.table-title {
.nav.nav-tabs li a {
border-width: 0;
border-radius: 0;
- border-bottom: 2px solid transparent;
+ border-bottom: 3px solid transparent;
color: #6B6C6C;
text-transform: uppercase;
}
@@ -488,7 +488,7 @@ h2.table-title {
.nav-tabs-left li,
.nav-tabs-right li {
float: none;
- margin-bottom: 2px;
+ margin-bottom: 3px;
}
.nav-tabs-left li a,
.nav-tabs-right li a {
@@ -498,25 +498,25 @@ h2.table-title {
margin-right: -1px;
}
.nav-tabs-left li a {
- border: 2px solid transparent !important;
+ border: 3px solid transparent !important;
}
.nav-tabs-left li.active a,
.nav-tabs-left li.active a:hover,
.nav-tabs-left li.active a:active,
.nav-tabs-left li.active a:focus {
- border-right: 2px solid #3FAE2A !important;
+ border-right: 3px solid #3FAE2A !important;
}
.nav-tabs-right li {
margin-left: -1px;
}
.nav-tabs-right li a {
- border: 2px solid transparent !important;
+ border: 3px solid transparent !important;
}
.nav-tabs-right li.active a,
.nav-tabs-right li.active a:hover,
.nav-tabs-right li.active a:active,
.nav-tabs-right li.active a:focus {
- border-left: 2px solid #3FAE2A !important;
+ border-left: 3px solid #3FAE2A !important;
}
.wizard {
border: 2px solid #ebecf1;
@@ -797,8 +797,7 @@ input.radio:checked + label:after {
cursor: pointer;
margin-top: 3px;
}
-.navigation-bar-container ul.nav.side-nav-header li.navigation-header .btn-group:hover span.ambari-header,
-.navigation-bar-container ul.nav.side-nav-header li.navigation-header .btn-group:hover span.toggle-icon {
+.navigation-bar-container ul.nav.side-nav-header li.navigation-header .btn-group:hover span.ambari-header {
color: #fff;
}
.navigation-bar-container ul.nav.side-nav-header li.navigation-header .btn-group span.ambari-header {
@@ -890,7 +889,7 @@ input.radio:checked + label:after {
.navigation-bar-container ul.nav.side-nav-menu li.mainmenu-li > a .navigation-icon,
.navigation-bar-container ul.nav.side-nav-footer li.mainmenu-li > a .navigation-icon {
line-height: 18px;
- font-size: 14px;
+ font-size: 16px;
color: #b8bec4;
}
.navigation-bar-container ul.nav.side-nav-menu li.navigation-footer > a .toggle-icon,
@@ -904,12 +903,14 @@ input.radio:checked + label:after {
color: #b8bec4;
padding: 3px 5px 3px 10px;
}
-.navigation-bar-container ul.nav.side-nav-menu li.navigation-footer > a,
-.navigation-bar-container ul.nav.side-nav-footer li.navigation-footer > a,
.navigation-bar-container ul.nav.side-nav-menu li.mainmenu-li > a,
.navigation-bar-container ul.nav.side-nav-footer li.mainmenu-li > a {
padding: 10px 5px 10px 20px;
}
+.navigation-bar-container ul.nav.side-nav-menu li.navigation-footer > a,
+.navigation-bar-container ul.nav.side-nav-footer li.navigation-footer > a {
+ padding: 14px 5px 14px 20px;
+}
.navigation-bar-container ul.nav.side-nav-menu li.submenu-li > a,
.navigation-bar-container ul.nav.side-nav-footer li.submenu-li > a {
padding: 10px 5px 10px 25px;
@@ -922,7 +923,7 @@ input.radio:checked + label:after {
.navigation-bar-container ul.nav.side-nav-menu li.navigation-footer a .navigation-icon,
.navigation-bar-container ul.nav.side-nav-footer li.navigation-footer a .navigation-icon {
color: #3fae2a;
- font-size: 20px;
+ font-size: 19px;
position: relative;
padding: 0 15px;
left: calc(30%);
@@ -1021,7 +1022,7 @@ input.radio:checked + label:after {
position: absolute;
pointer-events: none;
border-color: transparent;
- border-left-color: #31823a;
+ border-left-color: #3fae2a;
margin-top: -12px;
}
.navigation-bar-container ul.nav.side-nav-menu .more-actions,
@@ -1097,6 +1098,10 @@ input.radio:checked + label:after {
.navigation-bar-container.collapsed ul.nav.side-nav-footer li a .toggle-icon {
display: none;
}
+.navigation-bar-container.collapsed ul.nav.side-nav-menu li a .navigation-icon,
+.navigation-bar-container.collapsed ul.nav.side-nav-footer li a .navigation-icon {
+ font-size: 19px;
+}
.navigation-bar-container.collapsed ul.nav.side-nav-menu li.navigation-footer a .navigation-icon,
.navigation-bar-container.collapsed ul.nav.side-nav-footer li.navigation-footer a .navigation-icon {
padding: 0 5px;
@@ -1147,7 +1152,7 @@ input.radio:checked + label:after {
position: absolute;
pointer-events: none;
border-color: transparent;
- border-left-color: #31823a;
+ border-left-color: #3fae2a;
margin-top: -12px;
}
.navigation-bar-container.collapsed ul.nav.side-nav-menu .more-actions,
@@ -1189,8 +1194,10 @@ input.radio:checked + label:after {
position: relative;
top: 1px;
}
+.notifications-dropdown,
#notifications-dropdown.dropdown-menu {
- width: 400px;
+ min-width: 400px;
+ max-width: 400px;
min-height: 150px;
padding: 0px;
z-index: 1000;
@@ -1202,6 +1209,7 @@ input.radio:checked + label:after {
-moz-box-shadow: 0px 2px 10px 2px rgba(0, 0, 0, 0.29);
box-shadow: 0px 2px 10px 2px rgba(0, 0, 0, 0.29);
}
+.notifications-dropdown .popup-arrow-up,
#notifications-dropdown.dropdown-menu .popup-arrow-up {
position: absolute;
right: 37px;
@@ -1210,6 +1218,7 @@ input.radio:checked + label:after {
height: 40px;
overflow: hidden;
}
+.notifications-dropdown .popup-arrow-up:after,
#notifications-dropdown.dropdown-menu .popup-arrow-up:after {
content: "";
position: absolute;
@@ -1221,10 +1230,12 @@ input.radio:checked + label:after {
left: 10px;
box-shadow: -1px -1px 10px -2px rgba(0, 0, 0, 0.5);
}
+.notifications-dropdown .notifications-header,
#notifications-dropdown.dropdown-menu .notifications-header {
border-bottom: 1px solid #eee;
padding: 15px 20px;
}
+.notifications-dropdown .notifications-header .notifications-title,
#notifications-dropdown.dropdown-menu .notifications-header .notifications-title {
font-family: 'Roboto', sans-serif;
font-weight: normal;
@@ -1233,19 +1244,23 @@ input.radio:checked + label:after {
color: #333;
font-size: 16px;
}
+.notifications-dropdown .notifications-body,
#notifications-dropdown.dropdown-menu .notifications-body {
padding: 0px 15px;
overflow: auto;
max-height: 500px;
}
+.notifications-dropdown .notifications-body .no-alert-text,
#notifications-dropdown.dropdown-menu .notifications-body .no-alert-text {
padding: 15px 5px;
}
+.notifications-dropdown .notifications-body .table-controls,
#notifications-dropdown.dropdown-menu .notifications-body .table-controls {
padding: 10px 0px;
margin: 0px;
border-bottom: 1px solid #eee;
}
+.notifications-dropdown .notifications-body .table-controls .state-filter,
#notifications-dropdown.dropdown-menu .notifications-body .table-controls .state-filter {
padding: 0px;
font-family: 'Roboto', sans-serif;
@@ -1257,36 +1272,45 @@ input.radio:checked + label:after {
color: #666;
position: relative;
}
+.notifications-dropdown .notifications-body .table-controls .state-filter .form-control.filter-select,
#notifications-dropdown.dropdown-menu .notifications-body .table-controls .state-filter .form-control.filter-select {
font-size: 12px;
color: #666;
height: 25px;
}
+.notifications-dropdown .notifications-body .table.alerts-table,
#notifications-dropdown.dropdown-menu .notifications-body .table.alerts-table {
margin-top: 0px;
}
+.notifications-dropdown .notifications-body .table.alerts-table tbody tr,
#notifications-dropdown.dropdown-menu .notifications-body .table.alerts-table tbody tr {
cursor: pointer;
}
+.notifications-dropdown .notifications-body .table.alerts-table tbody tr.no-alert-tr:hover,
#notifications-dropdown.dropdown-menu .notifications-body .table.alerts-table tbody tr.no-alert-tr:hover {
cursor: default;
border-color: transparent;
border-bottom-color: #eee;
}
+.notifications-dropdown .notifications-body .table.alerts-table tbody tr.no-alert-tr:hover > td,
#notifications-dropdown.dropdown-menu .notifications-body .table.alerts-table tbody tr.no-alert-tr:hover > td {
border-color: transparent;
background-color: white;
}
+.notifications-dropdown .notifications-body .table.alerts-table tbody td.status,
#notifications-dropdown.dropdown-menu .notifications-body .table.alerts-table tbody td.status {
width: 9%;
padding: 15px 3px;
}
+.notifications-dropdown .notifications-body .table.alerts-table tbody td.status .alert-state-CRITICAL,
#notifications-dropdown.dropdown-menu .notifications-body .table.alerts-table tbody td.status .alert-state-CRITICAL {
color: #EF6162;
}
+.notifications-dropdown .notifications-body .table.alerts-table tbody td.status .alert-state-WARNING,
#notifications-dropdown.dropdown-menu .notifications-body .table.alerts-table tbody td.status .alert-state-WARNING {
color: #E98A40;
}
+.notifications-dropdown .notifications-body .table.alerts-table tbody td.content,
#notifications-dropdown.dropdown-menu .notifications-body .table.alerts-table tbody td.content {
width: 90%;
padding: 15px 3px 10px 3px;
@@ -1297,12 +1321,14 @@ input.radio:checked + label:after {
color: #333;
line-height: 1.3;
}
+.notifications-dropdown .notifications-body .table.alerts-table tbody td.content .name,
#notifications-dropdown.dropdown-menu .notifications-body .table.alerts-table tbody td.content .name {
font-weight: bold;
font-size: 14px;
color: #333;
margin-bottom: 5px;
}
+.notifications-dropdown .notifications-body .table.alerts-table tbody td.content .description,
#notifications-dropdown.dropdown-menu .notifications-body .table.alerts-table tbody td.content .description {
font-size: 12px;
color: #666;
@@ -1327,11 +1353,13 @@ input.radio:checked + label:after {
-webkit-hyphens: auto;
hyphens: auto;
}
+.notifications-dropdown .notifications-body .table.alerts-table tbody td.content .timestamp,
#notifications-dropdown.dropdown-menu .notifications-body .table.alerts-table tbody td.content .timestamp {
text-align: right;
font-size: 11px;
color: #999;
}
+.notifications-dropdown .notifications-footer,
#notifications-dropdown.dropdown-menu .notifications-footer {
border-top: 1px solid #eee;
padding: 15px;
@@ -1372,13 +1400,13 @@ input.radio:checked + label:after {
}
.accordion .panel-group,
.wizard .wizard-body .wizard-content .accordion .panel-group {
- margin-bottom: 0px;
+ margin-bottom: 0;
}
.accordion .panel-group .panel,
.wizard .wizard-body .wizard-content .accordion .panel-group .panel {
- border-radius: 0px;
+ border-radius: 0;
border: none;
- margin-top: 0px;
+ margin-top: 0;
padding: 0 10px;
}
.accordion .panel-group .panel .panel-heading,
http://git-wip-us.apache.org/repos/asf/ambari/blob/2d23e123/ambari-web/app/styles/top-nav.less
----------------------------------------------------------------------
diff --git a/ambari-web/app/styles/top-nav.less b/ambari-web/app/styles/top-nav.less
index 0644e52..a75d08a 100644
--- a/ambari-web/app/styles/top-nav.less
+++ b/ambari-web/app/styles/top-nav.less
@@ -109,6 +109,28 @@
.top-nav-user {
margin-top: 2px;
}
+ .ambari-views {
+ margin-top: 17px;
+ padding: 0 20px 0 10px;
+ .notifications-dropdown.dropdown-menu {
+ right: -28px;
+ min-width: 200px;
+ max-width: 300px;
+ min-height: 100px;
+ li {
+ padding: 2px 5px;
+ a {
+ font-size: 12px;
+ color: @top-nav-menu-views-menu-color;
+ }
+ }
+ }
+ i {
+ font-size: 20px;
+ color: @top-nav-menu-views-menu-color;
+ cursor: pointer;
+ }
+ }
}
#notifications-dropdown.dropdown-menu {
http://git-wip-us.apache.org/repos/asf/ambari/blob/2d23e123/ambari-web/app/templates/application.hbs
----------------------------------------------------------------------
diff --git a/ambari-web/app/templates/application.hbs b/ambari-web/app/templates/application.hbs
index 03c47db..5c47406 100644
--- a/ambari-web/app/templates/application.hbs
+++ b/ambari-web/app/templates/application.hbs
@@ -35,19 +35,7 @@
</a>
{{/if}}
<div class="btn-group">
- <div class="dropdown-toggle" data-toggle="dropdown" aria-haspopup="true" aria-expanded="false">
- <span class="ambari-header" title="Apache Ambari" {{QAAttr "ambari-title"}}>{{t app.name}}</span>
- <span class="toggle-icon glyphicon glyphicon-triangle-bottom"></span>
- </div>
- <ul class="dropdown-menu">
- {{#if view.views.length}}
- {{#each item in view.views}}
- <li><a class="" href="#" {{action "setView" item target="App.router.mainViewsController"}}>{{item.label}}</a></li>
- {{/each}}
- {{else}}
- <li class="disabled"><a href="javascript:void(null);">{{t menu.item.views.noViews}}</a></li>
- {{/if}}
- </ul>
+ <span class="ambari-header" title="Apache Ambari" {{QAAttr "ambari-title"}}>{{t app.name}}</span>
</div>
</li>
</ul>
@@ -121,6 +109,31 @@
{{/if}}
{{! user dropdown end }}
+ {{! views menu}}
+ <div class="navbar-nav navbar-right ambari-views notifications-group">
+ {{#if enableLinks}}
+ <i class="icon-th dropdown-toggle" data-toggle="dropdown" aria-haspopup="true" aria-expanded="false"></i>
+ <ul class="notifications-dropdown row dropdown-menu">
+ <div class="popup-arrow-up"></div>
+ <div class="notifications-header col-sm-12">
+ <div class="notifications-title">{{t common.views}}</div>
+ </div>
+ <div class="notifications-body col-sm-12">
+ {{#if view.views.length}}
+ {{#each item in view.views}}
+ <li>
+ <a href="#" {{action "setView" item target="App.router.mainViewsController"}}>{{item.label}}</a>
+ </li>
+ {{/each}}
+ {{else}}
+ <li class="disabled"><a href="javascript:void(null);">{{t menu.item.views.noViews}}</a></li>
+ {{/if}}
+ </div>
+ </ul>
+ {{/if}}
+ </div>
+ {{!views menu end}}
+
<div class="navbar-nav navbar-right cluster-notifications">
{{#if enableLinks}}
{{! bg label }}
http://git-wip-us.apache.org/repos/asf/ambari/blob/2d23e123/ambari-web/app/templates/main/service/item.hbs
----------------------------------------------------------------------
diff --git a/ambari-web/app/templates/main/service/item.hbs b/ambari-web/app/templates/main/service/item.hbs
index df26a9d..9abcaf3 100644
--- a/ambari-web/app/templates/main/service/item.hbs
+++ b/ambari-web/app/templates/main/service/item.hbs
@@ -25,7 +25,7 @@
<div class="service-button">
{{#if view.isMaintenanceActive}}
<div class="btn-group display-inline-block">
- <button class="btn btn-default dropdown-toggle" id="service-actions-dropdown-btn" data-toggle="dropdown" href="#">
+ <button class="btn btn-success dropdown-toggle" id="service-actions-dropdown-btn" data-toggle="dropdown" href="#">
{{t common.actions}}
<span class="caret"></span>
</button>
[17/31] ambari git commit: AMBARI-22079. Addendum: Upgrade Yarn
version for Logsearch Web (Istvan Tobias via oleewere)
Posted by jl...@apache.org.
AMBARI-22079. Addendum: Upgrade Yarn version for Logsearch Web (Istvan Tobias via oleewere)
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/0ca85137
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/0ca85137
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/0ca85137
Branch: refs/heads/branch-feature-AMBARI-14714
Commit: 0ca85137fdf1667e3fddb28c220a6e1f6e88a2ee
Parents: 7e0fe29
Author: Istvan Tobias <to...@gmal.com>
Authored: Mon Oct 9 21:21:52 2017 +0200
Committer: Oliver Szabo <ol...@gmail.com>
Committed: Mon Oct 9 21:21:52 2017 +0200
----------------------------------------------------------------------
ambari-logsearch/ambari-logsearch-web/pom.xml | 11 +++++++----
1 file changed, 7 insertions(+), 4 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ambari/blob/0ca85137/ambari-logsearch/ambari-logsearch-web/pom.xml
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-web/pom.xml b/ambari-logsearch/ambari-logsearch-web/pom.xml
index d641d5a..a0621d9 100644
--- a/ambari-logsearch/ambari-logsearch-web/pom.xml
+++ b/ambari-logsearch/ambari-logsearch-web/pom.xml
@@ -69,13 +69,16 @@
</configuration>
</execution>
<execution>
- <id>generate dist</id>
- <phase>generate-resources</phase>
+ <id>webpack build</id>
<goals>
- <goal>yarn</goal>
+ <goal>webpack</goal>
</goals>
+ <!-- optional: the default phase is "generate-resources" -->
+ <phase>generate-resources</phase>
<configuration>
- <arguments>build-prod</arguments>
+ <!-- optional: if not specified, it will run webpack's default
+ build (and you can remove this whole <configuration> section.) -->
+ <arguments>-p</arguments>
</configuration>
</execution>
<execution>
[18/31] ambari git commit: Revert "Revert "AMBARI-21205 Make
ToggleKerberos and AddDeleteService experimental features (Duc Le via
rzang)""
Posted by jl...@apache.org.
Revert "Revert "AMBARI-21205 Make ToggleKerberos and AddDeleteService experimental features (Duc Le via rzang)""
This reverts commit 7e0fe2913619f4b70097e3ebcd7fb89e84eee62d.
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/4242225c
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/4242225c
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/4242225c
Branch: refs/heads/branch-feature-AMBARI-14714
Commit: 4242225ca6aa89512886bcc74eb5c49e65bd259c
Parents: 0ca8513
Author: Jaimin Jetly <ja...@hortonworks.com>
Authored: Mon Oct 9 13:03:49 2017 -0700
Committer: Jaimin Jetly <ja...@hortonworks.com>
Committed: Mon Oct 9 13:04:36 2017 -0700
----------------------------------------------------------------------
ambari-web/app/config.js | 6 ++--
ambari-web/app/routes/add_service_routes.js | 2 +-
ambari-web/app/routes/main.js | 2 +-
.../app/templates/main/admin/kerberos.hbs | 34 +++++++++++---------
.../main/service/all_services_actions.hbs | 6 ++--
ambari-web/app/views/main/admin.js | 14 ++++----
.../main/admin/stack_upgrade/services_view.js | 2 +-
ambari-web/app/views/main/menu.js | 16 +++++----
ambari-web/app/views/main/service/item.js | 2 +-
.../admin/stack_upgrade/services_view_test.js | 1 +
10 files changed, 49 insertions(+), 36 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ambari/blob/4242225c/ambari-web/app/config.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/config.js b/ambari-web/app/config.js
index ba1b75d..0963f70 100644
--- a/ambari-web/app/config.js
+++ b/ambari-web/app/config.js
@@ -86,9 +86,11 @@ App.supports = {
addingNewRepository: false,
kerberosStackAdvisor: true,
logCountVizualization: false,
- manageJournalNode: true,
createAlerts: false,
- enabledWizardForHostOrderedUpgrade: true
+ enabledWizardForHostOrderedUpgrade: true,
+ manageJournalNode: true,
+ enableToggleKerberos: true,
+ enableAddDeleteServices: true
};
if (App.enableExperimental) {
http://git-wip-us.apache.org/repos/asf/ambari/blob/4242225c/ambari-web/app/routes/add_service_routes.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/routes/add_service_routes.js b/ambari-web/app/routes/add_service_routes.js
index 1615f0d..75b3586 100644
--- a/ambari-web/app/routes/add_service_routes.js
+++ b/ambari-web/app/routes/add_service_routes.js
@@ -24,7 +24,7 @@ module.exports = App.WizardRoute.extend({
route: '/service/add',
enter: function (router) {
- if (App.isAuthorized('SERVICE.ADD_DELETE_SERVICES')) {
+ if (App.isAuthorized('SERVICE.ADD_DELETE_SERVICES') && App.supports.enableAddDeleteServices) {
// `getSecurityStatus` call is required to retrieve information related to kerberos type: Manual or automated kerberos
router.get('mainController').isLoading.call(router.get('clusterController'),'isClusterNameLoaded').done(function () {
App.router.get('mainAdminKerberosController').getSecurityStatus().always(function () {
http://git-wip-us.apache.org/repos/asf/ambari/blob/4242225c/ambari-web/app/routes/main.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/routes/main.js b/ambari-web/app/routes/main.js
index 30cc8aa..7ed18de 100644
--- a/ambari-web/app/routes/main.js
+++ b/ambari-web/app/routes/main.js
@@ -460,7 +460,7 @@ module.exports = Em.Route.extend(App.RouterRedirections, {
route: '/kerberos',
enter: function (router, transition) {
- if (router.get('loggedIn') && !App.isAuthorized('CLUSTER.TOGGLE_KERBEROS')) {
+ if (router.get('loggedIn') && (!App.isAuthorized('CLUSTER.TOGGLE_KERBEROS') || !App.supports.enableToggleKerberos)) {
router.transitionTo('main.dashboard.index');
}
},
http://git-wip-us.apache.org/repos/asf/ambari/blob/4242225c/ambari-web/app/templates/main/admin/kerberos.hbs
----------------------------------------------------------------------
diff --git a/ambari-web/app/templates/main/admin/kerberos.hbs b/ambari-web/app/templates/main/admin/kerberos.hbs
index e7bb618..2b41122 100644
--- a/ambari-web/app/templates/main/admin/kerberos.hbs
+++ b/ambari-web/app/templates/main/admin/kerberos.hbs
@@ -20,20 +20,22 @@
<div>
<p class="text-success">{{t admin.security.enabled}}
{{#isAuthorized "CLUSTER.TOGGLE_KERBEROS"}}
- <button class="btn btn-padding btn-warning admin-disable-security-btn" {{bindAttr disabled="isKerberosButtonsDisabled"}} {{action notifySecurityOffPopup target="controller"}}>{{t admin.kerberos.button.disable}} </button>
- {{#unless isManualKerberos}}
- <button class="btn btn-success" id="regenerate-keytabs" {{bindAttr disabled="isKerberosButtonsDisabled"}} {{action regenerateKeytabs target="controller"}}>
- <i class="glyphicon glyphicon-repeat"></i> {{t admin.kerberos.button.regenerateKeytabs}}</button>
- {{#if App.isCredentialStorePersistent}}
- <button class="btn btn-primary" {{action showManageKDCCredentialsPopup target="controller"}}>{{t admin.kerberos.credentials.store.menu.label}}</button>
+ {{#if App.supports.enableToggleKerberos}}
+ <button class="btn btn-padding btn-warning admin-disable-security-btn" {{bindAttr disabled="isKerberosButtonsDisabled"}} {{action notifySecurityOffPopup target="controller"}}>{{t admin.kerberos.button.disable}} </button>
+ {{#unless isManualKerberos}}
+ <button class="btn btn-success" id="regenerate-keytabs" {{bindAttr disabled="isKerberosButtonsDisabled"}} {{action regenerateKeytabs target="controller"}}>
+ <i class="glyphicon glyphicon-repeat"></i> {{t admin.kerberos.button.regenerateKeytabs}}</button>
+ {{#if App.isCredentialStorePersistent}}
+ <button class="btn btn-primary" {{action showManageKDCCredentialsPopup target="controller"}}>{{t admin.kerberos.credentials.store.menu.label}}</button>
+ {{/if}}
+ {{/unless}}
+ <br/>
+ {{#unless isEditMode}}
+ <a href="#" {{action makeConfigsEditable target="controller"}} class="pull-right">
+ {{t common.edit}}
+ </a>
+ {{/unless}}
{{/if}}
- {{/unless}}
- <br/>
- {{#unless isEditMode}}
- <a href="#" {{action makeConfigsEditable target="controller"}} class="pull-right">
- {{t common.edit}}
- </a>
- {{/unless}}
{{/isAuthorized}}
</p>
</div>
@@ -51,8 +53,10 @@
<div>
<p class="muted background-text">{{t admin.security.disabled}}
{{#isAuthorized "CLUSTER.TOGGLE_KERBEROS"}}
- <a class="btn btn-padding btn-success admin-enable-security-btn" {{action checkAndStartKerberosWizard target="controller"}}>{{t admin.kerberos.button.enable}} </a>
- <br/>
+ {{#if App.supports.enableToggleKerberos}}
+ <a class="btn btn-padding btn-success admin-enable-security-btn" {{action checkAndStartKerberosWizard target="controller"}}>{{t admin.kerberos.button.enable}} </a>
+ <br/>
+ {{/if}}
{{/isAuthorized}}
</p>
</div>
http://git-wip-us.apache.org/repos/asf/ambari/blob/4242225c/ambari-web/app/templates/main/service/all_services_actions.hbs
----------------------------------------------------------------------
diff --git a/ambari-web/app/templates/main/service/all_services_actions.hbs b/ambari-web/app/templates/main/service/all_services_actions.hbs
index 3e87cb2..a9e122b 100644
--- a/ambari-web/app/templates/main/service/all_services_actions.hbs
+++ b/ambari-web/app/templates/main/service/all_services_actions.hbs
@@ -22,12 +22,14 @@
</div>
<ul class="dropdown-menu">
{{#isAuthorized "SERVICE.ADD_DELETE_SERVICES"}}
- <li {{bindAttr class="view.serviceController.isAllServicesInstalled:disabled"}}>
+ {{#if App.supports.enableAddDeleteServices}}
+ <li {{bindAttr class="view.serviceController.isAllServicesInstalled:disabled"}}>
<a href="#"
{{bindAttr class="view.serviceController.isAllServicesInstalled:disabled"}}
{{action gotoAddService target="view.serviceController"}}>
<i class="glyphicon glyphicon-plus"></i> {{t services.service.add}}</a>
- </li>
+ </li>
+ {{/if}}
{{/isAuthorized}}
{{#isAuthorized "SERVICE.START_STOP"}}
<li class="divider"></li>
http://git-wip-us.apache.org/repos/asf/ambari/blob/4242225c/ambari-web/app/views/main/admin.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/views/main/admin.js b/ambari-web/app/views/main/admin.js
index 509f380..05d0f56 100644
--- a/ambari-web/app/views/main/admin.js
+++ b/ambari-web/app/views/main/admin.js
@@ -39,12 +39,14 @@ App.MainAdminView = Em.View.extend({
});
}
if (!App.get('isHadoopWindowsStack') && App.isAuthorized('CLUSTER.TOGGLE_KERBEROS') || (App.get('upgradeInProgress') || App.get('upgradeHolding')) ) {
- items.push({
- name: 'kerberos',
- url: 'adminKerberos.index',
- label: Em.I18n.t('common.kerberos'),
- disabled: App.get('upgradeInProgress') || App.get('upgradeHolding')
- });
+ if (App.supports.enableToggleKerberos) {
+ items.push({
+ name: 'kerberos',
+ url: 'adminKerberos.index',
+ label: Em.I18n.t('common.kerberos'),
+ disabled: App.get('upgradeInProgress') || App.get('upgradeHolding')
+ });
+ }
}
if ((App.isAuthorized('SERVICE.START_STOP, CLUSTER.MODIFY_CONFIGS') && App.isAuthorized('SERVICE.MANAGE_AUTO_START, CLUSTER.MANAGE_AUTO_START')) || (App.get('upgradeInProgress') || App.get('upgradeHolding'))) {
if (App.supports.serviceAutoStart) {
http://git-wip-us.apache.org/repos/asf/ambari/blob/4242225c/ambari-web/app/views/main/admin/stack_upgrade/services_view.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/views/main/admin/stack_upgrade/services_view.js b/ambari-web/app/views/main/admin/stack_upgrade/services_view.js
index f566814..25efffe 100644
--- a/ambari-web/app/views/main/admin/stack_upgrade/services_view.js
+++ b/ambari-web/app/views/main/admin/stack_upgrade/services_view.js
@@ -56,7 +56,7 @@ App.MainAdminStackServicesView = Em.View.extend({
* @param event
*/
goToAddService: function (event) {
- if (!App.isAuthorized('SERVICE.ADD_DELETE_SERVICES')) {
+ if (!App.isAuthorized('SERVICE.ADD_DELETE_SERVICES') || !App.supports.enableAddDeleteServices) {
return;
} else if (event.context == "KERBEROS") {
App.router.get('mainAdminKerberosController').checkAndStartKerberosWizard();
http://git-wip-us.apache.org/repos/asf/ambari/blob/4242225c/ambari-web/app/views/main/menu.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/views/main/menu.js b/ambari-web/app/views/main/menu.js
index 4bb53ae..32c4f6f 100644
--- a/ambari-web/app/views/main/menu.js
+++ b/ambari-web/app/views/main/menu.js
@@ -118,13 +118,15 @@ App.MainSideMenuView = Em.CollectionView.extend({
});
}
if (!App.get('isHadoopWindowsStack') && App.isAuthorized('CLUSTER.TOGGLE_KERBEROS') || upg) {
- categories.push({
- name: 'kerberos',
- url: 'kerberos/',
- label: Em.I18n.t('common.kerberos'),
- disabled: App.get('upgradeInProgress') || App.get('upgradeHolding'),
- href: router.urlFor('main.admin.adminKerberos')
- });
+ if (App.supports.enableToggleKerberos) {
+ categories.push({
+ name: 'kerberos',
+ url: 'kerberos/',
+ label: Em.I18n.t('common.kerberos'),
+ disabled: App.get('upgradeInProgress') || App.get('upgradeHolding'),
+ href: router.urlFor('main.admin.adminKerberos')
+ });
+ }
}
if ((App.isAuthorized('SERVICE.START_STOP, CLUSTER.MODIFY_CONFIGS') && App.isAuthorized('SERVICE.MANAGE_AUTO_START, CLUSTER.MANAGE_AUTO_START')) || upg) {
if (App.supports.serviceAutoStart) {
http://git-wip-us.apache.org/repos/asf/ambari/blob/4242225c/ambari-web/app/views/main/service/item.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/views/main/service/item.js b/ambari-web/app/views/main/service/item.js
index 37e0904..45c783b 100644
--- a/ambari-web/app/views/main/service/item.js
+++ b/ambari-web/app/views/main/service/item.js
@@ -289,7 +289,7 @@ App.MainServiceItemView = Em.View.extend({
options.push(actionMap.DOWNLOAD_CLIENT_CONFIGS);
}
- if (App.isAuthorized("SERVICE.ADD_DELETE_SERVICES")) {
+ if (App.isAuthorized("SERVICE.ADD_DELETE_SERVICES") && App.supports.enableAddDeleteServices) {
options.push(actionMap.DELETE_SERVICE);
}
http://git-wip-us.apache.org/repos/asf/ambari/blob/4242225c/ambari-web/test/views/main/admin/stack_upgrade/services_view_test.js
----------------------------------------------------------------------
diff --git a/ambari-web/test/views/main/admin/stack_upgrade/services_view_test.js b/ambari-web/test/views/main/admin/stack_upgrade/services_view_test.js
index 70d182c..da75cf2 100644
--- a/ambari-web/test/views/main/admin/stack_upgrade/services_view_test.js
+++ b/ambari-web/test/views/main/admin/stack_upgrade/services_view_test.js
@@ -34,6 +34,7 @@ describe('App.MainAdminStackServicesView', function () {
sinon.stub(App.router, 'get').returns(mock);
sinon.spy(mock, 'checkAndStartKerberosWizard');
isAccessibleMock = sinon.stub(App, 'isAuthorized');
+ App.set('supports.enableAddDeleteServices', true);
});
afterEach(function() {
App.get('router').transitionTo.restore();
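The recurring edit across this commit is the same double gate everywhere: an action is offered only when the user is authorized and the corresponding experimental flag in App.supports is on. A Python paraphrase of that gate (names are illustrative, not the Ambari web API):

# Illustrative only: the authorization-plus-feature-flag gate applied
# throughout this commit.
supports = {"enableAddDeleteServices": True, "enableToggleKerberos": True}

def is_authorized(permission):
    return permission in {"SERVICE.ADD_DELETE_SERVICES"}  # stub RBAC check

def can_add_delete_services():
    # Both the RBAC check and the experimental feature flag must pass.
    return is_authorized("SERVICE.ADD_DELETE_SERVICES") \
        and supports["enableAddDeleteServices"]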
[30/31] ambari git commit: AMBARI-22137 - Different stack versions
should be able to link to different extension versions
Posted by jl...@apache.org.
AMBARI-22137 - Different stack versions should be able to link to different extension versions
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/03273bdc
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/03273bdc
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/03273bdc
Branch: refs/heads/branch-feature-AMBARI-14714
Commit: 03273bdce970a1282d89056aa48dc2ffe3b7b712
Parents: a3a8afc
Author: Tim Thorpe <tt...@apache.org>
Authored: Tue Oct 10 13:54:01 2017 -0700
Committer: Tim Thorpe <tt...@apache.org>
Committed: Tue Oct 10 13:54:01 2017 -0700
----------------------------------------------------------------------
.../controller/AmbariManagementHelper.java | 2 +-
.../ambari/server/stack/ExtensionHelper.java | 57 ++-
.../apache/ambari/server/stack/StackModule.java | 4 +
.../server/stack/StackManagerExtensionTest.java | 31 +-
.../resources/extensions/EXT/0.2/metainfo.xml | 2 +-
.../resources/extensions/EXT/0.3/metainfo.xml | 2 +-
.../stacks_with_extensions/HDP/0.4/metainfo.xml | 22 ++
.../HDP/0.4/repos/repoinfo.xml | 63 +++
.../HDP/0.4/services/HBASE/metainfo.xml | 26 ++
.../0.4/services/HDFS/configuration/global.xml | 145 +++++++
.../services/HDFS/configuration/hadoop-env.xml | 223 +++++++++++
.../services/HDFS/configuration/hbase-site.xml | 137 +++++++
.../services/HDFS/configuration/hdfs-log4j.xml | 199 ++++++++++
.../services/HDFS/configuration/hdfs-site.xml | 396 +++++++++++++++++++
.../HDP/0.4/services/HDFS/metainfo.xml | 30 ++
.../0.4/services/HDFS/package/dummy-script.py | 20 +
.../HDP/0.4/services/HIVE/metainfo.xml | 26 ++
.../HDP/0.4/services/MAPREDUCE/metainfo.xml | 23 ++
.../HDP/0.4/services/ZOOKEEPER/metainfo.xml | 26 ++
19 files changed, 1425 insertions(+), 9 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ambari/blob/03273bdc/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementHelper.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementHelper.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementHelper.java
index 2dd6f12..0c8edfe 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementHelper.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementHelper.java
@@ -70,7 +70,7 @@ public class AmbariManagementHelper {
*/
public void createExtensionLink(StackManager stackManager, StackInfo stackInfo, ExtensionInfo extensionInfo) throws AmbariException {
validateCreateExtensionLinkRequest(stackInfo, extensionInfo);
- ExtensionHelper.validateCreateLink(stackInfo, extensionInfo);
+ ExtensionHelper.validateCreateLink(stackManager, stackInfo, extensionInfo);
ExtensionLinkEntity linkEntity = createExtensionLinkEntity(stackInfo, extensionInfo);
stackManager.linkStackToExtension(stackInfo, extensionInfo);
http://git-wip-us.apache.org/repos/asf/ambari/blob/03273bdc/ambari-server/src/main/java/org/apache/ambari/server/stack/ExtensionHelper.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/stack/ExtensionHelper.java b/ambari-server/src/main/java/org/apache/ambari/server/stack/ExtensionHelper.java
index 86e532a..91dc870 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/stack/ExtensionHelper.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/stack/ExtensionHelper.java
@@ -27,6 +27,8 @@ import org.apache.ambari.server.state.ServiceInfo;
import org.apache.ambari.server.state.StackInfo;
import org.apache.ambari.server.state.stack.ExtensionMetainfoXml;
import org.apache.ambari.server.utils.VersionUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* An extension version is like a stack version but it contains custom services. Linking an extension
@@ -35,6 +37,8 @@ import org.apache.ambari.server.utils.VersionUtils;
*/
public class ExtensionHelper {
+ private final static Logger LOG = LoggerFactory.getLogger(ExtensionHelper.class);
+
public static void validateDeleteLink(Clusters clusters, StackInfo stack, ExtensionInfo extension) throws AmbariException {
validateNotRequiredExtension(stack, extension);
validateServicesNotInstalled(clusters, stack, extension);
@@ -62,9 +66,9 @@ public class ExtensionHelper {
}
}
- public static void validateCreateLink(StackInfo stack, ExtensionInfo extension) throws AmbariException {
+ public static void validateCreateLink(StackManager stackManager, StackInfo stack, ExtensionInfo extension) throws AmbariException {
validateSupportedStackVersion(stack, extension);
- validateServiceDuplication(stack, extension);
+ validateServiceDuplication(stackManager, stack, extension);
validateRequiredExtensions(stack, extension);
}
@@ -88,15 +92,24 @@ public class ExtensionHelper {
throw new AmbariException(message);
}
- private static void validateServiceDuplication(StackInfo stack, ExtensionInfo extension) throws AmbariException {
+ private static void validateServiceDuplication(StackManager stackManager, StackInfo stack, ExtensionInfo extension) throws AmbariException {
+ LOG.debug("Looking for duplicate services");
for (ServiceInfo service : extension.getServices()) {
+ LOG.debug("Looking for duplicate service " + service.getName());
if (service != null) {
ServiceInfo stackService = null;
try {
stackService = stack.getService(service.getName());
+ if (stackService != null) {
+ LOG.debug("Found service " + service.getName());
+ if (isInheritedExtensionService(stackManager, stack, service.getName(), extension.getName())) {
+ stackService = null;
+ }
+ }
}
catch (Exception e) {
//Eat the exception
+ LOG.error("Error validating service duplication", e);
}
if (stackService != null) {
String message = "Existing service is included in extension"
@@ -112,6 +125,44 @@ public class ExtensionHelper {
}
}
+ private static boolean isInheritedExtensionService(StackManager stackManager, StackInfo stack, String serviceName, String extensionName) {
+ // Check if service is from an extension at the current stack level, if so then it isn't inherited from its parent stack version
+ if (isExtensionService(stack, serviceName, extensionName)) {
+ LOG.debug("Service is at requested stack/version level " + serviceName);
+ return false;
+ }
+
+ return isExtensionService(stackManager, stack.getName(), stack.getParentStackVersion(), serviceName, extensionName);
+ }
+
+ private static boolean isExtensionService(StackManager stackManager, String stackName, String stackVersion, String serviceName, String extensionName) {
+ LOG.debug("Checking at stack/version " + stackName + "/" + stackVersion);
+ StackInfo stack = stackManager.getStack(stackName, stackVersion);
+
+ if (stack == null) {
+ LOG.warn("Stack/version not found " + stackName + "/" + stackVersion);
+ return false;
+ }
+
+ if (isExtensionService(stack, serviceName, extensionName)) {
+ LOG.debug("Stack/version " + stackName + "/" + stackVersion + " contains service " + serviceName);
+ return true;
+ }
+ else {
+ return isExtensionService(stackManager, stackName, stack.getParentStackVersion(), serviceName, extensionName);
+ }
+ }
+
+ private static boolean isExtensionService(StackInfo stack, String serviceName, String extensionName) {
+ ExtensionInfo extension = stack.getExtension(extensionName);
+ if (extension == null) {
+ LOG.debug("Extension not found " + extensionName);
+ return false;
+ }
+
+ return extension.getService(serviceName) != null;
+ }
+
private static void validateRequiredExtensions(StackInfo stack, ExtensionInfo extension) throws AmbariException {
for (ExtensionMetainfoXml.Extension requiredExtension : extension.getExtensions()) {
if (requiredExtension != null) {
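The key addition here is isInheritedExtensionService(): a service only counts as a duplicate if it was defined at the requested stack version itself; if it arrived by inheritance from a parent stack version's link to the same extension, the walk up getParentStackVersion() finds it and the duplicate check is waived. A compact iterative Python rendering of that lookup (hypothetical classes, not Ambari code):

# Hypothetical model of the lookup added to ExtensionHelper (iterative
# rather than recursive); none of these classes exist in Ambari.
class Ext(object):
    def __init__(self, services):
        self.services = set(services)

class Stack(object):
    def __init__(self, name, parent_version, extensions):
        self.name, self.parent_version = name, parent_version
        self.extensions = extensions  # {extension_name: Ext}

def is_extension_service(stack, service, ext_name):
    ext = stack.extensions.get(ext_name)
    return ext is not None and service in ext.services

def is_inherited_extension_service(stacks, stack, service, ext_name):
    # Defined at the requested level -> a real duplicate, not inherited.
    if is_extension_service(stack, service, ext_name):
        return False
    version = stack.parent_version
    while version is not None:
        parent = stacks.get((stack.name, version))
        if parent is None:
            return False          # broken parent chain: treat as not inherited
        if is_extension_service(parent, service, ext_name):
            return True           # service came in through the parent's link
        version = parent.parent_version
    return False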
http://git-wip-us.apache.org/repos/asf/ambari/blob/03273bdc/ambari-server/src/main/java/org/apache/ambari/server/stack/StackModule.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/stack/StackModule.java b/ambari-server/src/main/java/org/apache/ambari/server/stack/StackModule.java
index 520764d..b109331 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/stack/StackModule.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/stack/StackModule.java
@@ -198,6 +198,7 @@ public class StackModule extends BaseModule<StackModule, StackInfo> implements V
if (parentVersion != null) {
mergeStackWithParent(parentVersion, allStacks, commonServices, extensions);
}
+
for (ExtensionInfo extension : stackInfo.getExtensions()) {
String extensionKey = extension.getName() + StackManager.PATH_DELIMITER + extension.getVersion();
ExtensionModule extensionModule = extensions.get(extensionKey);
@@ -402,6 +403,9 @@ public class StackModule extends BaseModule<StackModule, StackInfo> implements V
private void addExtensionServices() throws AmbariException {
for (ExtensionModule extension : extensionModules.values()) {
+ for (Map.Entry<String, ServiceModule> entry : extension.getServiceModules().entrySet()) {
+ serviceModules.put(entry.getKey(), entry.getValue());
+ }
stackInfo.addExtension(extension.getModuleInfo());
}
}
http://git-wip-us.apache.org/repos/asf/ambari/blob/03273bdc/ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerExtensionTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerExtensionTest.java b/ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerExtensionTest.java
index 20c8f40..cef30b5 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerExtensionTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerExtensionTest.java
@@ -69,6 +69,9 @@ public class StackManagerExtensionTest {
StackEntity stack3 = new StackEntity();
stack3.setStackName("HDP");
stack3.setStackVersion("0.3");
+ StackEntity stack4 = new StackEntity();
+ stack4.setStackName("HDP");
+ stack4.setStackVersion("0.4");
ExtensionEntity extension1 = new ExtensionEntity();
extension1.setExtensionName("EXT");
extension1.setExtensionVersion("0.1");
@@ -78,19 +81,28 @@ public class StackManagerExtensionTest {
ExtensionEntity extension3 = new ExtensionEntity();
extension3.setExtensionName("EXT");
extension3.setExtensionVersion("0.3");
+ ExtensionLinkEntity link1 = new ExtensionLinkEntity();
+ link1.setLinkId(new Long(-1));
+ link1.setStack(stack1);
+ link1.setExtension(extension1);
List<ExtensionLinkEntity> list = new ArrayList<>();
+ List<ExtensionLinkEntity> linkList = new ArrayList<>();
+ linkList.add(link1);
expect(stackDao.find("HDP", "0.1")).andReturn(stack1).atLeastOnce();
expect(stackDao.find("HDP", "0.2")).andReturn(stack2).atLeastOnce();
expect(stackDao.find("HDP", "0.3")).andReturn(stack3).atLeastOnce();
+ expect(stackDao.find("HDP", "0.4")).andReturn(stack3).atLeastOnce();
expect(extensionDao.find("EXT", "0.1")).andReturn(extension1).atLeastOnce();
expect(extensionDao.find("EXT", "0.2")).andReturn(extension2).atLeastOnce();
expect(extensionDao.find("EXT", "0.3")).andReturn(extension3).atLeastOnce();
+ expect(linkDao.findByStack("HDP", "0.1")).andReturn(linkList).atLeastOnce();
expect(linkDao.findByStack(EasyMock.anyObject(String.class),
EasyMock.anyObject(String.class))).andReturn(list).atLeastOnce();
expect(linkDao.findByStackAndExtension("HDP", "0.2", "EXT", "0.2")).andReturn(null).atLeastOnce();
+ expect(linkDao.findByStackAndExtension("HDP", "0.1", "EXT", "0.1")).andReturn(link1).atLeastOnce();
replay(actionMetadata, stackDao, metaInfoDao, osFamily, extensionDao, linkDao); //linkEntity
@@ -144,21 +156,34 @@ public class StackManagerExtensionTest {
assertNotNull(themes);
assertTrue("Number of themes is " + themes.size(), themes.size() == 0);
- StackInfo stack = stackManager.getStack("HDP", "0.2");
+ StackInfo stack = stackManager.getStack("HDP", "0.1");
assertNotNull(stack.getService("OOZIE2"));
oozie = stack.getService("OOZIE2");
assertNotNull("Package dir is " + oozie.getServicePackageFolder(), oozie.getServicePackageFolder());
assertTrue("Package dir is " + oozie.getServicePackageFolder(), oozie.getServicePackageFolder().contains("extensions/EXT/0.1/services/OOZIE2/package"));
- assertEquals(oozie.getVersion(), "4.0.0");
+ assertEquals(oozie.getVersion(), "3.2.0");
assertTrue("Extensions found: " + stack.getExtensions().size(), stack.getExtensions().size() == 1);
extension = stack.getExtensions().iterator().next();
assertEquals("Extension name: " + extension.getName(), extension.getName(), "EXT");
- assertEquals("Extension version: " + extension.getVersion(), extension.getVersion(), "0.3");
+ assertEquals("Extension version: " + extension.getVersion(), extension.getVersion(), "0.1");
+
+ stack = stackManager.getStack("HDP", "0.2");
+ assertTrue("Extensions found: " + stack.getExtensions().size(), stack.getExtensions().size() == 0);
stack = stackManager.getStack("HDP", "0.3");
assertTrue("Extensions found: " + stack.getExtensions().size(), stack.getExtensions().size() == 1);
extension = stack.getExtensions().iterator().next();
+ assertNotNull(extension.getService("OOZIE2"));
+ oozie = extension.getService("OOZIE2");
+ assertEquals(oozie.getVersion(), "4.0.0");
+
+ assertEquals("Extension name: " + extension.getName(), extension.getName(), "EXT");
+ assertEquals("Extension version: " + extension.getVersion(), extension.getVersion(), "0.3");
+
+ stack = stackManager.getStack("HDP", "0.4");
+ assertTrue("Extensions found: " + stack.getExtensions().size(), stack.getExtensions().size() == 1);
+ extension = stack.getExtensions().iterator().next();
assertEquals("Extension name: " + extension.getName(), extension.getName(), "EXT");
assertEquals("Extension version: " + extension.getVersion(), extension.getVersion(), "0.3");
}
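
The test changes above follow EasyMock's record/replay cycle: expectations such as stackDao.find("HDP", "0.4") are recorded with expect(...).andReturn(...).atLeastOnce(), and replay(...) then switches the mocks into replay mode before the stack manager is exercised. A minimal standalone sketch of that cycle, mocking a java.util.List instead of Ambari's DAOs (the DAO names above are real; the List here is just a stand-in):

import static org.easymock.EasyMock.createMock;
import static org.easymock.EasyMock.expect;
import static org.easymock.EasyMock.replay;
import static org.easymock.EasyMock.verify;

import java.util.List;

public class EasyMockCycleSketch {
  public static void main(String[] args) {
    @SuppressWarnings("unchecked")
    List<String> dao = createMock(List.class);

    // record phase: declare which calls are expected and what they return
    expect(dao.get(0)).andReturn("HDP-0.4").atLeastOnce();

    replay(dao);                    // switch from record mode to replay mode
    System.out.println(dao.get(0)); // prints "HDP-0.4"
    verify(dao);                    // asserts every recorded expectation was met
  }
}
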
http://git-wip-us.apache.org/repos/asf/ambari/blob/03273bdc/ambari-server/src/test/resources/extensions/EXT/0.2/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/extensions/EXT/0.2/metainfo.xml b/ambari-server/src/test/resources/extensions/EXT/0.2/metainfo.xml
index 0d37b3e..c95a20f 100644
--- a/ambari-server/src/test/resources/extensions/EXT/0.2/metainfo.xml
+++ b/ambari-server/src/test/resources/extensions/EXT/0.2/metainfo.xml
@@ -25,7 +25,7 @@
<min-stack-versions>
<stack>
<name>HDP</name>
- <version>0.2</version>
+ <version>0.3</version>
</stack>
</min-stack-versions>
</prerequisites>
http://git-wip-us.apache.org/repos/asf/ambari/blob/03273bdc/ambari-server/src/test/resources/extensions/EXT/0.3/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/extensions/EXT/0.3/metainfo.xml b/ambari-server/src/test/resources/extensions/EXT/0.3/metainfo.xml
index d827314..1b6ce73 100644
--- a/ambari-server/src/test/resources/extensions/EXT/0.3/metainfo.xml
+++ b/ambari-server/src/test/resources/extensions/EXT/0.3/metainfo.xml
@@ -25,7 +25,7 @@
<min-stack-versions>
<stack>
<name>HDP</name>
- <version>0.2</version>
+ <version>0.3</version>
</stack>
</min-stack-versions>
</prerequisites>
http://git-wip-us.apache.org/repos/asf/ambari/blob/03273bdc/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/metainfo.xml b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/metainfo.xml
new file mode 100644
index 0000000..3b4897f
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/metainfo.xml
@@ -0,0 +1,22 @@
+<?xml version="1.0"?>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements. See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<metainfo>
+ <versions>
+ <upgrade>0.3</upgrade>
+ </versions>
+</metainfo>
http://git-wip-us.apache.org/repos/asf/ambari/blob/03273bdc/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/repos/repoinfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/repos/repoinfo.xml b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/repos/repoinfo.xml
new file mode 100644
index 0000000..9b3b1c7
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/repos/repoinfo.xml
@@ -0,0 +1,63 @@
+<?xml version="1.0"?>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements. See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<reposinfo>
+ <os family="redhat6">
+ <repo>
+ <baseurl>http://public-repo-1.hortonworks.com/HDP-1.1.1.16/repos/centos6</baseurl>
+ <repoid>HDP-1.1.1.16</repoid>
+ <reponame>HDP</reponame>
+ <unique>true</unique>
+ </repo>
+ <repo>
+ <baseurl>http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.15/repos/centos6</baseurl>
+ <repoid>HDP-UTILS-1.1.0.15</repoid>
+ <reponame>HDP-UTILS</reponame>
+ <unique>false</unique>
+ <mirrorslist></mirrorslist>
+ </repo>
+ <repo>
+ <baseurl></baseurl>
+ <repoid>epel</repoid>
+ <reponame>epel</reponame>
+ <unique>true</unique>
+ <mirrorslist><![CDATA[https://mirrors.fedoraproject.org/metalink?repo=epel-6&arch=$basearch]]></mirrorslist>
+ </repo>
+ </os>
+ <os family="centos5">
+ <repo>
+ <baseurl>http://public-repo-1.hortonworks.com/HDP-1.1.1.16/repos/centos5</baseurl>
+ <repoid>HDP-1.1.1.16</repoid>
+ <reponame>HDP</reponame>
+ <unique>true</unique>
+ </repo>
+ <repo>
+ <baseurl>http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.15/repos/centos5</baseurl>
+ <repoid>HDP-UTILS-1.1.0.15</repoid>
+ <reponame>HDP-UTILS</reponame>
+ <unique>false</unique>
+ <mirrorslist></mirrorslist>
+ </repo>
+ <repo>
+ <baseurl></baseurl>
+ <repoid>epel</repoid>
+ <reponame>epel</reponame>
+ <unique>true</unique>
+ <mirrorslist><![CDATA[https://mirrors.fedoraproject.org/metalink?repo=epel-5&arch=$basearch]]></mirrorslist>
+ </repo>
+ </os>
+</reposinfo>
http://git-wip-us.apache.org/repos/asf/ambari/blob/03273bdc/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/HBASE/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/HBASE/metainfo.xml b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/HBASE/metainfo.xml
new file mode 100644
index 0000000..48123f0
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/HBASE/metainfo.xml
@@ -0,0 +1,26 @@
+<?xml version="1.0"?>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements. See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<metainfo>
+ <schemaVersion>2.0</schemaVersion>
+ <services>
+ <service>
+ <name>HBASE</name>
+ <extends>common-services/HBASE/1.0</extends>
+ </service>
+ </services>
+</metainfo>
http://git-wip-us.apache.org/repos/asf/ambari/blob/03273bdc/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/HDFS/configuration/global.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/HDFS/configuration/global.xml b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/HDFS/configuration/global.xml
new file mode 100644
index 0000000..bcab577
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/HDFS/configuration/global.xml
@@ -0,0 +1,145 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration>
+ <property>
+ <name>namenode_host</name>
+ <value></value>
+ <description>NameNode Host.</description>
+ </property>
+ <property>
+ <name>dfs_name_dir</name>
+ <value>/hadoop/hdfs/namenode</value>
+ <description>NameNode Directories.</description>
+ </property>
+ <property>
+ <name>snamenode_host</name>
+ <value></value>
+ <description>Secondary NameNode.</description>
+ </property>
+ <property>
+ <name>fs_checkpoint_dir</name>
+ <value>/hadoop/hdfs/namesecondary</value>
+ <description>Secondary NameNode checkpoint dir.</description>
+ </property>
+ <property>
+ <name>datanode_hosts</name>
+ <value></value>
+ <description>List of Datanode Hosts.</description>
+ </property>
+ <property>
+ <name>dfs_data_dir</name>
+ <value>/hadoop/hdfs/data</value>
+ <description>Data directories for Data Nodes.</description>
+ </property>
+ <property>
+ <name>hdfs_log_dir_prefix</name>
+ <value>/var/log/hadoop</value>
+ <description>Hadoop Log Dir Prefix</description>
+ </property>
+ <property>
+ <name>hadoop_pid_dir_prefix</name>
+ <value>/var/run/hadoop</value>
+ <description>Hadoop PID Dir Prefix</description>
+ </property>
+ <property>
+ <name>dfs_webhdfs_enabled</name>
+ <value>true</value>
+ <description>WebHDFS enabled</description>
+ </property>
+ <property>
+ <name>hadoop_heapsize</name>
+ <value>1024</value>
+ <description>Hadoop maximum Java heap size</description>
+ </property>
+ <property>
+ <name>namenode_heapsize</name>
+ <value>1024</value>
+ <description>NameNode Java heap size</description>
+ </property>
+ <property>
+ <name>namenode_opt_newsize</name>
+ <value>200</value>
+ <description>Default size of the Java new generation for the NameNode (Java option -XX:NewSize). The value should be 1/8 of the maximum heap size (-Xmx).</description>
+ </property>
+ <property>
+ <name>namenode_opt_maxnewsize</name>
+ <value>640</value>
+ <description>NameNode maximum new generation size</description>
+ </property>
+ <property>
+ <name>namenode_opt_permsize</name>
+ <value>128</value>
+ <description>NameNode permanent generation size</description>
+ </property>
+ <property>
+ <name>namenode_opt_maxpermsize</name>
+ <value>256</value>
+ <description>NameNode maximum permanent generation size</description>
+ </property>
+ <property>
+ <name>datanode_du_reserved</name>
+ <value>1</value>
+ <description>Reserved space for HDFS</description>
+ </property>
+ <property>
+ <name>dtnode_heapsize</name>
+ <value>1024</value>
+ <description>DataNode maximum Java heap size</description>
+ </property>
+ <property>
+ <name>dfs_datanode_failed_volume_tolerated</name>
+ <value>0</value>
+ <description>DataNode volumes failure toleration</description>
+ </property>
+ <property>
+ <name>fs_checkpoint_period</name>
+ <value>21600</value>
+ <description>HDFS Maximum Checkpoint Delay</description>
+ </property>
+ <property>
+ <name>fs_checkpoint_size</name>
+ <value>0.5</value>
+ <description>FS Checkpoint Size.</description>
+ </property>
+ <property>
+ <name>security_enabled</name>
+ <value>false</value>
+ <description>Hadoop Security</description>
+ </property>
+ <property>
+ <name>kerberos_domain</name>
+ <value>EXAMPLE.COM</value>
+ <description>Kerberos realm.</description>
+ </property>
+ <property>
+ <name>keytab_path</name>
+ <value>/etc/security/keytabs</value>
+ <description>KeyTab Directory.</description>
+ </property>
+
+</configuration>
http://git-wip-us.apache.org/repos/asf/ambari/blob/03273bdc/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/HDFS/configuration/hadoop-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/HDFS/configuration/hadoop-env.xml b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/HDFS/configuration/hadoop-env.xml
new file mode 100644
index 0000000..8fb8c7f
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/HDFS/configuration/hadoop-env.xml
@@ -0,0 +1,223 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration>
+ <property>
+ <name>hdfs_log_dir_prefix</name>
+ <value>/var/log/hadoop</value>
+ <description>Hadoop Log Dir Prefix</description>
+ </property>
+ <property>
+ <name>hadoop_pid_dir_prefix</name>
+ <value>/var/run/hadoop</value>
+ <description>Hadoop PID Dir Prefix</description>
+ </property>
+ <property>
+ <name>hadoop_heapsize</name>
+ <value>1024</value>
+ <description>Hadoop maximum Java heap size</description>
+ </property>
+ <property>
+ <name>namenode_heapsize</name>
+ <value>1024</value>
+ <description>NameNode Java heap size</description>
+ </property>
+ <property>
+ <name>namenode_opt_newsize</name>
+ <value>200</value>
+ <description>Default size of the Java new generation for the NameNode (Java option -XX:NewSize). The value should be 1/8 of the maximum heap size (-Xmx).</description>
+ </property>
+ <property>
+ <name>namenode_opt_maxnewsize</name>
+ <value>200</value>
+ <description>NameNode maximum new generation size</description>
+ </property>
+ <property>
+ <name>namenode_opt_permsize</name>
+ <value>128</value>
+ <description>NameNode permanent generation size</description>
+ </property>
+ <property>
+ <name>namenode_opt_maxpermsize</name>
+ <value>256</value>
+ <description>NameNode maximum permanent generation size</description>
+ </property>
+ <property>
+ <name>dtnode_heapsize</name>
+ <value>1024</value>
+ <description>DataNode maximum Java heap size</description>
+ </property>
+ <property>
+ <name>proxyuser_group</name>
+ <value>users</value>
+ <description>Proxy user group.</description>
+ </property>
+ <property>
+ <name>security_enabled</name>
+ <value>false</value>
+ <description>Hadoop Security</description>
+ </property>
+ <property>
+ <name>kerberos_domain</name>
+ <value>EXAMPLE.COM</value>
+ <description>Kerberos realm.</description>
+ </property>
+ <property>
+ <name>hdfs_user</name>
+ <value>hdfs</value>
+ <description>User and Groups.</description>
+ </property>
+ <property>
+ <name>ignore_groupsusers_create</name>
+ <value>false</value>
+ <description>Whether to ignore failures during user and group creation</description>
+ </property>
+ <property>
+ <name>smokeuser</name>
+ <value>ambari-qa</value>
+ <description>User executing service checks</description>
+ </property>
+ <property>
+ <name>user_group</name>
+ <value>hadoop</value>
+ <description>Hadoop user group.</description>
+ </property>
+
+ <!-- hadoop-env.sh -->
+ <property>
+ <name>content</name>
+ <description>hadoop-env.sh content</description>
+ <value>
+# Set Hadoop-specific environment variables here.
+
+# The only required environment variable is JAVA_HOME. All others are
+# optional. When running a distributed configuration it is best to
+# set JAVA_HOME in this file, so that it is correctly defined on
+# remote nodes.
+
+# The java implementation to use. Required.
+export JAVA_HOME={{java_home}}
+export HADOOP_HOME_WARN_SUPPRESS=1
+
+# Hadoop home directory
+export HADOOP_HOME=${HADOOP_HOME:-/usr/lib/hadoop}
+
+# Hadoop Configuration Directory
+#TODO: if env var set that can cause problems
+export HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-{{hadoop_conf_dir}}}
+
+{# this is different for HDP1 #}
+# Path to jsvc required by secure HDP 2.0 datanode
+export JSVC_HOME={{jsvc_path}}
+
+
+# The maximum amount of heap to use, in MB. Default is 1000.
+export HADOOP_HEAPSIZE="{{hadoop_heapsize}}"
+
+export HADOOP_NAMENODE_INIT_HEAPSIZE="-Xms{{namenode_heapsize}}"
+
+# Extra Java runtime options. Empty by default.
+export HADOOP_OPTS="-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}"
+
+# Command specific options appended to HADOOP_OPTS when specified
+export HADOOP_NAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_NAMENODE_OPTS}"
+HADOOP_JOBTRACKER_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}"
+
+HADOOP_TASKTRACKER_OPTS="-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}"
+HADOOP_DATANODE_OPTS="-XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -XX:ConcGCThreads=4 -XX:+UseConcMarkSweepGC -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=ERROR,DRFAS ${HADOOP_DATANODE_OPTS}"
+HADOOP_BALANCER_OPTS="-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}"
+
+export HADOOP_SECONDARYNAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps ${HADOOP_NAMENODE_INIT_HEAPSIZE} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_SECONDARYNAMENODE_OPTS}"
+
+# The following applies to multiple commands (fs, dfs, fsck, distcp etc)
+export HADOOP_CLIENT_OPTS="-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS"
+# On secure datanodes, user to run the datanode as after dropping privileges
+export HADOOP_SECURE_DN_USER={{hdfs_user}}
+
+# Extra ssh options. Empty by default.
+export HADOOP_SSH_OPTS="-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR"
+
+# Where log files are stored. $HADOOP_HOME/logs by default.
+export HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER
+
+# History server logs
+export HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER
+
+# Where log files are stored in the secure data environment.
+export HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_DN_USER
+
+# File naming remote slave hosts. $HADOOP_HOME/conf/slaves by default.
+# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves
+
+# host:path where hadoop code should be rsync'd from. Unset by default.
+# export HADOOP_MASTER=master:/home/$USER/src/hadoop
+
+# Seconds to sleep between slave commands. Unset by default. This
+# can be useful in large clusters, where, e.g., slave rsyncs can
+# otherwise arrive faster than the master can service them.
+# export HADOOP_SLAVE_SLEEP=0.1
+
+# The directory where pid files are stored. /tmp by default.
+export HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER
+export HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER
+
+# History server pid
+export HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER
+
+YARN_RESOURCEMANAGER_OPTS="-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY"
+
+# A string representing this instance of hadoop. $USER by default.
+export HADOOP_IDENT_STRING=$USER
+
+# The scheduling priority for daemon processes. See 'man nice'.
+
+# export HADOOP_NICENESS=10
+
+# Use libraries from standard classpath
+JAVA_JDBC_LIBS=""
+#Add libraries required by mysql connector
+for jarFile in `ls /usr/share/java/*mysql* 2>/dev/null`
+do
+ JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile
+done
+#Add libraries required by oracle connector
+for jarFile in `ls /usr/share/java/*ojdbc* 2>/dev/null`
+do
+ JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile
+done
+#Add libraries required by nodemanager
+MAPREDUCE_LIBS={{mapreduce_libs_path}}
+export HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}:${MAPREDUCE_LIBS}
+
+if [ -d "/usr/lib/tez" ]; then
+ export HADOOP_CLASSPATH=$HADOOP_CLASSPATH:/usr/lib/tez/*:/usr/lib/tez/lib/*:/etc/tez/conf
+fi
+
+# Setting path to hdfs command line
+export HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}
+
+#Mostly required for hadoop 2.0
+export JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}:/usr/lib/hadoop/lib/native/Linux-amd64-64
+ </value>
+ </property>
+
+</configuration>
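
The hadoop-env.sh body above is not literal shell: tokens such as {{java_home}} and the {# ... #} comment are template placeholders that are substituted before the file is written to disk. A rough illustration of that substitution step, using plain Java string replacement (the variable names come from the template above; the render helper itself is hypothetical, not Ambari's real template engine):

import java.util.HashMap;
import java.util.Map;

public class TemplateFillSketch {
  // naive stand-in for the real template engine: replace {{key}} tokens
  static String render(String template, Map<String, String> vars) {
    String out = template;
    for (Map.Entry<String, String> e : vars.entrySet()) {
      out = out.replace("{{" + e.getKey() + "}}", e.getValue());
    }
    return out;
  }

  public static void main(String[] args) {
    Map<String, String> vars = new HashMap<>();
    vars.put("java_home", "/usr/jdk64/jdk1.8.0_112");
    vars.put("hadoop_heapsize", "1024");

    String lines = "export JAVA_HOME={{java_home}}\n"
        + "export HADOOP_HEAPSIZE=\"{{hadoop_heapsize}}\"";
    System.out.println(render(lines, vars));
  }
}
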
http://git-wip-us.apache.org/repos/asf/ambari/blob/03273bdc/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/HDFS/configuration/hbase-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/HDFS/configuration/hbase-site.xml b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/HDFS/configuration/hbase-site.xml
new file mode 100644
index 0000000..5024e85
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/HDFS/configuration/hbase-site.xml
@@ -0,0 +1,137 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Copyright 2007 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration>
+ <property>
+ <name>hbase.regionserver.msginterval</name>
+ <value>1000</value>
+ <description>Interval between messages from the RegionServer to HMaster
+ in milliseconds. Default is 15. Set this value low if you want unit
+ tests to be responsive.
+ </description>
+ </property>
+ <property>
+ <name>hbase.client.pause</name>
+ <value>5000</value>
+ <description>General client pause value. Used mostly as value to wait
+ before running a retry of a failed get, region lookup, etc.</description>
+ </property>
+ <property>
+ <name>hbase.master.meta.thread.rescanfrequency</name>
+ <value>10000</value>
+ <description>How long the HMaster sleeps (in milliseconds) between scans of
+ the root and meta tables.
+ </description>
+ </property>
+ <property>
+ <name>hbase.server.thread.wakefrequency</name>
+ <value>1000</value>
+ <description>Time to sleep in between searches for work (in milliseconds).
+ Used as sleep interval by service threads such as META scanner and log roller.
+ </description>
+ </property>
+ <property>
+ <name>hbase.regionserver.handler.count</name>
+ <value>5</value>
+ <description>Count of RPC Server instances spun up on RegionServers
+ Same property is used by the HMaster for count of master handlers.
+ Default is 10.
+ </description>
+ </property>
+ <property>
+ <name>hbase.master.lease.period</name>
+ <value>6000</value>
+ <description>Length of time the master will wait before timing out a region
+ server lease. Since region servers report in every second (see above), this
+ value has been reduced so that the master will notice a dead region server
+ sooner. The default is 30 seconds.
+ </description>
+ </property>
+ <property>
+ <name>hbase.master.info.port</name>
+ <value>-1</value>
+ <description>The port for the hbase master web UI
+ Set to -1 if you do not want the info server to run.
+ </description>
+ </property>
+ <property>
+ <name>hbase.regionserver.info.port</name>
+ <value>-1</value>
+ <description>The port for the hbase regionserver web UI
+ Set to -1 if you do not want the info server to run.
+ </description>
+ </property>
+ <property>
+ <name>hbase.regionserver.info.port.auto</name>
+ <value>true</value>
+ <description>Info server auto port bind. Enables automatic port
+ search if hbase.regionserver.info.port is already in use.
+ Enabled for testing to run multiple tests on one machine.
+ </description>
+ </property>
+ <property>
+ <name>hbase.master.lease.thread.wakefrequency</name>
+ <value>3000</value>
+ <description>The interval between checks for expired region server leases.
+ This value has been reduced due to the other reduced values above so that
+ the master will notice a dead region server sooner. The default is 15 seconds.
+ </description>
+ </property>
+ <property>
+ <name>hbase.regionserver.optionalcacheflushinterval</name>
+ <value>10000</value>
+ <description>
+ Amount of time to wait since the last time a region was flushed before
+ invoking an optional cache flush. Default 60,000.
+ </description>
+ </property>
+ <property>
+ <name>hbase.regionserver.safemode</name>
+ <value>false</value>
+ <description>
+ Turn on/off safe mode in region server. Always on for production, always off
+ for tests.
+ </description>
+ </property>
+ <property>
+ <name>hbase.hregion.max.filesize</name>
+ <value>67108864</value>
+ <description>
+ Maximum desired file size for an HRegion. If filesize exceeds
+ value + (value / 2), the HRegion is split in two. Default: 256M.
+
+ Keep the maximum filesize small so we split more often in tests.
+ </description>
+ </property>
+ <property>
+ <name>hadoop.log.dir</name>
+ <value>${user.dir}/../logs</value>
+ </property>
+ <property>
+ <name>hbase.zookeeper.property.clientPort</name>
+ <value>21818</value>
+ <description>Property from ZooKeeper's config zoo.cfg.
+ The port at which the clients will connect.
+ </description>
+ </property>
+</configuration>
http://git-wip-us.apache.org/repos/asf/ambari/blob/03273bdc/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/HDFS/configuration/hdfs-log4j.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/HDFS/configuration/hdfs-log4j.xml b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/HDFS/configuration/hdfs-log4j.xml
new file mode 100644
index 0000000..649472d
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/HDFS/configuration/hdfs-log4j.xml
@@ -0,0 +1,199 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration supports_final="false">
+
+ <property>
+ <name>content</name>
+ <value>
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+
+# Define some default values that can be overridden by system properties
+hadoop.root.logger=INFO,console
+hadoop.log.dir=.
+hadoop.log.file=hadoop.log
+
+
+# Define the root logger to the system property "hadoop.root.logger".
+log4j.rootLogger=${hadoop.root.logger}, EventCounter
+
+# Logging Threshold
+log4j.threshold=ALL
+
+#
+# Daily Rolling File Appender
+#
+
+log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}
+
+# Rollover at midnight
+log4j.appender.DRFA.DatePattern=.yyyy-MM-dd
+
+# 30-day backup
+#log4j.appender.DRFA.MaxBackupIndex=30
+log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout
+
+# Pattern format: Date LogLevel LoggerName LogMessage
+log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+# Debugging Pattern format
+#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
+
+
+#
+# console
+# Add "console" to rootlogger above if you want to use this
+#
+
+log4j.appender.console=org.apache.log4j.ConsoleAppender
+log4j.appender.console.target=System.err
+log4j.appender.console.layout=org.apache.log4j.PatternLayout
+log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
+
+#
+# TaskLog Appender
+#
+
+#Default values
+hadoop.tasklog.taskid=null
+hadoop.tasklog.iscleanup=false
+hadoop.tasklog.noKeepSplits=4
+hadoop.tasklog.totalLogFileSize=100
+hadoop.tasklog.purgeLogSplits=true
+hadoop.tasklog.logsRetainHours=12
+
+log4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender
+log4j.appender.TLA.taskId=${hadoop.tasklog.taskid}
+log4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}
+log4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}
+
+log4j.appender.TLA.layout=org.apache.log4j.PatternLayout
+log4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+
+#
+#Security audit appender
+#
+hadoop.security.logger=INFO,console
+hadoop.security.log.maxfilesize=256MB
+hadoop.security.log.maxbackupindex=20
+log4j.category.SecurityLogger=${hadoop.security.logger}
+hadoop.security.log.file=SecurityAuth.audit
+log4j.appender.DRFAS=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.DRFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}
+log4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout
+log4j.appender.DRFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+log4j.appender.DRFAS.DatePattern=.yyyy-MM-dd
+
+log4j.appender.RFAS=org.apache.log4j.RollingFileAppender
+log4j.appender.RFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}
+log4j.appender.RFAS.layout=org.apache.log4j.PatternLayout
+log4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+log4j.appender.RFAS.MaxFileSize=${hadoop.security.log.maxfilesize}
+log4j.appender.RFAS.MaxBackupIndex=${hadoop.security.log.maxbackupindex}
+
+#
+# hdfs audit logging
+#
+hdfs.audit.logger=INFO,console
+log4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger}
+log4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=false
+log4j.appender.DRFAAUDIT=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.DRFAAUDIT.File=${hadoop.log.dir}/hdfs-audit.log
+log4j.appender.DRFAAUDIT.layout=org.apache.log4j.PatternLayout
+log4j.appender.DRFAAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
+log4j.appender.DRFAAUDIT.DatePattern=.yyyy-MM-dd
+
+#
+# mapred audit logging
+#
+mapred.audit.logger=INFO,console
+log4j.logger.org.apache.hadoop.mapred.AuditLogger=${mapred.audit.logger}
+log4j.additivity.org.apache.hadoop.mapred.AuditLogger=false
+log4j.appender.MRAUDIT=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.MRAUDIT.File=${hadoop.log.dir}/mapred-audit.log
+log4j.appender.MRAUDIT.layout=org.apache.log4j.PatternLayout
+log4j.appender.MRAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
+log4j.appender.MRAUDIT.DatePattern=.yyyy-MM-dd
+
+#
+# Rolling File Appender
+#
+
+log4j.appender.RFA=org.apache.log4j.RollingFileAppender
+log4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}
+
+# Logfile size and 30-day backups
+log4j.appender.RFA.MaxFileSize=256MB
+log4j.appender.RFA.MaxBackupIndex=10
+
+log4j.appender.RFA.layout=org.apache.log4j.PatternLayout
+log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} - %m%n
+# Debugging Pattern format
+#log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
+
+
+# Custom Logging levels
+
+hadoop.metrics.log.level=INFO
+#log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG
+#log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG
+#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG
+log4j.logger.org.apache.hadoop.metrics2=${hadoop.metrics.log.level}
+
+# Jets3t library
+log4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR
+
+#
+# Null Appender
+# Trap security logger on the hadoop client side
+#
+log4j.appender.NullAppender=org.apache.log4j.varia.NullAppender
+
+#
+# Event Counter Appender
+# Sends counts of logging messages at different severity levels to Hadoop Metrics.
+#
+log4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter
+
+# Removes "deprecated" messages
+log4j.logger.org.apache.hadoop.conf.Configuration.deprecation=WARN
+ </value>
+ </property>
+
+</configuration>
http://git-wip-us.apache.org/repos/asf/ambari/blob/03273bdc/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/HDFS/configuration/hdfs-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/HDFS/configuration/hdfs-site.xml b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/HDFS/configuration/hdfs-site.xml
new file mode 100644
index 0000000..2b979d7
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/HDFS/configuration/hdfs-site.xml
@@ -0,0 +1,396 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements. See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration>
+
+<!-- file system properties -->
+
+ <property>
+ <name>dfs.name.dir</name>
+ <!-- cluster variant -->
+ <value>/mnt/hmc/hadoop/hdfs/namenode</value>
+ <description>Determines where on the local filesystem the DFS name node
+ should store the name table. If this is a comma-delimited list
+ of directories then the name table is replicated in all of the
+ directories, for redundancy. </description>
+ <final>true</final>
+ </property>
+
+ <property>
+ <name>dfs.support.append</name>
+ <value>true</value>
+ <description>to enable dfs append</description>
+ <final>true</final>
+ </property>
+
+ <property>
+ <name>dfs.webhdfs.enabled</name>
+ <value>false</value>
+ <description>to enable webhdfs</description>
+ <final>true</final>
+ </property>
+
+ <property>
+ <name>dfs.datanode.failed.volumes.tolerated</name>
+ <value>0</value>
+ <description>#of failed disks dn would tolerate</description>
+ <final>true</final>
+ </property>
+
+ <property>
+ <name>dfs.block.local-path-access.user</name>
+ <value>hbase</value>
+ <description>the user who is allowed to perform short
+ circuit reads.
+ </description>
+ <final>true</final>
+ </property>
+
+ <property>
+ <name>dfs.data.dir</name>
+ <value>/mnt/hmc/hadoop/hdfs/data</value>
+ <description>Determines where on the local filesystem an DFS data node
+ should store its blocks. If this is a comma-delimited
+ list of directories, then data will be stored in all named
+ directories, typically on different devices.
+ Directories that do not exist are ignored.
+ </description>
+ <final>true</final>
+ </property>
+
+ <property>
+ <name>dfs.hosts.exclude</name>
+ <value>/etc/hadoop/conf/dfs.exclude</value>
+ <description>Names a file that contains a list of hosts that are
+ not permitted to connect to the namenode. The full pathname of the
+ file must be specified. If the value is empty, no hosts are
+ excluded.</description>
+ </property>
+
+ <property>
+ <name>dfs.hosts</name>
+ <value>/etc/hadoop/conf/dfs.include</value>
+ <description>Names a file that contains a list of hosts that are
+ permitted to connect to the namenode. The full pathname of the file
+ must be specified. If the value is empty, all hosts are
+ permitted.</description>
+ </property>
+
+ <property>
+ <name>dfs.replication.max</name>
+ <value>50</value>
+ <description>Maximal block replication.
+ </description>
+ </property>
+
+ <property>
+ <name>dfs.replication</name>
+ <value>3</value>
+ <description>Default block replication.
+ </description>
+ </property>
+
+ <property>
+ <name>dfs.heartbeat.interval</name>
+ <value>3</value>
+ <description>Determines datanode heartbeat interval in seconds.</description>
+ </property>
+
+ <property>
+ <name>dfs.safemode.threshold.pct</name>
+ <value>1.0f</value>
+ <description>
+ Specifies the percentage of blocks that should satisfy
+ the minimal replication requirement defined by dfs.replication.min.
+ Values less than or equal to 0 mean not to start in safe mode.
+ Values greater than 1 will make safe mode permanent.
+ </description>
+ </property>
+
+ <property>
+ <name>dfs.balance.bandwidthPerSec</name>
+ <value>6250000</value>
+ <description>
+ Specifies the maximum amount of bandwidth that each datanode
+ can utilize for the balancing purpose in term of
+ the number of bytes per second.
+ </description>
+ </property>
+
+ <property>
+ <name>dfs.datanode.address</name>
+ <value>0.0.0.0:50010</value>
+ </property>
+
+ <property>
+ <name>dfs.datanode.http.address</name>
+ <value>0.0.0.0:50075</value>
+ </property>
+
+ <property>
+ <name>dfs.block.size</name>
+ <value>134217728</value>
+ <description>The default block size for new files.</description>
+ </property>
+
+ <property>
+ <name>dfs.http.address</name>
+ <value>hdp1.cybervisiontech.com.ua:50070</value>
+ <description>The address and the base port where the dfs namenode
+ web ui will listen on.</description>
+ <final>true</final>
+ </property>
+
+ <property>
+ <name>dfs.datanode.du.reserved</name>
+ <!-- cluster variant -->
+ <value>1073741824</value>
+ <description>Reserved space in bytes per volume. Always leave this much space free for non dfs use.
+ </description>
+ </property>
+
+ <property>
+ <name>dfs.datanode.ipc.address</name>
+ <value>0.0.0.0:8010</value>
+ <description>
+ The datanode ipc server address and port.
+ If the port is 0 then the server will start on a free port.
+ </description>
+ </property>
+
+ <property>
+ <name>dfs.blockreport.initialDelay</name>
+ <value>120</value>
+ <description>Delay for first block report in seconds.</description>
+ </property>
+
+ <property>
+ <name>dfs.namenode.handler.count</name>
+ <value>40</value>
+ <description>The number of server threads for the namenode.</description>
+ </property>
+
+ <property>
+ <name>dfs.datanode.max.xcievers</name>
+ <value>1024</value>
+ <description>PRIVATE CONFIG VARIABLE</description>
+ </property>
+
+<!-- Permissions configuration -->
+
+ <property>
+ <name>dfs.umaskmode</name>
+ <value>077</value>
+ <description>
+ The octal umask used when creating files and directories.
+ </description>
+ </property>
+
+ <property>
+ <name>dfs.web.ugi</name>
+ <!-- cluster variant -->
+ <value>gopher,gopher</value>
+ <description>The user account used by the web interface.
+ Syntax: USERNAME,GROUP1,GROUP2, ...
+ </description>
+ </property>
+
+ <property>
+ <name>dfs.permissions</name>
+ <value>true</value>
+ <description>
+ If "true", enable permission checking in HDFS.
+ If "false", permission checking is turned off,
+ but all other behavior is unchanged.
+ Switching from one parameter value to the other does not change the mode,
+ owner or group of files or directories.
+ </description>
+ </property>
+
+ <property>
+ <name>dfs.permissions.supergroup</name>
+ <value>hdfs</value>
+ <description>The name of the group of super-users.</description>
+ </property>
+
+ <property>
+ <name>dfs.namenode.handler.count</name>
+ <value>100</value>
+ <description>Added to grow Queue size so that more client connections are allowed</description>
+ </property>
+
+ <property>
+ <name>ipc.server.max.response.size</name>
+ <value>5242880</value>
+ </property>
+
+ <property>
+ <name>dfs.block.access.token.enable</name>
+ <value>true</value>
+ <description>
+ If "true", access tokens are used as capabilities for accessing datanodes.
+ If "false", no access tokens are checked on accessing datanodes.
+ </description>
+ </property>
+
+ <property>
+ <name>dfs.namenode.kerberos.principal</name>
+ <value>nn/_HOST@</value>
+ <description>
+ Kerberos principal name for the NameNode
+ </description>
+ </property>
+
+ <property>
+ <name>dfs.secondary.namenode.kerberos.principal</name>
+ <value>nn/_HOST@</value>
+ <description>
+ Kerberos principal name for the secondary NameNode.
+ </description>
+ </property>
+
+
+<!--
+ This is KRB DOMAIN specific. The FQDN of the namenode has to be mentioned.
+-->
+ <property>
+ <name>dfs.namenode.kerberos.https.principal</name>
+ <value>host/_HOST@</value>
+ <description>The Kerberos principal for the host that the NameNode runs on.</description>
+
+ </property>
+
+ <property>
+ <name>dfs.secondary.namenode.kerberos.https.principal</name>
+ <value>host/_HOST@</value>
+ <description>The Kerberos principal for the host that the secondary NameNode runs on.</description>
+
+ </property>
+
+ <property>
+ <!-- cluster variant -->
+ <name>dfs.secondary.http.address</name>
+ <value>hdp2.cybervisiontech.com.ua:50090</value>
+ <description>Address of secondary namenode web server</description>
+ </property>
+
+ <property>
+ <name>dfs.secondary.https.port</name>
+ <value>50490</value>
+ <description>The https port where secondary-namenode binds</description>
+ </property>
+
+ <property>
+ <name>dfs.web.authentication.kerberos.principal</name>
+ <value>HTTP/_HOST@</value>
+ <description>
+ The HTTP Kerberos principal used by Hadoop-Auth in the HTTP endpoint.
+ The HTTP Kerberos principal MUST start with 'HTTP/' per Kerberos
+ HTTP SPNEGO specification.
+ </description>
+ </property>
+
+ <property>
+ <name>dfs.web.authentication.kerberos.keytab</name>
+ <value>/nn.service.keytab</value>
+ <description>
+ The Kerberos keytab file with the credentials for the
+ HTTP Kerberos principal used by Hadoop-Auth in the HTTP endpoint.
+ </description>
+ </property>
+
+ <property>
+ <name>dfs.datanode.kerberos.principal</name>
+ <value>dn/_HOST@</value>
+ <description>
+ The Kerberos principal that the DataNode runs as. "_HOST" is replaced by the real host name.
+ </description>
+ </property>
+
+ <property>
+ <name>dfs.namenode.keytab.file</name>
+ <value>/nn.service.keytab</value>
+ <description>
+ Combined keytab file containing the namenode service and host principals.
+ </description>
+ </property>
+
+ <property>
+ <name>dfs.secondary.namenode.keytab.file</name>
+ <value>/nn.service.keytab</value>
+ <description>
+ Combined keytab file containing the namenode service and host principals.
+ </description>
+ </property>
+
+ <property>
+ <name>dfs.datanode.keytab.file</name>
+ <value>/dn.service.keytab</value>
+ <description>
+ The filename of the keytab file for the DataNode.
+ </description>
+ </property>
+
+ <property>
+ <name>dfs.https.port</name>
+ <value>50470</value>
+ <description>The https port where namenode binds</description>
+
+ </property>
+
+ <property>
+ <name>dfs.https.address</name>
+ <value>hdp1.cybervisiontech.com.ua:50470</value>
+ <description>The https address where namenode binds</description>
+
+ </property>
+
+ <property>
+ <name>dfs.datanode.data.dir.perm</name>
+ <value>750</value>
+ <description>The permissions that should be there on dfs.data.dir
+ directories. The datanode will not come up if the permissions are
+ different on existing dfs.data.dir directories. If the directories
+ don't exist, they will be created with this permission.</description>
+ </property>
+
+ <property>
+ <name>dfs.access.time.precision</name>
+ <value>0</value>
+ <description>The access time for HDFS file is precise upto this value.
+ The default value is 1 hour. Setting a value of 0 disables
+ access times for HDFS.
+ </description>
+ </property>
+
+ <property>
+ <name>dfs.cluster.administrators</name>
+ <value> hdfs</value>
+ <description>ACL for who all can view the default servlets in the HDFS</description>
+ </property>
+
+ <property>
+ <name>ipc.server.read.threadpool.size</name>
+ <value>5</value>
+ <description></description>
+ </property>
+
+</configuration>
http://git-wip-us.apache.org/repos/asf/ambari/blob/03273bdc/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/HDFS/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/HDFS/metainfo.xml b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/HDFS/metainfo.xml
new file mode 100644
index 0000000..da61660
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/HDFS/metainfo.xml
@@ -0,0 +1,30 @@
+<?xml version="1.0"?>
+<!--
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License. See accompanying LICENSE file.
+-->
+<metainfo>
+ <schemaVersion>2.0</schemaVersion>
+ <services>
+ <service>
+ <name>HDFS</name>
+ <extends>common-services/HDFS/1.0</extends>
+ <configuration-dependencies>
+ <config-type>core-site</config-type>
+ <config-type>global</config-type>
+ <config-type>hdfs-site</config-type>
+ <config-type>hadoop-policy</config-type>
+ <config-type>hdfs-log4j</config-type>
+ </configuration-dependencies>
+ </service>
+ </services>
+</metainfo>
http://git-wip-us.apache.org/repos/asf/ambari/blob/03273bdc/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/HDFS/package/dummy-script.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/HDFS/package/dummy-script.py b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/HDFS/package/dummy-script.py
new file mode 100644
index 0000000..35de4bb
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/HDFS/package/dummy-script.py
@@ -0,0 +1,20 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
http://git-wip-us.apache.org/repos/asf/ambari/blob/03273bdc/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/HIVE/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/HIVE/metainfo.xml b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/HIVE/metainfo.xml
new file mode 100644
index 0000000..9c122b2
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/HIVE/metainfo.xml
@@ -0,0 +1,26 @@
+<?xml version="1.0"?>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements. See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<metainfo>
+ <schemaVersion>2.0</schemaVersion>
+ <services>
+ <service>
+ <name>HIVE</name>
+ <extends>common-services/HIVE/1.0</extends>
+ </service>
+ </services>
+</metainfo>
http://git-wip-us.apache.org/repos/asf/ambari/blob/03273bdc/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/MAPREDUCE/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/MAPREDUCE/metainfo.xml b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/MAPREDUCE/metainfo.xml
new file mode 100644
index 0000000..3b0b3d9
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/MAPREDUCE/metainfo.xml
@@ -0,0 +1,23 @@
+<?xml version="1.0"?>
+<!--
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License. See accompanying LICENSE file.
+-->
+<metainfo>
+ <schemaVersion>2.0</schemaVersion>
+ <services>
+ <service>
+ <name>MAPREDUCE</name>
+ <extends>common-services/MAPREDUCE/1.0</extends>
+ </service>
+ </services>
+</metainfo>
http://git-wip-us.apache.org/repos/asf/ambari/blob/03273bdc/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/ZOOKEEPER/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/ZOOKEEPER/metainfo.xml b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/ZOOKEEPER/metainfo.xml
new file mode 100644
index 0000000..9c8a299
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/ZOOKEEPER/metainfo.xml
@@ -0,0 +1,26 @@
+<?xml version="1.0"?>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements. See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<metainfo>
+ <schemaVersion>2.0</schemaVersion>
+ <services>
+ <service>
+ <name>ZOOKEEPER</name>
+ <extends>common-services/ZOOKEEPER/1.0</extends>
+ </service>
+ </services>
+</metainfo>
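Each of the three test metainfo.xml files above exercises the same mechanism: a stack service declares <extends>common-services/SERVICE/VERSION</extends> and inherits whatever it does not override. As a rough illustration only, here is a minimal Python sketch of that resolution; the dict-based data model and resolve_service helper are hypothetical stand-ins for Ambari's actual StackModule logic:

# Hypothetical sketch of <extends> resolution for a stack service.
# Ambari's real implementation lives in StackModule.java; this only
# illustrates the merge semantics: child fields override the parent's.
def resolve_service(child, common_services):
    base_ref = child.get("extends")              # e.g. "common-services/HIVE/1.0"
    if not base_ref:
        return child
    _, name, version = base_ref.split("/")
    resolved = dict(common_services[(name, version)])
    resolved.update({k: v for k, v in child.items() if k != "extends"})
    return resolved

common = {("HIVE", "1.0"): {"name": "HIVE", "components": ["HIVE_SERVER"]}}
print(resolve_service({"name": "HIVE", "extends": "common-services/HIVE/1.0"}, common))
# -> {'name': 'HIVE', 'components': ['HIVE_SERVER']}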
[23/31] ambari git commit: Revert "AMBARI-22160. hadooplzo package
installation failed on devdeploys (aonishuk)"
Posted by jl...@apache.org.
Revert "AMBARI-22160. hadooplzo package installation failed on devdeploys (aonishuk)"
This reverts commit e037a8d7194ac97da9f746e52eb53cf15ba2415f.
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/cec9f730
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/cec9f730
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/cec9f730
Branch: refs/heads/branch-feature-AMBARI-14714
Commit: cec9f73008bb1ec101450e2915fa06c81fe64e56
Parents: e19db40
Author: Andrew Onishuk <ao...@hortonworks.com>
Authored: Tue Oct 10 16:08:14 2017 +0300
Committer: Andrew Onishuk <ao...@hortonworks.com>
Committed: Tue Oct 10 16:08:14 2017 +0300
----------------------------------------------------------------------
.../libraries/script/script.py | 45 ++++++--------------
.../HDFS/2.1.0.2.0/package/scripts/hdfs.py | 10 ++---
.../2.1.0.2.0/package/scripts/install_params.py | 6 +++
.../2.1.0.2.0/package/scripts/params_linux.py | 2 +
.../HDFS/3.0.0.3.0/package/scripts/hdfs.py | 10 ++---
.../3.0.0.3.0/package/scripts/install_params.py | 6 +++
.../3.0.0.3.0/package/scripts/params_linux.py | 2 +
.../OOZIE/4.0.0.2.0/package/scripts/oozie.py | 6 +--
.../4.0.0.2.0/package/scripts/params_linux.py | 3 ++
.../OOZIE/4.2.0.3.0/package/scripts/oozie.py | 5 +--
.../4.2.0.3.0/package/scripts/params_linux.py | 3 ++
.../stacks/2.0.6/HBASE/test_hbase_master.py | 2 -
.../src/test/python/stacks/utils/RMFTestCase.py | 4 +-
13 files changed, 48 insertions(+), 56 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ambari/blob/cec9f730/ambari-common/src/main/python/resource_management/libraries/script/script.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/script/script.py b/ambari-common/src/main/python/resource_management/libraries/script/script.py
index bf8c0dc..d5b4469 100644
--- a/ambari-common/src/main/python/resource_management/libraries/script/script.py
+++ b/ambari-common/src/main/python/resource_management/libraries/script/script.py
@@ -501,7 +501,6 @@ class Script(object):
Script.stack_version_from_distro_select = pkg_provider.get_installed_package_version(
stack_tools.get_stack_tool_package(stack_tools.STACK_SELECTOR_NAME))
-
return Script.stack_version_from_distro_select
@@ -526,20 +525,22 @@ class Script(object):
"""
This function replaces the ${stack_version} placeholder with the actual version. If the package
version is passed from the server, use that as the absolute truth.
-
+
:param name name of the package
:param repo_version actual version of the repo currently installing
"""
- if not STACK_VERSION_PLACEHOLDER in name:
- return name
-
stack_version_package_formatted = ""
+ if not repo_version:
+ repo_version = self.get_stack_version_before_packages_installed()
+
package_delimiter = '-' if OSCheck.is_ubuntu_family() else '_'
# repositoryFile is the truth
# package_version should be normalized to the form W_X_Y_Z_nnnn
package_version = default("repositoryFile/repoVersion", None)
+ if package_version is not None:
+ package_version = package_version.replace('.', package_delimiter).replace('-', package_delimiter)
# TODO remove legacy checks
if package_version is None:
@@ -549,17 +550,6 @@ class Script(object):
if package_version is None:
package_version = default("hostLevelParams/package_version", None)
- package_version = None
- if (package_version is None or '-' not in package_version) and default('/repositoryFile', None):
- self.load_available_packages()
- package_name = self.get_package_from_available(name, self.available_packages_in_repos)
- if package_name is None:
- raise Fail("Cannot match package for regexp name {0}. Available packages: {1}".format(name, self.available_packages_in_repos))
- return package_name
-
- if package_version is not None:
- package_version = package_version.replace('.', package_delimiter).replace('-', package_delimiter)
-
# The cluster effective version comes down when the version is known after the initial
# install. In that case we should not guess the version when invoking INSTALL, but
# use the supplied version to build the package_version
@@ -578,7 +568,6 @@ class Script(object):
# Wildcards cause a lot of trouble when installing packages; if the version contains wildcards we try to make it specific.
if not package_version or '*' in package_version:
- repo_version = self.get_stack_version_before_packages_installed()
stack_version_package_formatted = repo_version.replace('.', package_delimiter).replace('-', package_delimiter) if STACK_VERSION_PLACEHOLDER in name else name
package_name = name.replace(STACK_VERSION_PLACEHOLDER, stack_version_package_formatted)
@@ -771,19 +760,6 @@ class Script(object):
"""
self.install_packages(env)
- def load_available_packages(self):
- if self.available_packages_in_repos:
- return self.available_packages_in_repos
-
-
- pkg_provider = get_provider("Package")
- try:
- self.available_packages_in_repos = pkg_provider.get_available_packages_in_repos(self.get_config()['repositoryFile']['repositories'])
- except Exception as err:
- Logger.exception("Unable to load available packages")
- self.available_packages_in_repos = []
-
-
def install_packages(self, env):
"""
List of packages that are required by the service is received from the server
@@ -806,11 +782,17 @@ class Script(object):
package_list_str = config['hostLevelParams']['package_list']
agent_stack_retry_on_unavailability = bool(config['hostLevelParams']['agent_stack_retry_on_unavailability'])
agent_stack_retry_count = int(config['hostLevelParams']['agent_stack_retry_count'])
+ pkg_provider = get_provider("Package")
+ try:
+ available_packages_in_repos = pkg_provider.get_available_packages_in_repos(config['repositoryFile']['repositories'])
+ except Exception as err:
+ Logger.exception("Unable to load available packages")
+ available_packages_in_repos = []
if isinstance(package_list_str, basestring) and len(package_list_str) > 0:
package_list = json.loads(package_list_str)
for package in package_list:
if self.check_package_condition(package):
- name = self.format_package_name(package['name'])
+ name = self.get_package_from_available(package['name'], available_packages_in_repos)
# HACK: On Windows, only install ambari-metrics packages using the Choco Package Installer
# TODO: Update this once choco packages for hadoop are created. This is because service metainfo.xml supports
# <osFamily>any</osFamily>, which would cause installation failure on Windows.
@@ -1110,6 +1092,5 @@ class Script(object):
def __init__(self):
- self.available_packages_in_repos = []
if Script.instance is not None:
raise Fail("An instantiation already exists! Use, get_instance() method.")
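The net effect of the script.py changes above is to restore the pre-AMBARI-22160 version-substitution path: the ${stack_version} placeholder in a package name is replaced with a repo version whose dots and dashes are normalized to the OS-specific package delimiter. A standalone sketch of that substitution, with an is_ubuntu flag standing in for OSCheck.is_ubuntu_family():

# Sketch of the placeholder substitution restored by this revert; not the
# full Script.format_package_name, which also consults server-side defaults.
STACK_VERSION_PLACEHOLDER = "${stack_version}"

def format_package_name(name, repo_version, is_ubuntu=False):
    delimiter = '-' if is_ubuntu else '_'
    # e.g. "2.3.0.1-1234" becomes "2_3_0_1_1234" on non-Ubuntu families
    formatted = repo_version.replace('.', delimiter).replace('-', delimiter)
    return name.replace(STACK_VERSION_PLACEHOLDER, formatted)

assert format_package_name("hbase_${stack_version}", "2.3.0.1-1234") == "hbase_2_3_0_1_1234"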
http://git-wip-us.apache.org/repos/asf/ambari/blob/cec9f730/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs.py
index 07c7616..e054209 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs.py
@@ -25,7 +25,6 @@ from resource_management.core.resources import Package
from resource_management.core.source import Template
from resource_management.core.resources.service import ServiceConfig
from resource_management.libraries.resources.xml_config import XmlConfig
-from resource_management.libraries.functions.get_lzo_packages import get_lzo_packages
import os
from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
from ambari_commons import OSConst
@@ -139,11 +138,10 @@ def hdfs(name=None):
content=Template("slaves.j2")
)
- if params.lzo_enabled:
- lzo_packages = get_lzo_packages(params.stack_version_unformatted)
- Package(lzo_packages,
- retry_on_repo_unavailability=params.agent_stack_retry_on_unavailability,
- retry_count=params.agent_stack_retry_count)
+ if params.lzo_enabled and len(params.lzo_packages) > 0:
+ Package(params.lzo_packages,
+ retry_on_repo_unavailability=params.agent_stack_retry_on_unavailability,
+ retry_count=params.agent_stack_retry_count)
def install_snappy():
import params
http://git-wip-us.apache.org/repos/asf/ambari/blob/cec9f730/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/install_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/install_params.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/install_params.py
index 235f231..fe488c3 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/install_params.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/install_params.py
@@ -23,6 +23,7 @@ if OSCheck.is_windows_family():
exclude_packages = []
else:
from resource_management.libraries.functions.default import default
+ from resource_management.libraries.functions.get_lzo_packages import get_lzo_packages
from resource_management.libraries.script.script import Script
_config = Script.get_config()
@@ -31,3 +32,8 @@ else:
# The logic for LZO also exists in OOZIE's params.py
io_compression_codecs = default("/configurations/core-site/io.compression.codecs", None)
lzo_enabled = io_compression_codecs is not None and "com.hadoop.compression.lzo" in io_compression_codecs.lower()
+ lzo_packages = get_lzo_packages(stack_version_unformatted)
+
+ exclude_packages = []
+ if not lzo_enabled:
+ exclude_packages += lzo_packages
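The restored install_params.py computes the LZO package list unconditionally and excludes it whenever LZO is not enabled in io.compression.codecs. A compact sketch of that gating, with get_lzo_packages stubbed out (the real helper maps a stack version to hadooplzo package names):

# Sketch of the restored LZO gating; get_lzo_packages is a stub here.
def get_lzo_packages(stack_version):
    return ["hadooplzo", "hadooplzo-native"]

def compute_lzo_params(core_site, stack_version):
    codecs = core_site.get("io.compression.codecs") or ""
    lzo_enabled = "com.hadoop.compression.lzo" in codecs.lower()
    lzo_packages = get_lzo_packages(stack_version)
    exclude_packages = [] if lzo_enabled else list(lzo_packages)
    return lzo_enabled, lzo_packages, exclude_packages

print(compute_lzo_params({"io.compression.codecs": "com.hadoop.compression.lzo.LzoCodec"}, "2.6"))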
http://git-wip-us.apache.org/repos/asf/ambari/blob/cec9f730/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params_linux.py
index bb6349b..76b430b 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params_linux.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params_linux.py
@@ -40,6 +40,7 @@ from resource_management.libraries.functions.get_not_managed_resources import ge
from resource_management.libraries.script.script import Script
from resource_management.libraries.resources.hdfs_resource import HdfsResource
from resource_management.libraries.functions.format_jvm_option import format_jvm_option
+from resource_management.libraries.functions.get_lzo_packages import get_lzo_packages
from resource_management.libraries.functions.hdfs_utils import is_https_enabled_in_hdfs
from resource_management.libraries.functions import is_empty
from resource_management.libraries.functions.get_architecture import get_architecture
@@ -388,6 +389,7 @@ HdfsResource = functools.partial(
# The logic for LZO also exists in OOZIE's params.py
io_compression_codecs = default("/configurations/core-site/io.compression.codecs", None)
lzo_enabled = io_compression_codecs is not None and "com.hadoop.compression.lzo" in io_compression_codecs.lower()
+lzo_packages = get_lzo_packages(stack_version_unformatted)
name_node_params = default("/commandParams/namenode", None)
http://git-wip-us.apache.org/repos/asf/ambari/blob/cec9f730/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/hdfs.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/hdfs.py b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/hdfs.py
index 07c7616..e054209 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/hdfs.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/hdfs.py
@@ -25,7 +25,6 @@ from resource_management.core.resources import Package
from resource_management.core.source import Template
from resource_management.core.resources.service import ServiceConfig
from resource_management.libraries.resources.xml_config import XmlConfig
-from resource_management.libraries.functions.get_lzo_packages import get_lzo_packages
import os
from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
from ambari_commons import OSConst
@@ -139,11 +138,10 @@ def hdfs(name=None):
content=Template("slaves.j2")
)
- if params.lzo_enabled:
- lzo_packages = get_lzo_packages(params.stack_version_unformatted)
- Package(lzo_packages,
- retry_on_repo_unavailability=params.agent_stack_retry_on_unavailability,
- retry_count=params.agent_stack_retry_count)
+ if params.lzo_enabled and len(params.lzo_packages) > 0:
+ Package(params.lzo_packages,
+ retry_on_repo_unavailability=params.agent_stack_retry_on_unavailability,
+ retry_count=params.agent_stack_retry_count)
def install_snappy():
import params
http://git-wip-us.apache.org/repos/asf/ambari/blob/cec9f730/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/install_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/install_params.py b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/install_params.py
index 235f231..fe488c3 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/install_params.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/install_params.py
@@ -23,6 +23,7 @@ if OSCheck.is_windows_family():
exclude_packages = []
else:
from resource_management.libraries.functions.default import default
+ from resource_management.libraries.functions.get_lzo_packages import get_lzo_packages
from resource_management.libraries.script.script import Script
_config = Script.get_config()
@@ -31,3 +32,8 @@ else:
# The logic for LZO also exists in OOZIE's params.py
io_compression_codecs = default("/configurations/core-site/io.compression.codecs", None)
lzo_enabled = io_compression_codecs is not None and "com.hadoop.compression.lzo" in io_compression_codecs.lower()
+ lzo_packages = get_lzo_packages(stack_version_unformatted)
+
+ exclude_packages = []
+ if not lzo_enabled:
+ exclude_packages += lzo_packages
http://git-wip-us.apache.org/repos/asf/ambari/blob/cec9f730/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/params_linux.py
index 2fa6208..de735f4 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/params_linux.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/params_linux.py
@@ -40,6 +40,7 @@ from resource_management.libraries.functions.get_not_managed_resources import ge
from resource_management.libraries.script.script import Script
from resource_management.libraries.resources.hdfs_resource import HdfsResource
from resource_management.libraries.functions.format_jvm_option import format_jvm_option
+from resource_management.libraries.functions.get_lzo_packages import get_lzo_packages
from resource_management.libraries.functions.hdfs_utils import is_https_enabled_in_hdfs
from resource_management.libraries.functions import is_empty
from resource_management.libraries.functions.setup_ranger_plugin_xml import get_audit_configs, generate_ranger_service_config
@@ -377,6 +378,7 @@ HdfsResource = functools.partial(
# The logic for LZO also exists in OOZIE's params.py
io_compression_codecs = default("/configurations/core-site/io.compression.codecs", None)
lzo_enabled = io_compression_codecs is not None and "com.hadoop.compression.lzo" in io_compression_codecs.lower()
+lzo_packages = get_lzo_packages(stack_version_unformatted)
name_node_params = default("/commandParams/namenode", None)
http://git-wip-us.apache.org/repos/asf/ambari/blob/cec9f730/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie.py b/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie.py
index f215a1e..64f9d54 100644
--- a/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie.py
+++ b/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie.py
@@ -37,7 +37,6 @@ from resource_management.libraries.functions.copy_tarball import get_current_ver
from resource_management.libraries.resources.xml_config import XmlConfig
from resource_management.libraries.script.script import Script
from resource_management.libraries.functions.security_commons import update_credential_provider_path
-from resource_management.libraries.functions.get_lzo_packages import get_lzo_packages
from resource_management.core.resources.packaging import Package
from resource_management.core.shell import as_user, as_sudo, call, checked_call
from resource_management.core.exceptions import Fail
@@ -306,9 +305,8 @@ def oozie_server_specific(upgrade_type):
Execute(format('{sudo} chown {oozie_user}:{user_group} {oozie_libext_dir}/falcon-oozie-el-extension-*.jar'),
not_if = no_op_test)
- if params.lzo_enabled:
- all_lzo_packages = get_lzo_packages(params.stack_version_unformatted)
- Package(all_lzo_packages,
+ if params.lzo_enabled and len(params.all_lzo_packages) > 0:
+ Package(params.all_lzo_packages,
retry_on_repo_unavailability=params.agent_stack_retry_on_unavailability,
retry_count=params.agent_stack_retry_count)
Execute(format('{sudo} cp {hadoop_lib_home}/hadoop-lzo*.jar {oozie_lib_dir}'),
http://git-wip-us.apache.org/repos/asf/ambari/blob/cec9f730/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/params_linux.py
index a0f0672..b66e157 100644
--- a/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/params_linux.py
+++ b/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/params_linux.py
@@ -30,6 +30,7 @@ from resource_management.libraries.functions import get_port_from_url
from resource_management.libraries.functions.get_not_managed_resources import get_not_managed_resources
from resource_management.libraries.functions.setup_atlas_hook import has_atlas_in_cluster
from resource_management.libraries.script.script import Script
+from resource_management.libraries.functions.get_lzo_packages import get_lzo_packages
from resource_management.libraries.functions.expect import expect
from resource_management.libraries.resources.hdfs_resource import HdfsResource
from resource_management.libraries.functions.get_architecture import get_architecture
@@ -387,3 +388,5 @@ is_webhdfs_enabled = config['configurations']['hdfs-site']['dfs.webhdfs.enabled'
# The logic for LZO also exists in HDFS' params.py
io_compression_codecs = default("/configurations/core-site/io.compression.codecs", None)
lzo_enabled = io_compression_codecs is not None and "com.hadoop.compression.lzo" in io_compression_codecs.lower()
+
+all_lzo_packages = get_lzo_packages(stack_version_unformatted)
http://git-wip-us.apache.org/repos/asf/ambari/blob/cec9f730/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/scripts/oozie.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/scripts/oozie.py b/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/scripts/oozie.py
index 0771e93..d916d3b 100644
--- a/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/scripts/oozie.py
+++ b/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/scripts/oozie.py
@@ -275,9 +275,8 @@ def oozie_server_specific():
Execute(format('{sudo} chown {oozie_user}:{user_group} {oozie_libext_dir}/falcon-oozie-el-extension-*.jar'),
not_if = no_op_test)
- if params.lzo_enabled:
- all_lzo_packages = get_lzo_packages(params.stack_version_unformatted)
- Package(all_lzo_packages,
+ if params.lzo_enabled and len(params.all_lzo_packages) > 0:
+ Package(params.all_lzo_packages,
retry_on_repo_unavailability=params.agent_stack_retry_on_unavailability,
retry_count=params.agent_stack_retry_count)
Execute(format('{sudo} cp {hadoop_lib_home}/hadoop-lzo*.jar {oozie_lib_dir}'),
http://git-wip-us.apache.org/repos/asf/ambari/blob/cec9f730/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/scripts/params_linux.py
index 70b89b7..d30a465 100644
--- a/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/scripts/params_linux.py
+++ b/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/scripts/params_linux.py
@@ -28,6 +28,7 @@ from resource_management.libraries.functions import get_port_from_url
from resource_management.libraries.functions.get_not_managed_resources import get_not_managed_resources
from resource_management.libraries.functions.setup_atlas_hook import has_atlas_in_cluster
from resource_management.libraries.script.script import Script
+from resource_management.libraries.functions.get_lzo_packages import get_lzo_packages
from resource_management.libraries.functions.expect import expect
from resource_management.libraries.resources.hdfs_resource import HdfsResource
from resource_management.libraries.functions.get_architecture import get_architecture
@@ -369,3 +370,5 @@ is_webhdfs_enabled = config['configurations']['hdfs-site']['dfs.webhdfs.enabled'
# The logic for LZO also exists in HDFS' params.py
io_compression_codecs = default("/configurations/core-site/io.compression.codecs", None)
lzo_enabled = io_compression_codecs is not None and "com.hadoop.compression.lzo" in io_compression_codecs.lower()
+
+all_lzo_packages = get_lzo_packages(stack_version_unformatted)
http://git-wip-us.apache.org/repos/asf/ambari/blob/cec9f730/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_master.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_master.py b/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_master.py
index e32393d..2224d31 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_master.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_master.py
@@ -95,10 +95,8 @@ class TestHBaseMaster(RMFTestCase):
try_install=True,
os_type=('Redhat', '6.4', 'Final'),
checked_call_mocks = [(0, "OK.", "")],
- available_packages_in_repos = ['hbase_2_3_0_1_1234'],
)
-
# only assert that the correct package installation is attempted
self.assertResourceCalled('Package', 'hbase_2_3_0_1_1234',
retry_count=5,
http://git-wip-us.apache.org/repos/asf/ambari/blob/cec9f730/ambari-server/src/test/python/stacks/utils/RMFTestCase.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/utils/RMFTestCase.py b/ambari-server/src/test/python/stacks/utils/RMFTestCase.py
index ae33a2a..bff8642 100644
--- a/ambari-server/src/test/python/stacks/utils/RMFTestCase.py
+++ b/ambari-server/src/test/python/stacks/utils/RMFTestCase.py
@@ -80,8 +80,7 @@ class RMFTestCase(TestCase):
mocks_dict={},
try_install=False,
command_args=[],
- log_out_files=False,
- available_packages_in_repos = []):
+ log_out_files=False):
norm_path = os.path.normpath(path)
@@ -126,7 +125,6 @@ class RMFTestCase(TestCase):
Script.instance = None
script_class_inst = RMFTestCase._get_attr(script_module, classname)()
script_class_inst.log_out_files = log_out_files
- script_class_inst.available_packages_in_repos = available_packages_in_repos
method = RMFTestCase._get_attr(script_class_inst, command)
except IOError, err:
raise RuntimeError("Cannot load class %s from %s: %s" % (classname, norm_path, err.message))
[25/31] ambari git commit: AMBARI-22178. Install and upgrade options
are shown for current and already upgraded stack versions (ncole)
Posted by jl...@apache.org.
AMBARI-22178. Install and upgrade options are shown for current and already upgraded stack versions (ncole)
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/8908d3e0
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/8908d3e0
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/8908d3e0
Branch: refs/heads/branch-feature-AMBARI-14714
Commit: 8908d3e05a546cd3dea4bf84d54f087d64ce8b88
Parents: b129536
Author: Nate Cole <nc...@hortonworks.com>
Authored: Tue Oct 10 09:44:20 2017 -0400
Committer: Nate Cole <nc...@hortonworks.com>
Committed: Tue Oct 10 10:18:06 2017 -0400
----------------------------------------------------------------------
.../upgrade/HostVersionOutOfSyncListener.java | 11 +++++++++++
.../upgrade/HostVersionOutOfSyncListenerTest.java | 14 +++++++-------
2 files changed, 18 insertions(+), 7 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ambari/blob/8908d3e0/ambari-server/src/main/java/org/apache/ambari/server/events/listeners/upgrade/HostVersionOutOfSyncListener.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/events/listeners/upgrade/HostVersionOutOfSyncListener.java b/ambari-server/src/main/java/org/apache/ambari/server/events/listeners/upgrade/HostVersionOutOfSyncListener.java
index 0be036e..4ce855d 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/events/listeners/upgrade/HostVersionOutOfSyncListener.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/events/listeners/upgrade/HostVersionOutOfSyncListener.java
@@ -48,6 +48,7 @@ import org.apache.ambari.server.state.Cluster;
import org.apache.ambari.server.state.Clusters;
import org.apache.ambari.server.state.ComponentInfo;
import org.apache.ambari.server.state.RepositoryVersionState;
+import org.apache.ambari.server.state.Service;
import org.apache.ambari.server.state.ServiceComponent;
import org.apache.ambari.server.state.StackId;
import org.slf4j.Logger;
@@ -115,6 +116,10 @@ public class HostVersionOutOfSyncListener {
List<HostVersionEntity> hostVersionEntities =
hostVersionDAO.get().findByClusterAndHost(cluster.getClusterName(), event.getHostName());
+ Service service = cluster.getService(event.getServiceName());
+ ServiceComponent serviceComponent = service.getServiceComponent(event.getComponentName());
+ RepositoryVersionEntity componentRepo = serviceComponent.getDesiredRepositoryVersion();
+
for (HostVersionEntity hostVersionEntity : hostVersionEntities) {
StackEntity hostStackEntity = hostVersionEntity.getRepositoryVersion().getStack();
StackId hostStackId = new StackId(hostStackEntity);
@@ -136,6 +141,12 @@ public class HostVersionOutOfSyncListener {
continue;
}
+ // !!! we shouldn't be changing other versions to OUT_OF_SYNC if the event
+ // component repository doesn't match
+ if (!hostVersionEntity.getRepositoryVersion().equals(componentRepo)) {
+ continue;
+ }
+
switch (hostVersionEntity.getState()) {
case INSTALLED:
case NOT_REQUIRED:
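The fix amounts to a filter: a host version may flip to OUT_OF_SYNC only when its repository matches the desired repository of the component that fired the event. A small Python rendering of that guard (entities simplified to (repo, state) tuples; the real objects are JPA entities):

# Python sketch of the OUT_OF_SYNC guard added in the Java listener above.
def repos_to_mark_out_of_sync(host_versions, component_desired_repo):
    """host_versions: iterable of (repo, state) pairs."""
    out_of_sync = []
    for repo, state in host_versions:
        if repo != component_desired_repo:
            continue  # leave versions for other repositories untouched
        if state in ("INSTALLED", "NOT_REQUIRED"):
            out_of_sync.append(repo)
    return out_of_sync

print(repos_to_mark_out_of_sync(
    [("HDP-2.6.3.0", "INSTALLED"), ("HDP-2.6.0.3", "INSTALLED")], "HDP-2.6.3.0"))
# -> ['HDP-2.6.3.0']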
http://git-wip-us.apache.org/repos/asf/ambari/blob/8908d3e0/ambari-server/src/test/java/org/apache/ambari/server/events/listeners/upgrade/HostVersionOutOfSyncListenerTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/events/listeners/upgrade/HostVersionOutOfSyncListenerTest.java b/ambari-server/src/test/java/org/apache/ambari/server/events/listeners/upgrade/HostVersionOutOfSyncListenerTest.java
index 076190a..24d4f55 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/events/listeners/upgrade/HostVersionOutOfSyncListenerTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/events/listeners/upgrade/HostVersionOutOfSyncListenerTest.java
@@ -19,6 +19,7 @@
package org.apache.ambari.server.events.listeners.upgrade;
import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
import java.sql.SQLException;
import java.util.ArrayList;
@@ -335,12 +336,12 @@ public class HostVersionOutOfSyncListenerTest {
for (HostVersionEntity hostVersionEntity : hostVersions) {
RepositoryVersionEntity repoVersion = hostVersionEntity.getRepositoryVersion();
- if (repoVersion.getVersion().equals(INSTALLED_VERSION) || repoVersion.getVersion().equals(INSTALLED_VERSION_2)) {
- if (changedHosts.contains(hostVersionEntity.getHostName())) {
- assertEquals(hostVersionEntity.getState(), RepositoryVersionState.OUT_OF_SYNC);
- } else {
- assertEquals(hostVersionEntity.getState(), RepositoryVersionState.INSTALLED);
- }
+
+ if (repoVersion.getVersion().equals(INSTALLED_VERSION_2)) {
+ assertEquals(RepositoryVersionState.INSTALLED, hostVersionEntity.getState());
+ } else if (repoVersion.getVersion().equals(INSTALLED_VERSION)) {
+ assertTrue(changedHosts.contains(hostVersionEntity.getHostName()));
+ assertEquals(RepositoryVersionState.OUT_OF_SYNC, hostVersionEntity.getState());
}
}
}
@@ -598,6 +599,5 @@ public class HostVersionOutOfSyncListenerTest {
}
}
}
-
}
}
[20/31] ambari git commit: Revert "AMBARI-22162. Move out the druid
configurations from hive-site to hive-interactive-site. (Slim Bouguerra via
Swapan Shridhar)."
Posted by jl...@apache.org.
Revert "AMBARI-22162. Move out the druid configurations from hive-site to hive-interactive-site. (Slim Bouguerra via Swapan Shridhar)."
This reverts commit ce2a0a00921bde8d780c82561902773f76431fce.
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/3b8e8071
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/3b8e8071
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/3b8e8071
Branch: refs/heads/branch-feature-AMBARI-14714
Commit: 3b8e807100a988fafc4d55e7a0ad7644fac6cef2
Parents: 8cffd72
Author: Jonathan Hurley <jh...@hortonworks.com>
Authored: Mon Oct 9 16:59:04 2017 -0400
Committer: Jonathan Hurley <jh...@hortonworks.com>
Committed: Mon Oct 9 16:59:04 2017 -0400
----------------------------------------------------------------------
.../HIVE/0.12.0.2.0/configuration/hive-site.xml | 217 ++++++++++++++++++
.../configuration/hive-interactive-site.xml | 225 -------------------
.../stacks/HDP/2.6/services/stack_advisor.py | 12 +-
.../stacks/HDP/2.6/upgrades/config-upgrade.xml | 21 +-
4 files changed, 234 insertions(+), 241 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ambari/blob/3b8e8071/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/configuration/hive-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/configuration/hive-site.xml b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/configuration/hive-site.xml
index 69d1c69..d66cf4c 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/configuration/hive-site.xml
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/configuration/hive-site.xml
@@ -451,6 +451,223 @@ limitations under the License.
<on-ambari-upgrade add="false"/>
</property>
+ <!-- Druid related properties -->
+ <property>
+ <name>hive.druid.broker.address.default</name>
+ <value>localhost:8082</value>
+ <description>Host name of the Druid router if present, otherwise the broker</description>
+ <on-ambari-upgrade add="false"/>
+ <depends-on>
+ <property>
+ <type>druid-router</type>
+ <name>druid.port</name>
+ </property>
+ </depends-on>
+ </property>
+
+ <property>
+ <name>hive.druid.metadata.uri</name>
+ <value>jdbc:mysql://localhost:3355/druid</value>
+ <description>URI to connect to the database (for example jdbc:mysql://hostname:port/DBName)</description>
+ <on-ambari-upgrade add="false"/>
+ <depends-on>
+ <property>
+ <type>druid-common</type>
+ <name>druid.metadata.storage.connector.connectURI</name>
+ </property>
+ </depends-on>
+ </property>
+
+ <property>
+ <name>hive.druid.coordinator.address.default</name>
+ <value>localhost:8082</value>
+ <description>Host name of the Druid router if present, otherwise the broker</description>
+ <on-ambari-upgrade add="false"/>
+ <depends-on>
+ <property>
+ <type>druid-coordinator</type>
+ <name>druid.port</name>
+ </property>
+ </depends-on>
+ </property>
+
+ <property>
+ <name>hive.druid.metadata.password</name>
+ <value>{{druid_metadata_password}}</value>
+ <property-type>PASSWORD</property-type>
+ <display-name>Druid Metadata Password</display-name>
+ <description>Druid metadata storage password</description>
+ <value-attributes>
+ <type>password</type>
+ <empty-value-valid>true</empty-value-valid>
+ </value-attributes>
+ <on-ambari-upgrade add="false"/>
+ </property>
+
+ <property>
+ <name>hive.druid.metadata.username</name>
+ <value>druid</value>
+ <description>Username used to connect to the Druid metadata storage</description>
+ <on-ambari-upgrade add="false"/>
+ <depends-on>
+ <property>
+ <type>druid-common</type>
+ <name>druid.metadata.storage.connector.user</name>
+ </property>
+ </depends-on>
+ </property>
+
+ <property>
+ <name>hive.druid.indexer.segments.granularity</name>
+ <display-name>Default Granularity for the Druid segments</display-name>
+ <value-attributes>
+ <type>value-list</type>
+ <entries>
+ <entry>
+ <value>YEAR</value>
+ </entry>
+ <entry>
+ <value>MONTH</value>
+ </entry>
+ <entry>
+ <value>WEEK</value>
+ </entry>
+ <entry>
+ <value>DAY</value>
+ </entry>
+ <entry>
+ <value>HOUR</value>
+ </entry>
+ <entry>
+ <value>MINUTE</value>
+ </entry>
+ <entry>
+ <value>SECOND</value>
+ </entry>
+ </entries>
+ </value-attributes>
+ <value>MINUTE</value>
+ <description>Default granularity for the segments created by the Druid storage handler; this can be overridden per table using the table property druid.segment.granularity</description>
+ <on-ambari-upgrade add="false"/>
+ </property>
+ <property>
+ <name>hive.druid.indexer.partition.size.max</name>
+ <value>5000000</value>
+ <description>Maximum number of records per segment partition</description>
+ <on-ambari-upgrade add="false"/>
+ </property>
+ <property>
+ <name>hive.druid.indexer.memory.rownum.max</name>
+ <value>75000</value>
+ <description>Maximum number of records in memory while storing data in Druid</description>
+ <on-ambari-upgrade add="false"/>
+ </property>
+
+ <property>
+ <name>hive.druid.select.distribute</name>
+ <value>true</value>
+ <description>If set to true, the execution of Druid Select queries is distributed</description>
+ <on-ambari-upgrade add="false"/>
+ <value-attributes>
+ <type>boolean</type>
+ </value-attributes>
+ </property>
+
+ <property>
+ <name>hive.druid.basePersistDirectory</name>
+ <value></value>
+ <description>
+ Local temporary directory used to persist intermediate indexing state;
+ if empty (recommended), defaults to the JVM system property java.io.tmpdir.
+ </description>
+ <value-attributes>
+ <empty-value-valid>true</empty-value-valid>
+ </value-attributes>
+ <on-ambari-upgrade add="false"/>
+ </property>
+
+ <property>
+ <name>hive.druid.storage.storageDirectory</name>
+ <value>{{druid_storage_dir}}</value>
+ <description>
+ Druid deep storage location for segments.
+ </description>
+ <on-ambari-upgrade add="false"/>
+ </property>
+ <property>
+ <name>hive.druid.metadata.db.type</name>
+ <display-name>Druid metadata storage type</display-name>
+ <value-attributes>
+ <overridable>false</overridable>
+ <type>value-list</type>
+ <entries>
+ <entry>
+ <value>mysql</value>
+ <label>MYSQL</label>
+ </entry>
+ <entry>
+ <value>postgresql</value>
+ <label>POSTGRESQL</label>
+ </entry>
+ </entries>
+ </value-attributes>
+ <value>mysql</value>
+ <depends-on>
+ <property>
+ <type>druid-common</type>
+ <name>druid.metadata.storage.type</name>
+ </property>
+ </depends-on>
+ <description>Druid metadata storage type</description>
+ <on-ambari-upgrade add="false"/>
+ </property>
+
+ <property>
+ <name>hive.druid.passiveWaitTimeMs</name>
+ <value>30000</value>
+ <description>
+ Wait time in ms; defaults to 30 seconds (30000 ms).
+ </description>
+ <on-ambari-upgrade add="false"/>
+ </property>
+
+ <property>
+ <name>hive.druid.working.directory</name>
+ <value>/tmp/druid-indexing</value>
+ <description>
+ Default HDFS working directory used to store intermediate metadata.
+ </description>
+ <on-ambari-upgrade add="false"/>
+ </property>
+
+ <property>
+ <name>hive.druid.maxTries</name>
+ <value>5</value>
+ <description>
+ Maximum number of HTTP call retries before giving up.
+ </description>
+ <on-ambari-upgrade add="false"/>
+ </property>
+
+ <property>
+ <name>hive.druid.bitmap.type</name>
+ <display-name>Druid bitmap type</display-name>
+ <value-attributes>
+ <type>value-list</type>
+ <entries>
+ <entry>
+ <value>roaring</value>
+ </entry>
+ <entry>
+ <value>concise</value>
+ </entry>
+ </entries>
+ </value-attributes>
+ <value>roaring</value>
+ <description>Coding algorithm Druid uses to encode the bitmaps</description>
+ <on-ambari-upgrade add="false"/>
+ </property>
+
<!-- This property is removed in HDP 2.5 and higher. -->
<property>
<name>atlas.rest.address</name>
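Several values above ({{druid_metadata_password}}, {{druid_storage_dir}}) are template placeholders resolved from agent-side params at deploy time. As a rough illustration, a hypothetical resolver with the same substitution semantics (Ambari actually uses Jinja2-style templating):

# Hypothetical {{...}} resolver illustrating how templated config values
# could be filled in from params; not Ambari's actual templating code.
import re

def resolve(value, params):
    return re.sub(r"\{\{(\w+)\}\}", lambda m: str(params[m.group(1)]), value)

print(resolve("{{druid_metadata_password}}", {"druid_metadata_password": "s3cret"}))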
http://git-wip-us.apache.org/repos/asf/ambari/blob/3b8e8071/ambari-server/src/main/resources/stacks/HDP/2.6/services/HIVE/configuration/hive-interactive-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/services/HIVE/configuration/hive-interactive-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.6/services/HIVE/configuration/hive-interactive-site.xml
index 64cef3e..aae2efa 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.6/services/HIVE/configuration/hive-interactive-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/services/HIVE/configuration/hive-interactive-site.xml
@@ -124,229 +124,4 @@ limitations under the License.
<on-ambari-upgrade add="false"/>
</property>
- <!-- Druid related properties -->
- <property>
- <name>hive.druid.broker.address.default</name>
- <value>localhost:8082</value>
- <description>Host name of druid router if any or broker</description>
- <on-ambari-upgrade add="false"/>
- <depends-on>
- <property>
- <type>druid-router</type>
- <name>druid.port</name>
- </property>
- </depends-on>
- </property>
-
- <property>
- <name>hive.druid.metadata.uri</name>
- <value>jdbc:mysql://localhost:3355/druid</value>
- <description>URI to connect to the database (for example jdbc:mysql://hostname:port/DBName)</description>
- <on-ambari-upgrade add="false"/>
- <depends-on>
- <property>
- <type>druid-common</type>
- <name>druid.metadata.storage.connector.connectURI</name>
- </property>
- </depends-on>
- </property>
-
- <property>
- <name>hive.druid.coordinator.address.default</name>
- <value>localhost:8082</value>
- <description>Host name of druid router if any or broker</description>
- <on-ambari-upgrade add="false"/>
- <depends-on>
- <property>
- <type>druid-coordinator</type>
- <name>druid.port</name>
- </property>
- </depends-on>
- </property>
-
- <property>
- <name>hive.druid.metadata.password</name>
- <value>{{druid_metadata_password}}</value>
- <property-type>PASSWORD</property-type>
- <display-name>Druid Metadata Password</display-name>
- <description>Druid meta data storage password</description>
- <value-attributes>
- <type>password</type>
- <empty-value-valid>true</empty-value-valid>
- </value-attributes>
- <on-ambari-upgrade add="false"/>
- </property>
-
- <property>
- <name>hive.druid.metadata.username</name>
- <value>druid</value>
- <description>Username used to connect to druid metadata storage</description>
- <on-ambari-upgrade add="false"/>
- <depends-on>
- <property>
- <type>druid-common</type>
- <name>druid.metadata.storage.connector.user</name>
- </property>
- </depends-on>
- </property>
-
- <property>
- <name>hive.druid.indexer.segments.granularity</name>
- <display-name>Default Granularity for the Druid segments</display-name>
- <value-attributes>
- <type>value-list</type>
- <entries>
- <entry>
- <value>YEAR</value>
- </entry>
- <entry>
- <value>MONTH</value>
- </entry>
- <entry>
- <value>WEEK</value>
- </entry>
- <entry>
- <value>DAY</value>
- </entry>
- <entry>
- <value>HOUR</value>
- </entry>
- <entry>
- <value>MINUTE</value>
- </entry>
- <entry>
- <value>SECOND</value>
- </entry>
- </entries>
- </value-attributes>
- <value>DAY</value>
- <description>Default Granularity for the segments created by the Druid storage handler, this can be overridden per table using table property druid.segment.granularity </description>
- <on-ambari-upgrade add="false"/>
- </property>
- <property>
- <name>hive.druid.indexer.partition.size.max</name>
- <value>1000000</value>
- <description>Maximum number of records per segment partition</description>
- <on-ambari-upgrade add="false"/>
- </property>
- <property>
- <name>hive.druid.indexer.memory.rownum.max</name>
- <value>75000</value>
- <description>Maximum number of records in memory while storing data in Druid</description>
- <on-ambari-upgrade add="false"/>
- </property>
-
- <property>
- <name>hive.druid.select.distribute</name>
- <value>true</value>
- <description>If it is set to true, we distribute the execution of Druid Select queries</description>
- <on-ambari-upgrade add="false"/>
- <value-attributes>
- <type>boolean</type>
- </value-attributes>
- </property>
-
- <property>
- <name>hive.druid.basePersistDirectory</name>
- <value></value>
- <description>
- Local temporary directory used to persist intermediate indexing state,
- if empty (recommended) will default to JVM system property java.io.tmpdir.
- </description>
- <value-attributes>
- <empty-value-valid>true</empty-value-valid>
- </value-attributes>
- <on-ambari-upgrade add="false"/>
- </property>
-
- <property>
- <name>hive.druid.storage.storageDirectory</name>
- <value>{{druid_storage_dir}}</value>
- <description>
- Druid deep storage location for segments.
- </description>
- <on-ambari-upgrade add="false"/>
- </property>
- <property>
- <name>hive.druid.metadata.db.type</name>
- <display-name>Druid metadata storage type </display-name>
- <value-attributes>
- <overridable>false</overridable>
- <type>value-list</type>
- <entries>
- <entry>
- <value>mysql</value>
- <label>MYSQL</label>
- </entry>
- <entry>
- <value>postgresql</value>
- <label>POSTGRESQL</label>
- </entry>
- </entries>
- </value-attributes>
- <value>mysql</value>
- <depends-on>
- <property>
- <type>druid-common</type>
- <name>druid.metadata.storage.type</name>
- </property>
- </depends-on>
- <description>Druid metadata storage type</description>
- <on-ambari-upgrade add="false"/>
- </property>
-
- <property>
- <name>hive.druid.passiveWaitTimeMs</name>
- <value>30000</value>
- <description>
- Wait time in ms default to 30 seconds.
- </description>
- <on-ambari-upgrade add="false"/>
- </property>
-
- <property>
- <name>hive.druid.working.directory</name>
- <value>/tmp/druid-indexing</value>
- <description>
- Default hdfs working directory used to store some intermediate metadata.
- </description>
- <on-ambari-upgrade add="false"/>
- </property>
-
- <property>
- <name>hive.druid.maxTries</name>
- <value>5</value>
- <description>
- Maximum number of http call retries before giving up.
- </description>
- <on-ambari-upgrade add="false"/>
- </property>
-
- <property>
- <name>hive.druid.bitmap.type</name>
- <display-name>Druid metadata storage type </display-name>
- <value-attributes>
- <type>value-list</type>
- <entries>
- <entry>
- <value>roaring</value>
- </entry>
- <entry>
- <value>concise</value>
- </entry>
- </entries>
- </value-attributes>
- <value>roaring</value>
- <description>Druid Coding algorithm use to encode the bitmaps</description>
- <on-ambari-upgrade add="false"/>
- </property>
-
- <property>
- <name>hive.druid.http.read.timeout</name>
- <value>PT10M</value>
- <description>
- Maximum number of http call retries before giving up.
- </description>
- <on-ambari-upgrade add="false"/>
- </property>
</configuration>
http://git-wip-us.apache.org/repos/asf/ambari/blob/3b8e8071/ambari-server/src/main/resources/stacks/HDP/2.6/services/stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/services/stack_advisor.py b/ambari-server/src/main/resources/stacks/HDP/2.6/services/stack_advisor.py
index b634e71..0d2925e 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.6/services/stack_advisor.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/services/stack_advisor.py
@@ -605,7 +605,7 @@ class HDP26StackAdvisor(HDP25StackAdvisor):
# druid is not in list of services to be installed
servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
if 'DRUID' in servicesList:
- putHiveInteractiveSiteProperty = self.putProperty(configurations, "hive-interactive-site", services)
+ putHiveSiteProperty = self.putProperty(configurations, "hive-site", services)
if 'druid-coordinator' in services['configurations']:
component_hosts = self.getHostsWithComponent("DRUID", 'DRUID_COORDINATOR', services, hosts)
if component_hosts is not None and len(component_hosts) > 0:
@@ -642,11 +642,11 @@ class HDP26StackAdvisor(HDP25StackAdvisor):
else:
druid_metadata_user = ""
- putHiveInteractiveSiteProperty('hive.druid.broker.address.default', druid_broker_host_port)
- putHiveInteractiveSiteProperty('hive.druid.coordinator.address.default', druid_coordinator_host_port)
- putHiveInteractiveSiteProperty('hive.druid.metadata.uri', druid_metadata_uri)
- putHiveInteractiveSiteProperty('hive.druid.metadata.username', druid_metadata_user)
- putHiveInteractiveSiteProperty('hive.druid.metadata.db.type', druid_metadata_type)
+ putHiveSiteProperty('hive.druid.broker.address.default', druid_broker_host_port)
+ putHiveSiteProperty('hive.druid.coordinator.address.default', druid_coordinator_host_port)
+ putHiveSiteProperty('hive.druid.metadata.uri', druid_metadata_uri)
+ putHiveSiteProperty('hive.druid.metadata.username', druid_metadata_user)
+ putHiveSiteProperty('hive.druid.metadata.db.type', druid_metadata_type)
def recommendHBASEConfigurations(self, configurations, clusterData, services, hosts):
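The revert points the Druid recommendations back at hive-site via putProperty, which is essentially a closure bound to one config type. A simplified sketch of that pattern (the real putProperty also tracks property attributes and the requesting service):

# Simplified sketch of the stack advisor's putProperty closure pattern.
def put_property(configurations, config_type):
    site = configurations.setdefault(config_type, {}).setdefault("properties", {})
    def put(key, value):
        site[key] = value
    return put

configurations = {}
put_hive_site = put_property(configurations, "hive-site")
put_hive_site("hive.druid.broker.address.default", "broker-host:8082")
print(configurations)
# -> {'hive-site': {'properties': {'hive.druid.broker.address.default': 'broker-host:8082'}}}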
http://git-wip-us.apache.org/repos/asf/ambari/blob/3b8e8071/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/config-upgrade.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/config-upgrade.xml b/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/config-upgrade.xml
index fd7e438..2b4c656 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/config-upgrade.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/config-upgrade.xml
@@ -75,19 +75,11 @@
<set key ="atlas.jaas.ticketBased-KafkaClient.option.useTicketCache" value="true"
if-type="cluster-env" if-key="security_enabled" if-value="true"/>
</definition>
- </changes>
- </component>
- <component name="HIVE_SERVER_INTERACTIVE">
- <changes>
- <definition xsi:type="configure" id="llap_update_tez_shuffle_ssl_enable" summary="Update additional LLAP-Tez settings">
- <type>tez-interactive-site</type>
- <set key="tez.runtime.shuffle.ssl.enable" value="false"/>
- </definition>
<definition xsi:type="configure" id="hdp_2_6_maint_druid_config_for_hive_hook" summary="Updating druid hive related properties">
<type>hive-site</type>
<set key="hive.druid.metadata.password" value="{{druid_metadata_password}}" if-type="druid-common"/>
- <set key="hive.druid.indexer.segments.granularity" value="DAY" if-type="druid-common"/>
- <set key="hive.druid.indexer.partition.size.max" value="1000000" if-type="druid-common"/>
+ <set key="hive.druid.indexer.segments.granularity" value="MINUTE" if-type="druid-common"/>
+ <set key="hive.druid.indexer.partition.size.max" value="5000000" if-type="druid-common"/>
<set key="hive.druid.indexer.memory.rownum.max" value="75000" if-type="druid-common"/>
<set key="hive.druid.select.distribute" value="true" if-type="druid-common"/>
<set key="hive.druid.basePersistDirectory" value="" if-type="druid-common"/>
@@ -95,6 +87,15 @@
<set key="hive.druid.passiveWaitTimeMs" value="30000" if-type="druid-common"/>
<set key="hive.druid.working.directory" value="/tmp/druid-indexing" if-type="druid-common"/>
<set key="hive.druid.bitmap.type" value="roaring" if-type="druid-common"/>
+
+ </definition>
+ </changes>
+ </component>
+ <component name="HIVE_SERVER_INTERACTIVE">
+ <changes>
+ <definition xsi:type="configure" id="llap_update_tez_shuffle_ssl_enable" summary="Update additional LLAP-Tez settings">
+ <type>tez-interactive-site</type>
+ <set key="tez.runtime.shuffle.ssl.enable" value="false"/>
</definition>
</changes>
</component>
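Each <set ... if-type="druid-common"/> entry above only takes effect when the guarding config type exists in the cluster. A hypothetical evaluator with that conditional-set semantics (the real logic lives in Ambari's server-side upgrade framework):

# Hypothetical evaluator for config-upgrade "set" directives with if-type.
def apply_set(cluster_configs, target_type, key, value, if_type=None):
    if if_type is not None and if_type not in cluster_configs:
        return False  # guard type absent: skip the change
    cluster_configs.setdefault(target_type, {})[key] = value
    return True

configs = {"druid-common": {}, "hive-site": {}}
apply_set(configs, "hive-site", "hive.druid.bitmap.type", "roaring", if_type="druid-common")
print(configs["hive-site"])  # -> {'hive.druid.bitmap.type': 'roaring'}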
[14/31] ambari git commit: AMBARI-22155. Intermittent failure of
FlumeTimelineMetricsSinkTest
Posted by jl...@apache.org.
AMBARI-22155. Intermittent failure of FlumeTimelineMetricsSinkTest
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/b0c24a51
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/b0c24a51
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/b0c24a51
Branch: refs/heads/branch-feature-AMBARI-14714
Commit: b0c24a5153949e4c0cbf70d217276416515c1211
Parents: 75465a8
Author: Attila Doroszlai <ad...@hortonworks.com>
Authored: Mon Oct 9 18:56:36 2017 +0200
Committer: Attila Doroszlai <ad...@hortonworks.com>
Committed: Mon Oct 9 18:57:08 2017 +0200
----------------------------------------------------------------------
.../flume/FlumeTimelineMetricsSinkTest.java | 27 ++++++++++++--------
1 file changed, 17 insertions(+), 10 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ambari/blob/b0c24a51/ambari-metrics/ambari-metrics-flume-sink/src/test/java/org/apache/hadoop/metrics2/sink/flume/FlumeTimelineMetricsSinkTest.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-flume-sink/src/test/java/org/apache/hadoop/metrics2/sink/flume/FlumeTimelineMetricsSinkTest.java b/ambari-metrics/ambari-metrics-flume-sink/src/test/java/org/apache/hadoop/metrics2/sink/flume/FlumeTimelineMetricsSinkTest.java
index bd4ae6a..99da43f 100644
--- a/ambari-metrics/ambari-metrics-flume-sink/src/test/java/org/apache/hadoop/metrics2/sink/flume/FlumeTimelineMetricsSinkTest.java
+++ b/ambari-metrics/ambari-metrics-flume-sink/src/test/java/org/apache/hadoop/metrics2/sink/flume/FlumeTimelineMetricsSinkTest.java
@@ -18,17 +18,21 @@
package org.apache.hadoop.metrics2.sink.flume;
+import static org.easymock.EasyMock.anyObject;
import static org.easymock.EasyMock.anyString;
import static org.easymock.EasyMock.createNiceMock;
+import static org.easymock.EasyMock.eq;
import static org.easymock.EasyMock.expect;
import static org.powermock.api.easymock.PowerMock.mockStatic;
import static org.powermock.api.easymock.PowerMock.replay;
-import static org.powermock.api.easymock.PowerMock.replayAll;
import static org.powermock.api.easymock.PowerMock.resetAll;
import static org.powermock.api.easymock.PowerMock.verifyAll;
import java.net.InetAddress;
import java.util.Collections;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.TimeUnit;
import org.apache.flume.Context;
import org.apache.flume.instrumentation.util.JMXPollUtil;
@@ -43,7 +47,7 @@ import org.powermock.core.classloader.annotations.PrepareForTest;
import org.powermock.modules.junit4.PowerMockRunner;
@RunWith(PowerMockRunner.class)
-@PrepareForTest(JMXPollUtil.class)
+@PrepareForTest({JMXPollUtil.class, Executors.class, FlumeTimelineMetricsSink.class})
public class FlumeTimelineMetricsSinkTest {
@Test
public void testNonNumericMetricMetricExclusion() throws InterruptedException {
@@ -76,7 +80,7 @@ public class FlumeTimelineMetricsSinkTest {
flumeTimelineMetricsSink.setMetricsCaches(Collections.singletonMap("SINK",timelineMetricsCache));
EasyMock.expect(timelineMetricsCache.getTimelineMetric("key1"))
.andReturn(new TimelineMetric()).once();
- timelineMetricsCache.putTimelineMetric(EasyMock.anyObject(TimelineMetric.class));
+ timelineMetricsCache.putTimelineMetric(anyObject(TimelineMetric.class));
EasyMock.expectLastCall().once();
return timelineMetricsCache;
}
@@ -86,15 +90,18 @@ public class FlumeTimelineMetricsSinkTest {
FlumeTimelineMetricsSink flumeTimelineMetricsSink = new FlumeTimelineMetricsSink();
TimelineMetricsCache timelineMetricsCache = getTimelineMetricsCache(flumeTimelineMetricsSink);
flumeTimelineMetricsSink.setPollFrequency(1);
- mockStatic(JMXPollUtil.class);
- EasyMock.expect(JMXPollUtil.getAllMBeans()).andReturn(
- Collections.singletonMap("component1", Collections.singletonMap("key1", "42"))).once();
- flumeTimelineMetricsSink.start();
- flumeTimelineMetricsSink.stop();
- replay(JMXPollUtil.class, timelineMetricsCache);
+ mockStatic(Executors.class);
+ ScheduledExecutorService executor = createNiceMock(ScheduledExecutorService.class);
+ expect(Executors.newSingleThreadScheduledExecutor()).andReturn(executor);
+ FlumeTimelineMetricsSink.TimelineMetricsCollector collector = anyObject();
+ TimeUnit unit = anyObject();
+ expect(executor.scheduleWithFixedDelay(collector, eq(0), eq(1), unit)).andReturn(null);
+ executor.shutdown();
+ replay(timelineMetricsCache, Executors.class, executor);
+
flumeTimelineMetricsSink.start();
- Thread.sleep(5);
flumeTimelineMetricsSink.stop();
+
verifyAll();
}
[26/31] ambari git commit: AMBARI-21406. Refresh configurations
without restart command (magyari_sandor)
Posted by jl...@apache.org.
AMBARI-21406. Refresh configurations without restart command (magyari_sandor)
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/57682942
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/57682942
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/57682942
Branch: refs/heads/branch-feature-AMBARI-14714
Commit: 57682942b7368a8de3f0a76f65e45b13c1626deb
Parents: 8908d3e
Author: Sandor Magyari <sm...@hortonworks.com>
Authored: Fri Aug 25 14:08:55 2017 +0200
Committer: Sandor Magyari <sm...@hortonworks.com>
Committed: Tue Oct 10 16:46:30 2017 +0200
----------------------------------------------------------------------
.../libraries/script/script.py | 23 ++-
.../AmbariCustomCommandExecutionHelper.java | 10 ++
.../AmbariManagementControllerImpl.java | 4 +-
.../ServiceComponentHostResponse.java | 15 ++
.../internal/HostComponentResourceProvider.java | 4 +
.../ambari/server/metadata/ActionMetadata.java | 1 +
.../apache/ambari/server/stack/StackModule.java | 29 +++-
.../ambari/server/state/ConfigHelper.java | 164 ++++++++++++++++++-
.../ambari/server/state/PropertyInfo.java | 29 ++++
.../ambari/server/state/RefreshCommand.java | 52 ++++++
.../state/RefreshCommandConfiguration.java | 71 ++++++++
.../apache/ambari/server/state/StackInfo.java | 10 ++
.../svccomphost/ServiceComponentHostImpl.java | 10 ++
.../HDFS/2.1.0.2.0/configuration/core-site.xml | 12 ++
.../HDFS/2.1.0.2.0/configuration/hdfs-site.xml | 3 +
.../HDFS/2.1.0.2.0/package/scripts/datanode.py | 13 +-
.../HDFS/2.1.0.2.0/package/scripts/hdfs.py | 52 +++++-
.../2.1.0.2.0/package/scripts/hdfs_client.py | 5 +
.../2.1.0.2.0/package/scripts/hdfs_namenode.py | 21 +++
.../HDFS/2.1.0.2.0/package/scripts/namenode.py | 21 ++-
.../HDFS/2.1.0.2.0/package/scripts/snamenode.py | 10 ++
.../HDFS/3.0.0.3.0/configuration/hdfs-site.xml | 6 +
.../HDFS/3.0.0.3.0/package/scripts/datanode.py | 13 +-
.../HDFS/3.0.0.3.0/package/scripts/hdfs.py | 52 +++++-
.../3.0.0.3.0/package/scripts/hdfs_client.py | 5 +
.../3.0.0.3.0/package/scripts/hdfs_namenode.py | 20 +++
.../HDFS/3.0.0.3.0/package/scripts/namenode.py | 21 ++-
.../HDFS/3.0.0.3.0/package/scripts/snamenode.py | 10 ++
.../src/main/resources/configuration-schema.xsd | 12 ++
.../src/main/resources/properties.json | 1 +
.../services/HDFS/configuration/hdfs-site.xml | 3 +
.../ambari/server/state/ConfigHelperTest.java | 76 ++++++++-
.../ambari/server/state/PropertyInfoTest.java | 20 +++
.../python/stacks/2.0.6/HDFS/test_datanode.py | 17 ++
.../python/stacks/2.0.6/HDFS/test_namenode.py | 33 ++++
.../services/HDFS/configuration/hdfs-site.xml | 8 +
36 files changed, 835 insertions(+), 21 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ambari/blob/57682942/ambari-common/src/main/python/resource_management/libraries/script/script.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/script/script.py b/ambari-common/src/main/python/resource_management/libraries/script/script.py
index bf8c0dc..12e6f98 100644
--- a/ambari-common/src/main/python/resource_management/libraries/script/script.py
+++ b/ambari-common/src/main/python/resource_management/libraries/script/script.py
@@ -1006,12 +1006,33 @@ class Script(object):
def configure(self, env, upgrade_type=None, config_dir=None):
"""
- To be overridden by subclasses
+ To be overridden by subclasses (may invoke save_configs)
:param upgrade_type: only valid during RU/EU, otherwise will be None
:param config_dir: for some clients during RU, the location to save configs to, otherwise None
"""
self.fail_with_error('configure method isn\'t implemented')
+ def save_configs(self, env):
+ """
+ To be overridden by subclasses
+ Creates / updates configuration files
+ """
+ self.fail_with_error('save_configs method isn\'t implemented')
+
+ def reconfigure(self, env):
+ """
+ Default implementation of RECONFIGURE action which may be overridden by subclasses
+ """
+ Logger.info("Refresh config files ...")
+ self.save_configs(env)
+
+ config = self.get_config()
+ if "reconfigureAction" in config["commandParams"] and config["commandParams"]["reconfigureAction"] is not None:
+ reconfigure_action = config["commandParams"]["reconfigureAction"]
+ Logger.info("Call %s" % reconfigure_action)
+ method = self.choose_method_to_execute(reconfigure_action)
+ method(env)
+
def generate_configs_get_template_file_content(self, filename, dicts):
config = self.get_config()
content = ''
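
The RECONFIGURE flow above means a component script only has to implement save_configs() and, optionally, a named refresh method: Script.reconfigure() re-renders the config files and then dispatches to whatever method name arrives in commandParams["reconfigureAction"]. A minimal sketch of such a script, assuming only the resource_management API used in this patch (the MyDaemon class and its body are hypothetical):

from resource_management.libraries.script.script import Script
from resource_management.core.logger import Logger

class MyDaemon(Script):
  def save_configs(self, env):
    # Re-render this component's config files from the current desired configs.
    import params
    env.set_params(params)
    # ... write config files here, e.g. with XmlConfig(...) ...

  def reload_configs(self, env):
    # Called by Script.reconfigure() when reconfigureAction == "reload_configs".
    import params
    env.set_params(params)
    Logger.info("Reloading configs without restart")

if __name__ == "__main__":
  MyDaemon().execute()
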
http://git-wip-us.apache.org/repos/asf/ambari/blob/57682942/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelper.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelper.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelper.java
index d0dd7e0..e12477e 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelper.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelper.java
@@ -89,6 +89,7 @@ import org.apache.ambari.server.state.HostState;
import org.apache.ambari.server.state.MaintenanceState;
import org.apache.ambari.server.state.PropertyInfo;
import org.apache.ambari.server.state.PropertyInfo.PropertyType;
+import org.apache.ambari.server.state.RefreshCommandConfiguration;
import org.apache.ambari.server.state.RepositoryInfo;
import org.apache.ambari.server.state.Service;
import org.apache.ambari.server.state.ServiceComponent;
@@ -507,6 +508,15 @@ public class AmbariCustomCommandExecutionHelper {
StageUtils.useAmbariJdkInCommandParams(commandParams, configs);
roleParams.put(COMPONENT_CATEGORY, componentInfo.getCategory());
+ // set reconfigureAction in case of a RECONFIGURE command if there are any
+ if (commandName.equals("RECONFIGURE")) {
+ String refreshConfigsCommand = configHelper.getRefreshConfigsCommand(cluster, hostName, serviceName, componentName);
+ if (refreshConfigsCommand != null && !refreshConfigsCommand.equals(RefreshCommandConfiguration.REFRESH_CONFIGS)) {
+ LOG.info("Refreshing configs for {}/{} with command: ", componentName, hostName, refreshConfigsCommand);
+ commandParams.put("reconfigureAction", refreshConfigsCommand);
+ }
+ }
+
execCmd.setCommandParams(commandParams);
execCmd.setRoleParams(roleParams);
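
On the agent side, this surfaces as one extra key in the command's commandParams; the Python Script.reconfigure() shown earlier looks it up and dispatches on it. A trimmed, hypothetical example of what arrives with a RECONFIGURE command (only the reconfigureAction key is defined by this patch):

command_params = {
  # ... the usual Ambari command parameters ...
  "reconfigureAction": "reloadproxyusers",  # present only when a non-default refresh command applies
}
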
http://git-wip-us.apache.org/repos/asf/ambari/blob/57682942/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
index 5642575..8c4888c 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
@@ -4795,7 +4795,9 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
properties = ambariMetaInfo.getServiceProperties(stackName, stackVersion, serviceName);
}
for (PropertyInfo property: properties) {
- response.add(property.convertToResponse());
+ if (property.shouldBeConfigured()) {
+ response.add(property.convertToResponse());
+ }
}
return response;
http://git-wip-us.apache.org/repos/asf/ambari/blob/57682942/ambari-server/src/main/java/org/apache/ambari/server/controller/ServiceComponentHostResponse.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/ServiceComponentHostResponse.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/ServiceComponentHostResponse.java
index 7b75e06..bc67117 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/ServiceComponentHostResponse.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/ServiceComponentHostResponse.java
@@ -40,6 +40,7 @@ public class ServiceComponentHostResponse {
private String desiredRepositoryVersion;
private String desiredState;
private boolean staleConfig = false;
+ private boolean reloadConfig = false;
private String adminState = null;
private String maintenanceState = null;
private UpgradeState upgradeState = UpgradeState.NONE;
@@ -283,6 +284,20 @@ public class ServiceComponentHostResponse {
}
/**
+ * @return true if configs are reloadable without a RESTART command
+ */
+ public boolean isReloadConfig() {
+ return reloadConfig;
+ }
+
+ /**
+ * @param reloadConfig true if configs are reloadable without a RESTART command
+ */
+ public void setReloadConfig(boolean reloadConfig) {
+ this.reloadConfig = reloadConfig;
+ }
+
+ /**
* @return the maintenance state
*/
public String getMaintenanceState() {
http://git-wip-us.apache.org/repos/asf/ambari/blob/57682942/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/HostComponentResourceProvider.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/HostComponentResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/HostComponentResourceProvider.java
index 48e15eb..6708560 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/HostComponentResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/HostComponentResourceProvider.java
@@ -106,6 +106,8 @@ public class HostComponentResourceProvider extends AbstractControllerResourcePro
= PropertyHelper.getPropertyId("HostRoles", "actual_configs");
public static final String HOST_COMPONENT_STALE_CONFIGS_PROPERTY_ID
= PropertyHelper.getPropertyId("HostRoles", "stale_configs");
+ public static final String HOST_COMPONENT_RELOAD_CONFIGS_PROPERTY_ID
+ = PropertyHelper.getPropertyId("HostRoles", "reload_configs");
public static final String HOST_COMPONENT_DESIRED_ADMIN_STATE_PROPERTY_ID
= PropertyHelper.getPropertyId("HostRoles", "desired_admin_state");
public static final String HOST_COMPONENT_MAINTENANCE_STATE_PROPERTY_ID
@@ -244,6 +246,8 @@ public class HostComponentResourceProvider extends AbstractControllerResourcePro
response.getActualConfigs(), requestedIds);
setResourceProperty(resource, HOST_COMPONENT_STALE_CONFIGS_PROPERTY_ID,
response.isStaleConfig(), requestedIds);
+ setResourceProperty(resource, HOST_COMPONENT_RELOAD_CONFIGS_PROPERTY_ID,
+ response.isReloadConfig(), requestedIds);
setResourceProperty(resource, HOST_COMPONENT_UPGRADE_STATE_PROPERTY_ID,
response.getUpgradeState(), requestedIds);
setResourceProperty(resource, HOST_COMPONENT_DESIRED_REPOSITORY_VERSION,
http://git-wip-us.apache.org/repos/asf/ambari/blob/57682942/ambari-server/src/main/java/org/apache/ambari/server/metadata/ActionMetadata.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/metadata/ActionMetadata.java b/ambari-server/src/main/java/org/apache/ambari/server/metadata/ActionMetadata.java
index e0bfdcf..33dc0e8 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/metadata/ActionMetadata.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/metadata/ActionMetadata.java
@@ -65,6 +65,7 @@ public class ActionMetadata {
defaultHostComponentCommands.add("CONFIGURE");
defaultHostComponentCommands.add("CONFIGURE_FUNCTION");
defaultHostComponentCommands.add("DISABLE_SECURITY");
+ defaultHostComponentCommands.add("RECONFIGURE");
}
private void fillServiceClients() {
http://git-wip-us.apache.org/repos/asf/ambari/blob/57682942/ambari-server/src/main/java/org/apache/ambari/server/stack/StackModule.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/stack/StackModule.java b/ambari-server/src/main/java/org/apache/ambari/server/stack/StackModule.java
index e88bbf2..520764d 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/stack/StackModule.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/stack/StackModule.java
@@ -38,6 +38,7 @@ import org.apache.ambari.server.state.ConfigHelper;
import org.apache.ambari.server.state.ExtensionInfo;
import org.apache.ambari.server.state.PropertyDependencyInfo;
import org.apache.ambari.server.state.PropertyInfo;
+import org.apache.ambari.server.state.RefreshCommand;
import org.apache.ambari.server.state.RepositoryInfo;
import org.apache.ambari.server.state.ServiceInfo;
import org.apache.ambari.server.state.StackInfo;
@@ -579,6 +580,7 @@ public class StackModule extends BaseModule<StackModule, StackInfo> implements V
}
// Read the service and available configs for this stack
populateServices();
+
if (!stackInfo.isValid()) {
setValid(false);
addErrors(stackInfo.getErrors());
@@ -627,7 +629,7 @@ public class StackModule extends BaseModule<StackModule, StackInfo> implements V
for (ServiceInfo serviceInfo : serviceInfos) {
ServiceModule serviceModule = new ServiceModule(stackContext, serviceInfo, serviceDirectory);
serviceModules.add(serviceModule);
- if (!serviceModule.isValid()){
+ if (!serviceModule.isValid()) {
stackInfo.setValid(false);
setValid(false);
stackInfo.addErrors(serviceModule.getErrors());
@@ -769,7 +771,11 @@ public class StackModule extends BaseModule<StackModule, StackInfo> implements V
// relationship into map. Since we do not have the reverse {@link PropertyInfo},
// we have to loop through service-configs again later.
for (ServiceModule serviceModule : serviceModules.values()) {
+
+ Map<String, Map<String, String>> componentRefreshCommandsMap = new HashMap<>();
+
for (PropertyInfo pi : serviceModule.getModuleInfo().getProperties()) {
+
for (PropertyDependencyInfo pdi : pi.getDependsOnProperties()) {
String type = ConfigHelper.fileNameToConfigType(pi.getFilename());
String name = pi.getName();
@@ -784,7 +790,28 @@ public class StackModule extends BaseModule<StackModule, StackInfo> implements V
dependedByMap.put(pdi, newDependenciesSet);
}
}
+
+ // set refresh commands
+ if (pi.getSupportedRefreshCommands() != null && pi.getSupportedRefreshCommands().size() > 0) {
+ String type = ConfigHelper.fileNameToConfigType(pi.getFilename());
+ String propertyName = type + "/" + pi.getName();
+
+ Map<String, String> refreshCommandPropertyMap = componentRefreshCommandsMap.get(propertyName);
+
+ for (RefreshCommand refreshCommand : pi.getSupportedRefreshCommands()) {
+ String componentName = refreshCommand.getComponentName();
+ if (refreshCommandPropertyMap == null) {
+ refreshCommandPropertyMap = new HashMap<>();
+ componentRefreshCommandsMap.put(propertyName, refreshCommandPropertyMap);
+ }
+ refreshCommandPropertyMap.put(componentName, refreshCommand.getCommand());
+ }
+
+ }
+
}
+
+ stackInfo.getRefreshCommandConfiguration().addRefreshCommands(componentRefreshCommandsMap);
}
// Go through all service-configs again and set their 'depended-by' if necessary.
http://git-wip-us.apache.org/repos/asf/ambari/blob/57682942/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigHelper.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigHelper.java b/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigHelper.java
index bb7fcbe..eade914 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigHelper.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigHelper.java
@@ -17,12 +17,15 @@
*/
package org.apache.ambari.server.state;
+import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.LinkedHashMap;
+import java.util.LinkedList;
+import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
@@ -41,6 +44,7 @@ import org.apache.ambari.server.state.PropertyInfo.PropertyType;
import org.apache.ambari.server.state.configgroup.ConfigGroup;
import org.apache.ambari.server.utils.SecretReference;
import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang.math.NumberUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -72,6 +76,8 @@ public class ConfigHelper {
*/
private final Cache<Integer, Boolean> staleConfigsCache;
+ private final Cache<Integer, String> refreshConfigCommandCache;
+
private static final Logger LOG =
LoggerFactory.getLogger(ConfigHelper.class);
@@ -113,6 +119,9 @@ public class ConfigHelper {
STALE_CONFIGS_CACHE_EXPIRATION_TIME = configuration.staleConfigCacheExpiration();
staleConfigsCache = CacheBuilder.newBuilder().
expireAfterWrite(STALE_CONFIGS_CACHE_EXPIRATION_TIME, TimeUnit.SECONDS).build();
+
+ refreshConfigCommandCache = CacheBuilder.newBuilder().
+ expireAfterWrite(STALE_CONFIGS_CACHE_EXPIRATION_TIME, TimeUnit.SECONDS).build();
}
/**
@@ -1302,6 +1311,8 @@ public class ConfigHelper {
StackId stackId = sch.getServiceComponent().getDesiredStackId();
+ StackInfo stackInfo = ambariMetaInfo.getStack(stackId);
+
ServiceInfo serviceInfo = ambariMetaInfo.getService(stackId.getStackName(),
stackId.getStackVersion(), sch.getServiceName());
@@ -1316,8 +1327,10 @@ public class ConfigHelper {
// ---- merge values, determine changed keys, check stack: stale
Iterator<Entry<String, Map<String, String>>> it = desired.entrySet().iterator();
+ List<String> changedProperties = new LinkedList<>();
- while (it.hasNext() && !stale) {
+ while (it.hasNext()) {
+ boolean staleEntry = false;
Entry<String, Map<String, String>> desiredEntry = it.next();
String type = desiredEntry.getKey();
@@ -1325,29 +1338,108 @@ public class ConfigHelper {
if (!actual.containsKey(type)) {
// desired is set, but actual is not
- if (!serviceInfo.hasConfigDependency(type)) {
- stale = componentInfo != null && componentInfo.hasConfigType(type);
- } else {
- stale = true;
- }
+ staleEntry = (serviceInfo.hasConfigDependency(type) || componentInfo.hasConfigType(type));
} else {
// desired and actual both define the type
HostConfig hc = actual.get(type);
Map<String, String> actualTags = buildTags(hc);
if (!isTagChanged(tags, actualTags, hasGroupSpecificConfigsForType(cluster, sch.getHostName(), type))) {
- stale = false;
+ staleEntry = false;
} else {
- stale = serviceInfo.hasConfigDependency(type) || componentInfo.hasConfigType(type);
+ staleEntry = (serviceInfo.hasConfigDependency(type) || componentInfo.hasConfigType(type));
+ if (staleEntry) {
+ Collection<String> changedKeys = findChangedKeys(cluster, type, tags.values(), actualTags.values());
+ changedProperties.addAll(changedKeys);
+ }
}
}
+ stale = stale | staleEntry;
}
+
+ String refreshCommand = calculateRefreshCommand(stackInfo.getRefreshCommandConfiguration(), sch, changedProperties);
+
if (STALE_CONFIGS_CACHE_ENABLED) {
staleConfigsCache.put(staleHash, stale);
+ if (refreshCommand != null) {
+ refreshConfigCommandCache.put(staleHash, refreshCommand);
+ }
}
+
+ // gather all changed properties and see if we can find a common refreshConfigs command for this component
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Changed properties {} ({}) {} : COMMAND: {}", stale, sch.getServiceComponentName(), sch.getHostName(), refreshCommand);
+ for (String p : changedProperties) {
+ LOG.debug(p);
+ }
+ }
+
return stale;
}
+ public String getRefreshConfigsCommand(Cluster cluster, String hostName, String serviceName, String componentName) throws AmbariException {
+ ServiceComponent serviceComponent = cluster.getService(serviceName).getServiceComponent(componentName);
+ ServiceComponentHost sch = serviceComponent.getServiceComponentHost(hostName);
+ return getRefreshConfigsCommand(cluster, sch);
+ }
+
+ public String getRefreshConfigsCommand(Cluster cluster, ServiceComponentHost sch) throws AmbariException {
+ String refreshCommand = null;
+
+ Map<String, HostConfig> actual = sch.getActualConfigs();
+ if (STALE_CONFIGS_CACHE_ENABLED) {
+ Map<String, Map<String, String>> desired = getEffectiveDesiredTags(cluster, sch.getHostName(),
+ cluster.getDesiredConfigs());
+ int staleHash = Objects.hashCode(actual.hashCode(),
+ desired.hashCode(),
+ sch.getHostName(),
+ sch.getServiceComponentName(),
+ sch.getServiceName());
+ refreshCommand = refreshConfigCommandCache.getIfPresent(staleHash);
+ }
+ return refreshCommand;
+ }
+
+
+ /**
+ * Calculates the refresh command for a set of changed properties as follows:
+ * - if any property has no refresh command, return null
+ * - in case of multiple refresh commands: since REFRESH_CONFIGS is executed by default as part of any other command,
+ * it can be overridden by RELOAD_CONFIGS or any other custom command; however, if two different custom commands occur,
+ * return null, as it is not possible to refresh all properties with one command.
+ *
+ * examples:
+ * {REFRESH_CONFIGS, REFRESH_CONFIGS, RELOAD_CONFIGS} ==> RELOAD_CONFIGS
+ * {REFRESH_CONFIGS, RELOADPROXYUSERS, RELOAD_CONFIGS} ==> null
+ *
+ * @param refreshCommandConfiguration the stack's refresh command definitions
+ * @param sch the service component host being evaluated
+ * @param changedProperties the changed property names, prefixed with their config type
+ * @return the single command that can refresh all changed properties, or null if there is none
+ */
+ private String calculateRefreshCommand(RefreshCommandConfiguration refreshCommandConfiguration,
+ ServiceComponentHost sch, List<String> changedProperties) {
+
+ String finalRefreshCommand = null;
+ for (String propertyName : changedProperties) {
+ String refreshCommand = refreshCommandConfiguration.getRefreshCommandForComponent(sch, propertyName);
+ if (refreshCommand == null) {
+ return null;
+ }
+ if (finalRefreshCommand == null) {
+ finalRefreshCommand = refreshCommand;
+ }
+ if (!finalRefreshCommand.equals(refreshCommand)) {
+ if (finalRefreshCommand.equals(RefreshCommandConfiguration.REFRESH_CONFIGS)) {
+ finalRefreshCommand = refreshCommand;
+ } else if (!refreshCommand.equals(RefreshCommandConfiguration.REFRESH_CONFIGS)) {
+ return null;
+ }
+ }
+ }
+ return finalRefreshCommand;
+ }
+
/**
* Determines if the hostname has group specific configs for the type specified
*
@@ -1374,6 +1466,62 @@ public class ConfigHelper {
}
/**
+ * @return the keys that have changed values
+ */
+ private Collection<String> findChangedKeys(Cluster cluster, String type,
+ Collection<String> desiredTags, Collection<String> actualTags) {
+
+ Map<String, String> desiredValues = new HashMap<>();
+ Map<String, String> actualValues = new HashMap<>();
+
+ for (String tag : desiredTags) {
+ Config config = cluster.getConfig(type, tag);
+ if (null != config) {
+ desiredValues.putAll(config.getProperties());
+ }
+ }
+
+ for (String tag : actualTags) {
+ Config config = cluster.getConfig(type, tag);
+ if (null != config) {
+ actualValues.putAll(config.getProperties());
+ }
+ }
+
+ List<String> keys = new ArrayList<>();
+
+ for (Entry<String, String> entry : desiredValues.entrySet()) {
+ String key = entry.getKey();
+ String value = entry.getValue();
+
+ if (!actualValues.containsKey(key) || !valuesAreEqual(actualValues.get(key), value)) {
+ keys.add(type + "/" + key);
+ }
+ }
+
+ return keys;
+ }
+
+ /**
+ * Compares the values as doubles if both are numbers, otherwise as strings.
+ * @param actualValue the currently applied value
+ * @param newValue the desired value
+ * @return true if the two values are considered equal
+ */
+ private boolean valuesAreEqual(String actualValue, String newValue) {
+ boolean actualValueIsNumber = NumberUtils.isNumber(actualValue);
+ boolean newValueIsNumber = NumberUtils.isNumber(newValue);
+ if (actualValueIsNumber && newValueIsNumber) {
+ Double ab = Double.parseDouble(actualValue);
+ Double bb = Double.parseDouble(newValue);
+ return ab.equals(bb);
+ } else if (!actualValueIsNumber && !newValueIsNumber) {
+ return actualValue.equals(newValue);
+ }
+ return false;
+ }
+
+ /**
* @return the map of tags for a desired config
*/
private Map<String, String> buildTags(HostConfig hc) {
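
The merge rule in calculateRefreshCommand() can be restated compactly: the default REFRESH_CONFIGS yields to any single more specific command, while two different specific commands mean no single command can refresh everything. A standalone Python sketch of just that rule (the constant value matches RefreshCommandConfiguration.REFRESH_CONFIGS; the function name is illustrative):

REFRESH_CONFIGS = "refresh_configs"

def calculate_refresh_command(commands):
  """Return the single command able to refresh all changed properties, or None."""
  final = None
  for cmd in commands:
    if cmd is None:
      return None                # some property cannot be refreshed at all
    if final is None:
      final = cmd
    elif final != cmd:
      if final == REFRESH_CONFIGS:
        final = cmd              # a specific command overrides the default
      elif cmd != REFRESH_CONFIGS:
        return None              # two different specific commands: no single refresh
  return final

assert calculate_refresh_command(["refresh_configs", "refresh_configs", "reload_configs"]) == "reload_configs"
assert calculate_refresh_command(["refresh_configs", "reloadproxyusers", "reload_configs"]) is None
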
http://git-wip-us.apache.org/repos/asf/ambari/blob/57682942/ambari-server/src/main/java/org/apache/ambari/server/state/PropertyInfo.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/PropertyInfo.java b/ambari-server/src/main/java/org/apache/ambari/server/state/PropertyInfo.java
index 63c850e..31fcb9d 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/PropertyInfo.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/PropertyInfo.java
@@ -90,6 +90,11 @@ public class PropertyInfo {
private Set<PropertyDependencyInfo> usedByProperties =
new HashSet<>();
+ @XmlElementWrapper(name="supported-refresh-commands")
+ @XmlElement(name="refresh-command")
+ private Set<RefreshCommand> supportedRefreshCommands = new HashSet<>();
+
+
//This method is called after all the properties (except IDREF) are unmarshalled for this object,
//but before this object is set to the parent object.
void afterUnmarshal(Unmarshaller unmarshaller, Object parent) {
@@ -209,6 +214,30 @@ public class PropertyInfo {
this.requireInput = requireInput;
}
+ public List<Element> getPropertyAttributes() {
+ return propertyAttributes;
+ }
+
+ public void setPropertyAttributes(List<Element> propertyAttributes) {
+ this.propertyAttributes = propertyAttributes;
+ }
+
+ public Set<RefreshCommand> getSupportedRefreshCommands() {
+ return supportedRefreshCommands;
+ }
+
+ public void setSupportedRefreshCommands(Set<RefreshCommand> supportedRefreshCommands) {
+ this.supportedRefreshCommands = supportedRefreshCommands;
+ }
+
+ /**
+ * Wildcard properties should not be included in stack configurations.
+ * @return true if the property should be exposed as a stack configuration
+ */
+ public boolean shouldBeConfigured() {
+ return !getName().contains("*");
+ }
+
@Override
public int hashCode() {
final int prime = 31;
http://git-wip-us.apache.org/repos/asf/ambari/blob/57682942/ambari-server/src/main/java/org/apache/ambari/server/state/RefreshCommand.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/RefreshCommand.java b/ambari-server/src/main/java/org/apache/ambari/server/state/RefreshCommand.java
new file mode 100644
index 0000000..e09a875
--- /dev/null
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/RefreshCommand.java
@@ -0,0 +1,52 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ambari.server.state;
+
+import javax.xml.bind.annotation.XmlAttribute;
+
+/**
+ * Represents a RefreshCommand defined for a component and a property.
+ */
+public class RefreshCommand {
+
+ @XmlAttribute(name="componentName", required = true)
+ private String componentName;
+
+ /**
+ * Default command is reload_configs.
+ */
+ @XmlAttribute(name="command", required = false)
+ private String command = RefreshCommandConfiguration.RELOAD_CONFIGS;
+
+ public RefreshCommand() {
+ }
+
+ public RefreshCommand(String componentName, String command) {
+ this.componentName = componentName;
+ this.command = command;
+ }
+
+ public String getComponentName() {
+ return componentName;
+ }
+
+ public String getCommand() {
+ return command;
+ }
+
+}
http://git-wip-us.apache.org/repos/asf/ambari/blob/57682942/ambari-server/src/main/java/org/apache/ambari/server/state/RefreshCommandConfiguration.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/RefreshCommandConfiguration.java b/ambari-server/src/main/java/org/apache/ambari/server/state/RefreshCommandConfiguration.java
new file mode 100644
index 0000000..5999c6c
--- /dev/null
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/RefreshCommandConfiguration.java
@@ -0,0 +1,71 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ambari.server.state;
+
+import java.util.HashMap;
+import java.util.Map;
+
+public class RefreshCommandConfiguration {
+
+ public static final String RELOAD_CONFIGS = "reload_configs";
+ public static final String REFRESH_CONFIGS = "refresh_configs";
+
+ private Map<String, Map<String, String>> propertyComponentCommandMap;
+
+ public RefreshCommandConfiguration() {
+ }
+
+ private String findKey(String propertyName) {
+ for (String keyName : propertyComponentCommandMap.keySet()) {
+ if (propertyName.startsWith(keyName)) {
+ return keyName;
+ }
+ }
+ return null;
+ }
+
+ /**
+ * If no command is defined for a component, the default is REFRESH_CONFIGS: either for a client component, or
+ * when there is only one command defined for another component. This is because if RELOAD_CONFIGS is defined for
+ * NAMENODE, then presumably other dependent components will need just a refresh.
+ */
+ public String getRefreshCommandForComponent(ServiceComponentHost sch, String propertyName) {
+ if (sch.isClientComponent()) {
+ return REFRESH_CONFIGS;
+ }
+ String keyName = findKey(propertyName);
+ Map<String, String> componentCommandMap = propertyComponentCommandMap.get(keyName);
+ if (componentCommandMap != null) {
+ String commandForComponent = componentCommandMap.get(sch.getServiceComponentName());
+ if (commandForComponent != null) {
+ return commandForComponent;
+ } else if (componentCommandMap.size() == 1) {
+ return REFRESH_CONFIGS;
+ }
+ }
+ return null;
+ }
+
+ public void addRefreshCommands(Map<String, Map<String, String>> refreshCommands) {
+ if (propertyComponentCommandMap == null) {
+ propertyComponentCommandMap = new HashMap<>();
+ }
+ propertyComponentCommandMap.putAll(refreshCommands);
+ }
+
+}
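
The lookup above applies two defaults: a client component always gets REFRESH_CONFIGS, and a component with no explicit entry still gets REFRESH_CONFIGS when exactly one other component owns a command for the property. A Python sketch of that decision, with a plain dict standing in for one entry of propertyComponentCommandMap:

REFRESH_CONFIGS = "refresh_configs"

def refresh_command_for(component, is_client, component_command_map):
  # component_command_map: {componentName: command} for one matched property key
  if is_client:
    return REFRESH_CONFIGS       # clients only ever re-render their config files
  cmd = component_command_map.get(component)
  if cmd is not None:
    return cmd
  if len(component_command_map) == 1:
    return REFRESH_CONFIGS       # one owner reloads, everyone else just refreshes
  return None

assert refresh_command_for("NAMENODE", False, {"NAMENODE": "reload_configs"}) == "reload_configs"
assert refresh_command_for("DATANODE", False, {"NAMENODE": "reload_configs"}) == "refresh_configs"
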
http://git-wip-us.apache.org/repos/asf/ambari/blob/57682942/ambari-server/src/main/java/org/apache/ambari/server/state/StackInfo.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/StackInfo.java b/ambari-server/src/main/java/org/apache/ambari/server/state/StackInfo.java
index c32e907..70d5926 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/StackInfo.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/StackInfo.java
@@ -90,6 +90,8 @@ public class StackInfo implements Comparable<StackInfo>, Validable {
* */
private List<String> servicesWithNoConfigs = new ArrayList<>();
+ private RefreshCommandConfiguration refreshCommandConfiguration = new RefreshCommandConfiguration();
+
public String getMinJdk() {
return minJdk;
}
@@ -604,4 +606,12 @@ public class StackInfo implements Comparable<StackInfo>, Validable {
public VersionDefinitionXml getLatestVersionDefinition() {
return latestVersion;
}
+
+ public RefreshCommandConfiguration getRefreshCommandConfiguration() {
+ return refreshCommandConfiguration;
+ }
+
+ public void setRefreshCommandConfiguration(RefreshCommandConfiguration refreshCommandConfiguration) {
+ this.refreshCommandConfiguration = refreshCommandConfiguration;
+ }
}
http://git-wip-us.apache.org/repos/asf/ambari/blob/57682942/ambari-server/src/main/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostImpl.java
index f490ff0..3b8f6da 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostImpl.java
@@ -1181,6 +1181,16 @@ public class ServiceComponentHostImpl implements ServiceComponentHost {
LOG.error("Could not determine stale config", e);
}
+ try {
+ Cluster cluster = clusters.getCluster(clusterName);
+ ServiceComponent serviceComponent = cluster.getService(serviceName).getServiceComponent(serviceComponentName);
+ ServiceComponentHost sch = serviceComponent.getServiceComponentHost(hostName);
+ String refreshConfigsCommand = helper.getRefreshConfigsCommand(cluster,sch);
+ r.setReloadConfig(refreshConfigsCommand != null);
+ } catch (Exception e) {
+ LOG.error("Could not determine reload config flag", e);
+ }
+
return r;
}
http://git-wip-us.apache.org/repos/asf/ambari/blob/57682942/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/configuration/core-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/configuration/core-site.xml b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/configuration/core-site.xml
index 5c6f043..d39ea78 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/configuration/core-site.xml
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/configuration/core-site.xml
@@ -185,4 +185,16 @@ DEFAULT
</description>
<on-ambari-upgrade add="true"/>
</property>
+ <property>
+ <name>hadoop.proxyuser.*</name>
+ <value/>
+ <description>
+ This * property is never configured itself; it is used only to define refresh commands for all properties
+ prefixed with hadoop.proxyuser.
+ </description>
+ <supported-refresh-commands>
+ <refresh-command componentName="NAMENODE" command="reloadproxyusers" />
+ </supported-refresh-commands>
+ <on-ambari-upgrade add="false"/>
+ </property>
</configuration>
http://git-wip-us.apache.org/repos/asf/ambari/blob/57682942/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/configuration/hdfs-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/configuration/hdfs-site.xml b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/configuration/hdfs-site.xml
index 7fdc227..d97a52e 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/configuration/hdfs-site.xml
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/configuration/hdfs-site.xml
@@ -184,6 +184,9 @@
<value>3</value>
<description>Determines datanode heartbeat interval in seconds.</description>
<on-ambari-upgrade add="true"/>
+ <supported-refresh-commands>
+ <refresh-command componentName="NAMENODE" command="reload_configs" />
+ </supported-refresh-commands>
</property>
<property>
<name>dfs.namenode.safemode.threshold-pct</name>
http://git-wip-us.apache.org/repos/asf/ambari/blob/57682942/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/datanode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/datanode.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/datanode.py
index 0aa0bc0..c0abb15 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/datanode.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/datanode.py
@@ -31,7 +31,7 @@ from resource_management.libraries.functions.decorator import retry
from resource_management.libraries.functions.security_commons import build_expectations, \
cached_kinit_executor, get_params_from_filesystem, validate_security_config_properties, FILE_TYPE_XML
from resource_management.core.logger import Logger
-from hdfs import hdfs
+from hdfs import hdfs, reconfig
from ambari_commons.os_family_impl import OsFamilyImpl
from ambari_commons import OSConst
from utils import get_hdfs_binary
@@ -57,6 +57,17 @@ class DataNode(Script):
hdfs("datanode")
datanode(action="configure")
+ def save_configs(self, env):
+ import params
+ env.set_params(params)
+ hdfs("datanode")
+
+ def reload_configs(self, env):
+ import params
+ env.set_params(params)
+ Logger.info("RELOAD CONFIGS")
+ reconfig("datanode", params.dfs_dn_ipc_address)
+
def start(self, env, upgrade_type=None):
import params
env.set_params(params)
http://git-wip-us.apache.org/repos/asf/ambari/blob/57682942/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs.py
index 07c7616..4022986 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs.py
@@ -20,12 +20,16 @@ Ambari Agent
"""
from resource_management.libraries.script.script import Script
-from resource_management.core.resources.system import Directory, File, Link
+from resource_management.core.resources.system import Execute, Directory, File, Link
from resource_management.core.resources import Package
from resource_management.core.source import Template
from resource_management.core.resources.service import ServiceConfig
from resource_management.libraries.resources.xml_config import XmlConfig
+
from resource_management.libraries.functions.get_lzo_packages import get_lzo_packages
+from resource_management.core.exceptions import Fail
+from resource_management.core.logger import Logger
+from resource_management.libraries.functions.format import format
import os
from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
from ambari_commons import OSConst
@@ -157,6 +161,52 @@ def install_snappy():
to=params.so_src_x64,
)
+class ConfigStatusParser(object):
+ def __init__(self):
+ self.reconfig_successful = False
+
+ def handle_new_line(self, line, is_stderr):
+ if is_stderr:
+ return
+
+ if line.startswith('SUCCESS: Changed property'):
+ self.reconfig_successful = True
+
+ Logger.info('[reconfig] %s' % (line))
+
+@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
+def reconfig(componentName, componentAddress):
+ import params
+
+ if params.security_enabled:
+ Execute(params.nn_kinit_cmd,
+ user=params.hdfs_user
+ )
+
+ nn_reconfig_cmd = format('hdfs --config {hadoop_conf_dir} dfsadmin -reconfig {componentName} {componentAddress} start')
+
+ Execute(nn_reconfig_cmd,
+ user=params.hdfs_user,
+ logoutput=True,
+ path=params.hadoop_bin_dir
+ )
+
+ nn_reconfig_cmd = format('hdfs --config {hadoop_conf_dir} dfsadmin -reconfig {componentName} {componentAddress} status')
+ config_status_parser = ConfigStatusParser()
+ Execute(nn_reconfig_cmd,
+ user=params.hdfs_user,
+ logoutput=False,
+ path=params.hadoop_bin_dir,
+ on_new_line=config_status_parser.handle_new_line
+ )
+
+ if not config_status_parser.reconfig_successful:
+ Logger.error('Reconfiguration failed')
+ raise Fail('Reconfiguration failed!')
+
+ Logger.info('Reconfiguration successfully completed.')
+
@OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
def hdfs(component=None):
import params
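
reconfig() drives the stock hdfs dfsadmin -reconfig <component> <address> start/status cycle and decides success purely by scanning stdout of the status call for the 'SUCCESS: Changed property' prefix. A self-contained mirror of that parser, minus the logging (the sample property name is illustrative):

class ConfigStatusParser(object):
  def __init__(self):
    self.reconfig_successful = False

  def handle_new_line(self, line, is_stderr):
    # Only stdout counts; success is any line starting with the documented prefix.
    if is_stderr:
      return
    if line.startswith('SUCCESS: Changed property'):
      self.reconfig_successful = True

parser = ConfigStatusParser()
parser.handle_new_line('SUCCESS: Changed property dfs.heartbeat.interval', False)
assert parser.reconfig_successful
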
http://git-wip-us.apache.org/repos/asf/ambari/blob/57682942/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_client.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_client.py
index 0896f30..f2e96c3 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_client.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_client.py
@@ -42,6 +42,11 @@ class HdfsClient(Script):
env.set_params(params)
hdfs()
+ def save_configs(self, env):
+ import params
+ env.set_params(params)
+ hdfs()
+
def start(self, env, upgrade_type=None):
import params
env.set_params(params)
http://git-wip-us.apache.org/repos/asf/ambari/blob/57682942/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_namenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_namenode.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_namenode.py
index cac6e9c..2224f72 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_namenode.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_namenode.py
@@ -430,6 +430,27 @@ def is_namenode_formatted(params):
return False
+
+@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
+def refreshProxyUsers():
+ import params
+
+ if params.security_enabled:
+ Execute(params.nn_kinit_cmd,
+ user=params.hdfs_user
+ )
+
+ if params.dfs_ha_enabled:
+ # due to a bug in hdfs, refreshNodes will not run on both namenodes so we
+ # need to execute each command scoped to a particular namenode
+ nn_refresh_cmd = format('dfsadmin -fs hdfs://{namenode_rpc} -refreshSuperUserGroupsConfiguration')
+ else:
+ nn_refresh_cmd = format('dfsadmin -fs {namenode_address} -refreshSuperUserGroupsConfiguration')
+ ExecuteHadoop(nn_refresh_cmd,
+ user=params.hdfs_user,
+ conf_dir=params.hadoop_conf_dir,
+ bin_dir=params.hadoop_bin_dir)
+
@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
def decommission():
import params
http://git-wip-us.apache.org/repos/asf/ambari/blob/57682942/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/namenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/namenode.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/namenode.py
index 50bf1e0..291da05 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/namenode.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/namenode.py
@@ -46,8 +46,8 @@ from ambari_commons import OSConst
import namenode_upgrade
-from hdfs_namenode import namenode, wait_for_safemode_off
-from hdfs import hdfs
+from hdfs_namenode import namenode, wait_for_safemode_off, refreshProxyUsers
+from hdfs import hdfs, reconfig
import hdfs_rebalance
from utils import initiate_safe_zkfc_failover, get_hdfs_binary, get_dfsadmin_base_command
@@ -86,6 +86,23 @@ class NameNode(Script):
hdfs_binary = self.get_hdfs_binary()
namenode(action="configure", hdfs_binary=hdfs_binary, env=env)
+ def save_configs(self, env):
+ import params
+ env.set_params(params)
+ hdfs()
+
+ def reload_configs(self, env):
+ import params
+ env.set_params(params)
+ Logger.info("RELOAD CONFIGS")
+ reconfig("namenode", params.namenode_address)
+
+ def reloadproxyusers(self, env):
+ import params
+ env.set_params(params)
+ Logger.info("RELOAD HDFS PROXY USERS")
+ refreshProxyUsers()
+
def start(self, env, upgrade_type=None):
import params
env.set_params(params)
http://git-wip-us.apache.org/repos/asf/ambari/blob/57682942/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/snamenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/snamenode.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/snamenode.py
index 4977e1c..3d387b4 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/snamenode.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/snamenode.py
@@ -44,6 +44,16 @@ class SNameNode(Script):
hdfs("secondarynamenode")
snamenode(action="configure")
+ def save_configs(self, env):
+ import params
+ env.set_params(params)
+ hdfs("secondarynamenode")
+
+ def reload_configs(self, env):
+ import params
+ env.set_params(params)
+ Logger.info("RELOAD CONFIGS")
+
def start(self, env, upgrade_type=None):
import params
env.set_params(params)
http://git-wip-us.apache.org/repos/asf/ambari/blob/57682942/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/configuration/hdfs-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/configuration/hdfs-site.xml b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/configuration/hdfs-site.xml
index 5c28527..940f87c 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/configuration/hdfs-site.xml
+++ b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/configuration/hdfs-site.xml
@@ -181,6 +181,9 @@
<value>3</value>
<description>Determines datanode heartbeat interval in seconds.</description>
<on-ambari-upgrade add="false"/>
+ <supported-refresh-commands>
+ <refresh-command componentName="NAMENODE" command="reload_configs" />
+ </supported-refresh-commands>
</property>
<property>
<name>dfs.namenode.safemode.threshold-pct</name>
@@ -637,5 +640,8 @@
<name>hadoop.caller.context.enabled</name>
<value>true</value>
<on-ambari-upgrade add="false"/>
+ <supported-refresh-commands>
+ <refresh-command componentName="NAMENODE" command="reload_configs" />
+ </supported-refresh-commands>
</property>
</configuration>
http://git-wip-us.apache.org/repos/asf/ambari/blob/57682942/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/datanode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/datanode.py b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/datanode.py
index d8fb361..a843374 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/datanode.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/datanode.py
@@ -25,7 +25,7 @@ from resource_management.libraries.functions.stack_features import check_stack_f
from resource_management.libraries.functions.security_commons import build_expectations, \
cached_kinit_executor, get_params_from_filesystem, validate_security_config_properties, FILE_TYPE_XML
from resource_management.core.logger import Logger
-from hdfs import hdfs
+from hdfs import hdfs, reconfig
from ambari_commons.os_family_impl import OsFamilyImpl
from ambari_commons import OSConst
from utils import get_hdfs_binary
@@ -50,6 +50,17 @@ class DataNode(Script):
hdfs("datanode")
datanode(action="configure")
+ def save_configs(self, env):
+ import params
+ env.set_params(params)
+ hdfs("datanode")
+
+ def reload_configs(self, env):
+ import params
+ env.set_params(params)
+ Logger.info("RELOAD CONFIGS")
+ reconfig("datanode", params.dfs_dn_ipc_address)
+
def start(self, env, upgrade_type=None):
import params
env.set_params(params)
http://git-wip-us.apache.org/repos/asf/ambari/blob/57682942/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/hdfs.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/hdfs.py b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/hdfs.py
index 07c7616..4022986 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/hdfs.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/hdfs.py
@@ -20,12 +20,16 @@ Ambari Agent
"""
from resource_management.libraries.script.script import Script
-from resource_management.core.resources.system import Directory, File, Link
+from resource_management.core.resources.system import Execute, Directory, File, Link
from resource_management.core.resources import Package
from resource_management.core.source import Template
from resource_management.core.resources.service import ServiceConfig
from resource_management.libraries.resources.xml_config import XmlConfig
+
from resource_management.libraries.functions.get_lzo_packages import get_lzo_packages
+from resource_management.core.exceptions import Fail
+from resource_management.core.logger import Logger
+from resource_management.libraries.functions.format import format
import os
from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
from ambari_commons import OSConst
@@ -157,6 +161,52 @@ def install_snappy():
to=params.so_src_x64,
)
+class ConfigStatusParser(object):
+ def __init__(self):
+ self.reconfig_successful = False
+
+ def handle_new_line(self, line, is_stderr):
+ if is_stderr:
+ return
+
+ if line.startswith('SUCCESS: Changed property'):
+ self.reconfig_successful = True
+
+ Logger.info('[reconfig] %s' % (line))
+
+@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
+def reconfig(componentName, componentAddress):
+ import params
+
+ if params.security_enabled:
+ Execute(params.nn_kinit_cmd,
+ user=params.hdfs_user
+ )
+
+ nn_reconfig_cmd = format('hdfs --config {hadoop_conf_dir} dfsadmin -reconfig {componentName} {componentAddress} start')
+
+ Execute(nn_reconfig_cmd,
+ user=params.hdfs_user,
+ logoutput=True,
+ path=params.hadoop_bin_dir
+ )
+
+ nn_reconfig_cmd = format('hdfs --config {hadoop_conf_dir} dfsadmin -reconfig {componentName} {componentAddress} status')
+ config_status_parser = ConfigStatusParser()
+ Execute(nn_reconfig_cmd,
+ user=params.hdfs_user,
+ logoutput=False,
+ path=params.hadoop_bin_dir,
+ on_new_line=config_status_parser.handle_new_line
+ )
+
+ if not config_status_parser.reconfig_successful:
+ Logger.error('Reconfiguration failed')
+ raise Fail('Reconfiguration failed!')
+
+ Logger.info('Reconfiguration successfully completed.')
+
@OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
def hdfs(component=None):
import params
http://git-wip-us.apache.org/repos/asf/ambari/blob/57682942/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/hdfs_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/hdfs_client.py b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/hdfs_client.py
index 0896f30..f2e96c3 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/hdfs_client.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/hdfs_client.py
@@ -42,6 +42,11 @@ class HdfsClient(Script):
env.set_params(params)
hdfs()
+ def save_configs(self, env):
+ import params
+ env.set_params(params)
+ hdfs()
+
def start(self, env, upgrade_type=None):
import params
env.set_params(params)
http://git-wip-us.apache.org/repos/asf/ambari/blob/57682942/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/hdfs_namenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/hdfs_namenode.py b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/hdfs_namenode.py
index 5a1f368..94cd66c 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/hdfs_namenode.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/hdfs_namenode.py
@@ -460,6 +460,26 @@ def decommission():
conf_dir=conf_dir,
bin_dir=params.hadoop_bin_dir)
+@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
+def refreshProxyUsers():
+ import params
+
+ if params.security_enabled:
+ Execute(params.nn_kinit_cmd,
+ user=params.hdfs_user
+ )
+
+ if params.dfs_ha_enabled:
+ # due to a bug in hdfs, refreshNodes will not run on both namenodes so we
+ # need to execute each command scoped to a particular namenode
+ nn_refresh_cmd = format('dfsadmin -fs hdfs://{namenode_rpc} -refreshSuperUserGroupsConfiguration')
+ else:
+ nn_refresh_cmd = format('dfsadmin -fs {namenode_address} -refreshSuperUserGroupsConfiguration')
+ ExecuteHadoop(nn_refresh_cmd,
+ user=params.hdfs_user,
+ conf_dir=params.hadoop_conf_dir,
+ bin_dir=params.hadoop_bin_dir)
+
@OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
def decommission():
import params
http://git-wip-us.apache.org/repos/asf/ambari/blob/57682942/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/namenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/namenode.py b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/namenode.py
index 7a0e784..ffdafb8 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/namenode.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/namenode.py
@@ -46,8 +46,8 @@ from ambari_commons import OSConst
import namenode_upgrade
-from hdfs_namenode import namenode, wait_for_safemode_off
-from hdfs import hdfs
+from hdfs_namenode import namenode, wait_for_safemode_off, refreshProxyUsers
+from hdfs import hdfs, reconfig
import hdfs_rebalance
from utils import initiate_safe_zkfc_failover, get_hdfs_binary, get_dfsadmin_base_command
@@ -86,6 +86,23 @@ class NameNode(Script):
hdfs_binary = self.get_hdfs_binary()
namenode(action="configure", hdfs_binary=hdfs_binary, env=env)
+ def save_configs(self, env):
+ import params
+ env.set_params(params)
+ hdfs()
+
+ def reload_configs(self, env):
+ import params
+ env.set_params(params)
+ Logger.info("RELOAD CONFIGS")
+ reconfig("namenode", params.namenode_address)
+
+ def reloadproxyusers(self, env):
+ import params
+ env.set_params(params)
+ Logger.info("RELOAD HDFS PROXY USERS")
+ refreshProxyUsers()
+
def start(self, env, upgrade_type=None):
import params
env.set_params(params)
http://git-wip-us.apache.org/repos/asf/ambari/blob/57682942/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/snamenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/snamenode.py b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/snamenode.py
index f5ff3e1..b0ed533 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/snamenode.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/snamenode.py
@@ -44,6 +44,16 @@ class SNameNode(Script):
hdfs("secondarynamenode")
snamenode(action="configure")
+ def save_configs(self, env):
+ import params
+ env.set_params(params)
+ hdfs("secondarynamenode")
+
+ def reload_configs(self, env):
+ import params
+ env.set_params(params)
+ Logger.info("RELOAD CONFIGS")
+
def start(self, env, upgrade_type=None):
import params
env.set_params(params)
http://git-wip-us.apache.org/repos/asf/ambari/blob/57682942/ambari-server/src/main/resources/configuration-schema.xsd
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/configuration-schema.xsd b/ambari-server/src/main/resources/configuration-schema.xsd
index 9350984..12b0217 100644
--- a/ambari-server/src/main/resources/configuration-schema.xsd
+++ b/ambari-server/src/main/resources/configuration-schema.xsd
@@ -41,6 +41,13 @@
<xs:element name="deleted" type="xs:boolean" minOccurs="0"/>
<xs:element name="final" type="xs:boolean" minOccurs="0"/>
<xs:element name="on-ambari-upgrade" type="propertyUpgradeBehavior" minOccurs="1"/>
+ <xs:element name="supported-refresh-commands" minOccurs="0">
+ <xs:complexType>
+ <xs:sequence>
+ <xs:element name="refresh-command" type="refreshCommands" minOccurs="1" maxOccurs="unbounded"/>
+ </xs:sequence>
+ </xs:complexType>
+ </xs:element>
<xs:element name="on-stack-upgrade" type="propertyStackUpgradeBehavior" minOccurs="0"/>
<xs:element name="property-type" minOccurs="0">
<xs:simpleType>
@@ -84,6 +91,11 @@
<xs:attribute name="merge" type="xs:boolean" use="optional" default="true"/>
</xs:complexType>
+ <xs:complexType name="refreshCommands">
+ <xs:attribute name="componentName" type="xs:string" use="required"/>
+ <xs:attribute name="command" type="xs:string" use="optional"/>
+ </xs:complexType>
+
<xs:complexType name="valueAttributesInfo">
<xs:all>
<xs:element name="type" type="xs:string" minOccurs="0"/>
http://git-wip-us.apache.org/repos/asf/ambari/blob/57682942/ambari-server/src/main/resources/properties.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/properties.json b/ambari-server/src/main/resources/properties.json
index e42864f..1d12f83 100644
--- a/ambari-server/src/main/resources/properties.json
+++ b/ambari-server/src/main/resources/properties.json
@@ -53,6 +53,7 @@
"HostRoles/actual_configs",
"params/run_smoke_test",
"HostRoles/stale_configs",
+ "HostRoles/reload_configs",
"HostRoles/desired_admin_state",
"HostRoles/maintenance_state",
"HostRoles/service_name",
http://git-wip-us.apache.org/repos/asf/ambari/blob/57682942/ambari-server/src/main/resources/stacks/HDP/2.3/services/HDFS/configuration/hdfs-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3/services/HDFS/configuration/hdfs-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.3/services/HDFS/configuration/hdfs-site.xml
index 86aa3ec..14fcf6a 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.3/services/HDFS/configuration/hdfs-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3/services/HDFS/configuration/hdfs-site.xml
@@ -76,5 +76,8 @@
<name>hadoop.caller.context.enabled</name>
<value>true</value>
<on-ambari-upgrade add="false"/>
+ <supported-refresh-commands>
+ <refresh-command componentName="NAMENODE" command="reload_configs" />
+ </supported-refresh-commands>
</property>
</configuration>
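
This declaration is what ties a stale hdfs-site property to a component-level refresh: when hadoop.caller.context.enabled changes, the server can offer reload_configs on NAMENODE instead of a full restart. An illustrative-only Python rendering of the lookup that ConfigHelper.getRefreshConfigsCommand performs (exercised in ConfigHelperTest below); property_info is a plain dict standing in for the parsed PropertyInfo:

# Illustrative only; the real logic lives in the server-side Java
# ConfigHelper. property_info stands in for a parsed PropertyInfo.
def refresh_command_for(property_info, component_name):
    for rc in property_info.get("supported-refresh-commands", []):
        if rc["componentName"] == component_name:
            return rc.get("command")  # e.g. "reload_configs"
    return None  # no refresh command declared: fall back to a restart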
http://git-wip-us.apache.org/repos/asf/ambari/blob/57682942/ambari-server/src/test/java/org/apache/ambari/server/state/ConfigHelperTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/ConfigHelperTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/ConfigHelperTest.java
index 38a38cc..8a0a782 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/state/ConfigHelperTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/state/ConfigHelperTest.java
@@ -157,6 +157,7 @@ public class ConfigHelperTest {
cluster.addService("FLUME", repositoryVersion);
cluster.addService("OOZIE", repositoryVersion);
+ cluster.addService("HDFS", repositoryVersion);
final ClusterRequest clusterRequest2 =
new ClusterRequest(cluster.getClusterId(), clusterName,
@@ -229,6 +230,45 @@ public class ConfigHelperTest {
managementController.updateClusters(new HashSet<ClusterRequest>() {{
add(clusterRequest5);
}}, null);
+
+ // hdfs-site/hadoop.caller.context.enabled
+ ConfigurationRequest cr6 = new ConfigurationRequest();
+ cr6.setClusterName(clusterName);
+ cr6.setType("hdfs-site");
+ cr6.setVersionTag("version1");
+ cr6.setProperties(new HashMap<String, String>() {{
+ put("hadoop.caller.context.enabled", "true");
+ }});
+ cr6.setPropertiesAttributes(null);
+
+ final ClusterRequest clusterRequest6 =
+ new ClusterRequest(cluster.getClusterId(), clusterName,
+ cluster.getDesiredStackVersion().getStackVersion(), null);
+
+ clusterRequest6.setDesiredConfig(Collections.singletonList(cr6));
+ managementController.updateClusters(new HashSet<ClusterRequest>() {{
+ add(clusterRequest6);
+ }}, null);
+
+ // hdfs-site/hadoop.caller.context.enabled
+ ConfigurationRequest cr7 = new ConfigurationRequest();
+ cr7.setClusterName(clusterName);
+ cr7.setType("hdfs-site");
+ cr7.setVersionTag("version2");
+ cr7.setProperties(new HashMap<String, String>() {{
+ put("hadoop.caller.context.enabled", "false");
+ }});
+ cr7.setPropertiesAttributes(null);
+
+ final ClusterRequest clusterRequest7 =
+ new ClusterRequest(cluster.getClusterId(), clusterName,
+ cluster.getDesiredStackVersion().getStackVersion(), null);
+
+ clusterRequest7.setDesiredConfig(Collections.singletonList(cr7));
+ managementController.updateClusters(new HashSet<ClusterRequest>() {{
+ add(clusterRequest7);
+ }}, null);
+
}
@After
@@ -545,7 +585,7 @@ public class ConfigHelperTest {
configHelper.getEffectiveDesiredTags(cluster, "h3"));
Assert.assertNotNull(effectiveAttributes);
- Assert.assertEquals(7, effectiveAttributes.size());
+ Assert.assertEquals(8, effectiveAttributes.size());
Assert.assertTrue(effectiveAttributes.containsKey("global3"));
Map<String, Map<String, String>> globalAttrs = effectiveAttributes.get("global3");
@@ -991,7 +1031,39 @@ public class ConfigHelperTest {
Assert.assertTrue(configHelper.isStaleConfigs(sch, null));
verify(sch);
- }
+ }
+
+ @Test
+ public void testCalculateRefreshCommands() throws Exception {
+
+ Map<String, HostConfig> schReturn = new HashMap<>();
+ HostConfig hc = new HostConfig();
+ // Put a different version to check for change
+ hc.setDefaultVersionTag("version1");
+ schReturn.put("hdfs-site", hc);
+
+ ServiceComponent sc = createNiceMock(ServiceComponent.class);
+
+ // set up mocks
+ ServiceComponentHost sch = createNiceMock(ServiceComponentHost.class);
+ expect(sc.getDesiredStackId()).andReturn(cluster.getDesiredStackVersion()).anyTimes();
+
+ // set up expectations
+ expect(sch.getActualConfigs()).andReturn(schReturn).anyTimes();
+ expect(sch.getHostName()).andReturn("h1").anyTimes();
+ expect(sch.getClusterId()).andReturn(cluster.getClusterId()).anyTimes();
+ expect(sch.getServiceName()).andReturn("HDFS").anyTimes();
+ expect(sch.getServiceComponentName()).andReturn("NAMENODE").anyTimes();
+ expect(sch.getServiceComponent()).andReturn(sc).anyTimes();
+
+ replay(sc, sch);
+
+ Assert.assertTrue(configHelper.isStaleConfigs(sch, null));
+ String refreshConfigsCommand = configHelper.getRefreshConfigsCommand(cluster, sch);
+ Assert.assertEquals("reload_configs", refreshConfigsCommand);
+ verify(sch);
+ }
+
}
public static class RunWithCustomModule {
http://git-wip-us.apache.org/repos/asf/ambari/blob/57682942/ambari-server/src/test/java/org/apache/ambari/server/state/PropertyInfoTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/PropertyInfoTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/PropertyInfoTest.java
index 7a94ebf..4a04d0b 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/state/PropertyInfoTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/state/PropertyInfoTest.java
@@ -137,6 +137,26 @@ public class PropertyInfoTest {
}
@Test
+ public void testBehaviorWithSupportedRefreshCommandsTags() throws JAXBException {
+ // given
+ String xml =
+ "<property>\n" +
+ " <name>prop_name</name>\n" +
+ " <value>prop_val</value>\n" +
+ " <supported-refresh-commands>\n" +
+ " <refresh-command componentName=\"NAMENODE\" command=\"reload_configs\" />\n" +
+ " </supported-refresh-commands>\n" +
+ "</property>";
+
+ // when
+ PropertyInfo propertyInfo = propertyInfoFrom(xml);
+
+ // then
+ assertEquals(propertyInfo.getSupportedRefreshCommands().iterator().next().getCommand(), "reload_configs");
+ assertEquals(propertyInfo.getSupportedRefreshCommands().iterator().next().getComponentName(), "NAMENODE");
+ }
+
+ @Test
public void testUnknownPropertyType() throws Exception {
// Given
String xml =
http://git-wip-us.apache.org/repos/asf/ambari/blob/57682942/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_datanode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_datanode.py b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_datanode.py
index b1a4154..ef59e84 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_datanode.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_datanode.py
@@ -666,3 +666,20 @@ class TestDatanode(RMFTestCase):
self.assertEquals(
('hdfs dfsadmin -fs hdfs://ns1 -D ipc.client.connect.max.retries=5 -D ipc.client.connect.retry.interval=1000 -getDatanodeInfo 0.0.0.0:8010'),
mocks_dict['checked_call'].call_args_list[0][0][0])
+
+ def test_reload_configs(self):
+ with self.assertRaises(Fail):
+ self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/datanode.py",
+ classname = "DataNode",
+ command = "reload_configs",
+ config_file = "default.json",
+ stack_version = self.STACK_VERSION,
+ target = RMFTestCase.TARGET_COMMON_SERVICES
+ )
+
+ # self.assertResourceCalled('Execute', "hdfs dfsadmin -fs hdfs://c6401.ambari.apache.org:8020 -reconfig namenode c6401.ambari.apache.org:8020 start",
+ # tries=115,
+ # try_sleep=10,
+ # user="hdfs",
+ # logoutput=True
+ # )
\ No newline at end of file
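
The Fail expectation above is consistent with the rest of the patch: datanode.py defines no reload_configs method, and hdfs-site declares the refresh command only for NAMENODE. In terms of the illustrative refresh_command_for sketch earlier:

# Assuming the refresh_command_for sketch from the hdfs-site diff above:
prop = {"supported-refresh-commands": [
    {"componentName": "NAMENODE", "command": "reload_configs"}]}
assert refresh_command_for(prop, "NAMENODE") == "reload_configs"
assert refresh_command_for(prop, "DATANODE") is None  # DATANODE declares no refresh command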
http://git-wip-us.apache.org/repos/asf/ambari/blob/57682942/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_namenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_namenode.py b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_namenode.py
index 06e12f6..4e1124a 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_namenode.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_namenode.py
@@ -1745,6 +1745,39 @@ class TestNamenode(RMFTestCase):
get_namenode_states_mock.return_value = active_namenodes, standby_namenodes, unknown_namenodes
self.assertFalse(is_this_namenode_active())
+ def test_reloadproxyusers(self):
+ self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/namenode.py",
+ classname = "NameNode",
+ command = "reloadproxyusers",
+ config_file = "default.json",
+ stack_version = self.STACK_VERSION,
+ target = RMFTestCase.TARGET_COMMON_SERVICES
+ )
+
+ self.assertResourceCalled('ExecuteHadoop', 'dfsadmin -fs hdfs://c6401.ambari.apache.org:8020 -refreshSuperUserGroupsConfiguration',
+ user = 'hdfs',
+ conf_dir = '/etc/hadoop/conf',
+ bin_dir = '/usr/bin')
+ self.assertNoMoreResources()
+
+ def test_reload_configs(self):
+ with self.assertRaises(Fail):
+ self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/namenode.py",
+ classname = "NameNode",
+ command = "reload_configs",
+ config_file = "default.json",
+ stack_version = self.STACK_VERSION,
+ target = RMFTestCase.TARGET_COMMON_SERVICES
+ )
+
+ # self.assertResourceCalled('Execute', "hdfs dfsadmin -fs hdfs://c6401.ambari.apache.org:8020 -reconfig namenode c6401.ambari.apache.org:8020 start",
+ # tries=115,
+ # try_sleep=10,
+ # user="hdfs",
+ # logoutput=True
+ # )
+
+
class Popen_Mock:
return_value = 1
http://git-wip-us.apache.org/repos/asf/ambari/blob/57682942/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/HDFS/configuration/hdfs-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/HDFS/configuration/hdfs-site.xml b/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/HDFS/configuration/hdfs-site.xml
index 28657eb..9e52a33 100644
--- a/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/HDFS/configuration/hdfs-site.xml
+++ b/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/HDFS/configuration/hdfs-site.xml
@@ -444,4 +444,12 @@ don't exist, they will be created with this permission.</description>
</description>
<on-ambari-upgrade add="true"/>
</property>
+ <property>
+ <name>hadoop.caller.context.enabled</name>
+ <value>true</value>
+ <on-ambari-upgrade add="false"/>
+ <supported-refresh-commands>
+ <refresh-command componentName="NAMENODE" command="reload_configs" />
+ </supported-refresh-commands>
+ </property>
</configuration>
[02/31] ambari git commit: AMBARI-22162. Move out the druid
configurations from hive-site to hive-interactive-site. (Slim Bouguerra via
Swapan Shridhar).
Posted by jl...@apache.org.
AMBARI-22162. Move out the druid configurations from hive-site to hive-interactive-site. (Slim Bouguerra via Swapan Shridhar).
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/ce2a0a00
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/ce2a0a00
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/ce2a0a00
Branch: refs/heads/branch-feature-AMBARI-14714
Commit: ce2a0a00921bde8d780c82561902773f76431fce
Parents: 84e616d
Author: Swapan Shridhar <ss...@hortonworks.com>
Authored: Mon Oct 9 02:41:52 2017 -0700
Committer: Swapan Shridhar <ss...@hortonworks.com>
Committed: Mon Oct 9 02:41:52 2017 -0700
----------------------------------------------------------------------
.../HIVE/0.12.0.2.0/configuration/hive-site.xml | 217 ------------------
.../configuration/hive-interactive-site.xml | 225 +++++++++++++++++++
.../stacks/HDP/2.6/services/stack_advisor.py | 12 +-
.../stacks/HDP/2.6/upgrades/config-upgrade.xml | 21 +-
4 files changed, 241 insertions(+), 234 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ambari/blob/ce2a0a00/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/configuration/hive-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/configuration/hive-site.xml b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/configuration/hive-site.xml
index d66cf4c..69d1c69 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/configuration/hive-site.xml
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/configuration/hive-site.xml
@@ -451,223 +451,6 @@ limitations under the License.
<on-ambari-upgrade add="false"/>
</property>
- <!-- Druid related properties -->
- <property>
- <name>hive.druid.broker.address.default</name>
- <value>localhost:8082</value>
- <description>Host name of druid router if any or broker</description>
- <on-ambari-upgrade add="false"/>
- <depends-on>
- <property>
- <type>druid-router</type>
- <name>druid.port</name>
- </property>
- </depends-on>
- </property>
-
- <property>
- <name>hive.druid.metadata.uri</name>
- <value>jdbc:mysql://localhost:3355/druid</value>
- <description>URI to connect to the database (for example jdbc:mysql://hostname:port/DBName)</description>
- <on-ambari-upgrade add="false"/>
- <depends-on>
- <property>
- <type>druid-common</type>
- <name>druid.metadata.storage.connector.connectURI</name>
- </property>
- </depends-on>
- </property>
-
- <property>
- <name>hive.druid.coordinator.address.default</name>
- <value>localhost:8082</value>
- <description>Host name of druid router if any or broker</description>
- <on-ambari-upgrade add="false"/>
- <depends-on>
- <property>
- <type>druid-coordinator</type>
- <name>druid.port</name>
- </property>
- </depends-on>
- </property>
-
- <property>
- <name>hive.druid.metadata.password</name>
- <value>{{druid_metadata_password}}</value>
- <property-type>PASSWORD</property-type>
- <display-name>Druid Metadata Password</display-name>
- <description>Druid meta data storage password</description>
- <value-attributes>
- <type>password</type>
- <empty-value-valid>true</empty-value-valid>
- </value-attributes>
- <on-ambari-upgrade add="false"/>
- </property>
-
- <property>
- <name>hive.druid.metadata.username</name>
- <value>druid</value>
- <description>Username used to connect to druid metadata storage</description>
- <on-ambari-upgrade add="false"/>
- <depends-on>
- <property>
- <type>druid-common</type>
- <name>druid.metadata.storage.connector.user</name>
- </property>
- </depends-on>
- </property>
-
- <property>
- <name>hive.druid.indexer.segments.granularity</name>
- <display-name>Default Granularity for the Druid segments</display-name>
- <value-attributes>
- <type>value-list</type>
- <entries>
- <entry>
- <value>YEAR</value>
- </entry>
- <entry>
- <value>MONTH</value>
- </entry>
- <entry>
- <value>WEEK</value>
- </entry>
- <entry>
- <value>DAY</value>
- </entry>
- <entry>
- <value>HOUR</value>
- </entry>
- <entry>
- <value>MINUTE</value>
- </entry>
- <entry>
- <value>SECOND</value>
- </entry>
- </entries>
- </value-attributes>
- <value>MINUTE</value>
- <description>Default Granularity for the segments created by the Druid storage handler, this can be overridden per table using table property druid.segment.granularity </description>
- <on-ambari-upgrade add="false"/>
- </property>
- <property>
- <name>hive.druid.indexer.partition.size.max</name>
- <value>5000000</value>
- <description>Maximum number of records per segment partition</description>
- <on-ambari-upgrade add="false"/>
- </property>
- <property>
- <name>hive.druid.indexer.memory.rownum.max</name>
- <value>75000</value>
- <description>Maximum number of records in memory while storing data in Druid</description>
- <on-ambari-upgrade add="false"/>
- </property>
-
- <property>
- <name>hive.druid.select.distribute</name>
- <value>true</value>
- <description>If it is set to true, we distribute the execution of Druid Select queries</description>
- <on-ambari-upgrade add="false"/>
- <value-attributes>
- <type>boolean</type>
- </value-attributes>
- </property>
-
- <property>
- <name>hive.druid.basePersistDirectory</name>
- <value></value>
- <description>
- Local temporary directory used to persist intermediate indexing state,
- if empty (recommended) will default to JVM system property java.io.tmpdir.
- </description>
- <value-attributes>
- <empty-value-valid>true</empty-value-valid>
- </value-attributes>
- <on-ambari-upgrade add="false"/>
- </property>
-
- <property>
- <name>hive.druid.storage.storageDirectory</name>
- <value>{{druid_storage_dir}}</value>
- <description>
- Druid deep storage location for segments.
- </description>
- <on-ambari-upgrade add="false"/>
- </property>
- <property>
- <name>hive.druid.metadata.db.type</name>
- <display-name>Druid metadata storage type </display-name>
- <value-attributes>
- <overridable>false</overridable>
- <type>value-list</type>
- <entries>
- <entry>
- <value>mysql</value>
- <label>MYSQL</label>
- </entry>
- <entry>
- <value>postgresql</value>
- <label>POSTGRESQL</label>
- </entry>
- </entries>
- </value-attributes>
- <value>mysql</value>
- <depends-on>
- <property>
- <type>druid-common</type>
- <name>druid.metadata.storage.type</name>
- </property>
- </depends-on>
- <description>Druid metadata storage type</description>
- <on-ambari-upgrade add="false"/>
- </property>
-
- <property>
- <name>hive.druid.passiveWaitTimeMs</name>
- <value>30000</value>
- <description>
- Wait time in ms default to 30 seconds.
- </description>
- <on-ambari-upgrade add="false"/>
- </property>
-
- <property>
- <name>hive.druid.working.directory</name>
- <value>/tmp/druid-indexing</value>
- <description>
- Default hdfs working directory used to store some intermediate metadata.
- </description>
- <on-ambari-upgrade add="false"/>
- </property>
-
- <property>
- <name>hive.druid.maxTries</name>
- <value>5</value>
- <description>
- Maximum number of http call retries before giving up.
- </description>
- <on-ambari-upgrade add="false"/>
- </property>
-
- <property>
- <name>hive.druid.bitmap.type</name>
- <display-name>Druid metadata storage type </display-name>
- <value-attributes>
- <type>value-list</type>
- <entries>
- <entry>
- <value>roaring</value>
- </entry>
- <entry>
- <value>concise</value>
- </entry>
- </entries>
- </value-attributes>
- <value>roaring</value>
- <description>Druid Coding algorithm use to encode the bitmaps</description>
- <on-ambari-upgrade add="false"/>
- </property>
-
<!-- This property is removed in HDP 2.5 and higher. -->
<property>
<name>atlas.rest.address</name>
http://git-wip-us.apache.org/repos/asf/ambari/blob/ce2a0a00/ambari-server/src/main/resources/stacks/HDP/2.6/services/HIVE/configuration/hive-interactive-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/services/HIVE/configuration/hive-interactive-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.6/services/HIVE/configuration/hive-interactive-site.xml
index aae2efa..64cef3e 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.6/services/HIVE/configuration/hive-interactive-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/services/HIVE/configuration/hive-interactive-site.xml
@@ -124,4 +124,229 @@ limitations under the License.
<on-ambari-upgrade add="false"/>
</property>
+ <!-- Druid related properties -->
+ <property>
+ <name>hive.druid.broker.address.default</name>
+ <value>localhost:8082</value>
+ <description>Host name of the Druid router, if any, otherwise of the broker</description>
+ <on-ambari-upgrade add="false"/>
+ <depends-on>
+ <property>
+ <type>druid-router</type>
+ <name>druid.port</name>
+ </property>
+ </depends-on>
+ </property>
+
+ <property>
+ <name>hive.druid.metadata.uri</name>
+ <value>jdbc:mysql://localhost:3355/druid</value>
+ <description>URI to connect to the database (for example jdbc:mysql://hostname:port/DBName)</description>
+ <on-ambari-upgrade add="false"/>
+ <depends-on>
+ <property>
+ <type>druid-common</type>
+ <name>druid.metadata.storage.connector.connectURI</name>
+ </property>
+ </depends-on>
+ </property>
+
+ <property>
+ <name>hive.druid.coordinator.address.default</name>
+ <value>localhost:8082</value>
+ <description>Host name of the Druid coordinator</description>
+ <on-ambari-upgrade add="false"/>
+ <depends-on>
+ <property>
+ <type>druid-coordinator</type>
+ <name>druid.port</name>
+ </property>
+ </depends-on>
+ </property>
+
+ <property>
+ <name>hive.druid.metadata.password</name>
+ <value>{{druid_metadata_password}}</value>
+ <property-type>PASSWORD</property-type>
+ <display-name>Druid Metadata Password</display-name>
+ <description>Druid metadata storage password</description>
+ <value-attributes>
+ <type>password</type>
+ <empty-value-valid>true</empty-value-valid>
+ </value-attributes>
+ <on-ambari-upgrade add="false"/>
+ </property>
+
+ <property>
+ <name>hive.druid.metadata.username</name>
+ <value>druid</value>
+ <description>Username used to connect to druid metadata storage</description>
+ <on-ambari-upgrade add="false"/>
+ <depends-on>
+ <property>
+ <type>druid-common</type>
+ <name>druid.metadata.storage.connector.user</name>
+ </property>
+ </depends-on>
+ </property>
+
+ <property>
+ <name>hive.druid.indexer.segments.granularity</name>
+ <display-name>Default Granularity for the Druid segments</display-name>
+ <value-attributes>
+ <type>value-list</type>
+ <entries>
+ <entry>
+ <value>YEAR</value>
+ </entry>
+ <entry>
+ <value>MONTH</value>
+ </entry>
+ <entry>
+ <value>WEEK</value>
+ </entry>
+ <entry>
+ <value>DAY</value>
+ </entry>
+ <entry>
+ <value>HOUR</value>
+ </entry>
+ <entry>
+ <value>MINUTE</value>
+ </entry>
+ <entry>
+ <value>SECOND</value>
+ </entry>
+ </entries>
+ </value-attributes>
+ <value>DAY</value>
+ <description>Default granularity for the segments created by the Druid storage handler; this can be overridden per table using the table property druid.segment.granularity</description>
+ <on-ambari-upgrade add="false"/>
+ </property>
+ <property>
+ <name>hive.druid.indexer.partition.size.max</name>
+ <value>1000000</value>
+ <description>Maximum number of records per segment partition</description>
+ <on-ambari-upgrade add="false"/>
+ </property>
+ <property>
+ <name>hive.druid.indexer.memory.rownum.max</name>
+ <value>75000</value>
+ <description>Maximum number of records in memory while storing data in Druid</description>
+ <on-ambari-upgrade add="false"/>
+ </property>
+
+ <property>
+ <name>hive.druid.select.distribute</name>
+ <value>true</value>
+ <description>If set to true, the execution of Druid Select queries is distributed</description>
+ <on-ambari-upgrade add="false"/>
+ <value-attributes>
+ <type>boolean</type>
+ </value-attributes>
+ </property>
+
+ <property>
+ <name>hive.druid.basePersistDirectory</name>
+ <value></value>
+ <description>
+ Local temporary directory used to persist intermediate indexing state;
+ if empty (recommended), this defaults to the JVM system property java.io.tmpdir.
+ </description>
+ <value-attributes>
+ <empty-value-valid>true</empty-value-valid>
+ </value-attributes>
+ <on-ambari-upgrade add="false"/>
+ </property>
+
+ <property>
+ <name>hive.druid.storage.storageDirectory</name>
+ <value>{{druid_storage_dir}}</value>
+ <description>
+ Druid deep storage location for segments.
+ </description>
+ <on-ambari-upgrade add="false"/>
+ </property>
+ <property>
+ <name>hive.druid.metadata.db.type</name>
+ <display-name>Druid metadata storage type </display-name>
+ <value-attributes>
+ <overridable>false</overridable>
+ <type>value-list</type>
+ <entries>
+ <entry>
+ <value>mysql</value>
+ <label>MYSQL</label>
+ </entry>
+ <entry>
+ <value>postgresql</value>
+ <label>POSTGRESQL</label>
+ </entry>
+ </entries>
+ </value-attributes>
+ <value>mysql</value>
+ <depends-on>
+ <property>
+ <type>druid-common</type>
+ <name>druid.metadata.storage.type</name>
+ </property>
+ </depends-on>
+ <description>Druid metadata storage type</description>
+ <on-ambari-upgrade add="false"/>
+ </property>
+
+ <property>
+ <name>hive.druid.passiveWaitTimeMs</name>
+ <value>30000</value>
+ <description>
+ Wait time in ms; defaults to 30 seconds.
+ </description>
+ <on-ambari-upgrade add="false"/>
+ </property>
+
+ <property>
+ <name>hive.druid.working.directory</name>
+ <value>/tmp/druid-indexing</value>
+ <description>
+ Default HDFS working directory used to store some intermediate metadata.
+ </description>
+ <on-ambari-upgrade add="false"/>
+ </property>
+
+ <property>
+ <name>hive.druid.maxTries</name>
+ <value>5</value>
+ <description>
+ Maximum number of http call retries before giving up.
+ </description>
+ <on-ambari-upgrade add="false"/>
+ </property>
+
+ <property>
+ <name>hive.druid.bitmap.type</name>
+ <display-name>Druid bitmap encoding type</display-name>
+ <value-attributes>
+ <type>value-list</type>
+ <entries>
+ <entry>
+ <value>roaring</value>
+ </entry>
+ <entry>
+ <value>concise</value>
+ </entry>
+ </entries>
+ </value-attributes>
+ <value>roaring</value>
+ <description>Druid coding algorithm used to encode the bitmaps</description>
+ <on-ambari-upgrade add="false"/>
+ </property>
+
+ <property>
+ <name>hive.druid.http.read.timeout</name>
+ <value>PT10M</value>
+ <description>
+ Maximum amount of time to wait on an HTTP read before giving up.
+ </description>
+ <on-ambari-upgrade add="false"/>
+ </property>
</configuration>
http://git-wip-us.apache.org/repos/asf/ambari/blob/ce2a0a00/ambari-server/src/main/resources/stacks/HDP/2.6/services/stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/services/stack_advisor.py b/ambari-server/src/main/resources/stacks/HDP/2.6/services/stack_advisor.py
index 0d2925e..b634e71 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.6/services/stack_advisor.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/services/stack_advisor.py
@@ -605,7 +605,7 @@ class HDP26StackAdvisor(HDP25StackAdvisor):
# druid is not in list of services to be installed
servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
if 'DRUID' in servicesList:
- putHiveSiteProperty = self.putProperty(configurations, "hive-site", services)
+ putHiveInteractiveSiteProperty = self.putProperty(configurations, "hive-interactive-site", services)
if 'druid-coordinator' in services['configurations']:
component_hosts = self.getHostsWithComponent("DRUID", 'DRUID_COORDINATOR', services, hosts)
if component_hosts is not None and len(component_hosts) > 0:
@@ -642,11 +642,11 @@ class HDP26StackAdvisor(HDP25StackAdvisor):
else:
druid_metadata_user = ""
- putHiveSiteProperty('hive.druid.broker.address.default', druid_broker_host_port)
- putHiveSiteProperty('hive.druid.coordinator.address.default', druid_coordinator_host_port)
- putHiveSiteProperty('hive.druid.metadata.uri', druid_metadata_uri)
- putHiveSiteProperty('hive.druid.metadata.username', druid_metadata_user)
- putHiveSiteProperty('hive.druid.metadata.db.type', druid_metadata_type)
+ putHiveInteractiveSiteProperty('hive.druid.broker.address.default', druid_broker_host_port)
+ putHiveInteractiveSiteProperty('hive.druid.coordinator.address.default', druid_coordinator_host_port)
+ putHiveInteractiveSiteProperty('hive.druid.metadata.uri', druid_metadata_uri)
+ putHiveInteractiveSiteProperty('hive.druid.metadata.username', druid_metadata_user)
+ putHiveInteractiveSiteProperty('hive.druid.metadata.db.type', druid_metadata_type)
def recommendHBASEConfigurations(self, configurations, clusterData, services, hosts):
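
The fix above only re-targets the setter: putProperty returns a closure bound to one config type, so constructing it with "hive-interactive-site" redirects every subsequent Druid recommendation away from hive-site. A stripped-down sketch of that closure pattern; the real stack advisor version also threads through the services context and value attributes:

# Stripped-down sketch of the putProperty closure pattern used above;
# the real stack advisor also threads through the services argument.
def put_property(configurations, config_type):
    configurations.setdefault(config_type, {"properties": {}})
    def setter(key, value):
        configurations[config_type]["properties"][key] = value
    return setter

configs = {}
put_hive_interactive_site = put_property(configs, "hive-interactive-site")
put_hive_interactive_site("hive.druid.broker.address.default", "localhost:8082")
# configs now maps "hive-interactive-site" -> the recommended property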
http://git-wip-us.apache.org/repos/asf/ambari/blob/ce2a0a00/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/config-upgrade.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/config-upgrade.xml b/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/config-upgrade.xml
index 2b4c656..fd7e438 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/config-upgrade.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/config-upgrade.xml
@@ -75,11 +75,19 @@
<set key ="atlas.jaas.ticketBased-KafkaClient.option.useTicketCache" value="true"
if-type="cluster-env" if-key="security_enabled" if-value="true"/>
</definition>
+ </changes>
+ </component>
+ <component name="HIVE_SERVER_INTERACTIVE">
+ <changes>
+ <definition xsi:type="configure" id="llap_update_tez_shuffle_ssl_enable" summary="Update additional LLAP-Tez settings">
+ <type>tez-interactive-site</type>
+ <set key="tez.runtime.shuffle.ssl.enable" value="false"/>
+ </definition>
<definition xsi:type="configure" id="hdp_2_6_maint_druid_config_for_hive_hook" summary="Updating druid hive related properties">
<type>hive-site</type>
<set key="hive.druid.metadata.password" value="{{druid_metadata_password}}" if-type="druid-common"/>
- <set key="hive.druid.indexer.segments.granularity" value="MINUTE" if-type="druid-common"/>
- <set key="hive.druid.indexer.partition.size.max" value="5000000" if-type="druid-common"/>
+ <set key="hive.druid.indexer.segments.granularity" value="DAY" if-type="druid-common"/>
+ <set key="hive.druid.indexer.partition.size.max" value="1000000" if-type="druid-common"/>
<set key="hive.druid.indexer.memory.rownum.max" value="75000" if-type="druid-common"/>
<set key="hive.druid.select.distribute" value="true" if-type="druid-common"/>
<set key="hive.druid.basePersistDirectory" value="" if-type="druid-common"/>
@@ -87,15 +95,6 @@
<set key="hive.druid.passiveWaitTimeMs" value="30000" if-type="druid-common"/>
<set key="hive.druid.working.directory" value="/tmp/druid-indexing" if-type="druid-common"/>
<set key="hive.druid.bitmap.type" value="roaring" if-type="druid-common"/>
-
- </definition>
- </changes>
- </component>
- <component name="HIVE_SERVER_INTERACTIVE">
- <changes>
- <definition xsi:type="configure" id="llap_update_tez_shuffle_ssl_enable" summary="Update additional LLAP-Tez settings">
- <type>tez-interactive-site</type>
- <set key="tez.runtime.shuffle.ssl.enable" value="false"/>
</definition>
</changes>
</component>
[29/31] ambari git commit: AMBARI-22176. Remove duplicate kerberos
setting for superset (Slim Bouguerra via rlevas)
Posted by jl...@apache.org.
AMBARI-22176. Remove duplicate kerberos setting for superset (Slim Bouguerra via rlevas)
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/a3a8afcd
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/a3a8afcd
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/a3a8afcd
Branch: refs/heads/branch-feature-AMBARI-14714
Commit: a3a8afcd62e4c32b9972b2fcdd4744f7d6728e2a
Parents: 24c3589
Author: Slim Bouguerra <sb...@hortonworks.com>
Authored: Tue Oct 10 13:14:19 2017 -0400
Committer: Robert Levas <rl...@hortonworks.com>
Committed: Tue Oct 10 13:14:19 2017 -0400
----------------------------------------------------------------------
.../stacks/HDP/2.6/services/DRUID/kerberos.json | 30 --------------------
1 file changed, 30 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ambari/blob/a3a8afcd/ambari-server/src/main/resources/stacks/HDP/2.6/services/DRUID/kerberos.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/services/DRUID/kerberos.json b/ambari-server/src/main/resources/stacks/HDP/2.6/services/DRUID/kerberos.json
index 464e420..198c351 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.6/services/DRUID/kerberos.json
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/services/DRUID/kerberos.json
@@ -35,27 +35,6 @@
}
},
{
- "name": "superset",
- "principal": {
- "value": "${druid-env/druid_user}@${realm}",
- "type": "user",
- "configuration": "druid-superset/KERBEROS_PRINCIPAL",
- "local_username": "${druid-env/druid_user}"
- },
- "keytab": {
- "file": "${keytab_dir}/superset.headless.keytab",
- "owner": {
- "name": "${druid-env/druid_user}",
- "access": "r"
- },
- "group": {
- "name": "${cluster-env/user_group}",
- "access": "r"
- },
- "configuration": "druid-superset/KERBEROS_KEYTAB"
- }
- },
- {
"name": "druid_smokeuser",
"reference": "/smokeuser"
}
@@ -105,15 +84,6 @@
"reference": "/druid"
}
]
- },
- {
- "name": "DRUID_SUPERSET",
- "identities": [
- {
- "name": "druid_druid_superset_druid",
- "reference": "/druid"
- }
- ]
}
],
"configurations": [
[16/31] ambari git commit: Revert "AMBARI-21205 Make ToggleKerberos
and AddDeleteService experimental features (Duc Le via rzang)"
Posted by jl...@apache.org.
Revert "AMBARI-21205 Make ToggleKerberos and AddDeleteService experimental features (Duc Le via rzang)"
This reverts commit 57bb1365e414c1f110d2d142fa198fb8e043af95.
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/7e0fe291
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/7e0fe291
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/7e0fe291
Branch: refs/heads/branch-feature-AMBARI-14714
Commit: 7e0fe2913619f4b70097e3ebcd7fb89e84eee62d
Parents: 5af1e53
Author: Jaimin Jetly <ja...@hortonworks.com>
Authored: Mon Oct 9 11:26:04 2017 -0700
Committer: Jaimin Jetly <ja...@hortonworks.com>
Committed: Mon Oct 9 11:26:04 2017 -0700
----------------------------------------------------------------------
ambari-web/app/config.js | 6 ++--
ambari-web/app/routes/add_service_routes.js | 2 +-
ambari-web/app/routes/main.js | 2 +-
.../app/templates/main/admin/kerberos.hbs | 34 +++++++++-----------
.../main/service/all_services_actions.hbs | 6 ++--
ambari-web/app/views/main/admin.js | 14 ++++----
.../main/admin/stack_upgrade/services_view.js | 2 +-
ambari-web/app/views/main/menu.js | 16 ++++-----
ambari-web/app/views/main/service/item.js | 2 +-
.../admin/stack_upgrade/services_view_test.js | 1 -
10 files changed, 36 insertions(+), 49 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ambari/blob/7e0fe291/ambari-web/app/config.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/config.js b/ambari-web/app/config.js
index 0963f70..ba1b75d 100644
--- a/ambari-web/app/config.js
+++ b/ambari-web/app/config.js
@@ -86,11 +86,9 @@ App.supports = {
addingNewRepository: false,
kerberosStackAdvisor: true,
logCountVizualization: false,
- createAlerts: false,
- enabledWizardForHostOrderedUpgrade: true,
manageJournalNode: true,
- enableToggleKerberos: true,
- enableAddDeleteServices: true
+ createAlerts: false,
+ enabledWizardForHostOrderedUpgrade: true
};
if (App.enableExperimental) {
http://git-wip-us.apache.org/repos/asf/ambari/blob/7e0fe291/ambari-web/app/routes/add_service_routes.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/routes/add_service_routes.js b/ambari-web/app/routes/add_service_routes.js
index 75b3586..1615f0d 100644
--- a/ambari-web/app/routes/add_service_routes.js
+++ b/ambari-web/app/routes/add_service_routes.js
@@ -24,7 +24,7 @@ module.exports = App.WizardRoute.extend({
route: '/service/add',
enter: function (router) {
- if (App.isAuthorized('SERVICE.ADD_DELETE_SERVICES') && App.supports.enableAddDeleteServices) {
+ if (App.isAuthorized('SERVICE.ADD_DELETE_SERVICES')) {
// `getSecurityStatus` call is required to retrieve information related to kerberos type: Manual or automated kerberos
router.get('mainController').isLoading.call(router.get('clusterController'),'isClusterNameLoaded').done(function () {
App.router.get('mainAdminKerberosController').getSecurityStatus().always(function () {
http://git-wip-us.apache.org/repos/asf/ambari/blob/7e0fe291/ambari-web/app/routes/main.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/routes/main.js b/ambari-web/app/routes/main.js
index 7ed18de..30cc8aa 100644
--- a/ambari-web/app/routes/main.js
+++ b/ambari-web/app/routes/main.js
@@ -460,7 +460,7 @@ module.exports = Em.Route.extend(App.RouterRedirections, {
route: '/kerberos',
enter: function (router, transition) {
- if (router.get('loggedIn') && (!App.isAuthorized('CLUSTER.TOGGLE_KERBEROS') || !App.supports.enableToggleKerberos)) {
+ if (router.get('loggedIn') && !App.isAuthorized('CLUSTER.TOGGLE_KERBEROS')) {
router.transitionTo('main.dashboard.index');
}
},
http://git-wip-us.apache.org/repos/asf/ambari/blob/7e0fe291/ambari-web/app/templates/main/admin/kerberos.hbs
----------------------------------------------------------------------
diff --git a/ambari-web/app/templates/main/admin/kerberos.hbs b/ambari-web/app/templates/main/admin/kerberos.hbs
index 2b41122..e7bb618 100644
--- a/ambari-web/app/templates/main/admin/kerberos.hbs
+++ b/ambari-web/app/templates/main/admin/kerberos.hbs
@@ -20,22 +20,20 @@
<div>
<p class="text-success">{{t admin.security.enabled}}
{{#isAuthorized "CLUSTER.TOGGLE_KERBEROS"}}
- {{#if App.supports.enableToggleKerberos}}
- <button class="btn btn-padding btn-warning admin-disable-security-btn" {{bindAttr disabled="isKerberosButtonsDisabled"}} {{action notifySecurityOffPopup target="controller"}}>{{t admin.kerberos.button.disable}} </button>
- {{#unless isManualKerberos}}
- <button class="btn btn-success" id="regenerate-keytabs" {{bindAttr disabled="isKerberosButtonsDisabled"}} {{action regenerateKeytabs target="controller"}}>
- <i class="glyphicon glyphicon-repeat"></i> {{t admin.kerberos.button.regenerateKeytabs}}</button>
- {{#if App.isCredentialStorePersistent}}
- <button class="btn btn-primary" {{action showManageKDCCredentialsPopup target="controller"}}>{{t admin.kerberos.credentials.store.menu.label}}</button>
- {{/if}}
- {{/unless}}
- <br/>
- {{#unless isEditMode}}
- <a href="#" {{action makeConfigsEditable target="controller"}} class="pull-right">
- {{t common.edit}}
- </a>
- {{/unless}}
+ <button class="btn btn-padding btn-warning admin-disable-security-btn" {{bindAttr disabled="isKerberosButtonsDisabled"}} {{action notifySecurityOffPopup target="controller"}}>{{t admin.kerberos.button.disable}} </button>
+ {{#unless isManualKerberos}}
+ <button class="btn btn-success" id="regenerate-keytabs" {{bindAttr disabled="isKerberosButtonsDisabled"}} {{action regenerateKeytabs target="controller"}}>
+ <i class="glyphicon glyphicon-repeat"></i> {{t admin.kerberos.button.regenerateKeytabs}}</button>
+ {{#if App.isCredentialStorePersistent}}
+ <button class="btn btn-primary" {{action showManageKDCCredentialsPopup target="controller"}}>{{t admin.kerberos.credentials.store.menu.label}}</button>
{{/if}}
+ {{/unless}}
+ <br/>
+ {{#unless isEditMode}}
+ <a href="#" {{action makeConfigsEditable target="controller"}} class="pull-right">
+ {{t common.edit}}
+ </a>
+ {{/unless}}
{{/isAuthorized}}
</p>
</div>
@@ -53,10 +51,8 @@
<div>
<p class="muted background-text">{{t admin.security.disabled}}
{{#isAuthorized "CLUSTER.TOGGLE_KERBEROS"}}
- {{#if App.supports.enableToggleKerberos}}
- <a class="btn btn-padding btn-success admin-enable-security-btn" {{action checkAndStartKerberosWizard target="controller"}}>{{t admin.kerberos.button.enable}} </a>
- <br/>
- {{/if}}
+ <a class="btn btn-padding btn-success admin-enable-security-btn" {{action checkAndStartKerberosWizard target="controller"}}>{{t admin.kerberos.button.enable}} </a>
+ <br/>
{{/isAuthorized}}
</p>
</div>
http://git-wip-us.apache.org/repos/asf/ambari/blob/7e0fe291/ambari-web/app/templates/main/service/all_services_actions.hbs
----------------------------------------------------------------------
diff --git a/ambari-web/app/templates/main/service/all_services_actions.hbs b/ambari-web/app/templates/main/service/all_services_actions.hbs
index a9e122b..3e87cb2 100644
--- a/ambari-web/app/templates/main/service/all_services_actions.hbs
+++ b/ambari-web/app/templates/main/service/all_services_actions.hbs
@@ -22,14 +22,12 @@
</div>
<ul class="dropdown-menu">
{{#isAuthorized "SERVICE.ADD_DELETE_SERVICES"}}
- {{#if App.supports.enableAddDeleteServices}}
- <li {{bindAttr class="view.serviceController.isAllServicesInstalled:disabled"}}>
+ <li {{bindAttr class="view.serviceController.isAllServicesInstalled:disabled"}}>
<a href="#"
{{bindAttr class="view.serviceController.isAllServicesInstalled:disabled"}}
{{action gotoAddService target="view.serviceController"}}>
<i class="glyphicon glyphicon-plus"></i> {{t services.service.add}}</a>
- </li>
- {{/if}}
+ </li>
{{/isAuthorized}}
{{#isAuthorized "SERVICE.START_STOP"}}
<li class="divider"></li>
http://git-wip-us.apache.org/repos/asf/ambari/blob/7e0fe291/ambari-web/app/views/main/admin.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/views/main/admin.js b/ambari-web/app/views/main/admin.js
index 05d0f56..509f380 100644
--- a/ambari-web/app/views/main/admin.js
+++ b/ambari-web/app/views/main/admin.js
@@ -39,14 +39,12 @@ App.MainAdminView = Em.View.extend({
});
}
if (!App.get('isHadoopWindowsStack') && App.isAuthorized('CLUSTER.TOGGLE_KERBEROS') || (App.get('upgradeInProgress') || App.get('upgradeHolding')) ) {
- if (App.supports.enableToggleKerberos) {
- items.push({
- name: 'kerberos',
- url: 'adminKerberos.index',
- label: Em.I18n.t('common.kerberos'),
- disabled: App.get('upgradeInProgress') || App.get('upgradeHolding')
- });
- }
+ items.push({
+ name: 'kerberos',
+ url: 'adminKerberos.index',
+ label: Em.I18n.t('common.kerberos'),
+ disabled: App.get('upgradeInProgress') || App.get('upgradeHolding')
+ });
}
if ((App.isAuthorized('SERVICE.START_STOP, CLUSTER.MODIFY_CONFIGS') && App.isAuthorized('SERVICE.MANAGE_AUTO_START, CLUSTER.MANAGE_AUTO_START')) || (App.get('upgradeInProgress') || App.get('upgradeHolding'))) {
if (App.supports.serviceAutoStart) {
http://git-wip-us.apache.org/repos/asf/ambari/blob/7e0fe291/ambari-web/app/views/main/admin/stack_upgrade/services_view.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/views/main/admin/stack_upgrade/services_view.js b/ambari-web/app/views/main/admin/stack_upgrade/services_view.js
index 25efffe..f566814 100644
--- a/ambari-web/app/views/main/admin/stack_upgrade/services_view.js
+++ b/ambari-web/app/views/main/admin/stack_upgrade/services_view.js
@@ -56,7 +56,7 @@ App.MainAdminStackServicesView = Em.View.extend({
* @param event
*/
goToAddService: function (event) {
- if (!App.isAuthorized('SERVICE.ADD_DELETE_SERVICES') || !App.supports.enableAddDeleteServices) {
+ if (!App.isAuthorized('SERVICE.ADD_DELETE_SERVICES')) {
return;
} else if (event.context == "KERBEROS") {
App.router.get('mainAdminKerberosController').checkAndStartKerberosWizard();
http://git-wip-us.apache.org/repos/asf/ambari/blob/7e0fe291/ambari-web/app/views/main/menu.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/views/main/menu.js b/ambari-web/app/views/main/menu.js
index 32c4f6f..4bb53ae 100644
--- a/ambari-web/app/views/main/menu.js
+++ b/ambari-web/app/views/main/menu.js
@@ -118,15 +118,13 @@ App.MainSideMenuView = Em.CollectionView.extend({
});
}
if (!App.get('isHadoopWindowsStack') && App.isAuthorized('CLUSTER.TOGGLE_KERBEROS') || upg) {
- if (App.supports.enableToggleKerberos) {
- categories.push({
- name: 'kerberos',
- url: 'kerberos/',
- label: Em.I18n.t('common.kerberos'),
- disabled: App.get('upgradeInProgress') || App.get('upgradeHolding'),
- href: router.urlFor('main.admin.adminKerberos')
- });
- }
+ categories.push({
+ name: 'kerberos',
+ url: 'kerberos/',
+ label: Em.I18n.t('common.kerberos'),
+ disabled: App.get('upgradeInProgress') || App.get('upgradeHolding'),
+ href: router.urlFor('main.admin.adminKerberos')
+ });
}
if ((App.isAuthorized('SERVICE.START_STOP, CLUSTER.MODIFY_CONFIGS') && App.isAuthorized('SERVICE.MANAGE_AUTO_START, CLUSTER.MANAGE_AUTO_START')) || upg) {
if (App.supports.serviceAutoStart) {
http://git-wip-us.apache.org/repos/asf/ambari/blob/7e0fe291/ambari-web/app/views/main/service/item.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/views/main/service/item.js b/ambari-web/app/views/main/service/item.js
index 45c783b..37e0904 100644
--- a/ambari-web/app/views/main/service/item.js
+++ b/ambari-web/app/views/main/service/item.js
@@ -289,7 +289,7 @@ App.MainServiceItemView = Em.View.extend({
options.push(actionMap.DOWNLOAD_CLIENT_CONFIGS);
}
- if (App.isAuthorized("SERVICE.ADD_DELETE_SERVICES") && App.supports.enableAddDeleteServices) {
+ if (App.isAuthorized("SERVICE.ADD_DELETE_SERVICES")) {
options.push(actionMap.DELETE_SERVICE);
}
http://git-wip-us.apache.org/repos/asf/ambari/blob/7e0fe291/ambari-web/test/views/main/admin/stack_upgrade/services_view_test.js
----------------------------------------------------------------------
diff --git a/ambari-web/test/views/main/admin/stack_upgrade/services_view_test.js b/ambari-web/test/views/main/admin/stack_upgrade/services_view_test.js
index da75cf2..70d182c 100644
--- a/ambari-web/test/views/main/admin/stack_upgrade/services_view_test.js
+++ b/ambari-web/test/views/main/admin/stack_upgrade/services_view_test.js
@@ -34,7 +34,6 @@ describe('App.MainAdminStackServicesView', function () {
sinon.stub(App.router, 'get').returns(mock);
sinon.spy(mock, 'checkAndStartKerberosWizard');
isAccessibleMock = sinon.stub(App, 'isAuthorized');
- App.set('supports.enableAddDeleteServices', true);
});
afterEach(function() {
App.get('router').transitionTo.restore();
[28/31] ambari git commit: AMBARI-22172. Change log level for
ClusterNotFoundException (magyari_sandor)
Posted by jl...@apache.org.
AMBARI-22172. Change log level for ClusterNotFoundException (magyari_sandor)
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/24c35893
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/24c35893
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/24c35893
Branch: refs/heads/branch-feature-AMBARI-14714
Commit: 24c35893b0be52146bc821b6d44eece8fd49c50a
Parents: 2d23e12
Author: Sandor Magyari <sm...@hortonworks.com>
Authored: Tue Oct 10 13:19:39 2017 +0200
Committer: Sandor Magyari <sm...@hortonworks.com>
Committed: Tue Oct 10 17:10:40 2017 +0200
----------------------------------------------------------------------
.../ambari/server/controller/AmbariManagementControllerImpl.java | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ambari/blob/24c35893/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
index 8c4888c..b0eb8ac 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
@@ -1126,7 +1126,7 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
try {
cluster = clusters.getCluster(request.getClusterName());
} catch (ClusterNotFoundException e) {
- LOG.error("Cluster not found ", e);
+ LOG.info(e.getMessage());
throw new ParentObjectNotFoundException("Parent Cluster resource doesn't exist", e);
}