Posted to commits@ambari.apache.org by rl...@apache.org on 2017/07/10 20:58:41 UTC
[01/18] ambari git commit: AMBARI-21339 logviewer started along with nimbus if supervisor is not running on the same machine
Repository: ambari
Updated Branches:
refs/heads/branch-feature-AMBARI-20859 e5c1b247e -> 07e50d869
AMBARI-21339 logviewer started along with nimbus if supervisor is not running on the same machine
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/1939dabc
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/1939dabc
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/1939dabc
Branch: refs/heads/branch-feature-AMBARI-20859
Commit: 1939dabcd7f6eeff3bb93e4d6f718b8a32351bd2
Parents: 6832ed9
Author: lpuskas <lp...@apache.org>
Authored: Thu Jun 29 17:11:59 2017 +0200
Committer: lpuskas <la...@sequenceiq.com>
Committed: Fri Jul 7 11:23:12 2017 +0200
----------------------------------------------------------------------
.../STORM/0.9.1/package/scripts/nimbus.py | 8 ++-
.../stacks/2.1/STORM/test_storm_nimbus.py | 60 +++++++++++++++++++-
.../stacks/2.1/configs/default-storm-start.json | 14 +++++
.../test/python/stacks/2.1/configs/default.json | 13 +++++
.../stacks/2.1/configs/secured-storm-start.json | 13 +++++
.../test/python/stacks/2.1/configs/secured.json | 15 ++++-
6 files changed, 119 insertions(+), 4 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ambari/blob/1939dabc/ambari-server/src/main/resources/common-services/STORM/0.9.1/package/scripts/nimbus.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/STORM/0.9.1/package/scripts/nimbus.py b/ambari-server/src/main/resources/common-services/STORM/0.9.1/package/scripts/nimbus.py
index 360af5d..126ae78 100644
--- a/ambari-server/src/main/resources/common-services/STORM/0.9.1/package/scripts/nimbus.py
+++ b/ambari-server/src/main/resources/common-services/STORM/0.9.1/package/scripts/nimbus.py
@@ -70,12 +70,18 @@ class NimbusDefault(Nimbus):
setup_ranger_storm(upgrade_type=upgrade_type)
service("nimbus", action="start")
+ if "SUPERVISOR" not in params.config['localComponents']:
+ service("logviewer", action="start")
+
def stop(self, env, upgrade_type=None):
import params
env.set_params(params)
service("nimbus", action="stop")
+ if "SUPERVISOR" not in params.config['localComponents']:
+ service("logviewer", action="stop")
+
def status(self, env):
import status_params
@@ -85,7 +91,7 @@ class NimbusDefault(Nimbus):
def get_log_folder(self):
import params
return params.log_dir
-
+
def get_user(self):
import params
return params.storm_user
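In short, the patch makes the Nimbus scripts start and stop the Storm logviewer only when no SUPERVISOR component is installed on the same host (a host with a Supervisor is expected to manage the logviewer itself). A minimal, self-contained sketch of that guard, assuming only the localComponents list from the agent command JSON:

# Hedged sketch of the guard added above; not the Ambari module itself.
def should_manage_logviewer(command_json):
    # Nimbus should start/stop the logviewer only if SUPERVISOR is absent on this host.
    return "SUPERVISOR" not in command_json.get("localComponents", [])

if __name__ == "__main__":
    print(should_manage_logviewer({"localComponents": ["NIMBUS", "SUPERVISOR"]}))       # False
    print(should_manage_logviewer({"localComponents": ["NIMBUS", "ZOOKEEPER_SERVER"]}))  # True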
http://git-wip-us.apache.org/repos/asf/ambari/blob/1939dabc/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_nimbus.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_nimbus.py b/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_nimbus.py
index 35f057c..fd25126 100644
--- a/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_nimbus.py
+++ b/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_nimbus.py
@@ -58,6 +58,15 @@ class TestStormNimbus(TestStormBase):
owner = 'storm',
group = 'hadoop',
)
+ self.assertResourceCalled('Execute', 'source /etc/storm/conf/storm-env.sh ; export PATH=$JAVA_HOME/bin:$PATH ; storm logviewer > /var/log/storm/logviewer.out 2>&1 &\n echo $! > /var/run/storm/logviewer.pid',
+ path = ['/usr/bin'],
+ user = 'storm',
+ not_if = "ambari-sudo.sh su storm -l -s /bin/bash -c '[RMF_EXPORT_PLACEHOLDER]ls /var/run/storm/logviewer.pid >/dev/null 2>&1 && ps -p `cat /var/run/storm/logviewer.pid` >/dev/null 2>&1'",
+ )
+ self.assertResourceCalled('File', '/var/run/storm/logviewer.pid',
+ owner = 'storm',
+ group = 'hadoop',
+ )
self.assertNoMoreResources()
def test_start_with_metrics_collector(self):
@@ -99,6 +108,15 @@ class TestStormNimbus(TestStormBase):
owner = 'storm',
group = 'hadoop',
)
+ self.assertResourceCalled('Execute', 'source /etc/storm/conf/storm-env.sh ; export PATH=$JAVA_HOME/bin:$PATH ; storm logviewer > /var/log/storm/logviewer.out 2>&1 &\n echo $! > /var/run/storm/logviewer.pid',
+ path = ['/usr/bin'],
+ user = 'storm',
+ not_if = "ambari-sudo.sh su storm -l -s /bin/bash -c '[RMF_EXPORT_PLACEHOLDER]ls /var/run/storm/logviewer.pid >/dev/null 2>&1 && ps -p `cat /var/run/storm/logviewer.pid` >/dev/null 2>&1'",
+ )
+ self.assertResourceCalled('File', '/var/run/storm/logviewer.pid',
+ owner = 'storm',
+ group = 'hadoop',
+ )
self.assertNoMoreResources()
def test_start_with_metrics_collector_modern(self):
@@ -141,12 +159,21 @@ class TestStormNimbus(TestStormBase):
owner = 'storm',
group = 'hadoop',
)
+ self.assertResourceCalled('Execute', 'source /etc/storm/conf/storm-env.sh ; export PATH=$JAVA_HOME/bin:$PATH ; storm logviewer > /var/log/storm/logviewer.out 2>&1 &\n echo $! > /var/run/storm/logviewer.pid',
+ path = ['/usr/bin'],
+ user = 'storm',
+ not_if = "ambari-sudo.sh su storm -l -s /bin/bash -c '[RMF_EXPORT_PLACEHOLDER]ls /var/run/storm/logviewer.pid >/dev/null 2>&1 && ps -p `cat /var/run/storm/logviewer.pid` >/dev/null 2>&1'",
+ )
+ self.assertResourceCalled('File', '/var/run/storm/logviewer.pid',
+ owner = 'storm',
+ group = 'hadoop',
+ )
self.assertNoMoreResources()
@patch("os.path.exists")
def test_stop_default(self, path_exists_mock):
# Bool for the pid file
- path_exists_mock.side_effect = [True]
+ path_exists_mock.side_effect = [True, True]
self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/nimbus.py",
classname = "Nimbus",
command = "stop",
@@ -164,6 +191,16 @@ class TestStormNimbus(TestStormBase):
self.assertResourceCalled('File', '/var/run/storm/nimbus.pid',
action = ['delete'],
)
+ self.assertResourceCalled('Execute', "ambari-sudo.sh kill 123",
+ not_if = "! (ambari-sudo.sh su storm -l -s /bin/bash -c '[RMF_EXPORT_PLACEHOLDER]ls /var/run/storm/logviewer.pid >/dev/null 2>&1 && ps -p `cat /var/run/storm/logviewer.pid` >/dev/null 2>&1')",
+ )
+ self.assertResourceCalled('Execute', "ambari-sudo.sh kill -9 123",
+ not_if = "sleep 2; ! (ambari-sudo.sh su storm -l -s /bin/bash -c '[RMF_EXPORT_PLACEHOLDER]ls /var/run/storm/logviewer.pid >/dev/null 2>&1 && ps -p `cat /var/run/storm/logviewer.pid` >/dev/null 2>&1') || sleep 20; ! (ambari-sudo.sh su storm -l -s /bin/bash -c '[RMF_EXPORT_PLACEHOLDER]ls /var/run/storm/logviewer.pid >/dev/null 2>&1 && ps -p `cat /var/run/storm/logviewer.pid` >/dev/null 2>&1')",
+ ignore_failures = True,
+ )
+ self.assertResourceCalled('File', '/var/run/storm/logviewer.pid',
+ action = ['delete'],
+ )
self.assertNoMoreResources()
def test_configure_secured(self):
@@ -196,12 +233,21 @@ class TestStormNimbus(TestStormBase):
owner = 'storm',
group = 'hadoop',
)
+ self.assertResourceCalled('Execute', 'source /etc/storm/conf/storm-env.sh ; export PATH=$JAVA_HOME/bin:$PATH ; storm logviewer > /var/log/storm/logviewer.out 2>&1 &\n echo $! > /var/run/storm/logviewer.pid',
+ path = ['/usr/bin'],
+ user = 'storm',
+ not_if = "ambari-sudo.sh su storm -l -s /bin/bash -c '[RMF_EXPORT_PLACEHOLDER]ls /var/run/storm/logviewer.pid >/dev/null 2>&1 && ps -p `cat /var/run/storm/logviewer.pid` >/dev/null 2>&1'",
+ )
+ self.assertResourceCalled('File', '/var/run/storm/logviewer.pid',
+ owner = 'storm',
+ group = 'hadoop',
+ )
self.assertNoMoreResources()
@patch("os.path.exists")
def test_stop_secured(self, path_exists_mock):
# Bool for the pid file
- path_exists_mock.side_effect = [True]
+ path_exists_mock.side_effect = [True, True]
self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/nimbus.py",
classname = "Nimbus",
command = "stop",
@@ -219,6 +265,16 @@ class TestStormNimbus(TestStormBase):
self.assertResourceCalled('File', '/var/run/storm/nimbus.pid',
action = ['delete'],
)
+ self.assertResourceCalled('Execute', "ambari-sudo.sh kill 123",
+ not_if = "! (ambari-sudo.sh su storm -l -s /bin/bash -c '[RMF_EXPORT_PLACEHOLDER]ls /var/run/storm/logviewer.pid >/dev/null 2>&1 && ps -p `cat /var/run/storm/logviewer.pid` >/dev/null 2>&1')",
+ )
+ self.assertResourceCalled('Execute', "ambari-sudo.sh kill -9 123",
+ not_if = "sleep 2; ! (ambari-sudo.sh su storm -l -s /bin/bash -c '[RMF_EXPORT_PLACEHOLDER]ls /var/run/storm/logviewer.pid >/dev/null 2>&1 && ps -p `cat /var/run/storm/logviewer.pid` >/dev/null 2>&1') || sleep 20; ! (ambari-sudo.sh su storm -l -s /bin/bash -c '[RMF_EXPORT_PLACEHOLDER]ls /var/run/storm/logviewer.pid >/dev/null 2>&1 && ps -p `cat /var/run/storm/logviewer.pid` >/dev/null 2>&1')",
+ ignore_failures = True,
+ )
+ self.assertResourceCalled('File', '/var/run/storm/logviewer.pid',
+ action = ['delete'],
+ )
self.assertNoMoreResources()
def test_pre_upgrade_restart(self):
http://git-wip-us.apache.org/repos/asf/ambari/blob/1939dabc/ambari-server/src/test/python/stacks/2.1/configs/default-storm-start.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.1/configs/default-storm-start.json b/ambari-server/src/test/python/stacks/2.1/configs/default-storm-start.json
index 27cb63e..05330a0 100644
--- a/ambari-server/src/test/python/stacks/2.1/configs/default-storm-start.json
+++ b/ambari-server/src/test/python/stacks/2.1/configs/default-storm-start.json
@@ -1,4 +1,18 @@
{
+ "localComponents": [
+ "APP_TIMELINE_SERVER",
+ "TEZ_CLIENT",
+ "DATANODE",
+ "HDFS_CLIENT",
+ "ZOOKEEPER_SERVER",
+ "RESOURCEMANAGER",
+ "MAPREDUCE2_CLIENT",
+ "YARN_CLIENT",
+ "HISTORYSERVER",
+ "ZOOKEEPER_CLIENT",
+ "NAMENODE"
+ ],
+
"configuration_attributes": {
"storm-site": {},
"hdfs-site": {
http://git-wip-us.apache.org/repos/asf/ambari/blob/1939dabc/ambari-server/src/test/python/stacks/2.1/configs/default.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.1/configs/default.json b/ambari-server/src/test/python/stacks/2.1/configs/default.json
index e04e1eb..536074e 100644
--- a/ambari-server/src/test/python/stacks/2.1/configs/default.json
+++ b/ambari-server/src/test/python/stacks/2.1/configs/default.json
@@ -1,4 +1,17 @@
{
+ "localComponents": [
+ "APP_TIMELINE_SERVER",
+ "TEZ_CLIENT",
+ "DATANODE",
+ "HDFS_CLIENT",
+ "ZOOKEEPER_SERVER",
+ "RESOURCEMANAGER",
+ "MAPREDUCE2_CLIENT",
+ "YARN_CLIENT",
+ "HISTORYSERVER",
+ "ZOOKEEPER_CLIENT",
+ "NAMENODE"
+ ],
"roleCommand": "SERVICE_CHECK",
"clusterName": "c1",
"hostname": "c6401.ambari.apache.org",
http://git-wip-us.apache.org/repos/asf/ambari/blob/1939dabc/ambari-server/src/test/python/stacks/2.1/configs/secured-storm-start.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.1/configs/secured-storm-start.json b/ambari-server/src/test/python/stacks/2.1/configs/secured-storm-start.json
index 1b027b7..6d7fdd1 100644
--- a/ambari-server/src/test/python/stacks/2.1/configs/secured-storm-start.json
+++ b/ambari-server/src/test/python/stacks/2.1/configs/secured-storm-start.json
@@ -1,4 +1,17 @@
{
+ "localComponents": [
+ "APP_TIMELINE_SERVER",
+ "TEZ_CLIENT",
+ "DATANODE",
+ "HDFS_CLIENT",
+ "ZOOKEEPER_SERVER",
+ "RESOURCEMANAGER",
+ "MAPREDUCE2_CLIENT",
+ "YARN_CLIENT",
+ "HISTORYSERVER",
+ "ZOOKEEPER_CLIENT",
+ "NAMENODE"
+ ],
"configuration_attributes": {
"storm-site": {},
"hdfs-site": {
http://git-wip-us.apache.org/repos/asf/ambari/blob/1939dabc/ambari-server/src/test/python/stacks/2.1/configs/secured.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.1/configs/secured.json b/ambari-server/src/test/python/stacks/2.1/configs/secured.json
index 61b359c..e2c22be 100644
--- a/ambari-server/src/test/python/stacks/2.1/configs/secured.json
+++ b/ambari-server/src/test/python/stacks/2.1/configs/secured.json
@@ -1,5 +1,18 @@
{
- "roleCommand": "INSTALL",
+ "localComponents": [
+ "APP_TIMELINE_SERVER",
+ "TEZ_CLIENT",
+ "DATANODE",
+ "HDFS_CLIENT",
+ "ZOOKEEPER_SERVER",
+ "RESOURCEMANAGER",
+ "MAPREDUCE2_CLIENT",
+ "YARN_CLIENT",
+ "HISTORYSERVER",
+ "ZOOKEEPER_CLIENT",
+ "NAMENODE"
+ ],
+ "roleCommand": "INSTALL",
"clusterName": "c1",
"hostname": "c6401.ambari.apache.org",
"hostLevelParams": {
[18/18] ambari git commit: Merge branch 'trunk' into branch-feature-AMBARI-20859
Posted by rl...@apache.org.
Merge branch 'trunk' into branch-feature-AMBARI-20859
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/07e50d86
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/07e50d86
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/07e50d86
Branch: refs/heads/branch-feature-AMBARI-20859
Commit: 07e50d869fd901fdd5c9a1c866ec2e566dbf4e4e
Parents: e5c1b24 70cf77e
Author: Robert Levas <rl...@hortonworks.com>
Authored: Mon Jul 10 16:58:31 2017 -0400
Committer: Robert Levas <rl...@hortonworks.com>
Committed: Mon Jul 10 16:58:31 2017 -0400
----------------------------------------------------------------------
.../libraries/functions/solr_cloud_util.py | 10 +
.../libraries/functions/stack_features.py | 13 +
.../libraries/functions/stack_tools.py | 39 +
.../libraries/script/script.py | 19 +-
.../src/main/resources/solr | 826 ++++++++---
.../ambari/infra/solr/AmbariSolrCloudCLI.java | 14 +
.../infra/solr/AmbariSolrCloudClient.java | 8 +
.../commands/RemoveAdminHandlersCommand.java | 46 +
.../commands/SetClusterPropertyZkCommand.java | 6 +-
.../InfraRuleBasedAuthorizationPluginTest.java | 5 +
ambari-infra/pom.xml | 2 +-
.../api/ShipperConfigElementDescription.java | 59 +
.../api/ShipperConfigTypeDescription.java | 44 +
.../model/inputconfig/impl/ConditionsImpl.java | 13 +
.../model/inputconfig/impl/FieldsImpl.java | 14 +
.../inputconfig/impl/FilterDescriptorImpl.java | 51 +
.../impl/FilterGrokDescriptorImpl.java | 24 +
.../impl/FilterKeyValueDescriptorImpl.java | 28 +
.../model/inputconfig/impl/InputConfigImpl.java | 18 +
.../inputconfig/impl/InputDescriptorImpl.java | 101 ++
.../impl/InputFileBaseDescriptorImpl.java | 27 +
.../impl/InputS3FileDescriptorImpl.java | 16 +
.../impl/MapAnonymizeDescriptorImpl.java | 21 +-
.../inputconfig/impl/MapDateDescriptorImpl.java | 20 +-
.../impl/MapFieldCopyDescriptorImpl.java | 14 +-
.../impl/MapFieldDescriptorImpl.java | 33 +
.../impl/MapFieldNameDescriptorImpl.java | 14 +-
.../impl/MapFieldValueDescriptorImpl.java | 20 +-
.../inputconfig/impl/PostMapValuesAdapter.java | 2 +-
.../ambari-logsearch-logfeeder/docs/filter.md | 4 +-
.../ambari-logsearch-logfeeder/docs/input.md | 10 +-
.../docs/postMapValues.md | 2 +-
.../logfeeder/common/LogEntryParseTester.java | 2 +-
.../ambari/logfeeder/filter/FilterJSONTest.java | 12 +-
.../configsets/audit_logs/conf/solrconfig.xml | 3 +-
.../configsets/hadoop_logs/conf/solrconfig.xml | 3 +-
.../main/configsets/history/conf/solrconfig.xml | 3 +-
.../common/ShipperConfigDescriptionStorage.java | 67 +
.../logsearch/dao/SolrSchemaFieldDao.java | 2 +-
.../ambari/logsearch/doc/DocConstants.java | 1 +
.../ambari/logsearch/manager/InfoManager.java | 9 +
.../response/ShipperConfigDescriptionData.java | 52 +
.../ambari/logsearch/rest/InfoResource.java | 10 +
ambari-logsearch/docker/Dockerfile | 2 +-
ambari-logsearch/docker/bin/start.sh | 4 +-
ambari-logsearch/pom.xml | 2 +-
.../server/api/query/JpaPredicateVisitor.java | 8 +-
.../controller/ActionExecutionContext.java | 26 +
.../controller/AmbariActionExecutionHelper.java | 26 +-
.../BlueprintConfigurationProcessor.java | 59 +-
.../ClusterStackVersionResourceProvider.java | 163 ++-
.../ambari/server/state/ConfigHelper.java | 32 +
.../ambari/server/topology/AmbariContext.java | 35 +-
.../server/upgrade/UpgradeCatalog252.java | 61 +
.../server/upgrade/UpgradeCatalog300.java | 18 +
.../0.1.0/package/scripts/params.py | 3 +
.../0.1.0/package/scripts/setup_infra_solr.py | 17 +-
.../HBASE/0.96.0.2.0/package/scripts/hbase.py | 12 +-
.../0.96.0.2.0/package/scripts/params_linux.py | 3 +
.../package/alerts/alert_hive_metastore.py | 11 +-
.../package/alerts/alert_llap_app_status.py | 12 +-
.../properties/audit_logs-solrconfig.xml.j2 | 3 +-
.../properties/service_logs-solrconfig.xml.j2 | 3 +-
.../package/alerts/alert_check_oozie_server.py | 8 +-
.../STORM/0.9.1/package/scripts/nimbus.py | 8 +-
.../resources/host_scripts/alert_disk_space.py | 10 +-
.../host_scripts/alert_version_select.py | 16 +-
.../HDP/2.0.6/configuration/cluster-env.xml | 16 +-
.../HDP/2.0.6/properties/stack_features.json | 852 +++++------
.../HDP/2.0.6/properties/stack_tools.json | 16 +-
.../PERF/1.0/configuration/cluster-env.xml | 16 +-
.../PERF/1.0/properties/stack_features.json | 38 +-
.../stacks/PERF/1.0/properties/stack_tools.json | 16 +-
.../BlueprintConfigurationProcessorTest.java | 41 +-
...ClusterStackVersionResourceProviderTest.java | 4 +-
.../ClusterConfigurationRequestTest.java | 60 +-
.../server/upgrade/UpgradeCatalog300Test.java | 33 +
.../common-services/configs/hawq_default.json | 6 +-
.../python/host_scripts/TestAlertDiskSpace.py | 16 +-
.../stacks/2.1/STORM/test_storm_nimbus.py | 60 +-
.../stacks/2.1/configs/default-storm-start.json | 14 +
.../test/python/stacks/2.1/configs/default.json | 13 +
.../stacks/2.1/configs/secured-storm-start.json | 13 +
.../test/python/stacks/2.1/configs/secured.json | 15 +-
.../stacks/2.4/AMBARI_INFRA/test_infra_solr.py | 3 +
.../2.5/configs/ranger-admin-default.json | 990 ++++++-------
.../2.5/configs/ranger-admin-secured.json | 1108 +++++++--------
.../stacks/2.5/configs/ranger-kms-default.json | 1158 +++++++--------
.../stacks/2.5/configs/ranger-kms-secured.json | 1320 +++++++++---------
.../2.6/configs/ranger-admin-default.json | 953 +++++++------
.../2.6/configs/ranger-admin-secured.json | 1066 +++++++-------
.../src/test/python/stacks/utils/RMFTestCase.py | 8 +-
92 files changed, 5983 insertions(+), 4081 deletions(-)
----------------------------------------------------------------------
[16/18] ambari git commit: AMBARI-21423 Add REST end point for the documentation of the Log Feeder shipper properties (mgergely)
Posted by rl...@apache.org.
AMBARI-21423 Add REST end point for the documentation of the Log Feeder shipper properties (mgergely)
Change-Id: If6d1b66c3a1f74b118ae60a7edc26624d49fb7e6
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/15dd999f
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/15dd999f
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/15dd999f
Branch: refs/heads/branch-feature-AMBARI-20859
Commit: 15dd999fff99fb80bc65ddfc94513e890a6efdef
Parents: c088289
Author: Miklos Gergely <mg...@hortonworks.com>
Authored: Mon Jul 10 14:51:23 2017 +0200
Committer: Miklos Gergely <mg...@hortonworks.com>
Committed: Mon Jul 10 14:51:23 2017 +0200
----------------------------------------------------------------------
.../api/ShipperConfigElementDescription.java | 59 +++++++++++
.../api/ShipperConfigTypeDescription.java | 44 ++++++++
.../model/inputconfig/impl/ConditionsImpl.java | 13 +++
.../model/inputconfig/impl/FieldsImpl.java | 14 +++
.../inputconfig/impl/FilterDescriptorImpl.java | 51 ++++++++++
.../impl/FilterGrokDescriptorImpl.java | 24 +++++
.../impl/FilterKeyValueDescriptorImpl.java | 28 +++++
.../model/inputconfig/impl/InputConfigImpl.java | 18 ++++
.../inputconfig/impl/InputDescriptorImpl.java | 101 +++++++++++++++++++
.../impl/InputFileBaseDescriptorImpl.java | 27 +++++
.../impl/InputS3FileDescriptorImpl.java | 16 +++
.../impl/MapAnonymizeDescriptorImpl.java | 21 +++-
.../inputconfig/impl/MapDateDescriptorImpl.java | 20 +++-
.../impl/MapFieldCopyDescriptorImpl.java | 14 ++-
.../impl/MapFieldDescriptorImpl.java | 33 ++++++
.../impl/MapFieldNameDescriptorImpl.java | 14 ++-
.../impl/MapFieldValueDescriptorImpl.java | 20 +++-
.../inputconfig/impl/PostMapValuesAdapter.java | 2 +-
.../ambari-logsearch-logfeeder/docs/filter.md | 4 +-
.../ambari-logsearch-logfeeder/docs/input.md | 10 +-
.../docs/postMapValues.md | 2 +-
.../ambari/logfeeder/filter/FilterJSONTest.java | 12 ++-
.../common/ShipperConfigDescriptionStorage.java | 67 ++++++++++++
.../ambari/logsearch/doc/DocConstants.java | 1 +
.../ambari/logsearch/manager/InfoManager.java | 9 ++
.../response/ShipperConfigDescriptionData.java | 52 ++++++++++
.../ambari/logsearch/rest/InfoResource.java | 10 ++
27 files changed, 667 insertions(+), 19 deletions(-)
----------------------------------------------------------------------
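The annotations added here (ShipperConfigElementDescription / ShipperConfigTypeDescription) attach path, type, description, examples and defaultValue metadata to the shipper config model classes, and the new InfoResource/InfoManager code exposes that metadata over REST. A hedged sketch, for illustration only, of the shape of one such documentation entry and how a client might render it; the field names mirror the annotation members, and the actual wire format may differ:

# Hedged sketch: rendering shipper config documentation entries whose keys
# mirror the annotation members shown in the diff below. The example values
# are taken from ConditionsImpl; the real REST payload may differ.
entries = [
    {
        "path": "/filter/[]/conditions/fields",
        "type": "json object",
        "description": "The fields in the input element whose values should be matched.",
        "examples": [],
        "defaultValue": "",
    },
]

for e in entries:
    line = "%s (%s): %s" % (e["path"], e["type"], e["description"])
    if e["examples"]:
        line += " Examples: %s." % ", ".join(e["examples"])
    if e["defaultValue"]:
        line += " Default: %s." % e["defaultValue"]
    print(line)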
http://git-wip-us.apache.org/repos/asf/ambari/blob/15dd999f/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/ShipperConfigElementDescription.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/ShipperConfigElementDescription.java b/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/ShipperConfigElementDescription.java
new file mode 100644
index 0000000..d65bf8e
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/ShipperConfigElementDescription.java
@@ -0,0 +1,59 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.ambari.logsearch.config.api;
+
+import java.lang.annotation.ElementType;
+import java.lang.annotation.Retention;
+import java.lang.annotation.RetentionPolicy;
+import java.lang.annotation.Target;
+
+/**
+ * Marker for the shipper configuration properties.
+ * Can be used to generate documentation about the shipper configs.
+ */
+@Retention(RetentionPolicy.RUNTIME)
+@Target({ElementType.FIELD})
+public @interface ShipperConfigElementDescription {
+
+ /**
+ * The path of the json element.
+ */
+ String path();
+
+ /**
+ * The type of the json element.
+ */
+ String type();
+
+ /**
+ * Describe what the json element is used for.
+ */
+ String description();
+
+ /**
+ * An example value for the element, if applicable.
+ */
+ String[] examples() default {};
+
+ /**
+ * Default value of the json element, if applicable.
+ */
+ String defaultValue() default "";
+
+}
http://git-wip-us.apache.org/repos/asf/ambari/blob/15dd999f/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/ShipperConfigTypeDescription.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/ShipperConfigTypeDescription.java b/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/ShipperConfigTypeDescription.java
new file mode 100644
index 0000000..1c112d8
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/ShipperConfigTypeDescription.java
@@ -0,0 +1,44 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.ambari.logsearch.config.api;
+
+import java.lang.annotation.ElementType;
+import java.lang.annotation.Retention;
+import java.lang.annotation.RetentionPolicy;
+import java.lang.annotation.Target;
+
+/**
+ * Marker for the shipper configuration types.
+ * Can be used to generate documentation about the shipper configs.
+ */
+@Retention(RetentionPolicy.RUNTIME)
+@Target({ElementType.TYPE})
+public @interface ShipperConfigTypeDescription {
+
+ /**
+ * The name of the element type.
+ */
+ String name();
+
+ /**
+ * The description of the json element.
+ */
+ String description();
+
+}
http://git-wip-us.apache.org/repos/asf/ambari/blob/15dd999f/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/ConditionsImpl.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/ConditionsImpl.java b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/ConditionsImpl.java
index 8bbff8f..2ba472c 100644
--- a/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/ConditionsImpl.java
+++ b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/ConditionsImpl.java
@@ -19,11 +19,24 @@
package org.apache.ambari.logsearch.config.zookeeper.model.inputconfig.impl;
+import org.apache.ambari.logsearch.config.api.ShipperConfigElementDescription;
+import org.apache.ambari.logsearch.config.api.ShipperConfigTypeDescription;
import org.apache.ambari.logsearch.config.api.model.inputconfig.Conditions;
import com.google.gson.annotations.Expose;
+@ShipperConfigTypeDescription(
+ name = "Conditions",
+ description = "Describes the conditions that should be met in order to match a filter to an input element.\n" +
+ "\n" +
+ "It has the following attributes:"
+)
public class ConditionsImpl implements Conditions {
+ @ShipperConfigElementDescription(
+ path = "/filter/[]/conditions/fields",
+ type = "json object",
+ description = "The fields in the input element of which's value should be met."
+ )
@Expose
private FieldsImpl fields;
http://git-wip-us.apache.org/repos/asf/ambari/blob/15dd999f/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/FieldsImpl.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/FieldsImpl.java b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/FieldsImpl.java
index 68cd0e2..32a0348 100644
--- a/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/FieldsImpl.java
+++ b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/FieldsImpl.java
@@ -21,11 +21,25 @@ package org.apache.ambari.logsearch.config.zookeeper.model.inputconfig.impl;
import java.util.Set;
+import org.apache.ambari.logsearch.config.api.ShipperConfigElementDescription;
+import org.apache.ambari.logsearch.config.api.ShipperConfigTypeDescription;
import org.apache.ambari.logsearch.config.api.model.inputconfig.Fields;
import com.google.gson.annotations.Expose;
+@ShipperConfigTypeDescription(
+ name = "Fields",
+ description = "Describes a the fields which's value should be met in order to match a filter to an input element.\n" +
+ "\n" +
+ "It has the following attributes:"
+ )
public class FieldsImpl implements Fields {
+ @ShipperConfigElementDescription(
+ path = "/filter/[]/conditions/fields/type",
+ type = "list of strings",
+ description = "The acceptable values for the type field in the input element.",
+ examples = {"ambari_server", "\"spark_jobhistory_server\", \"spark_thriftserver\", \"livy_server\""}
+ )
@Expose
private Set<String> type;
http://git-wip-us.apache.org/repos/asf/ambari/blob/15dd999f/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/FilterDescriptorImpl.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/FilterDescriptorImpl.java b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/FilterDescriptorImpl.java
index 4e11715..eb9d38c 100644
--- a/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/FilterDescriptorImpl.java
+++ b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/FilterDescriptorImpl.java
@@ -22,35 +22,86 @@ package org.apache.ambari.logsearch.config.zookeeper.model.inputconfig.impl;
import java.util.List;
import java.util.Map;
+import org.apache.ambari.logsearch.config.api.ShipperConfigElementDescription;
+import org.apache.ambari.logsearch.config.api.ShipperConfigTypeDescription;
import org.apache.ambari.logsearch.config.api.model.inputconfig.FilterDescriptor;
import org.apache.ambari.logsearch.config.api.model.inputconfig.PostMapValues;
import com.google.gson.annotations.Expose;
import com.google.gson.annotations.SerializedName;
+@ShipperConfigTypeDescription(
+ name = "Filter",
+ description = "The filter element in the [input configuration](inputConfig.md) contains a list of filter descriptions, each describing one filter applied on an input.\n" +
+ "\n" +
+ "The general elements in the json are the following:"
+)
public abstract class FilterDescriptorImpl implements FilterDescriptor {
+ @ShipperConfigElementDescription(
+ path = "/filter/[]/filter",
+ type = "string",
+ description = "The type of the filter.",
+ examples = {"grok", "keyvalue", "json"}
+ )
@Expose
private String filter;
+ @ShipperConfigElementDescription(
+ path = "/filter/[]/conditions",
+ type = "json object",
+ description = "The conditions of which input to filter."
+ )
@Expose
private ConditionsImpl conditions;
+ @ShipperConfigElementDescription(
+ path = "/filter/[]/sort_order",
+ type = "integer",
+ description = "Describes the order in which the filters should be applied.",
+ examples = {"1", "3"}
+ )
@Expose
@SerializedName("sort_order")
private Integer sortOrder;
+ @ShipperConfigElementDescription(
+ path = "/filter/[]/source_field",
+ type = "integer",
+ description = "The source of the filter, must be set for keyvalue filters.",
+ examples = {"field_further_to_filter"},
+ defaultValue = "log_message"
+ )
@Expose
@SerializedName("source_field")
private String sourceField;
+ @ShipperConfigElementDescription(
+ path = "/filter/[]/remove_source_field",
+ type = "boolean",
+ description = "Remove the source field after the filter is applied.",
+ examples = {"true", "false"},
+ defaultValue = "false"
+ )
@Expose
@SerializedName("remove_source_field")
private Boolean removeSourceField;
+ @ShipperConfigElementDescription(
+ path = "/filter/[]/post_map_values",
+ type = "dictionary string to list of json objects",
+ description = "Mappings done after the filtering provided it's result."
+ )
@Expose
@SerializedName("post_map_values")
private Map<String, List<PostMapValuesImpl>> postMapValues;
+ @ShipperConfigElementDescription(
+ path = "/filter/[]/is_enabled",
+ type = "boolean",
+ description = "A flag to show if the filter should be used.",
+ examples = {"true", "false"},
+ defaultValue = "true"
+ )
@Expose
@SerializedName("is_enabled")
private Boolean isEnabled;
http://git-wip-us.apache.org/repos/asf/ambari/blob/15dd999f/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/FilterGrokDescriptorImpl.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/FilterGrokDescriptorImpl.java b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/FilterGrokDescriptorImpl.java
index 995f76b..e140df0 100644
--- a/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/FilterGrokDescriptorImpl.java
+++ b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/FilterGrokDescriptorImpl.java
@@ -19,20 +19,44 @@
package org.apache.ambari.logsearch.config.zookeeper.model.inputconfig.impl;
+import org.apache.ambari.logsearch.config.api.ShipperConfigElementDescription;
+import org.apache.ambari.logsearch.config.api.ShipperConfigTypeDescription;
import org.apache.ambari.logsearch.config.api.model.inputconfig.FilterGrokDescriptor;
import com.google.gson.annotations.Expose;
import com.google.gson.annotations.SerializedName;
+@ShipperConfigTypeDescription(
+ name = "Grok Filter",
+ description = "Grok filters have the following additional parameters:"
+)
public class FilterGrokDescriptorImpl extends FilterDescriptorImpl implements FilterGrokDescriptor {
+ @ShipperConfigElementDescription(
+ path = "/filter/[]/log4j_format",
+ type = "string",
+ description = "The log4j pattern of the log, not used, it is only there for documentation.",
+ examples = {"%d{ISO8601} - %-5p [%t:%C{1}@%L] - %m%n"}
+ )
@Expose
@SerializedName("log4j_format")
private String log4jFormat;
+ @ShipperConfigElementDescription(
+ path = "/filter/[]/multiline_pattern",
+ type = "string",
+ description = "The grok pattern that shows that the line is not a log line on it's own but the part of a multi line entry.",
+ examples = {"^(%{TIMESTAMP_ISO8601:logtime})"}
+ )
@Expose
@SerializedName("multiline_pattern")
private String multilinePattern;
+ @ShipperConfigElementDescription(
+ path = "/filter/[]/message_pattern",
+ type = "string",
+ description = "The grok pattern to use to parse the log entry.",
+ examples = {"(?m)^%{TIMESTAMP_ISO8601:logtime}%{SPACE}-%{SPACE}%{LOGLEVEL:level}%{SPACE}\\[%{DATA:thread_name}\\@%{INT:line_number}\\]%{SPACE}-%{SPACE}%{GREEDYDATA:log_message}"}
+ )
@Expose
@SerializedName("message_pattern")
private String messagePattern;
http://git-wip-us.apache.org/repos/asf/ambari/blob/15dd999f/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/FilterKeyValueDescriptorImpl.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/FilterKeyValueDescriptorImpl.java b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/FilterKeyValueDescriptorImpl.java
index 8e89990..1c782c5 100644
--- a/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/FilterKeyValueDescriptorImpl.java
+++ b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/FilterKeyValueDescriptorImpl.java
@@ -19,20 +19,48 @@
package org.apache.ambari.logsearch.config.zookeeper.model.inputconfig.impl;
+import org.apache.ambari.logsearch.config.api.ShipperConfigElementDescription;
+import org.apache.ambari.logsearch.config.api.ShipperConfigTypeDescription;
import org.apache.ambari.logsearch.config.api.model.inputconfig.FilterKeyValueDescriptor;
import com.google.gson.annotations.Expose;
import com.google.gson.annotations.SerializedName;
+@ShipperConfigTypeDescription(
+ name = "Key-value Filter",
+ description = "value_borders is only used if it is specified, and value_split is not.\n" +
+ "\n" +
+ "Key-value filters have the following additional parameters:"
+)
public class FilterKeyValueDescriptorImpl extends FilterDescriptorImpl implements FilterKeyValueDescriptor {
+ @ShipperConfigElementDescription(
+ path = "/filter/[]/field_split",
+ type = "string",
+ description = "The string that splits the key-value pairs.",
+ examples = {" ", ","},
+ defaultValue = "\\t"
+ )
@Expose
@SerializedName("field_split")
private String fieldSplit;
+ @ShipperConfigElementDescription(
+ path = "/filter/[]/value_split",
+ type = "string",
+ description = "The string that separates keys from values.",
+ examples = {":", "->"},
+ defaultValue = "="
+ )
@Expose
@SerializedName("value_split")
private String valueSplit;
+ @ShipperConfigElementDescription(
+ path = "/filter/[]/value_borders",
+ type = "string",
+ description = "The borders around the value, must be 2 characters long, first before it, second after it.",
+ examples = {"()", "[]", "{}"}
+ )
@Expose
@SerializedName("value_borders")
private String valueBorders;
http://git-wip-us.apache.org/repos/asf/ambari/blob/15dd999f/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/InputConfigImpl.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/InputConfigImpl.java b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/InputConfigImpl.java
index a4eba8e..6ce634f 100644
--- a/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/InputConfigImpl.java
+++ b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/InputConfigImpl.java
@@ -21,16 +21,34 @@ package org.apache.ambari.logsearch.config.zookeeper.model.inputconfig.impl;
import java.util.List;
+import org.apache.ambari.logsearch.config.api.ShipperConfigElementDescription;
+import org.apache.ambari.logsearch.config.api.ShipperConfigTypeDescription;
import org.apache.ambari.logsearch.config.api.model.inputconfig.FilterDescriptor;
import org.apache.ambari.logsearch.config.api.model.inputconfig.InputConfig;
import org.apache.ambari.logsearch.config.api.model.inputconfig.InputDescriptor;
import com.google.gson.annotations.Expose;
+@ShipperConfigTypeDescription(
+ name = "Input Config",
+ description = "The input configurations are stored in json files. Each of them are describing the processing of the log files of a service.\n" +
+ "\n" +
+ "The json contains two elements:"
+)
public class InputConfigImpl implements InputConfig {
+ @ShipperConfigElementDescription(
+ path = "/input",
+ type = "list of json objects",
+ description = "A list of input descriptions"
+ )
@Expose
private List<InputDescriptorImpl> input;
+ @ShipperConfigElementDescription(
+ path = "/filter",
+ type = "list of json objects",
+ description = "A list of filter descriptions"
+ )
@Expose
private List<FilterDescriptorImpl> filter;
http://git-wip-us.apache.org/repos/asf/ambari/blob/15dd999f/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/InputDescriptorImpl.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/InputDescriptorImpl.java b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/InputDescriptorImpl.java
index 54b4b9b..cec16c8 100644
--- a/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/InputDescriptorImpl.java
+++ b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/InputDescriptorImpl.java
@@ -21,59 +21,160 @@ package org.apache.ambari.logsearch.config.zookeeper.model.inputconfig.impl;
import java.util.Map;
+import org.apache.ambari.logsearch.config.api.ShipperConfigElementDescription;
+import org.apache.ambari.logsearch.config.api.ShipperConfigTypeDescription;
import org.apache.ambari.logsearch.config.api.model.inputconfig.InputDescriptor;
import com.google.gson.annotations.Expose;
import com.google.gson.annotations.SerializedName;
+@ShipperConfigTypeDescription(
+ name = "Input",
+ description = "The input element in the input configuration contains a list of input descriptions, each describing one source of input.\n" +
+ "\n" +
+ "The general elements in the json are the following:"
+)
public abstract class InputDescriptorImpl implements InputDescriptor {
+ @ShipperConfigElementDescription(
+ path = "/input/[]/type",
+ type = "string",
+ description = "The log id for this source.",
+ examples = {"zookeeper", "ambari_server"}
+ )
@Expose
private String type;
+ @ShipperConfigElementDescription(
+ path = "/input/[]/rowtype",
+ type = "string",
+ description = "The type of the row.",
+ examples = {"service", "audit"}
+ )
@Expose
private String rowtype;
+ @ShipperConfigElementDescription(
+ path = "/input/[]/path",
+ type = "string",
+ description = "The path of the source, may contain '*' characters too.",
+ examples = {"/var/log/ambari-logsearch-logfeeder/logsearch-logfeeder.json", "/var/log/zookeeper/zookeeper*.log"}
+ )
@Expose
private String path;
+ @ShipperConfigElementDescription(
+ path = "/input/[]/add_fields",
+ type = "dictionary",
+ description = "The element contains field_name: field_value pairs which will be added to each rows data.",
+ examples = {"\"cluster\":\"cluster_name\""}
+ )
@Expose
@SerializedName("add_fields")
private Map<String, String> addFields;
+ @ShipperConfigElementDescription(
+ path = "/input/[]/source",
+ type = "dictionary",
+ description = "The type of the input source.",
+ examples = {"file", "s3_file"}
+ )
@Expose
private String source;
+ @ShipperConfigElementDescription(
+ path = "/input/[]/tail",
+ type = "boolean",
+ description = "The input should check for only the latest file matching the pattern, not all of them.",
+ examples = {"true", "false"},
+ defaultValue = "true"
+ )
@Expose
private Boolean tail;
+ @ShipperConfigElementDescription(
+ path = "/input/[]/gen_event_md5",
+ type = "boolean",
+ description = "Generate an event_md5 field for each row by creating a hash of the row data.",
+ examples = {"true", "false"},
+ defaultValue = "true"
+ )
@Expose
@SerializedName("gen_event_md5")
private Boolean genEventMd5;
+ @ShipperConfigElementDescription(
+ path = "/input/[]/use_event_md5_as_id",
+ type = "boolean",
+ description = "Generate an id for each row by creating a hash of the row data.",
+ examples = {"true", "false"},
+ defaultValue = "false"
+ )
@Expose
@SerializedName("use_event_md5_as_id")
private Boolean useEventMd5AsId;
+ @ShipperConfigElementDescription(
+ path = "/input/[]/cache_enabled",
+ type = "boolean",
+ description = "Allows the input to use a cache to filter out duplications.",
+ examples = {"true", "false"},
+ defaultValue = "false"
+ )
@Expose
@SerializedName("cache_enabled")
private Boolean cacheEnabled;
+ @ShipperConfigElementDescription(
+ path = "/input/[]/cache_key_field",
+ type = "string",
+ description = "Specifies the field for which to use the cache to find duplications of.",
+ examples = {"some_field_prone_to_repeating_value"},
+ defaultValue = "log_message"
+ )
@Expose
@SerializedName("cache_key_field")
private String cacheKeyField;
+ @ShipperConfigElementDescription(
+ path = "/input/[]/cache_last_dedup_enabled",
+ type = "boolean",
+ description = "Allow to filter out entries which are same as the most recent one irrelevant of it's time.",
+ examples = {"true", "false"},
+ defaultValue = "false"
+ )
@Expose
@SerializedName("cache_last_dedup_enabled")
private Boolean cacheLastDedupEnabled;
+ @ShipperConfigElementDescription(
+ path = "/input/[]/cache_size",
+ type = "integer",
+ description = "The number of entries to store in the cache.",
+ examples = {"50"},
+ defaultValue = "100"
+ )
@Expose
@SerializedName("cache_size")
private Integer cacheSize;
+ @ShipperConfigElementDescription(
+ path = "/input/[]/cache_dedup_interval",
+ type = "integer",
+ description = "The maximum interval in ms which may pass between two identical log messages to filter the latter out.",
+ examples = {"500"},
+ defaultValue = "1000"
+ )
@Expose
@SerializedName("cache_dedup_interval")
private Long cacheDedupInterval;
+ @ShipperConfigElementDescription(
+ path = "/input/[]/is_enabled",
+ type = "boolean",
+ description = "A flag to show if the input should be used.",
+ examples = {"true", "false"},
+ defaultValue = "true"
+ )
@Expose
@SerializedName("is_enabled")
private Boolean isEnabled;
http://git-wip-us.apache.org/repos/asf/ambari/blob/15dd999f/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/InputFileBaseDescriptorImpl.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/InputFileBaseDescriptorImpl.java b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/InputFileBaseDescriptorImpl.java
index 51c7ec8..8281daa 100644
--- a/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/InputFileBaseDescriptorImpl.java
+++ b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/InputFileBaseDescriptorImpl.java
@@ -19,20 +19,47 @@
package org.apache.ambari.logsearch.config.zookeeper.model.inputconfig.impl;
+import org.apache.ambari.logsearch.config.api.ShipperConfigElementDescription;
+import org.apache.ambari.logsearch.config.api.ShipperConfigTypeDescription;
import org.apache.ambari.logsearch.config.api.model.inputconfig.InputFileBaseDescriptor;
import com.google.gson.annotations.Expose;
import com.google.gson.annotations.SerializedName;
+@ShipperConfigTypeDescription(
+ name = "File Input",
+ description = "File inputs have some additional parameters:"
+)
public class InputFileBaseDescriptorImpl extends InputDescriptorImpl implements InputFileBaseDescriptor {
+ @ShipperConfigElementDescription(
+ path = "/input/[]/checkpoint_interval_ms",
+ type = "integer",
+ description = "The time interval in ms when the checkpoint file should be updated.",
+ examples = {"10000"},
+ defaultValue = "5000"
+ )
@Expose
@SerializedName("checkpoint_interval_ms")
private Integer checkpointIntervalMs;
+ @ShipperConfigElementDescription(
+ path = "/input/[]/process_file",
+ type = "boolean",
+ description = "Should the file be processed.",
+ examples = {"true", "false"},
+ defaultValue = "true"
+ )
@Expose
@SerializedName("process_file")
private Boolean processFile;
+ @ShipperConfigElementDescription(
+ path = "/input/[]/copy_file",
+ type = "boolean",
+ description = "Should the file be copied (only if not processed).",
+ examples = {"true", "false"},
+ defaultValue = "false"
+ )
@Expose
@SerializedName("copy_file")
private Boolean copyFile;
http://git-wip-us.apache.org/repos/asf/ambari/blob/15dd999f/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/InputS3FileDescriptorImpl.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/InputS3FileDescriptorImpl.java b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/InputS3FileDescriptorImpl.java
index 277a57c..19f52d3 100644
--- a/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/InputS3FileDescriptorImpl.java
+++ b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/InputS3FileDescriptorImpl.java
@@ -19,16 +19,32 @@
package org.apache.ambari.logsearch.config.zookeeper.model.inputconfig.impl;
+import org.apache.ambari.logsearch.config.api.ShipperConfigElementDescription;
+import org.apache.ambari.logsearch.config.api.ShipperConfigTypeDescription;
import org.apache.ambari.logsearch.config.api.model.inputconfig.InputS3FileDescriptor;
import com.google.gson.annotations.Expose;
import com.google.gson.annotations.SerializedName;
+@ShipperConfigTypeDescription(
+ name = "S3 File Input",
+ description = "S3 file inputs have the following parameters in addition to the general file parameters:"
+)
public class InputS3FileDescriptorImpl extends InputFileBaseDescriptorImpl implements InputS3FileDescriptor {
+ @ShipperConfigElementDescription(
+ path = "/input/[]/s3_access_key",
+ type = "string",
+ description = "The access key used for AWS credentials."
+ )
@Expose
@SerializedName("s3_access_key")
private String s3AccessKey;
+ @ShipperConfigElementDescription(
+ path = "/input/[]/s3_secret_key",
+ type = "string",
+ description = "The secret key used for AWS credentials."
+ )
@Expose
@SerializedName("s3_secret_key")
private String s3SecretKey;
http://git-wip-us.apache.org/repos/asf/ambari/blob/15dd999f/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/MapAnonymizeDescriptorImpl.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/MapAnonymizeDescriptorImpl.java b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/MapAnonymizeDescriptorImpl.java
index 5fdbbab..8c128de 100644
--- a/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/MapAnonymizeDescriptorImpl.java
+++ b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/MapAnonymizeDescriptorImpl.java
@@ -19,20 +19,39 @@
package org.apache.ambari.logsearch.config.zookeeper.model.inputconfig.impl;
+import org.apache.ambari.logsearch.config.api.ShipperConfigElementDescription;
+import org.apache.ambari.logsearch.config.api.ShipperConfigTypeDescription;
import org.apache.ambari.logsearch.config.api.model.inputconfig.MapAnonymizeDescriptor;
import com.google.gson.annotations.Expose;
import com.google.gson.annotations.SerializedName;
-public class MapAnonymizeDescriptorImpl implements MapAnonymizeDescriptor {
+@ShipperConfigTypeDescription(
+ name = "Map Anonymize",
+ description = "The name of the mapping element should be map_anonymize. The value json element should contain the following parameter:"
+)
+public class MapAnonymizeDescriptorImpl extends MapFieldDescriptorImpl implements MapAnonymizeDescriptor {
@Override
public String getJsonName() {
return "map_anonymize";
}
+ @ShipperConfigElementDescription(
+ path = "/filter/[]/post_map_values/{field_name}/[]/map_anonymize/pattern",
+ type = "string",
+ description = "The pattern to use to identify parts to anonymize. The parts to hide should be marked with the \"<hide>\" string.",
+ examples = {"Some secret is here: <hide>, and another one is here: <hide>"}
+ )
@Expose
private String pattern;
+ @ShipperConfigElementDescription(
+ path = "/filter/[]/post_map_values/{field_name}/[]/map_anonymize/hide_char",
+ type = "string",
+ description = "The character to hide with",
+ defaultValue = "*",
+ examples = {"X", "-"}
+ )
@Expose
@SerializedName("hide_char")
private Character hideChar;
http://git-wip-us.apache.org/repos/asf/ambari/blob/15dd999f/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/MapDateDescriptorImpl.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/MapDateDescriptorImpl.java b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/MapDateDescriptorImpl.java
index 2e54e7a..feec4b6 100644
--- a/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/MapDateDescriptorImpl.java
+++ b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/MapDateDescriptorImpl.java
@@ -19,21 +19,39 @@
package org.apache.ambari.logsearch.config.zookeeper.model.inputconfig.impl;
+import org.apache.ambari.logsearch.config.api.ShipperConfigElementDescription;
+import org.apache.ambari.logsearch.config.api.ShipperConfigTypeDescription;
import org.apache.ambari.logsearch.config.api.model.inputconfig.MapDateDescriptor;
import com.google.gson.annotations.Expose;
import com.google.gson.annotations.SerializedName;
-public class MapDateDescriptorImpl implements MapDateDescriptor {
+@ShipperConfigTypeDescription(
+ name = "Map Date",
+ description = "The name of the mapping element should be map_date. The value json element may contain the following parameters:"
+)
+public class MapDateDescriptorImpl extends MapFieldDescriptorImpl implements MapDateDescriptor {
@Override
public String getJsonName() {
return "map_date";
}
+ @ShipperConfigElementDescription(
+ path = "/filter/[]/post_map_values/{field_name}/[]/map_date/src_date_pattern",
+ type = "string",
+ description = "If it is specified than the mapper converts from this format to the target, and also adds missing year",
+ examples = {"MMM dd HH:mm:ss"}
+ )
@Expose
@SerializedName("src_date_pattern")
private String sourceDatePattern;
+ @ShipperConfigElementDescription(
+ path = "/filter/[]/post_map_values/{field_name}/[]/map_date/target_date_pattern",
+ type = "string",
+ description = "If 'epoch' then the field is parsed as seconds from 1970, otherwise the content used as pattern",
+ examples = {"yyyy-MM-dd HH:mm:ss,SSS", "epoch"}
+ )
@Expose
@SerializedName("target_date_pattern")
private String targetDatePattern;
http://git-wip-us.apache.org/repos/asf/ambari/blob/15dd999f/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/MapFieldCopyDescriptorImpl.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/MapFieldCopyDescriptorImpl.java b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/MapFieldCopyDescriptorImpl.java
index 4a8d746..e7b8fdf 100644
--- a/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/MapFieldCopyDescriptorImpl.java
+++ b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/MapFieldCopyDescriptorImpl.java
@@ -19,17 +19,29 @@
package org.apache.ambari.logsearch.config.zookeeper.model.inputconfig.impl;
+import org.apache.ambari.logsearch.config.api.ShipperConfigElementDescription;
+import org.apache.ambari.logsearch.config.api.ShipperConfigTypeDescription;
import org.apache.ambari.logsearch.config.api.model.inputconfig.MapFieldCopyDescriptor;
import com.google.gson.annotations.Expose;
import com.google.gson.annotations.SerializedName;
-public class MapFieldCopyDescriptorImpl implements MapFieldCopyDescriptor {
+@ShipperConfigTypeDescription(
+ name = "Map Copy",
+ description = "The name of the mapping element should be map_copy. The value json element should contain the following parameter:"
+)
+public class MapFieldCopyDescriptorImpl extends MapFieldDescriptorImpl implements MapFieldCopyDescriptor {
@Override
public String getJsonName() {
return "map_fieldcopy";
}
+ @ShipperConfigElementDescription(
+ path = "/filter/[]/post_map_values/{field_name}/[]/map_copy/copy_name",
+ type = "string",
+ description = "The name of the copied field",
+ examples = {"new_name"}
+ )
@Expose
@SerializedName("copy_name")
private String copyName;
http://git-wip-us.apache.org/repos/asf/ambari/blob/15dd999f/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/MapFieldDescriptorImpl.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/MapFieldDescriptorImpl.java b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/MapFieldDescriptorImpl.java
new file mode 100644
index 0000000..101e0d4
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/MapFieldDescriptorImpl.java
@@ -0,0 +1,33 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.ambari.logsearch.config.zookeeper.model.inputconfig.impl;
+
+import org.apache.ambari.logsearch.config.api.ShipperConfigTypeDescription;
+import org.apache.ambari.logsearch.config.api.model.inputconfig.MapFieldDescriptor;
+
+@ShipperConfigTypeDescription(
+ name = "Post Map Values",
+ description = "The Post Map Values element in the [filter](filter.md) contains the field names as keys; the values are lists of sets of " +
+ "post map values, each describing one mapping done on the previously named field, as obtained after filtering.\n" +
+ "\n" +
+ "Currently four kinds of mappings are supported:"
+ )
+public abstract class MapFieldDescriptorImpl implements MapFieldDescriptor {
+}
http://git-wip-us.apache.org/repos/asf/ambari/blob/15dd999f/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/MapFieldNameDescriptorImpl.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/MapFieldNameDescriptorImpl.java b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/MapFieldNameDescriptorImpl.java
index bd32018..e1b71e6 100644
--- a/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/MapFieldNameDescriptorImpl.java
+++ b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/MapFieldNameDescriptorImpl.java
@@ -19,17 +19,29 @@
package org.apache.ambari.logsearch.config.zookeeper.model.inputconfig.impl;
+import org.apache.ambari.logsearch.config.api.ShipperConfigElementDescription;
+import org.apache.ambari.logsearch.config.api.ShipperConfigTypeDescription;
import org.apache.ambari.logsearch.config.api.model.inputconfig.MapFieldNameDescriptor;
import com.google.gson.annotations.Expose;
import com.google.gson.annotations.SerializedName;
-public class MapFieldNameDescriptorImpl implements MapFieldNameDescriptor {
+@ShipperConfigTypeDescription(
+ name = "Map Field Name",
+ description = "The name of the mapping element should be map_fieldname. The value json element should contain the following parameter:"
+)
+public class MapFieldNameDescriptorImpl extends MapFieldDescriptorImpl implements MapFieldNameDescriptor {
@Override
public String getJsonName() {
return "map_fieldname";
}
+ @ShipperConfigElementDescription(
+ path = "/filter/[]/post_map_values/{field_name}/[]/map_fieldname/new_field_name",
+ type = "string",
+ description = "The name of the renamed field",
+ examples = {"new_name"}
+ )
@Expose
@SerializedName("new_field_name")
private String newFieldName;
http://git-wip-us.apache.org/repos/asf/ambari/blob/15dd999f/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/MapFieldValueDescriptorImpl.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/MapFieldValueDescriptorImpl.java b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/MapFieldValueDescriptorImpl.java
index 599e152..a80a994 100644
--- a/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/MapFieldValueDescriptorImpl.java
+++ b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/MapFieldValueDescriptorImpl.java
@@ -19,21 +19,39 @@
package org.apache.ambari.logsearch.config.zookeeper.model.inputconfig.impl;
+import org.apache.ambari.logsearch.config.api.ShipperConfigElementDescription;
+import org.apache.ambari.logsearch.config.api.ShipperConfigTypeDescription;
import org.apache.ambari.logsearch.config.api.model.inputconfig.MapFieldValueDescriptor;
import com.google.gson.annotations.Expose;
import com.google.gson.annotations.SerializedName;
-public class MapFieldValueDescriptorImpl implements MapFieldValueDescriptor {
+@ShipperConfigTypeDescription(
+ name = "Map Field Value",
+ description = "The name of the mapping element should be map_fieldvalue. The value json element should contain the following parameter:"
+)
+public class MapFieldValueDescriptorImpl extends MapFieldDescriptorImpl implements MapFieldValueDescriptor {
@Override
public String getJsonName() {
return "map_fieldvalue";
}
+ @ShipperConfigElementDescription(
+ path = "/filter/[]/post_map_values/{field_name}/[]/map_fieldvalue/pre_value",
+ type = "string",
+ description = "The value that the field must match (ignoring case) to be mapped",
+ examples = {"old_value"}
+ )
@Expose
@SerializedName("pre_value")
private String preValue;
+ @ShipperConfigElementDescription(
+ path = "/filter/[]/post_map_values/{field_name}/[]/map_fieldvalue/post_value",
+ type = "string",
+ description = "The value to which the field is modified",
+ examples = {"new_value"}
+ )
@Expose
@SerializedName("post_value")
private String postValue;
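Likewise, a hedged fragment of one post_map_values entry for map_fieldvalue (the field name "level" and the values are placeholders taken from the examples above):

    "post_map_values": {
      "level": [
        { "map_fieldvalue": { "pre_value": "old_value", "post_value": "new_value" } }
      ]
    }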
http://git-wip-us.apache.org/repos/asf/ambari/blob/15dd999f/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/PostMapValuesAdapter.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/PostMapValuesAdapter.java b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/PostMapValuesAdapter.java
index 3c21fd8..e3f9886 100644
--- a/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/PostMapValuesAdapter.java
+++ b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/PostMapValuesAdapter.java
@@ -95,7 +95,7 @@ public class PostMapValuesAdapter implements JsonDeserializer<List<PostMapValues
private JsonElement createMapperObject(PostMapValuesImpl postMapValues, JsonSerializationContext context) {
JsonObject jsonObject = new JsonObject();
for (MapFieldDescriptor m : postMapValues.getMappers()) {
- jsonObject.add(((MapFieldDescriptor)m).getJsonName(), context.serialize(m));
+ jsonObject.add(((MapFieldDescriptorImpl)m).getJsonName(), context.serialize(m));
}
return jsonObject;
}
http://git-wip-us.apache.org/repos/asf/ambari/blob/15dd999f/ambari-logsearch/ambari-logsearch-logfeeder/docs/filter.md
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-logfeeder/docs/filter.md b/ambari-logsearch/ambari-logsearch-logfeeder/docs/filter.md
index 129279b..d825290 100644
--- a/ambari-logsearch/ambari-logsearch-logfeeder/docs/filter.md
+++ b/ambari-logsearch/ambari-logsearch-logfeeder/docs/filter.md
@@ -48,6 +48,8 @@ Grok filters have the following additional parameters:
## Key-value Filter
+value\_borders is only used if it is specified, and value\_split is not.
+
Key-value filters have the following additional parameters:
| Field | Description | Default |
@@ -56,4 +58,4 @@ Key-value filters have the following additional parameters:
| value\_split | The string that separates keys from values | "=" |
| value\_borders | The borders around the value, must be 2 characters long, first before it, second after it | - |
-If value\_borders is only used if it is specified, and value\_split is not.
+
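As a sketch of the rule moved above (the "filter": "keyvalue" type name is an assumption; only value_split and value_borders come from the table), the two parameters would appear in separate configurations rather than together:

    { "filter": "keyvalue", "value_split": "=" }

    { "filter": "keyvalue", "value_borders": "()" }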
http://git-wip-us.apache.org/repos/asf/ambari/blob/15dd999f/ambari-logsearch/ambari-logsearch-logfeeder/docs/input.md
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-logfeeder/docs/input.md b/ambari-logsearch/ambari-logsearch-logfeeder/docs/input.md
index 661eeb8..1a9ce8d 100644
--- a/ambari-logsearch/ambari-logsearch-logfeeder/docs/input.md
+++ b/ambari-logsearch/ambari-logsearch-logfeeder/docs/input.md
@@ -20,20 +20,18 @@ limitations under the License.
# Input
The input element in the [input configuration](inputConfig.md) contains a list of input descriptions, each describing one source
-of input.
-
-The general elements in the json are the following:
+of input. The general elements in the json are the following:
| Field | Description | Default |
|-----------------------------|-------------------------------------------------------------------------------------------------------|--------------|
-| type | The type of the input source, currently file and s3_file are supported | - |
+| type | The log id for this source | - |
| rowtype | The type of the row, can be service / audit | - |
| path | The path of the source, may contain '*' characters too | - |
| add\_fields | The element contains field\_name: field\_value pairs which will be added to each rows data | - |
+| source | The type of the input source, currently file and s3_file are supported | - |
| tail | The input should check for only the latest file matching the pattern, not all of them | true |
| gen\_event\_md5 | Generate an event\_md5 field for each row by creating a hash of the row data | true |
| use\_event\_md5\_as\_id | Generate an id for each row by creating a hash of the row data | false |
-| start\_position | Should the parsing start from the beginning | beginning |
| cache\_enabled | Allows the input to use a cache to filter out duplications | true |
| cache\_key\_field | Specifies the field for which to use the cache to find duplications of | log\_message |
| cache\_last\_dedup\_enabled | Allow to filter out entries which are same as the most recent one irrelevant of it's time | false |
@@ -44,7 +42,7 @@ The general elements in the json are the following:
## File Input
-File inputs have the following parameters too:
+File inputs have some additional parameters:
| Field | Description | Default |
|--------------------------|--------------------------------------------------------------------|---------|
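A hedged example of an input description using the renamed fields (the log id, rowtype and path values are placeholders; "file" is one of the supported source types listed above):

    {
      "type": "hdfs_namenode",
      "rowtype": "service",
      "source": "file",
      "path": "/var/log/hadoop/hdfs/*.log",
      "tail": true
    }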
http://git-wip-us.apache.org/repos/asf/ambari/blob/15dd999f/ambari-logsearch/ambari-logsearch-logfeeder/docs/postMapValues.md
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-logfeeder/docs/postMapValues.md b/ambari-logsearch/ambari-logsearch-logfeeder/docs/postMapValues.md
index 7ec439a..bc219df 100644
--- a/ambari-logsearch/ambari-logsearch-logfeeder/docs/postMapValues.md
+++ b/ambari-logsearch/ambari-logsearch-logfeeder/docs/postMapValues.md
@@ -67,4 +67,4 @@ The name of the mapping element should be map\_anonymize. The value json element
| Field | Description |
|------------|-----------------------------------------------------------------------------------------------------------------|
| pattern | The pattern to use to identify parts to anonymize. The parts to hide should be marked with the "<hide>" string. |
-| hide\_char | The character to hide with, if it is not specified then the default is 'X' |
+| hide\_char | The character to hide with, if it is not specified then the default is '*' |
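A short sketch of a map_anonymize entry matching this table (the field name "log_message" and the pattern are made-up placeholders; "&lt;hide&gt;" marks the part to mask and '*' is the new default hide character):

    "post_map_values": {
      "log_message": [
        {
          "map_anonymize": {
            "pattern": "password=<hide>",
            "hide_char": "*"
          }
        }
      ]
    }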
http://git-wip-us.apache.org/repos/asf/ambari/blob/15dd999f/ambari-logsearch/ambari-logsearch-logfeeder/src/test/java/org/apache/ambari/logfeeder/filter/FilterJSONTest.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-logfeeder/src/test/java/org/apache/ambari/logfeeder/filter/FilterJSONTest.java b/ambari-logsearch/ambari-logsearch-logfeeder/src/test/java/org/apache/ambari/logfeeder/filter/FilterJSONTest.java
index 7abf177..acc3d4d 100644
--- a/ambari-logsearch/ambari-logsearch-logfeeder/src/test/java/org/apache/ambari/logfeeder/filter/FilterJSONTest.java
+++ b/ambari-logsearch/ambari-logsearch-logfeeder/src/test/java/org/apache/ambari/logfeeder/filter/FilterJSONTest.java
@@ -131,13 +131,15 @@ public class FilterJSONTest {
@Test
public void testJSONFilterCode_invalidJson() throws Exception {
LOG.info("testJSONFilterCode_invalidJson()");
+
init(new FilterJsonDescriptorImpl());
- String inputStr="invalid json";
+
+ String inputStr = "invalid json";
try{
- filterJson.apply(inputStr,new InputMarker(null, null, 0));
- fail("Expected LogFeederException was not occured");
- }catch(LogFeederException logFeederException){
- assertEquals("Json parsing failed for inputstr = "+inputStr, logFeederException.getLocalizedMessage());
+ filterJson.apply(inputStr,new InputMarker(null, null, 0));
+ fail("Expected LogFeederException was not occured");
+ } catch(LogFeederException logFeederException) {
+ assertEquals("Json parsing failed for inputstr = " + inputStr, logFeederException.getLocalizedMessage());
}
}
http://git-wip-us.apache.org/repos/asf/ambari/blob/15dd999f/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/common/ShipperConfigDescriptionStorage.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/common/ShipperConfigDescriptionStorage.java b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/common/ShipperConfigDescriptionStorage.java
new file mode 100644
index 0000000..7d4bc2c
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/common/ShipperConfigDescriptionStorage.java
@@ -0,0 +1,67 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.ambari.logsearch.common;
+
+import org.apache.ambari.logsearch.config.api.ShipperConfigElementDescription;
+import org.apache.ambari.logsearch.model.response.ShipperConfigDescriptionData;
+import org.reflections.Reflections;
+import org.reflections.scanners.FieldAnnotationsScanner;
+
+import javax.annotation.PostConstruct;
+import javax.inject.Named;
+import java.lang.reflect.Field;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Set;
+
+@Named
+public class ShipperConfigDescriptionStorage {
+
+ private static final String SHIPPER_CONFIG_PACKAGE = "org.apache.ambari.logsearch.config.zookeeper.model.inputconfig.impl";
+
+ private final List<ShipperConfigDescriptionData> shipperConfigDescription = new ArrayList<>();
+
+ @PostConstruct
+ public void postConstruct() {
+ Thread loadShipperConfigDescriptionThread = new Thread("load_shipper_config_description") {
+ @Override
+ public void run() {
+ fillShipperConfigDescriptions();
+ }
+ };
+ loadShipperConfigDescriptionThread.setDaemon(true);
+ loadShipperConfigDescriptionThread.start();
+ }
+
+ public List<ShipperConfigDescriptionData> getShipperConfigDescription() {
+ return shipperConfigDescription;
+ }
+
+ private void fillShipperConfigDescriptions() {
+ Reflections reflections = new Reflections(SHIPPER_CONFIG_PACKAGE, new FieldAnnotationsScanner());
+ Set<Field> fields = reflections.getFieldsAnnotatedWith(ShipperConfigElementDescription.class);
+ for (Field field : fields) {
+ ShipperConfigElementDescription description = field.getAnnotation(ShipperConfigElementDescription.class);
+ shipperConfigDescription.add(new ShipperConfigDescriptionData(description.path(), description.description(),
+ description.examples(), description.defaultValue()));
+ }
+
+ shipperConfigDescription.sort((o1, o2) -> o1.getPath().compareTo(o2.getPath()));
+ }
+}
http://git-wip-us.apache.org/repos/asf/ambari/blob/15dd999f/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/doc/DocConstants.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/doc/DocConstants.java b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/doc/DocConstants.java
index 6d1382d..da0a8bb 100644
--- a/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/doc/DocConstants.java
+++ b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/doc/DocConstants.java
@@ -104,6 +104,7 @@ public class DocConstants {
public static final String GET_AUTH_DETAILS_OD = "Get authentication details.";
public static final String GET_ALL_PROPERTIES_INFO_OD = "List all available properties for Log Search and Log Feeder";
public static final String GET_LOGSEARCH_PROPERTIES_INFO_OD = "List all available properties for Log Search property file (e.g: logsearch.properties/logfeeder.properties)";
+ public static final String GET_ALL_SHIPPER_CONFIG_INFO_OD = "List all available shipper configuration elements";
}
public class EventHistoryDescriptions {
http://git-wip-us.apache.org/repos/asf/ambari/blob/15dd999f/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/manager/InfoManager.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/manager/InfoManager.java b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/manager/InfoManager.java
index f6d0449..2f63492 100644
--- a/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/manager/InfoManager.java
+++ b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/manager/InfoManager.java
@@ -25,7 +25,9 @@ import java.util.Map;
import org.apache.ambari.logsearch.conf.AuthPropsConfig;
import org.apache.ambari.logsearch.common.PropertyDescriptionStorage;
+import org.apache.ambari.logsearch.common.ShipperConfigDescriptionStorage;
import org.apache.ambari.logsearch.model.response.PropertyDescriptionData;
+import org.apache.ambari.logsearch.model.response.ShipperConfigDescriptionData;
import javax.inject.Inject;
import javax.inject.Named;
@@ -39,6 +41,9 @@ public class InfoManager extends JsonManagerBase {
@Inject
private PropertyDescriptionStorage propertyDescriptionStore;
+ @Inject
+ private ShipperConfigDescriptionStorage shipperConfigDescriptionStore;
+
public Map<String, Boolean> getAuthMap() {
Map<String, Boolean> authMap = new HashMap<>();
authMap.put("external", authPropsConfig.isAuthExternalEnabled());
@@ -56,4 +61,8 @@ public class InfoManager extends JsonManagerBase {
public List<PropertyDescriptionData> getLogSearchPropertyDescriptions(String propertiesFile) {
return getPropertyDescriptions().get(propertiesFile);
}
+
+ public List<ShipperConfigDescriptionData> getLogSearchShipperConfigDescription() {
+ return shipperConfigDescriptionStore.getShipperConfigDescription();
+ }
}
http://git-wip-us.apache.org/repos/asf/ambari/blob/15dd999f/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/model/response/ShipperConfigDescriptionData.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/model/response/ShipperConfigDescriptionData.java b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/model/response/ShipperConfigDescriptionData.java
new file mode 100644
index 0000000..91f7420
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/model/response/ShipperConfigDescriptionData.java
@@ -0,0 +1,52 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.ambari.logsearch.model.response;
+
+public class ShipperConfigDescriptionData {
+ private final String path;
+
+ private final String description;
+
+ private final String[] examples;
+
+ private final String defaultValue;
+
+ public ShipperConfigDescriptionData(String path, String description, String[] examples, String defaultValue) {
+ this.path = path;
+ this.description = description;
+ this.examples = examples;
+ this.defaultValue = defaultValue;
+ }
+
+ public String getPath() {
+ return path;
+ }
+
+ public String getDescription() {
+ return description;
+ }
+
+ public String[] getExamples() {
+ return examples;
+ }
+
+ public String getDefaultValue() {
+ return defaultValue;
+ }
+}
http://git-wip-us.apache.org/repos/asf/ambari/blob/15dd999f/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/rest/InfoResource.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/rest/InfoResource.java b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/rest/InfoResource.java
index 6ea0bab..e49be90 100644
--- a/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/rest/InfoResource.java
+++ b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/rest/InfoResource.java
@@ -29,12 +29,14 @@ import io.swagger.annotations.Api;
import io.swagger.annotations.ApiOperation;
import org.apache.ambari.logsearch.manager.InfoManager;
import org.apache.ambari.logsearch.model.response.PropertyDescriptionData;
+import org.apache.ambari.logsearch.model.response.ShipperConfigDescriptionData;
import org.springframework.context.annotation.Scope;
import java.util.List;
import java.util.Map;
import static org.apache.ambari.logsearch.doc.DocConstants.PublicOperationDescriptions.GET_ALL_PROPERTIES_INFO_OD;
+import static org.apache.ambari.logsearch.doc.DocConstants.PublicOperationDescriptions.GET_ALL_SHIPPER_CONFIG_INFO_OD;
import static org.apache.ambari.logsearch.doc.DocConstants.PublicOperationDescriptions.GET_LOGSEARCH_PROPERTIES_INFO_OD;
import static org.apache.ambari.logsearch.doc.DocConstants.PublicOperationDescriptions.GET_AUTH_DETAILS_OD;
@@ -70,4 +72,12 @@ public class InfoResource {
public List<PropertyDescriptionData> getPropertyFileDescription(@PathParam("propertyFile") String propertyFile) {
return infoManager.getLogSearchPropertyDescriptions(propertyFile);
}
+
+ @GET
+ @Path("/shipperconfig")
+ @Produces({"application/json"})
+ @ApiOperation(GET_ALL_SHIPPER_CONFIG_INFO_OD)
+ public List<ShipperConfigDescriptionData> getShipperConfigDescription() {
+ return infoManager.getLogSearchShipperConfigDescription();
+ }
}
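For reference, one element of the new /shipperconfig response would carry the four properties of ShipperConfigDescriptionData; the JSON field names below are assumed from the getters, and the path simply follows the pattern of the other map_* elements, so this is only an illustrative sketch:

    {
      "path": "/filter/[]/post_map_values/{field_name}/[]/map_anonymize/hide_char",
      "description": "The character to hide with",
      "examples": ["X", "-"],
      "defaultValue": "*"
    }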
[06/18] ambari git commit: AMBARI-21430 - Allow Multiple Versions of
Stack Tools to Co-Exist (jonathanhurley)
Posted by rl...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/f33a250c/ambari-server/src/test/python/stacks/2.5/configs/ranger-kms-secured.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.5/configs/ranger-kms-secured.json b/ambari-server/src/test/python/stacks/2.5/configs/ranger-kms-secured.json
index 4e7d857..bcadd03 100644
--- a/ambari-server/src/test/python/stacks/2.5/configs/ranger-kms-secured.json
+++ b/ambari-server/src/test/python/stacks/2.5/configs/ranger-kms-secured.json
@@ -1,873 +1,873 @@
{
"localComponents": [
- "SECONDARY_NAMENODE",
- "HDFS_CLIENT",
- "DATANODE",
- "NAMENODE",
- "RANGER_ADMIN",
- "RANGER_TAGSYNC",
- "RANGER_USERSYNC",
- "ZOOKEEPER_SERVER",
- "ZOOKEEPER_CLIENT",
- "KERBEROS_CLIENT",
+ "SECONDARY_NAMENODE",
+ "HDFS_CLIENT",
+ "DATANODE",
+ "NAMENODE",
+ "RANGER_ADMIN",
+ "RANGER_TAGSYNC",
+ "RANGER_USERSYNC",
+ "ZOOKEEPER_SERVER",
+ "ZOOKEEPER_CLIENT",
+ "KERBEROS_CLIENT",
"RANGER_KMS_SERVER"
- ],
+ ],
"configuration_attributes": {
- "ranger-kms-site": {},
- "ranger-hdfs-audit": {},
- "ssl-client": {},
- "ranger-admin-site": {},
- "kms-log4j": {},
- "ranger-hdfs-policymgr-ssl": {},
- "tagsync-application-properties": {},
- "ranger-env": {},
- "ranger-ugsync-site": {},
- "ranger-hdfs-plugin-properties": {},
- "ranger-kms-security": {},
- "kerberos-env": {},
- "kms-properties": {},
- "admin-properties": {},
- "ranger-kms-policymgr-ssl": {},
+ "ranger-kms-site": {},
+ "ranger-hdfs-audit": {},
+ "ssl-client": {},
+ "ranger-admin-site": {},
+ "kms-log4j": {},
+ "ranger-hdfs-policymgr-ssl": {},
+ "tagsync-application-properties": {},
+ "ranger-env": {},
+ "ranger-ugsync-site": {},
+ "ranger-hdfs-plugin-properties": {},
+ "ranger-kms-security": {},
+ "kerberos-env": {},
+ "kms-properties": {},
+ "admin-properties": {},
+ "ranger-kms-policymgr-ssl": {},
"hdfs-site": {
"final": {
- "dfs.datanode.data.dir": "true",
- "dfs.namenode.http-address": "true",
- "dfs.datanode.failed.volumes.tolerated": "true",
- "dfs.support.append": "true",
- "dfs.namenode.name.dir": "true",
+ "dfs.datanode.data.dir": "true",
+ "dfs.namenode.http-address": "true",
+ "dfs.datanode.failed.volumes.tolerated": "true",
+ "dfs.support.append": "true",
+ "dfs.namenode.name.dir": "true",
"dfs.webhdfs.enabled": "true"
}
- },
- "ranger-tagsync-site": {},
- "tagsync-log4j": {},
- "ranger-kms-audit": {},
- "hadoop-policy": {},
- "hdfs-log4j": {},
- "usersync-log4j": {},
- "krb5-conf": {},
- "kms-site": {},
+ },
+ "ranger-tagsync-site": {},
+ "tagsync-log4j": {},
+ "ranger-kms-audit": {},
+ "hadoop-policy": {},
+ "hdfs-log4j": {},
+ "usersync-log4j": {},
+ "krb5-conf": {},
+ "kms-site": {},
"core-site": {
"final": {
"fs.defaultFS": "true"
}
- },
- "hadoop-env": {},
- "zookeeper-log4j": {},
- "ssl-server": {},
- "ranger-site": {},
- "zookeeper-env": {},
- "admin-log4j": {},
- "zoo.cfg": {},
- "ranger-hdfs-security": {},
- "usersync-properties": {},
- "kms-env": {},
- "dbks-site": {},
+ },
+ "hadoop-env": {},
+ "zookeeper-log4j": {},
+ "ssl-server": {},
+ "ranger-site": {},
+ "zookeeper-env": {},
+ "admin-log4j": {},
+ "zoo.cfg": {},
+ "ranger-hdfs-security": {},
+ "usersync-properties": {},
+ "kms-env": {},
+ "dbks-site": {},
"cluster-env": {}
- },
- "public_hostname": "c6401.ambari.apache.org",
- "commandId": "43-0",
- "hostname": "c6401.ambari.apache.org",
- "kerberosCommandParams": [],
- "serviceName": "RANGER_KMS",
- "role": "RANGER_KMS_SERVER",
- "forceRefreshConfigTagsBeforeExecution": [],
- "requestId": 43,
+ },
+ "public_hostname": "c6401.ambari.apache.org",
+ "commandId": "43-0",
+ "hostname": "c6401.ambari.apache.org",
+ "kerberosCommandParams": [],
+ "serviceName": "RANGER_KMS",
+ "role": "RANGER_KMS_SERVER",
+ "forceRefreshConfigTagsBeforeExecution": [],
+ "requestId": 43,
"agentConfigParams": {
"agent": {
"parallel_execution": 0
}
- },
- "clusterName": "c1",
- "commandType": "EXECUTION_COMMAND",
- "taskId": 200,
- "roleParams": {},
+ },
+ "clusterName": "c1",
+ "commandType": "EXECUTION_COMMAND",
+ "taskId": 200,
+ "roleParams": {},
"configurationTags": {
"ranger-kms-site": {
"tag": "version1467026737262"
- },
+ },
"ranger-hdfs-audit": {
"tag": "version1466705299922"
- },
+ },
"ssl-client": {
"tag": "version1"
- },
+ },
"ranger-admin-site": {
"tag": "version1467016680635"
- },
+ },
"kms-log4j": {
"tag": "version1467026737262"
- },
+ },
"ranger-hdfs-policymgr-ssl": {
"tag": "version1466705299922"
- },
+ },
"tagsync-application-properties": {
"tag": "version1467016680511"
- },
+ },
"ranger-env": {
"tag": "version1466705299949"
- },
+ },
"ranger-ugsync-site": {
"tag": "version1467016680537"
- },
+ },
"ranger-hdfs-plugin-properties": {
"tag": "version1466705299922"
- },
+ },
"ranger-kms-security": {
"tag": "version1467026737262"
- },
+ },
"kerberos-env": {
"tag": "version1467016537243"
- },
+ },
"admin-log4j": {
"tag": "version1466705299949"
- },
+ },
"admin-properties": {
"tag": "version1466705299949"
- },
+ },
"ranger-kms-policymgr-ssl": {
"tag": "version1467026737262"
- },
+ },
"hdfs-site": {
"tag": "version1467016680401"
- },
+ },
"ranger-tagsync-site": {
"tag": "version1467016680586"
- },
+ },
"zoo.cfg": {
"tag": "version1"
- },
+ },
"ranger-kms-audit": {
"tag": "version1467026737262"
- },
+ },
"hadoop-policy": {
"tag": "version1"
- },
+ },
"hdfs-log4j": {
"tag": "version1"
- },
+ },
"usersync-log4j": {
"tag": "version1466705299949"
- },
+ },
"krb5-conf": {
"tag": "version1467016537243"
- },
+ },
"kms-site": {
"tag": "version1467026751210"
- },
+ },
"core-site": {
"tag": "version1467026751256"
- },
+ },
"hadoop-env": {
"tag": "version1467016680446"
- },
+ },
"zookeeper-log4j": {
"tag": "version1"
- },
+ },
"ssl-server": {
"tag": "version1"
- },
+ },
"ranger-site": {
"tag": "version1466705299949"
- },
+ },
"zookeeper-env": {
"tag": "version1467016680492"
- },
+ },
"kms-properties": {
"tag": "version1467026737262"
- },
+ },
"tagsync-log4j": {
"tag": "version1466705299949"
- },
+ },
"ranger-hdfs-security": {
"tag": "version1466705299922"
- },
+ },
"usersync-properties": {
"tag": "version1466705299949"
- },
+ },
"kms-env": {
"tag": "version1467026737262"
- },
+ },
"dbks-site": {
"tag": "version1467026751234"
- },
+ },
"cluster-env": {
"tag": "version1467016680567"
}
- },
- "roleCommand": "START",
+ },
+ "roleCommand": "START",
"hostLevelParams": {
- "agent_stack_retry_on_unavailability": "false",
- "stack_name": "HDP",
- "package_version": "2_5_0_0_*",
+ "agent_stack_retry_on_unavailability": "false",
+ "stack_name": "HDP",
+ "package_version": "2_5_0_0_*",
"custom_mysql_jdbc_name": "mysql-connector-java.jar",
"previous_custom_mysql_jdbc_name": "mysql-connector-java-old.jar",
- "host_sys_prepped": "false",
- "ambari_db_rca_username": "mapred",
- "current_version": "2.5.0.0-801",
- "mysql_jdbc_url": "http://c6401.ambari.apache.org:8080/resources//mysql-connector-java.jar",
- "agent_stack_retry_count": "5",
- "stack_version": "2.5",
- "jdk_name": "jdk-8u60-linux-x64.tar.gz",
- "ambari_db_rca_driver": "org.postgresql.Driver",
- "java_home": "/usr/jdk64/jdk1.7.0_45",
- "repository_version_id": "1",
- "jdk_location": "http://c6401.ambari.apache.org:8080/resources/",
- "not_managed_hdfs_path_list": "[\"/tmp\"]",
- "ambari_db_rca_url": "jdbc:postgresql://c6401.ambari.apache.org/ambarirca",
- "java_version": "8",
- "repo_info": "[{\"baseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/BUILDS/2.5.0.0-801\",\"osType\":\"redhat6\",\"repoId\":\"HDP-2.5\",\"repoName\":\"HDP\",\"defaultBaseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/updates/2.5.0.0\",\"latestBaseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/BUILDS/2.5.0.0-801\",\"baseSaved\":true},{\"baseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP-UTILS-1.1.0.21/repos/centos6\",\"osType\":\"redhat6\",\"repoId\":\"HDP-UTILS-1.1.0.21\",\"repoName\":\"HDP-UTILS\",\"defaultBaseUrl\":\"http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.21/repos/centos6\",\"latestBaseUrl\":\"http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.21/repos/centos6\",\"baseSaved\":true}]",
- "package_list": "[{\"name\":\"ranger_${stack_version}-kms\",\"condition\":\"\",\"skipUpgrade\":false}]",
- "db_name": "ambari",
- "group_list": "[\"kms\",\"ranger\",\"hadoop\",\"users\"]",
- "agentCacheDir": "/var/lib/ambari-agent/cache",
- "ambari_db_rca_password": "mapred",
- "jce_name": "UnlimitedJCEPolicyJDK7.zip",
- "oracle_jdbc_url": "http://c6401.ambari.apache.org:8080/resources//ojdbc6.jar",
- "db_driver_filename": "mysql-connector-java.jar",
- "user_list": "[\"kms\",\"zookeeper\",\"ambari-qa\",\"ranger\",\"hdfs\"]",
+ "host_sys_prepped": "false",
+ "ambari_db_rca_username": "mapred",
+ "current_version": "2.5.0.0-801",
+ "mysql_jdbc_url": "http://c6401.ambari.apache.org:8080/resources//mysql-connector-java.jar",
+ "agent_stack_retry_count": "5",
+ "stack_version": "2.5",
+ "jdk_name": "jdk-8u60-linux-x64.tar.gz",
+ "ambari_db_rca_driver": "org.postgresql.Driver",
+ "java_home": "/usr/jdk64/jdk1.7.0_45",
+ "repository_version_id": "1",
+ "jdk_location": "http://c6401.ambari.apache.org:8080/resources/",
+ "not_managed_hdfs_path_list": "[\"/tmp\"]",
+ "ambari_db_rca_url": "jdbc:postgresql://c6401.ambari.apache.org/ambarirca",
+ "java_version": "8",
+ "repo_info": "[{\"baseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/BUILDS/2.5.0.0-801\",\"osType\":\"redhat6\",\"repoId\":\"HDP-2.5\",\"repoName\":\"HDP\",\"defaultBaseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/updates/2.5.0.0\",\"latestBaseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/BUILDS/2.5.0.0-801\",\"baseSaved\":true},{\"baseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP-UTILS-1.1.0.21/repos/centos6\",\"osType\":\"redhat6\",\"repoId\":\"HDP-UTILS-1.1.0.21\",\"repoName\":\"HDP-UTILS\",\"defaultBaseUrl\":\"http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.21/repos/centos6\",\"latestBaseUrl\":\"http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.21/repos/centos6\",\"baseSaved\":true}]",
+ "package_list": "[{\"name\":\"ranger_${stack_version}-kms\",\"condition\":\"\",\"skipUpgrade\":false}]",
+ "db_name": "ambari",
+ "group_list": "[\"kms\",\"ranger\",\"hadoop\",\"users\"]",
+ "agentCacheDir": "/var/lib/ambari-agent/cache",
+ "ambari_db_rca_password": "mapred",
+ "jce_name": "UnlimitedJCEPolicyJDK7.zip",
+ "oracle_jdbc_url": "http://c6401.ambari.apache.org:8080/resources//ojdbc6.jar",
+ "db_driver_filename": "mysql-connector-java.jar",
+ "user_list": "[\"kms\",\"zookeeper\",\"ambari-qa\",\"ranger\",\"hdfs\"]",
"clientsToUpdateConfigs": "[\"*\"]"
- },
+ },
"commandParams": {
- "service_package_folder": "common-services/RANGER_KMS/0.5.0.2.3/package",
- "script": "scripts/kms_server.py",
- "hooks_folder": "HDP/2.0.6/hooks",
- "version": "2.5.0.0-801",
- "max_duration_for_retries": "0",
- "command_retry_enabled": "false",
- "command_timeout": "600",
+ "service_package_folder": "common-services/RANGER_KMS/0.5.0.2.3/package",
+ "script": "scripts/kms_server.py",
+ "hooks_folder": "HDP/2.0.6/hooks",
+ "version": "2.5.0.0-801",
+ "max_duration_for_retries": "0",
+ "command_retry_enabled": "false",
+ "command_timeout": "600",
"script_type": "PYTHON"
- },
- "forceRefreshConfigTags": [],
- "stageId": 0,
+ },
+ "forceRefreshConfigTags": [],
+ "stageId": 0,
"clusterHostInfo": {
"snamenode_host": [
"c6401.ambari.apache.org"
- ],
+ ],
"ambari_server_use_ssl": [
"false"
- ],
+ ],
"all_ping_ports": [
"8670"
- ],
+ ],
"ranger_tagsync_hosts": [
"c6401.ambari.apache.org"
- ],
+ ],
"ranger_kms_server_hosts": [
"c6401.ambari.apache.org"
- ],
+ ],
"ranger_usersync_hosts": [
"c6401.ambari.apache.org"
- ],
+ ],
"all_hosts": [
"c6401.ambari.apache.org"
- ],
+ ],
"slave_hosts": [
"c6401.ambari.apache.org"
- ],
+ ],
"namenode_host": [
"c6401.ambari.apache.org"
- ],
+ ],
"ambari_server_port": [
"8080"
- ],
+ ],
"ranger_admin_hosts": [
"c6401.ambari.apache.org"
- ],
+ ],
"all_racks": [
"/default-rack"
- ],
+ ],
"all_ipv4_ips": [
"172.22.83.73"
- ],
+ ],
"ambari_server_host": [
"c6401.ambari.apache.org"
- ],
+ ],
"zookeeper_hosts": [
"c6401.ambari.apache.org"
]
- },
+ },
"configurations": {
"ranger-kms-site": {
- "ranger.service.https.port": "9393",
- "ranger.service.https.attrib.ssl.enabled": "false",
- "xa.webapp.dir": "./webapp",
- "ranger.service.host": "{{kms_host}}",
- "ranger.service.shutdown.port": "7085",
- "ranger.contextName": "/kms",
+ "ranger.service.https.port": "9393",
+ "ranger.service.https.attrib.ssl.enabled": "false",
+ "xa.webapp.dir": "./webapp",
+ "ranger.service.host": "{{kms_host}}",
+ "ranger.service.shutdown.port": "7085",
+ "ranger.contextName": "/kms",
"ranger.service.http.port": "{{kms_port}}"
- },
+ },
"ranger-hdfs-audit": {
"xasecure.audit.destination.solr.zookeepers": "NONE",
- "xasecure.audit.destination.solr.urls": "",
- "xasecure.audit.destination.solr.batch.filespool.dir": "/var/log/hadoop/hdfs/audit/solr/spool",
+ "xasecure.audit.destination.solr.urls": "",
+ "xasecure.audit.destination.solr.batch.filespool.dir": "/var/log/hadoop/hdfs/audit/solr/spool",
"xasecure.audit.destination.hdfs.batch.filespool.dir": "/var/log/hadoop/hdfs/audit/hdfs/spool",
- "xasecure.audit.destination.hdfs": "true",
- "xasecure.audit.destination.solr": "false",
- "xasecure.audit.provider.summary.enabled": "false",
- "xasecure.audit.destination.hdfs.dir": "hdfs://c6401.ambari.apache.org:8020/ranger/audit",
+ "xasecure.audit.destination.hdfs": "true",
+ "xasecure.audit.destination.solr": "false",
+ "xasecure.audit.provider.summary.enabled": "false",
+ "xasecure.audit.destination.hdfs.dir": "hdfs://c6401.ambari.apache.org:8020/ranger/audit",
"xasecure.audit.is.enabled": "true"
- },
+ },
"ssl-client": {
- "ssl.client.truststore.reload.interval": "10000",
- "ssl.client.keystore.password": "bigdata",
- "ssl.client.truststore.type": "jks",
- "ssl.client.keystore.location": "/etc/security/clientKeys/keystore.jks",
- "ssl.client.truststore.location": "/etc/security/clientKeys/all.jks",
- "ssl.client.truststore.password": "bigdata",
+ "ssl.client.truststore.reload.interval": "10000",
+ "ssl.client.keystore.password": "bigdata",
+ "ssl.client.truststore.type": "jks",
+ "ssl.client.keystore.location": "/etc/security/clientKeys/keystore.jks",
+ "ssl.client.truststore.location": "/etc/security/clientKeys/all.jks",
+ "ssl.client.truststore.password": "bigdata",
"ssl.client.keystore.type": "jks"
- },
+ },
"ranger-admin-site": {
- "ranger.admin.kerberos.cookie.domain": "{{ranger_host}}",
- "ranger.kms.service.user.hdfs": "hdfs",
- "ranger.spnego.kerberos.principal": "HTTP/_HOST@EXAMPLE.COM",
- "ranger.ldap.ad.url": "{{ranger_ug_ldap_url}}",
- "ranger.plugins.hive.serviceuser": "hive",
- "ranger.lookup.kerberos.keytab": "/etc/security/keytabs/rangerlookup.service.keytab",
- "ranger.plugins.kms.serviceuser": "kms",
- "ranger.service.https.attrib.ssl.enabled": "false",
- "ranger.sso.browser.useragent": "Mozilla,chrome",
- "ranger.jpa.jdbc.url": "jdbc:mysql://c6401.ambari.apache.org:3306/ranger01",
- "ranger.plugins.hbase.serviceuser": "hbase",
+ "ranger.admin.kerberos.cookie.domain": "{{ranger_host}}",
+ "ranger.kms.service.user.hdfs": "hdfs",
+ "ranger.spnego.kerberos.principal": "HTTP/_HOST@EXAMPLE.COM",
+ "ranger.ldap.ad.url": "{{ranger_ug_ldap_url}}",
+ "ranger.plugins.hive.serviceuser": "hive",
+ "ranger.lookup.kerberos.keytab": "/etc/security/keytabs/rangerlookup.service.keytab",
+ "ranger.plugins.kms.serviceuser": "kms",
+ "ranger.service.https.attrib.ssl.enabled": "false",
+ "ranger.sso.browser.useragent": "Mozilla,chrome",
+ "ranger.jpa.jdbc.url": "jdbc:mysql://c6401.ambari.apache.org:3306/ranger01",
+ "ranger.plugins.hbase.serviceuser": "hbase",
"ranger.plugins.hdfs.serviceuser": "hdfs",
- "ranger.ldap.group.searchbase": "{{ranger_ug_ldap_group_searchbase}}",
- "ranger.ldap.user.dnpattern": "uid={0},ou=users,dc=xasecure,dc=net",
- "ranger.plugins.knox.serviceuser": "knox",
- "ranger.ldap.base.dn": "dc=example,dc=com",
- "ranger.sso.publicKey": "",
- "ranger.admin.kerberos.cookie.path": "/",
+ "ranger.ldap.group.searchbase": "{{ranger_ug_ldap_group_searchbase}}",
+ "ranger.ldap.user.dnpattern": "uid={0},ou=users,dc=xasecure,dc=net",
+ "ranger.plugins.knox.serviceuser": "knox",
+ "ranger.ldap.base.dn": "dc=example,dc=com",
+ "ranger.sso.publicKey": "",
+ "ranger.admin.kerberos.cookie.path": "/",
"ranger.service.https.attrib.clientAuth": "want",
- "ranger.jpa.jdbc.user": "{{ranger_db_user}}",
- "ranger.ldap.ad.user.searchfilter": "(sAMAccountName={0})",
- "ranger.ldap.group.roleattribute": "cn",
- "ranger.plugins.kafka.serviceuser": "kafka",
- "ranger.admin.kerberos.principal": "rangeradmin/_HOST@EXAMPLE.COM",
- "ranger.ldap.ad.bind.dn": "{{ranger_ug_ldap_bind_dn}}",
+ "ranger.jpa.jdbc.user": "{{ranger_db_user}}",
+ "ranger.ldap.ad.user.searchfilter": "(sAMAccountName={0})",
+ "ranger.ldap.group.roleattribute": "cn",
+ "ranger.plugins.kafka.serviceuser": "kafka",
+ "ranger.admin.kerberos.principal": "rangeradmin/_HOST@EXAMPLE.COM",
+ "ranger.ldap.ad.bind.dn": "{{ranger_ug_ldap_bind_dn}}",
"ranger.credential.provider.path": "/etc/ranger/admin/rangeradmin.jceks",
- "ranger.ldap.referral": "ignore",
- "ranger.service.http.port": "6080",
- "ranger.ldap.user.searchfilter": "(uid={0})",
- "ranger.plugins.atlas.serviceuser": "atlas",
- "ranger.truststore.password": "changeit",
- "ranger.ldap.bind.password": "{{ranger_usersync_ldap_ldapbindpassword}}",
- "ranger.audit.solr.password": "NONE",
- "ranger.audit.solr.zookeepers": "NONE",
- "ranger.lookup.kerberos.principal": "rangerlookup/_HOST@EXAMPLE.COM",
- "ranger.service.https.port": "6182",
- "ranger.plugins.storm.serviceuser": "storm",
- "ranger.externalurl": "{{ranger_external_url}}",
- "ranger.truststore.file": "/etc/ranger/admin/conf/ranger-admin-keystore.jks",
- "ranger.kms.service.user.hive": "",
- "ranger.https.attrib.keystore.file": "/etc/ranger/admin/conf/ranger-admin-keystore.jks",
- "ranger.jpa.jdbc.dialect": "{{jdbc_dialect}}",
- "ranger.service.host": "{{ranger_host}}",
+ "ranger.ldap.referral": "ignore",
+ "ranger.service.http.port": "6080",
+ "ranger.ldap.user.searchfilter": "(uid={0})",
+ "ranger.plugins.atlas.serviceuser": "atlas",
+ "ranger.truststore.password": "changeit",
+ "ranger.ldap.bind.password": "{{ranger_usersync_ldap_ldapbindpassword}}",
+ "ranger.audit.solr.password": "NONE",
+ "ranger.audit.solr.zookeepers": "NONE",
+ "ranger.lookup.kerberos.principal": "rangerlookup/_HOST@EXAMPLE.COM",
+ "ranger.service.https.port": "6182",
+ "ranger.plugins.storm.serviceuser": "storm",
+ "ranger.externalurl": "{{ranger_external_url}}",
+ "ranger.truststore.file": "/etc/ranger/admin/conf/ranger-admin-keystore.jks",
+ "ranger.kms.service.user.hive": "",
+ "ranger.https.attrib.keystore.file": "/etc/ranger/admin/conf/ranger-admin-keystore.jks",
+ "ranger.jpa.jdbc.dialect": "{{jdbc_dialect}}",
+ "ranger.service.host": "{{ranger_host}}",
"ranger.service.https.attrib.keystore.keyalias": "rangeradmin",
- "ranger.service.https.attrib.keystore.pass": "xasecure",
- "ranger.unixauth.remote.login.enabled": "true",
- "ranger.jpa.jdbc.credential.alias": "rangeradmin",
- "ranger.ldap.ad.bind.password": "{{ranger_usersync_ldap_ldapbindpassword}}",
- "ranger.audit.solr.username": "ranger_solr",
- "ranger.sso.enabled": "false",
- "ranger.audit.solr.urls": "",
- "ranger.ldap.ad.domain": "",
- "ranger.plugins.yarn.serviceuser": "yarn",
- "ranger.audit.source.type": "solr",
- "ranger.ldap.bind.dn": "{{ranger_ug_ldap_bind_dn}}",
- "ranger.ldap.url": "{{ranger_ug_ldap_url}}",
- "ranger.authentication.method": "UNIX",
- "ranger.service.http.enabled": "true",
- "ranger.ldap.group.searchfilter": "{{ranger_ug_ldap_group_searchfilter}}",
- "ranger.ldap.ad.referral": "ignore",
- "ranger.ldap.ad.base.dn": "dc=example,dc=com",
- "ranger.jpa.jdbc.password": "_",
- "ranger.spnego.kerberos.keytab": "/etc/security/keytabs/spnego.service.keytab",
- "ranger.sso.providerurl": "",
- "ranger.unixauth.service.hostname": "{{ugsync_host}}",
- "ranger.admin.kerberos.keytab": "/etc/security/keytabs/rangeradmin.service.keytab",
- "ranger.admin.kerberos.token.valid.seconds": "30",
- "ranger.jpa.jdbc.driver": "com.mysql.jdbc.Driver",
+ "ranger.service.https.attrib.keystore.pass": "xasecure",
+ "ranger.unixauth.remote.login.enabled": "true",
+ "ranger.jpa.jdbc.credential.alias": "rangeradmin",
+ "ranger.ldap.ad.bind.password": "{{ranger_usersync_ldap_ldapbindpassword}}",
+ "ranger.audit.solr.username": "ranger_solr",
+ "ranger.sso.enabled": "false",
+ "ranger.audit.solr.urls": "",
+ "ranger.ldap.ad.domain": "",
+ "ranger.plugins.yarn.serviceuser": "yarn",
+ "ranger.audit.source.type": "solr",
+ "ranger.ldap.bind.dn": "{{ranger_ug_ldap_bind_dn}}",
+ "ranger.ldap.url": "{{ranger_ug_ldap_url}}",
+ "ranger.authentication.method": "UNIX",
+ "ranger.service.http.enabled": "true",
+ "ranger.ldap.group.searchfilter": "{{ranger_ug_ldap_group_searchfilter}}",
+ "ranger.ldap.ad.referral": "ignore",
+ "ranger.ldap.ad.base.dn": "dc=example,dc=com",
+ "ranger.jpa.jdbc.password": "_",
+ "ranger.spnego.kerberos.keytab": "/etc/security/keytabs/spnego.service.keytab",
+ "ranger.sso.providerurl": "",
+ "ranger.unixauth.service.hostname": "{{ugsync_host}}",
+ "ranger.admin.kerberos.keytab": "/etc/security/keytabs/rangeradmin.service.keytab",
+ "ranger.admin.kerberos.token.valid.seconds": "30",
+ "ranger.jpa.jdbc.driver": "com.mysql.jdbc.Driver",
"ranger.unixauth.service.port": "5151"
- },
+ },
"kms-log4j": {
"content": "\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License. See accompanying LICENSE file.\n#\n\n# If the Java System property 'kms.log.dir' is not defined at KMS start up time\n# Setup sets its value to '${kms.home}/logs'\n\nlog4j.appender.kms=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.kms.DatePattern='.'yyyy-MM-dd\nlog4j.appender.kms.File=${kms.log.dir}/kms.log\nlog4j.appender.kms.Append=true\nlog4j.appender.kms.layout=org.apache.log4j.PatternLayout\
nlog4j.appender.kms.layout.ConversionPattern=%d{ISO8601} %-5p %c{1} - %m%n\n\nlog4j.appender.kms-audit=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.kms-audit.DatePattern='.'yyyy-MM-dd\nlog4j.appender.kms-audit.File=${kms.log.dir}/kms-audit.log\nlog4j.appender.kms-audit.Append=true\nlog4j.appender.kms-audit.layout=org.apache.log4j.PatternLayout\nlog4j.appender.kms-audit.layout.ConversionPattern=%d{ISO8601} %m%n\n\nlog4j.logger.kms-audit=INFO, kms-audit\nlog4j.additivity.kms-audit=false\n\nlog4j.rootLogger=ALL, kms\nlog4j.logger.org.apache.hadoop.conf=ERROR\nlog4j.logger.org.apache.hadoop=INFO\nlog4j.logger.com.sun.jersey.server.wadl.generators.WadlGeneratorJAXBGrammarGenerator=OFF"
- },
+ },
"ranger-hdfs-policymgr-ssl": {
- "xasecure.policymgr.clientssl.keystore": "/usr/hdp/current/hadoop-client/conf/ranger-plugin-keystore.jks",
- "xasecure.policymgr.clientssl.truststore.password": "changeit",
- "xasecure.policymgr.clientssl.keystore.credential.file": "jceks://file{{credential_file}}",
- "xasecure.policymgr.clientssl.truststore": "/usr/hdp/current/hadoop-client/conf/ranger-plugin-truststore.jks",
- "xasecure.policymgr.clientssl.truststore.credential.file": "jceks://file{{credential_file}}",
+ "xasecure.policymgr.clientssl.keystore": "/usr/hdp/current/hadoop-client/conf/ranger-plugin-keystore.jks",
+ "xasecure.policymgr.clientssl.truststore.password": "changeit",
+ "xasecure.policymgr.clientssl.keystore.credential.file": "jceks://file{{credential_file}}",
+ "xasecure.policymgr.clientssl.truststore": "/usr/hdp/current/hadoop-client/conf/ranger-plugin-truststore.jks",
+ "xasecure.policymgr.clientssl.truststore.credential.file": "jceks://file{{credential_file}}",
"xasecure.policymgr.clientssl.keystore.password": "myKeyFilePassword"
- },
+ },
"tagsync-application-properties": {
- "atlas.kafka.zookeeper.connect": "c6401.ambari.apache.org:2181",
- "atlas.kafka.security.protocol": "SASL_PLAINTEXT",
- "atlas.jaas.KafkaClient.option.principal": "{{tagsync_jaas_principal}}",
- "atlas.jaas.KafkaClient.option.keyTab": "{{tagsync_keytab_path}}",
- "atlas.kafka.entities.group.id": "ranger_entities_consumer",
- "atlas.jaas.KafkaClient.loginModuleName": "com.sun.security.auth.module.Krb5LoginModule",
- "atlas.jaas.KafkaClient.option.serviceName": "kafka",
- "atlas.kafka.bootstrap.servers": "localhost:6667",
- "atlas.jaas.KafkaClient.option.useKeyTab": "true",
- "atlas.jaas.KafkaClient.option.storeKey": "true",
- "atlas.jaas.KafkaClient.loginModuleControlFlag": "required",
+ "atlas.kafka.zookeeper.connect": "c6401.ambari.apache.org:2181",
+ "atlas.kafka.security.protocol": "SASL_PLAINTEXT",
+ "atlas.jaas.KafkaClient.option.principal": "{{tagsync_jaas_principal}}",
+ "atlas.jaas.KafkaClient.option.keyTab": "{{tagsync_keytab_path}}",
+ "atlas.kafka.entities.group.id": "ranger_entities_consumer",
+ "atlas.jaas.KafkaClient.loginModuleName": "com.sun.security.auth.module.Krb5LoginModule",
+ "atlas.jaas.KafkaClient.option.serviceName": "kafka",
+ "atlas.kafka.bootstrap.servers": "localhost:6667",
+ "atlas.jaas.KafkaClient.option.useKeyTab": "true",
+ "atlas.jaas.KafkaClient.option.storeKey": "true",
+ "atlas.jaas.KafkaClient.loginModuleControlFlag": "required",
"atlas.kafka.sasl.kerberos.service.name": "kafka"
- },
+ },
"ranger-env": {
- "ranger_solr_shards": "1",
- "ranger_solr_config_set": "ranger_audits",
- "ranger_user": "ranger",
- "xml_configurations_supported": "true",
- "ranger-atlas-plugin-enabled": "No",
- "ranger-hbase-plugin-enabled": "No",
- "ranger-yarn-plugin-enabled": "No",
- "bind_anonymous": "false",
- "ranger_admin_username": "amb_ranger_admin",
- "admin_password": "admin",
- "is_solrCloud_enabled": "false",
- "ranger-storm-plugin-enabled": "No",
- "ranger-hdfs-plugin-enabled": "No",
- "ranger_group": "ranger",
- "ranger-knox-plugin-enabled": "No",
- "ranger_admin_log_dir": "/var/log/ranger/admin",
- "ranger-kafka-plugin-enabled": "No",
- "ranger_privelege_user_jdbc_url": "jdbc:mysql://c6401.ambari.apache.org:3306",
- "ranger-hive-plugin-enabled": "No",
- "xasecure.audit.destination.solr": "false",
- "ranger_pid_dir": "/var/run/ranger",
- "xasecure.audit.destination.hdfs": "true",
- "admin_username": "admin",
- "xasecure.audit.destination.hdfs.dir": "hdfs://c6401.ambari.apache.org:8020/ranger/audit",
- "create_db_dbuser": "true",
- "ranger_solr_collection_name": "ranger_audits",
- "ranger_admin_password": "P1!q9xa96SMi5NCl",
+ "ranger_solr_shards": "1",
+ "ranger_solr_config_set": "ranger_audits",
+ "ranger_user": "ranger",
+ "xml_configurations_supported": "true",
+ "ranger-atlas-plugin-enabled": "No",
+ "ranger-hbase-plugin-enabled": "No",
+ "ranger-yarn-plugin-enabled": "No",
+ "bind_anonymous": "false",
+ "ranger_admin_username": "amb_ranger_admin",
+ "admin_password": "admin",
+ "is_solrCloud_enabled": "false",
+ "ranger-storm-plugin-enabled": "No",
+ "ranger-hdfs-plugin-enabled": "No",
+ "ranger_group": "ranger",
+ "ranger-knox-plugin-enabled": "No",
+ "ranger_admin_log_dir": "/var/log/ranger/admin",
+ "ranger-kafka-plugin-enabled": "No",
+ "ranger_privelege_user_jdbc_url": "jdbc:mysql://c6401.ambari.apache.org:3306",
+ "ranger-hive-plugin-enabled": "No",
+ "xasecure.audit.destination.solr": "false",
+ "ranger_pid_dir": "/var/run/ranger",
+ "xasecure.audit.destination.hdfs": "true",
+ "admin_username": "admin",
+ "xasecure.audit.destination.hdfs.dir": "hdfs://c6401.ambari.apache.org:8020/ranger/audit",
+ "create_db_dbuser": "true",
+ "ranger_solr_collection_name": "ranger_audits",
+ "ranger_admin_password": "P1!q9xa96SMi5NCl",
"ranger_usersync_log_dir": "/var/log/ranger/usersync"
- },
+ },
"ranger-ugsync-site": {
- "ranger.usersync.ldap.binddn": "",
- "ranger.usersync.policymgr.username": "rangerusersync",
- "ranger.usersync.policymanager.mockrun": "false",
- "ranger.usersync.group.searchbase": "",
- "ranger.usersync.ldap.bindalias": "testldapalias",
- "ranger.usersync.truststore.file": "/usr/hdp/current/ranger-usersync/conf/mytruststore.jks",
- "ranger.usersync.port": "5151",
- "ranger.usersync.pagedresultssize": "500",
- "ranger.usersync.group.memberattributename": "",
- "ranger.usersync.kerberos.principal": "rangerusersync/_HOST@EXAMPLE.COM",
- "ranger.usersync.source.impl.class": "org.apache.ranger.unixusersync.process.UnixUserGroupBuilder",
- "ranger.usersync.ldap.referral": "ignore",
- "ranger.usersync.group.searchfilter": "",
- "ranger.usersync.ldap.user.objectclass": "person",
- "ranger.usersync.logdir": "{{usersync_log_dir}}",
- "ranger.usersync.ldap.user.searchfilter": "",
- "ranger.usersync.ldap.groupname.caseconversion": "none",
- "ranger.usersync.ldap.ldapbindpassword": "",
- "ranger.usersync.unix.minUserId": "500",
- "ranger.usersync.policymanager.maxrecordsperapicall": "1000",
- "ranger.usersync.group.nameattribute": "",
- "ranger.usersync.policymgr.alias": "ranger.usersync.policymgr.password",
- "ranger.usersync.keystore.file": "/usr/hdp/current/ranger-usersync/conf/unixauthservice.jks",
- "ranger.usersync.user.searchenabled": "false",
- "ranger.usersync.group.usermapsyncenabled": "true",
- "ranger.usersync.ldap.bindkeystore": "",
- "ranger.usersync.ldap.user.groupnameattribute": "memberof, ismemberof",
- "ranger.usersync.kerberos.keytab": "/etc/security/keytabs/rangerusersync.service.keytab",
- "ranger.usersync.passwordvalidator.path": "./native/credValidator.uexe",
- "ranger.usersync.group.objectclass": "",
- "ranger.usersync.ldap.user.searchscope": "sub",
- "ranger.usersync.unix.password.file": "/etc/passwd",
- "ranger.usersync.ldap.user.nameattribute": "",
- "ranger.usersync.pagedresultsenabled": "true",
- "ranger.usersync.policymanager.baseURL": "{{ranger_external_url}}",
- "ranger.usersync.group.search.first.enabled": "false",
- "ranger.usersync.group.searchenabled": "false",
- "ranger.usersync.sink.impl.class": "org.apache.ranger.unixusersync.process.PolicyMgrUserGroupBuilder",
- "ranger.usersync.ssl": "true",
- "ranger.usersync.ldap.url": "",
- "ranger.usersync.ldap.searchBase": "dc=hadoop,dc=apache,dc=org",
- "ranger.usersync.policymgr.keystore": "/usr/hdp/current/ranger-usersync/conf/ugsync.jceks",
- "ranger.usersync.ldap.user.searchbase": "",
- "ranger.usersync.ldap.username.caseconversion": "none",
- "ranger.usersync.credstore.filename": "/usr/hdp/current/ranger-usersync/conf/ugsync.jceks",
- "ranger.usersync.keystore.password": "UnIx529p",
- "ranger.usersync.unix.group.file": "/etc/group",
- "ranger.usersync.filesource.file": "/tmp/usergroup.txt",
- "ranger.usersync.group.searchscope": "",
- "ranger.usersync.truststore.password": "changeit",
- "ranger.usersync.enabled": "true",
- "ranger.usersync.sleeptimeinmillisbetweensynccycle": "60000",
+ "ranger.usersync.ldap.binddn": "",
+ "ranger.usersync.policymgr.username": "rangerusersync",
+ "ranger.usersync.policymanager.mockrun": "false",
+ "ranger.usersync.group.searchbase": "",
+ "ranger.usersync.ldap.bindalias": "testldapalias",
+ "ranger.usersync.truststore.file": "/usr/hdp/current/ranger-usersync/conf/mytruststore.jks",
+ "ranger.usersync.port": "5151",
+ "ranger.usersync.pagedresultssize": "500",
+ "ranger.usersync.group.memberattributename": "",
+ "ranger.usersync.kerberos.principal": "rangerusersync/_HOST@EXAMPLE.COM",
+ "ranger.usersync.source.impl.class": "org.apache.ranger.unixusersync.process.UnixUserGroupBuilder",
+ "ranger.usersync.ldap.referral": "ignore",
+ "ranger.usersync.group.searchfilter": "",
+ "ranger.usersync.ldap.user.objectclass": "person",
+ "ranger.usersync.logdir": "{{usersync_log_dir}}",
+ "ranger.usersync.ldap.user.searchfilter": "",
+ "ranger.usersync.ldap.groupname.caseconversion": "none",
+ "ranger.usersync.ldap.ldapbindpassword": "",
+ "ranger.usersync.unix.minUserId": "500",
+ "ranger.usersync.policymanager.maxrecordsperapicall": "1000",
+ "ranger.usersync.group.nameattribute": "",
+ "ranger.usersync.policymgr.alias": "ranger.usersync.policymgr.password",
+ "ranger.usersync.keystore.file": "/usr/hdp/current/ranger-usersync/conf/unixauthservice.jks",
+ "ranger.usersync.user.searchenabled": "false",
+ "ranger.usersync.group.usermapsyncenabled": "true",
+ "ranger.usersync.ldap.bindkeystore": "",
+ "ranger.usersync.ldap.user.groupnameattribute": "memberof, ismemberof",
+ "ranger.usersync.kerberos.keytab": "/etc/security/keytabs/rangerusersync.service.keytab",
+ "ranger.usersync.passwordvalidator.path": "./native/credValidator.uexe",
+ "ranger.usersync.group.objectclass": "",
+ "ranger.usersync.ldap.user.searchscope": "sub",
+ "ranger.usersync.unix.password.file": "/etc/passwd",
+ "ranger.usersync.ldap.user.nameattribute": "",
+ "ranger.usersync.pagedresultsenabled": "true",
+ "ranger.usersync.policymanager.baseURL": "{{ranger_external_url}}",
+ "ranger.usersync.group.search.first.enabled": "false",
+ "ranger.usersync.group.searchenabled": "false",
+ "ranger.usersync.sink.impl.class": "org.apache.ranger.unixusersync.process.PolicyMgrUserGroupBuilder",
+ "ranger.usersync.ssl": "true",
+ "ranger.usersync.ldap.url": "",
+ "ranger.usersync.ldap.searchBase": "dc=hadoop,dc=apache,dc=org",
+ "ranger.usersync.policymgr.keystore": "/usr/hdp/current/ranger-usersync/conf/ugsync.jceks",
+ "ranger.usersync.ldap.user.searchbase": "",
+ "ranger.usersync.ldap.username.caseconversion": "none",
+ "ranger.usersync.credstore.filename": "/usr/hdp/current/ranger-usersync/conf/ugsync.jceks",
+ "ranger.usersync.keystore.password": "UnIx529p",
+ "ranger.usersync.unix.group.file": "/etc/group",
+ "ranger.usersync.filesource.file": "/tmp/usergroup.txt",
+ "ranger.usersync.group.searchscope": "",
+ "ranger.usersync.truststore.password": "changeit",
+ "ranger.usersync.enabled": "true",
+ "ranger.usersync.sleeptimeinmillisbetweensynccycle": "60000",
"ranger.usersync.filesource.text.delimiter": ","
- },
+ },
"ranger-hdfs-plugin-properties": {
- "hadoop.rpc.protection": "authentication",
- "ranger-hdfs-plugin-enabled": "No",
- "REPOSITORY_CONFIG_USERNAME": "hadoop",
- "policy_user": "ambari-qa",
- "common.name.for.certificate": "",
+ "hadoop.rpc.protection": "authentication",
+ "ranger-hdfs-plugin-enabled": "No",
+ "REPOSITORY_CONFIG_USERNAME": "hadoop",
+ "policy_user": "ambari-qa",
+ "common.name.for.certificate": "",
"REPOSITORY_CONFIG_PASSWORD": "hadoop"
- },
+ },
"ranger-kms-security": {
- "ranger.plugin.kms.policy.pollIntervalMs": "30000",
- "ranger.plugin.kms.service.name": "{{repo_name}}",
- "ranger.plugin.kms.policy.source.impl": "org.apache.ranger.admin.client.RangerAdminRESTClient",
- "ranger.plugin.kms.policy.rest.ssl.config.file": "/etc/ranger/kms/conf/ranger-policymgr-ssl.xml",
- "ranger.plugin.kms.policy.cache.dir": "/etc/ranger/{{repo_name}}/policycache",
+ "ranger.plugin.kms.policy.pollIntervalMs": "30000",
+ "ranger.plugin.kms.service.name": "{{repo_name}}",
+ "ranger.plugin.kms.policy.source.impl": "org.apache.ranger.admin.client.RangerAdminRESTClient",
+ "ranger.plugin.kms.policy.rest.ssl.config.file": "/etc/ranger/kms/conf/ranger-policymgr-ssl.xml",
+ "ranger.plugin.kms.policy.cache.dir": "/etc/ranger/{{repo_name}}/policycache",
"ranger.plugin.kms.policy.rest.url": "{{policymgr_mgr_url}}"
- },
+ },
"kerberos-env": {
- "kdc_hosts": "c6401.ambari.apache.org",
- "manage_auth_to_local": "true",
- "install_packages": "true",
- "realm": "EXAMPLE.COM",
- "encryption_types": "aes des3-cbc-sha1 rc4 des-cbc-md5",
- "ad_create_attributes_template": "\n{\n \"objectClass\": [\"top\", \"person\", \"organizationalPerson\", \"user\"],\n \"cn\": \"$principal_name\",\n #if( $is_service )\n \"servicePrincipalName\": \"$principal_name\",\n #end\n \"userPrincipalName\": \"$normalized_principal\",\n \"unicodePwd\": \"$password\",\n \"accountExpires\": \"0\",\n \"userAccountControl\": \"66048\"\n}",
- "kdc_create_attributes": "",
- "admin_server_host": "c6401.ambari.apache.org",
- "group": "ambari-managed-principals",
- "password_length": "20",
- "ldap_url": "",
- "manage_identities": "true",
- "password_min_lowercase_letters": "1",
- "create_ambari_principal": "true",
- "service_check_principal_name": "${cluster_name|toLower()}-${short_date}",
- "executable_search_paths": "/usr/bin, /usr/kerberos/bin, /usr/sbin, /usr/lib/mit/bin, /usr/lib/mit/sbin",
- "password_chat_timeout": "5",
- "kdc_type": "mit-kdc",
- "set_password_expiry": "false",
- "password_min_punctuation": "1",
- "container_dn": "",
- "case_insensitive_username_rules": "false",
- "password_min_whitespace": "0",
- "password_min_uppercase_letters": "1",
+ "kdc_hosts": "c6401.ambari.apache.org",
+ "manage_auth_to_local": "true",
+ "install_packages": "true",
+ "realm": "EXAMPLE.COM",
+ "encryption_types": "aes des3-cbc-sha1 rc4 des-cbc-md5",
+ "ad_create_attributes_template": "\n{\n \"objectClass\": [\"top\", \"person\", \"organizationalPerson\", \"user\"],\n \"cn\": \"$principal_name\",\n #if( $is_service )\n \"servicePrincipalName\": \"$principal_name\",\n #end\n \"userPrincipalName\": \"$normalized_principal\",\n \"unicodePwd\": \"$password\",\n \"accountExpires\": \"0\",\n \"userAccountControl\": \"66048\"\n}",
+ "kdc_create_attributes": "",
+ "admin_server_host": "c6401.ambari.apache.org",
+ "group": "ambari-managed-principals",
+ "password_length": "20",
+ "ldap_url": "",
+ "manage_identities": "true",
+ "password_min_lowercase_letters": "1",
+ "create_ambari_principal": "true",
+ "service_check_principal_name": "${cluster_name|toLower()}-${short_date}",
+ "executable_search_paths": "/usr/bin, /usr/kerberos/bin, /usr/sbin, /usr/lib/mit/bin, /usr/lib/mit/sbin",
+ "password_chat_timeout": "5",
+ "kdc_type": "mit-kdc",
+ "set_password_expiry": "false",
+ "password_min_punctuation": "1",
+ "container_dn": "",
+ "case_insensitive_username_rules": "false",
+ "password_min_whitespace": "0",
+ "password_min_uppercase_letters": "1",
"password_min_digits": "1"
- },
+ },
"kms-properties": {
- "REPOSITORY_CONFIG_USERNAME": "keyadmin",
- "db_user": "rangerkms01",
- "DB_FLAVOR": "MYSQL",
- "db_password": "rangerkms01",
- "KMS_MASTER_KEY_PASSWD": "StrongPassword01",
- "db_root_user": "root",
- "db_name": "rangerkms01",
- "db_host": "c6401.ambari.apache.org",
- "db_root_password": "vagrant",
- "SQL_CONNECTOR_JAR": "{{driver_curl_target}}",
+ "REPOSITORY_CONFIG_USERNAME": "keyadmin",
+ "db_user": "rangerkms01",
+ "DB_FLAVOR": "MYSQL",
+ "db_password": "rangerkms01",
+ "KMS_MASTER_KEY_PASSWD": "StrongPassword01",
+ "db_root_user": "root",
+ "db_name": "rangerkms01",
+ "db_host": "c6401.ambari.apache.org",
+ "db_root_password": "vagrant",
+ "SQL_CONNECTOR_JAR": "{{driver_curl_target}}",
"REPOSITORY_CONFIG_PASSWORD": "keyadmin"
- },
+ },
"admin-properties": {
- "db_user": "rangeradmin01",
- "DB_FLAVOR": "MYSQL",
- "db_password": "rangeradmin01",
- "db_root_user": "root",
- "policymgr_external_url": "http://c6401.ambari.apache.org:6080",
- "db_name": "ranger01",
- "db_host": "c6401.ambari.apache.org",
- "db_root_password": "vagrant",
+ "db_user": "rangeradmin01",
+ "DB_FLAVOR": "MYSQL",
+ "db_password": "rangeradmin01",
+ "db_root_user": "root",
+ "policymgr_external_url": "http://c6401.ambari.apache.org:6080",
+ "db_name": "ranger01",
+ "db_host": "c6401.ambari.apache.org",
+ "db_root_password": "vagrant",
"SQL_CONNECTOR_JAR": "{{driver_curl_target}}"
- },
+ },
"ranger-kms-policymgr-ssl": {
- "xasecure.policymgr.clientssl.keystore": "/usr/hdp/current/ranger-kms/conf/ranger-plugin-keystore.jks",
- "xasecure.policymgr.clientssl.truststore.password": "changeit",
- "xasecure.policymgr.clientssl.keystore.credential.file": "jceks://file{{credential_file}}",
- "xasecure.policymgr.clientssl.truststore": "/usr/hdp/current/ranger-kms/conf/ranger-plugin-truststore.jks",
- "xasecure.policymgr.clientssl.truststore.credential.file": "jceks://file{{credential_file}}",
+ "xasecure.policymgr.clientssl.keystore": "/usr/hdp/current/ranger-kms/conf/ranger-plugin-keystore.jks",
+ "xasecure.policymgr.clientssl.truststore.password": "changeit",
+ "xasecure.policymgr.clientssl.keystore.credential.file": "jceks://file{{credential_file}}",
+ "xasecure.policymgr.clientssl.truststore": "/usr/hdp/current/ranger-kms/conf/ranger-plugin-truststore.jks",
+ "xasecure.policymgr.clientssl.truststore.credential.file": "jceks://file{{credential_file}}",
"xasecure.policymgr.clientssl.keystore.password": "myKeyFilePassword"
- },
+ },
"hdfs-site": {
- "dfs.namenode.checkpoint.period": "21600",
- "dfs.namenode.avoid.write.stale.datanode": "true",
- "dfs.permissions.superusergroup": "hdfs",
- "dfs.namenode.startup.delay.block.deletion.sec": "3600",
- "dfs.namenode.kerberos.internal.spnego.principal": "HTTP/_HOST@EXAMPLE.COM",
- "dfs.datanode.kerberos.principal": "dn/_HOST@EXAMPLE.COM",
- "dfs.heartbeat.interval": "3",
- "dfs.content-summary.limit": "5000",
- "dfs.support.append": "true",
- "dfs.datanode.address": "0.0.0.0:1019",
- "dfs.cluster.administrators": " hdfs",
- "dfs.namenode.audit.log.async": "true",
- "dfs.datanode.balance.bandwidthPerSec": "6250000",
- "dfs.namenode.safemode.threshold-pct": "1",
- "dfs.namenode.checkpoint.edits.dir": "${dfs.namenode.checkpoint.dir}",
- "dfs.namenode.rpc-address": "c6401.ambari.apache.org:8020",
- "dfs.permissions.enabled": "true",
- "dfs.namenode.kerberos.principal": "nn/_HOST@EXAMPLE.COM",
- "dfs.client.read.shortcircuit": "true",
- "dfs.https.port": "50470",
- "dfs.namenode.https-address": "c6401.ambari.apache.org:50470",
- "nfs.file.dump.dir": "/tmp/.hdfs-nfs",
- "dfs.blocksize": "134217728",
- "dfs.blockreport.initialDelay": "120",
- "dfs.journalnode.edits.dir": "/hadoop/hdfs/journalnode",
- "dfs.namenode.fslock.fair": "false",
- "dfs.datanode.max.transfer.threads": "4096",
- "dfs.secondary.namenode.kerberos.internal.spnego.principal": "HTTP/_HOST@EXAMPLE.COM",
- "dfs.replication": "3",
- "dfs.namenode.handler.count": "50",
- "dfs.web.authentication.kerberos.keytab": "/etc/security/keytabs/spnego.service.keytab",
- "fs.permissions.umask-mode": "022",
- "dfs.namenode.stale.datanode.interval": "30000",
- "dfs.datanode.ipc.address": "0.0.0.0:8010",
- "dfs.datanode.failed.volumes.tolerated": "0",
- "dfs.datanode.data.dir": "/grid/0/hadoop/hdfs/data",
- "dfs.namenode.http-address": "c6401.ambari.apache.org:50070",
- "dfs.webhdfs.enabled": "true",
- "dfs.encrypt.data.transfer.cipher.suites": "AES/CTR/NoPadding",
- "dfs.namenode.accesstime.precision": "0",
- "dfs.namenode.write.stale.datanode.ratio": "1.0f",
- "dfs.datanode.https.address": "0.0.0.0:50475",
- "dfs.namenode.checkpoint.dir": "/grid/0/hadoop/hdfs/namesecondary",
- "dfs.namenode.secondary.http-address": "c6401.ambari.apache.org:50090",
- "nfs.exports.allowed.hosts": "* rw",
- "dfs.namenode.checkpoint.txns": "1000000",
- "dfs.datanode.http.address": "0.0.0.0:1022",
- "dfs.datanode.du.reserved": "33011188224",
- "dfs.client.read.shortcircuit.streams.cache.size": "4096",
- "dfs.secondary.namenode.keytab.file": "/etc/security/keytabs/nn.service.keytab",
- "dfs.web.authentication.kerberos.principal": "HTTP/_HOST@EXAMPLE.COM",
- "dfs.http.policy": "HTTP_ONLY",
- "dfs.block.access.token.enable": "true",
- "dfs.client.retry.policy.enabled": "false",
- "dfs.secondary.namenode.kerberos.principal": "nn/_HOST@EXAMPLE.COM",
- "dfs.datanode.keytab.file": "/etc/security/keytabs/dn.service.keytab",
- "dfs.namenode.name.dir.restore": "true",
- "dfs.namenode.keytab.file": "/etc/security/keytabs/nn.service.keytab",
- "dfs.journalnode.https-address": "0.0.0.0:8481",
- "dfs.journalnode.http-address": "0.0.0.0:8480",
- "dfs.domain.socket.path": "/var/lib/hadoop-hdfs/dn_socket",
- "dfs.namenode.avoid.read.stale.datanode": "true",
- "dfs.hosts.exclude": "/etc/hadoop/conf/dfs.exclude",
- "dfs.datanode.data.dir.perm": "750",
- "dfs.encryption.key.provider.uri": "kms://http@c6401.ambari.apache.org:9292/kms",
- "dfs.replication.max": "50",
+ "dfs.namenode.checkpoint.period": "21600",
+ "dfs.namenode.avoid.write.stale.datanode": "true",
+ "dfs.permissions.superusergroup": "hdfs",
+ "dfs.namenode.startup.delay.block.deletion.sec": "3600",
+ "dfs.namenode.kerberos.internal.spnego.principal": "HTTP/_HOST@EXAMPLE.COM",
+ "dfs.datanode.kerberos.principal": "dn/_HOST@EXAMPLE.COM",
+ "dfs.heartbeat.interval": "3",
+ "dfs.content-summary.limit": "5000",
+ "dfs.support.append": "true",
+ "dfs.datanode.address": "0.0.0.0:1019",
+ "dfs.cluster.administrators": " hdfs",
+ "dfs.namenode.audit.log.async": "true",
+ "dfs.datanode.balance.bandwidthPerSec": "6250000",
+ "dfs.namenode.safemode.threshold-pct": "1",
+ "dfs.namenode.checkpoint.edits.dir": "${dfs.namenode.checkpoint.dir}",
+ "dfs.namenode.rpc-address": "c6401.ambari.apache.org:8020",
+ "dfs.permissions.enabled": "true",
+ "dfs.namenode.kerberos.principal": "nn/_HOST@EXAMPLE.COM",
+ "dfs.client.read.shortcircuit": "true",
+ "dfs.https.port": "50470",
+ "dfs.namenode.https-address": "c6401.ambari.apache.org:50470",
+ "nfs.file.dump.dir": "/tmp/.hdfs-nfs",
+ "dfs.blocksize": "134217728",
+ "dfs.blockreport.initialDelay": "120",
+ "dfs.journalnode.edits.dir": "/hadoop/hdfs/journalnode",
+ "dfs.namenode.fslock.fair": "false",
+ "dfs.datanode.max.transfer.threads": "4096",
+ "dfs.secondary.namenode.kerberos.internal.spnego.principal": "HTTP/_HOST@EXAMPLE.COM",
+ "dfs.replication": "3",
+ "dfs.namenode.handler.count": "50",
+ "dfs.web.authentication.kerberos.keytab": "/etc/security/keytabs/spnego.service.keytab",
+ "fs.permissions.umask-mode": "022",
+ "dfs.namenode.stale.datanode.interval": "30000",
+ "dfs.datanode.ipc.address": "0.0.0.0:8010",
+ "dfs.datanode.failed.volumes.tolerated": "0",
+ "dfs.datanode.data.dir": "/grid/0/hadoop/hdfs/data",
+ "dfs.namenode.http-address": "c6401.ambari.apache.org:50070",
+ "dfs.webhdfs.enabled": "true",
+ "dfs.encrypt.data.transfer.cipher.suites": "AES/CTR/NoPadding",
+ "dfs.namenode.accesstime.precision": "0",
+ "dfs.namenode.write.stale.datanode.ratio": "1.0f",
+ "dfs.datanode.https.address": "0.0.0.0:50475",
+ "dfs.namenode.checkpoint.dir": "/grid/0/hadoop/hdfs/namesecondary",
+ "dfs.namenode.secondary.http-address": "c6401.ambari.apache.org:50090",
+ "nfs.exports.allowed.hosts": "* rw",
+ "dfs.namenode.checkpoint.txns": "1000000",
+ "dfs.datanode.http.address": "0.0.0.0:1022",
+ "dfs.datanode.du.reserved": "33011188224",
+ "dfs.client.read.shortcircuit.streams.cache.size": "4096",
+ "dfs.secondary.namenode.keytab.file": "/etc/security/keytabs/nn.service.keytab",
+ "dfs.web.authentication.kerberos.principal": "HTTP/_HOST@EXAMPLE.COM",
+ "dfs.http.policy": "HTTP_ONLY",
+ "dfs.block.access.token.enable": "true",
+ "dfs.client.retry.policy.enabled": "false",
+ "dfs.secondary.namenode.kerberos.principal": "nn/_HOST@EXAMPLE.COM",
+ "dfs.datanode.keytab.file": "/etc/security/keytabs/dn.service.keytab",
+ "dfs.namenode.name.dir.restore": "true",
+ "dfs.namenode.keytab.file": "/etc/security/keytabs/nn.service.keytab",
+ "dfs.journalnode.https-address": "0.0.0.0:8481",
+ "dfs.journalnode.http-address": "0.0.0.0:8480",
+ "dfs.domain.socket.path": "/var/lib/hadoop-hdfs/dn_socket",
+ "dfs.namenode.avoid.read.stale.datanode": "true",
+ "dfs.hosts.exclude": "/etc/hadoop/conf/dfs.exclude",
+ "dfs.datanode.data.dir.perm": "750",
+ "dfs.encryption.key.provider.uri": "kms://http@c6401.ambari.apache.org:9292/kms",
+ "dfs.replication.max": "50",
"dfs.namenode.name.dir": "/grid/0/hadoop/hdfs/namenode"
- },
+ },
"ranger-tagsync-site": {
- "ranger.tagsync.dest.ranger.ssl.config.filename": "/usr/hdp/current/ranger-tagsync/conf/mytruststore.jks",
- "ranger.tagsync.source.atlasrest.username": "",
- "ranger.tagsync.logdir": "/var/log/ranger/tagsync",
- "ranger.tagsync.source.atlasrest.download.interval.millis": "",
- "ranger.tagsync.keystore.filename": "/usr/hdp/current/ranger-tagsync/conf/rangertagsync.jceks",
- "ranger.tagsync.source.file.check.interval.millis": "",
- "ranger.tagsync.source.atlasrest.endpoint": "",
- "ranger.tagsync.dest.ranger.username": "rangertagsync",
- "ranger.tagsync.dest.ranger.endpoint": "{{ranger_external_url}}",
- "ranger.tagsync.kerberos.principal": "rangertagsync/_HOST@EXAMPLE.COM",
- "ranger.tagsync.kerberos.keytab": "/etc/security/keytabs/rangertagsync.service.keytab",
- "ranger.tagsync.source.atlas": "false",
- "ranger.tagsync.source.atlasrest": "false",
- "ranger.tagsync.source.file": "false",
+ "ranger.tagsync.dest.ranger.ssl.config.filename": "/usr/hdp/current/ranger-tagsync/conf/mytruststore.jks",
+ "ranger.tagsync.source.atlasrest.username": "",
+ "ranger.tagsync.logdir": "/var/log/ranger/tagsync",
+ "ranger.tagsync.source.atlasrest.download.interval.millis": "",
+ "ranger.tagsync.keystore.filename": "/usr/hdp/current/ranger-tagsync/conf/rangertagsync.jceks",
+ "ranger.tagsync.source.file.check.interval.millis": "",
+ "ranger.tagsync.source.atlasrest.endpoint": "",
+ "ranger.tagsync.dest.ranger.username": "rangertagsync",
+ "ranger.tagsync.dest.ranger.endpoint": "{{ranger_external_url}}",
+ "ranger.tagsync.kerberos.principal": "rangertagsync/_HOST@EXAMPLE.COM",
+ "ranger.tagsync.kerberos.keytab": "/etc/security/keytabs/rangertagsync.service.keytab",
+ "ranger.tagsync.source.atlas": "false",
+ "ranger.tagsync.source.atlasrest": "false",
+ "ranger.tagsync.source.file": "false",
"ranger.tagsync.source.file.filename": ""
- },
+ },
"tagsync-log4j": {
"content": "\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n\nlog4j.rootLogger = info,logFile\n\n# logFile\nlog4j.appender.logFile=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.logFile.file=${logdir}/tagsync
.log\nlog4j.appender.logFile.datePattern='.'yyyy-MM-dd\nlog4j.appender.logFile.layout=org.apache.log4j.PatternLayout\nlog4j.appender.logFile.layout.ConversionPattern=%d{dd MMM yyyy HH:mm:ss} %5p %c{1} [%t] - %L %m%n\n\n# console\nlog4j.appender.console=org.apache.log4j.ConsoleAppender\nlog4j.appender.console.Target=System.out\nlog4j.appender.console.layout=org.apache.log4j.PatternLayout\nlog4j.appender.console.layout.ConversionPattern=%d{dd MMM yyyy HH:mm:ss} %5p %c{1} [%t] - %L %m%n"
- },
+ },
"ranger-kms-audit": {
- "xasecure.audit.destination.solr.zookeepers": "NONE",
- "xasecure.audit.destination.solr.urls": "",
- "xasecure.audit.destination.solr.batch.filespool.dir": "/var/log/ranger/kms/audit/solr/spool",
- "xasecure.audit.destination.hdfs.batch.filespool.dir": "/var/log/ranger/kms/audit/hdfs/spool",
- "xasecure.audit.destination.hdfs": "true",
- "xasecure.audit.destination.solr": "true",
+ "xasecure.audit.destination.solr.zookeepers": "NONE",
+ "xasecure.audit.destination.solr.urls": "",
+ "xasecure.audit.destination.solr.batch.filespool.dir": "/var/log/ranger/kms/audit/solr/spool",
+ "xasecure.audit.destination.hdfs.batch.filespool.dir": "/var/log/ranger/kms/audit/hdfs/spool",
+ "xasecure.audit.destination.hdfs": "true",
+ "xasecure.audit.destination.solr": "true",
"xasecure.audit.provider.summary.enabled": "false",
- "xasecure.audit.destination.hdfs.dir": "hdfs://c6401.ambari.apache.org:8020/ranger/audit",
+ "xasecure.audit.destination.hdfs.dir": "hdfs://c6401.ambari.apache.org:8020/ranger/audit",
"xasecure.audit.is.enabled": "true"
- },
+ },
"hadoop-policy": {
- "security.job.client.protocol.acl": "*",
- "security.job.task.protocol.acl": "*",
- "security.datanode.protocol.acl": "*",
- "security.namenode.protocol.acl": "*",
- "security.client.datanode.protocol.acl": "*",
- "security.inter.tracker.protocol.acl": "*",
- "security.refresh.usertogroups.mappings.protocol.acl": "hadoop",
- "security.client.protocol.acl": "*",
- "security.refresh.policy.protocol.acl": "hadoop",
- "security.admin.operations.protocol.acl": "hadoop",
+ "security.job.client.protocol.acl": "*",
+ "security.job.task.protocol.acl": "*",
+ "security.datanode.protocol.acl": "*",
+ "security.namenode.protocol.acl": "*",
+ "security.client.datanode.protocol.acl": "*",
+ "security.inter.tracker.protocol.acl": "*",
+ "security.refresh.usertogroups.mappings.protocol.acl": "hadoop",
+ "security.client.protocol.acl": "*",
+ "security.refresh.policy.protocol.acl": "hadoop",
+ "security.admin.operations.protocol.acl": "hadoop",
"security.inter.datanode.protocol.acl": "*"
- },
+ },
"hdfs-log4j": {
"content": "\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n\n\n# Define some default values that can be overridden by system properties\n# To change daemon root logger use hadoop_root_logger in hadoop-env\nhadoop.root.lo
gger=INFO,console\nhadoop.log.dir=.\nhadoop.log.file=hadoop.log\n\n\n# Define the root logger to the system property \"hadoop.root.logger\".\nlog4j.rootLogger=${hadoop.root.logger}, EventCounter\n\n# Logging Threshold\nlog4j.threshhold=ALL\n\n#\n# Daily Rolling File Appender\n#\n\nlog4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}\n\n# Rollver at midnight\nlog4j.appender.DRFA.DatePattern=.yyyy-MM-dd\n\n# 30-day backup\n#log4j.appender.DRFA.MaxBackupIndex=30\nlog4j.appender.DRFA.layout=org.apache.log4j.PatternLayout\n\n# Pattern format: Date LogLevel LoggerName LogMessage\nlog4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\n# Debugging Pattern format\n#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n\n\n\n#\n# console\n# Add \"console\" to rootlogger above if you want to use this\n#\n\nlog4j.appender.console=org.apache.log4j.ConsoleAppender\nlog4j.appender
.console.target=System.err\nlog4j.appender.console.layout=org.apache.log4j.PatternLayout\nlog4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n\n\n#\n# TaskLog Appender\n#\n\n#Default values\nhadoop.tasklog.taskid=null\nhadoop.tasklog.iscleanup=false\nhadoop.tasklog.noKeepSplits=4\nhadoop.tasklog.totalLogFileSize=100\nhadoop.tasklog.purgeLogSplits=true\nhadoop.tasklog.logsRetainHours=12\n\nlog4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender\nlog4j.appender.TLA.taskId=${hadoop.tasklog.taskid}\nlog4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}\nlog4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}\n\nlog4j.appender.TLA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\n\n#\n#Security audit appender\n#\nhadoop.security.logger=INFO,console\nhadoop.security.log.maxfilesize=256MB\nhadoop.security.log.maxbackupindex=20\nlog4j.category.SecurityLogger=${hadoop.security.logg
er}\nhadoop.security.log.file=SecurityAuth.audit\nlog4j.appender.DRFAS=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}\nlog4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout\nlog4j.appender.DRFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\nlog4j.appender.DRFAS.DatePattern=.yyyy-MM-dd\n\nlog4j.appender.RFAS=org.apache.log4j.RollingFileAppender\nlog4j.appender.RFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}\nlog4j.appender.RFAS.layout=org.apache.log4j.PatternLayout\nlog4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\nlog4j.appender.RFAS.MaxFileSize=${hadoop.security.log.maxfilesize}\nlog4j.appender.RFAS.MaxBackupIndex=${hadoop.security.log.maxbackupindex}\n\n#\n# hdfs audit logging\n#\nhdfs.audit.logger=INFO,console\nlog4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger}\nlog4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=
false\nlog4j.appender.DRFAAUDIT=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFAAUDIT.File=${hadoop.log.dir}/hdfs-audit.log\nlog4j.appender.DRFAAUDIT.layout=org.apache.log4j.PatternLayout\nlog4j.appender.DRFAAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n\nlog4j.appender.DRFAAUDIT.DatePattern=.yyyy-MM-dd\n\n#\n# NameNode metrics logging.\n# The default is to retain two namenode-metrics.log files up to 64MB each.\n#\nnamenode.metrics.logger=INFO,NullAppender\nlog4j.logger.NameNodeMetricsLog=${namenode.metrics.logger}\nlog4j.additivity.NameNodeMetricsLog=false\nlog4j.appender.NNMETRICSRFA=org.apache.log4j.RollingFileAppender\nlog4j.appender.NNMETRICSRFA.File=${hadoop.log.dir}/namenode-metrics.log\nlog4j.appender.NNMETRICSRFA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.NNMETRICSRFA.layout.ConversionPattern=%d{ISO8601} %m%n\nlog4j.appender.NNMETRICSRFA.MaxBackupIndex=1\nlog4j.appender.NNMETRICSRFA.MaxFileSize=64MB\n\n#\n# mapred audit logging\n#\nmapred
.audit.logger=INFO,console\nlog4j.logger.org.apache.hadoop.mapred.AuditLogger=${mapred.audit.logger}\nlog4j.additivity.org.apache.hadoop.mapred.AuditLogger=false\nlog4j.appender.MRAUDIT=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.MRAUDIT.File=${hadoop.log.dir}/mapred-audit.log\nlog4j.appender.MRAUDIT.layout=org.apache.log4j.PatternLayout\nlog4j.appender.MRAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n\nlog4j.appender.MRAUDIT.DatePattern=.yyyy-MM-dd\n\n#\n# Rolling File Appender\n#\n\nlog4j.appender.RFA=org.apache.log4j.RollingFileAppender\nlog4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}\n\n# Logfile size and and 30-day backups\nlog4j.appender.RFA.MaxFileSize=256MB\nlog4j.appender.RFA.MaxBackupIndex=10\n\nlog4j.appender.RFA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} - %m%n\nlog4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n\n\n\n# Custom Logging leve
ls\n\nhadoop.metrics.log.level=INFO\n#log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG\n#log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG\n#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG\nlog4j.logger.org.apache.hadoop.metrics2=${hadoop.metrics.log.level}\n\n# Jets3t library\nlog4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR\n\n#\n# Null Appender\n# Trap security logger on the hadoop client side\n#\nlog4j.appender.NullAppender=org.apache.log4j.varia.NullAppender\n\n#\n# Event Counter Appender\n# Sends counts of logging messages at different severity levels to Hadoop Metrics.\n#\nlog4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter\n\n# Removes \"deprecated\" messages\nlog4j.logger.org.apache.hadoop.conf.Configuration.deprecation=WARN\n\n#\n# HDFS block state change log from block manager\n#\n# Uncomment the following to suppress normal block state change\n# messages from BlockManager in NameNode.\n#log4j.logger.BlockStateCh
ange=WARN"
- },
+ },
"usersync-log4j": {
"content": "\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nlog4j.rootLogger = info,logFile\n\n# logFile\nlog4j.appender.logFile=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.logFile.file=${logdir}/usersync.
log\nlog4j.appender.logFile.datePattern='.'yyyy-MM-dd\nlog4j.appender.logFile.layout=org.apache.log4j.PatternLayout\nlog4j.appender.logFile.layout.ConversionPattern=%d{dd MMM yyyy HH:mm:ss} %5p %c{1} [%t] - %m%n\n\n# console\nlog4j.appender.console=org.apache.log4j.ConsoleAppender\nlog4j.appender.console.Target=System.out\nlog4j.appender.console.layout=org.apache.log4j.PatternLayout\nlog4j.appender.console.layout.ConversionPattern=%d{dd MMM yyyy HH:mm:ss} %5p %c{1} [%t] - %m%n"
- },
+ },
"krb5-conf": {
- "domains": "",
- "manage_krb5_conf": "true",
- "content": "\n[libdefaults]\n renew_lifetime = 7d\n forwardable = true\n default_realm = {{realm}}\n ticket_lifetime = 24h\n dns_lookup_realm = false\n dns_lookup_kdc = false\n default_ccache_name = /tmp/krb5cc_%{uid}\n #default_tgs_enctypes = {{encryption_types}}\n #default_tkt_enctypes = {{encryption_types}}\n{% if domains %}\n[domain_realm]\n{%- for domain in domains.split(',') %}\n {{domain|trim()}} = {{realm}}\n{%- endfor %}\n{% endif %}\n[logging]\n default = FILE:/var/log/krb5kdc.log\n admin_server = FILE:/var/log/kadmind.log\n kdc = FILE:/var/log/krb5kdc.log\n\n[realms]\n {{realm}} = {\n{%- if kdc_hosts > 0 -%}\n{%- set kdc_host_list = kdc_hosts.split(',') -%}\n{%- if kdc_host_list and kdc_host_list|length > 0 %}\n admin_server = {{admin_server_host|default(kdc_host_list[0]|trim(), True)}}\n{%- if kdc_host_list -%}\n{% for kdc_host in kdc_host_list %}\n kdc = {{kdc_host|trim()}}\n{%- endfor -%}\n{% endif %}\n{%- endif %}\n{%- endif %}\n }\n\
n{# Append additional realm declarations below #}",
+ "domains": "",
+ "manage_krb5_conf": "true",
+ "content": "\n[libdefaults]\n renew_lifetime = 7d\n forwardable = true\n default_realm = {{realm}}\n ticket_lifetime = 24h\n dns_lookup_realm = false\n dns_lookup_kdc = false\n default_ccache_name = /tmp/krb5cc_%{uid}\n #default_tgs_enctypes = {{encryption_types}}\n #default_tkt_enctypes = {{encryption_types}}\n{% if domains %}\n[domain_realm]\n{%- for domain in domains.split(',') %}\n {{domain|trim()}} = {{realm}}\n{%- endfor %}\n{% endif %}\n[logging]\n default = FILE:/var/log/krb5kdc.log\n admin_server = FILE:/var/log/kadmind.log\n kdc = FILE:/var/log/krb5kdc.log\n\n[realms]\n {{realm}} = {\n{%- if kdc_hosts > 0 -%}\n{%- set kdc_host_list = kdc_hosts.split(',') -%}\n{%- if kdc_host_list and kdc_host_list|length > 0 %}\n admin_server = {{admin_server_host|default(kdc_host_list[0]|trim(), True)}}\n{%- if kdc_host_list -%}\n{% for kdc_host in kdc_host_list %}\n kdc = {{kdc_host|trim()}}\n{%- endfor -%}\n{% endif %}\n{%- endif %}\n{%- endif %}\n }\n\
n{# Append additional realm declarations below #}",
"conf_dir": "/etc"
- },
+ },
"kms-site": {
- "hadoop.kms.proxyuser.ranger.hosts": "*",
- "hadoop.kms.authentication.type": "kerberos",
- "hadoop.kms.proxyuser.ranger.groups": "*",
- "hadoop.kms.authentication.signer.secret.provider.zookeeper.path": "/hadoop-kms/hadoop-auth-signature-secret",
- "hadoop.kms.security.authorization.manager": "org.apache.ranger.authorization.kms.authorizer.RangerKmsAuthorizer",
- "hadoop.kms.authentication.kerberos.name.rules": "RULE:[1:$1@$0](ambari-qa-test_cluster01@EXAMPLE.COM)s/.*/ambari-qa/\nRULE:[1:$1@$0](hdfs-test_cluster01@EXAMPLE.COM)s/.*/hdfs/\nRULE:[1:$1@$0](.*@EXAMPLE.COM)s/@.*//\nRULE:[2:$1@$0](dn@EXAMPLE.COM)s/.*/hdfs/\nRULE:[2:$1@$0](nn@EXAMPLE.COM)s/.*/hdfs/\nRULE:[2:$1@$0](rangeradmin@EXAMPLE.COM)s/.*/ranger/\nRULE:[2:$1@$0](rangerkms@EXAMPLE.COM)s/.*/keyadmin/\nRULE:[2:$1@$0](rangertagsync@EXAMPLE.COM)s/.*/rangertagsync/\nRULE:[2:$1@$0](rangerusersync@EXAMPLE.COM)s/.*/rangerusersync/\nDEFAULT",
- "hadoop.kms.authentication.signer.secret.provider.zookeeper.kerberos.keytab": "/etc/security/keytabs/spnego.service.keytab",
- "hadoop.kms.current.key.cache.timeout.ms": "30000",
- "hadoop.kms.authentication.kerberos.keytab": "/etc/security/keytabs/spnego.service.keytab",
- "hadoop.kms.audit.aggregation.window.ms": "10000",
- "hadoop.kms.proxyuser.ranger.users": "*",
- "hadoop.kms.authentication.signer.secret.provider.zookeeper.auth.type": "kerberos",
- "hadoop.kms.key.provider.uri": "dbks://http@localhost:9292/kms",
- "hadoop.security.keystore.JavaKeyStoreProvider.password": "none",
- "hadoop.kms.authentication.signer.secret.provider.zookeeper.kerberos.principal": "HTTP/_HOST@EXAMPLE.COM",
- "hadoop.kms.authentication.signer.secret.provider": "random",
- "hadoop.kms.authentication.signer.secret.provider.zookeeper.connection.string": "#HOSTNAME#:#PORT#,...",
- "hadoop.kms.cache.enable": "true",
- "hadoop.kms.cache.timeout.ms": "600000",
+ "hadoop.kms.proxyuser.ranger.hosts": "*",
+ "hadoop.kms.authentication.type": "kerberos",
+ "hadoop.kms.proxyuser.ranger.groups": "*",
+ "hadoop.kms.authentication.signer.secret.provider.zookeeper.path": "/hadoop-kms/hadoop-auth-signature-secret",
+ "hadoop.kms.security.authorization.manager": "org.apache.ranger.authorization.kms.authorizer.RangerKmsAuthorizer",
+ "hadoop.kms.authentication.kerberos.name.rules": "RULE:[1:$1@$0](ambari-qa-test_cluster01@EXAMPLE.COM)s/.*/ambari-qa/\nRULE:[1:$1@$0](hdfs-test_cluster01@EXAMPLE.COM)s/.*/hdfs/\nRULE:[1:$1@$0](.*@EXAMPLE.COM)s/@.*//\nRULE:[2:$1@$0](dn@EXAMPLE.COM)s/.*/hdfs/\nRULE:[2:$1@$0](nn@EXAMPLE.COM)s/.*/hdfs/\nRULE:[2:$1@$0](rangeradmin@EXAMPLE.COM)s/.*/ranger/\nRULE:[2:$1@$0](rangerkms@EXAMPLE.COM)s/.*/keyadmin/\nRULE:[2:$1@$0](rangertagsync@EXAMPLE.COM)s/.*/rangertagsync/\nRULE:[2:$1@$0](rangerusersync@EXAMPLE.COM)s/.*/rangerusersync/\nDEFAULT",
+ "hadoop.kms.authentication.signer.secret.provider.zookeeper.kerberos.keytab": "/etc/security/keytabs/spnego.service.keytab",
+ "hadoop.kms.current.key.cache.timeout.ms": "30000",
+ "hadoop.kms.authentication.kerberos.keytab": "/etc/security/keytabs/spnego.service.keytab",
+ "hadoop.kms.audit.aggregation.window.ms": "10000",
+ "hadoop.kms.proxyuser.ranger.users": "*",
+ "hadoop.kms.authentication.signer.secret.provider.zookeeper.auth.type": "kerberos",
+ "hadoop.kms.key.provider.uri": "dbks://http@localhost:9292/kms",
+ "hadoop.security.keystore.JavaKeyStoreProvider.password": "none",
+ "hadoop.kms.authentication.signer.secret.provider.zookeeper.kerberos.principal": "HTTP/_HOST@EXAMPLE.COM",
+ "hadoop.kms.authentication.signer.secret.provider": "random",
+ "hadoop.kms.authentication.signer.secret.provider.zookeeper.connection.string": "#HOSTNAME#:#PORT#,...",
+ "hadoop.kms.cache.enable": "true",
+ "hadoop.kms.cache.timeout.ms": "600000",
"hadoop.kms.authentication.kerberos.principal": "*"
- },
+ },
"core-site": {
- "net.topology.script.file.name": "/etc/hadoop/conf/topology_script.py",
- "hadoop.proxyuser.hdfs.groups": "*",
- "fs.trash.interval": "360",
- "ipc.server.tcpnodelay": "true",
- "io.compression.codecs": "org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec,org.apache.hadoop.io.compress.SnappyCodec",
- "ipc.client.idlethreshold": "8000",
- "io.file.buffer.size": "131072",
- "hadoop.proxyuser.ambari-server-test_cluster01.groups": "*",
- "io.serializations": "org.apache.hadoop.io.serializer.WritableSerialization",
- "hadoop.security.authentication": "kerberos",
- "mapreduce.jobtracker.webinterface.trusted": "false",
- "hadoop.proxyuser.kms.groups": "*",
- "hadoop.proxyuser.hdfs.hosts": "*",
- "hadoop.proxyuser.HTTP.groups": "users",
- "fs.defaultFS": "hdfs://c6401.ambari.apache.org:8020",
- "ha.failover-controller.active-standby-elector.zk.op.retries": "120",
- "hadoop.security.key.provider.path": "kms://http@c6401.ambari.apache.org:9292/kms",
- "hadoop.security.authorization": "true",
- "hadoop.http.authentication.simple.anonymous.allowed": "true",
- "ipc.client.connect.max.retries": "50",
- "hadoop.security.auth_to_local": "RULE:[1:$1@$0](ambari-qa-test_cluster01@EXAMPLE.COM)s/.*/ambari-qa/\nRULE:[1:$1@$0](hdfs-test_cluster01@EXAMPLE.COM)s/.*/hdfs/\nRULE:[1:$1@$0](.*@EXAMPLE.COM)s/@.*//\nRULE:[2:$1@$0](dn@EXAMPLE.COM)s/.*/hdfs/\nRULE:[2:$1@$0](nn@EXAMPLE.COM)s/.*/hdfs/\nRULE:[2:$1@$0](rangeradmin@EXAMPLE.COM)s/.*/ranger/\nRULE:[2:$1@$0](rangerkms@EXAMPLE.COM)s/.*/keyadmin/\nRULE:[2:$1@$0](rangertagsync@EXAMPLE.COM)s/.*/rangertagsync/\nRULE:[2:$1@$0](rangerusersync@EXAMPLE.COM)s/.*/rangerusersync/\nDEFAULT",
- "hadoop.proxyuser.ambari-server-test_cluster01.hosts": "c6401.ambari.apache.org",
+ "net.topology.script.file.name": "/etc/hadoop/conf/topology_script.py",
+ "hadoop.proxyuser.hdfs.groups": "*",
+ "fs.trash.interval": "360",
+ "ipc.server.tcpnodelay": "true",
+ "io.compression.codecs": "org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec,org.apache.hadoop.io.compress.SnappyCodec",
+ "ipc.client.idlethreshold": "8000",
+ "io.file.buffer.size": "131072",
+ "hadoop.proxyuser.ambari-server-test_cluster01.groups": "*",
+ "io.serializations": "org.apache.hadoop.io.serializer.WritableSerialization",
+ "hadoop.security.authentication": "kerberos",
+ "mapreduce.jobtracker.webinterface.trusted": "false",
+ "hadoop.proxyuser.kms.groups": "*",
+ "hadoop.proxyuser.hdfs.hosts": "*",
+ "hadoop.proxyuser.HTTP.groups": "users",
+ "fs.defaultFS": "hdfs://c6401.ambari.apache.org:8020",
+ "ha.failover-controller.active-standby-elector.zk.op.retries": "120",
+ "hadoop.security.key.provider.path": "kms://http@c6401.ambari.apache.org:9292/kms",
+ "hadoop.security.authorization": "true",
+ "hadoop.http.authentication.simple.anonymous.allowed": "true",
+ "ipc.client.connect.max.retries": "50",
+ "hadoop.security.auth_to_local": "RULE:[1:$1@$0](ambari-qa-test_cluster01@EXAMPLE.COM)s/.*/ambari-qa/\nRULE:[1:$1@$0](hdfs-test_cluster01@EXAMPLE.COM)s/.*/hdfs/\nRULE:[1:$1@$0](.*@EXAMPLE.COM)s/@.*//\nRULE:[2:$1@$0](dn@EXAMPLE.COM)s/.*/hdfs/\nRULE:[2:$1@$0](nn@EXAMPLE.COM)s/.*/hdfs/\nRULE:[2:$1@$0](rangeradmin@EXAMPLE.COM)s/.*/ranger/\nRULE:[2:$1@$0](rangerkms@EXAMPLE.COM)s/.*/keyadmin/\nRULE:[2:$1@$0](rangertagsync@EXAMPLE.COM)s/.*/rangertagsync/\nRULE:[2:$1@$0](rangerusersync@EXAMPLE.COM)s/.*/rangerusersync/\nDEFAULT",
+ "hadoop.proxyuser.ambari-server-test_cluster01.hosts": "c6401.ambari.apache.org",
"ipc.client.connection.maxidletime": "30000"
- },
+ },
"hadoop-env": {
- "keyserver_port": "",
- "proxyuser_group": "users",
- "hdfs_user_nproc_limit": "65536",
- "hdfs_log_dir_prefix": "/var/log/hadoop",
- "hdfs_user_nofile_limit": "128000",
- "hdfs_user": "hdfs",
+ "keyserver_port": "",
+ "proxyuser_group": "users",
+ "hdfs_user_nproc_limit": "65536",
+ "hdfs_log_dir_prefix": "/var/log/hadoop",
+ "hdfs_user_nofile_limit": "128000",
+ "hdfs_user": "hdfs",
"hdfs_principal_name": "hdfs-cl1@EXAMPLE.COM",
- "keyserver_host": " ",
- "namenode_opt_maxnewsize": "128m",
- "hdfs_user_keytab": "/etc/security/keytabs/hdfs.headless.keytab",
- "namenode_opt_maxpermsize": "256m",
- "content": "\n# Set Hadoop-specific environment variables here.\n\n# The only required environment variable is JAVA_HOME. All others are\n# optional. When running a distributed configuration it is best to\n# set JAVA_HOME in this file, so that it is correctly defined on\n# remote nodes.\n\n# The java implementation to use. Required.\nexport JAVA_HOME={{java_home}}\nexport HADOOP_HOME_WARN_SUPPRESS=1\n\n# Hadoop home directory\nexport HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n\n# Hadoop Configuration Directory\n\n{# this is different for HDP1 #}\n# Path to jsvc required by secure HDP 2.0 datanode\nexport JSVC_HOME={{jsvc_path}}\n\n\n# The maximum amount of heap to use, in MB. Default is 1000.\nexport HADOOP_HEAPSIZE=\"{{hadoop_heapsize}}\"\n\nexport HADOOP_NAMENODE_INIT_HEAPSIZE=\"-Xms{{namenode_heapsize}}\"\n\n# Extra Java runtime options. Empty by default.\nexport HADOOP_OPTS=\"-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}\"\n\n# Command specific options appende
d to HADOOP_OPTS when specified\nHADOOP_JOBTRACKER_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}\"\n\nHADOOP_TASKTRACKER_OPTS=\"-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}\"\n\n{% if java_version < 8 %}\nSHARED_HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSiz
e={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT\"\nexport HADOOP_NAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-namenode/bin/kill-name-node\\\" -Dorg.mortbay.jetty.Request.maxFormContentSize=-1 ${HADOOP_NAMENODE_OPTS}\"\nexport HADOOP_DATANODE_OPTS=\"-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -XX:PermSize=128m -XX:MaxPermSize=256m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms
{{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS}\"\n\nexport HADOOP_SECONDARYNAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-secondarynamenode/bin/kill-secondary-name-node\\\" ${HADOOP_SECONDARYNAMENODE_OPTS}\"\n\n# The following applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m -XX:MaxPermSize=512m $HADOOP_CLIENT_OPTS\"\n\n{% else %}\nSHARED_HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -Xms{{name
node_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT\"\nexport HADOOP_NAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/ha
<TRUNCATED>
[09/18] ambari git commit: AMBARI-21430 - Allow Multiple Versions of
Stack Tools to Co-Exist (jonathanhurley)
Posted by rl...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/f33a250c/ambari-server/src/test/python/stacks/2.5/configs/ranger-admin-default.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.5/configs/ranger-admin-default.json b/ambari-server/src/test/python/stacks/2.5/configs/ranger-admin-default.json
index a1d930c..fb77531 100644
--- a/ambari-server/src/test/python/stacks/2.5/configs/ranger-admin-default.json
+++ b/ambari-server/src/test/python/stacks/2.5/configs/ranger-admin-default.json
@@ -1,150 +1,150 @@
{
"localComponents": [
- "NAMENODE",
- "SECONDARY_NAMENODE",
- "ZOOKEEPER_SERVER",
- "DATANODE",
- "HDFS_CLIENT",
- "ZOOKEEPER_CLIENT",
- "RANGER_USERSYNC",
- "RANGER_ADMIN",
+ "NAMENODE",
+ "SECONDARY_NAMENODE",
+ "ZOOKEEPER_SERVER",
+ "DATANODE",
+ "HDFS_CLIENT",
+ "ZOOKEEPER_CLIENT",
+ "RANGER_USERSYNC",
+ "RANGER_ADMIN",
"RANGER_TAGSYNC",
"LOGSEARCH_SOLR",
"LOGSEARCH_SOLR_CLIENT"
- ],
+ ],
"configuration_attributes": {
- "ranger-hdfs-audit": {},
- "ssl-client": {},
- "ranger-admin-site": {},
- "ranger-hdfs-policymgr-ssl": {},
- "tagsync-application-properties": {},
- "ranger-env": {},
- "usersync-log4j": {},
- "admin-properties": {},
- "ranger-ugsync-site": {},
+ "ranger-hdfs-audit": {},
+ "ssl-client": {},
+ "ranger-admin-site": {},
+ "ranger-hdfs-policymgr-ssl": {},
+ "tagsync-application-properties": {},
+ "ranger-env": {},
+ "usersync-log4j": {},
+ "admin-properties": {},
+ "ranger-ugsync-site": {},
"hdfs-site": {
"final": {
- "dfs.datanode.data.dir": "true",
- "dfs.namenode.http-address": "true",
- "dfs.datanode.failed.volumes.tolerated": "true",
- "dfs.support.append": "true",
- "dfs.namenode.name.dir": "true",
+ "dfs.datanode.data.dir": "true",
+ "dfs.namenode.http-address": "true",
+ "dfs.datanode.failed.volumes.tolerated": "true",
+ "dfs.support.append": "true",
+ "dfs.namenode.name.dir": "true",
"dfs.webhdfs.enabled": "true"
}
- },
- "ranger-tagsync-site": {},
- "zoo.cfg": {},
- "hadoop-policy": {},
- "hdfs-log4j": {},
- "ranger-hdfs-plugin-properties": {},
+ },
+ "ranger-tagsync-site": {},
+ "zoo.cfg": {},
+ "hadoop-policy": {},
+ "hdfs-log4j": {},
+ "ranger-hdfs-plugin-properties": {},
"core-site": {
"final": {
"fs.defaultFS": "true"
}
- },
- "hadoop-env": {},
- "zookeeper-log4j": {},
- "ssl-server": {},
- "ranger-site": {},
- "admin-log4j": {},
- "tagsync-log4j": {},
- "ranger-hdfs-security": {},
- "usersync-properties": {},
+ },
+ "hadoop-env": {},
+ "zookeeper-log4j": {},
+ "ssl-server": {},
+ "ranger-site": {},
+ "admin-log4j": {},
+ "tagsync-log4j": {},
+ "ranger-hdfs-security": {},
+ "usersync-properties": {},
"zookeeper-env": {},
"infra-solr-env": {},
"infra-solr-client-log4j": {},
"cluster-env": {}
- },
- "public_hostname": "c6401.ambari.apache.org",
- "commandId": "11-0",
- "hostname": "c6401.ambari.apache.org",
- "kerberosCommandParams": [],
- "serviceName": "RANGER",
- "role": "RANGER_ADMIN",
- "forceRefreshConfigTagsBeforeExecution": [],
- "requestId": 11,
+ },
+ "public_hostname": "c6401.ambari.apache.org",
+ "commandId": "11-0",
+ "hostname": "c6401.ambari.apache.org",
+ "kerberosCommandParams": [],
+ "serviceName": "RANGER",
+ "role": "RANGER_ADMIN",
+ "forceRefreshConfigTagsBeforeExecution": [],
+ "requestId": 11,
"agentConfigParams": {
"agent": {
"parallel_execution": 0
}
- },
- "clusterName": "c1",
- "commandType": "EXECUTION_COMMAND",
- "taskId": 31,
- "roleParams": {},
+ },
+ "clusterName": "c1",
+ "commandType": "EXECUTION_COMMAND",
+ "taskId": 31,
+ "roleParams": {},
"configurationTags": {
"ranger-hdfs-audit": {
"tag": "version1466705299922"
- },
+ },
"ssl-client": {
"tag": "version1"
- },
+ },
"ranger-admin-site": {
"tag": "version1466705299949"
- },
+ },
"ranger-hdfs-policymgr-ssl": {
"tag": "version1466705299922"
- },
+ },
"tagsync-application-properties": {
"tag": "version1466705299949"
- },
+ },
"ranger-env": {
"tag": "version1466705299949"
- },
+ },
"usersync-log4j": {
"tag": "version1466705299949"
- },
+ },
"admin-properties": {
"tag": "version1466705299949"
- },
+ },
"ranger-ugsync-site": {
"tag": "version1466705299949"
- },
+ },
"hdfs-site": {
"tag": "version1"
- },
+ },
"ranger-tagsync-site": {
"tag": "version1466705299949"
- },
+ },
"zoo.cfg": {
"tag": "version1"
- },
+ },
"hadoop-policy": {
"tag": "version1"
- },
+ },
"hdfs-log4j": {
"tag": "version1"
- },
+ },
"ranger-hdfs-plugin-properties": {
"tag": "version1466705299922"
- },
+ },
"core-site": {
"tag": "version1"
- },
+ },
"hadoop-env": {
"tag": "version1"
- },
+ },
"zookeeper-log4j": {
"tag": "version1"
- },
+ },
"ssl-server": {
"tag": "version1"
- },
+ },
"ranger-site": {
"tag": "version1466705299949"
- },
+ },
"admin-log4j": {
"tag": "version1466705299949"
- },
+ },
"tagsync-log4j": {
"tag": "version1466705299949"
- },
+ },
"ranger-hdfs-security": {
"tag": "version1466705299922"
- },
+ },
"usersync-properties": {
"tag": "version1466705299949"
- },
+ },
"zookeeper-env": {
"tag": "version1"
},
@@ -157,492 +157,492 @@
"cluster-env": {
"tag": "version1"
}
- },
- "roleCommand": "START",
+ },
+ "roleCommand": "START",
"hostLevelParams": {
- "agent_stack_retry_on_unavailability": "false",
- "stack_name": "HDP",
- "package_version": "2_5_0_0_*",
+ "agent_stack_retry_on_unavailability": "false",
+ "stack_name": "HDP",
+ "package_version": "2_5_0_0_*",
"custom_mysql_jdbc_name": "mysql-connector-java.jar",
"previous_custom_mysql_jdbc_name": "mysql-connector-java-old.jar",
- "host_sys_prepped": "false",
- "ambari_db_rca_username": "mapred",
- "current_version": "2.5.0.0-801",
- "mysql_jdbc_url": "http://c6401.ambari.apache.org:8080/resources//mysql-connector-java.jar",
- "agent_stack_retry_count": "5",
- "stack_version": "2.5",
- "jdk_name": "jdk-8u60-linux-x64.tar.gz",
- "ambari_db_rca_driver": "org.postgresql.Driver",
- "java_home": "/usr/jdk64/jdk1.7.0_45",
- "repository_version_id": "1",
- "jdk_location": "http://c6401.ambari.apache.org:8080/resources/",
- "not_managed_hdfs_path_list": "[\"/tmp\"]",
- "ambari_db_rca_url": "jdbc:postgresql://c6401.ambari.apache.org/ambarirca",
- "java_version": "8",
- "repo_info": "[{\"baseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/BUILDS/2.5.0.0-801\",\"osType\":\"redhat6\",\"repoId\":\"HDP-2.5\",\"repoName\":\"HDP\",\"defaultBaseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/updates/2.5.0.0\",\"latestBaseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/BUILDS/2.5.0.0-801\",\"baseSaved\":true},{\"baseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP-UTILS-1.1.0.21/repos/centos6\",\"osType\":\"redhat6\",\"repoId\":\"HDP-UTILS-1.1.0.21\",\"repoName\":\"HDP-UTILS\",\"defaultBaseUrl\":\"http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.21/repos/centos6\",\"latestBaseUrl\":\"http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.21/repos/centos6\",\"baseSaved\":true}]",
- "package_list": "[{\"name\":\"ranger_${stack_version}-admin\",\"condition\":\"\",\"skipUpgrade\":false},{\"name\":\"ranger_${stack_version}-usersync\",\"condition\":\"\",\"skipUpgrade\":false},{\"name\":\"ranger_${stack_version}-tagsync\",\"condition\":\"should_install_ranger_tagsync\",\"skipUpgrade\":false},{\"name\":\"ambari-logsearch-solr-client\",\"condition\":\"should_install_logsearch_solr_client\",\"skipUpgrade\":false}]",
- "db_name": "ambari",
- "group_list": "[\"ranger\",\"hadoop\",\"users\"]",
- "agentCacheDir": "/var/lib/ambari-agent/cache",
- "ambari_db_rca_password": "mapred",
- "jce_name": "jce_policy-8.zip",
- "oracle_jdbc_url": "http://c6401.ambari.apache.org:8080/resources//ojdbc6.jar",
- "db_driver_filename": "mysql-connector-java.jar",
- "user_list": "[\"zookeeper\",\"ambari-qa\",\"ranger\",\"hdfs\"]",
+ "host_sys_prepped": "false",
+ "ambari_db_rca_username": "mapred",
+ "current_version": "2.5.0.0-801",
+ "mysql_jdbc_url": "http://c6401.ambari.apache.org:8080/resources//mysql-connector-java.jar",
+ "agent_stack_retry_count": "5",
+ "stack_version": "2.5",
+ "jdk_name": "jdk-8u60-linux-x64.tar.gz",
+ "ambari_db_rca_driver": "org.postgresql.Driver",
+ "java_home": "/usr/jdk64/jdk1.7.0_45",
+ "repository_version_id": "1",
+ "jdk_location": "http://c6401.ambari.apache.org:8080/resources/",
+ "not_managed_hdfs_path_list": "[\"/tmp\"]",
+ "ambari_db_rca_url": "jdbc:postgresql://c6401.ambari.apache.org/ambarirca",
+ "java_version": "8",
+ "repo_info": "[{\"baseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/BUILDS/2.5.0.0-801\",\"osType\":\"redhat6\",\"repoId\":\"HDP-2.5\",\"repoName\":\"HDP\",\"defaultBaseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/updates/2.5.0.0\",\"latestBaseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/BUILDS/2.5.0.0-801\",\"baseSaved\":true},{\"baseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP-UTILS-1.1.0.21/repos/centos6\",\"osType\":\"redhat6\",\"repoId\":\"HDP-UTILS-1.1.0.21\",\"repoName\":\"HDP-UTILS\",\"defaultBaseUrl\":\"http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.21/repos/centos6\",\"latestBaseUrl\":\"http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.21/repos/centos6\",\"baseSaved\":true}]",
+ "package_list": "[{\"name\":\"ranger_${stack_version}-admin\",\"condition\":\"\",\"skipUpgrade\":false},{\"name\":\"ranger_${stack_version}-usersync\",\"condition\":\"\",\"skipUpgrade\":false},{\"name\":\"ranger_${stack_version}-tagsync\",\"condition\":\"should_install_ranger_tagsync\",\"skipUpgrade\":false},{\"name\":\"ambari-logsearch-solr-client\",\"condition\":\"should_install_logsearch_solr_client\",\"skipUpgrade\":false}]",
+ "db_name": "ambari",
+ "group_list": "[\"ranger\",\"hadoop\",\"users\"]",
+ "agentCacheDir": "/var/lib/ambari-agent/cache",
+ "ambari_db_rca_password": "mapred",
+ "jce_name": "jce_policy-8.zip",
+ "oracle_jdbc_url": "http://c6401.ambari.apache.org:8080/resources//ojdbc6.jar",
+ "db_driver_filename": "mysql-connector-java.jar",
+ "user_list": "[\"zookeeper\",\"ambari-qa\",\"ranger\",\"hdfs\"]",
"clientsToUpdateConfigs": "[\"*\"]"
- },
+ },
"commandParams": {
- "service_package_folder": "common-services/RANGER/0.4.0/package",
- "script": "scripts/ranger_admin.py",
- "hooks_folder": "HDP/2.0.6/hooks",
- "version": "2.5.0.0-801",
- "max_duration_for_retries": "0",
- "command_retry_enabled": "false",
- "command_timeout": "600",
+ "service_package_folder": "common-services/RANGER/0.4.0/package",
+ "script": "scripts/ranger_admin.py",
+ "hooks_folder": "HDP/2.0.6/hooks",
+ "version": "2.5.0.0-801",
+ "max_duration_for_retries": "0",
+ "command_retry_enabled": "false",
+ "command_timeout": "600",
"script_type": "PYTHON"
- },
- "forceRefreshConfigTags": [],
- "stageId": 0,
+ },
+ "forceRefreshConfigTags": [],
+ "stageId": 0,
"clusterHostInfo": {
"snamenode_host": [
"c6401.ambari.apache.org"
- ],
+ ],
"ambari_server_use_ssl": [
"false"
- ],
+ ],
"all_ping_ports": [
"8670"
- ],
+ ],
"ranger_tagsync_hosts": [
"c6401.ambari.apache.org"
- ],
+ ],
"ranger_usersync_hosts": [
"c6401.ambari.apache.org"
- ],
+ ],
"all_hosts": [
"c6401.ambari.apache.org"
- ],
+ ],
"slave_hosts": [
"c6401.ambari.apache.org"
- ],
+ ],
"namenode_host": [
"c6401.ambari.apache.org"
- ],
+ ],
"ambari_server_port": [
"8080"
- ],
+ ],
"ranger_admin_hosts": [
"c6401.ambari.apache.org"
- ],
+ ],
"all_racks": [
"/default-rack"
- ],
+ ],
"all_ipv4_ips": [
"172.22.83.73"
- ],
+ ],
"ambari_server_host": [
"c6401.ambari.apache.org"
- ],
+ ],
"zookeeper_hosts": [
"c6401.ambari.apache.org"
],
"infra_solr_hosts": [
"c6401.ambari.apache.org"
]
- },
+ },
"configurations": {
"ranger-hdfs-audit": {
- "xasecure.audit.destination.solr.zookeepers": "NONE",
- "xasecure.audit.destination.solr.urls": "",
- "xasecure.audit.destination.solr.batch.filespool.dir": "/var/log/hadoop/hdfs/audit/solr/spool",
- "xasecure.audit.destination.hdfs.batch.filespool.dir": "/var/log/hadoop/hdfs/audit/hdfs/spool",
+ "xasecure.audit.destination.solr.zookeepers": "NONE",
+ "xasecure.audit.destination.solr.urls": "",
+ "xasecure.audit.destination.solr.batch.filespool.dir": "/var/log/hadoop/hdfs/audit/solr/spool",
+ "xasecure.audit.destination.hdfs.batch.filespool.dir": "/var/log/hadoop/hdfs/audit/hdfs/spool",
"xasecure.audit.destination.hdfs": "true",
- "xasecure.audit.destination.solr": "false",
+ "xasecure.audit.destination.solr": "false",
"xasecure.audit.provider.summary.enabled": "false",
"xasecure.audit.destination.hdfs.dir": "hdfs://c6401.ambari.apache.org:8020/ranger/audit",
"xasecure.audit.is.enabled": "true"
- },
+ },
"ssl-client": {
- "ssl.client.truststore.reload.interval": "10000",
- "ssl.client.keystore.password": "bigdata",
- "ssl.client.truststore.type": "jks",
- "ssl.client.keystore.location": "/etc/security/clientKeys/keystore.jks",
- "ssl.client.truststore.location": "/etc/security/clientKeys/all.jks",
- "ssl.client.truststore.password": "bigdata",
+ "ssl.client.truststore.reload.interval": "10000",
+ "ssl.client.keystore.password": "bigdata",
+ "ssl.client.truststore.type": "jks",
+ "ssl.client.keystore.location": "/etc/security/clientKeys/keystore.jks",
+ "ssl.client.truststore.location": "/etc/security/clientKeys/all.jks",
+ "ssl.client.truststore.password": "bigdata",
"ssl.client.keystore.type": "jks"
- },
+ },
"ranger-admin-site": {
"ranger.admin.kerberos.cookie.domain": "",
- "ranger.kms.service.user.hdfs": "hdfs",
- "ranger.spnego.kerberos.principal": "",
- "ranger.ldap.ad.url": "{{ranger_ug_ldap_url}}",
- "ranger.plugins.hive.serviceuser": "hive",
- "ranger.lookup.kerberos.keytab": "",
- "ranger.plugins.kms.serviceuser": "kms",
- "ranger.service.https.attrib.ssl.enabled": "false",
- "ranger.sso.browser.useragent": "Mozilla,chrome",
- "ranger.jpa.jdbc.url": "jdbc:mysql://c6401.ambari.apache.org:3306/ranger01",
- "ranger.plugins.hbase.serviceuser": "hbase",
- "ranger.plugins.hdfs.serviceuser": "hdfs",
- "ranger.ldap.group.searchbase": "{{ranger_ug_ldap_group_searchbase}}",
- "ranger.ldap.user.dnpattern": "uid={0},ou=users,dc=xasecure,dc=net",
- "ranger.plugins.knox.serviceuser": "knox",
- "ranger.ldap.base.dn": "dc=example,dc=com",
- "ranger.sso.publicKey": "",
- "ranger.admin.kerberos.cookie.path": "/",
- "ranger.service.https.attrib.clientAuth": "want",
- "ranger.jpa.jdbc.user": "{{ranger_db_user}}",
- "ranger.ldap.ad.user.searchfilter": "(sAMAccountName={0})",
- "ranger.ldap.group.roleattribute": "cn",
- "ranger.plugins.kafka.serviceuser": "kafka",
- "ranger.admin.kerberos.principal": "",
- "ranger.ldap.ad.bind.dn": "{{ranger_ug_ldap_bind_dn}}",
- "ranger.credential.provider.path": "/etc/ranger/admin/rangeradmin.jceks",
- "ranger.ldap.referral": "ignore",
- "ranger.service.http.port": "6080",
- "ranger.ldap.user.searchfilter": "(uid={0})",
- "ranger.plugins.atlas.serviceuser": "atlas",
- "ranger.truststore.password": "changeit",
- "ranger.ldap.bind.password": "{{ranger_usersync_ldap_ldapbindpassword}}",
- "ranger.audit.solr.password": "NONE",
+ "ranger.kms.service.user.hdfs": "hdfs",
+ "ranger.spnego.kerberos.principal": "",
+ "ranger.ldap.ad.url": "{{ranger_ug_ldap_url}}",
+ "ranger.plugins.hive.serviceuser": "hive",
+ "ranger.lookup.kerberos.keytab": "",
+ "ranger.plugins.kms.serviceuser": "kms",
+ "ranger.service.https.attrib.ssl.enabled": "false",
+ "ranger.sso.browser.useragent": "Mozilla,chrome",
+ "ranger.jpa.jdbc.url": "jdbc:mysql://c6401.ambari.apache.org:3306/ranger01",
+ "ranger.plugins.hbase.serviceuser": "hbase",
+ "ranger.plugins.hdfs.serviceuser": "hdfs",
+ "ranger.ldap.group.searchbase": "{{ranger_ug_ldap_group_searchbase}}",
+ "ranger.ldap.user.dnpattern": "uid={0},ou=users,dc=xasecure,dc=net",
+ "ranger.plugins.knox.serviceuser": "knox",
+ "ranger.ldap.base.dn": "dc=example,dc=com",
+ "ranger.sso.publicKey": "",
+ "ranger.admin.kerberos.cookie.path": "/",
+ "ranger.service.https.attrib.clientAuth": "want",
+ "ranger.jpa.jdbc.user": "{{ranger_db_user}}",
+ "ranger.ldap.ad.user.searchfilter": "(sAMAccountName={0})",
+ "ranger.ldap.group.roleattribute": "cn",
+ "ranger.plugins.kafka.serviceuser": "kafka",
+ "ranger.admin.kerberos.principal": "",
+ "ranger.ldap.ad.bind.dn": "{{ranger_ug_ldap_bind_dn}}",
+ "ranger.credential.provider.path": "/etc/ranger/admin/rangeradmin.jceks",
+ "ranger.ldap.referral": "ignore",
+ "ranger.service.http.port": "6080",
+ "ranger.ldap.user.searchfilter": "(uid={0})",
+ "ranger.plugins.atlas.serviceuser": "atlas",
+ "ranger.truststore.password": "changeit",
+ "ranger.ldap.bind.password": "{{ranger_usersync_ldap_ldapbindpassword}}",
+ "ranger.audit.solr.password": "NONE",
"ranger.audit.solr.zookeepers": "c6401.ambari.apache.org:2181/ambari-solr",
- "ranger.lookup.kerberos.principal": "",
- "ranger.service.https.port": "6182",
- "ranger.plugins.storm.serviceuser": "storm",
- "ranger.externalurl": "{{ranger_external_url}}",
- "ranger.truststore.file": "/etc/ranger/admin/conf/ranger-admin-keystore.jks",
- "ranger.kms.service.user.hive": "",
- "ranger.https.attrib.keystore.file": "/etc/ranger/admin/conf/ranger-admin-keystore.jks",
- "ranger.jpa.jdbc.dialect": "{{jdbc_dialect}}",
- "ranger.service.host": "{{ranger_host}}",
- "ranger.service.https.attrib.keystore.keyalias": "rangeradmin",
- "ranger.service.https.attrib.keystore.pass": "xasecure",
- "ranger.unixauth.remote.login.enabled": "true",
- "ranger.jpa.jdbc.credential.alias": "rangeradmin",
- "ranger.ldap.ad.bind.password": "{{ranger_usersync_ldap_ldapbindpassword}}",
- "ranger.audit.solr.username": "ranger_solr",
- "ranger.sso.enabled": "false",
- "ranger.audit.solr.urls": "",
- "ranger.ldap.ad.domain": "",
- "ranger.plugins.yarn.serviceuser": "yarn",
- "ranger.audit.source.type": "solr",
- "ranger.ldap.bind.dn": "{{ranger_ug_ldap_bind_dn}}",
- "ranger.ldap.url": "{{ranger_ug_ldap_url}}",
- "ranger.authentication.method": "UNIX",
- "ranger.service.http.enabled": "true",
- "ranger.ldap.group.searchfilter": "{{ranger_ug_ldap_group_searchfilter}}",
- "ranger.ldap.ad.referral": "ignore",
- "ranger.ldap.ad.base.dn": "dc=example,dc=com",
- "ranger.jpa.jdbc.password": "_",
- "ranger.spnego.kerberos.keytab": "",
- "ranger.sso.providerurl": "",
- "ranger.unixauth.service.hostname": "{{ugsync_host}}",
- "ranger.admin.kerberos.keytab": "",
- "ranger.admin.kerberos.token.valid.seconds": "30",
- "ranger.jpa.jdbc.driver": "com.mysql.jdbc.Driver",
+ "ranger.lookup.kerberos.principal": "",
+ "ranger.service.https.port": "6182",
+ "ranger.plugins.storm.serviceuser": "storm",
+ "ranger.externalurl": "{{ranger_external_url}}",
+ "ranger.truststore.file": "/etc/ranger/admin/conf/ranger-admin-keystore.jks",
+ "ranger.kms.service.user.hive": "",
+ "ranger.https.attrib.keystore.file": "/etc/ranger/admin/conf/ranger-admin-keystore.jks",
+ "ranger.jpa.jdbc.dialect": "{{jdbc_dialect}}",
+ "ranger.service.host": "{{ranger_host}}",
+ "ranger.service.https.attrib.keystore.keyalias": "rangeradmin",
+ "ranger.service.https.attrib.keystore.pass": "xasecure",
+ "ranger.unixauth.remote.login.enabled": "true",
+ "ranger.jpa.jdbc.credential.alias": "rangeradmin",
+ "ranger.ldap.ad.bind.password": "{{ranger_usersync_ldap_ldapbindpassword}}",
+ "ranger.audit.solr.username": "ranger_solr",
+ "ranger.sso.enabled": "false",
+ "ranger.audit.solr.urls": "",
+ "ranger.ldap.ad.domain": "",
+ "ranger.plugins.yarn.serviceuser": "yarn",
+ "ranger.audit.source.type": "solr",
+ "ranger.ldap.bind.dn": "{{ranger_ug_ldap_bind_dn}}",
+ "ranger.ldap.url": "{{ranger_ug_ldap_url}}",
+ "ranger.authentication.method": "UNIX",
+ "ranger.service.http.enabled": "true",
+ "ranger.ldap.group.searchfilter": "{{ranger_ug_ldap_group_searchfilter}}",
+ "ranger.ldap.ad.referral": "ignore",
+ "ranger.ldap.ad.base.dn": "dc=example,dc=com",
+ "ranger.jpa.jdbc.password": "_",
+ "ranger.spnego.kerberos.keytab": "",
+ "ranger.sso.providerurl": "",
+ "ranger.unixauth.service.hostname": "{{ugsync_host}}",
+ "ranger.admin.kerberos.keytab": "",
+ "ranger.admin.kerberos.token.valid.seconds": "30",
+ "ranger.jpa.jdbc.driver": "com.mysql.jdbc.Driver",
"ranger.unixauth.service.port": "5151"
- },
+ },
"ranger-hdfs-policymgr-ssl": {
- "xasecure.policymgr.clientssl.keystore": "/usr/hdp/current/hadoop-client/conf/ranger-plugin-keystore.jks",
- "xasecure.policymgr.clientssl.truststore.password": "changeit",
- "xasecure.policymgr.clientssl.keystore.credential.file": "jceks://file{{credential_file}}",
- "xasecure.policymgr.clientssl.truststore": "/usr/hdp/current/hadoop-client/conf/ranger-plugin-truststore.jks",
- "xasecure.policymgr.clientssl.truststore.credential.file": "jceks://file{{credential_file}}",
+ "xasecure.policymgr.clientssl.keystore": "/usr/hdp/current/hadoop-client/conf/ranger-plugin-keystore.jks",
+ "xasecure.policymgr.clientssl.truststore.password": "changeit",
+ "xasecure.policymgr.clientssl.keystore.credential.file": "jceks://file{{credential_file}}",
+ "xasecure.policymgr.clientssl.truststore": "/usr/hdp/current/hadoop-client/conf/ranger-plugin-truststore.jks",
+ "xasecure.policymgr.clientssl.truststore.credential.file": "jceks://file{{credential_file}}",
"xasecure.policymgr.clientssl.keystore.password": "myKeyFilePassword"
- },
+ },
"tagsync-application-properties": {
- "atlas.kafka.entities.group.id": "ranger_entities_consumer",
- "atlas.kafka.zookeeper.connect": "c6401.ambari.apache.org:2181",
+ "atlas.kafka.entities.group.id": "ranger_entities_consumer",
+ "atlas.kafka.zookeeper.connect": "c6401.ambari.apache.org:2181",
"atlas.kafka.bootstrap.servers": "localhost:6667"
- },
+ },
"ranger-env": {
- "ranger_solr_shards": "1",
- "ranger_solr_config_set": "ranger_audits",
- "ranger_user": "ranger",
+ "ranger_solr_shards": "1",
+ "ranger_solr_config_set": "ranger_audits",
+ "ranger_user": "ranger",
"ranger_solr_replication_factor": "1",
- "xml_configurations_supported": "true",
- "ranger-atlas-plugin-enabled": "No",
- "ranger-hbase-plugin-enabled": "No",
- "ranger-yarn-plugin-enabled": "No",
- "bind_anonymous": "false",
- "ranger_admin_username": "amb_ranger_admin",
- "admin_password": "admin",
- "is_solrCloud_enabled": "true",
- "ranger-storm-plugin-enabled": "No",
- "ranger-hdfs-plugin-enabled": "No",
- "ranger_group": "ranger",
- "ranger-knox-plugin-enabled": "No",
- "ranger_admin_log_dir": "/var/log/ranger/admin",
- "ranger-kafka-plugin-enabled": "No",
- "ranger_privelege_user_jdbc_url": "jdbc:mysql://c6401.ambari.apache.org:3306",
- "ranger-hive-plugin-enabled": "No",
- "xasecure.audit.destination.solr": "true",
- "ranger_pid_dir": "/var/run/ranger",
- "xasecure.audit.destination.hdfs": "true",
- "admin_username": "admin",
- "xasecure.audit.destination.hdfs.dir": "hdfs://c6401.ambari.apache.org:8020/ranger/audit",
- "create_db_dbuser": "true",
- "ranger_solr_collection_name": "ranger_audits",
- "ranger_admin_password": "P1!q9xa96SMi5NCl",
+ "xml_configurations_supported": "true",
+ "ranger-atlas-plugin-enabled": "No",
+ "ranger-hbase-plugin-enabled": "No",
+ "ranger-yarn-plugin-enabled": "No",
+ "bind_anonymous": "false",
+ "ranger_admin_username": "amb_ranger_admin",
+ "admin_password": "admin",
+ "is_solrCloud_enabled": "true",
+ "ranger-storm-plugin-enabled": "No",
+ "ranger-hdfs-plugin-enabled": "No",
+ "ranger_group": "ranger",
+ "ranger-knox-plugin-enabled": "No",
+ "ranger_admin_log_dir": "/var/log/ranger/admin",
+ "ranger-kafka-plugin-enabled": "No",
+ "ranger_privelege_user_jdbc_url": "jdbc:mysql://c6401.ambari.apache.org:3306",
+ "ranger-hive-plugin-enabled": "No",
+ "xasecure.audit.destination.solr": "true",
+ "ranger_pid_dir": "/var/run/ranger",
+ "xasecure.audit.destination.hdfs": "true",
+ "admin_username": "admin",
+ "xasecure.audit.destination.hdfs.dir": "hdfs://c6401.ambari.apache.org:8020/ranger/audit",
+ "create_db_dbuser": "true",
+ "ranger_solr_collection_name": "ranger_audits",
+ "ranger_admin_password": "P1!q9xa96SMi5NCl",
"ranger_usersync_log_dir": "/var/log/ranger/usersync"
- },
+ },
"usersync-log4j": {
"content": "\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nlog4j.rootLogger = info,logFile\n\n# logFile\nlog4j.appender.logFile=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.logFile.file=${logdir}/usersync.
log\nlog4j.appender.logFile.datePattern='.'yyyy-MM-dd\nlog4j.appender.logFile.layout=org.apache.log4j.PatternLayout\nlog4j.appender.logFile.layout.ConversionPattern=%d{dd MMM yyyy HH:mm:ss} %5p %c{1} [%t] - %m%n\n\n# console\nlog4j.appender.console=org.apache.log4j.ConsoleAppender\nlog4j.appender.console.Target=System.out\nlog4j.appender.console.layout=org.apache.log4j.PatternLayout\nlog4j.appender.console.layout.ConversionPattern=%d{dd MMM yyyy HH:mm:ss} %5p %c{1} [%t] - %m%n"
- },
+ },
"admin-properties": {
- "db_user": "rangeradmin01",
- "DB_FLAVOR": "MYSQL",
- "db_password": "rangeradmin01",
- "db_root_user": "root",
- "policymgr_external_url": "http://c6401.ambari.apache.org:6080",
- "db_name": "ranger01",
- "db_host": "c6401.ambari.apache.org",
- "db_root_password": "vagrant",
+ "db_user": "rangeradmin01",
+ "DB_FLAVOR": "MYSQL",
+ "db_password": "rangeradmin01",
+ "db_root_user": "root",
+ "policymgr_external_url": "http://c6401.ambari.apache.org:6080",
+ "db_name": "ranger01",
+ "db_host": "c6401.ambari.apache.org",
+ "db_root_password": "vagrant",
"SQL_CONNECTOR_JAR": "{{driver_curl_target}}"
- },
+ },
"ranger-ugsync-site": {
- "ranger.usersync.ldap.binddn": "",
- "ranger.usersync.policymgr.username": "rangerusersync",
- "ranger.usersync.policymanager.mockrun": "false",
- "ranger.usersync.group.searchbase": "",
- "ranger.usersync.ldap.bindalias": "testldapalias",
- "ranger.usersync.truststore.file": "/usr/hdp/current/ranger-usersync/conf/mytruststore.jks",
- "ranger.usersync.port": "5151",
- "ranger.usersync.pagedresultssize": "500",
- "ranger.usersync.group.memberattributename": "",
- "ranger.usersync.kerberos.principal": "",
- "ranger.usersync.source.impl.class": "org.apache.ranger.unixusersync.process.UnixUserGroupBuilder",
- "ranger.usersync.ldap.referral": "ignore",
- "ranger.usersync.group.searchfilter": "",
- "ranger.usersync.ldap.user.objectclass": "person",
- "ranger.usersync.logdir": "{{usersync_log_dir}}",
- "ranger.usersync.ldap.user.searchfilter": "",
- "ranger.usersync.ldap.groupname.caseconversion": "none",
- "ranger.usersync.ldap.ldapbindpassword": "",
- "ranger.usersync.unix.minUserId": "500",
- "ranger.usersync.policymanager.maxrecordsperapicall": "1000",
- "ranger.usersync.group.nameattribute": "",
- "ranger.usersync.policymgr.alias": "ranger.usersync.policymgr.password",
- "ranger.usersync.keystore.file": "/usr/hdp/current/ranger-usersync/conf/unixauthservice.jks",
- "ranger.usersync.user.searchenabled": "false",
- "ranger.usersync.group.usermapsyncenabled": "true",
- "ranger.usersync.ldap.bindkeystore": "",
- "ranger.usersync.ldap.user.groupnameattribute": "memberof, ismemberof",
- "ranger.usersync.kerberos.keytab": "",
- "ranger.usersync.passwordvalidator.path": "./native/credValidator.uexe",
- "ranger.usersync.group.objectclass": "",
- "ranger.usersync.ldap.user.searchscope": "sub",
- "ranger.usersync.unix.password.file": "/etc/passwd",
- "ranger.usersync.ldap.user.nameattribute": "",
- "ranger.usersync.pagedresultsenabled": "true",
- "ranger.usersync.policymanager.baseURL": "{{ranger_external_url}}",
- "ranger.usersync.group.search.first.enabled": "false",
- "ranger.usersync.group.searchenabled": "false",
- "ranger.usersync.sink.impl.class": "org.apache.ranger.unixusersync.process.PolicyMgrUserGroupBuilder",
- "ranger.usersync.ssl": "true",
- "ranger.usersync.ldap.url": "",
- "ranger.usersync.ldap.searchBase": "dc=hadoop,dc=apache,dc=org",
- "ranger.usersync.policymgr.keystore": "/usr/hdp/current/ranger-usersync/conf/ugsync.jceks",
- "ranger.usersync.ldap.user.searchbase": "",
- "ranger.usersync.ldap.username.caseconversion": "none",
- "ranger.usersync.credstore.filename": "/usr/hdp/current/ranger-usersync/conf/ugsync.jceks",
- "ranger.usersync.keystore.password": "UnIx529p",
- "ranger.usersync.unix.group.file": "/etc/group",
- "ranger.usersync.filesource.file": "/tmp/usergroup.txt",
- "ranger.usersync.group.searchscope": "",
- "ranger.usersync.truststore.password": "changeit",
- "ranger.usersync.enabled": "true",
- "ranger.usersync.sleeptimeinmillisbetweensynccycle": "60000",
+ "ranger.usersync.ldap.binddn": "",
+ "ranger.usersync.policymgr.username": "rangerusersync",
+ "ranger.usersync.policymanager.mockrun": "false",
+ "ranger.usersync.group.searchbase": "",
+ "ranger.usersync.ldap.bindalias": "testldapalias",
+ "ranger.usersync.truststore.file": "/usr/hdp/current/ranger-usersync/conf/mytruststore.jks",
+ "ranger.usersync.port": "5151",
+ "ranger.usersync.pagedresultssize": "500",
+ "ranger.usersync.group.memberattributename": "",
+ "ranger.usersync.kerberos.principal": "",
+ "ranger.usersync.source.impl.class": "org.apache.ranger.unixusersync.process.UnixUserGroupBuilder",
+ "ranger.usersync.ldap.referral": "ignore",
+ "ranger.usersync.group.searchfilter": "",
+ "ranger.usersync.ldap.user.objectclass": "person",
+ "ranger.usersync.logdir": "{{usersync_log_dir}}",
+ "ranger.usersync.ldap.user.searchfilter": "",
+ "ranger.usersync.ldap.groupname.caseconversion": "none",
+ "ranger.usersync.ldap.ldapbindpassword": "",
+ "ranger.usersync.unix.minUserId": "500",
+ "ranger.usersync.policymanager.maxrecordsperapicall": "1000",
+ "ranger.usersync.group.nameattribute": "",
+ "ranger.usersync.policymgr.alias": "ranger.usersync.policymgr.password",
+ "ranger.usersync.keystore.file": "/usr/hdp/current/ranger-usersync/conf/unixauthservice.jks",
+ "ranger.usersync.user.searchenabled": "false",
+ "ranger.usersync.group.usermapsyncenabled": "true",
+ "ranger.usersync.ldap.bindkeystore": "",
+ "ranger.usersync.ldap.user.groupnameattribute": "memberof, ismemberof",
+ "ranger.usersync.kerberos.keytab": "",
+ "ranger.usersync.passwordvalidator.path": "./native/credValidator.uexe",
+ "ranger.usersync.group.objectclass": "",
+ "ranger.usersync.ldap.user.searchscope": "sub",
+ "ranger.usersync.unix.password.file": "/etc/passwd",
+ "ranger.usersync.ldap.user.nameattribute": "",
+ "ranger.usersync.pagedresultsenabled": "true",
+ "ranger.usersync.policymanager.baseURL": "{{ranger_external_url}}",
+ "ranger.usersync.group.search.first.enabled": "false",
+ "ranger.usersync.group.searchenabled": "false",
+ "ranger.usersync.sink.impl.class": "org.apache.ranger.unixusersync.process.PolicyMgrUserGroupBuilder",
+ "ranger.usersync.ssl": "true",
+ "ranger.usersync.ldap.url": "",
+ "ranger.usersync.ldap.searchBase": "dc=hadoop,dc=apache,dc=org",
+ "ranger.usersync.policymgr.keystore": "/usr/hdp/current/ranger-usersync/conf/ugsync.jceks",
+ "ranger.usersync.ldap.user.searchbase": "",
+ "ranger.usersync.ldap.username.caseconversion": "none",
+ "ranger.usersync.credstore.filename": "/usr/hdp/current/ranger-usersync/conf/ugsync.jceks",
+ "ranger.usersync.keystore.password": "UnIx529p",
+ "ranger.usersync.unix.group.file": "/etc/group",
+ "ranger.usersync.filesource.file": "/tmp/usergroup.txt",
+ "ranger.usersync.group.searchscope": "",
+ "ranger.usersync.truststore.password": "changeit",
+ "ranger.usersync.enabled": "true",
+ "ranger.usersync.sleeptimeinmillisbetweensynccycle": "60000",
"ranger.usersync.filesource.text.delimiter": ","
- },
+ },
"hdfs-site": {
- "dfs.namenode.checkpoint.period": "21600",
- "dfs.namenode.avoid.write.stale.datanode": "true",
- "dfs.namenode.startup.delay.block.deletion.sec": "3600",
- "dfs.namenode.checkpoint.txns": "1000000",
- "dfs.content-summary.limit": "5000",
- "dfs.support.append": "true",
- "dfs.datanode.address": "0.0.0.0:50010",
- "dfs.cluster.administrators": " hdfs",
- "dfs.namenode.audit.log.async": "true",
- "dfs.datanode.balance.bandwidthPerSec": "6250000",
- "dfs.namenode.safemode.threshold-pct": "1",
- "dfs.namenode.checkpoint.edits.dir": "${dfs.namenode.checkpoint.dir}",
- "dfs.namenode.rpc-address": "c6401.ambari.apache.org:8020",
- "dfs.permissions.enabled": "true",
- "dfs.client.read.shortcircuit": "true",
- "dfs.https.port": "50470",
- "dfs.namenode.https-address": "c6401.ambari.apache.org:50470",
- "nfs.file.dump.dir": "/tmp/.hdfs-nfs",
- "dfs.blocksize": "134217728",
- "dfs.blockreport.initialDelay": "120",
- "dfs.journalnode.edits.dir": "/hadoop/hdfs/journalnode",
- "dfs.namenode.fslock.fair": "false",
- "dfs.datanode.max.transfer.threads": "4096",
- "dfs.heartbeat.interval": "3",
- "dfs.replication": "3",
- "dfs.namenode.handler.count": "50",
- "dfs.namenode.checkpoint.dir": "/grid/0/hadoop/hdfs/namesecondary",
- "fs.permissions.umask-mode": "022",
- "dfs.namenode.stale.datanode.interval": "30000",
- "dfs.datanode.ipc.address": "0.0.0.0:8010",
- "dfs.datanode.failed.volumes.tolerated": "0",
- "dfs.datanode.data.dir": "/grid/0/hadoop/hdfs/data",
- "dfs.namenode.http-address": "c6401.ambari.apache.org:50070",
- "dfs.webhdfs.enabled": "true",
- "dfs.encrypt.data.transfer.cipher.suites": "AES/CTR/NoPadding",
- "dfs.namenode.accesstime.precision": "0",
- "dfs.datanode.https.address": "0.0.0.0:50475",
- "dfs.namenode.write.stale.datanode.ratio": "1.0f",
- "dfs.namenode.secondary.http-address": "c6401.ambari.apache.org:50090",
- "nfs.exports.allowed.hosts": "* rw",
- "dfs.datanode.http.address": "0.0.0.0:50075",
- "dfs.datanode.du.reserved": "33011188224",
- "dfs.client.read.shortcircuit.streams.cache.size": "4096",
- "dfs.http.policy": "HTTP_ONLY",
- "dfs.block.access.token.enable": "true",
- "dfs.client.retry.policy.enabled": "false",
- "dfs.namenode.name.dir.restore": "true",
- "dfs.permissions.superusergroup": "hdfs",
- "dfs.journalnode.https-address": "0.0.0.0:8481",
- "dfs.journalnode.http-address": "0.0.0.0:8480",
- "dfs.domain.socket.path": "/var/lib/hadoop-hdfs/dn_socket",
- "dfs.namenode.avoid.read.stale.datanode": "true",
- "dfs.hosts.exclude": "/etc/hadoop/conf/dfs.exclude",
- "dfs.datanode.data.dir.perm": "750",
- "dfs.encryption.key.provider.uri": "",
- "dfs.replication.max": "50",
+ "dfs.namenode.checkpoint.period": "21600",
+ "dfs.namenode.avoid.write.stale.datanode": "true",
+ "dfs.namenode.startup.delay.block.deletion.sec": "3600",
+ "dfs.namenode.checkpoint.txns": "1000000",
+ "dfs.content-summary.limit": "5000",
+ "dfs.support.append": "true",
+ "dfs.datanode.address": "0.0.0.0:50010",
+ "dfs.cluster.administrators": " hdfs",
+ "dfs.namenode.audit.log.async": "true",
+ "dfs.datanode.balance.bandwidthPerSec": "6250000",
+ "dfs.namenode.safemode.threshold-pct": "1",
+ "dfs.namenode.checkpoint.edits.dir": "${dfs.namenode.checkpoint.dir}",
+ "dfs.namenode.rpc-address": "c6401.ambari.apache.org:8020",
+ "dfs.permissions.enabled": "true",
+ "dfs.client.read.shortcircuit": "true",
+ "dfs.https.port": "50470",
+ "dfs.namenode.https-address": "c6401.ambari.apache.org:50470",
+ "nfs.file.dump.dir": "/tmp/.hdfs-nfs",
+ "dfs.blocksize": "134217728",
+ "dfs.blockreport.initialDelay": "120",
+ "dfs.journalnode.edits.dir": "/hadoop/hdfs/journalnode",
+ "dfs.namenode.fslock.fair": "false",
+ "dfs.datanode.max.transfer.threads": "4096",
+ "dfs.heartbeat.interval": "3",
+ "dfs.replication": "3",
+ "dfs.namenode.handler.count": "50",
+ "dfs.namenode.checkpoint.dir": "/grid/0/hadoop/hdfs/namesecondary",
+ "fs.permissions.umask-mode": "022",
+ "dfs.namenode.stale.datanode.interval": "30000",
+ "dfs.datanode.ipc.address": "0.0.0.0:8010",
+ "dfs.datanode.failed.volumes.tolerated": "0",
+ "dfs.datanode.data.dir": "/grid/0/hadoop/hdfs/data",
+ "dfs.namenode.http-address": "c6401.ambari.apache.org:50070",
+ "dfs.webhdfs.enabled": "true",
+ "dfs.encrypt.data.transfer.cipher.suites": "AES/CTR/NoPadding",
+ "dfs.namenode.accesstime.precision": "0",
+ "dfs.datanode.https.address": "0.0.0.0:50475",
+ "dfs.namenode.write.stale.datanode.ratio": "1.0f",
+ "dfs.namenode.secondary.http-address": "c6401.ambari.apache.org:50090",
+ "nfs.exports.allowed.hosts": "* rw",
+ "dfs.datanode.http.address": "0.0.0.0:50075",
+ "dfs.datanode.du.reserved": "33011188224",
+ "dfs.client.read.shortcircuit.streams.cache.size": "4096",
+ "dfs.http.policy": "HTTP_ONLY",
+ "dfs.block.access.token.enable": "true",
+ "dfs.client.retry.policy.enabled": "false",
+ "dfs.namenode.name.dir.restore": "true",
+ "dfs.permissions.superusergroup": "hdfs",
+ "dfs.journalnode.https-address": "0.0.0.0:8481",
+ "dfs.journalnode.http-address": "0.0.0.0:8480",
+ "dfs.domain.socket.path": "/var/lib/hadoop-hdfs/dn_socket",
+ "dfs.namenode.avoid.read.stale.datanode": "true",
+ "dfs.hosts.exclude": "/etc/hadoop/conf/dfs.exclude",
+ "dfs.datanode.data.dir.perm": "750",
+ "dfs.encryption.key.provider.uri": "",
+ "dfs.replication.max": "50",
"dfs.namenode.name.dir": "/grid/0/hadoop/hdfs/namenode"
- },
+ },
"ranger-tagsync-site": {
- "ranger.tagsync.dest.ranger.ssl.config.filename": "/usr/hdp/current/ranger-tagsync/conf/mytruststore.jks",
- "ranger.tagsync.source.atlasrest.username": "",
- "ranger.tagsync.logdir": "/var/log/ranger/tagsync",
- "ranger.tagsync.source.atlasrest.download.interval.millis": "",
- "ranger.tagsync.keystore.filename": "/usr/hdp/current/ranger-tagsync/conf/rangertagsync.jceks",
- "ranger.tagsync.source.file.check.interval.millis": "",
- "ranger.tagsync.source.atlasrest.endpoint": "",
- "ranger.tagsync.dest.ranger.username": "rangertagsync",
- "ranger.tagsync.dest.ranger.endpoint": "{{ranger_external_url}}",
- "ranger.tagsync.kerberos.principal": "",
- "ranger.tagsync.kerberos.keytab": "",
- "ranger.tagsync.source.atlas": "false",
- "ranger.tagsync.source.atlasrest": "false",
- "ranger.tagsync.source.file": "false",
+ "ranger.tagsync.dest.ranger.ssl.config.filename": "/usr/hdp/current/ranger-tagsync/conf/mytruststore.jks",
+ "ranger.tagsync.source.atlasrest.username": "",
+ "ranger.tagsync.logdir": "/var/log/ranger/tagsync",
+ "ranger.tagsync.source.atlasrest.download.interval.millis": "",
+ "ranger.tagsync.keystore.filename": "/usr/hdp/current/ranger-tagsync/conf/rangertagsync.jceks",
+ "ranger.tagsync.source.file.check.interval.millis": "",
+ "ranger.tagsync.source.atlasrest.endpoint": "",
+ "ranger.tagsync.dest.ranger.username": "rangertagsync",
+ "ranger.tagsync.dest.ranger.endpoint": "{{ranger_external_url}}",
+ "ranger.tagsync.kerberos.principal": "",
+ "ranger.tagsync.kerberos.keytab": "",
+ "ranger.tagsync.source.atlas": "false",
+ "ranger.tagsync.source.atlasrest": "false",
+ "ranger.tagsync.source.file": "false",
"ranger.tagsync.source.file.filename": ""
- },
+ },
"zoo.cfg": {
- "clientPort": "2181",
- "autopurge.purgeInterval": "24",
- "syncLimit": "5",
- "dataDir": "/grid/0/hadoop/zookeeper",
- "initLimit": "10",
- "tickTime": "2000",
+ "clientPort": "2181",
+ "autopurge.purgeInterval": "24",
+ "syncLimit": "5",
+ "dataDir": "/grid/0/hadoop/zookeeper",
+ "initLimit": "10",
+ "tickTime": "2000",
"autopurge.snapRetainCount": "30"
- },
+ },
"hadoop-policy": {
- "security.job.client.protocol.acl": "*",
- "security.job.task.protocol.acl": "*",
- "security.datanode.protocol.acl": "*",
- "security.namenode.protocol.acl": "*",
- "security.client.datanode.protocol.acl": "*",
- "security.inter.tracker.protocol.acl": "*",
- "security.refresh.usertogroups.mappings.protocol.acl": "hadoop",
- "security.client.protocol.acl": "*",
- "security.refresh.policy.protocol.acl": "hadoop",
- "security.admin.operations.protocol.acl": "hadoop",
+ "security.job.client.protocol.acl": "*",
+ "security.job.task.protocol.acl": "*",
+ "security.datanode.protocol.acl": "*",
+ "security.namenode.protocol.acl": "*",
+ "security.client.datanode.protocol.acl": "*",
+ "security.inter.tracker.protocol.acl": "*",
+ "security.refresh.usertogroups.mappings.protocol.acl": "hadoop",
+ "security.client.protocol.acl": "*",
+ "security.refresh.policy.protocol.acl": "hadoop",
+ "security.admin.operations.protocol.acl": "hadoop",
"security.inter.datanode.protocol.acl": "*"
- },
+ },
"hdfs-log4j": {
"content": "\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n\n\n# Define some default values that can be overridden by system properties\n# To change daemon root logger use hadoop_root_logger in hadoop-env\nhadoop.root.lo
gger=INFO,console\nhadoop.log.dir=.\nhadoop.log.file=hadoop.log\n\n\n# Define the root logger to the system property \"hadoop.root.logger\".\nlog4j.rootLogger=${hadoop.root.logger}, EventCounter\n\n# Logging Threshold\nlog4j.threshhold=ALL\n\n#\n# Daily Rolling File Appender\n#\n\nlog4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}\n\n# Rollver at midnight\nlog4j.appender.DRFA.DatePattern=.yyyy-MM-dd\n\n# 30-day backup\n#log4j.appender.DRFA.MaxBackupIndex=30\nlog4j.appender.DRFA.layout=org.apache.log4j.PatternLayout\n\n# Pattern format: Date LogLevel LoggerName LogMessage\nlog4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\n# Debugging Pattern format\n#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n\n\n\n#\n# console\n# Add \"console\" to rootlogger above if you want to use this\n#\n\nlog4j.appender.console=org.apache.log4j.ConsoleAppender\nlog4j.appender
.console.target=System.err\nlog4j.appender.console.layout=org.apache.log4j.PatternLayout\nlog4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n\n\n#\n# TaskLog Appender\n#\n\n#Default values\nhadoop.tasklog.taskid=null\nhadoop.tasklog.iscleanup=false\nhadoop.tasklog.noKeepSplits=4\nhadoop.tasklog.totalLogFileSize=100\nhadoop.tasklog.purgeLogSplits=true\nhadoop.tasklog.logsRetainHours=12\n\nlog4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender\nlog4j.appender.TLA.taskId=${hadoop.tasklog.taskid}\nlog4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}\nlog4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}\n\nlog4j.appender.TLA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\n\n#\n#Security audit appender\n#\nhadoop.security.logger=INFO,console\nhadoop.security.log.maxfilesize=256MB\nhadoop.security.log.maxbackupindex=20\nlog4j.category.SecurityLogger=${hadoop.security.logg
er}\nhadoop.security.log.file=SecurityAuth.audit\nlog4j.appender.DRFAS=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}\nlog4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout\nlog4j.appender.DRFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\nlog4j.appender.DRFAS.DatePattern=.yyyy-MM-dd\n\nlog4j.appender.RFAS=org.apache.log4j.RollingFileAppender\nlog4j.appender.RFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}\nlog4j.appender.RFAS.layout=org.apache.log4j.PatternLayout\nlog4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\nlog4j.appender.RFAS.MaxFileSize=${hadoop.security.log.maxfilesize}\nlog4j.appender.RFAS.MaxBackupIndex=${hadoop.security.log.maxbackupindex}\n\n#\n# hdfs audit logging\n#\nhdfs.audit.logger=INFO,console\nlog4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger}\nlog4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=
false\nlog4j.appender.DRFAAUDIT=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFAAUDIT.File=${hadoop.log.dir}/hdfs-audit.log\nlog4j.appender.DRFAAUDIT.layout=org.apache.log4j.PatternLayout\nlog4j.appender.DRFAAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n\nlog4j.appender.DRFAAUDIT.DatePattern=.yyyy-MM-dd\n\n#\n# NameNode metrics logging.\n# The default is to retain two namenode-metrics.log files up to 64MB each.\n#\nnamenode.metrics.logger=INFO,NullAppender\nlog4j.logger.NameNodeMetricsLog=${namenode.metrics.logger}\nlog4j.additivity.NameNodeMetricsLog=false\nlog4j.appender.NNMETRICSRFA=org.apache.log4j.RollingFileAppender\nlog4j.appender.NNMETRICSRFA.File=${hadoop.log.dir}/namenode-metrics.log\nlog4j.appender.NNMETRICSRFA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.NNMETRICSRFA.layout.ConversionPattern=%d{ISO8601} %m%n\nlog4j.appender.NNMETRICSRFA.MaxBackupIndex=1\nlog4j.appender.NNMETRICSRFA.MaxFileSize=64MB\n\n#\n# mapred audit logging\n#\nmapred
.audit.logger=INFO,console\nlog4j.logger.org.apache.hadoop.mapred.AuditLogger=${mapred.audit.logger}\nlog4j.additivity.org.apache.hadoop.mapred.AuditLogger=false\nlog4j.appender.MRAUDIT=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.MRAUDIT.File=${hadoop.log.dir}/mapred-audit.log\nlog4j.appender.MRAUDIT.layout=org.apache.log4j.PatternLayout\nlog4j.appender.MRAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n\nlog4j.appender.MRAUDIT.DatePattern=.yyyy-MM-dd\n\n#\n# Rolling File Appender\n#\n\nlog4j.appender.RFA=org.apache.log4j.RollingFileAppender\nlog4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}\n\n# Logfile size and and 30-day backups\nlog4j.appender.RFA.MaxFileSize=256MB\nlog4j.appender.RFA.MaxBackupIndex=10\n\nlog4j.appender.RFA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} - %m%n\nlog4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n\n\n\n# Custom Logging leve
ls\n\nhadoop.metrics.log.level=INFO\n#log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG\n#log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG\n#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG\nlog4j.logger.org.apache.hadoop.metrics2=${hadoop.metrics.log.level}\n\n# Jets3t library\nlog4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR\n\n#\n# Null Appender\n# Trap security logger on the hadoop client side\n#\nlog4j.appender.NullAppender=org.apache.log4j.varia.NullAppender\n\n#\n# Event Counter Appender\n# Sends counts of logging messages at different severity levels to Hadoop Metrics.\n#\nlog4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter\n\n# Removes \"deprecated\" messages\nlog4j.logger.org.apache.hadoop.conf.Configuration.deprecation=WARN\n\n#\n# HDFS block state change log from block manager\n#\n# Uncomment the following to suppress normal block state change\n# messages from BlockManager in NameNode.\n#log4j.logger.BlockStateCh
ange=WARN"
- },
+ },
"ranger-hdfs-plugin-properties": {
- "hadoop.rpc.protection": "authentication",
- "ranger-hdfs-plugin-enabled": "No",
- "REPOSITORY_CONFIG_USERNAME": "hadoop",
- "policy_user": "ambari-qa",
- "common.name.for.certificate": "",
+ "hadoop.rpc.protection": "authentication",
+ "ranger-hdfs-plugin-enabled": "No",
+ "REPOSITORY_CONFIG_USERNAME": "hadoop",
+ "policy_user": "ambari-qa",
+ "common.name.for.certificate": "",
"REPOSITORY_CONFIG_PASSWORD": "hadoop"
- },
+ },
"core-site": {
- "hadoop.proxyuser.root.hosts": "c6401.ambari.apache.org",
- "io.serializations": "org.apache.hadoop.io.serializer.WritableSerialization",
- "fs.trash.interval": "360",
- "ha.failover-controller.active-standby-elector.zk.op.retries": "120",
- "hadoop.http.authentication.simple.anonymous.allowed": "true",
- "hadoop.security.authentication": "simple",
- "hadoop.proxyuser.root.groups": "*",
- "ipc.client.connection.maxidletime": "30000",
- "hadoop.security.key.provider.path": "",
- "mapreduce.jobtracker.webinterface.trusted": "false",
- "hadoop.security.authorization": "false",
- "net.topology.script.file.name": "/etc/hadoop/conf/topology_script.py",
- "ipc.server.tcpnodelay": "true",
- "ipc.client.connect.max.retries": "50",
- "hadoop.security.auth_to_local": "DEFAULT",
- "io.file.buffer.size": "131072",
- "hadoop.proxyuser.hdfs.hosts": "*",
- "hadoop.proxyuser.hdfs.groups": "*",
- "ipc.client.idlethreshold": "8000",
- "fs.defaultFS": "hdfs://c6401.ambari.apache.org:8020",
+ "hadoop.proxyuser.root.hosts": "c6401.ambari.apache.org",
+ "io.serializations": "org.apache.hadoop.io.serializer.WritableSerialization",
+ "fs.trash.interval": "360",
+ "ha.failover-controller.active-standby-elector.zk.op.retries": "120",
+ "hadoop.http.authentication.simple.anonymous.allowed": "true",
+ "hadoop.security.authentication": "simple",
+ "hadoop.proxyuser.root.groups": "*",
+ "ipc.client.connection.maxidletime": "30000",
+ "hadoop.security.key.provider.path": "",
+ "mapreduce.jobtracker.webinterface.trusted": "false",
+ "hadoop.security.authorization": "false",
+ "net.topology.script.file.name": "/etc/hadoop/conf/topology_script.py",
+ "ipc.server.tcpnodelay": "true",
+ "ipc.client.connect.max.retries": "50",
+ "hadoop.security.auth_to_local": "DEFAULT",
+ "io.file.buffer.size": "131072",
+ "hadoop.proxyuser.hdfs.hosts": "*",
+ "hadoop.proxyuser.hdfs.groups": "*",
+ "ipc.client.idlethreshold": "8000",
+ "fs.defaultFS": "hdfs://c6401.ambari.apache.org:8020",
"io.compression.codecs": "org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec,org.apache.hadoop.io.compress.SnappyCodec"
- },
+ },
"hadoop-env": {
- "keyserver_port": "",
- "proxyuser_group": "users",
- "hdfs_user_nproc_limit": "65536",
- "hdfs_log_dir_prefix": "/var/log/hadoop",
- "hdfs_user_nofile_limit": "128000",
- "hdfs_user": "hdfs",
- "keyserver_host": " ",
- "namenode_opt_maxnewsize": "128m",
- "namenode_opt_maxpermsize": "256m",
-            "content": "\n# Set Hadoop-specific environment variables here.\n\n# The only required environment variable is JAVA_HOME. All others are\n# optional. When running a distributed configuration it is best to\n# set JAVA_HOME in this file, so that it is correctly defined on\n# remote nodes.\n\n# The java implementation to use. Required.\nexport JAVA_HOME={{java_home}}\nexport HADOOP_HOME_WARN_SUPPRESS=1\n\n# Hadoop home directory\nexport HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n\n# Hadoop Configuration Directory\n\n{# this is different for HDP1 #}\n# Path to jsvc required by secure HDP 2.0 datanode\nexport JSVC_HOME={{jsvc_path}}\n\n\n# The maximum amount of heap to use, in MB. Default is 1000.\nexport HADOOP_HEAPSIZE=\"{{hadoop_heapsize}}\"\n\nexport HADOOP_NAMENODE_INIT_HEAPSIZE=\"-Xms{{namenode_heapsize}}\"\n\n# Extra Java runtime options. Empty by default.\nexport HADOOP_OPTS=\"-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}\"\n\n# Command specific options appended to HADOOP_OPTS when specified\nHADOOP_JOBTRACKER_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}\"\n\nHADOOP_TASKTRACKER_OPTS=\"-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}\"\n\n{% if java_version < 8 %}\nSHARED_HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT\"\nexport HADOOP_NAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-namenode/bin/kill-name-node\\\" -Dorg.mortbay.jetty.Request.maxFormContentSize=-1 ${HADOOP_NAMENODE_OPTS}\"\nexport HADOOP_DATANODE_OPTS=\"-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -XX:PermSize=128m -XX:MaxPermSize=256m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS}\"\n\nexport HADOOP_SECONDARYNAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-secondarynamenode/bin/kill-secondary-name-node\\\" ${HADOOP_SECONDARYNAMENODE_OPTS}\"\n\n# The following applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m -XX:MaxPermSize=512m $HADOOP_CLIENT_OPTS\"\n\n{% else %}\nSHARED_HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT\"\nexport HADOOP_NAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-namenode/bin/kill-name-node\\\" -Dorg.mortbay.jetty.Request.maxFormContentSize=-1 ${HADOOP_NAMENODE_OPTS}\"\nexport HADOOP_DATANODE_OPTS=\"-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS}\"\n\nexport HADOOP_SECONDARYNAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-secondarynamenode/bin/kill-secondary-name-node\\\" ${HADOOP_SECONDARYNAMENODE_OPTS}\"\n\n# The following applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS\"\n{% endif %}\n\nHADOOP_NFS3_OPTS=\"-Xmx{{nfsgateway_heapsize}}m -Dhadoop.security.logger=ERROR,DRFAS ${HADOOP_NFS3_OPTS}\"\nHADOOP_BALANCER_OPTS=\"-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}\"\n\n\n# On secure datanodes, user to run the datanode as after dropping privileges\nexport HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER:-{{hadoop_secure_dn_user}}}\n\n# Extra ssh options. Empty by default.\nexport HADOOP_SSH_OPTS=\"-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR\"\n\n# Where log files are stored. $HADOOP_HOME/logs by default.\nexport HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER\n\n# History server logs\nexport HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER\n\n# Where log files are stored in the secure data environment.\nexport HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# File naming remote slave hosts. $HADOOP_HOME/conf/slaves by default.\n# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves\n\n# host:path where hadoop code should be rsync'd from. Unset by default.\n# export HADOOP_MASTER=master:/home/$USER/src/hadoop\n\n# Seconds to sleep between slave commands. Unset by default. This\n# can be useful in large clusters, where, e.g., slave rsyncs can\n# otherwise arrive faster than the master can service them.\n# export HADOOP_SLAVE_SLEEP=0.1\n\n# The directory where pid files are stored. /tmp by default.\nexport HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER\nexport HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# History server pid\nexport HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER\n\nYARN_RESOURCEMANAGER_OPTS=\"-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY\"\n\n# A string representing this instance of hadoop. $USER by default.\nexport HADOOP_IDENT_STRING=$USER\n\n# The scheduling priority for daemon processes. See 'man nice'.\n\n# export HADOOP_NICENESS=10\n\n# Add database libraries\nJAVA_JDBC_LIBS=\"\"\nif [ -d \"/usr/share/java\" ]; then\n for jarFile in `ls /usr/share/java | grep -E \"(mysql|ojdbc|postgresql|sqljdbc)\" 2>/dev/null`\n do\n JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\n done\nfi\n\n# Add libraries to the hadoop classpath - some may not need a colon as they already include it\nexport HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}\n\n# Setting path to hdfs command line\nexport HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}\n\n# Mostly required for hadoop 2.0\nexport JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}\n\nexport HADOOP_OPTS=\"-Dhdp.version=$HDP_VERSION $HADOOP_OPTS\"\n\n{% if is_datanode_max_locked_memory_set %}\n# Fix temporary bug, when ulimit from conf files is not picked up, without full relogin. \n# Makes sense to fix only when runing DN as root \nif [ \"$command\" == \"datanode\" ] && [ \"$EUID\" -eq 0 ] && [ -n \"$HADOOP_SECURE_DN_USER\" ]; then\n ulimit -l {{datanode_max_locked_memory}}\nfi\n{% endif %}",
- "namenode_heapsize": "1024m",
- "namenode_opt_newsize": "128m",
- "nfsgateway_heapsize": "1024",
- "dtnode_heapsize": "1024m",
- "hadoop_root_logger": "INFO,RFA",
- "hadoop_heapsize": "1024",
- "hadoop_pid_dir_prefix": "/var/run/hadoop",
- "namenode_opt_permsize": "128m",
+ "keyserver_port": "",
+ "proxyuser_group": "users",
+ "hdfs_user_nproc_limit": "65536",
+ "hdfs_log_dir_prefix": "/var/log/hadoop",
+ "hdfs_user_nofile_limit": "128000",
+ "hdfs_user": "hdfs",
+ "keyserver_host": " ",
+ "namenode_opt_maxnewsize": "128m",
+ "namenode_opt_maxpermsize": "256m",
+            "content": "\n# Set Hadoop-specific environment variables here.\n\n# The only required environment variable is JAVA_HOME. All others are\n# optional. When running a distributed configuration it is best to\n# set JAVA_HOME in this file, so that it is correctly defined on\n# remote nodes.\n\n# The java implementation to use. Required.\nexport JAVA_HOME={{java_home}}\nexport HADOOP_HOME_WARN_SUPPRESS=1\n\n# Hadoop home directory\nexport HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n\n# Hadoop Configuration Directory\n\n{# this is different for HDP1 #}\n# Path to jsvc required by secure HDP 2.0 datanode\nexport JSVC_HOME={{jsvc_path}}\n\n\n# The maximum amount of heap to use, in MB. Default is 1000.\nexport HADOOP_HEAPSIZE=\"{{hadoop_heapsize}}\"\n\nexport HADOOP_NAMENODE_INIT_HEAPSIZE=\"-Xms{{namenode_heapsize}}\"\n\n# Extra Java runtime options. Empty by default.\nexport HADOOP_OPTS=\"-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}\"\n\n# Command specific options appended to HADOOP_OPTS when specified\nHADOOP_JOBTRACKER_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}\"\n\nHADOOP_TASKTRACKER_OPTS=\"-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}\"\n\n{% if java_version < 8 %}\nSHARED_HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT\"\nexport HADOOP_NAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-namenode/bin/kill-name-node\\\" -Dorg.mortbay.jetty.Request.maxFormContentSize=-1 ${HADOOP_NAMENODE_OPTS}\"\nexport HADOOP_DATANODE_OPTS=\"-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -XX:PermSize=128m -XX:MaxPermSize=256m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS}\"\n\nexport HADOOP_SECONDARYNAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-secondarynamenode/bin/kill-secondary-name-node\\\" ${HADOOP_SECONDARYNAMENODE_OPTS}\"\n\n# The following applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m -XX:MaxPermSize=512m $HADOOP_CLIENT_OPTS\"\n\n{% else %}\nSHARED_HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT\"\nexport HADOOP_NAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-namenode/bin/kill-name-node\\\" -Dorg.mortbay.jetty.Request.maxFormContentSize=-1 ${HADOOP_NAMENODE_OPTS}\"\nexport HADOOP_DATANODE_OPTS=\"-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS}\"\n\nexport HADOOP_SECONDARYNAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-secondarynamenode/bin/kill-secondary-name-node\\\" ${HADOOP_SECONDARYNAMENODE_OPTS}\"\n\n# The following applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS\"\n{% endif %}\n\nHADOOP_NFS3_OPTS=\"-Xmx{{nfsgateway_heapsize}}m -Dhadoop.security.logger=ERROR,DRFAS ${HADOOP_NFS3_OPTS}\"\nHADOOP_BALANCER_OPTS=\"-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}\"\n\n\n# On secure datanodes, user to run the datanode as after dropping privileges\nexport HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER:-{{hadoop_secure_dn_user}}}\n\n# Extra ssh options. Empty by default.\nexport HADOOP_SSH_OPTS=\"-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR\"\n\n# Where log files are stored. $HADOOP_HOME/logs by default.\nexport HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER\n\n# History server logs\nexport HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER\n\n# Where log files are stored in the secure data environment.\nexport HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# File naming remote slave hosts. $HADOOP_HOME/conf/slaves by default.\n# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves\n\n# host:path where hadoop code should be rsync'd from. Unset by default.\n# export HADOOP_MASTER=master:/home/$USER/src/hadoop\n\n# Seconds to sleep between slave commands. Unset by default. This\n# can be useful in large clusters, where, e.g., slave rsyncs can\n# otherwise arrive faster than the master can service them.\n# export HADOOP_SLAVE_SLEEP=0.1\n\n# The directory where pid files are stored. /tmp by default.\nexport HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER\nexport HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# History server pid\nexport HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER\n\nYARN_RESOURCEMANAGER_OPTS=\"-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY\"\n\n# A string representing this instance of hadoop. $USER by default.\nexport HADOOP_IDENT_STRING=$USER\n\n# The scheduling priority for daemon processes. See 'man nice'.\n\n# export HADOOP_NICENESS=10\n\n# Add database libraries\nJAVA_JDBC_LIBS=\"\"\nif [ -d \"/usr/share/java\" ]; then\n for jarFile in `ls /usr/share/java | grep -E \"(mysql|ojdbc|postgresql|sqljdbc)\" 2>/dev/null`\n do\n JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\n done\nfi\n\n# Add libraries to the hadoop classpath - some may not need a colon as they already include it\nexport HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}\n\n# Setting path to hdfs command line\nexport HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}\n\n# Mostly required for hadoop 2.0\nexport JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}\n\nexport HADOOP_OPTS=\"-Dhdp.version=$HDP_VERSION $HADOOP_OPTS\"\n\n{% if is_datanode_max_locked_memory_set %}\n# Fix temporary bug, when ulimit from conf files is not picked up, without full relogin. \n# Makes sense to fix only when runing DN as root \nif [ \"$command\" == \"datanode\" ] && [ \"$EUID\" -eq 0 ] && [ -n \"$HADOOP_SECURE_DN_USER\" ]; then\n ulimit -l {{datanode_max_locked_memory}}\nfi\n{% endif %}",
+ "namenode_heapsize": "1024m",
+ "namenode_opt_newsize": "128m",
+ "nfsgateway_heapsize": "1024",
+ "dtnode_heapsize": "1024m",
+ "hadoop_root_logger": "INFO,RFA",
+ "hadoop_heapsize": "1024",
+ "hadoop_pid_dir_prefix": "/var/run/hadoop",
+ "namenode_opt_permsize": "128m",
"hdfs_tmp_dir": "/tmp"
- },
+ },
"zookeeper-log4j": {
"content": "\n#\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n#\n#\n\n#\n# ZooKeeper Logging Configuration\n#\n\n# DEFAULT: console appender only\nlog4j.rootLogger=INFO, CONSOLE\n\n# Example with rolling log file\n#log4
j.rootLogger=DEBUG, CONSOLE, ROLLINGFILE\n\n# Example with rolling log file and tracing\n#log4j.rootLogger=TRACE, CONSOLE, ROLLINGFILE, TRACEFILE\n\n#\n# Log INFO level and above messages to the console\n#\nlog4j.appender.CONSOLE=org.apache.log4j.ConsoleAppender\nlog4j.appender.CONSOLE.Threshold=INFO\nlog4j.appender.CONSOLE.layout=org.apache.log4j.PatternLayout\nlog4j.appender.CONSOLE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L] - %m%n\n\n#\n# Add ROLLINGFILE to rootLogger to get log file output\n# Log DEBUG level and above messages to a log file\nlog4j.appender.ROLLINGFILE=org.apache.log4j.RollingFileAppender\nlog4j.appender.ROLLINGFILE.Threshold=DEBUG\nlog4j.appender.ROLLINGFILE.File=zookeeper.log\n\n# Max log file size of 10MB\nlog4j.appender.ROLLINGFILE.MaxFileSize=10MB\n# uncomment the next line to limit number of backup files\n#log4j.appender.ROLLINGFILE.MaxBackupIndex=10\n\nlog4j.appender.ROLLINGFILE.layout=org.apache.log4j.PatternLayout\nlog4j.appender.ROLLIN
GFILE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L] - %m%n\n\n\n#\n# Add TRACEFILE to rootLogger to get log file output\n# Log DEBUG level and above messages to a log file\nlog4j.appender.TRACEFILE=org.apache.log4j.FileAppender\nlog4j.appender.TRACEFILE.Threshold=TRACE\nlog4j.appender.TRACEFILE.File=zookeeper_trace.log\n\nlog4j.appender.TRACEFILE.layout=org.apache.log4j.PatternLayout\n### Notice we are including log4j's NDC here (%x)\nlog4j.appender.TRACEFILE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L][%x] - %m%n"
- },
+ },
"ssl-server": {
- "ssl.server.keystore.location": "/etc/security/serverKeys/keystore.jks",
- "ssl.server.keystore.keypassword": "bigdata",
- "ssl.server.truststore.location": "/etc/security/serverKeys/all.jks",
- "ssl.server.keystore.password": "bigdata",
- "ssl.server.truststore.password": "bigdata",
- "ssl.server.truststore.type": "jks",
- "ssl.server.keystore.type": "jks",
+ "ssl.server.keystore.location": "/etc/security/serverKeys/keystore.jks",
+ "ssl.server.keystore.keypassword": "bigdata",
+ "ssl.server.truststore.location": "/etc/security/serverKeys/all.jks",
+ "ssl.server.keystore.password": "bigdata",
+ "ssl.server.truststore.password": "bigdata",
+ "ssl.server.truststore.type": "jks",
+ "ssl.server.keystore.type": "jks",
"ssl.server.truststore.reload.interval": "10000"
- },
- "ranger-site": {},
+ },
+ "ranger-site": {},
"admin-log4j": {
"content": "\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n\nlog4j.rootLogger = warn,xa_log_appender\n\n\n# xa_logger\nlog4j.appender.xa_log_appender=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.xa_log_app
ender.file=${logdir}/xa_portal.log\nlog4j.appender.xa_log_appender.datePattern='.'yyyy-MM-dd\nlog4j.appender.xa_log_appender.append=true\nlog4j.appender.xa_log_appender.layout=org.apache.log4j.PatternLayout\nlog4j.appender.xa_log_appender.layout.ConversionPattern=%d [%t] %-5p %C{6} (%F:%L) - %m%n\n# xa_log_appender : category and additivity\nlog4j.category.org.springframework=warn,xa_log_appender\nlog4j.additivity.org.springframework=false\n\nlog4j.category.org.apache.ranger=info,xa_log_appender\nlog4j.additivity.org.apache.ranger=false\n\nlog4j.category.xa=info,xa_log_appender\nlog4j.additivity.xa=false\n\n# perf_logger\nlog4j.appender.perf_appender=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.perf_appender.file=${logdir}/ranger_admin_perf.log\nlog4j.appender.perf_appender.datePattern='.'yyyy-MM-dd\nlog4j.appender.perf_appender.append=true\nlog4j.appender.perf_appender.layout=org.apache.log4j.PatternLayout\nlog4j.appender.perf_appender.layout.ConversionPattern=%d [%t]
%m%n\n\n\n# sql_appender\nlog4j.appender.sql_appender=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.sql_appender.file=${logdir}/xa_portal_sql.log\nlog4j.appender.sql_appender.datePattern='.'yyyy-MM-dd\nlog4j.appender.sql_appender.append=true\nlog4j.appender.sql_appender.layout=org.apache.log4j.PatternLayout\nlog4j.appender.sql_appender.layout.ConversionPattern=%d [%t] %-5p %C{6} (%F:%L) - %m%n\n\n# sql_appender : category and additivity\nlog4j.category.org.hibernate.SQL=warn,sql_appender\nlog4j.additivity.org.hibernate.SQL=false\n\nlog4j.category.jdbc.sqlonly=fatal,sql_appender\nlog4j.additivity.jdbc.sqlonly=false\n\nlog4j.category.jdbc.sqltiming=warn,sql_appender\nlog4j.additivity.jdbc.sqltiming=false\n\nlog4j.category.jdbc.audit=fatal,sql_appender\nlog4j.additivity.jdbc.audit=false\n\nlog4j.category.jdbc.resultset=fatal,sql_appender\nlog4j.additivity.jdbc.resultset=false\n\nlog4j.category.jdbc.connection=fatal,sql_appender\nlog4j.additivity.jdbc.connection=false"
- },
+ },
"tagsync-log4j": {
"content": "\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n\nlog4j.rootLogger = info,logFile\n\n# logFile\nlog4j.appender.logFile=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.logFile.file=${logdir}/tagsync
.log\nlog4j.appender.logFile.datePattern='.'yyyy-MM-dd\nlog4j.appender.logFile.layout=org.apache.log4j.PatternLayout\nlog4j.appender.logFile.layout.ConversionPattern=%d{dd MMM yyyy HH:mm:ss} %5p %c{1} [%t] - %L %m%n\n\n# console\nlog4j.appender.console=org.apache.log4j.ConsoleAppender\nlog4j.appender.console.Target=System.out\nlog4j.appender.console.layout=org.apache.log4j.PatternLayout\nlog4j.appender.console.layout.ConversionPattern=%d{dd MMM yyyy HH:mm:ss} %5p %c{1} [%t] - %L %m%n"
- },
+ },
"ranger-hdfs-security": {
- "ranger.plugin.hdfs.policy.source.impl": "org.apache.ranger.admin.client.RangerAdminRESTClient",
- "ranger.plugin.hdfs.service.name": "{{repo_name}}",
- "ranger.plugin.hdfs.policy.cache.dir": "/etc/ranger/{{repo_name}}/policycache",
- "ranger.plugin.hdfs.policy.pollIntervalMs": "30000",
- "ranger.plugin.hdfs.policy.rest.url": "{{policymgr_mgr_url}}",
- "ranger.plugin.hdfs.policy.rest.ssl.config.file": "/etc/hadoop/conf/ranger-policymgr-ssl.xml",
+ "ranger.plugin.hdfs.policy.source.impl": "org.apache.ranger.admin.client.RangerAdminRESTClient",
+ "ranger.plugin.hdfs.service.name": "{{repo_name}}",
+ "ranger.plugin.hdfs.policy.cache.dir": "/etc/ranger/{{repo_name}}/policycache",
+ "ranger.plugin.hdfs.policy.pollIntervalMs": "30000",
+ "ranger.plugin.hdfs.policy.rest.url": "{{policymgr_mgr_url}}",
+ "ranger.plugin.hdfs.policy.rest.ssl.config.file": "/etc/hadoop/conf/ranger-policymgr-ssl.xml",
"xasecure.add-hadoop-authorization": "true"
- },
- "usersync-properties": {},
+ },
+ "usersync-properties": {},
"zookeeper-env": {
- "zk_log_dir": "/var/log/zookeeper",
- "content": "\nexport JAVA_HOME={{java64_home}}\nexport ZOOKEEPER_HOME={{zk_home}}\nexport ZOO_LOG_DIR={{zk_log_dir}}\nexport ZOOPIDFILE={{zk_pid_file}}\nexport SERVER_JVMFLAGS={{zk_server_heapsize}}\nexport JAVA=$JAVA_HOME/bin/java\nexport CLASSPATH=$CLASSPATH:/usr/share/zookeeper/*\n\n{% if security_enabled %}\nexport SERVER_JVMFLAGS=\"$SERVER_JVMFLAGS -Djava.security.auth.login.config={{zk_server_jaas_file}}\"\nexport CLIENT_JVMFLAGS=\"$CLIENT_JVMFLAGS -Djava.security.auth.login.config={{zk_client_jaas_file}}\"\n{% endif %}",
- "zk_server_heapsize": "1024m",
- "zk_pid_dir": "/var/run/zookeeper",
+ "zk_log_dir": "/var/log/zookeeper",
+ "content": "\nexport JAVA_HOME={{java64_home}}\nexport ZOOKEEPER_HOME={{zk_home}}\nexport ZOO_LOG_DIR={{zk_log_dir}}\nexport ZOOPIDFILE={{zk_pid_file}}\nexport SERVER_JVMFLAGS={{zk_server_heapsize}}\nexport JAVA=$JAVA_HOME/bin/java\nexport CLASSPATH=$CLASSPATH:/usr/share/zookeeper/*\n\n{% if security_enabled %}\nexport SERVER_JVMFLAGS=\"$SERVER_JVMFLAGS -Djava.security.auth.login.config={{zk_server_jaas_file}}\"\nexport CLIENT_JVMFLAGS=\"$CLIENT_JVMFLAGS -Djava.security.auth.login.config={{zk_client_jaas_file}}\"\n{% endif %}",
+ "zk_server_heapsize": "1024m",
+ "zk_pid_dir": "/var/run/zookeeper",
"zk_user": "zookeeper"
},
"infra-solr-env": {
@@ -651,7 +651,7 @@
"infra_solr_kerberos_name_rules": "DEFAULT",
"infra_solr_user": "infra-solr",
"infra_solr_maxmem": "1024",
- "content": "#!/bin/bash\n# Licensed to the Apache Software Foundation (ASF) under one or more\n# contributor license agreements. See the NOTICE file distributed with\n# this work for additional information regarding copyright ownership.\n# The ASF licenses this file to You under the Apache License, Version 2.0\n# (the \"License\"); you may not use this file except in compliance with\n# the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LI
<TRUNCATED>
[13/18] ambari git commit: AMBARI-20950. HdfsResource can not handle
S3 URL when hbase.rootdir is set to S3 URL (aonishuk)
Posted by rl...@apache.org.
AMBARI-20950. HdfsResource can not handle S3 URL when hbase.rootdir is set to S3 URL (aonishuk)
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/7f3d3b21
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/7f3d3b21
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/7f3d3b21
Branch: refs/heads/branch-feature-AMBARI-20859
Commit: 7f3d3b21a961581678cb7c072ec71e5eb15d7da9
Parents: d0f7a51
Author: Andrew Onishuk <ao...@hortonworks.com>
Authored: Mon Jul 10 12:58:10 2017 +0300
Committer: Andrew Onishuk <ao...@hortonworks.com>
Committed: Mon Jul 10 12:58:10 2017 +0300
----------------------------------------------------------------------
.../HBASE/0.96.0.2.0/package/scripts/hbase.py | 12 +++++++-----
.../HBASE/0.96.0.2.0/package/scripts/params_linux.py | 3 +++
2 files changed, 10 insertions(+), 5 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ambari/blob/7f3d3b21/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase.py b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase.py
index 8ad802e..cec6b2a 100644
--- a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase.py
+++ b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase.py
@@ -17,6 +17,7 @@ See the License for the specific language governing permissions and
limitations under the License.
"""
+from urlparse import urlparse
import os
import sys
from resource_management.libraries.script.script import Script
@@ -200,11 +201,12 @@ def hbase(name=None):
owner=params.hbase_user
)
if name == "master":
- params.HdfsResource(params.hbase_hdfs_root_dir,
- type="directory",
- action="create_on_execute",
- owner=params.hbase_user
- )
+ if not params.hbase_hdfs_root_dir_protocol or params.hbase_hdfs_root_dir_protocol == urlparse(params.default_fs).scheme:
+ params.HdfsResource(params.hbase_hdfs_root_dir,
+ type="directory",
+ action="create_on_execute",
+ owner=params.hbase_user
+ )
params.HdfsResource(params.hbase_staging_dir,
type="directory",
action="create_on_execute",
http://git-wip-us.apache.org/repos/asf/ambari/blob/7f3d3b21/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/params_linux.py
index d45aea6..e05da06 100644
--- a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/params_linux.py
+++ b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/params_linux.py
@@ -17,6 +17,8 @@ See the License for the specific language governing permissions and
limitations under the License.
"""
+from urlparse import urlparse
+
import status_params
import ambari_simplejson as json # simplejson is much faster comparing to Python 2.6 json module and has the same functions set.
@@ -237,6 +239,7 @@ else:
hbase_env_sh_template = config['configurations']['hbase-env']['content']
hbase_hdfs_root_dir = config['configurations']['hbase-site']['hbase.rootdir']
+hbase_hdfs_root_dir_protocol = urlparse(hbase_hdfs_root_dir).scheme
hbase_staging_dir = "/apps/hbase/staging"
#for create_hdfs_directory
hostname = config["hostname"]
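The net effect of this patch is that the HBase master pre-creates hbase.rootdir through HdfsResource only when the rootdir URL carries no scheme or its scheme matches fs.defaultFS; a rootdir pointing at S3 (for example an s3a:// URL) is skipped. A minimal standalone sketch of that check follows; the URLs are illustrative placeholders, not values from this patch.

from urlparse import urlparse  # Python 2, as in the patch

default_fs = 'hdfs://c6401.ambari.apache.org:8020'   # hypothetical fs.defaultFS
hbase_rootdir = 's3a://my-bucket/apps/hbase/data'    # hypothetical hbase.rootdir on S3

rootdir_protocol = urlparse(hbase_rootdir).scheme    # -> 's3a'
if not rootdir_protocol or rootdir_protocol == urlparse(default_fs).scheme:
    print('schemes match: would create hbase.rootdir via HdfsResource')
else:
    print('scheme differs from the default FS: skip the HdfsResource call')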
[17/18] ambari git commit: Revert "AMBARI-21427. Assigning hosts
concurrently to same config group may fail with
"org.apache.ambari.server.controller.spi.ResourceAlreadyExistsException:
Config group already exist". (stoader)"
Posted by rl...@apache.org.
Revert "AMBARI-21427. Assigning hosts concurrently to same config group may fail with "org.apache.ambari.server.controller.spi.ResourceAlreadyExistsException: Config group already exist". (stoader)"
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/70cf77e4
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/70cf77e4
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/70cf77e4
Branch: refs/heads/branch-feature-AMBARI-20859
Commit: 70cf77e4087840e89fab50a741d36bf8747ba416
Parents: 15dd999
Author: Vitaly Brodetskyi <vb...@hortonworks.com>
Authored: Mon Jul 10 23:11:38 2017 +0300
Committer: Vitaly Brodetskyi <vb...@hortonworks.com>
Committed: Mon Jul 10 23:19:34 2017 +0300
----------------------------------------------------------------------
.../ambari/server/topology/AmbariContext.java | 81 +++++---------------
1 file changed, 19 insertions(+), 62 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ambari/blob/70cf77e4/ambari-server/src/main/java/org/apache/ambari/server/topology/AmbariContext.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/AmbariContext.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/AmbariContext.java
index dee0e6c..106d7c8 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/topology/AmbariContext.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/AmbariContext.java
@@ -1,4 +1,4 @@
-/*
+/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@@ -30,7 +30,6 @@ import java.util.SortedSet;
import java.util.TreeSet;
import java.util.concurrent.Callable;
import java.util.concurrent.atomic.AtomicLong;
-import java.util.concurrent.locks.Lock;
import javax.annotation.Nullable;
import javax.inject.Inject;
@@ -70,11 +69,9 @@ import org.apache.ambari.server.state.Cluster;
import org.apache.ambari.server.state.Clusters;
import org.apache.ambari.server.state.Config;
import org.apache.ambari.server.state.ConfigFactory;
-import org.apache.ambari.server.state.ConfigHelper;
import org.apache.ambari.server.state.DesiredConfig;
import org.apache.ambari.server.state.Host;
import org.apache.ambari.server.state.SecurityType;
-import org.apache.ambari.server.state.StackId;
import org.apache.ambari.server.state.configgroup.ConfigGroup;
import org.apache.ambari.server.utils.RetryHelper;
import org.slf4j.Logger;
@@ -82,8 +79,6 @@ import org.slf4j.LoggerFactory;
import com.google.common.collect.Iterables;
import com.google.common.collect.Sets;
-import com.google.common.util.concurrent.Striped;
-import com.google.inject.Provider;
/**
@@ -104,12 +99,6 @@ public class AmbariContext {
@Inject
ConfigFactory configFactory;
- /**
- * Used for getting configuration property values from stack and services.
- */
- @Inject
- private Provider<ConfigHelper> configHelper;
-
private static AmbariManagementController controller;
private static ClusterController clusterController;
//todo: task id's. Use existing mechanism for getting next task id sequence
@@ -123,16 +112,6 @@ public class AmbariContext {
private final static Logger LOG = LoggerFactory.getLogger(AmbariContext.class);
-
- /**
- * When config groups are created using Blueprints these are created when
- * hosts join a hostgroup and are added to the corresponding config group.
- * Since hosts join in parallel there might be a race condition in creating
- * the config group a host is to be added to. Thus we need to synchronize
- * the creation of config groups with the same name.
- */
- private Striped<Lock> configGroupCreateLock = Striped.lazyWeakLock(1);
-
public boolean isClusterKerberosEnabled(long clusterId) {
Cluster cluster;
try {
@@ -188,10 +167,9 @@ public class AmbariContext {
public void createAmbariResources(ClusterTopology topology, String clusterName, SecurityType securityType, String repoVersion) {
Stack stack = topology.getBlueprint().getStack();
- StackId stackId = new StackId(stack.getName(), stack.getVersion());
createAmbariClusterResource(clusterName, stack.getName(), stack.getVersion(), securityType, repoVersion);
- createAmbariServiceAndComponentResources(topology, clusterName, stackId, repoVersion);
+ createAmbariServiceAndComponentResources(topology, clusterName);
}
public void createAmbariClusterResource(String clusterName, String stackName, String stackVersion, SecurityType securityType, String repoVersion) {
@@ -218,8 +196,7 @@ public class AmbariContext {
}
}
- public void createAmbariServiceAndComponentResources(ClusterTopology topology, String clusterName,
- StackId stackId, String repositoryVersion) {
+ public void createAmbariServiceAndComponentResources(ClusterTopology topology, String clusterName) {
Collection<String> services = topology.getBlueprint().getServices();
try {
@@ -228,13 +205,11 @@ public class AmbariContext {
} catch (AmbariException e) {
throw new RuntimeException("Failed to persist service and component resources: " + e, e);
}
- Set<ServiceRequest> serviceRequests = new HashSet<>();
- Set<ServiceComponentRequest> componentRequests = new HashSet<>();
+ Set<ServiceRequest> serviceRequests = new HashSet<ServiceRequest>();
+ Set<ServiceComponentRequest> componentRequests = new HashSet<ServiceComponentRequest>();
for (String service : services) {
String credentialStoreEnabled = topology.getBlueprint().getCredentialStoreEnabled(service);
- serviceRequests.add(new ServiceRequest(clusterName, service, stackId.getStackId(),
- repositoryVersion, null, credentialStoreEnabled));
-
+ serviceRequests.add(new ServiceRequest(clusterName, service, null, credentialStoreEnabled));
for (String component : topology.getBlueprint().getComponents(service)) {
String recoveryEnabled = topology.getBlueprint().getRecoveryEnabled(service, component);
componentRequests.add(new ServiceComponentRequest(clusterName, service, component, null, recoveryEnabled));
@@ -248,14 +223,14 @@ public class AmbariContext {
}
// set all services state to INSTALLED->STARTED
// this is required so the user can start failed services at the service level
- Map<String, Object> installProps = new HashMap<>();
+ Map<String, Object> installProps = new HashMap<String, Object>();
installProps.put(ServiceResourceProvider.SERVICE_SERVICE_STATE_PROPERTY_ID, "INSTALLED");
installProps.put(ServiceResourceProvider.SERVICE_CLUSTER_NAME_PROPERTY_ID, clusterName);
- Map<String, Object> startProps = new HashMap<>();
+ Map<String, Object> startProps = new HashMap<String, Object>();
startProps.put(ServiceResourceProvider.SERVICE_SERVICE_STATE_PROPERTY_ID, "STARTED");
startProps.put(ServiceResourceProvider.SERVICE_CLUSTER_NAME_PROPERTY_ID, clusterName);
- Predicate predicate = new EqualsPredicate<>(
- ServiceResourceProvider.SERVICE_CLUSTER_NAME_PROPERTY_ID, clusterName);
+ Predicate predicate = new EqualsPredicate<String>(
+ ServiceResourceProvider.SERVICE_CLUSTER_NAME_PROPERTY_ID, clusterName);
try {
getServiceResourceProvider().updateResources(
new RequestImpl(null, Collections.singleton(installProps), null, null), predicate);
@@ -287,9 +262,9 @@ public class AmbariContext {
}
String clusterName = cluster.getClusterName();
- Map<String, Object> properties = new HashMap<>();
+ Map<String, Object> properties = new HashMap<String, Object>();
properties.put(HostResourceProvider.HOST_CLUSTER_NAME_PROPERTY_ID, clusterName);
- properties.put(HostResourceProvider.HOST_HOST_NAME_PROPERTY_ID, hostName);
+ properties.put(HostResourceProvider.HOST_NAME_PROPERTY_ID, hostName);
properties.put(HostResourceProvider.HOST_RACK_INFO_PROPERTY_ID, host.getRackInfo());
try {
@@ -300,7 +275,7 @@ public class AmbariContext {
hostName, e.toString()), e);
}
- final Set<ServiceComponentHostRequest> requests = new HashSet<>();
+ final Set<ServiceComponentHostRequest> requests = new HashSet<ServiceComponentHostRequest>();
for (Map.Entry<String, Collection<String>> entry : components.entrySet()) {
String service = entry.getKey();
@@ -353,17 +328,11 @@ public class AmbariContext {
}
public void registerHostWithConfigGroup(final String hostName, final ClusterTopology topology, final String groupName) {
- String qualifiedGroupName = getConfigurationGroupName(topology.getBlueprint().getName(), groupName);
-
- Lock configGroupLock = configGroupCreateLock.get(qualifiedGroupName);
-
try {
- configGroupLock.lock();
-
boolean hostAdded = RetryHelper.executeWithRetry(new Callable<Boolean>() {
@Override
public Boolean call() throws Exception {
- return addHostToExistingConfigGroups(hostName, topology, qualifiedGroupName);
+ return addHostToExistingConfigGroups(hostName, topology, groupName);
}
});
if (!hostAdded) {
@@ -373,9 +342,6 @@ public class AmbariContext {
LOG.error("Unable to register config group for host: ", e);
throw new RuntimeException("Unable to register config group for host: " + hostName);
}
- finally {
- configGroupLock.unlock();
- }
}
public RequestStatusResponse installHost(String hostName, String clusterName, Collection<String> skipInstallForComponents, Collection<String> dontSkipInstallForComponents, boolean skipFailure) {
@@ -583,7 +549,7 @@ public class AmbariContext {
/**
* Add the new host to an existing config group.
*/
- private boolean addHostToExistingConfigGroups(String hostName, ClusterTopology topology, String configGroupName) {
+ private boolean addHostToExistingConfigGroups(String hostName, ClusterTopology topology, String groupName) {
boolean addedHost = false;
Clusters clusters;
Cluster cluster;
@@ -597,8 +563,9 @@ public class AmbariContext {
// I don't know of a method to get config group by name
//todo: add a method to get config group by name
Map<Long, ConfigGroup> configGroups = cluster.getConfigGroups();
+ String qualifiedGroupName = getConfigurationGroupName(topology.getBlueprint().getName(), groupName);
for (ConfigGroup group : configGroups.values()) {
- if (group.getName().equals(configGroupName)) {
+ if (group.getName().equals(qualifiedGroupName)) {
try {
Host host = clusters.getHost(hostName);
addedHost = true;
@@ -622,7 +589,7 @@ public class AmbariContext {
* and the hosts associated with the host group are assigned to the config group.
*/
private void createConfigGroupsAndRegisterHost(ClusterTopology topology, String groupName) throws AmbariException {
- Map<String, Map<String, Config>> groupConfigs = new HashMap<>();
+ Map<String, Map<String, Config>> groupConfigs = new HashMap<String, Map<String, Config>>();
Stack stack = topology.getBlueprint().getStack();
// get the host-group config with cluster creation template overrides
@@ -641,7 +608,7 @@ public class AmbariContext {
//todo: attributes
Map<String, Config> serviceConfigs = groupConfigs.get(service);
if (serviceConfigs == null) {
- serviceConfigs = new HashMap<>();
+ serviceConfigs = new HashMap<String, Config>();
groupConfigs.put(service, serviceConfigs);
}
serviceConfigs.put(type, config);
@@ -702,16 +669,6 @@ public class AmbariContext {
return String.format("%s:%s", bpName, hostGroupName);
}
- /**
- * Gets an instance of {@link ConfigHelper} for classes which are not
- * dependency injected.
- *
- * @return a {@link ConfigHelper} instance.
- */
- public ConfigHelper getConfigHelper() {
- return configHelper.get();
- }
-
private synchronized HostResourceProvider getHostResourceProvider() {
if (hostResourceProvider == null) {
hostResourceProvider = (HostResourceProvider)
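For reference, the synchronization this revert removes keyed a lock on the qualified config-group name (via Guava's Striped<Lock>) so that hosts registering in parallel could not both attempt to create the same config group. A rough, hypothetical Python illustration of that per-name locking pattern, not part of the patch:

import threading

_registry_lock = threading.Lock()
_group_locks = {}
_existing_groups = set()

def _lock_for(group_name):
    # Get or create the per-group lock under a global guard.
    with _registry_lock:
        return _group_locks.setdefault(group_name, threading.Lock())

def register_host_with_config_group(host_name, qualified_group_name):
    # Serialize create-or-add per group name so the group is created exactly once.
    with _lock_for(qualified_group_name):
        if qualified_group_name not in _existing_groups:
            _existing_groups.add(qualified_group_name)
        # ... add host_name to the now-existing group here ...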
[02/18] ambari git commit: AMBARI-21400. Upgrade Infra Solr version
from 5.5.2 to 6.6.x (oleewere)
Posted by rl...@apache.org.
AMBARI-21400. Upgrade Infra Solr version from 5.5.2 to 6.6.x (oleewere)
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/a795f38c
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/a795f38c
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/a795f38c
Branch: refs/heads/branch-feature-AMBARI-20859
Commit: a795f38cd49c2c55c837b0daecaa63a67015d0eb
Parents: 1939dab
Author: oleewere <ol...@gmail.com>
Authored: Thu Jul 6 20:28:11 2017 +0200
Committer: oleewere <ol...@gmail.com>
Committed: Fri Jul 7 14:25:24 2017 +0200
----------------------------------------------------------------------
.../libraries/functions/solr_cloud_util.py | 10 +
.../src/main/resources/solr | 826 +++++++++++++++----
.../ambari/infra/solr/AmbariSolrCloudCLI.java | 14 +
.../infra/solr/AmbariSolrCloudClient.java | 8 +
.../commands/RemoveAdminHandlersCommand.java | 46 ++
.../commands/SetClusterPropertyZkCommand.java | 6 +-
.../InfraRuleBasedAuthorizationPluginTest.java | 5 +
ambari-infra/pom.xml | 2 +-
.../configsets/audit_logs/conf/solrconfig.xml | 3 +-
.../configsets/hadoop_logs/conf/solrconfig.xml | 3 +-
.../main/configsets/history/conf/solrconfig.xml | 3 +-
.../logsearch/dao/SolrSchemaFieldDao.java | 2 +-
ambari-logsearch/docker/Dockerfile | 2 +-
ambari-logsearch/docker/bin/start.sh | 4 +-
ambari-logsearch/pom.xml | 2 +-
.../server/upgrade/UpgradeCatalog300.java | 18 +
.../0.1.0/package/scripts/params.py | 3 +
.../0.1.0/package/scripts/setup_infra_solr.py | 17 +-
.../properties/audit_logs-solrconfig.xml.j2 | 3 +-
.../properties/service_logs-solrconfig.xml.j2 | 3 +-
.../server/upgrade/UpgradeCatalog300Test.java | 33 +
.../stacks/2.4/AMBARI_INFRA/test_infra_solr.py | 3 +
22 files changed, 836 insertions(+), 180 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ambari/blob/a795f38c/ambari-common/src/main/python/resource_management/libraries/functions/solr_cloud_util.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/functions/solr_cloud_util.py b/ambari-common/src/main/python/resource_management/libraries/functions/solr_cloud_util.py
index 1c5432b..12356ed 100644
--- a/ambari-common/src/main/python/resource_management/libraries/functions/solr_cloud_util.py
+++ b/ambari-common/src/main/python/resource_management/libraries/functions/solr_cloud_util.py
@@ -190,6 +190,16 @@ def secure_solr_znode(zookeeper_quorum, solr_znode, jaas_file, java64_home, sasl
secure_solr_znode_cmd = format('{solr_cli_prefix} --secure-solr-znode --jaas-file {jaas_file} --sasl-users {sasl_users_str}')
Execute(secure_solr_znode_cmd)
+def remove_admin_handlers(zookeeper_quorum, solr_znode, java64_home, collection, jaas_file, retry = 5, interval = 10):
+ """
+ Remove "solr.admin.AdminHandlers" request handler from collection config. Required for migrating to Solr 6 from Solr 5.
+ """
+ solr_cli_prefix = __create_solr_cloud_cli_prefix(zookeeper_quorum, solr_znode, java64_home)
+ remove_admin_handlers_cmd = format('{solr_cli_prefix} --remove-admin-handlers --collection {collection} --retry {retry} --interval {interval}')
+ if jaas_file is not None:
+ remove_admin_handlers_cmd+=format(' --jaas-file {jaas_file}')
+ Execute(remove_admin_handlers_cmd)
+
def default_config(config, name, default_value):
subdicts = filter(None, name.split('/'))
if not config:
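The helper added above wraps the solrCloudCli call that removes the solr.admin.AdminHandlers request handler from a collection's config during the Solr 5 to 6 migration, appending a JAAS file argument only when one is supplied. A minimal, hypothetical invocation from an Ambari service script could look like this; the quorum, znode, JDK path and collection name are placeholders, not values taken from the patch.

from resource_management.libraries.functions import solr_cloud_util

# Assumed example arguments; on a kerberized cluster jaas_file would point at
# the infra-solr JAAS file instead of None.
solr_cloud_util.remove_admin_handlers(
  zookeeper_quorum='c6401.ambari.apache.org:2181',
  solr_znode='/infra-solr',
  java64_home='/usr/jdk64/jdk1.8.0_112',
  collection='hadoop_logs',
  jaas_file=None
)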
http://git-wip-us.apache.org/repos/asf/ambari/blob/a795f38c/ambari-infra/ambari-infra-assembly/src/main/resources/solr
----------------------------------------------------------------------
diff --git a/ambari-infra/ambari-infra-assembly/src/main/resources/solr b/ambari-infra/ambari-infra-assembly/src/main/resources/solr
old mode 100644
new mode 100755
index bf504d9..6f2de8f
--- a/ambari-infra/ambari-infra-assembly/src/main/resources/solr
+++ b/ambari-infra/ambari-infra-assembly/src/main/resources/solr
@@ -49,6 +49,9 @@ SOLR_SCRIPT="$0"
verbose=false
THIS_OS=`uname -s`
+# What version of Java is required to run this version of Solr.
+JAVA_VER_REQ="1.8"
+
stop_all=false
# for now, we don't support running this script from cygwin due to problems
@@ -84,6 +87,7 @@ if [ -z "$SOLR_INCLUDE" ]; then
/etc/default/solr.in.sh \
/opt/solr/solr.in.sh; do
if [ -r "$include" ]; then
+ SOLR_INCLUDE="$include"
. "$include"
break
fi
@@ -116,16 +120,44 @@ else
JAVA=java
fi
-# test that Java exists and is executable on this server
-"$JAVA" -version >/dev/null 2>&1 || {
+if [ -z "$SOLR_STOP_WAIT" ]; then
+ SOLR_STOP_WAIT=180
+fi
+# test that Java exists, is executable and correct version
+JAVA_VER=$("$JAVA" -version 2>&1)
+if [[ $? -ne 0 ]] ; then
echo >&2 "Java not found, or an error was encountered when running java."
- echo >&2 "A working Java 7 or later is required to run Solr!"
- echo >&2 "Please install Java or fix JAVA_HOME before running this script."
- echo >&2 "Command that we tried: '${JAVA} -version'"
+ echo >&2 "A working Java $JAVA_VER_REQ JRE is required to run Solr!"
+ echo >&2 "Please install latest version of Java $JAVA_VER_REQ or set JAVA_HOME properly."
+ echo >&2 "Command that we tried: '${JAVA} -version', with response:"
+ echo >&2 "${JAVA_VER}"
+ echo >&2
+ echo >&2 "Debug information:"
+ echo >&2 "JAVA_HOME: ${JAVA_HOME:-N/A}"
echo >&2 "Active Path:"
echo >&2 "${PATH}"
exit 1
-}
+else
+ JAVA_VER_NUM=$(echo $JAVA_VER | head -1 | awk -F '"' '/version/ {print $2}')
+ if [[ "$JAVA_VER_NUM" < "$JAVA_VER_REQ" ]] ; then
+ echo >&2 "Your current version of Java is too old to run this version of Solr"
+ echo >&2 "We found version $JAVA_VER_NUM, using command '${JAVA} -version', with response:"
+ echo >&2 "${JAVA_VER}"
+ echo >&2
+ echo >&2 "Please install latest version of Java $JAVA_VER_REQ or set JAVA_HOME properly."
+ echo >&2
+ echo >&2 "Debug information:"
+ echo >&2 "JAVA_HOME: ${JAVA_HOME:-N/A}"
+ echo >&2 "Active Path:"
+ echo >&2 "${PATH}"
+ exit 1
+ fi
+ JAVA_VENDOR="Oracle"
+ if [ "`echo $JAVA_VER | grep -i "IBM J9"`" != "" ]; then
+ JAVA_VENDOR="IBM J9"
+ fi
+fi
+
# Select HTTP OR HTTPS related configurations
SOLR_URL_SCHEME=http
@@ -134,30 +166,109 @@ SOLR_SSL_OPTS=""
if [ -n "$SOLR_SSL_KEY_STORE" ]; then
SOLR_JETTY_CONFIG+=("--module=https")
SOLR_URL_SCHEME=https
- SOLR_SSL_OPTS=" -Dsolr.jetty.keystore=$SOLR_SSL_KEY_STORE \
- -Dsolr.jetty.keystore.password=$SOLR_SSL_KEY_STORE_PASSWORD \
- -Dsolr.jetty.truststore=$SOLR_SSL_TRUST_STORE \
- -Dsolr.jetty.truststore.password=$SOLR_SSL_TRUST_STORE_PASSWORD \
- -Dsolr.jetty.ssl.needClientAuth=$SOLR_SSL_NEED_CLIENT_AUTH \
- -Dsolr.jetty.ssl.wantClientAuth=$SOLR_SSL_WANT_CLIENT_AUTH"
+ SOLR_SSL_OPTS+=" -Dsolr.jetty.keystore=$SOLR_SSL_KEY_STORE"
+ if [ -n "$SOLR_SSL_KEY_STORE_PASSWORD" ]; then
+ SOLR_SSL_OPTS+=" -Dsolr.jetty.keystore.password=$SOLR_SSL_KEY_STORE_PASSWORD"
+ fi
+ if [ -n "$SOLR_SSL_KEY_STORE_TYPE" ]; then
+ SOLR_SSL_OPTS+=" -Dsolr.jetty.keystore.type=$SOLR_SSL_KEY_STORE_TYPE"
+ fi
+
+ if [ -n "$SOLR_SSL_TRUST_STORE" ]; then
+ SOLR_SSL_OPTS+=" -Dsolr.jetty.truststore=$SOLR_SSL_TRUST_STORE"
+ fi
+ if [ -n "$SOLR_SSL_TRUST_STORE_PASSWORD" ]; then
+ SOLR_SSL_OPTS+=" -Dsolr.jetty.truststore.password=$SOLR_SSL_TRUST_STORE_PASSWORD"
+ fi
+ if [ -n "$SOLR_SSL_TRUST_STORE_TYPE" ]; then
+ SOLR_SSL_OPTS+=" -Dsolr.jetty.truststore.type=$SOLR_SSL_TRUST_STORE_TYPE"
+ fi
+
+ if [ -n "$SOLR_SSL_NEED_CLIENT_AUTH" ]; then
+ SOLR_SSL_OPTS+=" -Dsolr.jetty.ssl.needClientAuth=$SOLR_SSL_NEED_CLIENT_AUTH"
+ fi
+ if [ -n "$SOLR_SSL_WANT_CLIENT_AUTH" ]; then
+ SOLR_SSL_OPTS+=" -Dsolr.jetty.ssl.wantClientAuth=$SOLR_SSL_WANT_CLIENT_AUTH"
+ fi
+
if [ -n "$SOLR_SSL_CLIENT_KEY_STORE" ]; then
- SOLR_SSL_OPTS+=" -Djavax.net.ssl.keyStore=$SOLR_SSL_CLIENT_KEY_STORE \
- -Djavax.net.ssl.keyStorePassword=$SOLR_SSL_CLIENT_KEY_STORE_PASSWORD \
- -Djavax.net.ssl.trustStore=$SOLR_SSL_CLIENT_TRUST_STORE \
- -Djavax.net.ssl.trustStorePassword=$SOLR_SSL_CLIENT_TRUST_STORE_PASSWORD"
+ SOLR_SSL_OPTS+=" -Djavax.net.ssl.keyStore=$SOLR_SSL_CLIENT_KEY_STORE"
+
+ if [ -n "$SOLR_SSL_CLIENT_KEY_STORE_PASSWORD" ]; then
+ SOLR_SSL_OPTS+=" -Djavax.net.ssl.keyStorePassword=$SOLR_SSL_CLIENT_KEY_STORE_PASSWORD"
+ fi
+ if [ -n "$SOLR_SSL_CLIENT_KEY_STORE_TYPE" ]; then
+ SOLR_SSL_OPTS+=" -Djavax.net.ssl.keyStoreType=$SOLR_SSL_CLIENT_KEY_STORE_TYPE"
+ fi
+ else
+ if [ -n "$SOLR_SSL_KEY_STORE" ]; then
+ SOLR_SSL_OPTS+=" -Djavax.net.ssl.keyStore=$SOLR_SSL_KEY_STORE"
+ fi
+ if [ -n "$SOLR_SSL_KEY_STORE_PASSWORD" ]; then
+ SOLR_SSL_OPTS+=" -Djavax.net.ssl.keyStorePassword=$SOLR_SSL_KEY_STORE_PASSWORD"
+ fi
+ if [ -n "$SOLR_SSL_KEY_STORE_TYPE" ]; then
+ SOLR_SSL_OPTS+=" -Djavax.net.ssl.keyStoreType=$SOLR_SSL_KEYSTORE_TYPE"
+ fi
+ fi
+
+ if [ -n "$SOLR_SSL_CLIENT_TRUST_STORE" ]; then
+ SOLR_SSL_OPTS+=" -Djavax.net.ssl.trustStore=$SOLR_SSL_CLIENT_TRUST_STORE"
+
+ if [ -n "$SOLR_SSL_CLIENT_TRUST_STORE_PASSWORD" ]; then
+ SOLR_SSL_OPTS+=" -Djavax.net.ssl.trustStorePassword=$SOLR_SSL_CLIENT_TRUST_STORE_PASSWORD"
+ fi
+
+ if [ -n "$SOLR_SSL_CLIENT_TRUST_STORE_TYPE" ]; then
+ SOLR_SSL_OPTS+=" -Djavax.net.ssl.trustStoreType=$SOLR_SSL_CLIENT_TRUST_STORE_TYPE"
+ fi
else
- SOLR_SSL_OPTS+=" -Djavax.net.ssl.keyStore=$SOLR_SSL_KEY_STORE \
- -Djavax.net.ssl.keyStorePassword=$SOLR_SSL_KEY_STORE_PASSWORD \
- -Djavax.net.ssl.trustStore=$SOLR_SSL_TRUST_STORE \
- -Djavax.net.ssl.trustStorePassword=$SOLR_SSL_TRUST_STORE_PASSWORD"
+ if [ -n "$SOLR_SSL_TRUST_STORE" ]; then
+ SOLR_SSL_OPTS+=" -Djavax.net.ssl.trustStore=$SOLR_SSL_TRUST_STORE"
+ fi
+
+ if [ -n "$SOLR_SSL_TRUST_STORE_PASSWORD" ]; then
+ SOLR_SSL_OPTS+=" -Djavax.net.ssl.trustStorePassword=$SOLR_SSL_TRUST_STORE_PASSWORD"
+ fi
+
+ if [ -n "$SOLR_SSL_TRUST_STORE_TYPE" ]; then
+ SOLR_SSL_OPTS+=" -Djavax.net.ssl.trustStoreType=$SOLR_SSL_TRUST_STORE_TYPE"
+ fi
fi
else
SOLR_JETTY_CONFIG+=("--module=http")
fi
# Authentication options
+if [ -z "$SOLR_AUTH_TYPE" ] && [ -n "$SOLR_AUTHENTICATION_OPTS" ]; then
+ echo "WARNING: SOLR_AUTHENTICATION_OPTS environment variable configured without associated SOLR_AUTH_TYPE variable"
+ echo " Please configure SOLR_AUTH_TYPE environment variable with the authentication type to be used."
+ echo " Currently supported authentication types are [kerberos, basic]"
+fi
+
+if [ -n "$SOLR_AUTH_TYPE" ] && [ -n "$SOLR_AUTHENTICATION_CLIENT_CONFIGURER" ]; then
+ echo "WARNING: SOLR_AUTHENTICATION_CLIENT_CONFIGURER and SOLR_AUTH_TYPE environment variables are configured together."
+ echo " Use SOLR_AUTH_TYPE environment variable to configure authentication type to be used. "
+ echo " Currently supported authentication types are [kerberos, basic]"
+ echo " The value of SOLR_AUTHENTICATION_CLIENT_CONFIGURER environment variable will be ignored"
+fi
+
+if [ -n "$SOLR_AUTH_TYPE" ]; then
+ case "$(echo $SOLR_AUTH_TYPE | awk '{print tolower($0)}')" in
+ basic)
+ SOLR_AUTHENTICATION_CLIENT_CONFIGURER="org.apache.solr.client.solrj.impl.PreemptiveBasicAuthConfigurer"
+ ;;
+ kerberos)
+ SOLR_AUTHENTICATION_CLIENT_CONFIGURER="org.apache.solr.client.solrj.impl.Krb5HttpClientConfigurer"
+ ;;
+ *)
+ echo "ERROR: Value specified for SOLR_AUTH_TYPE environment variable is invalid."
+ exit 1
+ esac
+fi
+
if [ "$SOLR_AUTHENTICATION_CLIENT_CONFIGURER" != "" ]; then
- AUTHC_CLIENT_CONFIGURER_ARG="-Dsolr.authentication.httpclient.configurer=$SOLR_AUTHENTICATION_CLIENT_CONFIGURER"
+ AUTHC_CLIENT_CONFIGURER_ARG="-Dsolr.httpclient.builder.factory=$SOLR_AUTHENTICATION_CLIENT_CONFIGURER"
fi
AUTHC_OPTS="$AUTHC_CLIENT_CONFIGURER_ARG $SOLR_AUTHENTICATION_OPTS"
@@ -179,7 +290,7 @@ function print_usage() {
if [ -z "$CMD" ]; then
echo ""
echo "Usage: solr COMMAND OPTIONS"
- echo " where COMMAND is one of: start, stop, restart, status, healthcheck, create, create_core, create_collection, delete, version, zk"
+ echo " where COMMAND is one of: start, stop, restart, status, healthcheck, create, create_core, create_collection, delete, version, zk, auth"
echo ""
echo " Standalone server example (start Solr running in the background on port 8984):"
echo ""
@@ -206,7 +317,7 @@ function print_usage() {
echo ""
echo " -p <port> Specify the port to start the Solr HTTP listener on; default is 8983"
echo " The specified port (SOLR_PORT) will also be used to determine the stop port"
- echo " STOP_PORT=(\$SOLR_PORT-1000) and JMX RMI listen port RMI_PORT=(1\$SOLR_PORT). "
+ echo " STOP_PORT=(\$SOLR_PORT-1000) and JMX RMI listen port RMI_PORT=(\$SOLR_PORT+10000). "
echo " For instance, if you set -p 8985, then the STOP_PORT=7985 and RMI_PORT=18985"
echo ""
echo " -d <dir> Specify the Solr server directory; defaults to server"
@@ -237,7 +348,9 @@ function print_usage() {
echo ""
echo " -noprompt Don't prompt for input; accept all defaults when running examples that accept user input"
echo ""
- echo " -V Verbose messages from this script"
+ echo " -v and -q Verbose (-v) or quiet (-q) logging. Sets default log level to DEBUG or WARN instead of INFO"
+ echo ""
+ echo " -V or -verbose Verbose messages from this script"
echo ""
elif [ "$CMD" == "stop" ]; then
echo ""
@@ -354,28 +467,132 @@ function print_usage() {
echo " Solr instance and will use the port of the first server it finds."
echo ""
elif [ "$CMD" == "zk" ]; then
- echo "Usage: solr zk [-upconfig|-downconfig] [-d confdir] [-n configName] [-z zkHost]"
+ print_short_zk_usage ""
+ echo " Be sure to check the Solr logs in case of errors."
+ echo ""
+ echo " -z zkHost Optional Zookeeper connection string for all commands. If specified it"
+ echo " overrides the 'ZK_HOST=...'' defined in solr.in.sh."
+ echo ""
+ echo " upconfig uploads a configset from the local machine to Zookeeper. (Backcompat: -upconfig)"
+ echo ""
+ echo " downconfig downloads a configset from Zookeeper to the local machine. (Backcompat: -downconfig)"
+ echo ""
+ echo " -n configName   Name of the configset in Zookeeper that will be the destination of"
+ echo " 'upconfig' and the source for 'downconfig'."
+ echo ""
+ echo " -d confdir      The local directory the configuration will be uploaded from for"
+ echo " 'upconfig' or downloaded to for 'downconfig'. If 'confdir' is a child of"
+ echo " ...solr/server/solr/configsets' then the configs will be copied from/to"
+ echo " that directory. Otherwise it is interpreted as a simple local path."
+ echo ""
+ echo " cp copies files or folders to/from Zookeeper or Zokeeper -> Zookeeper"
+ echo " -r  Recursively copy <src> to <dst>. Command will fail if <src> has children and "
+ echo " -r is not specified. Optional"
+ echo ""
+ echo " <src>, <dest> : [file:][/]path/to/local/file or zk:/path/to/zk/node"
+ echo " NOTE: <src> and <dest> may both be Zookeeper resources prefixed by 'zk:'"
+ echo " When <src> is a zk resource, <dest> may be '.'"
+ echo " If <dest> ends with '/', then <dest> will be a local folder or parent znode and the last"
+ echo " element of the <src> path will be appended unless <src> also ends in a slash. "
+ echo " <dest> may be zk:, which may be useful when using the cp -r form to backup/restore "
+ echo " the entire zk state."
+ echo " You must enclose local paths that end in a wildcard in quotes or just"
+ echo " end the local path in a slash. That is,"
+ echo " 'bin/solr zk cp -r /some/dir/ zk:/ -z localhost:2181' is equivalent to"
+ echo " 'bin/solr zk cp -r \"/some/dir/*\" zk:/ -z localhost:2181'"
+ echo " but 'bin/solr zk cp -r /some/dir/* zk:/ -z localhost:2181' will throw an error"
+ echo ""
+ echo " here's an example of backup/restore for a ZK configuration:"
+ echo " to copy to local: 'bin/solr zk cp -r zk:/ /some/dir -z localhost:2181'"
+ echo " to restore to ZK: 'bin/solr zk cp -r /some/dir/ zk:/ -z localhost:2181'"
+ echo ""
+ echo " The 'file:' prefix is stripped, thus 'file:/wherever' specifies an absolute local path and"
+ echo " 'file:somewhere' specifies a relative local path. All paths on Zookeeper are absolute."
+ echo ""
+ echo " Zookeeper nodes CAN have data, so moving a single file to a parent znode"
+ echo " will overlay the data on the parent Znode so specifying the trailing slash"
+ echo " can be important."
+ echo ""
+ echo " Wildcards are supported when copying from local, trailing only and must be quoted."
+ echo ""
+ echo " rm deletes files or folders on Zookeeper"
+ echo " -r     Recursively delete if <path> is a directory. Command will fail if <path>"
+ echo " has children and -r is not specified. Optional"
+ echo " <path>Â : [zk:]/path/to/zk/node. <path> may not be the root ('/')"
+ echo ""
+ echo " mv moves (renames) znodes on Zookeeper"
+ echo " <src>, <dest> : Zookeeper nodes, the 'zk:' prefix is optional."
+ echo " If <dest> ends with '/', then <dest> will be a parent znode"
+ echo " and the last element of the <src> path will be appended."
+ echo " Zookeeper nodes CAN have data, so moving a single file to a parent znode"
+ echo " will overlay the data on the parent Znode so specifying the trailing slash"
+ echo " is important."
+ echo ""
+ echo " ls lists the znodes on Zookeeper"
+ echo " -r recursively descends the path listing all znodes. Optional"
+ echo " <path>: The Zookeeper path to use as the root."
+ echo ""
+ echo " Only the node names are listed, not data"
+ echo ""
+ echo " mkroot makes a znode on Zookeeper with no data. Can be used to make a path of arbitrary"
+ echo " depth but primarily intended to create a 'chroot'."
echo ""
- echo " -upconfig to move a configset from the local machine to Zookeeper."
+ echo " <path>: The Zookeeper path to create. Leading slash is assumed if not present."
+ echo " Intermediate nodes are created as needed if not present."
echo ""
- echo " -downconfig to move a configset from Zookeeper to the local machine."
+ elif [ "$CMD" == "auth" ]; then
echo ""
- echo " -n configName Name of the configset in Zookeeper that will be the destinatino of"
- echo " 'upconfig' and the source for 'downconfig'."
+ echo "Usage: solr auth enable [-type basicAuth] -credentials user:pass [-blockUnknown <true|false>] [-updateIncludeFileOnly <true|false>]"
+ echo " solr auth enable [-type basicAuth] -prompt <true|false> [-blockUnknown <true|false>] [-updateIncludeFileOnly <true|false>]"
+ echo " solr auth disable [-updateIncludeFileOnly <true|false>]"
echo ""
- echo " -d confdir The local directory the configuration will be uploaded from for"
- echo " 'upconfig' or downloaded to for 'downconfig'. For 'upconfig', this"
- echo " can be one of the example configsets, basic_configs, data_driven_schema_configs or"
- echo " sample_techproducts_configs or an arbitrary directory."
+ echo " -type <type> The authentication mechanism to enable. Defaults to 'basicAuth'."
echo ""
- echo " -z zkHost Zookeeper connection string."
+ echo " -credentials <user:pass> The username and password of the initial user"
+ echo " Note: only one of -prompt or -credentials must be provided"
echo ""
- echo " NOTE: Solr must have been started least once (or have it running) before using this command."
- echo " This initialized Zookeeper for Solr"
+ echo " -prompt <true|false> Prompts the user to provide the credentials"
+ echo " Note: only one of -prompt or -credentials must be provided"
+ echo ""
+ echo " -blockUnknown <true|false> When true, this blocks out access to unauthenticated users. When not provided,"
+ echo " this defaults to false (i.e. unauthenticated users can access all endpoints, except the"
+ echo " operations like collection-edit, security-edit, core-admin-edit etc.). Check the reference"
+ echo " guide for Basic Authentication for more details."
+ echo ""
+ echo " -updateIncludeFileOnly <true|false> Only update the solr.in.sh or solr.in.cmd file, and skip actual enabling/disabling"
+ echo " authentication (i.e. don't update security.json)"
+ echo ""
+ echo " -z zkHost Zookeeper connection string"
+ echo ""
+ echo " -d <dir> Specify the Solr server directory"
+ echo ""
+ echo " -s <dir> Specify the Solr home directory. This is where any credentials or authentication"
+ echo " configuration files (e.g. basicAuth.conf) would be placed."
echo ""
fi
} # end print_usage
+function print_short_zk_usage() {
+
+ if [ "$1" != "" ]; then
+ echo -e "\nERROR: $1\n"
+ fi
+
+ echo " Usage: solr zk upconfig|downconfig -d <confdir> -n <configName> [-z zkHost]"
+ echo " solr zk cp [-r] <src> <dest> [-z zkHost]"
+ echo " solr zk rm [-r] <path> [-z zkHost]"
+ echo " solr zk mv <src> <dest> [-z zkHost]"
+ echo " solr zk ls [-r] <path> [-z zkHost]"
+ echo " solr zk mkroot <path> [-z zkHost]"
+ echo ""
+
+ if [ "$1" == "" ]; then
+ echo "Type bin/solr zk -help for full usage help"
+ else
+ exit 1
+ fi
+}
+
# used to show the script is still alive when waiting on work to complete
function spinner() {
local pid=$1
@@ -407,7 +624,7 @@ function solr_pid_by_port() {
# extract the value of the -Djetty.port parameter from a running Solr process
function jetty_port() {
SOLR_PID="$1"
- SOLR_PROC=`ps auxww | grep -w $SOLR_PID | grep start\.jar | grep jetty.port`
+ SOLR_PROC=`ps auxww | grep -w $SOLR_PID | grep start\.jar | grep jetty\.port`
IFS=' ' read -a proc_args <<< "$SOLR_PROC"
for arg in "${proc_args[@]}"
do
@@ -455,10 +672,10 @@ function get_info() {
done < <(find "$SOLR_PID_DIR" -name "solr-*.pid" -type f)
else
# no pid files but check using ps just to be sure
- numSolrs=`ps auxww | grep start\.jar | grep solr.solr.home | grep -v grep | wc -l | sed -e 's/^[ \t]*//'`
+ numSolrs=`ps auxww | grep start\.jar | grep solr\.solr\.home | grep -v grep | wc -l | sed -e 's/^[ \t]*//'`
if [ "$numSolrs" != "0" ]; then
echo -e "\nFound $numSolrs Solr nodes: "
- PROCESSES=$(ps auxww | grep start\.jar | grep solr.solr.home | grep -v grep | awk '{print $2}' | sort -r)
+ PROCESSES=$(ps auxww | grep start\.jar | grep solr\.solr\.home | grep -v grep | awk '{print $2}' | sort -r)
for ID in $PROCESSES
do
port=`jetty_port "$ID"`
@@ -490,9 +707,24 @@ function stop_solr() {
SOLR_PID="$4"
if [ "$SOLR_PID" != "" ]; then
- echo -e "Sending stop command to Solr running on port $SOLR_PORT ... waiting 5 seconds to allow Jetty process $SOLR_PID to stop gracefully."
+ echo -e "Sending stop command to Solr running on port $SOLR_PORT ... waiting up to $SOLR_STOP_WAIT seconds to allow Jetty process $SOLR_PID to stop gracefully."
"$JAVA" $SOLR_SSL_OPTS $AUTHC_OPTS -jar "$DIR/start.jar" "STOP.PORT=$STOP_PORT" "STOP.KEY=$STOP_KEY" --stop || true
- (sleep 5) &
+ (loops=0
+ while true
+ do
+ CHECK_PID=`ps auxww | awk '{print $2}' | grep -w $SOLR_PID | sort -r | tr -d ' '`
+ if [ "$CHECK_PID" != "" ]; then
+ slept=$((loops * 2))
+ if [ $slept -lt $SOLR_STOP_WAIT ]; then
+ sleep 2
+ loops=$[$loops+1]
+ else
+ exit # subshell!
+ fi
+ else
+ exit # subshell!
+ fi
+ done) &
spinner $!
rm -f "$SOLR_PID_DIR/solr-$SOLR_PORT.pid"
else
@@ -555,6 +787,12 @@ if [ "$SCRIPT_CMD" == "status" ]; then
exit
fi
+# assert tool
+if [ "$SCRIPT_CMD" == "assert" ]; then
+ run_tool assert $*
+ exit $?
+fi
+
# run a healthcheck and exit if requested
if [ "$SCRIPT_CMD" == "healthcheck" ]; then
@@ -571,7 +809,7 @@ if [ "$SCRIPT_CMD" == "healthcheck" ]; then
;;
-z|-zkhost)
if [[ -z "$2" || "${2:0:1}" == "-" ]]; then
- print_usage "$SCRIPT_CMD" "ZooKeepeer connection string is required when using the $1 option!"
+ print_usage "$SCRIPT_CMD" "ZooKeeper connection string is required when using the $1 option!"
exit 1
fi
ZK_HOST="$2"
@@ -617,6 +855,7 @@ if [[ "$SCRIPT_CMD" == "create" || "$SCRIPT_CMD" == "create_core" || "$SCRIPT_CM
CREATE_NUM_SHARDS=1
CREATE_REPFACT=1
+ FORCE=false
if [ $# -gt 0 ]; then
while true; do
@@ -669,6 +908,10 @@ if [[ "$SCRIPT_CMD" == "create" || "$SCRIPT_CMD" == "create_core" || "$SCRIPT_CM
CREATE_PORT="$2"
shift 2
;;
+ -force)
+ FORCE=true
+ shift
+ ;;
-help|-usage)
print_usage "$SCRIPT_CMD"
exit 0
@@ -726,6 +969,11 @@ if [[ "$SCRIPT_CMD" == "create" || "$SCRIPT_CMD" == "create_core" || "$SCRIPT_CM
exit 1
fi
+ if [[ "$(whoami)" == "root" ]] && [[ "$FORCE" == "false" ]] ; then
+ echo "WARNING: Creating cores as the root user can cause Solr to fail and is not advisable. Exiting."
+ echo " If you started Solr as root (not advisable either), force core creation by adding argument -force"
+ exit 1
+ fi
if [ "$SCRIPT_CMD" == "create_core" ]; then
run_tool create_core -name "$CREATE_NAME" -solrUrl "$SOLR_URL_SCHEME://$SOLR_TOOL_HOST:$CREATE_PORT/solr" \
-confdir "$CREATE_CONFDIR" -configsetsDir "$SOLR_TIP/server/solr/configsets"
@@ -821,105 +1069,285 @@ if [[ "$SCRIPT_CMD" == "delete" ]]; then
exit $?
fi
-# Upload or download a configset to Zookeeper
+ZK_RECURSE=false
+# Zookeeper file maintenance (upconfig, downconfig, files up/down etc.)
+# It's a little clumsy to have the parsing go round and round for upconfig and downconfig, but that's
+# necessary for back-compat
if [[ "$SCRIPT_CMD" == "zk" ]]; then
if [ $# -gt 0 ]; then
while true; do
case "$1" in
- -z|-zkhost)
- if [[ -z "$2" || "${2:0:1}" == "-" ]]; then
- print_usage "$SCRIPT_CMD" "ZooKeepeer connection string is required when using the $1 option!"
- exit 1
- fi
- ZK_HOST="$2"
- shift 2
- ;;
- -n|-confname)
- if [[ -z "$2" || "${2:0:1}" == "-" ]]; then
- print_usage "$SCRIPT_CMD" "Configuration name is required when using the $1 option!"
- exit 1
- fi
- CONFIGSET_CONFNAME="$2"
- shift 2
- ;;
- -d|-confdir)
- if [[ -z "$2" || "${2:0:1}" == "-" ]]; then
- print_usage "$SCRIPT_CMD" "Configuration directory is required when using the $1 option!"
- exit 1
- fi
- CONFIGSET_CONFDIR="$2"
- shift 2
- ;;
- -upconfig)
- ZK_OP="upconfig"
- shift 1
- ;;
- -downconfig)
- ZK_OP="downconfig"
- shift 1
- ;;
- -help|-usage|-h)
- print_usage "$SCRIPT_CMD"
- exit 0
- ;;
- --)
- shift
- break
- ;;
- *)
- if [ "$1" != "" ]; then
- print_usage "$SCRIPT_CMD" "Unrecognized or misplaced argument: $1!"
- exit 1
+ -upconfig|upconfig|-downconfig|downconfig|cp|rm|mv|ls|mkroot)
+ if [ "${1:0:1}" == "-" ]; then
+ ZK_OP=${1:1}
+ else
+ ZK_OP=$1
+ fi
+ shift 1
+ ;;
+ -z|-zkhost)
+ if [[ -z "$2" || "${2:0:1}" == "-" ]]; then
+ print_short_zk_usage "$SCRIPT_CMD" "ZooKeeper connection string is required when using the $1 option!"
+ fi
+ ZK_HOST="$2"
+ shift 2
+ ;;
+ -n|-confname)
+ if [[ -z "$2" || "${2:0:1}" == "-" ]]; then
+ print_short_zk_usage "$SCRIPT_CMD" "Configuration name is required when using the $1 option!"
+ fi
+ CONFIGSET_CONFNAME="$2"
+ shift 2
+ ;;
+ -d|-confdir)
+ if [[ -z "$2" || "${2:0:1}" == "-" ]]; then
+ print_short_zk_usage "$SCRIPT_CMD" "Configuration directory is required when using the $1 option!"
+ fi
+ CONFIGSET_CONFDIR="$2"
+ shift 2
+ ;;
+ -r)
+ ZK_RECURSE="true"
+ shift
+ ;;
+ -help|-usage|-h)
+ print_usage "$SCRIPT_CMD"
+ exit 0
+ ;;
+ --)
+ shift
+ break
+ ;;
+ *) # Pick up <src> <dst> or <path> params for rm, ls, cp, mv, mkroot.
+ if [ "$1" == "" ]; then
+ break # out-of-args, stop looping
+ fi
+ if [ -z "$ZK_SRC" ]; then
+ ZK_SRC=$1
+ else
+ if [ -z "$ZK_DST" ]; then
+ ZK_DST=$1
else
- break # out-of-args, stop looping
+ print_short_zk_usage "Unrecognized or misplaced command $1. 'cp' with trailing asterisk requires quoting, see help text."
fi
- ;;
+ fi
+ shift
+ ;;
esac
done
fi
if [ -z "$ZK_OP" ]; then
- echo "Zookeeper operation (one of '-upconfig' or '-downconfig') is required!"
- print_usage "$SCRIPT_CMD"
- exit 1
+ print_short_zk_usage "Zookeeper operation (one of 'upconfig', 'downconfig', 'rm', 'mv', 'cp', 'ls', 'mkroot') is required!"
fi
if [ -z "$ZK_HOST" ]; then
- echo "Zookeeper address (-z) argument is required!"
- print_usage "$SCRIPT_CMD"
- exit 1
+ print_short_zk_usage "Zookeeper address (-z) argument is required or ZK_HOST must be specified in the solr.in.sh file."
fi
- if [ -z "$CONFIGSET_CONFDIR" ]; then
- echo "Local directory of the configset (-d) argument is required!"
- print_usage "$SCRIPT_CMD"
- exit 1
+ if [[ "$ZK_OP" == "upconfig" || "$ZK_OP" == "downconfig" ]]; then
+ if [ -z "$CONFIGSET_CONFDIR" ]; then
+ print_short_zk_usage "Local directory of the configset (-d) argument is required!"
+ fi
+
+ if [ -z "$CONFIGSET_CONFNAME" ]; then
+ print_short_zk_usage "Configset name on Zookeeper (-n) argument is required!"
+ fi
fi
- if [ -z "$CONFIGSET_CONFNAME" ]; then
- echo "Configset name on Zookeeper (-n) argument is required!"
- print_usage "$SCRIPT_CMD"
- exit 1
+ if [[ "$ZK_OP" == "cp" || "$ZK_OP" == "mv" ]]; then
+ if [[ -z "$ZK_SRC" || -z "$ZK_DST" ]]; then
+ print_short_zk_usage "<source> and <destination> must be specified when using either the 'mv' or 'cp' commands."
+ fi
+ if [[ "$ZK_OP" == "cp" && "${ZK_SRC:0:3}" != "zk:" && "${ZK_DST:0:3}" != "zk:" ]]; then
+ print_short_zk_usage "One of the source or desintation paths must be prefixed by 'zk:' for the 'cp' command."
+ fi
fi
- if [ "$ZK_OP" == "upconfig" ]; then
- run_tool "$ZK_OP" -confname "$CONFIGSET_CONFNAME" -confdir "$CONFIGSET_CONFDIR" -zkHost "$ZK_HOST" -configsetsDir "$SOLR_TIP/server/solr/configsets"
+ if [[ "$ZK_OP" == "mkroot" ]]; then
+ if [[ -z "$ZK_SRC" ]]; then
+ print_short_zk_usage "<path> must be specified when using the 'mkroot' command."
+ fi
+ fi
+
+
+ case "$ZK_OP" in
+ upconfig)
+ run_tool "$ZK_OP" -confname "$CONFIGSET_CONFNAME" -confdir "$CONFIGSET_CONFDIR" -zkHost "$ZK_HOST" -configsetsDir "$SOLR_TIP/server/solr/configsets"
+ ;;
+ downconfig)
+ run_tool "$ZK_OP" -confname "$CONFIGSET_CONFNAME" -confdir "$CONFIGSET_CONFDIR" -zkHost "$ZK_HOST"
+ ;;
+ rm)
+ if [ -z "$ZK_SRC" ]; then
+ print_short_zk_usage "Zookeeper path to remove must be specified when using the 'rm' command"
+ fi
+ run_tool "$ZK_OP" -path "$ZK_SRC" -zkHost "$ZK_HOST" -recurse "$ZK_RECURSE"
+ ;;
+ mv)
+ run_tool "$ZK_OP" -src "$ZK_SRC" -dst "$ZK_DST" -zkHost "$ZK_HOST"
+ ;;
+ cp)
+ run_tool "$ZK_OP" -src "$ZK_SRC" -dst "$ZK_DST" -zkHost "$ZK_HOST" -recurse "$ZK_RECURSE"
+ ;;
+ ls)
+ if [ -z "$ZK_SRC" ]; then
+ print_short_zk_usage "Zookeeper path to list must be specified when using the 'ls' command"
+ fi
+ run_tool "$ZK_OP" -path "$ZK_SRC" -recurse "$ZK_RECURSE" -zkHost "$ZK_HOST"
+ ;;
+ mkroot)
+ if [ -z "$ZK_SRC" ]; then
+ print_short_zk_usage "Zookeeper path to list must be specified when using the 'mkroot' command"
+ fi
+ run_tool "$ZK_OP" -path "$ZK_SRC" -zkHost "$ZK_HOST"
+ ;;
+ *)
+ print_short_zk_usage "Unrecognized Zookeeper operation $ZK_OP"
+ ;;
+ esac
+
+ exit $?
+fi
+
+if [[ "$SCRIPT_CMD" == "auth" ]]; then
+ declare -a AUTH_PARAMS
+ if [ $# -gt 0 ]; then
+ while true; do
+ case "$1" in
+ enable|disable)
+ AUTH_OP=$1
+ AUTH_PARAMS=("${AUTH_PARAMS[@]}" "$AUTH_OP")
+ shift
+ ;;
+ -z|-zkhost|zkHost)
+ ZK_HOST="$2"
+ AUTH_PARAMS=("${AUTH_PARAMS[@]}" "-zkHost" "$ZK_HOST")
+ shift 2
+ ;;
+ -t|-type)
+ AUTH_TYPE="$2"
+ AUTH_PARAMS=("${AUTH_PARAMS[@]}" "-type" "$AUTH_TYPE")
+ shift 2
+ ;;
+ -credentials)
+ AUTH_CREDENTIALS="$2"
+ AUTH_PARAMS=("${AUTH_PARAMS[@]}" "-credentials" "$AUTH_CREDENTIALS")
+ shift 2
+ ;;
+ -solrIncludeFile)
+ SOLR_INCLUDE="$2"
+ shift 2
+ ;;
+ -prompt)
+ AUTH_PARAMS=("${AUTH_PARAMS[@]}" "-prompt" "$2")
+ shift
+ ;;
+ -blockUnknown)
+ AUTH_PARAMS=("${AUTH_PARAMS[@]}" "-blockUnknown" "$2")
+ shift
+ break
+ ;;
+ -updateIncludeFileOnly)
+ AUTH_PARAMS=("${AUTH_PARAMS[@]}" "-updateIncludeFileOnly" "$2")
+ shift
+ break
+ ;;
+ -d|-dir)
+ if [[ -z "$2" || "${2:0:1}" == "-" ]]; then
+ print_usage "$SCRIPT_CMD" "Server directory is required when using the $1 option!"
+ exit 1
+ fi
+
+ if [[ "$2" == "." || "$2" == "./" || "$2" == ".." || "$2" == "../" ]]; then
+ SOLR_SERVER_DIR="$(pwd)/$2"
+ else
+ # see if the arg value is relative to the tip vs full path
+ if [[ "$2" != /* ]] && [[ -d "$SOLR_TIP/$2" ]]; then
+ SOLR_SERVER_DIR="$SOLR_TIP/$2"
+ else
+ SOLR_SERVER_DIR="$2"
+ fi
+ fi
+ # resolve it to an absolute path
+ SOLR_SERVER_DIR="$(cd "$SOLR_SERVER_DIR"; pwd)"
+ shift 2
+ ;;
+ -s|-solr.home)
+ if [[ -z "$2" || "${2:0:1}" == "-" ]]; then
+ print_usage "$SCRIPT_CMD" "Solr home directory is required when using the $1 option!"
+ exit 1
+ fi
+
+ SOLR_HOME="$2"
+ shift 2
+ ;;
+ -help|-usage|-h)
+ print_usage "$SCRIPT_CMD"
+ exit 0
+ ;;
+ --)
+ shift
+ break
+ ;;
+ *)
+ shift
+ break
+ ;;
+ esac
+ done
+ fi
+
+ if [ -z "$SOLR_SERVER_DIR" ]; then
+ SOLR_SERVER_DIR="$DEFAULT_SERVER_DIR"
+ fi
+ if [ ! -e "$SOLR_SERVER_DIR" ]; then
+ echo -e "\nSolr server directory $SOLR_SERVER_DIR not found!\n"
+ exit 1
+ fi
+ if [ -z "$SOLR_HOME" ]; then
+ SOLR_HOME="$SOLR_SERVER_DIR/solr"
else
- run_tool "$ZK_OP" -confname "$CONFIGSET_CONFNAME" -confdir "$CONFIGSET_CONFDIR" -zkHost "$ZK_HOST"
+ if [[ $SOLR_HOME != /* ]] && [[ -d "$SOLR_SERVER_DIR/$SOLR_HOME" ]]; then
+ SOLR_HOME="$SOLR_SERVER_DIR/$SOLR_HOME"
+ SOLR_PID_DIR="$SOLR_HOME"
+ elif [[ $SOLR_HOME != /* ]] && [[ -d "`pwd`/$SOLR_HOME" ]]; then
+ SOLR_HOME="$(pwd)/$SOLR_HOME"
+ fi
fi
+ if [ -z "$AUTH_OP" ]; then
+ print_usage "$SCRIPT_CMD"
+ exit 0
+ fi
+
+ AUTH_PARAMS=("${AUTH_PARAMS[@]}" "-solrIncludeFile" "$SOLR_INCLUDE")
+
+ if [ -z "$AUTH_PORT" ]; then
+ for ID in `ps auxww | grep java | grep start\.jar | awk '{print $2}' | sort -r`
+ do
+ port=`jetty_port "$ID"`
+ if [ "$port" != "" ]; then
+ AUTH_PORT=$port
+ break
+ fi
+ done
+ fi
+ run_tool auth ${AUTH_PARAMS[@]} -solrUrl "$SOLR_URL_SCHEME://$SOLR_TOOL_HOST:$AUTH_PORT/solr" -authConfDir "$SOLR_HOME"
exit $?
fi
+
# verify the command given is supported
-if [ "$SCRIPT_CMD" != "stop" ] && [ "$SCRIPT_CMD" != "start" ] && [ "$SCRIPT_CMD" != "restart" ] && [ "$SCRIPT_CMD" != "status" ]; then
+if [ "$SCRIPT_CMD" != "stop" ] && [ "$SCRIPT_CMD" != "start" ] && [ "$SCRIPT_CMD" != "restart" ] && [ "$SCRIPT_CMD" != "status" ] && [ "$SCRIPT_CMD" != "assert" ]; then
print_usage "" "$SCRIPT_CMD is not a valid command!"
exit 1
fi
# Run in foreground (default is to run in the background)
FG="false"
+FORCE=false
noprompt=false
SOLR_OPTS=($SOLR_OPTS)
PASS_TO_RUN_EXAMPLE=
@@ -1033,10 +1461,22 @@ if [ $# -gt 0 ]; then
PASS_TO_RUN_EXAMPLE+=" --verbose"
shift
;;
+ -v)
+ SOLR_LOG_LEVEL=DEBUG
+ shift
+ ;;
+ -q)
+ SOLR_LOG_LEVEL=WARN
+ shift
+ ;;
-all)
stop_all=true
shift
;;
+ -force)
+ FORCE=true
+ shift
+ ;;
--)
shift
break
@@ -1060,6 +1500,10 @@ if [ $# -gt 0 ]; then
done
fi
+if [[ $SOLR_LOG_LEVEL ]] ; then
+ SOLR_LOG_LEVEL_OPT="-Dsolr.log.level=$SOLR_LOG_LEVEL"
+fi
+
if [ -z "$SOLR_SERVER_DIR" ]; then
SOLR_SERVER_DIR="$DEFAULT_SERVER_DIR"
fi
@@ -1157,13 +1601,21 @@ if [ -z "$STOP_PORT" ]; then
STOP_PORT=`expr $SOLR_PORT - 1000`
fi
+if [ "$SCRIPT_CMD" == "start" ] || [ "$SCRIPT_CMD" == "restart" ] ; then
+ if [[ "$(whoami)" == "root" ]] && [[ "$FORCE" == "false" ]] ; then
+ echo "WARNING: Starting Solr as the root user is a security risk and not considered best practice. Exiting."
+ echo " Please consult the Reference Guide. To override this check, start with argument '-force'"
+ exit 1
+ fi
+fi
+
if [[ "$SCRIPT_CMD" == "start" ]]; then
# see if Solr is already running
SOLR_PID=`solr_pid_by_port "$SOLR_PORT"`
if [ -z "$SOLR_PID" ]; then
# not found using the pid file ... but use ps to ensure not found
- SOLR_PID=`ps auxww | grep start\.jar | grep -w $SOLR_PORT | grep -v grep | awk '{print $2}' | sort -r`
+ SOLR_PID=`ps auxww | grep start\.jar | grep -w "\-Djetty\.port=$SOLR_PORT" | grep -v grep | awk '{print $2}' | sort -r`
fi
if [ "$SOLR_PID" != "" ]; then
@@ -1176,7 +1628,7 @@ else
SOLR_PID=`solr_pid_by_port "$SOLR_PORT"`
if [ -z "$SOLR_PID" ]; then
# not found using the pid file ... but use ps to ensure not found
- SOLR_PID=`ps auxww | grep start\.jar | grep -w $SOLR_PORT | grep -v grep | awk '{print $2}' | sort -r`
+ SOLR_PID=`ps auxww | grep start\.jar | grep -w "\-Djetty\.port=$SOLR_PORT" | grep -v grep | awk '{print $2}' | sort -r`
fi
if [ "$SOLR_PID" != "" ]; then
stop_solr "$SOLR_SERVER_DIR" "$SOLR_PORT" "$STOP_KEY" "$SOLR_PID"
@@ -1226,38 +1678,49 @@ if [ ! -e "$SOLR_HOME" ]; then
echo -e "\nSolr home directory $SOLR_HOME not found!\n"
exit 1
fi
-
-# backup the log files before starting
-if [ -f "$SOLR_LOGS_DIR/solr.log" ]; then
- if $verbose ; then
- echo "Backing up $SOLR_LOGS_DIR/solr.log"
- fi
- mv "$SOLR_LOGS_DIR/solr.log" "$SOLR_LOGS_DIR/solr_log_$(date +"%Y%m%d_%H%M")"
+if $verbose ; then
+ q=""
+else
+ q="-q"
fi
-
-if [ -f "$SOLR_LOGS_DIR/solr_gc.log" ]; then
- if $verbose ; then
- echo "Backing up $SOLR_LOGS_DIR/solr_gc.log"
- fi
- mv "$SOLR_LOGS_DIR/solr_gc.log" "$SOLR_LOGS_DIR/solr_gc_log_$(date +"%Y%m%d_%H%M")"
+if [ "${SOLR_LOG_PRESTART_ROTATION:=true}" == "true" ]; then
+ run_tool utils -s "$DEFAULT_SERVER_DIR" -l "$SOLR_LOGS_DIR" $q -remove_old_solr_logs 7 || echo "Failed removing old solr logs"
+ run_tool utils -s "$DEFAULT_SERVER_DIR" -l "$SOLR_LOGS_DIR" $q -archive_gc_logs $q || echo "Failed archiving old GC logs"
+ run_tool utils -s "$DEFAULT_SERVER_DIR" -l "$SOLR_LOGS_DIR" $q -archive_console_logs || echo "Failed archiving old console logs"
+ run_tool utils -s "$DEFAULT_SERVER_DIR" -l "$SOLR_LOGS_DIR" $q -rotate_solr_logs 9 || echo "Failed rotating old solr logs"
fi
-java_ver_out=`echo "$("$JAVA" -version 2>&1)"`
-JAVA_VERSION=`echo $java_ver_out | grep "java version" | awk '{ print substr($3, 2, length($3)-2); }'`
-JAVA_VENDOR="Oracle"
-if [ "`echo $java_ver_out | grep -i "IBM J9"`" != "" ]; then
- JAVA_VENDOR="IBM J9"
+# Establish default GC logging opts if the env var is not set; otherwise split the user-provided value into an array
+if [ -z ${GC_LOG_OPTS+x} ]; then
+ if [[ "$JAVA_VER_NUM" < "9" ]] ; then
+ GC_LOG_OPTS=('-verbose:gc' '-XX:+PrintHeapAtGC' '-XX:+PrintGCDetails' \
+ '-XX:+PrintGCDateStamps' '-XX:+PrintGCTimeStamps' '-XX:+PrintTenuringDistribution' \
+ '-XX:+PrintGCApplicationStoppedTime')
+ else
+ GC_LOG_OPTS=('-Xlog:gc*')
+ fi
+else
+ GC_LOG_OPTS=($GC_LOG_OPTS)
fi
-# if verbose gc logging enabled, setup the location of the log file
+# if verbose gc logging enabled, set up the location of the log file and rotation
if [ "$GC_LOG_OPTS" != "" ]; then
- gc_log_flag="-Xloggc"
- if [ "$JAVA_VENDOR" == "IBM J9" ]; then
- gc_log_flag="-Xverbosegclog"
+ if [[ "$JAVA_VER_NUM" < "9" ]] ; then
+ gc_log_flag="-Xloggc"
+ if [ "$JAVA_VENDOR" == "IBM J9" ]; then
+ gc_log_flag="-Xverbosegclog"
+ fi
+ GC_LOG_OPTS+=("$gc_log_flag:$SOLR_LOGS_DIR/solr_gc.log" '-XX:+UseGCLogFileRotation' '-XX:NumberOfGCLogFiles=9' '-XX:GCLogFileSize=20M')
+ else
+ # http://openjdk.java.net/jeps/158
+ for i in "${!GC_LOG_OPTS[@]}";
+ do
+ # for simplicity, we only look at the prefix '-Xlog:gc'
+      # (if 'all' or multiple tags are used starting with anything other than 'gc', the user is on their own)
+ # if a single additional ':' exists in param, then there is already an explicit output specifier
+ GC_LOG_OPTS[$i]=$(echo ${GC_LOG_OPTS[$i]} | sed "s|^\(-Xlog:gc[^:]*$\)|\1:file=$SOLR_LOGS_DIR/solr_gc.log:time,uptime:filecount=9,filesize=20000|")
+ done
fi
- GC_LOG_OPTS=($GC_LOG_OPTS "$gc_log_flag:$SOLR_LOGS_DIR/solr_gc.log")
-else
- GC_LOG_OPTS=()
fi
# If ZK_HOST is defined, then assume SolrCloud mode
@@ -1298,7 +1761,11 @@ fi
if [ "$ENABLE_REMOTE_JMX_OPTS" == "true" ]; then
if [ -z "$RMI_PORT" ]; then
- RMI_PORT="1$SOLR_PORT"
+ RMI_PORT=`expr $SOLR_PORT + 10000`
+ if [ $RMI_PORT -gt 65535 ]; then
+ echo -e "\nRMI_PORT is $RMI_PORT, which is invalid!\n"
+ exit 1
+ fi
fi
REMOTE_JMX_OPTS=('-Dcom.sun.management.jmxremote' \
@@ -1324,6 +1791,12 @@ else
JAVA_MEM_OPTS=("-Xms$SOLR_HEAP" "-Xmx$SOLR_HEAP")
fi
+# Pick default for Java thread stack size, and then add to SOLR_OPTS
+if [ -z ${SOLR_JAVA_STACK_SIZE+x} ]; then
+ SOLR_JAVA_STACK_SIZE='-Xss256k'
+fi
+SOLR_OPTS+=($SOLR_JAVA_STACK_SIZE)
+
if [ -z "$SOLR_TIMEZONE" ]; then
SOLR_TIMEZONE='UTC'
fi
@@ -1336,20 +1809,28 @@ function launch_solr() {
SOLR_ADDL_ARGS="$2"
- GC_TUNE=($GC_TUNE)
- # deal with Java version specific GC and other flags
- if [ "${JAVA_VERSION:0:3}" == "1.7" ]; then
- # Specific Java version hacking
- GC_TUNE+=('-XX:CMSFullGCsBeforeCompaction=1' '-XX:CMSTriggerPermRatio=80')
- if [ "$JAVA_VENDOR" != "IBM J9" ]; then
- JAVA_MINOR_VERSION=${JAVA_VERSION:(-2)}
- if [[ $JAVA_MINOR_VERSION -ge 40 && $JAVA_MINOR_VERSION -le 51 ]]; then
- GC_TUNE+=('-XX:-UseSuperWord')
- echo -e "\nWARNING: Java version $JAVA_VERSION has known bugs with Lucene and requires the -XX:-UseSuperWord flag. Please consider upgrading your JVM.\n"
- fi
- fi
+ # define default GC_TUNE
+ if [ -z ${GC_TUNE+x} ]; then
+ GC_TUNE=('-XX:NewRatio=3' \
+ '-XX:SurvivorRatio=4' \
+ '-XX:TargetSurvivorRatio=90' \
+ '-XX:MaxTenuringThreshold=8' \
+ '-XX:+UseConcMarkSweepGC' \
+ '-XX:+UseParNewGC' \
+ '-XX:ConcGCThreads=4' '-XX:ParallelGCThreads=4' \
+ '-XX:+CMSScavengeBeforeRemark' \
+ '-XX:PretenureSizeThreshold=64m' \
+ '-XX:+UseCMSInitiatingOccupancyOnly' \
+ '-XX:CMSInitiatingOccupancyFraction=50' \
+ '-XX:CMSMaxAbortablePrecleanTime=6000' \
+ '-XX:+CMSParallelRemarkEnabled' \
+ '-XX:+ParallelRefProcEnabled' \
+ '-XX:-OmitStackTraceInFastThrow')
+ else
+ GC_TUNE=($GC_TUNE)
fi
+
# If SSL-related system props are set, add them to SOLR_OPTS
if [ -n "$SOLR_SSL_OPTS" ]; then
# If using SSL and solr.jetty.https.port not set explicitly, use the jetty.port
@@ -1380,17 +1861,22 @@ function launch_solr() {
fi
if [ "$SOLR_OPTS" != "" ]; then
- echo -e " SOLR_OPTS = ${SOLR_OPTS[@]}"
+ echo -e " SOLR_OPTS = ${SOLR_OPTS[@]}"
fi
if [ "$SOLR_ADDL_ARGS" != "" ]; then
- echo -e " SOLR_ADDL_ARGS = $SOLR_ADDL_ARGS"
+ echo -e " SOLR_ADDL_ARGS = $SOLR_ADDL_ARGS"
fi
if [ "$ENABLE_REMOTE_JMX_OPTS" == "true" ]; then
echo -e " RMI_PORT = $RMI_PORT"
echo -e " REMOTE_JMX_OPTS = ${REMOTE_JMX_OPTS[@]}"
fi
+
+ if [ "$SOLR_LOG_LEVEL" != "" ]; then
+ echo -e " SOLR_LOG_LEVEL = $SOLR_LOG_LEVEL"
+ fi
+
echo -e "\n"
fi
@@ -1403,7 +1889,7 @@ function launch_solr() {
fi
SOLR_START_OPTS=('-server' "${JAVA_MEM_OPTS[@]}" "${GC_TUNE[@]}" "${GC_LOG_OPTS[@]}" \
- "${REMOTE_JMX_OPTS[@]}" "${CLOUD_MODE_OPTS[@]}" \
+ "${REMOTE_JMX_OPTS[@]}" "${CLOUD_MODE_OPTS[@]}" $SOLR_LOG_LEVEL_OPT -Dsolr.log.dir="$SOLR_LOGS_DIR" \
"-Djetty.port=$SOLR_PORT" "-DSTOP.PORT=$stop_port" "-DSTOP.KEY=$STOP_KEY" \
"${SOLR_HOST_ARG[@]}" "-Duser.timezone=$SOLR_TIMEZONE" \
"-Djetty.home=$SOLR_SERVER_DIR" "-Dsolr.solr.home=$SOLR_HOME" "-Dsolr.install.dir=$SOLR_TIP" \
@@ -1413,37 +1899,57 @@ function launch_solr() {
IN_CLOUD_MODE=" in SolrCloud mode"
fi
- mkdir -p "$SOLR_LOGS_DIR"
+ mkdir -p "$SOLR_LOGS_DIR" 2>/dev/null
+ if [ $? -ne 0 ]; then
+ echo -e "\nERROR: Logs directory $SOLR_LOGS_DIR could not be created. Exiting"
+ exit 1
+ fi
+ if [ ! -w "$SOLR_LOGS_DIR" ]; then
+ echo -e "\nERROR: Logs directory $SOLR_LOGS_DIR is not writable. Exiting"
+ exit 1
+ fi
+ case "$SOLR_LOGS_DIR" in
+ contexts|etc|lib|modules|resources|scripts|solr|solr-webapp)
+ echo -e "\nERROR: Logs directory $SOLR_LOGS_DIR is invalid. Reserved for the system. Exiting"
+ exit 1
+ ;;
+ esac
if [ "$run_in_foreground" == "true" ]; then
- echo -e "\nStarting Solr$IN_CLOUD_MODE on port $SOLR_PORT from $SOLR_SERVER_DIR\n"
- exec "$JAVA" "${SOLR_START_OPTS[@]}" $SOLR_ADDL_ARGS -jar start.jar "${SOLR_JETTY_CONFIG[@]}"
+ exec "$JAVA" "${SOLR_START_OPTS[@]}" $SOLR_ADDL_ARGS -Dsolr.kerberos.name.rules="$SOLR_KERB_NAME_RULES" -jar start.jar "${SOLR_JETTY_CONFIG[@]}"
else
# run Solr in the background
- nohup "$JAVA" "${SOLR_START_OPTS[@]}" $SOLR_ADDL_ARGS -Dsolr.kerberos.name.rules="$SOLR_KERB_NAME_RULES" \
+ nohup "$JAVA" "${SOLR_START_OPTS[@]}" $SOLR_ADDL_ARGS -Dsolr.kerberos.name.rules="$SOLR_KERB_NAME_RULES" -Dsolr.log.muteconsole \
"-XX:OnOutOfMemoryError=$SOLR_TIP/bin/oom_solr.sh $SOLR_PORT $SOLR_LOGS_DIR" \
-jar start.jar "${SOLR_JETTY_CONFIG[@]}" \
1>"$SOLR_LOGS_DIR/solr-$SOLR_PORT-console.log" 2>&1 & echo $! > "$SOLR_PID_DIR/solr-$SOLR_PORT.pid"
+      # if /proc/sys/kernel/random/entropy_avail exists, warn when the available entropy is below 300
+ if [[ -f /proc/sys/kernel/random/entropy_avail ]] && (( `cat /proc/sys/kernel/random/entropy_avail` < 300)); then
+ echo "Warning: Available entropy is low. As a result, use of the UUIDField, SSL, or any other features that require"
+ echo "RNG might not work properly. To check for the amount of available entropy, use 'cat /proc/sys/kernel/random/entropy_avail'."
+ echo ""
+ fi
# no lsof on cygwin though
if hash lsof 2>/dev/null ; then # hash returns true if lsof is on the path
- echo -n "Waiting up to 30 seconds to see Solr running on port $SOLR_PORT"
+ echo -n "Waiting up to $SOLR_STOP_WAIT seconds to see Solr running on port $SOLR_PORT"
# Launch in a subshell to show the spinner
(loops=0
while true
do
running=`lsof -PniTCP:$SOLR_PORT -sTCP:LISTEN`
if [ -z "$running" ]; then
- if [ $loops -lt 6 ]; then
- sleep 5
+ slept=$((loops * 2))
+ if [ $slept -lt $SOLR_STOP_WAIT ]; then
+ sleep 2
loops=$[$loops+1]
else
- echo -e "Still not seeing Solr listening on $SOLR_PORT after 30 seconds!"
+ echo -e "Still not seeing Solr listening on $SOLR_PORT after $SOLR_STOP_WAIT seconds!"
tail -30 "$SOLR_LOGS_DIR/solr.log"
exit # subshell!
fi
else
- SOLR_PID=`ps auxww | grep start\.jar | grep -w $SOLR_PORT | grep -v grep | awk '{print $2}' | sort -r`
+ SOLR_PID=`ps auxww | grep start\.jar | grep -w "\-Djetty\.port=$SOLR_PORT" | grep -v grep | awk '{print $2}' | sort -r`
echo -e "\nStarted Solr server on port $SOLR_PORT (pid=$SOLR_PID). Happy searching!\n"
exit # subshell!
fi
@@ -1452,7 +1958,7 @@ function launch_solr() {
else
echo -e "NOTE: Please install lsof as this script needs it to determine if Solr is listening on port $SOLR_PORT."
sleep 10
- SOLR_PID=`ps auxww | grep start\.jar | grep -w $SOLR_PORT | grep -v grep | awk '{print $2}' | sort -r`
+ SOLR_PID=`ps auxww | grep start\.jar | grep -w "\-Djetty\.port=$SOLR_PORT" | grep -v grep | awk '{print $2}' | sort -r`
echo -e "\nStarted Solr server on port $SOLR_PORT (pid=$SOLR_PID). Happy searching!\n"
return;
fi
http://git-wip-us.apache.org/repos/asf/ambari/blob/a795f38c/ambari-infra/ambari-infra-solr-client/src/main/java/org/apache/ambari/infra/solr/AmbariSolrCloudCLI.java
----------------------------------------------------------------------
diff --git a/ambari-infra/ambari-infra-solr-client/src/main/java/org/apache/ambari/infra/solr/AmbariSolrCloudCLI.java b/ambari-infra/ambari-infra-solr-client/src/main/java/org/apache/ambari/infra/solr/AmbariSolrCloudCLI.java
index e3a1e79..70bc232 100644
--- a/ambari-infra/ambari-infra-solr-client/src/main/java/org/apache/ambari/infra/solr/AmbariSolrCloudCLI.java
+++ b/ambari-infra/ambari-infra-solr-client/src/main/java/org/apache/ambari/infra/solr/AmbariSolrCloudCLI.java
@@ -52,12 +52,14 @@ public class AmbariSolrCloudCLI {
private static final String UNSECURE_ZNODE_COMMAND = "unsecure-znode";
private static final String SECURE_SOLR_ZNODE_COMMAND = "secure-solr-znode";
private static final String SECURITY_JSON_LOCATION = "security-json-location";
+ private static final String REMOVE_ADMIN_HANDLERS = "remove-admin-handlers";
private static final String CMD_LINE_SYNTAX =
"\n./solrCloudCli.sh --create-collection -z host1:2181,host2:2181/ambari-solr -c collection -cs conf_set"
+ "\n./solrCloudCli.sh --upload-config -z host1:2181,host2:2181/ambari-solr -d /tmp/myconfig_dir -cs config_set"
+ "\n./solrCloudCli.sh --download-config -z host1:2181,host2:2181/ambari-solr -cs config_set -d /tmp/myconfig_dir"
+ "\n./solrCloudCli.sh --check-config -z host1:2181,host2:2181/ambari-solr -cs config_set"
+ "\n./solrCloudCli.sh --create-shard -z host1:2181,host2:2181/ambari-solr -c collection -sn myshard"
+ + "\n./solrCloudCli.sh --remove-admin-handlers -z host1:2181,host2:2181/ambari-solr -c collection"
+ "\n./solrCloudCli.sh --create-znode -z host1:2181,host2:2181 -zn /ambari-solr"
+ "\n./solrCloudCli.sh --check-znode -z host1:2181,host2:2181 -zn /ambari-solr"
+ "\n./solrCloudCli.sh --cluster-prop -z host1:2181,host2:2181/ambari-solr -cpn urlScheme -cpn http"
@@ -137,6 +139,11 @@ public class AmbariSolrCloudCLI {
.desc("Disable security for znode")
.build();
+ final Option removeAdminHandlerOption = Option.builder("rah")
+ .longOpt(REMOVE_ADMIN_HANDLERS)
+ .desc("Remove AdminHandlers request handler from solrconfig.xml")
+ .build();
+
final Option shardNameOption = Option.builder("sn")
.longOpt("shard-name")
.desc("Name of the shard for create-shard command")
@@ -328,6 +335,7 @@ public class AmbariSolrCloudCLI {
options.addOption(helpOption);
options.addOption(retryOption);
+ options.addOption(removeAdminHandlerOption);
options.addOption(intervalOption);
options.addOption(zkConnectStringOption);
options.addOption(configSetOption);
@@ -414,6 +422,9 @@ public class AmbariSolrCloudCLI {
} else if (cli.hasOption("uz")) {
command = UNSECURE_ZNODE_COMMAND;
validateRequiredOptions(cli, command, zkConnectStringOption, znodeOption, jaasFileOption);
+ } else if (cli.hasOption("rah")) {
+ command = REMOVE_ADMIN_HANDLERS;
+ validateRequiredOptions(cli, command, zkConnectStringOption, collectionOption);
} else {
List<String> commands = Arrays.asList(CREATE_COLLECTION_COMMAND, CREATE_SHARD_COMMAND, UPLOAD_CONFIG_COMMAND,
DOWNLOAD_CONFIG_COMMAND, CONFIG_CHECK_COMMAND, SET_CLUSTER_PROP, CREATE_ZNODE, SECURE_ZNODE_COMMAND, UNSECURE_ZNODE_COMMAND,
@@ -539,6 +550,9 @@ public class AmbariSolrCloudCLI {
case SECURE_SOLR_ZNODE_COMMAND:
solrCloudClient = clientBuilder.build();
solrCloudClient.secureSolrZnode();
+        break;
+      case REMOVE_ADMIN_HANDLERS:
+ solrCloudClient = clientBuilder.build();
+ solrCloudClient.removeAdminHandlerFromCollectionConfig();
break;
default:
throw new AmbariSolrCloudClientException(String.format("Not found command: '%s'", command));
http://git-wip-us.apache.org/repos/asf/ambari/blob/a795f38c/ambari-infra/ambari-infra-solr-client/src/main/java/org/apache/ambari/infra/solr/AmbariSolrCloudClient.java
----------------------------------------------------------------------
diff --git a/ambari-infra/ambari-infra-solr-client/src/main/java/org/apache/ambari/infra/solr/AmbariSolrCloudClient.java b/ambari-infra/ambari-infra-solr-client/src/main/java/org/apache/ambari/infra/solr/AmbariSolrCloudClient.java
index 9479679..96c07a3 100644
--- a/ambari-infra/ambari-infra-solr-client/src/main/java/org/apache/ambari/infra/solr/AmbariSolrCloudClient.java
+++ b/ambari-infra/ambari-infra-solr-client/src/main/java/org/apache/ambari/infra/solr/AmbariSolrCloudClient.java
@@ -27,6 +27,7 @@ import org.apache.ambari.infra.solr.commands.EnableKerberosPluginSolrZkCommand;
import org.apache.ambari.infra.solr.commands.GetShardsCommand;
import org.apache.ambari.infra.solr.commands.GetSolrHostsCommand;
import org.apache.ambari.infra.solr.commands.ListCollectionCommand;
+import org.apache.ambari.infra.solr.commands.RemoveAdminHandlersCommand;
import org.apache.ambari.infra.solr.commands.SecureSolrZNodeZkCommand;
import org.apache.ambari.infra.solr.commands.SecureZNodeZkCommand;
import org.apache.ambari.infra.solr.commands.SetClusterPropertyZkCommand;
@@ -257,6 +258,13 @@ public class AmbariSolrCloudClient {
return new GetSolrHostsCommand(getRetryTimes(), getInterval()).run(this);
}
+ /**
+   * Remove solr.admin.AdminHandlers requestHandler from solrconfig.xml
+ */
+ public boolean removeAdminHandlerFromCollectionConfig() throws Exception {
+ return new RemoveAdminHandlersCommand(getRetryTimes(), getInterval()).run(this);
+ }
+
public String getZkConnectString() {
return zkConnectString;
}
http://git-wip-us.apache.org/repos/asf/ambari/blob/a795f38c/ambari-infra/ambari-infra-solr-client/src/main/java/org/apache/ambari/infra/solr/commands/RemoveAdminHandlersCommand.java
----------------------------------------------------------------------
diff --git a/ambari-infra/ambari-infra-solr-client/src/main/java/org/apache/ambari/infra/solr/commands/RemoveAdminHandlersCommand.java b/ambari-infra/ambari-infra-solr-client/src/main/java/org/apache/ambari/infra/solr/commands/RemoveAdminHandlersCommand.java
new file mode 100644
index 0000000..32fae7b
--- /dev/null
+++ b/ambari-infra/ambari-infra-solr-client/src/main/java/org/apache/ambari/infra/solr/commands/RemoveAdminHandlersCommand.java
@@ -0,0 +1,46 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.ambari.infra.solr.commands;
+
+import org.apache.ambari.infra.solr.AmbariSolrCloudClient;
+import org.apache.solr.common.cloud.SolrZkClient;
+import org.apache.solr.common.cloud.SolrZooKeeper;
+import org.apache.zookeeper.data.Stat;
+
+public class RemoveAdminHandlersCommand extends AbstractZookeeperRetryCommand<Boolean> {
+
+ public RemoveAdminHandlersCommand(int maxRetries, int interval) {
+ super(maxRetries, interval);
+ }
+
+ @Override
+ protected Boolean executeZkCommand(AmbariSolrCloudClient client, SolrZkClient zkClient, SolrZooKeeper solrZooKeeper) throws Exception {
+ String solrConfigXmlPath = String.format("/configs/%s/solrconfig.xml", client.getCollection());
+ if (zkClient.exists(solrConfigXmlPath, true)) {
+ Stat stat = new Stat();
+ byte[] solrConfigXmlBytes = zkClient.getData(solrConfigXmlPath, null, stat, true);
+ String solrConfigStr = new String(solrConfigXmlBytes);
+ if (solrConfigStr.contains("class=\"solr.admin.AdminHandlers\"")) {
+ byte[] newSolrConfigXmlBytes = new String(solrConfigXmlBytes).replaceAll("(?s)<requestHandler name=\"/admin/\".*?class=\"solr.admin.AdminHandlers\" />", "").getBytes();
+        zkClient.setData(solrConfigXmlPath, newSolrConfigXmlBytes, stat.getVersion(), true);
+ }
+ }
+ return true;
+ }
+}
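
For context, a minimal standalone sketch (not part of this patch) of what the replaceAll() pattern used above, and again in UpgradeCatalog300, does to a solrconfig.xml fragment. The (?s) flag makes '.' match line breaks, so the non-greedy .*? can span the two-line form of the handler declaration that the shipped configsets use:

public class RemoveAdminHandlersRegexDemo {
  public static void main(String[] args) {
    String solrConfig =
        "<before/>\n"
      + "<requestHandler name=\"/admin/\"\n"
      + "                class=\"solr.admin.AdminHandlers\" />\n"
      + "<after/>";
    // Same pattern as RemoveAdminHandlersCommand: (?s) = DOTALL, .*? = non-greedy
    String cleaned = solrConfig.replaceAll(
        "(?s)<requestHandler name=\"/admin/\".*?class=\"solr.admin.AdminHandlers\" />", "");
    System.out.println(cleaned); // prints "<before/>", an empty line, then "<after/>"
  }
}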
http://git-wip-us.apache.org/repos/asf/ambari/blob/a795f38c/ambari-infra/ambari-infra-solr-client/src/main/java/org/apache/ambari/infra/solr/commands/SetClusterPropertyZkCommand.java
----------------------------------------------------------------------
diff --git a/ambari-infra/ambari-infra-solr-client/src/main/java/org/apache/ambari/infra/solr/commands/SetClusterPropertyZkCommand.java b/ambari-infra/ambari-infra-solr-client/src/main/java/org/apache/ambari/infra/solr/commands/SetClusterPropertyZkCommand.java
index 34597c6..e79773e 100644
--- a/ambari-infra/ambari-infra-solr-client/src/main/java/org/apache/ambari/infra/solr/commands/SetClusterPropertyZkCommand.java
+++ b/ambari-infra/ambari-infra-solr-client/src/main/java/org/apache/ambari/infra/solr/commands/SetClusterPropertyZkCommand.java
@@ -19,9 +19,9 @@
package org.apache.ambari.infra.solr.commands;
import org.apache.ambari.infra.solr.AmbariSolrCloudClient;
+import org.apache.solr.common.cloud.ClusterProperties;
import org.apache.solr.common.cloud.SolrZkClient;
import org.apache.solr.common.cloud.SolrZooKeeper;
-import org.apache.solr.common.cloud.ZkStateReader;
public class SetClusterPropertyZkCommand extends AbstractZookeeperRetryCommand<String>{
@@ -33,8 +33,8 @@ public class SetClusterPropertyZkCommand extends AbstractZookeeperRetryCommand<S
protected String executeZkCommand(AmbariSolrCloudClient client, SolrZkClient zkClient, SolrZooKeeper solrZooKeeper) throws Exception {
String propertyName = client.getPropName();
String propertyValue = client.getPropValue();
- ZkStateReader reader = new ZkStateReader(zkClient);
- reader.setClusterProperty(propertyName, propertyValue);
+ ClusterProperties clusterProperties = new ClusterProperties(zkClient);
+ clusterProperties.setClusterProperty(propertyName, propertyValue);
return propertyValue;
}
}
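
Solr 6.x moves cluster-property updates from ZkStateReader to the ClusterProperties helper, which is what the change above reflects. A brief sketch of the replacement call in isolation (the connect string and 30-second timeout below are illustrative placeholders, not values from this patch):

import org.apache.solr.common.cloud.ClusterProperties;
import org.apache.solr.common.cloud.SolrZkClient;

public class ClusterPropertySketch {
  public static void main(String[] args) throws Exception {
    SolrZkClient zkClient = new SolrZkClient("localhost:2181/ambari-solr", 30000);
    try {
      // Same call SetClusterPropertyZkCommand now performs, e.g. urlScheme=https for SSL setups
      new ClusterProperties(zkClient).setClusterProperty("urlScheme", "https");
    } finally {
      zkClient.close();
    }
  }
}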
http://git-wip-us.apache.org/repos/asf/ambari/blob/a795f38c/ambari-infra/ambari-infra-solr-plugin/src/test/java/org/apache/ambari/infra/security/InfraRuleBasedAuthorizationPluginTest.java
----------------------------------------------------------------------
diff --git a/ambari-infra/ambari-infra-solr-plugin/src/test/java/org/apache/ambari/infra/security/InfraRuleBasedAuthorizationPluginTest.java b/ambari-infra/ambari-infra-solr-plugin/src/test/java/org/apache/ambari/infra/security/InfraRuleBasedAuthorizationPluginTest.java
index ee84969..f1f842d 100644
--- a/ambari-infra/ambari-infra-solr-plugin/src/test/java/org/apache/ambari/infra/security/InfraRuleBasedAuthorizationPluginTest.java
+++ b/ambari-infra/ambari-infra-solr-plugin/src/test/java/org/apache/ambari/infra/security/InfraRuleBasedAuthorizationPluginTest.java
@@ -242,6 +242,11 @@ public class InfraRuleBasedAuthorizationPluginTest {
public String getResource() {
return (String) values.get("resource");
}
+
+ @Override
+ public Object getHandler() {
+ return null;
+ }
}
}
http://git-wip-us.apache.org/repos/asf/ambari/blob/a795f38c/ambari-infra/pom.xml
----------------------------------------------------------------------
diff --git a/ambari-infra/pom.xml b/ambari-infra/pom.xml
index 4f5c29c..908abb4 100644
--- a/ambari-infra/pom.xml
+++ b/ambari-infra/pom.xml
@@ -31,7 +31,7 @@
<properties>
<jdk.version>1.8</jdk.version>
- <solr.version>5.5.2</solr.version>
+ <solr.version>6.6.0</solr.version>
<project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
<python.ver>python >= 2.6</python.ver>
<deb.python.ver>python (>= 2.6)</deb.python.ver>
http://git-wip-us.apache.org/repos/asf/ambari/blob/a795f38c/ambari-logsearch/ambari-logsearch-server/src/main/configsets/audit_logs/conf/solrconfig.xml
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-server/src/main/configsets/audit_logs/conf/solrconfig.xml b/ambari-logsearch/ambari-logsearch-server/src/main/configsets/audit_logs/conf/solrconfig.xml
index 7af91df..b1290a4 100644
--- a/ambari-logsearch/ambari-logsearch-server/src/main/configsets/audit_logs/conf/solrconfig.xml
+++ b/ambari-logsearch/ambari-logsearch-server/src/main/configsets/audit_logs/conf/solrconfig.xml
@@ -1063,8 +1063,7 @@
Admin Handlers - This will register all the standard admin
RequestHandlers.
-->
- <requestHandler name="/admin/"
- class="solr.admin.AdminHandlers" />
+
<!-- This single handler is equivalent to the following... -->
<!--
<requestHandler name="/admin/luke" class="solr.admin.LukeRequestHandler" />
http://git-wip-us.apache.org/repos/asf/ambari/blob/a795f38c/ambari-logsearch/ambari-logsearch-server/src/main/configsets/hadoop_logs/conf/solrconfig.xml
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-server/src/main/configsets/hadoop_logs/conf/solrconfig.xml b/ambari-logsearch/ambari-logsearch-server/src/main/configsets/hadoop_logs/conf/solrconfig.xml
index 59f778f..f0e46a0 100644
--- a/ambari-logsearch/ambari-logsearch-server/src/main/configsets/hadoop_logs/conf/solrconfig.xml
+++ b/ambari-logsearch/ambari-logsearch-server/src/main/configsets/hadoop_logs/conf/solrconfig.xml
@@ -1063,8 +1063,7 @@
Admin Handlers - This will register all the standard admin
RequestHandlers.
-->
- <requestHandler name="/admin/"
- class="solr.admin.AdminHandlers" />
+
<!-- This single handler is equivalent to the following... -->
<!--
<requestHandler name="/admin/luke" class="solr.admin.LukeRequestHandler" />
http://git-wip-us.apache.org/repos/asf/ambari/blob/a795f38c/ambari-logsearch/ambari-logsearch-server/src/main/configsets/history/conf/solrconfig.xml
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-server/src/main/configsets/history/conf/solrconfig.xml b/ambari-logsearch/ambari-logsearch-server/src/main/configsets/history/conf/solrconfig.xml
index 8244a08..1827444 100644
--- a/ambari-logsearch/ambari-logsearch-server/src/main/configsets/history/conf/solrconfig.xml
+++ b/ambari-logsearch/ambari-logsearch-server/src/main/configsets/history/conf/solrconfig.xml
@@ -1063,8 +1063,7 @@
Admin Handlers - This will register all the standard admin
RequestHandlers.
-->
- <requestHandler name="/admin/"
- class="solr.admin.AdminHandlers" />
+
<!-- This single handler is equivalent to the following... -->
<!--
<requestHandler name="/admin/luke" class="solr.admin.LukeRequestHandler" />
http://git-wip-us.apache.org/repos/asf/ambari/blob/a795f38c/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/dao/SolrSchemaFieldDao.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/dao/SolrSchemaFieldDao.java b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/dao/SolrSchemaFieldDao.java
index d99694b..71f9f29 100644
--- a/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/dao/SolrSchemaFieldDao.java
+++ b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/dao/SolrSchemaFieldDao.java
@@ -141,7 +141,7 @@ public class SolrSchemaFieldDao {
try (CloseableHttpClient httpClient = HttpClientUtil.createClient(null)) {
HttpGet request = new HttpGet(replica.getCoreUrl() + LUKE_REQUEST_URL_SUFFIX);
HttpResponse response = httpClient.execute(request);
- NamedList<Object> lukeData = (NamedList<Object>) new JavaBinCodec(null, null).unmarshal(response.getEntity().getContent());
+ NamedList<Object> lukeData = (NamedList<Object>) new JavaBinCodec().unmarshal(response.getEntity().getContent());
LukeResponse lukeResponse = new LukeResponse();
lukeResponse.setResponse(lukeData);
lukeResponses.add(lukeResponse);
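
For reference, the decoding step above in isolation, as a small self-contained sketch; it assumes the stream carries a javabin-encoded /admin/luke response body, which is what the surrounding DAO code fetches:

import java.io.InputStream;

import org.apache.solr.client.solrj.response.LukeResponse;
import org.apache.solr.common.util.JavaBinCodec;
import org.apache.solr.common.util.NamedList;

public final class LukeDecodeSketch {
  // Mirrors the updated code path in SolrSchemaFieldDao: decode the javabin
  // payload with the no-arg JavaBinCodec and wrap it in a SolrJ LukeResponse.
  @SuppressWarnings("unchecked")
  static LukeResponse decode(InputStream body) throws Exception {
    NamedList<Object> lukeData = (NamedList<Object>) new JavaBinCodec().unmarshal(body);
    LukeResponse lukeResponse = new LukeResponse();
    lukeResponse.setResponse(lukeData);
    return lukeResponse;
  }
}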
http://git-wip-us.apache.org/repos/asf/ambari/blob/a795f38c/ambari-logsearch/docker/Dockerfile
----------------------------------------------------------------------
diff --git a/ambari-logsearch/docker/Dockerfile b/ambari-logsearch/docker/Dockerfile
index 2b8fd5d..1e4135e 100644
--- a/ambari-logsearch/docker/Dockerfile
+++ b/ambari-logsearch/docker/Dockerfile
@@ -60,7 +60,7 @@ RUN npm install -g npm@2.1.11
RUN npm install -g brunch@1.7.20
# Install Solr
-ENV SOLR_VERSION 5.5.2
+ENV SOLR_VERSION 6.6.0
RUN wget --no-check-certificate -O /root/solr-$SOLR_VERSION.tgz http://public-repo-1.hortonworks.com/ARTIFACTS/dist/lucene/solr/$SOLR_VERSION/solr-$SOLR_VERSION.tgz
RUN cd /root && tar -zxvf /root/solr-$SOLR_VERSION.tgz
ADD bin/start.sh /root/start.sh
http://git-wip-us.apache.org/repos/asf/ambari/blob/a795f38c/ambari-logsearch/docker/bin/start.sh
----------------------------------------------------------------------
diff --git a/ambari-logsearch/docker/bin/start.sh b/ambari-logsearch/docker/bin/start.sh
index 28ebf65..c4ee06c 100644
--- a/ambari-logsearch/docker/bin/start.sh
+++ b/ambari-logsearch/docker/bin/start.sh
@@ -70,7 +70,7 @@ function generate_keys() {
function start_solr() {
echo "Starting Solr..."
- /root/solr-$SOLR_VERSION/bin/solr start -cloud -s /root/logsearch_solr_index/data -verbose
+ /root/solr-$SOLR_VERSION/bin/solr start -cloud -s /root/logsearch_solr_index/data -verbose -force
touch /var/log/ambari-logsearch-solr/solr.log
if [ $LOGSEARCH_SOLR_SSL_ENABLED == 'true' ]
@@ -78,7 +78,7 @@ function start_solr() {
echo "Setting urlScheme as https and restarting solr..."
$ZKCLI -zkhost localhost:9983 -cmd clusterprop -name urlScheme -val https
/root/solr-$SOLR_VERSION/bin/solr stop
- /root/solr-$SOLR_VERSION/bin/solr start -cloud -s /root/logsearch_solr_index/data -verbose
+ /root/solr-$SOLR_VERSION/bin/solr start -cloud -s /root/logsearch_solr_index/data -verbose -force
fi
}
http://git-wip-us.apache.org/repos/asf/ambari/blob/a795f38c/ambari-logsearch/pom.xml
----------------------------------------------------------------------
diff --git a/ambari-logsearch/pom.xml b/ambari-logsearch/pom.xml
index 82943e4..2be11ee 100644
--- a/ambari-logsearch/pom.xml
+++ b/ambari-logsearch/pom.xml
@@ -45,7 +45,7 @@
<deb.python.ver>python (>= 2.6)</deb.python.ver>
<deb.architecture>amd64</deb.architecture>
<deb.dependency.list>${deb.python.ver}</deb.dependency.list>
- <solr.version>5.5.2</solr.version>
+ <solr.version>6.6.0</solr.version>
<hadoop.version>2.7.2</hadoop.version>
<common.io.version>2.5</common.io.version>
</properties>
http://git-wip-us.apache.org/repos/asf/ambari/blob/a795f38c/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog300.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog300.java b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog300.java
index 5fdc885..b4502d6 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog300.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog300.java
@@ -369,6 +369,24 @@ public class UpgradeCatalog300 extends AbstractUpgradeCatalog {
updateConfigurationPropertiesForCluster(cluster, "logsearch-log4j", Collections.singletonMap("content", content), true, true);
}
}
+
+ Config logsearchServiceLogsConfig = cluster.getDesiredConfigByType("logsearch-service_logs-solrconfig");
+ if (logsearchServiceLogsConfig != null) {
+ String content = logsearchServiceLogsConfig.getProperties().get("content");
+ if (content.contains("class=\"solr.admin.AdminHandlers\"")) {
+ content = content.replaceAll("(?s)<requestHandler name=\"/admin/\".*?class=\"solr.admin.AdminHandlers\" />", "");
+ updateConfigurationPropertiesForCluster(cluster, "logsearch-service_logs-solrconfig", Collections.singletonMap("content", content), true, true);
+ }
+ }
+
+ Config logsearchAuditLogsConfig = cluster.getDesiredConfigByType("logsearch-audit_logs-solrconfig");
+ if (logsearchAuditLogsConfig != null) {
+ String content = logsearchAuditLogsConfig.getProperties().get("content");
+ if (content.contains("class=\"solr.admin.AdminHandlers\"")) {
+ content = content.replaceAll("(?s)<requestHandler name=\"/admin/\".*?class=\"solr.admin.AdminHandlers\" />", "");
+ updateConfigurationPropertiesForCluster(cluster, "logsearch-audit_logs-solrconfig", Collections.singletonMap("content", content), true, true);
+ }
+ }
}
}
}
http://git-wip-us.apache.org/repos/asf/ambari/blob/a795f38c/ambari-server/src/main/resources/common-services/AMBARI_INFRA/0.1.0/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/AMBARI_INFRA/0.1.0/package/scripts/params.py b/ambari-server/src/main/resources/common-services/AMBARI_INFRA/0.1.0/package/scripts/params.py
index 5f547f3..e4ea885 100644
--- a/ambari-server/src/main/resources/common-services/AMBARI_INFRA/0.1.0/package/scripts/params.py
+++ b/ambari-server/src/main/resources/common-services/AMBARI_INFRA/0.1.0/package/scripts/params.py
@@ -80,6 +80,9 @@ solr_client_dir = '/usr/lib/ambari-infra-solr-client'
solr_bindir = solr_dir + '/bin'
cloud_scripts = solr_dir + '/server/scripts/cloud-scripts'
+logsearch_hosts = default("/clusterHostInfo/logsearch_server_hosts", [])
+has_logsearch = len(logsearch_hosts) > 0
+
if "infra-solr-env" in config['configurations']:
infra_solr_hosts = config['clusterHostInfo']['infra_solr_hosts']
infra_solr_znode = config['configurations']['infra-solr-env']['infra_solr_znode']
http://git-wip-us.apache.org/repos/asf/ambari/blob/a795f38c/ambari-server/src/main/resources/common-services/AMBARI_INFRA/0.1.0/package/scripts/setup_infra_solr.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/AMBARI_INFRA/0.1.0/package/scripts/setup_infra_solr.py b/ambari-server/src/main/resources/common-services/AMBARI_INFRA/0.1.0/package/scripts/setup_infra_solr.py
index f3dbcf3..7427584 100644
--- a/ambari-server/src/main/resources/common-services/AMBARI_INFRA/0.1.0/package/scripts/setup_infra_solr.py
+++ b/ambari-server/src/main/resources/common-services/AMBARI_INFRA/0.1.0/package/scripts/setup_infra_solr.py
@@ -85,6 +85,11 @@ def setup_infra_solr(name = None):
create_ambari_solr_znode()
+ if params.has_logsearch:
+ cleanup_logsearch_collections(params.logsearch_service_logs_collection, jaas_file)
+ cleanup_logsearch_collections(params.logsearch_audit_logs_collection, jaas_file)
+ cleanup_logsearch_collections('history', jaas_file)
+
security_json_file_location = custom_security_json_location \
if params.infra_solr_security_json_content and str(params.infra_solr_security_json_content).strip() \
else format("{infra_solr_conf}/security.json") # security.json file to upload
@@ -141,4 +146,14 @@ def create_ambari_solr_znode():
zookeeper_quorum=params.zookeeper_quorum,
solr_znode=params.infra_solr_znode,
java64_home=params.java64_home,
- retry=30, interval=5)
\ No newline at end of file
+ retry=30, interval=5)
+
+def cleanup_logsearch_collections(collection, jaas_file):
+ import params
+ solr_cloud_util.remove_admin_handlers(
+ zookeeper_quorum=params.zookeeper_quorum,
+ solr_znode=params.infra_solr_znode,
+ java64_home=params.java64_home,
+ jaas_file=jaas_file,
+ collection=collection
+ )
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/ambari/blob/a795f38c/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/properties/audit_logs-solrconfig.xml.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/properties/audit_logs-solrconfig.xml.j2 b/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/properties/audit_logs-solrconfig.xml.j2
index 63879e7..d56990a 100644
--- a/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/properties/audit_logs-solrconfig.xml.j2
+++ b/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/properties/audit_logs-solrconfig.xml.j2
@@ -1063,8 +1063,7 @@ this file, see http://wiki.apache.org/solr/SolrConfigXml.
Admin Handlers - This will register all the standard admin
RequestHandlers.
-->
- <requestHandler name="/admin/"
- class="solr.admin.AdminHandlers"/>
+
<!-- This single handler is equivalent to the following... -->
<!--
<requestHandler name="/admin/luke" class="solr.admin.LukeRequestHandler" />
http://git-wip-us.apache.org/repos/asf/ambari/blob/a795f38c/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/properties/service_logs-solrconfig.xml.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/properties/service_logs-solrconfig.xml.j2 b/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/properties/service_logs-solrconfig.xml.j2
index b6a4d1d..ed80e84 100644
--- a/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/properties/service_logs-solrconfig.xml.j2
+++ b/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/properties/service_logs-solrconfig.xml.j2
@@ -1063,8 +1063,7 @@ this file, see http://wiki.apache.org/solr/SolrConfigXml.
Admin Handlers - This will register all the standard admin
RequestHandlers.
-->
- <requestHandler name="/admin/"
- class="solr.admin.AdminHandlers"/>
+
<!-- This single handler is equivalent to the following... -->
<!--
<requestHandler name="/admin/luke" class="solr.admin.LukeRequestHandler" />
http://git-wip-us.apache.org/repos/asf/ambari/blob/a795f38c/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog300Test.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog300Test.java b/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog300Test.java
index a342baa..d7bdf75 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog300Test.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog300Test.java
@@ -351,11 +351,38 @@ public class UpgradeCatalog300Test {
expect(controller.createConfig(anyObject(Cluster.class), anyObject(StackId.class), anyString(), capture(logSearchLog4jCapture), anyString(),
EasyMock.<Map<String, Map<String, String>>>anyObject())).andReturn(config).once();
+ Map<String, String> oldLogSearchServiceLogsConf = ImmutableMap.of(
+ "content", "<before/><requestHandler name=\"/admin/\" class=\"solr.admin.AdminHandlers\" /><after/>");
+
+ Map<String, String> expectedLogSearchServiceLogsConf = ImmutableMap.of(
+ "content", "<before/><after/>");
+
+ Config confLogSearchServiceLogsConf = easyMockSupport.createNiceMock(Config.class);
+ expect(cluster.getDesiredConfigByType("logsearch-service_logs-solrconfig")).andReturn(confLogSearchServiceLogsConf).atLeastOnce();
+ expect(confLogSearchServiceLogsConf.getProperties()).andReturn(oldLogSearchServiceLogsConf).anyTimes();
+ Capture<Map<String, String>> logSearchServiceLogsConfCapture = EasyMock.newCapture();
+ expect(controller.createConfig(anyObject(Cluster.class), anyObject(StackId.class), anyString(), capture(logSearchServiceLogsConfCapture), anyString(),
+ EasyMock.<Map<String, Map<String, String>>>anyObject())).andReturn(config).once();
+
+ Map<String, String> oldLogSearchAuditLogsConf = ImmutableMap.of(
+ "content", "<before/><requestHandler name=\"/admin/\" class=\"solr.admin.AdminHandlers\" /><after/>");
+
+ Map<String, String> expectedLogSearchAuditLogsConf = ImmutableMap.of(
+ "content", "<before/><after/>");
+
+ Config confLogSearchAuditLogsConf = easyMockSupport.createNiceMock(Config.class);
+ expect(cluster.getDesiredConfigByType("logsearch-audit_logs-solrconfig")).andReturn(confLogSearchAuditLogsConf).atLeastOnce();
+ expect(confLogSearchAuditLogsConf.getProperties()).andReturn(oldLogSearchAuditLogsConf).anyTimes();
+ Capture<Map<String, String>> logSearchAuditLogsConfCapture = EasyMock.newCapture();
+ expect(controller.createConfig(anyObject(Cluster.class), anyObject(StackId.class), anyString(), capture(logSearchAuditLogsConfCapture), anyString(),
+ EasyMock.<Map<String, Map<String, String>>>anyObject())).andReturn(config).once();
+
replay(clusters, cluster);
replay(controller, injector2);
replay(confSomethingElse1, confSomethingElse2, confLogSearchConf1, confLogSearchConf2);
replay(logSearchPropertiesConf, logFeederPropertiesConf);
replay(mockLogFeederLog4j, mockLogSearchLog4j);
+ replay(confLogSearchServiceLogsConf, confLogSearchAuditLogsConf);
new UpgradeCatalog300(injector2).updateLogSearchConfigs();
easyMockSupport.verifyAll();
@@ -376,5 +403,11 @@ public class UpgradeCatalog300Test {
Map<String, String> updatedLogSearchLog4j = logSearchLog4jCapture.getValue();
assertTrue(Maps.difference(expectedLogSearchLog4j, updatedLogSearchLog4j).areEqual());
+
+ Map<String, String> updatedServiceLogsConf = logSearchServiceLogsConfCapture.getValue();
+ assertTrue(Maps.difference(expectedLogSearchServiceLogsConf, updatedServiceLogsConf).areEqual());
+
+ Map<String, String> updatedAuditLogsConf = logSearchAuditLogsConfCapture.getValue();
+ assertTrue(Maps.difference(expectedLogSearchAuditLogsConf, updatedAuditLogsConf).areEqual());
}
}
http://git-wip-us.apache.org/repos/asf/ambari/blob/a795f38c/ambari-server/src/test/python/stacks/2.4/AMBARI_INFRA/test_infra_solr.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.4/AMBARI_INFRA/test_infra_solr.py b/ambari-server/src/test/python/stacks/2.4/AMBARI_INFRA/test_infra_solr.py
index 87304cd..e1fa1d8 100644
--- a/ambari-server/src/test/python/stacks/2.4/AMBARI_INFRA/test_infra_solr.py
+++ b/ambari-server/src/test/python/stacks/2.4/AMBARI_INFRA/test_infra_solr.py
@@ -106,6 +106,9 @@ class TestInfraSolr(RMFTestCase):
)
self.assertResourceCalled('Execute', 'ambari-sudo.sh JAVA_HOME=/usr/jdk64/jdk1.7.0_45 /usr/lib/ambari-infra-solr-client/solrCloudCli.sh --zookeeper-connect-string c6401.ambari.apache.org:2181 --znode /infra-solr --create-znode --retry 30 --interval 5')
+ self.assertResourceCalled('Execute', 'ambari-sudo.sh JAVA_HOME=/usr/jdk64/jdk1.7.0_45 /usr/lib/ambari-infra-solr-client/solrCloudCli.sh --zookeeper-connect-string c6401.ambari.apache.org:2181/infra-solr --remove-admin-handlers --collection hadoop_logs --retry 5 --interval 10')
+ self.assertResourceCalled('Execute', 'ambari-sudo.sh JAVA_HOME=/usr/jdk64/jdk1.7.0_45 /usr/lib/ambari-infra-solr-client/solrCloudCli.sh --zookeeper-connect-string c6401.ambari.apache.org:2181/infra-solr --remove-admin-handlers --collection audit_logs --retry 5 --interval 10')
+ self.assertResourceCalled('Execute', 'ambari-sudo.sh JAVA_HOME=/usr/jdk64/jdk1.7.0_45 /usr/lib/ambari-infra-solr-client/solrCloudCli.sh --zookeeper-connect-string c6401.ambari.apache.org:2181/infra-solr --remove-admin-handlers --collection history --retry 5 --interval 10')
self.assertResourceCalled('Execute', 'ambari-sudo.sh JAVA_HOME=/usr/jdk64/jdk1.7.0_45 /usr/lib/ambari-infra-solr-client/solrCloudCli.sh --zookeeper-connect-string c6401.ambari.apache.org:2181/infra-solr --cluster-prop --property-name urlScheme --property-value http')
self.assertResourceCalled('Execute', 'ambari-sudo.sh JAVA_HOME=/usr/jdk64/jdk1.7.0_45 /usr/lib/ambari-infra-solr-client/solrCloudCli.sh --zookeeper-connect-string c6401.ambari.apache.org:2181 --znode /infra-solr --setup-kerberos-plugin')
[03/18] ambari git commit: AMBARI-21430 - Allow Multiple Versions of
Stack Tools to Co-Exist (jonathanhurley)
Posted by rl...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/f33a250c/ambari-server/src/test/python/stacks/utils/RMFTestCase.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/utils/RMFTestCase.py b/ambari-server/src/test/python/stacks/utils/RMFTestCase.py
index 282b542..2f3794d 100644
--- a/ambari-server/src/test/python/stacks/utils/RMFTestCase.py
+++ b/ambari-server/src/test/python/stacks/utils/RMFTestCase.py
@@ -102,8 +102,12 @@ class RMFTestCase(TestCase):
else:
raise RuntimeError("Please specify either config_file_path or config_dict parameter")
- self.config_dict["configurations"]["cluster-env"]["stack_tools"] = RMFTestCase.get_stack_tools()
- self.config_dict["configurations"]["cluster-env"]["stack_features"] = RMFTestCase.get_stack_features()
+ # add the stack tools & features from the stack if the test case's JSON file didn't have them
+ if "stack_tools" not in self.config_dict["configurations"]["cluster-env"]:
+ self.config_dict["configurations"]["cluster-env"]["stack_tools"] = RMFTestCase.get_stack_tools()
+
+ if "stack_features" not in self.config_dict["configurations"]["cluster-env"]:
+ self.config_dict["configurations"]["cluster-env"]["stack_features"] = RMFTestCase.get_stack_features()
if config_overrides:
for key, value in config_overrides.iteritems():
[08/18] ambari git commit: AMBARI-21430 - Allow Multiple Versions of
Stack Tools to Co-Exist (jonathanhurley)
Posted by rl...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/f33a250c/ambari-server/src/test/python/stacks/2.5/configs/ranger-admin-secured.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.5/configs/ranger-admin-secured.json b/ambari-server/src/test/python/stacks/2.5/configs/ranger-admin-secured.json
index f959b1f..7f1e549 100644
--- a/ambari-server/src/test/python/stacks/2.5/configs/ranger-admin-secured.json
+++ b/ambari-server/src/test/python/stacks/2.5/configs/ranger-admin-secured.json
@@ -1,159 +1,159 @@
{
"localComponents": [
- "SECONDARY_NAMENODE",
- "HDFS_CLIENT",
- "DATANODE",
- "NAMENODE",
- "RANGER_ADMIN",
- "RANGER_TAGSYNC",
- "RANGER_USERSYNC",
- "ZOOKEEPER_SERVER",
- "ZOOKEEPER_CLIENT",
+ "SECONDARY_NAMENODE",
+ "HDFS_CLIENT",
+ "DATANODE",
+ "NAMENODE",
+ "RANGER_ADMIN",
+ "RANGER_TAGSYNC",
+ "RANGER_USERSYNC",
+ "ZOOKEEPER_SERVER",
+ "ZOOKEEPER_CLIENT",
"KERBEROS_CLIENT",
"LOGSEARCH_SOLR",
"LOGSEARCH_SOLR_CLIENT"
- ],
+ ],
"configuration_attributes": {
- "ranger-hdfs-audit": {},
- "ssl-client": {},
- "ranger-admin-site": {},
- "ranger-hdfs-policymgr-ssl": {},
- "tagsync-application-properties": {},
- "ranger-env": {},
- "usersync-log4j": {},
- "ranger-hdfs-plugin-properties": {},
- "kerberos-env": {},
- "admin-properties": {},
- "ranger-ugsync-site": {},
+ "ranger-hdfs-audit": {},
+ "ssl-client": {},
+ "ranger-admin-site": {},
+ "ranger-hdfs-policymgr-ssl": {},
+ "tagsync-application-properties": {},
+ "ranger-env": {},
+ "usersync-log4j": {},
+ "ranger-hdfs-plugin-properties": {},
+ "kerberos-env": {},
+ "admin-properties": {},
+ "ranger-ugsync-site": {},
"hdfs-site": {
"final": {
- "dfs.datanode.data.dir": "true",
- "dfs.namenode.http-address": "true",
- "dfs.datanode.failed.volumes.tolerated": "true",
- "dfs.support.append": "true",
- "dfs.namenode.name.dir": "true",
+ "dfs.datanode.data.dir": "true",
+ "dfs.namenode.http-address": "true",
+ "dfs.datanode.failed.volumes.tolerated": "true",
+ "dfs.support.append": "true",
+ "dfs.namenode.name.dir": "true",
"dfs.webhdfs.enabled": "true"
}
- },
- "ranger-tagsync-site": {},
- "zoo.cfg": {},
- "hadoop-policy": {},
- "hdfs-log4j": {},
- "krb5-conf": {},
+ },
+ "ranger-tagsync-site": {},
+ "zoo.cfg": {},
+ "hadoop-policy": {},
+ "hdfs-log4j": {},
+ "krb5-conf": {},
"core-site": {
"final": {
"fs.defaultFS": "true"
}
- },
- "hadoop-env": {},
- "zookeeper-log4j": {},
- "ssl-server": {},
- "ranger-site": {},
- "admin-log4j": {},
- "tagsync-log4j": {},
- "ranger-hdfs-security": {},
- "usersync-properties": {},
+ },
+ "hadoop-env": {},
+ "zookeeper-log4j": {},
+ "ssl-server": {},
+ "ranger-site": {},
+ "admin-log4j": {},
+ "tagsync-log4j": {},
+ "ranger-hdfs-security": {},
+ "usersync-properties": {},
"zookeeper-env": {},
"infra-solr-env": {},
"infra-solr-client-log4j": {},
"cluster-env": {}
- },
- "public_hostname": "c6401.ambari.apache.org",
- "commandId": "41-2",
- "hostname": "c6401.ambari.apache.org",
- "kerberosCommandParams": [],
- "serviceName": "RANGER",
- "role": "RANGER_ADMIN",
- "forceRefreshConfigTagsBeforeExecution": [],
- "requestId": 41,
+ },
+ "public_hostname": "c6401.ambari.apache.org",
+ "commandId": "41-2",
+ "hostname": "c6401.ambari.apache.org",
+ "kerberosCommandParams": [],
+ "serviceName": "RANGER",
+ "role": "RANGER_ADMIN",
+ "forceRefreshConfigTagsBeforeExecution": [],
+ "requestId": 41,
"agentConfigParams": {
"agent": {
"parallel_execution": 0
}
- },
- "clusterName": "test_Cluster01",
- "commandType": "EXECUTION_COMMAND",
- "taskId": 186,
- "roleParams": {},
+ },
+ "clusterName": "test_Cluster01",
+ "commandType": "EXECUTION_COMMAND",
+ "taskId": 186,
+ "roleParams": {},
"configurationTags": {
"ranger-hdfs-audit": {
"tag": "version1466705299922"
- },
+ },
"ssl-client": {
"tag": "version1"
- },
+ },
"ranger-admin-site": {
"tag": "version1467016680635"
- },
+ },
"ranger-hdfs-policymgr-ssl": {
"tag": "version1466705299922"
- },
+ },
"tagsync-application-properties": {
"tag": "version1467016680511"
- },
+ },
"ranger-env": {
"tag": "version1466705299949"
- },
+ },
"ranger-ugsync-site": {
"tag": "version1467016680537"
- },
+ },
"ranger-hdfs-plugin-properties": {
"tag": "version1466705299922"
- },
+ },
"kerberos-env": {
"tag": "version1467016537243"
- },
+ },
"admin-properties": {
"tag": "version1466705299949"
- },
+ },
"hdfs-site": {
"tag": "version1467016680401"
- },
+ },
"ranger-tagsync-site": {
"tag": "version1467016680586"
- },
+ },
"zoo.cfg": {
"tag": "version1"
- },
+ },
"hadoop-policy": {
"tag": "version1"
- },
+ },
"hdfs-log4j": {
"tag": "version1"
- },
+ },
"usersync-log4j": {
"tag": "version1466705299949"
- },
+ },
"krb5-conf": {
"tag": "version1467016537243"
- },
+ },
"core-site": {
"tag": "version1467016680612"
- },
+ },
"hadoop-env": {
"tag": "version1467016680446"
- },
+ },
"zookeeper-log4j": {
"tag": "version1"
- },
+ },
"ssl-server": {
"tag": "version1"
- },
+ },
"ranger-site": {
"tag": "version1466705299949"
- },
+ },
"admin-log4j": {
"tag": "version1466705299949"
- },
+ },
"tagsync-log4j": {
"tag": "version1466705299949"
- },
+ },
"ranger-hdfs-security": {
"tag": "version1466705299922"
- },
+ },
"usersync-properties": {
"tag": "version1466705299949"
- },
+ },
"zookeeper-env": {
"tag": "version1467016680492"
},
@@ -166,550 +166,550 @@
"cluster-env": {
"tag": "version1467016680567"
}
- },
- "roleCommand": "START",
+ },
+ "roleCommand": "START",
"hostLevelParams": {
- "agent_stack_retry_on_unavailability": "false",
- "stack_name": "HDP",
- "package_version": "2_5_0_0_*",
+ "agent_stack_retry_on_unavailability": "false",
+ "stack_name": "HDP",
+ "package_version": "2_5_0_0_*",
"custom_mysql_jdbc_name": "mysql-connector-java.jar",
"previous_custom_mysql_jdbc_name": "mysql-connector-java-old.jar",
- "host_sys_prepped": "false",
- "ambari_db_rca_username": "mapred",
- "current_version": "2.5.0.0-801",
- "mysql_jdbc_url": "http://c6401.ambari.apache.org:8080/resources//mysql-connector-java.jar",
- "agent_stack_retry_count": "5",
- "stack_version": "2.5",
- "jdk_name": "jdk-8u60-linux-x64.tar.gz",
- "ambari_db_rca_driver": "org.postgresql.Driver",
+ "host_sys_prepped": "false",
+ "ambari_db_rca_username": "mapred",
+ "current_version": "2.5.0.0-801",
+ "mysql_jdbc_url": "http://c6401.ambari.apache.org:8080/resources//mysql-connector-java.jar",
+ "agent_stack_retry_count": "5",
+ "stack_version": "2.5",
+ "jdk_name": "jdk-8u60-linux-x64.tar.gz",
+ "ambari_db_rca_driver": "org.postgresql.Driver",
"java_home": "/usr/jdk64/jdk1.7.0_45",
- "repository_version_id": "1",
- "jdk_location": "http://c6401.ambari.apache.org:8080/resources/",
- "not_managed_hdfs_path_list": "[\"/tmp\"]",
- "ambari_db_rca_url": "jdbc:postgresql://c6401.ambari.apache.org/ambarirca",
- "java_version": "8",
- "repo_info": "[{\"baseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/BUILDS/2.5.0.0-801\",\"osType\":\"redhat6\",\"repoId\":\"HDP-2.5\",\"repoName\":\"HDP\",\"defaultBaseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/updates/2.5.0.0\",\"latestBaseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/BUILDS/2.5.0.0-801\",\"baseSaved\":true},{\"baseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP-UTILS-1.1.0.21/repos/centos6\",\"osType\":\"redhat6\",\"repoId\":\"HDP-UTILS-1.1.0.21\",\"repoName\":\"HDP-UTILS\",\"defaultBaseUrl\":\"http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.21/repos/centos6\",\"latestBaseUrl\":\"http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.21/repos/centos6\",\"baseSaved\":true}]",
- "package_list": "[{\"name\":\"ranger_${stack_version}-admin\",\"condition\":\"\",\"skipUpgrade\":false},{\"name\":\"ranger_${stack_version}-usersync\",\"condition\":\"\",\"skipUpgrade\":false},{\"name\":\"ranger_${stack_version}-tagsync\",\"condition\":\"should_install_ranger_tagsync\",\"skipUpgrade\":false},{\"name\":\"ambari-logsearch-solr-client\",\"condition\":\"should_install_logsearch_solr_client\",\"skipUpgrade\":false}]",
- "db_name": "ambari",
- "group_list": "[\"ranger\",\"hadoop\",\"users\"]",
- "agentCacheDir": "/var/lib/ambari-agent/cache",
- "ambari_db_rca_password": "mapred",
- "jce_name": "jce_policy-8.zip",
- "oracle_jdbc_url": "http://c6401.ambari.apache.org:8080/resources//ojdbc6.jar",
- "db_driver_filename": "mysql-connector-java.jar",
- "user_list": "[\"zookeeper\",\"ambari-qa\",\"ranger\",\"hdfs\"]",
+ "repository_version_id": "1",
+ "jdk_location": "http://c6401.ambari.apache.org:8080/resources/",
+ "not_managed_hdfs_path_list": "[\"/tmp\"]",
+ "ambari_db_rca_url": "jdbc:postgresql://c6401.ambari.apache.org/ambarirca",
+ "java_version": "8",
+ "repo_info": "[{\"baseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/BUILDS/2.5.0.0-801\",\"osType\":\"redhat6\",\"repoId\":\"HDP-2.5\",\"repoName\":\"HDP\",\"defaultBaseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/updates/2.5.0.0\",\"latestBaseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/BUILDS/2.5.0.0-801\",\"baseSaved\":true},{\"baseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP-UTILS-1.1.0.21/repos/centos6\",\"osType\":\"redhat6\",\"repoId\":\"HDP-UTILS-1.1.0.21\",\"repoName\":\"HDP-UTILS\",\"defaultBaseUrl\":\"http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.21/repos/centos6\",\"latestBaseUrl\":\"http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.21/repos/centos6\",\"baseSaved\":true}]",
+ "package_list": "[{\"name\":\"ranger_${stack_version}-admin\",\"condition\":\"\",\"skipUpgrade\":false},{\"name\":\"ranger_${stack_version}-usersync\",\"condition\":\"\",\"skipUpgrade\":false},{\"name\":\"ranger_${stack_version}-tagsync\",\"condition\":\"should_install_ranger_tagsync\",\"skipUpgrade\":false},{\"name\":\"ambari-logsearch-solr-client\",\"condition\":\"should_install_logsearch_solr_client\",\"skipUpgrade\":false}]",
+ "db_name": "ambari",
+ "group_list": "[\"ranger\",\"hadoop\",\"users\"]",
+ "agentCacheDir": "/var/lib/ambari-agent/cache",
+ "ambari_db_rca_password": "mapred",
+ "jce_name": "jce_policy-8.zip",
+ "oracle_jdbc_url": "http://c6401.ambari.apache.org:8080/resources//ojdbc6.jar",
+ "db_driver_filename": "mysql-connector-java.jar",
+ "user_list": "[\"zookeeper\",\"ambari-qa\",\"ranger\",\"hdfs\"]",
"clientsToUpdateConfigs": "[\"*\"]"
- },
+ },
"commandParams": {
- "service_package_folder": "common-services/RANGER/0.4.0/package",
- "script": "scripts/ranger_admin.py",
- "hooks_folder": "HDP/2.0.6/hooks",
- "version": "2.5.0.0-801",
- "max_duration_for_retries": "0",
- "command_retry_enabled": "false",
- "command_timeout": "600",
+ "service_package_folder": "common-services/RANGER/0.4.0/package",
+ "script": "scripts/ranger_admin.py",
+ "hooks_folder": "HDP/2.0.6/hooks",
+ "version": "2.5.0.0-801",
+ "max_duration_for_retries": "0",
+ "command_retry_enabled": "false",
+ "command_timeout": "600",
"script_type": "PYTHON"
- },
- "forceRefreshConfigTags": [],
- "stageId": 2,
+ },
+ "forceRefreshConfigTags": [],
+ "stageId": 2,
"clusterHostInfo": {
"snamenode_host": [
"c6401.ambari.apache.org"
- ],
+ ],
"ambari_server_use_ssl": [
"false"
- ],
+ ],
"all_ping_ports": [
"8670"
- ],
+ ],
"ranger_tagsync_hosts": [
"c6401.ambari.apache.org"
- ],
+ ],
"ranger_usersync_hosts": [
"c6401.ambari.apache.org"
- ],
+ ],
"all_hosts": [
"c6401.ambari.apache.org"
- ],
+ ],
"slave_hosts": [
"c6401.ambari.apache.org"
- ],
+ ],
"namenode_host": [
"c6401.ambari.apache.org"
- ],
+ ],
"ambari_server_port": [
"8080"
- ],
+ ],
"ranger_admin_hosts": [
"c6401.ambari.apache.org"
- ],
+ ],
"all_racks": [
"/default-rack"
- ],
+ ],
"all_ipv4_ips": [
"172.22.83.73"
- ],
+ ],
"ambari_server_host": [
"c6401.ambari.apache.org"
- ],
+ ],
"zookeeper_hosts": [
"c6401.ambari.apache.org"
],
"infra_solr_hosts": [
"c6401.ambari.apache.org"
]
- },
+ },
"configurations": {
"ranger-hdfs-audit": {
- "xasecure.audit.destination.solr.zookeepers": "NONE",
- "xasecure.audit.destination.solr.urls": "",
- "xasecure.audit.destination.solr.batch.filespool.dir": "/var/log/hadoop/hdfs/audit/solr/spool",
+ "xasecure.audit.destination.solr.zookeepers": "NONE",
+ "xasecure.audit.destination.solr.urls": "",
+ "xasecure.audit.destination.solr.batch.filespool.dir": "/var/log/hadoop/hdfs/audit/solr/spool",
"xasecure.audit.destination.hdfs.batch.filespool.dir": "/var/log/hadoop/hdfs/audit/hdfs/spool",
- "xasecure.audit.destination.hdfs": "true",
+ "xasecure.audit.destination.hdfs": "true",
"xasecure.audit.destination.solr": "false",
- "xasecure.audit.provider.summary.enabled": "false",
- "xasecure.audit.destination.hdfs.dir": "hdfs://c6401.ambari.apache.org:8020/ranger/audit",
+ "xasecure.audit.provider.summary.enabled": "false",
+ "xasecure.audit.destination.hdfs.dir": "hdfs://c6401.ambari.apache.org:8020/ranger/audit",
"xasecure.audit.is.enabled": "true"
- },
+ },
"ssl-client": {
- "ssl.client.truststore.reload.interval": "10000",
- "ssl.client.keystore.password": "bigdata",
- "ssl.client.truststore.type": "jks",
- "ssl.client.keystore.location": "/etc/security/clientKeys/keystore.jks",
- "ssl.client.truststore.location": "/etc/security/clientKeys/all.jks",
- "ssl.client.truststore.password": "bigdata",
+ "ssl.client.truststore.reload.interval": "10000",
+ "ssl.client.keystore.password": "bigdata",
+ "ssl.client.truststore.type": "jks",
+ "ssl.client.keystore.location": "/etc/security/clientKeys/keystore.jks",
+ "ssl.client.truststore.location": "/etc/security/clientKeys/all.jks",
+ "ssl.client.truststore.password": "bigdata",
"ssl.client.keystore.type": "jks"
- },
+ },
"ranger-admin-site": {
"ranger.is.solr.kerberised": "true",
- "ranger.admin.kerberos.cookie.domain": "{{ranger_host}}",
- "ranger.kms.service.user.hdfs": "hdfs",
- "ranger.spnego.kerberos.principal": "HTTP/_HOST@EXAMPLE.COM",
- "ranger.ldap.ad.url": "{{ranger_ug_ldap_url}}",
- "ranger.plugins.hive.serviceuser": "hive",
- "ranger.lookup.kerberos.keytab": "/etc/security/keytabs/rangerlookup.service.keytab",
- "ranger.plugins.kms.serviceuser": "kms",
- "ranger.service.https.attrib.ssl.enabled": "false",
- "ranger.sso.browser.useragent": "Mozilla,chrome",
- "ranger.jpa.jdbc.url": "jdbc:mysql://c6401.ambari.apache.org:3306/ranger01",
- "ranger.plugins.hbase.serviceuser": "hbase",
- "ranger.plugins.hdfs.serviceuser": "hdfs",
- "ranger.ldap.group.searchbase": "{{ranger_ug_ldap_group_searchbase}}",
- "ranger.ldap.user.dnpattern": "uid={0},ou=users,dc=xasecure,dc=net",
- "ranger.plugins.knox.serviceuser": "knox",
- "ranger.ldap.base.dn": "dc=example,dc=com",
- "ranger.sso.publicKey": "",
- "ranger.admin.kerberos.cookie.path": "/",
- "ranger.service.https.attrib.clientAuth": "want",
- "ranger.jpa.jdbc.user": "{{ranger_db_user}}",
- "ranger.ldap.ad.user.searchfilter": "(sAMAccountName={0})",
- "ranger.ldap.group.roleattribute": "cn",
- "ranger.plugins.kafka.serviceuser": "kafka",
- "ranger.admin.kerberos.principal": "rangeradmin/_HOST@EXAMPLE.COM",
- "ranger.ldap.ad.bind.dn": "{{ranger_ug_ldap_bind_dn}}",
+ "ranger.admin.kerberos.cookie.domain": "{{ranger_host}}",
+ "ranger.kms.service.user.hdfs": "hdfs",
+ "ranger.spnego.kerberos.principal": "HTTP/_HOST@EXAMPLE.COM",
+ "ranger.ldap.ad.url": "{{ranger_ug_ldap_url}}",
+ "ranger.plugins.hive.serviceuser": "hive",
+ "ranger.lookup.kerberos.keytab": "/etc/security/keytabs/rangerlookup.service.keytab",
+ "ranger.plugins.kms.serviceuser": "kms",
+ "ranger.service.https.attrib.ssl.enabled": "false",
+ "ranger.sso.browser.useragent": "Mozilla,chrome",
+ "ranger.jpa.jdbc.url": "jdbc:mysql://c6401.ambari.apache.org:3306/ranger01",
+ "ranger.plugins.hbase.serviceuser": "hbase",
+ "ranger.plugins.hdfs.serviceuser": "hdfs",
+ "ranger.ldap.group.searchbase": "{{ranger_ug_ldap_group_searchbase}}",
+ "ranger.ldap.user.dnpattern": "uid={0},ou=users,dc=xasecure,dc=net",
+ "ranger.plugins.knox.serviceuser": "knox",
+ "ranger.ldap.base.dn": "dc=example,dc=com",
+ "ranger.sso.publicKey": "",
+ "ranger.admin.kerberos.cookie.path": "/",
+ "ranger.service.https.attrib.clientAuth": "want",
+ "ranger.jpa.jdbc.user": "{{ranger_db_user}}",
+ "ranger.ldap.ad.user.searchfilter": "(sAMAccountName={0})",
+ "ranger.ldap.group.roleattribute": "cn",
+ "ranger.plugins.kafka.serviceuser": "kafka",
+ "ranger.admin.kerberos.principal": "rangeradmin/_HOST@EXAMPLE.COM",
+ "ranger.ldap.ad.bind.dn": "{{ranger_ug_ldap_bind_dn}}",
"ranger.credential.provider.path": "/etc/ranger/admin/rangeradmin.jceks",
- "ranger.ldap.referral": "ignore",
- "ranger.service.http.port": "6080",
- "ranger.ldap.user.searchfilter": "(uid={0})",
- "ranger.plugins.atlas.serviceuser": "atlas",
- "ranger.truststore.password": "changeit",
- "ranger.ldap.bind.password": "{{ranger_usersync_ldap_ldapbindpassword}}",
- "ranger.audit.solr.password": "NONE",
- "ranger.audit.solr.zookeepers": "c6401.ambari.apache.org:2181/ambari-solr",
+ "ranger.ldap.referral": "ignore",
+ "ranger.service.http.port": "6080",
+ "ranger.ldap.user.searchfilter": "(uid={0})",
+ "ranger.plugins.atlas.serviceuser": "atlas",
+ "ranger.truststore.password": "changeit",
+ "ranger.ldap.bind.password": "{{ranger_usersync_ldap_ldapbindpassword}}",
+ "ranger.audit.solr.password": "NONE",
+ "ranger.audit.solr.zookeepers": "c6401.ambari.apache.org:2181/ambari-solr",
"ranger.lookup.kerberos.principal": "rangerlookup/_HOST@EXAMPLE.COM",
- "ranger.service.https.port": "6182",
- "ranger.plugins.storm.serviceuser": "storm",
- "ranger.externalurl": "{{ranger_external_url}}",
- "ranger.truststore.file": "/etc/ranger/admin/conf/ranger-admin-keystore.jks",
- "ranger.kms.service.user.hive": "",
- "ranger.https.attrib.keystore.file": "/etc/ranger/admin/conf/ranger-admin-keystore.jks",
- "ranger.jpa.jdbc.dialect": "{{jdbc_dialect}}",
- "ranger.service.host": "{{ranger_host}}",
- "ranger.service.https.attrib.keystore.keyalias": "rangeradmin",
- "ranger.service.https.attrib.keystore.pass": "xasecure",
- "ranger.unixauth.remote.login.enabled": "true",
+ "ranger.service.https.port": "6182",
+ "ranger.plugins.storm.serviceuser": "storm",
+ "ranger.externalurl": "{{ranger_external_url}}",
+ "ranger.truststore.file": "/etc/ranger/admin/conf/ranger-admin-keystore.jks",
+ "ranger.kms.service.user.hive": "",
+ "ranger.https.attrib.keystore.file": "/etc/ranger/admin/conf/ranger-admin-keystore.jks",
+ "ranger.jpa.jdbc.dialect": "{{jdbc_dialect}}",
+ "ranger.service.host": "{{ranger_host}}",
+ "ranger.service.https.attrib.keystore.keyalias": "rangeradmin",
+ "ranger.service.https.attrib.keystore.pass": "xasecure",
+ "ranger.unixauth.remote.login.enabled": "true",
"ranger.jpa.jdbc.credential.alias": "rangeradmin",
- "ranger.ldap.ad.bind.password": "{{ranger_usersync_ldap_ldapbindpassword}}",
- "ranger.audit.solr.username": "ranger_solr",
- "ranger.sso.enabled": "false",
- "ranger.audit.solr.urls": "",
- "ranger.ldap.ad.domain": "",
- "ranger.plugins.yarn.serviceuser": "yarn",
- "ranger.audit.source.type": "solr",
- "ranger.ldap.bind.dn": "{{ranger_ug_ldap_bind_dn}}",
- "ranger.ldap.url": "{{ranger_ug_ldap_url}}",
- "ranger.authentication.method": "UNIX",
- "ranger.service.http.enabled": "true",
- "ranger.ldap.group.searchfilter": "{{ranger_ug_ldap_group_searchfilter}}",
- "ranger.ldap.ad.referral": "ignore",
- "ranger.ldap.ad.base.dn": "dc=example,dc=com",
- "ranger.jpa.jdbc.password": "_",
- "ranger.spnego.kerberos.keytab": "/etc/security/keytabs/spnego.service.keytab",
- "ranger.sso.providerurl": "",
- "ranger.unixauth.service.hostname": "{{ugsync_host}}",
- "ranger.admin.kerberos.keytab": "/etc/security/keytabs/rangeradmin.service.keytab",
- "ranger.admin.kerberos.token.valid.seconds": "30",
- "ranger.jpa.jdbc.driver": "com.mysql.jdbc.Driver",
+ "ranger.ldap.ad.bind.password": "{{ranger_usersync_ldap_ldapbindpassword}}",
+ "ranger.audit.solr.username": "ranger_solr",
+ "ranger.sso.enabled": "false",
+ "ranger.audit.solr.urls": "",
+ "ranger.ldap.ad.domain": "",
+ "ranger.plugins.yarn.serviceuser": "yarn",
+ "ranger.audit.source.type": "solr",
+ "ranger.ldap.bind.dn": "{{ranger_ug_ldap_bind_dn}}",
+ "ranger.ldap.url": "{{ranger_ug_ldap_url}}",
+ "ranger.authentication.method": "UNIX",
+ "ranger.service.http.enabled": "true",
+ "ranger.ldap.group.searchfilter": "{{ranger_ug_ldap_group_searchfilter}}",
+ "ranger.ldap.ad.referral": "ignore",
+ "ranger.ldap.ad.base.dn": "dc=example,dc=com",
+ "ranger.jpa.jdbc.password": "_",
+ "ranger.spnego.kerberos.keytab": "/etc/security/keytabs/spnego.service.keytab",
+ "ranger.sso.providerurl": "",
+ "ranger.unixauth.service.hostname": "{{ugsync_host}}",
+ "ranger.admin.kerberos.keytab": "/etc/security/keytabs/rangeradmin.service.keytab",
+ "ranger.admin.kerberos.token.valid.seconds": "30",
+ "ranger.jpa.jdbc.driver": "com.mysql.jdbc.Driver",
"ranger.unixauth.service.port": "5151"
- },
+ },
"ranger-hdfs-policymgr-ssl": {
- "xasecure.policymgr.clientssl.keystore": "/usr/hdp/current/hadoop-client/conf/ranger-plugin-keystore.jks",
- "xasecure.policymgr.clientssl.truststore.password": "changeit",
- "xasecure.policymgr.clientssl.keystore.credential.file": "jceks://file{{credential_file}}",
- "xasecure.policymgr.clientssl.truststore": "/usr/hdp/current/hadoop-client/conf/ranger-plugin-truststore.jks",
- "xasecure.policymgr.clientssl.truststore.credential.file": "jceks://file{{credential_file}}",
+ "xasecure.policymgr.clientssl.keystore": "/usr/hdp/current/hadoop-client/conf/ranger-plugin-keystore.jks",
+ "xasecure.policymgr.clientssl.truststore.password": "changeit",
+ "xasecure.policymgr.clientssl.keystore.credential.file": "jceks://file{{credential_file}}",
+ "xasecure.policymgr.clientssl.truststore": "/usr/hdp/current/hadoop-client/conf/ranger-plugin-truststore.jks",
+ "xasecure.policymgr.clientssl.truststore.credential.file": "jceks://file{{credential_file}}",
"xasecure.policymgr.clientssl.keystore.password": "myKeyFilePassword"
- },
+ },
"tagsync-application-properties": {
- "atlas.kafka.zookeeper.connect": "c6401.ambari.apache.org:2181",
- "atlas.kafka.security.protocol": "SASL_PLAINTEXT",
- "atlas.jaas.KafkaClient.option.principal": "{{tagsync_jaas_principal}}",
- "atlas.jaas.KafkaClient.option.keyTab": "{{tagsync_keytab_path}}",
- "atlas.kafka.entities.group.id": "ranger_entities_consumer",
- "atlas.jaas.KafkaClient.loginModuleName": "com.sun.security.auth.module.Krb5LoginModule",
- "atlas.jaas.KafkaClient.option.serviceName": "kafka",
- "atlas.kafka.bootstrap.servers": "localhost:6667",
- "atlas.jaas.KafkaClient.option.useKeyTab": "true",
- "atlas.jaas.KafkaClient.option.storeKey": "true",
- "atlas.jaas.KafkaClient.loginModuleControlFlag": "required",
+ "atlas.kafka.zookeeper.connect": "c6401.ambari.apache.org:2181",
+ "atlas.kafka.security.protocol": "SASL_PLAINTEXT",
+ "atlas.jaas.KafkaClient.option.principal": "{{tagsync_jaas_principal}}",
+ "atlas.jaas.KafkaClient.option.keyTab": "{{tagsync_keytab_path}}",
+ "atlas.kafka.entities.group.id": "ranger_entities_consumer",
+ "atlas.jaas.KafkaClient.loginModuleName": "com.sun.security.auth.module.Krb5LoginModule",
+ "atlas.jaas.KafkaClient.option.serviceName": "kafka",
+ "atlas.kafka.bootstrap.servers": "localhost:6667",
+ "atlas.jaas.KafkaClient.option.useKeyTab": "true",
+ "atlas.jaas.KafkaClient.option.storeKey": "true",
+ "atlas.jaas.KafkaClient.loginModuleControlFlag": "required",
"atlas.kafka.sasl.kerberos.service.name": "kafka"
- },
+ },
"ranger-env": {
- "ranger_solr_shards": "1",
- "ranger_solr_config_set": "ranger_audits",
- "ranger_user": "ranger",
+ "ranger_solr_shards": "1",
+ "ranger_solr_config_set": "ranger_audits",
+ "ranger_user": "ranger",
"ranger_solr_replication_factor": "1",
- "xml_configurations_supported": "true",
- "ranger-atlas-plugin-enabled": "No",
- "ranger-hbase-plugin-enabled": "No",
- "ranger-yarn-plugin-enabled": "No",
- "bind_anonymous": "false",
- "ranger_admin_username": "amb_ranger_admin",
- "admin_password": "admin",
- "is_solrCloud_enabled": "true",
- "ranger-storm-plugin-enabled": "No",
- "ranger-hdfs-plugin-enabled": "No",
- "ranger_group": "ranger",
- "ranger-knox-plugin-enabled": "No",
- "ranger_admin_log_dir": "/var/log/ranger/admin",
- "ranger-kafka-plugin-enabled": "No",
- "ranger_privelege_user_jdbc_url": "jdbc:mysql://c6401.ambari.apache.org:3306",
- "ranger-hive-plugin-enabled": "No",
- "xasecure.audit.destination.solr": "true",
- "ranger_pid_dir": "/var/run/ranger",
- "xasecure.audit.destination.hdfs": "true",
- "admin_username": "admin",
- "xasecure.audit.destination.hdfs.dir": "hdfs://c6401.ambari.apache.org:8020/ranger/audit",
- "create_db_dbuser": "true",
- "ranger_solr_collection_name": "ranger_audits",
- "ranger_admin_password": "P1!q9xa96SMi5NCl",
+ "xml_configurations_supported": "true",
+ "ranger-atlas-plugin-enabled": "No",
+ "ranger-hbase-plugin-enabled": "No",
+ "ranger-yarn-plugin-enabled": "No",
+ "bind_anonymous": "false",
+ "ranger_admin_username": "amb_ranger_admin",
+ "admin_password": "admin",
+ "is_solrCloud_enabled": "true",
+ "ranger-storm-plugin-enabled": "No",
+ "ranger-hdfs-plugin-enabled": "No",
+ "ranger_group": "ranger",
+ "ranger-knox-plugin-enabled": "No",
+ "ranger_admin_log_dir": "/var/log/ranger/admin",
+ "ranger-kafka-plugin-enabled": "No",
+ "ranger_privelege_user_jdbc_url": "jdbc:mysql://c6401.ambari.apache.org:3306",
+ "ranger-hive-plugin-enabled": "No",
+ "xasecure.audit.destination.solr": "true",
+ "ranger_pid_dir": "/var/run/ranger",
+ "xasecure.audit.destination.hdfs": "true",
+ "admin_username": "admin",
+ "xasecure.audit.destination.hdfs.dir": "hdfs://c6401.ambari.apache.org:8020/ranger/audit",
+ "create_db_dbuser": "true",
+ "ranger_solr_collection_name": "ranger_audits",
+ "ranger_admin_password": "P1!q9xa96SMi5NCl",
"ranger_usersync_log_dir": "/var/log/ranger/usersync"
- },
+ },
"usersync-log4j": {
"content": "\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nlog4j.rootLogger = info,logFile\n\n# logFile\nlog4j.appender.logFile=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.logFile.file=${logdir}/usersync.
log\nlog4j.appender.logFile.datePattern='.'yyyy-MM-dd\nlog4j.appender.logFile.layout=org.apache.log4j.PatternLayout\nlog4j.appender.logFile.layout.ConversionPattern=%d{dd MMM yyyy HH:mm:ss} %5p %c{1} [%t] - %m%n\n\n# console\nlog4j.appender.console=org.apache.log4j.ConsoleAppender\nlog4j.appender.console.Target=System.out\nlog4j.appender.console.layout=org.apache.log4j.PatternLayout\nlog4j.appender.console.layout.ConversionPattern=%d{dd MMM yyyy HH:mm:ss} %5p %c{1} [%t] - %m%n"
- },
+ },
"ranger-hdfs-plugin-properties": {
- "hadoop.rpc.protection": "authentication",
- "ranger-hdfs-plugin-enabled": "No",
- "REPOSITORY_CONFIG_USERNAME": "hadoop",
- "policy_user": "ambari-qa",
- "common.name.for.certificate": "",
+ "hadoop.rpc.protection": "authentication",
+ "ranger-hdfs-plugin-enabled": "No",
+ "REPOSITORY_CONFIG_USERNAME": "hadoop",
+ "policy_user": "ambari-qa",
+ "common.name.for.certificate": "",
"REPOSITORY_CONFIG_PASSWORD": "hadoop"
- },
+ },
"kerberos-env": {
- "kdc_hosts": "c6401.ambari.apache.org",
- "manage_auth_to_local": "true",
- "install_packages": "true",
- "realm": "EXAMPLE.COM",
- "encryption_types": "aes des3-cbc-sha1 rc4 des-cbc-md5",
- "ad_create_attributes_template": "\n{\n \"objectClass\": [\"top\", \"person\", \"organizationalPerson\", \"user\"],\n \"cn\": \"$principal_name\",\n #if( $is_service )\n \"servicePrincipalName\": \"$principal_name\",\n #end\n \"userPrincipalName\": \"$normalized_principal\",\n \"unicodePwd\": \"$password\",\n \"accountExpires\": \"0\",\n \"userAccountControl\": \"66048\"\n}",
- "kdc_create_attributes": "",
- "admin_server_host": "c6401.ambari.apache.org",
- "group": "ambari-managed-principals",
- "password_length": "20",
- "ldap_url": "",
- "manage_identities": "true",
- "password_min_lowercase_letters": "1",
- "create_ambari_principal": "true",
- "service_check_principal_name": "${cluster_name|toLower()}-${short_date}",
- "executable_search_paths": "/usr/bin, /usr/kerberos/bin, /usr/sbin, /usr/lib/mit/bin, /usr/lib/mit/sbin",
- "password_chat_timeout": "5",
- "kdc_type": "mit-kdc",
- "set_password_expiry": "false",
- "password_min_punctuation": "1",
- "container_dn": "",
- "case_insensitive_username_rules": "false",
- "password_min_whitespace": "0",
- "password_min_uppercase_letters": "1",
+ "kdc_hosts": "c6401.ambari.apache.org",
+ "manage_auth_to_local": "true",
+ "install_packages": "true",
+ "realm": "EXAMPLE.COM",
+ "encryption_types": "aes des3-cbc-sha1 rc4 des-cbc-md5",
+ "ad_create_attributes_template": "\n{\n \"objectClass\": [\"top\", \"person\", \"organizationalPerson\", \"user\"],\n \"cn\": \"$principal_name\",\n #if( $is_service )\n \"servicePrincipalName\": \"$principal_name\",\n #end\n \"userPrincipalName\": \"$normalized_principal\",\n \"unicodePwd\": \"$password\",\n \"accountExpires\": \"0\",\n \"userAccountControl\": \"66048\"\n}",
+ "kdc_create_attributes": "",
+ "admin_server_host": "c6401.ambari.apache.org",
+ "group": "ambari-managed-principals",
+ "password_length": "20",
+ "ldap_url": "",
+ "manage_identities": "true",
+ "password_min_lowercase_letters": "1",
+ "create_ambari_principal": "true",
+ "service_check_principal_name": "${cluster_name|toLower()}-${short_date}",
+ "executable_search_paths": "/usr/bin, /usr/kerberos/bin, /usr/sbin, /usr/lib/mit/bin, /usr/lib/mit/sbin",
+ "password_chat_timeout": "5",
+ "kdc_type": "mit-kdc",
+ "set_password_expiry": "false",
+ "password_min_punctuation": "1",
+ "container_dn": "",
+ "case_insensitive_username_rules": "false",
+ "password_min_whitespace": "0",
+ "password_min_uppercase_letters": "1",
"password_min_digits": "1"
- },
+ },
"admin-properties": {
- "db_user": "rangeradmin01",
- "DB_FLAVOR": "MYSQL",
- "db_password": "rangeradmin01",
- "db_root_user": "root",
- "policymgr_external_url": "http://c6401.ambari.apache.org:6080",
- "db_name": "ranger01",
- "db_host": "c6401.ambari.apache.org",
- "db_root_password": "vagrant",
+ "db_user": "rangeradmin01",
+ "DB_FLAVOR": "MYSQL",
+ "db_password": "rangeradmin01",
+ "db_root_user": "root",
+ "policymgr_external_url": "http://c6401.ambari.apache.org:6080",
+ "db_name": "ranger01",
+ "db_host": "c6401.ambari.apache.org",
+ "db_root_password": "vagrant",
"SQL_CONNECTOR_JAR": "{{driver_curl_target}}"
- },
+ },
"ranger-ugsync-site": {
- "ranger.usersync.ldap.binddn": "",
- "ranger.usersync.policymgr.username": "rangerusersync",
- "ranger.usersync.policymanager.mockrun": "false",
- "ranger.usersync.group.searchbase": "",
- "ranger.usersync.ldap.bindalias": "testldapalias",
- "ranger.usersync.truststore.file": "/usr/hdp/current/ranger-usersync/conf/mytruststore.jks",
- "ranger.usersync.port": "5151",
- "ranger.usersync.pagedresultssize": "500",
- "ranger.usersync.group.memberattributename": "",
- "ranger.usersync.kerberos.principal": "rangerusersync/_HOST@EXAMPLE.COM",
- "ranger.usersync.source.impl.class": "org.apache.ranger.unixusersync.process.UnixUserGroupBuilder",
- "ranger.usersync.ldap.referral": "ignore",
- "ranger.usersync.group.searchfilter": "",
- "ranger.usersync.ldap.user.objectclass": "person",
- "ranger.usersync.logdir": "{{usersync_log_dir}}",
- "ranger.usersync.ldap.user.searchfilter": "",
- "ranger.usersync.ldap.groupname.caseconversion": "none",
- "ranger.usersync.ldap.ldapbindpassword": "",
- "ranger.usersync.unix.minUserId": "500",
- "ranger.usersync.policymanager.maxrecordsperapicall": "1000",
- "ranger.usersync.group.nameattribute": "",
- "ranger.usersync.policymgr.alias": "ranger.usersync.policymgr.password",
- "ranger.usersync.keystore.file": "/usr/hdp/current/ranger-usersync/conf/unixauthservice.jks",
- "ranger.usersync.user.searchenabled": "false",
- "ranger.usersync.group.usermapsyncenabled": "true",
- "ranger.usersync.ldap.bindkeystore": "",
- "ranger.usersync.ldap.user.groupnameattribute": "memberof, ismemberof",
- "ranger.usersync.kerberos.keytab": "/etc/security/keytabs/rangerusersync.service.keytab",
- "ranger.usersync.passwordvalidator.path": "./native/credValidator.uexe",
- "ranger.usersync.group.objectclass": "",
- "ranger.usersync.ldap.user.searchscope": "sub",
- "ranger.usersync.unix.password.file": "/etc/passwd",
- "ranger.usersync.ldap.user.nameattribute": "",
- "ranger.usersync.pagedresultsenabled": "true",
- "ranger.usersync.policymanager.baseURL": "{{ranger_external_url}}",
- "ranger.usersync.group.search.first.enabled": "false",
- "ranger.usersync.group.searchenabled": "false",
- "ranger.usersync.sink.impl.class": "org.apache.ranger.unixusersync.process.PolicyMgrUserGroupBuilder",
- "ranger.usersync.ssl": "true",
- "ranger.usersync.ldap.url": "",
- "ranger.usersync.ldap.searchBase": "dc=hadoop,dc=apache,dc=org",
- "ranger.usersync.policymgr.keystore": "/usr/hdp/current/ranger-usersync/conf/ugsync.jceks",
- "ranger.usersync.ldap.user.searchbase": "",
- "ranger.usersync.ldap.username.caseconversion": "none",
- "ranger.usersync.credstore.filename": "/usr/hdp/current/ranger-usersync/conf/ugsync.jceks",
- "ranger.usersync.keystore.password": "UnIx529p",
- "ranger.usersync.unix.group.file": "/etc/group",
- "ranger.usersync.filesource.file": "/tmp/usergroup.txt",
- "ranger.usersync.group.searchscope": "",
- "ranger.usersync.truststore.password": "changeit",
- "ranger.usersync.enabled": "true",
- "ranger.usersync.sleeptimeinmillisbetweensynccycle": "60000",
+ "ranger.usersync.ldap.binddn": "",
+ "ranger.usersync.policymgr.username": "rangerusersync",
+ "ranger.usersync.policymanager.mockrun": "false",
+ "ranger.usersync.group.searchbase": "",
+ "ranger.usersync.ldap.bindalias": "testldapalias",
+ "ranger.usersync.truststore.file": "/usr/hdp/current/ranger-usersync/conf/mytruststore.jks",
+ "ranger.usersync.port": "5151",
+ "ranger.usersync.pagedresultssize": "500",
+ "ranger.usersync.group.memberattributename": "",
+ "ranger.usersync.kerberos.principal": "rangerusersync/_HOST@EXAMPLE.COM",
+ "ranger.usersync.source.impl.class": "org.apache.ranger.unixusersync.process.UnixUserGroupBuilder",
+ "ranger.usersync.ldap.referral": "ignore",
+ "ranger.usersync.group.searchfilter": "",
+ "ranger.usersync.ldap.user.objectclass": "person",
+ "ranger.usersync.logdir": "{{usersync_log_dir}}",
+ "ranger.usersync.ldap.user.searchfilter": "",
+ "ranger.usersync.ldap.groupname.caseconversion": "none",
+ "ranger.usersync.ldap.ldapbindpassword": "",
+ "ranger.usersync.unix.minUserId": "500",
+ "ranger.usersync.policymanager.maxrecordsperapicall": "1000",
+ "ranger.usersync.group.nameattribute": "",
+ "ranger.usersync.policymgr.alias": "ranger.usersync.policymgr.password",
+ "ranger.usersync.keystore.file": "/usr/hdp/current/ranger-usersync/conf/unixauthservice.jks",
+ "ranger.usersync.user.searchenabled": "false",
+ "ranger.usersync.group.usermapsyncenabled": "true",
+ "ranger.usersync.ldap.bindkeystore": "",
+ "ranger.usersync.ldap.user.groupnameattribute": "memberof, ismemberof",
+ "ranger.usersync.kerberos.keytab": "/etc/security/keytabs/rangerusersync.service.keytab",
+ "ranger.usersync.passwordvalidator.path": "./native/credValidator.uexe",
+ "ranger.usersync.group.objectclass": "",
+ "ranger.usersync.ldap.user.searchscope": "sub",
+ "ranger.usersync.unix.password.file": "/etc/passwd",
+ "ranger.usersync.ldap.user.nameattribute": "",
+ "ranger.usersync.pagedresultsenabled": "true",
+ "ranger.usersync.policymanager.baseURL": "{{ranger_external_url}}",
+ "ranger.usersync.group.search.first.enabled": "false",
+ "ranger.usersync.group.searchenabled": "false",
+ "ranger.usersync.sink.impl.class": "org.apache.ranger.unixusersync.process.PolicyMgrUserGroupBuilder",
+ "ranger.usersync.ssl": "true",
+ "ranger.usersync.ldap.url": "",
+ "ranger.usersync.ldap.searchBase": "dc=hadoop,dc=apache,dc=org",
+ "ranger.usersync.policymgr.keystore": "/usr/hdp/current/ranger-usersync/conf/ugsync.jceks",
+ "ranger.usersync.ldap.user.searchbase": "",
+ "ranger.usersync.ldap.username.caseconversion": "none",
+ "ranger.usersync.credstore.filename": "/usr/hdp/current/ranger-usersync/conf/ugsync.jceks",
+ "ranger.usersync.keystore.password": "UnIx529p",
+ "ranger.usersync.unix.group.file": "/etc/group",
+ "ranger.usersync.filesource.file": "/tmp/usergroup.txt",
+ "ranger.usersync.group.searchscope": "",
+ "ranger.usersync.truststore.password": "changeit",
+ "ranger.usersync.enabled": "true",
+ "ranger.usersync.sleeptimeinmillisbetweensynccycle": "60000",
"ranger.usersync.filesource.text.delimiter": ","
- },
+ },
"hdfs-site": {
- "dfs.namenode.checkpoint.period": "21600",
- "dfs.namenode.avoid.write.stale.datanode": "true",
- "dfs.permissions.superusergroup": "hdfs",
- "dfs.namenode.startup.delay.block.deletion.sec": "3600",
- "dfs.namenode.kerberos.internal.spnego.principal": "HTTP/_HOST@EXAMPLE.COM",
- "dfs.datanode.kerberos.principal": "dn/_HOST@EXAMPLE.COM",
- "dfs.heartbeat.interval": "3",
- "dfs.content-summary.limit": "5000",
- "dfs.support.append": "true",
- "dfs.datanode.address": "0.0.0.0:1019",
- "dfs.cluster.administrators": " hdfs",
- "dfs.namenode.audit.log.async": "true",
- "dfs.datanode.balance.bandwidthPerSec": "6250000",
- "dfs.namenode.safemode.threshold-pct": "1",
- "dfs.namenode.checkpoint.edits.dir": "${dfs.namenode.checkpoint.dir}",
- "dfs.namenode.rpc-address": "c6401.ambari.apache.org:8020",
- "dfs.permissions.enabled": "true",
- "dfs.namenode.kerberos.principal": "nn/_HOST@EXAMPLE.COM",
- "dfs.client.read.shortcircuit": "true",
- "dfs.https.port": "50470",
- "dfs.namenode.https-address": "c6401.ambari.apache.org:50470",
- "nfs.file.dump.dir": "/tmp/.hdfs-nfs",
- "dfs.blocksize": "134217728",
- "dfs.blockreport.initialDelay": "120",
- "dfs.journalnode.edits.dir": "/hadoop/hdfs/journalnode",
- "dfs.namenode.fslock.fair": "false",
- "dfs.datanode.max.transfer.threads": "4096",
- "dfs.secondary.namenode.kerberos.internal.spnego.principal": "HTTP/_HOST@EXAMPLE.COM",
- "dfs.replication": "3",
- "dfs.namenode.handler.count": "50",
- "dfs.web.authentication.kerberos.keytab": "/etc/security/keytabs/spnego.service.keytab",
- "fs.permissions.umask-mode": "022",
- "dfs.namenode.stale.datanode.interval": "30000",
- "dfs.datanode.ipc.address": "0.0.0.0:8010",
- "dfs.datanode.failed.volumes.tolerated": "0",
- "dfs.datanode.data.dir": "/grid/0/hadoop/hdfs/data",
- "dfs.namenode.http-address": "c6401.ambari.apache.org:50070",
- "dfs.webhdfs.enabled": "true",
- "dfs.encrypt.data.transfer.cipher.suites": "AES/CTR/NoPadding",
- "dfs.namenode.accesstime.precision": "0",
- "dfs.namenode.write.stale.datanode.ratio": "1.0f",
- "dfs.datanode.https.address": "0.0.0.0:50475",
- "dfs.namenode.checkpoint.dir": "/grid/0/hadoop/hdfs/namesecondary",
- "dfs.namenode.secondary.http-address": "c6401.ambari.apache.org:50090",
- "nfs.exports.allowed.hosts": "* rw",
- "dfs.namenode.checkpoint.txns": "1000000",
- "dfs.datanode.http.address": "0.0.0.0:1022",
- "dfs.datanode.du.reserved": "33011188224",
- "dfs.client.read.shortcircuit.streams.cache.size": "4096",
- "dfs.secondary.namenode.keytab.file": "/etc/security/keytabs/nn.service.keytab",
- "dfs.web.authentication.kerberos.principal": "HTTP/_HOST@EXAMPLE.COM",
- "dfs.http.policy": "HTTP_ONLY",
- "dfs.block.access.token.enable": "true",
- "dfs.client.retry.policy.enabled": "false",
- "dfs.secondary.namenode.kerberos.principal": "nn/_HOST@EXAMPLE.COM",
- "dfs.datanode.keytab.file": "/etc/security/keytabs/dn.service.keytab",
- "dfs.namenode.name.dir.restore": "true",
- "dfs.namenode.keytab.file": "/etc/security/keytabs/nn.service.keytab",
- "dfs.journalnode.https-address": "0.0.0.0:8481",
- "dfs.journalnode.http-address": "0.0.0.0:8480",
- "dfs.domain.socket.path": "/var/lib/hadoop-hdfs/dn_socket",
- "dfs.namenode.avoid.read.stale.datanode": "true",
- "dfs.hosts.exclude": "/etc/hadoop/conf/dfs.exclude",
- "dfs.datanode.data.dir.perm": "750",
- "dfs.encryption.key.provider.uri": "kms://http@c6401.ambari.apache.org:9292/kms",
- "dfs.replication.max": "50",
+ "dfs.namenode.checkpoint.period": "21600",
+ "dfs.namenode.avoid.write.stale.datanode": "true",
+ "dfs.permissions.superusergroup": "hdfs",
+ "dfs.namenode.startup.delay.block.deletion.sec": "3600",
+ "dfs.namenode.kerberos.internal.spnego.principal": "HTTP/_HOST@EXAMPLE.COM",
+ "dfs.datanode.kerberos.principal": "dn/_HOST@EXAMPLE.COM",
+ "dfs.heartbeat.interval": "3",
+ "dfs.content-summary.limit": "5000",
+ "dfs.support.append": "true",
+ "dfs.datanode.address": "0.0.0.0:1019",
+ "dfs.cluster.administrators": " hdfs",
+ "dfs.namenode.audit.log.async": "true",
+ "dfs.datanode.balance.bandwidthPerSec": "6250000",
+ "dfs.namenode.safemode.threshold-pct": "1",
+ "dfs.namenode.checkpoint.edits.dir": "${dfs.namenode.checkpoint.dir}",
+ "dfs.namenode.rpc-address": "c6401.ambari.apache.org:8020",
+ "dfs.permissions.enabled": "true",
+ "dfs.namenode.kerberos.principal": "nn/_HOST@EXAMPLE.COM",
+ "dfs.client.read.shortcircuit": "true",
+ "dfs.https.port": "50470",
+ "dfs.namenode.https-address": "c6401.ambari.apache.org:50470",
+ "nfs.file.dump.dir": "/tmp/.hdfs-nfs",
+ "dfs.blocksize": "134217728",
+ "dfs.blockreport.initialDelay": "120",
+ "dfs.journalnode.edits.dir": "/hadoop/hdfs/journalnode",
+ "dfs.namenode.fslock.fair": "false",
+ "dfs.datanode.max.transfer.threads": "4096",
+ "dfs.secondary.namenode.kerberos.internal.spnego.principal": "HTTP/_HOST@EXAMPLE.COM",
+ "dfs.replication": "3",
+ "dfs.namenode.handler.count": "50",
+ "dfs.web.authentication.kerberos.keytab": "/etc/security/keytabs/spnego.service.keytab",
+ "fs.permissions.umask-mode": "022",
+ "dfs.namenode.stale.datanode.interval": "30000",
+ "dfs.datanode.ipc.address": "0.0.0.0:8010",
+ "dfs.datanode.failed.volumes.tolerated": "0",
+ "dfs.datanode.data.dir": "/grid/0/hadoop/hdfs/data",
+ "dfs.namenode.http-address": "c6401.ambari.apache.org:50070",
+ "dfs.webhdfs.enabled": "true",
+ "dfs.encrypt.data.transfer.cipher.suites": "AES/CTR/NoPadding",
+ "dfs.namenode.accesstime.precision": "0",
+ "dfs.namenode.write.stale.datanode.ratio": "1.0f",
+ "dfs.datanode.https.address": "0.0.0.0:50475",
+ "dfs.namenode.checkpoint.dir": "/grid/0/hadoop/hdfs/namesecondary",
+ "dfs.namenode.secondary.http-address": "c6401.ambari.apache.org:50090",
+ "nfs.exports.allowed.hosts": "* rw",
+ "dfs.namenode.checkpoint.txns": "1000000",
+ "dfs.datanode.http.address": "0.0.0.0:1022",
+ "dfs.datanode.du.reserved": "33011188224",
+ "dfs.client.read.shortcircuit.streams.cache.size": "4096",
+ "dfs.secondary.namenode.keytab.file": "/etc/security/keytabs/nn.service.keytab",
+ "dfs.web.authentication.kerberos.principal": "HTTP/_HOST@EXAMPLE.COM",
+ "dfs.http.policy": "HTTP_ONLY",
+ "dfs.block.access.token.enable": "true",
+ "dfs.client.retry.policy.enabled": "false",
+ "dfs.secondary.namenode.kerberos.principal": "nn/_HOST@EXAMPLE.COM",
+ "dfs.datanode.keytab.file": "/etc/security/keytabs/dn.service.keytab",
+ "dfs.namenode.name.dir.restore": "true",
+ "dfs.namenode.keytab.file": "/etc/security/keytabs/nn.service.keytab",
+ "dfs.journalnode.https-address": "0.0.0.0:8481",
+ "dfs.journalnode.http-address": "0.0.0.0:8480",
+ "dfs.domain.socket.path": "/var/lib/hadoop-hdfs/dn_socket",
+ "dfs.namenode.avoid.read.stale.datanode": "true",
+ "dfs.hosts.exclude": "/etc/hadoop/conf/dfs.exclude",
+ "dfs.datanode.data.dir.perm": "750",
+ "dfs.encryption.key.provider.uri": "kms://http@c6401.ambari.apache.org:9292/kms",
+ "dfs.replication.max": "50",
"dfs.namenode.name.dir": "/grid/0/hadoop/hdfs/namenode"
- },
+ },
"ranger-tagsync-site": {
- "ranger.tagsync.dest.ranger.ssl.config.filename": "/usr/hdp/current/ranger-tagsync/conf/mytruststore.jks",
- "ranger.tagsync.source.atlasrest.username": "",
- "ranger.tagsync.logdir": "/var/log/ranger/tagsync",
- "ranger.tagsync.source.atlasrest.download.interval.millis": "",
- "ranger.tagsync.keystore.filename": "/usr/hdp/current/ranger-tagsync/conf/rangertagsync.jceks",
- "ranger.tagsync.source.file.check.interval.millis": "",
- "ranger.tagsync.source.atlasrest.endpoint": "",
- "ranger.tagsync.dest.ranger.username": "rangertagsync",
- "ranger.tagsync.dest.ranger.endpoint": "{{ranger_external_url}}",
- "ranger.tagsync.kerberos.principal": "rangertagsync/_HOST@EXAMPLE.COM",
- "ranger.tagsync.kerberos.keytab": "/etc/security/keytabs/rangertagsync.service.keytab",
- "ranger.tagsync.source.atlas": "false",
- "ranger.tagsync.source.atlasrest": "false",
- "ranger.tagsync.source.file": "false",
+ "ranger.tagsync.dest.ranger.ssl.config.filename": "/usr/hdp/current/ranger-tagsync/conf/mytruststore.jks",
+ "ranger.tagsync.source.atlasrest.username": "",
+ "ranger.tagsync.logdir": "/var/log/ranger/tagsync",
+ "ranger.tagsync.source.atlasrest.download.interval.millis": "",
+ "ranger.tagsync.keystore.filename": "/usr/hdp/current/ranger-tagsync/conf/rangertagsync.jceks",
+ "ranger.tagsync.source.file.check.interval.millis": "",
+ "ranger.tagsync.source.atlasrest.endpoint": "",
+ "ranger.tagsync.dest.ranger.username": "rangertagsync",
+ "ranger.tagsync.dest.ranger.endpoint": "{{ranger_external_url}}",
+ "ranger.tagsync.kerberos.principal": "rangertagsync/_HOST@EXAMPLE.COM",
+ "ranger.tagsync.kerberos.keytab": "/etc/security/keytabs/rangertagsync.service.keytab",
+ "ranger.tagsync.source.atlas": "false",
+ "ranger.tagsync.source.atlasrest": "false",
+ "ranger.tagsync.source.file": "false",
"ranger.tagsync.source.file.filename": ""
- },
+ },
"zoo.cfg": {
- "clientPort": "2181",
- "autopurge.purgeInterval": "24",
- "syncLimit": "5",
- "dataDir": "/grid/0/hadoop/zookeeper",
- "initLimit": "10",
- "tickTime": "2000",
+ "clientPort": "2181",
+ "autopurge.purgeInterval": "24",
+ "syncLimit": "5",
+ "dataDir": "/grid/0/hadoop/zookeeper",
+ "initLimit": "10",
+ "tickTime": "2000",
"autopurge.snapRetainCount": "30"
- },
+ },
"hadoop-policy": {
- "security.job.client.protocol.acl": "*",
- "security.job.task.protocol.acl": "*",
- "security.datanode.protocol.acl": "*",
- "security.namenode.protocol.acl": "*",
- "security.client.datanode.protocol.acl": "*",
- "security.inter.tracker.protocol.acl": "*",
- "security.refresh.usertogroups.mappings.protocol.acl": "hadoop",
- "security.client.protocol.acl": "*",
- "security.refresh.policy.protocol.acl": "hadoop",
- "security.admin.operations.protocol.acl": "hadoop",
+ "security.job.client.protocol.acl": "*",
+ "security.job.task.protocol.acl": "*",
+ "security.datanode.protocol.acl": "*",
+ "security.namenode.protocol.acl": "*",
+ "security.client.datanode.protocol.acl": "*",
+ "security.inter.tracker.protocol.acl": "*",
+ "security.refresh.usertogroups.mappings.protocol.acl": "hadoop",
+ "security.client.protocol.acl": "*",
+ "security.refresh.policy.protocol.acl": "hadoop",
+ "security.admin.operations.protocol.acl": "hadoop",
"security.inter.datanode.protocol.acl": "*"
- },
+ },
"hdfs-log4j": {
"content": "\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n\n\n# Define some default values that can be overridden by system properties\n# To change daemon root logger use hadoop_root_logger in hadoop-env\nhadoop.root.lo
gger=INFO,console\nhadoop.log.dir=.\nhadoop.log.file=hadoop.log\n\n\n# Define the root logger to the system property \"hadoop.root.logger\".\nlog4j.rootLogger=${hadoop.root.logger}, EventCounter\n\n# Logging Threshold\nlog4j.threshhold=ALL\n\n#\n# Daily Rolling File Appender\n#\n\nlog4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}\n\n# Rollver at midnight\nlog4j.appender.DRFA.DatePattern=.yyyy-MM-dd\n\n# 30-day backup\n#log4j.appender.DRFA.MaxBackupIndex=30\nlog4j.appender.DRFA.layout=org.apache.log4j.PatternLayout\n\n# Pattern format: Date LogLevel LoggerName LogMessage\nlog4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\n# Debugging Pattern format\n#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n\n\n\n#\n# console\n# Add \"console\" to rootlogger above if you want to use this\n#\n\nlog4j.appender.console=org.apache.log4j.ConsoleAppender\nlog4j.appender
.console.target=System.err\nlog4j.appender.console.layout=org.apache.log4j.PatternLayout\nlog4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n\n\n#\n# TaskLog Appender\n#\n\n#Default values\nhadoop.tasklog.taskid=null\nhadoop.tasklog.iscleanup=false\nhadoop.tasklog.noKeepSplits=4\nhadoop.tasklog.totalLogFileSize=100\nhadoop.tasklog.purgeLogSplits=true\nhadoop.tasklog.logsRetainHours=12\n\nlog4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender\nlog4j.appender.TLA.taskId=${hadoop.tasklog.taskid}\nlog4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}\nlog4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}\n\nlog4j.appender.TLA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\n\n#\n#Security audit appender\n#\nhadoop.security.logger=INFO,console\nhadoop.security.log.maxfilesize=256MB\nhadoop.security.log.maxbackupindex=20\nlog4j.category.SecurityLogger=${hadoop.security.logg
er}\nhadoop.security.log.file=SecurityAuth.audit\nlog4j.appender.DRFAS=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}\nlog4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout\nlog4j.appender.DRFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\nlog4j.appender.DRFAS.DatePattern=.yyyy-MM-dd\n\nlog4j.appender.RFAS=org.apache.log4j.RollingFileAppender\nlog4j.appender.RFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}\nlog4j.appender.RFAS.layout=org.apache.log4j.PatternLayout\nlog4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\nlog4j.appender.RFAS.MaxFileSize=${hadoop.security.log.maxfilesize}\nlog4j.appender.RFAS.MaxBackupIndex=${hadoop.security.log.maxbackupindex}\n\n#\n# hdfs audit logging\n#\nhdfs.audit.logger=INFO,console\nlog4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger}\nlog4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=
false\nlog4j.appender.DRFAAUDIT=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFAAUDIT.File=${hadoop.log.dir}/hdfs-audit.log\nlog4j.appender.DRFAAUDIT.layout=org.apache.log4j.PatternLayout\nlog4j.appender.DRFAAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n\nlog4j.appender.DRFAAUDIT.DatePattern=.yyyy-MM-dd\n\n#\n# NameNode metrics logging.\n# The default is to retain two namenode-metrics.log files up to 64MB each.\n#\nnamenode.metrics.logger=INFO,NullAppender\nlog4j.logger.NameNodeMetricsLog=${namenode.metrics.logger}\nlog4j.additivity.NameNodeMetricsLog=false\nlog4j.appender.NNMETRICSRFA=org.apache.log4j.RollingFileAppender\nlog4j.appender.NNMETRICSRFA.File=${hadoop.log.dir}/namenode-metrics.log\nlog4j.appender.NNMETRICSRFA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.NNMETRICSRFA.layout.ConversionPattern=%d{ISO8601} %m%n\nlog4j.appender.NNMETRICSRFA.MaxBackupIndex=1\nlog4j.appender.NNMETRICSRFA.MaxFileSize=64MB\n\n#\n# mapred audit logging\n#\nmapred
.audit.logger=INFO,console\nlog4j.logger.org.apache.hadoop.mapred.AuditLogger=${mapred.audit.logger}\nlog4j.additivity.org.apache.hadoop.mapred.AuditLogger=false\nlog4j.appender.MRAUDIT=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.MRAUDIT.File=${hadoop.log.dir}/mapred-audit.log\nlog4j.appender.MRAUDIT.layout=org.apache.log4j.PatternLayout\nlog4j.appender.MRAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n\nlog4j.appender.MRAUDIT.DatePattern=.yyyy-MM-dd\n\n#\n# Rolling File Appender\n#\n\nlog4j.appender.RFA=org.apache.log4j.RollingFileAppender\nlog4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}\n\n# Logfile size and and 30-day backups\nlog4j.appender.RFA.MaxFileSize=256MB\nlog4j.appender.RFA.MaxBackupIndex=10\n\nlog4j.appender.RFA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} - %m%n\nlog4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n\n\n\n# Custom Logging leve
ls\n\nhadoop.metrics.log.level=INFO\n#log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG\n#log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG\n#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG\nlog4j.logger.org.apache.hadoop.metrics2=${hadoop.metrics.log.level}\n\n# Jets3t library\nlog4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR\n\n#\n# Null Appender\n# Trap security logger on the hadoop client side\n#\nlog4j.appender.NullAppender=org.apache.log4j.varia.NullAppender\n\n#\n# Event Counter Appender\n# Sends counts of logging messages at different severity levels to Hadoop Metrics.\n#\nlog4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter\n\n# Removes \"deprecated\" messages\nlog4j.logger.org.apache.hadoop.conf.Configuration.deprecation=WARN\n\n#\n# HDFS block state change log from block manager\n#\n# Uncomment the following to suppress normal block state change\n# messages from BlockManager in NameNode.\n#log4j.logger.BlockStateCh
ange=WARN"
- },
+ },
"krb5-conf": {
- "domains": "",
- "manage_krb5_conf": "true",
- "content": "\n[libdefaults]\n renew_lifetime = 7d\n forwardable = true\n default_realm = {{realm}}\n ticket_lifetime = 24h\n dns_lookup_realm = false\n dns_lookup_kdc = false\n default_ccache_name = /tmp/krb5cc_%{uid}\n #default_tgs_enctypes = {{encryption_types}}\n #default_tkt_enctypes = {{encryption_types}}\n{% if domains %}\n[domain_realm]\n{%- for domain in domains.split(',') %}\n {{domain|trim()}} = {{realm}}\n{%- endfor %}\n{% endif %}\n[logging]\n default = FILE:/var/log/krb5kdc.log\n admin_server = FILE:/var/log/kadmind.log\n kdc = FILE:/var/log/krb5kdc.log\n\n[realms]\n {{realm}} = {\n{%- if kdc_hosts > 0 -%}\n{%- set kdc_host_list = kdc_hosts.split(',') -%}\n{%- if kdc_host_list and kdc_host_list|length > 0 %}\n admin_server = {{admin_server_host|default(kdc_host_list[0]|trim(), True)}}\n{%- if kdc_host_list -%}\n{% for kdc_host in kdc_host_list %}\n kdc = {{kdc_host|trim()}}\n{%- endfor -%}\n{% endif %}\n{%- endif %}\n{%- endif %}\n }\n\
n{# Append additional realm declarations below #}",
+ "domains": "",
+ "manage_krb5_conf": "true",
+ "content": "\n[libdefaults]\n renew_lifetime = 7d\n forwardable = true\n default_realm = {{realm}}\n ticket_lifetime = 24h\n dns_lookup_realm = false\n dns_lookup_kdc = false\n default_ccache_name = /tmp/krb5cc_%{uid}\n #default_tgs_enctypes = {{encryption_types}}\n #default_tkt_enctypes = {{encryption_types}}\n{% if domains %}\n[domain_realm]\n{%- for domain in domains.split(',') %}\n {{domain|trim()}} = {{realm}}\n{%- endfor %}\n{% endif %}\n[logging]\n default = FILE:/var/log/krb5kdc.log\n admin_server = FILE:/var/log/kadmind.log\n kdc = FILE:/var/log/krb5kdc.log\n\n[realms]\n {{realm}} = {\n{%- if kdc_hosts > 0 -%}\n{%- set kdc_host_list = kdc_hosts.split(',') -%}\n{%- if kdc_host_list and kdc_host_list|length > 0 %}\n admin_server = {{admin_server_host|default(kdc_host_list[0]|trim(), True)}}\n{%- if kdc_host_list -%}\n{% for kdc_host in kdc_host_list %}\n kdc = {{kdc_host|trim()}}\n{%- endfor -%}\n{% endif %}\n{%- endif %}\n{%- endif %}\n }\n\
n{# Append additional realm declarations below #}",
"conf_dir": "/etc"
- },
+ },
"core-site": {
- "net.topology.script.file.name": "/etc/hadoop/conf/topology_script.py",
- "hadoop.proxyuser.hdfs.groups": "*",
- "fs.trash.interval": "360",
- "ipc.server.tcpnodelay": "true",
- "io.compression.codecs": "org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec,org.apache.hadoop.io.compress.SnappyCodec",
- "ipc.client.idlethreshold": "8000",
- "io.file.buffer.size": "131072",
- "hadoop.proxyuser.ambari-server-test_cluster01.groups": "*",
- "io.serializations": "org.apache.hadoop.io.serializer.WritableSerialization",
- "hadoop.security.authentication": "kerberos",
- "mapreduce.jobtracker.webinterface.trusted": "false",
- "hadoop.proxyuser.hdfs.hosts": "*",
- "hadoop.proxyuser.HTTP.groups": "users",
- "fs.defaultFS": "hdfs://c6401.ambari.apache.org:8020",
- "ha.failover-controller.active-standby-elector.zk.op.retries": "120",
- "hadoop.security.key.provider.path": "kms://http@c6401.ambari.apache.org:9292/kms",
- "hadoop.security.authorization": "true",
- "hadoop.http.authentication.simple.anonymous.allowed": "true",
- "ipc.client.connect.max.retries": "50",
- "hadoop.security.auth_to_local": "RULE:[1:$1@$0](ambari-qa-test_cluster01@EXAMPLE.COM)s/.*/ambari-qa/\nRULE:[1:$1@$0](hdfs-test_cluster01@EXAMPLE.COM)s/.*/hdfs/\nRULE:[1:$1@$0](.*@EXAMPLE.COM)s/@.*//\nRULE:[2:$1@$0](dn@EXAMPLE.COM)s/.*/hdfs/\nRULE:[2:$1@$0](nn@EXAMPLE.COM)s/.*/hdfs/\nRULE:[2:$1@$0](rangeradmin@EXAMPLE.COM)s/.*/ranger/\nRULE:[2:$1@$0](rangertagsync@EXAMPLE.COM)s/.*/rangertagsync/\nRULE:[2:$1@$0](rangerusersync@EXAMPLE.COM)s/.*/rangerusersync/\nDEFAULT",
- "hadoop.proxyuser.ambari-server-test_cluster01.hosts": "c6401.ambari.apache.org",
+ "net.topology.script.file.name": "/etc/hadoop/conf/topology_script.py",
+ "hadoop.proxyuser.hdfs.groups": "*",
+ "fs.trash.interval": "360",
+ "ipc.server.tcpnodelay": "true",
+ "io.compression.codecs": "org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec,org.apache.hadoop.io.compress.SnappyCodec",
+ "ipc.client.idlethreshold": "8000",
+ "io.file.buffer.size": "131072",
+ "hadoop.proxyuser.ambari-server-test_cluster01.groups": "*",
+ "io.serializations": "org.apache.hadoop.io.serializer.WritableSerialization",
+ "hadoop.security.authentication": "kerberos",
+ "mapreduce.jobtracker.webinterface.trusted": "false",
+ "hadoop.proxyuser.hdfs.hosts": "*",
+ "hadoop.proxyuser.HTTP.groups": "users",
+ "fs.defaultFS": "hdfs://c6401.ambari.apache.org:8020",
+ "ha.failover-controller.active-standby-elector.zk.op.retries": "120",
+ "hadoop.security.key.provider.path": "kms://http@c6401.ambari.apache.org:9292/kms",
+ "hadoop.security.authorization": "true",
+ "hadoop.http.authentication.simple.anonymous.allowed": "true",
+ "ipc.client.connect.max.retries": "50",
+ "hadoop.security.auth_to_local": "RULE:[1:$1@$0](ambari-qa-test_cluster01@EXAMPLE.COM)s/.*/ambari-qa/\nRULE:[1:$1@$0](hdfs-test_cluster01@EXAMPLE.COM)s/.*/hdfs/\nRULE:[1:$1@$0](.*@EXAMPLE.COM)s/@.*//\nRULE:[2:$1@$0](dn@EXAMPLE.COM)s/.*/hdfs/\nRULE:[2:$1@$0](nn@EXAMPLE.COM)s/.*/hdfs/\nRULE:[2:$1@$0](rangeradmin@EXAMPLE.COM)s/.*/ranger/\nRULE:[2:$1@$0](rangertagsync@EXAMPLE.COM)s/.*/rangertagsync/\nRULE:[2:$1@$0](rangerusersync@EXAMPLE.COM)s/.*/rangerusersync/\nDEFAULT",
+ "hadoop.proxyuser.ambari-server-test_cluster01.hosts": "c6401.ambari.apache.org",
"ipc.client.connection.maxidletime": "30000"
- },
+ },
"hadoop-env": {
- "keyserver_port": "",
- "proxyuser_group": "users",
- "hdfs_user_nproc_limit": "65536",
- "hdfs_log_dir_prefix": "/var/log/hadoop",
- "hdfs_user_nofile_limit": "128000",
- "hdfs_user": "hdfs",
- "hdfs_principal_name": "hdfs-test_cluster01@EXAMPLE.COM",
- "keyserver_host": " ",
- "namenode_opt_maxnewsize": "128m",
- "hdfs_user_keytab": "/etc/security/keytabs/hdfs.headless.keytab",
- "namenode_opt_maxpermsize": "256m",
- "content": "\n# Set Hadoop-specific environment variables here.\n\n# The only required environment variable is JAVA_HOME. All others are\n# optional. When running a distributed configuration it is best to\n# set JAVA_HOME in this file, so that it is correctly defined on\n# remote nodes.\n\n# The java implementation to use. Required.\nexport JAVA_HOME={{java_home}}\nexport HADOOP_HOME_WARN_SUPPRESS=1\n\n# Hadoop home directory\nexport HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n\n# Hadoop Configuration Directory\n\n{# this is different for HDP1 #}\n# Path to jsvc required by secure HDP 2.0 datanode\nexport JSVC_HOME={{jsvc_path}}\n\n\n# The maximum amount of heap to use, in MB. Default is 1000.\nexport HADOOP_HEAPSIZE=\"{{hadoop_heapsize}}\"\n\nexport HADOOP_NAMENODE_INIT_HEAPSIZE=\"-Xms{{namenode_heapsize}}\"\n\n# Extra Java runtime options. Empty by default.\nexport HADOOP_OPTS=\"-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}\"\n\n# Command specific options appende
d to HADOOP_OPTS when specified\nHADOOP_JOBTRACKER_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}\"\n\nHADOOP_TASKTRACKER_OPTS=\"-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}\"\n\n{% if java_version < 8 %}\nSHARED_HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSiz
e={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT\"\nexport HADOOP_NAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-namenode/bin/kill-name-node\\\" -Dorg.mortbay.jetty.Request.maxFormContentSize=-1 ${HADOOP_NAMENODE_OPTS}\"\nexport HADOOP_DATANODE_OPTS=\"-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -XX:PermSize=128m -XX:MaxPermSize=256m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms
{{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS}\"\n\nexport HADOOP_SECONDARYNAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-secondarynamenode/bin/kill-secondary-name-node\\\" ${HADOOP_SECONDARYNAMENODE_OPTS}\"\n\n# The following applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m -XX:MaxPermSize=512m $HADOOP_CLIENT_OPTS\"\n\n{% else %}\nSHARED_HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -Xms{{name
node_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT\"\nexport HADOOP_NAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-namenode/bin/kill-name-node\\\" -Dorg.mortbay.jetty.Request.maxFormContentSize=-1 ${HADOOP_NAMENODE_OPTS}\"\nexport HADOOP_DATANODE_OPTS=\"-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS}\"\n\nexport HADOOP_SECONDARYNAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-secondarynamenode/bin/kill-secondary-name-node\\\" ${HADOOP_SECONDARY
NAMENODE_OPTS}\"\n\n# The following applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS\"\n{% endif %}\n\nHADOOP_NFS3_OPTS=\"-Xmx{{nfsgateway_heapsize}}m -Dhadoop.security.logger=ERROR,DRFAS ${HADOOP_NFS3_OPTS}\"\nHADOOP_BALANCER_OPTS=\"-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}\"\n\n\n# On secure datanodes, user to run the datanode as after dropping privileges\nexport HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER:-{{hadoop_secure_dn_user}}}\n\n# Extra ssh options. Empty by default.\nexport HADOOP_SSH_OPTS=\"-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR\"\n\n# Where log files are stored. $HADOOP_HOME/logs by default.\nexport HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER\n\n# History server logs\nexport HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER\n\n# Where log files are stored in the secure data environment.\nexport HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_D
N_USER\n\n# File naming remote slave hosts. $HADOOP_HOME/conf/slaves by default.\n# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves\n\n# host:path where hadoop code should be rsync'd from. Unset by default.\n# export HADOOP_MASTER=master:/home/$USER/src/hadoop\n\n# Seconds to sleep between slave commands. Unset by default. This\n# can be useful in large clusters, where, e.g., slave rsyncs can\n# otherwise arrive faster than the master can service them.\n# export HADOOP_SLAVE_SLEEP=0.1\n\n# The directory where pid files are stored. /tmp by default.\nexport HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER\nexport HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# History server pid\nexport HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER\n\nYARN_RESOURCEMANAGER_OPTS=\"-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY\"\n\n# A string representing this instance of hadoop. $USER by default.\nexport HADOOP_IDENT_STRING=$USER\n\n# The schedul
ing priority for daemon processes. See 'man nice'.\n\n# export HADOOP_NICENESS=10\n\n# Add database libraries\nJAVA_JDBC_LIBS=\"\"\nif [ -d \"/usr/share/java\" ]; then\n for jarFile in `ls /usr/share/java | grep -E \"(mysql|ojdbc|postgresql|sqljdbc)\" 2>/dev/null`\n do\n JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\n done\nfi\n\n# Add libraries to the hadoop classpath - some may not need a colon as they already include it\nexport HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}\n\n# Setting path to hdfs command line\nexport HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}\n\n# Mostly required for hadoop 2.0\nexport JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}\n\nexport HADOOP_OPTS=\"-Dhdp.version=$HDP_VERSION $HADOOP_OPTS\"\n\n{% if is_datanode_max_locked_memory_set %}\n# Fix temporary bug, when ulimit from conf files is not picked up, without full relogin. \n# Makes sense to fix only when runing DN as root \nif [ \"$command\" == \"datanode\" ] && [ \"$EUID\" -eq 0 ] && [ -n \"$HADOOP_S
ECURE_DN_USER\" ]; then\n ulimit -l {{datanode_max_locked_memory}}\nfi\n{% endif %}",
- "namenode_heapsize": "1024m",
- "namenode_opt_newsize": "128m",
- "nfsgateway_heapsize": "1024",
- "dtnode_heapsize": "1024m",
- "hadoop_root_logger": "INFO,RFA",
- "hadoop_heapsize": "1024",
- "hadoop_pid_dir_prefix": "/var/run/hadoop",
- "namenode_opt_permsize": "128m",
+ "keyserver_port": "",
+ "proxyuser_group": "users",
+ "hdfs_user_nproc_limit": "65536",
+ "hdfs_log_dir_prefix": "/var/log/hadoop",
+ "hdfs_user_nofile_limit": "128000",
+ "hdfs_user": "hdfs",
+ "hdfs_principal_name": "hdfs-test_cluster01@EXAMPLE.COM",
+ "keyserver_host": " ",
+ "namenode_opt_maxnewsize": "128m",
+ "hdfs_user_keytab": "/etc/security/keytabs/hdfs.headless.keytab",
+ "namenode_opt_maxpermsize": "256m",
+ "content": "\n# Set Hadoop-specific environment variables here.\n\n# The only required environment variable is JAVA_HOME. All others are\n# optional. When running a distributed configuration it is best to\n# set JAVA_HOME in this file, so that it is correctly defined on\n# remote nodes.\n\n# The java implementation to use. Required.\nexport JAVA_HOME={{java_home}}\nexport HADOOP_HOME_WARN_SUPPRESS=1\n\n# Hadoop home directory\nexport HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n\n# Hadoop Configuration Directory\n\n{# this is different for HDP1 #}\n# Path to jsvc required by secure HDP 2.0 datanode\nexport JSVC_HOME={{jsvc_path}}\n\n\n# The maximum amount of heap to use, in MB. Default is 1000.\nexport HADOOP_HEAPSIZE=\"{{hadoop_heapsize}}\"\n\nexport HADOOP_NAMENODE_INIT_HEAPSIZE=\"-Xms{{namenode_heapsize}}\"\n\n# Extra Java runtime options. Empty by default.\nexport HADOOP_OPTS=\"-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}\"\n\n# Command specific options appende
d to HADOOP_OPTS when specified\nHADOOP_JOBTRACKER_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}\"\n\nHADOOP_TASKTRACKER_OPTS=\"-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}\"\n\n{% if java_version < 8 %}\nSHARED_HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSiz
e={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT\"\nexport HADOOP_NAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-namenode/bin/kill-name-node\\\" -Dorg.mortbay.jetty.Request.maxFormContentSize=-1 ${HADOOP_NAMENODE_OPTS}\"\nexport HADOOP_DATANODE_OPTS=\"-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -XX:PermSize=128m -XX:MaxPermSize=256m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms
{{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS}\"\n\nexport HADOOP_SECONDARYNAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-secondarynamenode/bin/kill-secondary-name-node\\\" ${HADOOP_SECONDARYNAMENODE_OPTS}\"\n\n# The following applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m -XX:MaxPermSize=512m $HADOOP_CLIENT_OPTS\"\n\n{% else %}\nSHARED_HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -Xms{{name
node_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT\"\nexport HADOOP_NAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-namenode/bin/kill-name-node\\\" -Dorg.mortbay.jetty.Request.maxFormContentSize=-1 ${HADOOP_NAMENODE_OPTS}\"\nexport HADOOP_DATANODE_OPTS=\"-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS}\"\n\nexport HADOOP_SECONDARYNAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-secondarynamenode/bin/kill-secondary-name-node\\\" ${HADOOP_SECONDARY
NAMENODE_OPTS}\"\n\n# The following applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS\"\n{% endif %}\n\nHADOOP_NFS3_OPTS=\"-Xmx{{nfsgateway_heapsize}}m -Dhadoop.security.logger=ERROR,DRFAS ${HADOOP_NFS3_OPTS}\"\nHADOOP_BALANCER_OPTS=\"-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}\"\n\n\n# On secure datanodes, user to run the datanode as after dropping privileges\nexport HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER:-{{hadoop_secure_dn_user}}}\n\n# Extra ssh options. Empty by default.\nexport HADOOP_SSH_OPTS=\"-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR\"\n\n# Where log files are stored. $HADOOP_HOME/logs by default.\nexport HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER\n\n# History server logs\nexport HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER\n\n# Where log files are stored in the secure data environment.\nexport HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_D
N_USER\n\n# File naming remote slave hosts. $HADOOP_HOME/conf/slaves by default.\n# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves\n\n# host:path where hadoop code should be rsync'd from. Unset by default.\n# export HADOOP_MASTER=master:/home/$USER/src/hadoop\n\n# Seconds to sleep between slave commands. Unset by default. This\n# can be useful in large clusters, where, e.g., slave rsyncs can\n# otherwise arrive faster than the master can service them.\n# export HADOOP_SLAVE_SLEEP=0.1\n\n# The directory where pid files are stored. /tmp by default.\nexport HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER\nexport HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# History server pid\nexport HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER\n\nYARN_RESOURCEMANAGER_OPTS=\"-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY\"\n\n# A string representing this instance of hadoop. $USER by default.\nexport HADOOP_IDENT_STRING=$USER\n\n# The schedul
ing priority for daemon processes. See 'man nice'.\n\n# export HADOOP_NICENESS=10\n\n# Add database libraries\nJAVA_JDBC_LIBS=\"\"\nif [ -d \"/usr/share/java\" ]; then\n for jarFile in `ls /usr/share/java | grep -E \"(mysql|ojdbc|postgresql|sqljdbc)\" 2>/dev/null`\n do\n JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\n done\nfi\n\n# Add libraries to the hadoop classpath - some may not need a colon as they already include it\nexport HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}\n\n# Setting path to hdfs command line\nexport HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}\n\n# Mostly required for hadoop 2.0\nexport JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}\n\nexport HADOOP_OPTS=\"-Dhdp.version=$HDP_VERSION $HADOOP_OPTS\"\n\n{% if is_datanode_max_locked_memory_set %}\n# Fix temporary bug, when ulimit from conf files is not picked up, without full relogin. \n# Makes sense to fix only when runing DN as root \nif [ \"$command\" == \"datanode\" ] && [ \"$EUID\" -eq 0 ] && [ -n \"$HADOOP_S
ECURE_DN_USER\" ]; then\n ulimit -l {{datanode_max_locked_memory}}\nfi\n{% endif %}",
+ "namenode_heapsize": "1024m",
+ "namenode_opt_newsize": "128m",
+ "nfsgateway_heapsize": "1024",
+ "dtnode_heapsize": "1024m",
+ "hadoop_root_logger": "INFO,RFA",
+ "hadoop_heapsize": "1024",
+ "hadoop_pid_dir_prefix": "/var/run/hadoop",
+ "namenode_opt_permsize": "128m",
"hdfs_tmp_dir": "/tmp"
- },
+ },
"zookeeper-log4j": {
"content": "\n#\n#\n# Licensed to the Apache Softwa
<TRUNCATED>
[11/18] ambari git commit: AMBARI-21430 - Allow Multiple Versions of
Stack Tools to Co-Exist (jonathanhurley)
Posted by rl...@apache.org.
AMBARI-21430 - Allow Multiple Versions of Stack Tools to Co-Exist (jonathanhurley)
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/f33a250c
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/f33a250c
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/f33a250c
Branch: refs/heads/branch-feature-AMBARI-20859
Commit: f33a250c0e7624b6cbc0a11ffce12506eaa95d9a
Parents: a795f38
Author: Jonathan Hurley <jh...@hortonworks.com>
Authored: Fri Jul 7 14:36:05 2017 -0400
Committer: Jonathan Hurley <jh...@hortonworks.com>
Committed: Fri Jul 7 23:00:23 2017 -0400
----------------------------------------------------------------------
.../libraries/functions/stack_features.py | 13 +
.../libraries/functions/stack_tools.py | 39 +
.../libraries/script/script.py | 19 +-
.../server/api/query/JpaPredicateVisitor.java | 8 +-
.../controller/ActionExecutionContext.java | 26 +
.../controller/AmbariActionExecutionHelper.java | 26 +-
.../BlueprintConfigurationProcessor.java | 59 +-
.../ClusterStackVersionResourceProvider.java | 163 ++-
.../ambari/server/state/ConfigHelper.java | 32 +
.../ambari/server/topology/AmbariContext.java | 18 +
.../server/upgrade/UpgradeCatalog252.java | 61 +
.../package/alerts/alert_hive_metastore.py | 11 +-
.../package/alerts/alert_llap_app_status.py | 12 +-
.../package/alerts/alert_check_oozie_server.py | 8 +-
.../resources/host_scripts/alert_disk_space.py | 10 +-
.../host_scripts/alert_version_select.py | 16 +-
.../HDP/2.0.6/configuration/cluster-env.xml | 16 +-
.../HDP/2.0.6/properties/stack_features.json | 852 +++++------
.../HDP/2.0.6/properties/stack_tools.json | 16 +-
.../PERF/1.0/configuration/cluster-env.xml | 16 +-
.../PERF/1.0/properties/stack_features.json | 38 +-
.../stacks/PERF/1.0/properties/stack_tools.json | 16 +-
.../BlueprintConfigurationProcessorTest.java | 41 +-
...ClusterStackVersionResourceProviderTest.java | 4 +-
.../ClusterConfigurationRequestTest.java | 60 +-
.../common-services/configs/hawq_default.json | 6 +-
.../python/host_scripts/TestAlertDiskSpace.py | 16 +-
.../2.5/configs/ranger-admin-default.json | 990 ++++++-------
.../2.5/configs/ranger-admin-secured.json | 1108 +++++++--------
.../stacks/2.5/configs/ranger-kms-default.json | 1158 +++++++--------
.../stacks/2.5/configs/ranger-kms-secured.json | 1320 +++++++++---------
.../2.6/configs/ranger-admin-default.json | 953 +++++++------
.../2.6/configs/ranger-admin-secured.json | 1066 +++++++-------
.../src/test/python/stacks/utils/RMFTestCase.py | 8 +-
34 files changed, 4353 insertions(+), 3852 deletions(-)
----------------------------------------------------------------------
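For orientation, a minimal sketch (values illustrative, matching the selector example further down in this commit) of the cluster-env change: stack_tools and stack_features move from flat JSON strings to JSON objects keyed by stack name, so definitions for several stacks can coexist.

  # Before AMBARI-21430: cluster-env/stack_tools is a flat JSON string
  old_stack_tools = '{"stack_selector": ["hdp-select", "/usr/bin/hdp-select", "hdp-select"], "conf_selector": ["conf-select", "/usr/bin/conf-select", "conf-select"]}'
  # After: the same entries, keyed by stack name
  new_stack_tools = '{"HDP": {"stack_selector": ["hdp-select", "/usr/bin/hdp-select", "hdp-select"], "conf_selector": ["conf-select", "/usr/bin/conf-select", "conf-select"]}}'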
http://git-wip-us.apache.org/repos/asf/ambari/blob/f33a250c/ambari-common/src/main/python/resource_management/libraries/functions/stack_features.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/functions/stack_features.py b/ambari-common/src/main/python/resource_management/libraries/functions/stack_features.py
index cbd32e7..576c138 100644
--- a/ambari-common/src/main/python/resource_management/libraries/functions/stack_features.py
+++ b/ambari-common/src/main/python/resource_management/libraries/functions/stack_features.py
@@ -43,6 +43,12 @@ def check_stack_feature(stack_feature, stack_version):
from resource_management.libraries.functions.default import default
from resource_management.libraries.functions.version import compare_versions
+
+ stack_name = default("/hostLevelParams/stack_name", None)
+ if stack_name is None:
+ Logger.warning("Cannot find the stack name in the command. Stack features cannot be loaded")
+ return False
+
stack_features_config = default("/configurations/cluster-env/stack_features", None)
if not stack_version:
@@ -51,6 +57,13 @@ def check_stack_feature(stack_feature, stack_version):
if stack_features_config:
data = json.loads(stack_features_config)
+
+ if stack_name not in data:
+ Logger.warning("Cannot find stack features for the stack named {0}".format(stack_name))
+ return False
+
+ data = data[stack_name]
+
for feature in data["stack_features"]:
if feature["name"] == stack_feature:
if "min_version" in feature:
http://git-wip-us.apache.org/repos/asf/ambari/blob/f33a250c/ambari-common/src/main/python/resource_management/libraries/functions/stack_tools.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/functions/stack_tools.py b/ambari-common/src/main/python/resource_management/libraries/functions/stack_tools.py
index 02ae62d..420ae11 100644
--- a/ambari-common/src/main/python/resource_management/libraries/functions/stack_tools.py
+++ b/ambari-common/src/main/python/resource_management/libraries/functions/stack_tools.py
@@ -39,15 +39,33 @@ def get_stack_tool(name):
:return: tool_name, tool_path, tool_package
"""
from resource_management.libraries.functions.default import default
+
+ stack_name = default("/hostLevelParams/stack_name", None)
+ if stack_name is None:
+ Logger.warning("Cannot find the stack name in the command. Stack tools cannot be loaded")
+ return (None, None, None)
+
stack_tools = None
stack_tools_config = default("/configurations/cluster-env/stack_tools", None)
if stack_tools_config:
stack_tools = json.loads(stack_tools_config)
+ if stack_tools is None:
+ Logger.warning("The stack tools could not be found in cluster-env")
+ return (None, None, None)
+
+ if stack_name not in stack_tools:
+ Logger.warning("Cannot find stack tools for the stack named {0}".format(stack_name))
+ return (None, None, None)
+
+ # load the stack tools keyed by the stack name
+ stack_tools = stack_tools[stack_name]
+
if not stack_tools or not name or name.lower() not in stack_tools:
Logger.warning("Cannot find config for {0} stack tool in {1}".format(str(name), str(stack_tools)))
return (None, None, None)
+
tool_config = stack_tools[name.lower()]
# Return fixed length (tool_name, tool_path tool_package) tuple
@@ -81,3 +99,24 @@ def get_stack_tool_package(name):
"""
(tool_name, tool_path, tool_package) = get_stack_tool(name)
return tool_package
+
+
+def get_stack_root(stack_name, stack_root_json):
+ """
+ Get the stack-specific install root directory from the raw, JSON-escaped properties.
+ :param stack_name:
+ :param stack_root_json:
+ :return: stack_root
+ """
+ from resource_management.libraries.functions.default import default
+
+ if stack_root_json is None:
+ return "/usr/{0}".format(stack_name.lower())
+
+ stack_root = json.loads(stack_root_json)
+
+ if stack_name not in stack_root:
+ Logger.warning("Cannot determine stack root for stack named {0}".format(stack_name))
+ return "/usr/{0}".format(stack_name.lower())
+
+ return stack_root[stack_name]
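A usage sketch for the new get_stack_root helper with a hypothetical stack_root value; when no JSON is supplied it falls back to /usr/<stack name in lower case>:

  from resource_management.libraries.functions import stack_tools

  print(stack_tools.get_stack_root("HDP", '{"HDP": "/usr/hdp"}'))  # -> /usr/hdp
  print(stack_tools.get_stack_root("HDP", None))                   # -> /usr/hdp (fallback)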
http://git-wip-us.apache.org/repos/asf/ambari/blob/f33a250c/ambari-common/src/main/python/resource_management/libraries/script/script.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/script/script.py b/ambari-common/src/main/python/resource_management/libraries/script/script.py
index 2c56a13..2b374c5 100644
--- a/ambari-common/src/main/python/resource_management/libraries/script/script.py
+++ b/ambari-common/src/main/python/resource_management/libraries/script/script.py
@@ -597,7 +597,11 @@ class Script(object):
:return: a stack name or None
"""
from resource_management.libraries.functions.default import default
- return default("/hostLevelParams/stack_name", "HDP")
+ stack_name = default("/hostLevelParams/stack_name", None)
+ if stack_name is None:
+ stack_name = default("/configurations/cluster-env/stack_name", "HDP")
+
+ return stack_name
@staticmethod
def get_stack_root():
@@ -607,7 +611,18 @@ class Script(object):
"""
from resource_management.libraries.functions.default import default
stack_name = Script.get_stack_name()
- return default("/configurations/cluster-env/stack_root", "/usr/{0}".format(stack_name.lower()))
+ stack_root_json = default("/configurations/cluster-env/stack_root", None)
+
+ if stack_root_json is None:
+ return "/usr/{0}".format(stack_name.lower())
+
+ stack_root = json.loads(stack_root_json)
+
+ if stack_name not in stack_root:
+ Logger.warning("Cannot determine stack root for stack named {0}".format(stack_name))
+ return "/usr/{0}".format(stack_name.lower())
+
+ return stack_root[stack_name]
@staticmethod
def get_stack_version():
http://git-wip-us.apache.org/repos/asf/ambari/blob/f33a250c/ambari-server/src/main/java/org/apache/ambari/server/api/query/JpaPredicateVisitor.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/api/query/JpaPredicateVisitor.java b/ambari-server/src/main/java/org/apache/ambari/server/api/query/JpaPredicateVisitor.java
index 984dc3b..84e9dd9 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/api/query/JpaPredicateVisitor.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/api/query/JpaPredicateVisitor.java
@@ -63,11 +63,6 @@ public abstract class JpaPredicateVisitor<T> implements PredicateVisitor {
final private CriteriaQuery<T> m_query;
/**
- * The entity class that the root of the query is built from.
- */
- final private Class<T> m_entityClass;
-
- /**
* The last calculated predicate.
*/
private javax.persistence.criteria.Predicate m_lastPredicate = null;
@@ -92,7 +87,6 @@ public abstract class JpaPredicateVisitor<T> implements PredicateVisitor {
public JpaPredicateVisitor(EntityManager entityManager, Class<T> entityClass) {
m_entityManager = entityManager;
m_builder = m_entityManager.getCriteriaBuilder();
- m_entityClass = entityClass;
m_query = m_builder.createQuery(entityClass);
m_root = m_query.from(entityClass);
}
@@ -178,7 +172,7 @@ public abstract class JpaPredicateVisitor<T> implements PredicateVisitor {
}
String operator = predicate.getOperator();
- Comparable<?> value = predicate.getValue();
+ Comparable value = predicate.getValue();
// convert string to enum for proper JPA comparisons
if (lastSingularAttribute != null) {
http://git-wip-us.apache.org/repos/asf/ambari/blob/f33a250c/ambari-server/src/main/java/org/apache/ambari/server/controller/ActionExecutionContext.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/ActionExecutionContext.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/ActionExecutionContext.java
index 42a95c0..34d6db9 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/ActionExecutionContext.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/ActionExecutionContext.java
@@ -27,6 +27,7 @@ import org.apache.ambari.server.actionmanager.TargetHostType;
import org.apache.ambari.server.agent.ExecutionCommand;
import org.apache.ambari.server.controller.internal.RequestOperationLevel;
import org.apache.ambari.server.controller.internal.RequestResourceFilter;
+import org.apache.ambari.server.state.StackId;
/**
* The context required to create tasks and stages for a custom action
@@ -43,6 +44,7 @@ public class ActionExecutionContext {
private String expectedComponentName;
private boolean hostsInMaintenanceModeExcluded = true;
private boolean allowRetry = false;
+ private StackId stackId;
private List<ExecutionCommandVisitor> m_visitors = new ArrayList<>();
@@ -173,6 +175,30 @@ public class ActionExecutionContext {
}
/**
+ * Gets the stack to use for generating stack-associated values for a command.
+ * In some cases the cluster's stack is not the correct one to use, such as
+ * when distributing a repository.
+ *
+ * @return the stackId the stack to use when generating stack-specific content
+ * for the command.
+ */
+ public StackId getStackId() {
+ return stackId;
+ }
+
+ /**
+ * Sets the stack to use for generating stack-associated values for a command.
+ * In some cases the cluster's stack is not the correct one to use, such as
+ * when distributing a repository.
+ *
+ * @param stackId
+ * the stackId to use for stack-based properties on the command.
+ */
+ public void setStackId(StackId stackId) {
+ this.stackId = stackId;
+ }
+
+ /**
* Adds a command visitor that will be invoked after a command is created. Provides access
* to the command.
*
http://git-wip-us.apache.org/repos/asf/ambari/blob/f33a250c/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariActionExecutionHelper.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariActionExecutionHelper.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariActionExecutionHelper.java
index 8f522b0..391daa9 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariActionExecutionHelper.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariActionExecutionHelper.java
@@ -465,7 +465,10 @@ public class AmbariActionExecutionHelper {
if (StringUtils.isNotBlank(serviceName)) {
Service service = cluster.getService(serviceName);
- addRepoInfoToHostLevelParams(service.getDesiredRepositoryVersion(), hostLevelParams, hostName);
+ addRepoInfoToHostLevelParams(actionContext, service.getDesiredRepositoryVersion(),
+ hostLevelParams, hostName);
+ } else {
+ addRepoInfoToHostLevelParams(actionContext, null, hostLevelParams, hostName);
}
@@ -529,9 +532,19 @@ public class AmbariActionExecutionHelper {
*
* */
- private void addRepoInfoToHostLevelParams(RepositoryVersionEntity repositoryVersion,
- Map<String, String> hostLevelParams, String hostName) throws AmbariException {
+ private void addRepoInfoToHostLevelParams(ActionExecutionContext actionContext,
+ RepositoryVersionEntity repositoryVersion, Map<String, String> hostLevelParams,
+ String hostName) throws AmbariException {
+
+ // if the repo is null, see if any values from the context should go on the
+ // host params and then return
if (null == repositoryVersion) {
+ if (null != actionContext.getStackId()) {
+ StackId stackId = actionContext.getStackId();
+ hostLevelParams.put(STACK_NAME, stackId.getStackName());
+ hostLevelParams.put(STACK_VERSION, stackId.getStackVersion());
+ }
+
return;
}
@@ -557,7 +570,10 @@ public class AmbariActionExecutionHelper {
hostLevelParams.put(REPO_INFO, rootJsonObject.toString());
- hostLevelParams.put(STACK_NAME, repositoryVersion.getStackName());
- hostLevelParams.put(STACK_VERSION, repositoryVersion.getStackVersion());
+ // set the host level params if not already set by whoever is creating this command
+ if (!hostLevelParams.containsKey(STACK_NAME) || !hostLevelParams.containsKey(STACK_VERSION)) {
+ hostLevelParams.put(STACK_NAME, repositoryVersion.getStackName());
+ hostLevelParams.put(STACK_VERSION, repositoryVersion.getStackVersion());
+ }
}
}
http://git-wip-us.apache.org/repos/asf/ambari/blob/f33a250c/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessor.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessor.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessor.java
index e93b2f7..37284be 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessor.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessor.java
@@ -36,7 +36,9 @@ import java.util.regex.Pattern;
import org.apache.ambari.server.AmbariException;
import org.apache.ambari.server.state.Cluster;
+import org.apache.ambari.server.state.ConfigHelper;
import org.apache.ambari.server.state.PropertyDependencyInfo;
+import org.apache.ambari.server.state.StackId;
import org.apache.ambari.server.state.ValueAttributesInfo;
import org.apache.ambari.server.topology.AdvisedConfiguration;
import org.apache.ambari.server.topology.Blueprint;
@@ -356,7 +358,7 @@ public class BlueprintConfigurationProcessor {
final String originalValue = typeMap.get(propertyName);
final String updatedValue =
updater.updateForClusterCreate(propertyName, originalValue, clusterProps, clusterTopology);
-
+
if(updatedValue == null ) {
continue;
}
@@ -419,6 +421,7 @@ public class BlueprintConfigurationProcessor {
}
// Explicitly set any properties that are required but not currently provided in the stack definition.
+ setStackToolsAndFeatures(clusterConfig, configTypesUpdated);
setRetryConfiguration(clusterConfig, configTypesUpdated);
setupHDFSProxyUsers(clusterConfig, configTypesUpdated);
addExcludedConfigProperties(clusterConfig, configTypesUpdated, clusterTopology.getBlueprint().getStack());
@@ -531,7 +534,7 @@ public class BlueprintConfigurationProcessor {
try {
String clusterName = clusterTopology.getAmbariContext().getClusterName(clusterTopology.getClusterId());
Cluster cluster = clusterTopology.getAmbariContext().getController().getClusters().getCluster(clusterName);
- authToLocalPerClusterMap = new HashMap<Long, Set<String>>();
+ authToLocalPerClusterMap = new HashMap<>();
authToLocalPerClusterMap.put(Long.valueOf(clusterTopology.getClusterId()), clusterTopology.getAmbariContext().getController().getKerberosHelper().getKerberosDescriptor(cluster).getAllAuthToLocalProperties());
} catch (AmbariException e) {
LOG.error("Error while getting authToLocal properties. ", e);
@@ -2186,8 +2189,9 @@ public class BlueprintConfigurationProcessor {
StringBuilder sb = new StringBuilder();
Matcher m = REGEX_IN_BRACKETS.matcher(origValue);
- if (m.matches())
+ if (m.matches()) {
origValue = m.group("INNER");
+ }
if (origValue != null) {
sb.append("[");
@@ -2195,8 +2199,9 @@ public class BlueprintConfigurationProcessor {
for (String value : origValue.split(",")) {
m = REGEX_IN_QUOTES.matcher(value);
- if (m.matches())
+ if (m.matches()) {
value = m.group("INNER");
+ }
if (!isFirst) {
sb.append(",");
@@ -2230,6 +2235,7 @@ public class BlueprintConfigurationProcessor {
*/
private static class OriginalValuePropertyUpdater implements PropertyUpdater {
+ @Override
public String updateForClusterCreate(String propertyName,
String origValue,
Map<String, Map<String, String>> properties,
@@ -2950,6 +2956,49 @@ public class BlueprintConfigurationProcessor {
/**
+ * Sets the read-only properties for stack features & tools, overriding
+ * anything provided in the blueprint.
+ *
+ * @param configuration
+ * the configuration to update with values from the stack.
+ * @param configTypesUpdated
+ * the list of configuration types updated (cluster-env will be added
+ * to this).
+ * @throws ConfigurationTopologyException
+ */
+ private void setStackToolsAndFeatures(Configuration configuration, Set<String> configTypesUpdated)
+ throws ConfigurationTopologyException {
+ ConfigHelper configHelper = clusterTopology.getAmbariContext().getConfigHelper();
+ Stack stack = clusterTopology.getBlueprint().getStack();
+ String stackName = stack.getName();
+ String stackVersion = stack.getVersion();
+
+ StackId stackId = new StackId(stackName, stackVersion);
+
+ Set<String> properties = Sets.newHashSet(ConfigHelper.CLUSTER_ENV_STACK_NAME_PROPERTY,
+ ConfigHelper.CLUSTER_ENV_STACK_ROOT_PROPERTY, ConfigHelper.CLUSTER_ENV_STACK_TOOLS_PROPERTY,
+ ConfigHelper.CLUSTER_ENV_STACK_FEATURES_PROPERTY);
+
+ try {
+ Map<String, Map<String, String>> defaultStackProperties = configHelper.getDefaultStackProperties(stackId);
+ Map<String,String> clusterEnvDefaultProperties = defaultStackProperties.get(CLUSTER_ENV_CONFIG_TYPE_NAME);
+
+ for( String property : properties ){
+ if (defaultStackProperties.containsKey(property)) {
+ configuration.setProperty(CLUSTER_ENV_CONFIG_TYPE_NAME, property,
+ clusterEnvDefaultProperties.get(property));
+
+ // make sure to include the configuration type as being updated
+ configTypesUpdated.add(CLUSTER_ENV_CONFIG_TYPE_NAME);
+ }
+ }
+ } catch( AmbariException ambariException ){
+ throw new ConfigurationTopologyException("Unable to retrieve the stack tools and features",
+ ambariException);
+ }
+ }
+
+ /**
* Ensure that the specified property exists.
* If not, set a default value.
*
@@ -3099,7 +3148,7 @@ public class BlueprintConfigurationProcessor {
@Override
public boolean isPropertyIncluded(String propertyName, String propertyValue, String configType, ClusterTopology topology) {
- return !(this.propertyConfigType.equals(configType) &&
+ return !(propertyConfigType.equals(configType) &&
this.propertyName.equals(propertyName));
}
}
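The cluster-env keys that setStackToolsAndFeatures now always takes from the stack definition are stack_name, stack_root, stack_tools and stack_features (the ConfigHelper constants added further down). A rough sketch of the resulting overrides, with hypothetical values:

  # Keys forced to stack defaults during blueprint processing, regardless of the blueprint
  forced_cluster_env_keys = ["stack_name", "stack_root", "stack_tools", "stack_features"]
  cluster_env_overrides = {
      "stack_name": "HDP",
      "stack_root": '{"HDP": "/usr/hdp"}',
      # stack_tools / stack_features: JSON keyed by stack name, as sketched earlier
  }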
http://git-wip-us.apache.org/repos/asf/ambari/blob/f33a250c/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProvider.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProvider.java
index 93c02be..c4fce8a 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProvider.java
@@ -67,11 +67,13 @@ import org.apache.ambari.server.security.authorization.RoleAuthorization;
import org.apache.ambari.server.state.Cluster;
import org.apache.ambari.server.state.Clusters;
import org.apache.ambari.server.state.ComponentInfo;
+import org.apache.ambari.server.state.Config;
+import org.apache.ambari.server.state.ConfigHelper;
import org.apache.ambari.server.state.Host;
import org.apache.ambari.server.state.RepositoryType;
import org.apache.ambari.server.state.RepositoryVersionState;
-import org.apache.ambari.server.state.Service;
import org.apache.ambari.server.state.ServiceComponentHost;
+import org.apache.ambari.server.state.ServiceOsSpecific;
import org.apache.ambari.server.state.StackId;
import org.apache.ambari.server.state.repository.VersionDefinitionXml;
import org.apache.ambari.server.state.stack.upgrade.RepositoryVersionHelper;
@@ -83,6 +85,7 @@ import org.apache.commons.lang.math.NumberUtils;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Lists;
import com.google.common.collect.Sets;
+import com.google.gson.Gson;
import com.google.inject.Inject;
import com.google.inject.Provider;
import com.google.inject.persist.Transactional;
@@ -171,12 +174,20 @@ public class ClusterStackVersionResourceProvider extends AbstractControllerResou
@Inject
private static RepositoryVersionHelper repoVersionHelper;
-
+ @Inject
+ private static Gson gson;
@Inject
private static Provider<Clusters> clusters;
/**
+ * Used for updating the existing stack tools with those of the stack being
+ * distributed.
+ */
+ @Inject
+ private static Provider<ConfigHelper> configHelperProvider;
+
+ /**
* Constructor.
*/
public ClusterStackVersionResourceProvider(
@@ -287,8 +298,6 @@ public class ClusterStackVersionResourceProvider extends AbstractControllerResou
String clName;
final String desiredRepoVersion;
- String stackName;
- String stackVersion;
Map<String, Object> propertyMap = iterator.next();
@@ -327,30 +336,30 @@ public class ClusterStackVersionResourceProvider extends AbstractControllerResou
cluster.getClusterName(), entity.getDirection().getText(false)));
}
- Set<StackId> stackIds = new HashSet<>();
- if (propertyMap.containsKey(CLUSTER_STACK_VERSION_STACK_PROPERTY_ID) &&
- propertyMap.containsKey(CLUSTER_STACK_VERSION_VERSION_PROPERTY_ID)) {
- stackName = (String) propertyMap.get(CLUSTER_STACK_VERSION_STACK_PROPERTY_ID);
- stackVersion = (String) propertyMap.get(CLUSTER_STACK_VERSION_VERSION_PROPERTY_ID);
- StackId stackId = new StackId(stackName, stackVersion);
- if (! ami.isSupportedStack(stackName, stackVersion)) {
- throw new NoSuchParentResourceException(String.format("Stack %s is not supported",
- stackId));
- }
- stackIds.add(stackId);
- } else { // Using stack that is current for cluster
- for (Service service : cluster.getServices().values()) {
- stackIds.add(service.getDesiredStackId());
- }
+ String stackName = (String) propertyMap.get(CLUSTER_STACK_VERSION_STACK_PROPERTY_ID);
+ String stackVersion = (String) propertyMap.get(CLUSTER_STACK_VERSION_VERSION_PROPERTY_ID);
+ if (StringUtils.isBlank(stackName) || StringUtils.isBlank(stackVersion)) {
+ String message = String.format(
+ "Both the %s and %s properties are required when distributing a new stack",
+ CLUSTER_STACK_VERSION_STACK_PROPERTY_ID, CLUSTER_STACK_VERSION_VERSION_PROPERTY_ID);
+
+ throw new SystemException(message);
}
- if (stackIds.size() > 1) {
- throw new SystemException("Could not determine stack to add out of " + StringUtils.join(stackIds, ','));
+ StackId stackId = new StackId(stackName, stackVersion);
+
+ if (!ami.isSupportedStack(stackName, stackVersion)) {
+ throw new NoSuchParentResourceException(String.format("Stack %s is not supported", stackId));
}
- StackId stackId = stackIds.iterator().next();
- stackName = stackId.getStackName();
- stackVersion = stackId.getStackVersion();
+ // bootstrap the stack tools if necessary for the stack which is being
+ // distributed
+ try {
+ bootstrapStackTools(stackId, cluster);
+ } catch (AmbariException ambariException) {
+ throw new SystemException("Unable to modify stack tools for new stack being distributed",
+ ambariException);
+ }
RepositoryVersionEntity repoVersionEntity = repositoryVersionDAO.findByStackAndVersion(
stackId, desiredRepoVersion);
@@ -580,6 +589,7 @@ public class ClusterStackVersionResourceProvider extends AbstractControllerResou
}
// determine packages for all services that are installed on host
+ List<ServiceOsSpecific.Package> packages = new ArrayList<>();
Set<String> servicesOnHost = new HashSet<>();
List<ServiceComponentHost> components = cluster.getServiceComponentHosts(host.getHostName());
for (ServiceComponentHost component : components) {
@@ -600,16 +610,15 @@ public class ClusterStackVersionResourceProvider extends AbstractControllerResou
RequestResourceFilter filter = new RequestResourceFilter(null, null,
Collections.singletonList(host.getHostName()));
- ActionExecutionContext actionContext = new ActionExecutionContext(
- cluster.getClusterName(), INSTALL_PACKAGES_ACTION,
- Collections.singletonList(filter),
- roleParams);
+ ActionExecutionContext actionContext = new ActionExecutionContext(cluster.getClusterName(),
+ INSTALL_PACKAGES_ACTION, Collections.singletonList(filter), roleParams);
+
+ actionContext.setStackId(stackId);
actionContext.setTimeout(Short.valueOf(configuration.getDefaultAgentTaskTimeout(true)));
repoVersionHelper.addCommandRepository(actionContext, osFamily, repoVersion, repoInfo);
return actionContext;
-
}
@@ -698,4 +707,100 @@ public class ClusterStackVersionResourceProvider extends AbstractControllerResou
}
+ /**
+ * Ensures that the stack tools and stack features are set on
+ * {@link ConfigHelper#CLUSTER_ENV} for the stack of the repository being
+ * distributed. This step ensures that the new repository can be distributed
+ * with the correct tools.
+ * <p/>
+ * If the cluster's current stack name matches that of the new stack or the
+ * new stack's tools are already added in the configuration, then this method
+ * will not change anything.
+ *
+ * @param stackId
+ * the stack of the repository being distributed (not {@code null}).
+ * @param cluster
+ * the cluster the new stack/repo is being distributed for (not
+ * {@code null}).
+ * @throws AmbariException
+ */
+ private void bootstrapStackTools(StackId stackId, Cluster cluster) throws AmbariException {
+ // if the stack name is the same as the cluster's current stack name, then
+ // there's no work to do
+ if (StringUtils.equals(stackId.getStackName(),
+ cluster.getCurrentStackVersion().getStackName())) {
+ return;
+ }
+
+ ConfigHelper configHelper = configHelperProvider.get();
+
+ // get the stack tools/features for the stack being distributed
+ Map<String, Map<String, String>> defaultStackConfigurationsByType = configHelper.getDefaultStackProperties(stackId);
+
+ Map<String, String> clusterEnvDefaults = defaultStackConfigurationsByType.get(
+ ConfigHelper.CLUSTER_ENV);
+
+ Config clusterEnv = cluster.getDesiredConfigByType(ConfigHelper.CLUSTER_ENV);
+ Map<String, String> clusterEnvProperties = clusterEnv.getProperties();
+
+ // the 3 properties we need to check and update
+ Set<String> properties = Sets.newHashSet(ConfigHelper.CLUSTER_ENV_STACK_ROOT_PROPERTY,
+ ConfigHelper.CLUSTER_ENV_STACK_TOOLS_PROPERTY,
+ ConfigHelper.CLUSTER_ENV_STACK_FEATURES_PROPERTY);
+
+ // any updates are stored here and merged into the existing config type
+ Map<String, String> updatedProperties = new HashMap<>();
+
+ for (String property : properties) {
+ // determine if the property exists in the stack being distributed (it
+ // kind of has to, but we'll be safe if it's not found)
+ String newStackDefaultJson = clusterEnvDefaults.get(property);
+ if (StringUtils.isBlank(newStackDefaultJson)) {
+ continue;
+ }
+
+ String existingPropertyJson = clusterEnvProperties.get(property);
+
+ // if the stack tools/features property doesn't exist, then just set the
+ // one from the new stack
+ if (StringUtils.isBlank(existingPropertyJson)) {
+ updatedProperties.put(property, newStackDefaultJson);
+ continue;
+ }
+
+ // now is the hard part - we need to check to see if the new stack tools
+ // exists alongside the current tools and if it doesn't, then add the new
+ // tools in
+ final Map<String, Object> existingJson;
+ final Map<String, ?> newStackJsonAsObject;
+ if (StringUtils.equals(property, ConfigHelper.CLUSTER_ENV_STACK_ROOT_PROPERTY)) {
+ existingJson = gson.<Map<String, Object>> fromJson(existingPropertyJson, Map.class);
+ newStackJsonAsObject = gson.<Map<String, String>> fromJson(newStackDefaultJson, Map.class);
+ } else {
+ existingJson = gson.<Map<String, Object>> fromJson(existingPropertyJson,
+ Map.class);
+
+ newStackJsonAsObject = gson.<Map<String, Map<Object, Object>>> fromJson(newStackDefaultJson,
+ Map.class);
+ }
+
+ if (existingJson.keySet().contains(stackId.getStackName())) {
+ continue;
+ }
+
+ existingJson.put(stackId.getStackName(), newStackJsonAsObject.get(stackId.getStackName()));
+
+ String newJson = gson.toJson(existingJson);
+ updatedProperties.put(property, newJson);
+ }
+
+ if (!updatedProperties.isEmpty()) {
+ AmbariManagementController amc = getManagementController();
+ String serviceNote = String.format(
+ "Adding stack tools for %s while distributing a new repository", stackId.toString());
+
+ configHelper.updateConfigType(cluster, stackId, amc, clusterEnv.getType(), updatedProperties,
+ null, amc.getAuthName(), serviceNote);
+ }
+ }
}
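In Python terms, a rough sketch of the merge that bootstrapStackTools performs on a cluster-env property such as stack_tools (helper name and values are illustrative, not part of the commit):

  import json

  def merge_stack_entry(existing_json, new_stack_default_json, stack_name):
      existing = json.loads(existing_json)
      if stack_name in existing:
          return existing_json                         # new stack already present, keep as-is
      new_defaults = json.loads(new_stack_default_json)
      existing[stack_name] = new_defaults[stack_name]  # add the new stack's entry alongside
      return json.dumps(existing)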
http://git-wip-us.apache.org/repos/asf/ambari/blob/f33a250c/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigHelper.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigHelper.java b/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigHelper.java
index 9f75bf9..a3a676d 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigHelper.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigHelper.java
@@ -88,8 +88,10 @@ public class ConfigHelper {
public static final String CLUSTER_ENV_RETRY_COMMANDS = "commands_to_retry";
public static final String CLUSTER_ENV_RETRY_MAX_TIME_IN_SEC = "command_retry_max_time_in_sec";
public static final String COMMAND_RETRY_MAX_TIME_IN_SEC_DEFAULT = "600";
+ public static final String CLUSTER_ENV_STACK_NAME_PROPERTY = "stack_name";
public static final String CLUSTER_ENV_STACK_FEATURES_PROPERTY = "stack_features";
public static final String CLUSTER_ENV_STACK_TOOLS_PROPERTY = "stack_tools";
+ public static final String CLUSTER_ENV_STACK_ROOT_PROPERTY = "stack_root";
public static final String HTTP_ONLY = "HTTP_ONLY";
public static final String HTTPS_ONLY = "HTTPS_ONLY";
@@ -1148,6 +1150,36 @@ public class ConfigHelper {
*
* @param stack
* the stack to pull stack-values from (not {@code null})
+ * @return a mapping of configuration type to map of key/value pairs for the
+ * default configurations.
+ * @throws AmbariException
+ */
+ public Map<String, Map<String, String>> getDefaultStackProperties(StackId stack)
+ throws AmbariException {
+ Map<String, Map<String, String>> defaultPropertiesByType = new HashMap<>();
+
+ // populate the stack (non-service related) properties
+ Set<org.apache.ambari.server.state.PropertyInfo> stackConfigurationProperties = ambariMetaInfo.getStackProperties(
+ stack.getStackName(), stack.getStackVersion());
+
+ for (PropertyInfo stackDefaultProperty : stackConfigurationProperties) {
+ String type = ConfigHelper.fileNameToConfigType(stackDefaultProperty.getFilename());
+
+ if (!defaultPropertiesByType.containsKey(type)) {
+ defaultPropertiesByType.put(type, new HashMap<String, String>());
+ }
+
+ defaultPropertiesByType.get(type).put(stackDefaultProperty.getName(),
+ stackDefaultProperty.getValue());
+ }
+
+ return defaultPropertiesByType;
+ }
+
+ /**
+ *
+ * @param stack
+ * the stack to pull stack-values from (not {@code null})
* @param serviceName
* the service name {@code null}).
* @return a mapping of configuration type to map of key/value pairs for the
http://git-wip-us.apache.org/repos/asf/ambari/blob/f33a250c/ambari-server/src/main/java/org/apache/ambari/server/topology/AmbariContext.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/AmbariContext.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/AmbariContext.java
index 0467b9b..9b64edc 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/topology/AmbariContext.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/AmbariContext.java
@@ -69,6 +69,7 @@ import org.apache.ambari.server.state.Cluster;
import org.apache.ambari.server.state.Clusters;
import org.apache.ambari.server.state.Config;
import org.apache.ambari.server.state.ConfigFactory;
+import org.apache.ambari.server.state.ConfigHelper;
import org.apache.ambari.server.state.DesiredConfig;
import org.apache.ambari.server.state.Host;
import org.apache.ambari.server.state.SecurityType;
@@ -80,6 +81,7 @@ import org.slf4j.LoggerFactory;
import com.google.common.collect.Iterables;
import com.google.common.collect.Sets;
+import com.google.inject.Provider;
/**
@@ -100,6 +102,12 @@ public class AmbariContext {
@Inject
ConfigFactory configFactory;
+ /**
+ * Used for getting configuration property values from stack and services.
+ */
+ @Inject
+ private Provider<ConfigHelper> configHelper;
+
private static AmbariManagementController controller;
private static ClusterController clusterController;
//todo: task id's. Use existing mechanism for getting next task id sequence
@@ -674,6 +682,16 @@ public class AmbariContext {
return String.format("%s:%s", bpName, hostGroupName);
}
+ /**
+ * Gets an instance of {@link ConfigHelper} for classes which are not
+ * dependency injected.
+ *
+ * @return a {@link ConfigHelper} instance.
+ */
+ public ConfigHelper getConfigHelper() {
+ return configHelper.get();
+ }
+
private synchronized HostResourceProvider getHostResourceProvider() {
if (hostResourceProvider == null) {
hostResourceProvider = (HostResourceProvider)
http://git-wip-us.apache.org/repos/asf/ambari/blob/f33a250c/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog252.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog252.java b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog252.java
index 74f8f35..fa3aea3 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog252.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog252.java
@@ -18,10 +18,20 @@
package org.apache.ambari.server.upgrade;
import java.sql.SQLException;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Set;
import org.apache.ambari.server.AmbariException;
import org.apache.ambari.server.orm.DBAccessor.DBColumnInfo;
+import org.apache.ambari.server.state.Cluster;
+import org.apache.ambari.server.state.Clusters;
+import org.apache.ambari.server.state.Config;
+import org.apache.ambari.server.state.ConfigHelper;
+import org.apache.ambari.server.state.PropertyInfo;
+import org.apache.hadoop.metrics2.sink.relocated.commons.lang.StringUtils;
+import com.google.common.collect.Sets;
import com.google.inject.Inject;
import com.google.inject.Injector;
@@ -33,6 +43,8 @@ public class UpgradeCatalog252 extends AbstractUpgradeCatalog {
static final String CLUSTERCONFIG_TABLE = "clusterconfig";
static final String SERVICE_DELETED_COLUMN = "service_deleted";
+ private static final String CLUSTER_ENV = "cluster-env";
+
/**
* Constructor.
*
@@ -79,6 +91,7 @@ public class UpgradeCatalog252 extends AbstractUpgradeCatalog {
*/
@Override
protected void executeDMLUpdates() throws AmbariException, SQLException {
+ resetStackToolsAndFeatures();
}
/**
@@ -91,4 +104,52 @@ public class UpgradeCatalog252 extends AbstractUpgradeCatalog {
dbAccessor.addColumn(CLUSTERCONFIG_TABLE,
new DBColumnInfo(SERVICE_DELETED_COLUMN, Short.class, null, 0, false));
}
+
+ /**
+ * Resets the following properties in {@code cluster-env} to their new
+ * defaults:
+ * <ul>
+ * <li>stack_root
+ * <li>stack_tools
+ * <li>stack_features
+ * </ul>
+ *
+ * @throws AmbariException
+ */
+ private void resetStackToolsAndFeatures() throws AmbariException {
+ Set<String> propertiesToReset = Sets.newHashSet("stack_tools", "stack_features", "stack_root");
+
+ Clusters clusters = injector.getInstance(Clusters.class);
+ ConfigHelper configHelper = injector.getInstance(ConfigHelper.class);
+
+ Map<String, Cluster> clusterMap = clusters.getClusters();
+ for (Cluster cluster : clusterMap.values()) {
+ Config clusterEnv = cluster.getDesiredConfigByType(CLUSTER_ENV);
+ if (null == clusterEnv) {
+ continue;
+ }
+
+ Map<String, String> newStackProperties = new HashMap<>();
+ Set<PropertyInfo> stackProperties = configHelper.getStackProperties(cluster);
+ if (null == stackProperties) {
+ continue;
+ }
+
+ for (PropertyInfo propertyInfo : stackProperties) {
+ String fileName = propertyInfo.getFilename();
+ if (StringUtils.isEmpty(fileName)) {
+ continue;
+ }
+
+ if (StringUtils.equals(ConfigHelper.fileNameToConfigType(fileName), CLUSTER_ENV)) {
+ String stackPropertyName = propertyInfo.getName();
+ if (propertiesToReset.contains(stackPropertyName)) {
+ newStackProperties.put(stackPropertyName, propertyInfo.getValue());
+ }
+ }
+ }
+
+ updateConfigurationPropertiesForCluster(cluster, CLUSTER_ENV, newStackProperties, true, false);
+ }
+ }
}
http://git-wip-us.apache.org/repos/asf/ambari/blob/f33a250c/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/alerts/alert_hive_metastore.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/alerts/alert_hive_metastore.py b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/alerts/alert_hive_metastore.py
index 32df7d3..5b4fd68 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/alerts/alert_hive_metastore.py
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/alerts/alert_hive_metastore.py
@@ -27,6 +27,7 @@ import logging
from resource_management.core import global_lock
from resource_management.libraries.functions import format
from resource_management.libraries.functions import get_kinit_path
+from resource_management.libraries.functions import stack_tools
from resource_management.core.resources import Execute
from resource_management.core.signal_utils import TerminateStrategy
from ambari_commons.os_check import OSConst
@@ -56,6 +57,7 @@ SMOKEUSER_PRINCIPAL_DEFAULT = 'ambari-qa@EXAMPLE.COM'
SMOKEUSER_SCRIPT_PARAM_KEY = 'default.smoke.user'
SMOKEUSER_DEFAULT = 'ambari-qa'
+STACK_NAME = '{{cluster-env/stack_name}}'
STACK_ROOT = '{{cluster-env/stack_root}}'
HIVE_CONF_DIR_LEGACY = '/etc/hive/conf.server'
@@ -78,7 +80,7 @@ def get_tokens():
"""
return (SECURITY_ENABLED_KEY,SMOKEUSER_KEYTAB_KEY,SMOKEUSER_PRINCIPAL_KEY,
HIVE_METASTORE_URIS_KEY, SMOKEUSER_KEY, KERBEROS_EXECUTABLE_SEARCH_PATHS_KEY,
- STACK_ROOT)
+ STACK_NAME, STACK_ROOT)
@OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
def get_tokens():
@@ -175,9 +177,10 @@ def execute(configurations={}, parameters={}, host_name=None):
bin_dir = HIVE_BIN_DIR_LEGACY
- if STACK_ROOT in configurations:
- hive_conf_dir = configurations[STACK_ROOT] + format("/current/hive-metastore/conf")
- hive_bin_dir = configurations[STACK_ROOT] + format("/current/hive-metastore/bin")
+ if STACK_NAME in configurations and STACK_ROOT in configurations:
+ stack_root = stack_tools.get_stack_root(configurations[STACK_NAME], configurations[STACK_ROOT])
+ hive_conf_dir = stack_root + format("/current/hive-metastore/conf")
+ hive_bin_dir = stack_root + format("/current/hive-metastore/bin")
if os.path.exists(hive_conf_dir):
conf_dir = hive_conf_dir
http://git-wip-us.apache.org/repos/asf/ambari/blob/f33a250c/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/alerts/alert_llap_app_status.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/alerts/alert_llap_app_status.py b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/alerts/alert_llap_app_status.py
index 98d1899..e46c896 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/alerts/alert_llap_app_status.py
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/alerts/alert_llap_app_status.py
@@ -26,7 +26,7 @@ import subprocess
from resource_management.libraries.functions import format
from resource_management.libraries.functions import get_kinit_path
-from ambari_commons.os_check import OSConst
+from resource_management.libraries.functions import stack_tools
from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
from resource_management.core import shell
from resource_management.core.resources import Execute
@@ -58,6 +58,7 @@ HIVE_AUTHENTICATION_DEFAULT = 'NOSASL'
HIVE_USER_KEY = '{{hive-env/hive_user}}'
HIVE_USER_DEFAULT = 'default.smoke.user'
+STACK_NAME = '{{cluster-env/stack_name}}'
STACK_ROOT = '{{cluster-env/stack_root}}'
STACK_ROOT_DEFAULT = Script.get_stack_root()
@@ -88,7 +89,7 @@ def get_tokens():
to build the dictionary passed into execute
"""
return (SECURITY_ENABLED_KEY, KERBEROS_EXECUTABLE_SEARCH_PATHS_KEY, HIVE_PRINCIPAL_KEY, HIVE_PRINCIPAL_KEYTAB_KEY,
- HIVE_USER_KEY, STACK_ROOT, LLAP_APP_NAME_KEY)
+ HIVE_USER_KEY, STACK_NAME, STACK_ROOT, LLAP_APP_NAME_KEY)
@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
@@ -159,8 +160,11 @@ def execute(configurations={}, parameters={}, host_name=None):
start_time = time.time()
- if STACK_ROOT in configurations:
- llap_status_cmd = configurations[STACK_ROOT] + format("/current/hive-server2-hive2/bin/hive --service llapstatus --name {llap_app_name} --findAppTimeout {LLAP_APP_STATUS_CMD_TIMEOUT}")
+ if STACK_NAME in configurations and STACK_ROOT in configurations:
+ stack_root = stack_tools.get_stack_root(configurations[STACK_NAME],
+ configurations[STACK_ROOT])
+
+ llap_status_cmd = stack_root + format("/current/hive-server2-hive2/bin/hive --service llapstatus --name {llap_app_name} --findAppTimeout {LLAP_APP_STATUS_CMD_TIMEOUT}")
else:
llap_status_cmd = STACK_ROOT_DEFAULT + format("/current/hive-server2-hive2/bin/hive --service llapstatus --name {llap_app_name} --findAppTimeout {LLAP_APP_STATUS_CMD_TIMEOUT}")
http://git-wip-us.apache.org/repos/asf/ambari/blob/f33a250c/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/alerts/alert_check_oozie_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/alerts/alert_check_oozie_server.py b/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/alerts/alert_check_oozie_server.py
index 0e9fe74..54eef18 100644
--- a/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/alerts/alert_check_oozie_server.py
+++ b/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/alerts/alert_check_oozie_server.py
@@ -26,6 +26,7 @@ from resource_management.core.resources import Execute
from resource_management.libraries.functions import format
from resource_management.libraries.functions import get_kinit_path
from resource_management.libraries.functions import get_klist_path
+from resource_management.libraries.functions import stack_tools
from ambari_commons.os_check import OSConst, OSCheck
from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
from urlparse import urlparse
@@ -66,6 +67,7 @@ USER_PRINCIPAL_DEFAULT = 'oozie@EXAMPLE.COM'
# default user
USER_DEFAULT = 'oozie'
+STACK_NAME_KEY = '{{cluster-env/stack_name}}'
STACK_ROOT_KEY = '{{cluster-env/stack_root}}'
STACK_ROOT_DEFAULT = '/usr/hdp'
@@ -86,7 +88,7 @@ def get_tokens():
to build the dictionary passed into execute
"""
return (OOZIE_URL_KEY, USER_PRINCIPAL_KEY, SECURITY_ENABLED, USER_KEYTAB_KEY, KERBEROS_EXECUTABLE_SEARCH_PATHS_KEY,
- USER_KEY, OOZIE_HTTPS_PORT, OOZIE_ENV_CONTENT, STACK_ROOT_KEY)
+ USER_KEY, OOZIE_HTTPS_PORT, OOZIE_ENV_CONTENT, STACK_NAME_KEY, STACK_ROOT_KEY)
@OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
def get_check_command(oozie_url, host_name, configurations):
@@ -158,8 +160,8 @@ def get_check_command(oozie_url, host_name, configurations, parameters, only_kin
# Configure stack root
stack_root = STACK_ROOT_DEFAULT
- if STACK_ROOT_KEY in configurations:
- stack_root = configurations[STACK_ROOT_KEY].lower()
+ if STACK_NAME_KEY in configurations and STACK_ROOT_KEY in configurations:
+ stack_root = stack_tools.get_stack_root(configurations[STACK_NAME_KEY], configurations[STACK_ROOT_KEY]).lower()
# oozie configuration directory using a symlink
oozie_config_directory = OOZIE_CONF_DIR.replace(STACK_ROOT_PATTERN, stack_root)
http://git-wip-us.apache.org/repos/asf/ambari/blob/f33a250c/ambari-server/src/main/resources/host_scripts/alert_disk_space.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/host_scripts/alert_disk_space.py b/ambari-server/src/main/resources/host_scripts/alert_disk_space.py
index 4c5834f..f3c6406 100644
--- a/ambari-server/src/main/resources/host_scripts/alert_disk_space.py
+++ b/ambari-server/src/main/resources/host_scripts/alert_disk_space.py
@@ -23,6 +23,7 @@ import os
import platform
from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
from ambari_commons import OSConst
+from resource_management.libraries.functions import stack_tools
DiskInfo = collections.namedtuple('DiskInfo', 'total used free path')
@@ -36,6 +37,7 @@ MIN_FREE_SPACE_DEFAULT = 5000000000L
PERCENT_USED_WARNING_DEFAULT = 50
PERCENT_USED_CRITICAL_DEFAULT = 80
+STACK_NAME = '{{cluster-env/stack_name}}'
STACK_ROOT = '{{cluster-env/stack_root}}'
def get_tokens():
@@ -43,7 +45,7 @@ def get_tokens():
Returns a tuple of tokens in the format {{site/property}} that will be used
to build the dictionary passed into execute
"""
- return (STACK_ROOT, )
+ return (STACK_NAME, STACK_ROOT)
@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
@@ -64,10 +66,10 @@ def execute(configurations={}, parameters={}, host_name=None):
if configurations is None:
return (('UNKNOWN', ['There were no configurations supplied to the script.']))
- if not STACK_ROOT in configurations:
- return (('STACK_ROOT', ['cluster-env/stack_root is not specified']))
+ if not STACK_NAME in configurations or not STACK_ROOT in configurations:
+ return (('STACK_ROOT', ['cluster-env/stack_name and cluster-env/stack_root are required']))
- path = configurations[STACK_ROOT]
+ path = stack_tools.get_stack_root(configurations[STACK_NAME], configurations[STACK_ROOT])
try:
disk_usage = _get_disk_usage(path)
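[Editor's note] For the disk-space alert, both tokens are now required before the stack root is resolved. An illustrative invocation of the script's execute() after this change; the values are made up, not taken from a real cluster.

    # Illustrative only: the keys are the mustache tokens declared by get_tokens().
    configurations = {
        '{{cluster-env/stack_name}}': 'HDP',
        '{{cluster-env/stack_root}}': '{"HDP":"/usr/hdp"}',
    }
    # execute(configurations) resolves the path to /usr/hdp via
    # stack_tools.get_stack_root() before calling _get_disk_usage(); if either
    # key is missing, the alert now returns the "cluster-env/stack_name and
    # cluster-env/stack_root are required" result instead.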
http://git-wip-us.apache.org/repos/asf/ambari/blob/f33a250c/ambari-server/src/main/resources/host_scripts/alert_version_select.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/host_scripts/alert_version_select.py b/ambari-server/src/main/resources/host_scripts/alert_version_select.py
index 0ce79e7..f54ccad 100644
--- a/ambari-server/src/main/resources/host_scripts/alert_version_select.py
+++ b/ambari-server/src/main/resources/host_scripts/alert_version_select.py
@@ -31,6 +31,7 @@ RESULT_STATE_WARNING = 'WARNING'
RESULT_STATE_CRITICAL = 'CRITICAL'
RESULT_STATE_UNKNOWN = 'UNKNOWN'
+STACK_NAME = '{{cluster-env/stack_name}}'
STACK_TOOLS = '{{cluster-env/stack_tools}}'
@@ -42,7 +43,7 @@ def get_tokens():
Returns a tuple of tokens in the format {{site/property}} that will be used
to build the dictionary passed into execute
"""
- return (STACK_TOOLS,)
+ return (STACK_NAME, STACK_TOOLS)
def execute(configurations={}, parameters={}, host_name=None):
@@ -65,8 +66,10 @@ def execute(configurations={}, parameters={}, host_name=None):
if STACK_TOOLS not in configurations:
return (RESULT_STATE_UNKNOWN, ['{0} is a required parameter for the script'.format(STACK_TOOLS)])
+ stack_name = Script.get_stack_name()
+
# Of the form,
- # { "stack_selector": ["hdp-select", "/usr/bin/hdp-select", "hdp-select"], "conf_selector": ["conf-select", "/usr/bin/conf-select", "conf-select"] }
+ # { "HDP" : { "stack_selector": ["hdp-select", "/usr/bin/hdp-select", "hdp-select"], "conf_selector": ["conf-select", "/usr/bin/conf-select", "conf-select"] } }
stack_tools_str = configurations[STACK_TOOLS]
if stack_tools_str is None:
@@ -75,6 +78,7 @@ def execute(configurations={}, parameters={}, host_name=None):
distro_select = "unknown-distro-select"
try:
stack_tools = json.loads(stack_tools_str)
+ stack_tools = stack_tools[stack_name]
distro_select = stack_tools["stack_selector"][0]
except:
pass
@@ -87,18 +91,18 @@ def execute(configurations={}, parameters={}, host_name=None):
(code, out, versions) = unsafe_get_stack_versions()
if code == 0:
- msg.append("Ok. {0}".format(distro_select))
+ msg.append("{0} ".format(distro_select))
if versions is not None and type(versions) is list and len(versions) > 0:
- msg.append("Versions: {0}".format(", ".join(versions)))
+ msg.append("reported the following versions: {0}".format(", ".join(versions)))
return (RESULT_STATE_OK, ["\n".join(msg)])
else:
- msg.append("Failed, check dir {0} for unexpected contents.".format(stack_root_dir))
+ msg.append("{0} could not properly read {1}. Check this directory for unexpected contents.".format(distro_select, stack_root_dir))
if out is not None:
msg.append(out)
return (RESULT_STATE_CRITICAL, ["\n".join(msg)])
else:
- msg.append("Ok. No stack root {0} to check.".format(stack_root_dir))
+ msg.append("No stack root {0} to check.".format(stack_root_dir))
return (RESULT_STATE_OK, ["\n".join(msg)])
except Exception, e:
return (RESULT_STATE_CRITICAL, [e.message])
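[Editor's note] Since stack_tools is now keyed by stack name, the version-select alert indexes the parsed JSON one level deeper before picking the selector. A small standalone sketch of the new lookup shape, using the structure shown in the comment above; the real alert wraps this in try/except and falls back to "unknown-distro-select".

    import json

    stack_tools_str = '{"HDP": {"stack_selector": ["hdp-select", "/usr/bin/hdp-select", "hdp-select"], "conf_selector": ["conf-select", "/usr/bin/conf-select", "conf-select"]}}'
    stack_tools = json.loads(stack_tools_str)["HDP"]
    distro_select = stack_tools["stack_selector"][0]  # -> "hdp-select"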
http://git-wip-us.apache.org/repos/asf/ambari/blob/f33a250c/ambari-server/src/main/resources/stacks/HDP/2.0.6/configuration/cluster-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/configuration/cluster-env.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6/configuration/cluster-env.xml
index f7d5de5..e6ec285 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/configuration/cluster-env.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/configuration/cluster-env.xml
@@ -220,6 +220,18 @@ gpgcheck=0</value>
</value-attributes>
<on-ambari-upgrade add="true"/>
</property>
+ <!-- Define stack_name property in the base stack. DO NOT override this property for each stack version -->
+ <property>
+ <name>stack_name</name>
+ <value>HDP</value>
+ <description>The name of the stack.</description>
+ <value-attributes>
+ <read-only>true</read-only>
+ <overridable>false</overridable>
+ <visible>false</visible>
+ </value-attributes>
+ <on-ambari-upgrade add="true"/>
+ </property>
<!-- Define stack_tools property in the base stack. DO NOT override this property for each stack version -->
<property>
<name>stack_tools</name>
@@ -252,8 +264,8 @@ gpgcheck=0</value>
</property>
<property>
<name>stack_root</name>
- <value>/usr/hdp</value>
- <description>Stack root folder</description>
+ <value>{"HDP":"/usr/hdp"}</value>
+ <description>JSON which defines the stack root by stack name</description>
<value-attributes>
<read-only>true</read-only>
<overridable>false</overridable>
http://git-wip-us.apache.org/repos/asf/ambari/blob/f33a250c/ambari-server/src/main/resources/stacks/HDP/2.0.6/properties/stack_features.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/properties/stack_features.json b/ambari-server/src/main/resources/stacks/HDP/2.0.6/properties/stack_features.json
index 878645b..31cf0c8 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/properties/stack_features.json
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/properties/stack_features.json
@@ -1,427 +1,429 @@
{
- "stack_features": [
- {
- "name": "snappy",
- "description": "Snappy compressor/decompressor support",
- "min_version": "2.0.0.0",
- "max_version": "2.2.0.0"
- },
- {
- "name": "lzo",
- "description": "LZO libraries support",
- "min_version": "2.2.1.0"
- },
- {
- "name": "express_upgrade",
- "description": "Express upgrade support",
- "min_version": "2.1.0.0"
- },
- {
- "name": "rolling_upgrade",
- "description": "Rolling upgrade support",
- "min_version": "2.2.0.0"
- },
- {
- "name": "kafka_acl_migration_support",
- "description": "ACL migration support",
- "min_version": "2.3.4.0"
- },
- {
- "name": "secure_zookeeper",
- "description": "Protect ZNodes with SASL acl in secure clusters",
- "min_version": "2.6.0.0"
- },
- {
- "name": "config_versioning",
- "description": "Configurable versions support",
- "min_version": "2.3.0.0"
- },
- {
- "name": "datanode_non_root",
- "description": "DataNode running as non-root support (AMBARI-7615)",
- "min_version": "2.2.0.0"
- },
- {
- "name": "remove_ranger_hdfs_plugin_env",
- "description": "HDFS removes Ranger env files (AMBARI-14299)",
- "min_version": "2.3.0.0"
- },
- {
- "name": "ranger",
- "description": "Ranger Service support",
- "min_version": "2.2.0.0"
- },
- {
- "name": "ranger_tagsync_component",
- "description": "Ranger Tagsync component support (AMBARI-14383)",
- "min_version": "2.5.0.0"
- },
- {
- "name": "phoenix",
- "description": "Phoenix Service support",
- "min_version": "2.3.0.0"
- },
- {
- "name": "nfs",
- "description": "NFS support",
- "min_version": "2.3.0.0"
- },
- {
- "name": "tez_for_spark",
- "description": "Tez dependency for Spark",
- "min_version": "2.2.0.0",
- "max_version": "2.3.0.0"
- },
- {
- "name": "timeline_state_store",
- "description": "Yarn application timeline-service supports state store property (AMBARI-11442)",
- "min_version": "2.2.0.0"
- },
- {
- "name": "copy_tarball_to_hdfs",
- "description": "Copy tarball to HDFS support (AMBARI-12113)",
- "min_version": "2.2.0.0"
- },
- {
- "name": "spark_16plus",
- "description": "Spark 1.6+",
- "min_version": "2.4.0.0"
- },
- {
- "name": "spark_thriftserver",
- "description": "Spark Thrift Server",
- "min_version": "2.3.2.0"
- },
- {
- "name": "storm_kerberos",
- "description": "Storm Kerberos support (AMBARI-7570)",
- "min_version": "2.2.0.0"
- },
- {
- "name": "storm_ams",
- "description": "Storm AMS integration (AMBARI-10710)",
- "min_version": "2.2.0.0"
- },
- {
- "name": "create_kafka_broker_id",
- "description": "Ambari should create Kafka Broker Id (AMBARI-12678)",
- "min_version": "2.2.0.0",
- "max_version": "2.3.0.0"
- },
- {
- "name": "kafka_listeners",
- "description": "Kafka listeners (AMBARI-10984)",
- "min_version": "2.3.0.0"
- },
- {
- "name": "kafka_kerberos",
- "description": "Kafka Kerberos support (AMBARI-10984)",
- "min_version": "2.3.0.0"
- },
- {
- "name": "pig_on_tez",
- "description": "Pig on Tez support (AMBARI-7863)",
- "min_version": "2.2.0.0"
- },
- {
- "name": "ranger_usersync_non_root",
- "description": "Ranger Usersync as non-root user (AMBARI-10416)",
- "min_version": "2.3.0.0"
- },
- {
- "name": "ranger_audit_db_support",
- "description": "Ranger Audit to DB support",
- "min_version": "2.2.0.0",
- "max_version": "2.4.99.99"
- },
- {
- "name": "accumulo_kerberos_user_auth",
- "description": "Accumulo Kerberos User Auth (AMBARI-10163)",
- "min_version": "2.3.0.0"
- },
- {
- "name": "knox_versioned_data_dir",
- "description": "Use versioned data dir for Knox (AMBARI-13164)",
- "min_version": "2.3.2.0"
- },
- {
- "name": "knox_sso_topology",
- "description": "Knox SSO Topology support (AMBARI-13975)",
- "min_version": "2.3.8.0"
- },
- {
- "name": "atlas_rolling_upgrade",
- "description": "Rolling upgrade support for Atlas",
- "min_version": "2.3.0.0"
- },
- {
- "name": "oozie_admin_user",
- "description": "Oozie install user as an Oozie admin user (AMBARI-7976)",
- "min_version": "2.2.0.0"
- },
- {
- "name": "oozie_create_hive_tez_configs",
- "description": "Oozie create configs for Ambari Hive and Tez deployments (AMBARI-8074)",
- "min_version": "2.2.0.0"
- },
- {
- "name": "oozie_setup_shared_lib",
- "description": "Oozie setup tools used to shared Oozie lib to HDFS (AMBARI-7240)",
- "min_version": "2.2.0.0"
- },
- {
- "name": "oozie_host_kerberos",
- "description": "Oozie in secured clusters uses _HOST in Kerberos principal (AMBARI-9775)",
- "min_version": "2.0.0.0"
- },
- {
- "name": "falcon_extensions",
- "description": "Falcon Extension",
- "min_version": "2.5.0.0"
- },
- {
- "name": "hive_metastore_upgrade_schema",
- "description": "Hive metastore upgrade schema support (AMBARI-11176)",
- "min_version": "2.3.0.0"
- },
- {
- "name": "hive_server_interactive",
- "description": "Hive server interactive support (AMBARI-15573)",
- "min_version": "2.5.0.0"
- },
- {
- "name": "hive_webhcat_specific_configs",
- "description": "Hive webhcat specific configurations support (AMBARI-12364)",
- "min_version": "2.3.0.0"
- },
- {
- "name": "hive_purge_table",
- "description": "Hive purge table support (AMBARI-12260)",
- "min_version": "2.3.0.0"
- },
- {
- "name": "hive_server2_kerberized_env",
- "description": "Hive server2 working on kerberized environment (AMBARI-13749)",
- "min_version": "2.2.3.0",
- "max_version": "2.2.5.0"
- },
- {
- "name": "hive_env_heapsize",
- "description": "Hive heapsize property defined in hive-env (AMBARI-12801)",
- "min_version": "2.2.0.0"
- },
- {
- "name": "ranger_kms_hsm_support",
- "description": "Ranger KMS HSM support (AMBARI-15752)",
- "min_version": "2.5.0.0"
- },
- {
- "name": "ranger_log4j_support",
- "description": "Ranger supporting log-4j properties (AMBARI-15681)",
- "min_version": "2.5.0.0"
- },
- {
- "name": "ranger_kerberos_support",
- "description": "Ranger Kerberos support",
- "min_version": "2.5.0.0"
- },
- {
- "name": "hive_metastore_site_support",
- "description": "Hive Metastore site support",
- "min_version": "2.5.0.0"
- },
- {
- "name": "ranger_usersync_password_jceks",
- "description": "Saving Ranger Usersync credentials in jceks",
- "min_version": "2.5.0.0"
- },
- {
- "name": "ranger_install_infra_client",
- "description": "Ambari Infra Service support",
- "min_version": "2.5.0.0"
- },
- {
- "name": "falcon_atlas_support_2_3",
- "description": "Falcon Atlas integration support for 2.3 stack",
- "min_version": "2.3.99.0",
- "max_version": "2.4.0.0"
- },
- {
- "name": "falcon_atlas_support",
- "description": "Falcon Atlas integration",
- "min_version": "2.5.0.0"
- },
- {
- "name": "hbase_home_directory",
- "description": "Hbase home directory in HDFS needed for HBASE backup",
- "min_version": "2.5.0.0"
- },
- {
- "name": "spark_livy",
- "description": "Livy as slave component of spark",
- "min_version": "2.5.0.0"
- },
- {
- "name": "spark_livy2",
- "description": "Livy as slave component of spark",
- "min_version": "2.6.0.0"
- },
- {
- "name": "atlas_ranger_plugin_support",
- "description": "Atlas Ranger plugin support",
- "min_version": "2.5.0.0"
- },
- {
- "name": "atlas_conf_dir_in_path",
- "description": "Prepend the Atlas conf dir (/etc/atlas/conf) to the classpath of Storm and Falcon",
- "min_version": "2.3.0.0",
- "max_version": "2.4.99.99"
- },
- {
- "name": "atlas_upgrade_support",
- "description": "Atlas supports express and rolling upgrades",
- "min_version": "2.5.0.0"
- },
- {
- "name": "atlas_hook_support",
- "description": "Atlas support for hooks in Hive, Storm, Falcon, and Sqoop",
- "min_version": "2.5.0.0"
- },
- {
- "name": "ranger_pid_support",
- "description": "Ranger Service support pid generation AMBARI-16756",
- "min_version": "2.5.0.0"
- },
- {
- "name": "ranger_kms_pid_support",
- "description": "Ranger KMS Service support pid generation",
- "min_version": "2.5.0.0"
- },
- {
- "name": "ranger_admin_password_change",
- "description": "Allow ranger admin credentials to be specified during cluster creation (AMBARI-17000)",
- "min_version": "2.5.0.0"
- },
- {
- "name": "ranger_setup_db_on_start",
- "description": "Allows setup of ranger db and java patches to be called multiple times on each START",
- "min_version": "2.6.0.0"
- },
- {
- "name": "storm_metrics_apache_classes",
- "description": "Metrics sink for Storm that uses Apache class names",
- "min_version": "2.5.0.0"
- },
- {
- "name": "spark_java_opts_support",
- "description": "Allow Spark to generate java-opts file",
- "min_version": "2.2.0.0",
- "max_version": "2.4.0.0"
- },
- {
- "name": "atlas_hbase_setup",
- "description": "Use script to create Atlas tables in Hbase and set permissions for Atlas user.",
- "min_version": "2.5.0.0"
- },
- {
- "name": "ranger_hive_plugin_jdbc_url",
- "description": "Handle Ranger hive repo config jdbc url change for stack 2.5 (AMBARI-18386)",
- "min_version": "2.5.0.0"
- },
- {
- "name": "zkfc_version_advertised",
- "description": "ZKFC advertise version",
- "min_version": "2.5.0.0"
- },
- {
- "name": "phoenix_core_hdfs_site_required",
- "description": "HDFS and CORE site required for Phoenix",
- "max_version": "2.5.9.9"
- },
- {
- "name": "ranger_tagsync_ssl_xml_support",
- "description": "Ranger Tagsync ssl xml support.",
- "min_version": "2.6.0.0"
- },
- {
- "name": "ranger_xml_configuration",
- "description": "Ranger code base support xml configurations",
- "min_version": "2.3.0.0"
- },
- {
- "name": "kafka_ranger_plugin_support",
- "description": "Ambari stack changes for Ranger Kafka Plugin (AMBARI-11299)",
- "min_version": "2.3.0.0"
- },
- {
- "name": "yarn_ranger_plugin_support",
- "description": "Implement Stack changes for Ranger Yarn Plugin integration (AMBARI-10866)",
- "min_version": "2.3.0.0"
- },
- {
- "name": "ranger_solr_config_support",
- "description": "Showing Ranger solrconfig.xml on UI",
- "min_version": "2.6.0.0"
- },
- {
- "name": "hive_interactive_atlas_hook_required",
- "description": "Registering Atlas Hook for Hive Interactive.",
- "min_version": "2.6.0.0"
- },
- {
- "name": "core_site_for_ranger_plugins",
- "description": "Adding core-site.xml in when Ranger plugin is enabled for Storm, Kafka, and Knox.",
- "min_version": "2.6.0.0"
- },
- {
- "name": "atlas_install_hook_package_support",
- "description": "Stop installing packages from 2.6",
- "max_version": "2.5.9.9"
- },
- {
- "name": "atlas_hdfs_site_on_namenode_ha",
- "description": "Need to create hdfs-site under atlas-conf dir when Namenode-HA is enabled.",
- "min_version": "2.6.0.0"
- },
- {
- "name": "hive_interactive_ga",
- "description": "Hive Interactive GA support",
- "min_version": "2.6.0.0"
- },
- {
- "name": "secure_ranger_ssl_password",
- "description": "Securing Ranger Admin and Usersync SSL and Trustore related passwords in jceks",
- "min_version": "2.6.0.0"
- },
- {
- "name": "ranger_kms_ssl",
- "description": "Ranger KMS SSL properties in ambari stack",
- "min_version": "2.6.0.0"
- },
- {
- "name": "nifi_encrypt_config",
- "description": "Encrypt sensitive properties written to nifi property file",
- "min_version": "2.6.0.0"
- },
- {
- "name": "toolkit_config_update",
- "description": "Support separate input and output for toolkit configuration",
- "min_version": "2.6.0.0"
- },
- {
- "name": "admin_toolkit_support",
- "description": "Supports the nifi admin toolkit",
- "min_version": "2.6.0.0"
- },
- {
- "name": "tls_toolkit_san",
- "description": "Support subject alternative name flag",
- "min_version": "2.6.0.0"
- },
- {
- "name": "nifi_jaas_conf_create",
- "description": "Create NIFI jaas configuration when kerberos is enabled",
- "min_version": "2.6.0.0"
- }
- ]
+ "HDP": {
+ "stack_features": [
+ {
+ "name": "snappy",
+ "description": "Snappy compressor/decompressor support",
+ "min_version": "2.0.0.0",
+ "max_version": "2.2.0.0"
+ },
+ {
+ "name": "lzo",
+ "description": "LZO libraries support",
+ "min_version": "2.2.1.0"
+ },
+ {
+ "name": "express_upgrade",
+ "description": "Express upgrade support",
+ "min_version": "2.1.0.0"
+ },
+ {
+ "name": "rolling_upgrade",
+ "description": "Rolling upgrade support",
+ "min_version": "2.2.0.0"
+ },
+ {
+ "name": "kafka_acl_migration_support",
+ "description": "ACL migration support",
+ "min_version": "2.3.4.0"
+ },
+ {
+ "name": "secure_zookeeper",
+ "description": "Protect ZNodes with SASL acl in secure clusters",
+ "min_version": "2.6.0.0"
+ },
+ {
+ "name": "config_versioning",
+ "description": "Configurable versions support",
+ "min_version": "2.3.0.0"
+ },
+ {
+ "name": "datanode_non_root",
+ "description": "DataNode running as non-root support (AMBARI-7615)",
+ "min_version": "2.2.0.0"
+ },
+ {
+ "name": "remove_ranger_hdfs_plugin_env",
+ "description": "HDFS removes Ranger env files (AMBARI-14299)",
+ "min_version": "2.3.0.0"
+ },
+ {
+ "name": "ranger",
+ "description": "Ranger Service support",
+ "min_version": "2.2.0.0"
+ },
+ {
+ "name": "ranger_tagsync_component",
+ "description": "Ranger Tagsync component support (AMBARI-14383)",
+ "min_version": "2.5.0.0"
+ },
+ {
+ "name": "phoenix",
+ "description": "Phoenix Service support",
+ "min_version": "2.3.0.0"
+ },
+ {
+ "name": "nfs",
+ "description": "NFS support",
+ "min_version": "2.3.0.0"
+ },
+ {
+ "name": "tez_for_spark",
+ "description": "Tez dependency for Spark",
+ "min_version": "2.2.0.0",
+ "max_version": "2.3.0.0"
+ },
+ {
+ "name": "timeline_state_store",
+ "description": "Yarn application timeline-service supports state store property (AMBARI-11442)",
+ "min_version": "2.2.0.0"
+ },
+ {
+ "name": "copy_tarball_to_hdfs",
+ "description": "Copy tarball to HDFS support (AMBARI-12113)",
+ "min_version": "2.2.0.0"
+ },
+ {
+ "name": "spark_16plus",
+ "description": "Spark 1.6+",
+ "min_version": "2.4.0.0"
+ },
+ {
+ "name": "spark_thriftserver",
+ "description": "Spark Thrift Server",
+ "min_version": "2.3.2.0"
+ },
+ {
+ "name": "storm_kerberos",
+ "description": "Storm Kerberos support (AMBARI-7570)",
+ "min_version": "2.2.0.0"
+ },
+ {
+ "name": "storm_ams",
+ "description": "Storm AMS integration (AMBARI-10710)",
+ "min_version": "2.2.0.0"
+ },
+ {
+ "name": "create_kafka_broker_id",
+ "description": "Ambari should create Kafka Broker Id (AMBARI-12678)",
+ "min_version": "2.2.0.0",
+ "max_version": "2.3.0.0"
+ },
+ {
+ "name": "kafka_listeners",
+ "description": "Kafka listeners (AMBARI-10984)",
+ "min_version": "2.3.0.0"
+ },
+ {
+ "name": "kafka_kerberos",
+ "description": "Kafka Kerberos support (AMBARI-10984)",
+ "min_version": "2.3.0.0"
+ },
+ {
+ "name": "pig_on_tez",
+ "description": "Pig on Tez support (AMBARI-7863)",
+ "min_version": "2.2.0.0"
+ },
+ {
+ "name": "ranger_usersync_non_root",
+ "description": "Ranger Usersync as non-root user (AMBARI-10416)",
+ "min_version": "2.3.0.0"
+ },
+ {
+ "name": "ranger_audit_db_support",
+ "description": "Ranger Audit to DB support",
+ "min_version": "2.2.0.0",
+ "max_version": "2.4.99.99"
+ },
+ {
+ "name": "accumulo_kerberos_user_auth",
+ "description": "Accumulo Kerberos User Auth (AMBARI-10163)",
+ "min_version": "2.3.0.0"
+ },
+ {
+ "name": "knox_versioned_data_dir",
+ "description": "Use versioned data dir for Knox (AMBARI-13164)",
+ "min_version": "2.3.2.0"
+ },
+ {
+ "name": "knox_sso_topology",
+ "description": "Knox SSO Topology support (AMBARI-13975)",
+ "min_version": "2.3.8.0"
+ },
+ {
+ "name": "atlas_rolling_upgrade",
+ "description": "Rolling upgrade support for Atlas",
+ "min_version": "2.3.0.0"
+ },
+ {
+ "name": "oozie_admin_user",
+ "description": "Oozie install user as an Oozie admin user (AMBARI-7976)",
+ "min_version": "2.2.0.0"
+ },
+ {
+ "name": "oozie_create_hive_tez_configs",
+ "description": "Oozie create configs for Ambari Hive and Tez deployments (AMBARI-8074)",
+ "min_version": "2.2.0.0"
+ },
+ {
+ "name": "oozie_setup_shared_lib",
+ "description": "Oozie setup tools used to shared Oozie lib to HDFS (AMBARI-7240)",
+ "min_version": "2.2.0.0"
+ },
+ {
+ "name": "oozie_host_kerberos",
+ "description": "Oozie in secured clusters uses _HOST in Kerberos principal (AMBARI-9775)",
+ "min_version": "2.0.0.0"
+ },
+ {
+ "name": "falcon_extensions",
+ "description": "Falcon Extension",
+ "min_version": "2.5.0.0"
+ },
+ {
+ "name": "hive_metastore_upgrade_schema",
+ "description": "Hive metastore upgrade schema support (AMBARI-11176)",
+ "min_version": "2.3.0.0"
+ },
+ {
+ "name": "hive_server_interactive",
+ "description": "Hive server interactive support (AMBARI-15573)",
+ "min_version": "2.5.0.0"
+ },
+ {
+ "name": "hive_webhcat_specific_configs",
+ "description": "Hive webhcat specific configurations support (AMBARI-12364)",
+ "min_version": "2.3.0.0"
+ },
+ {
+ "name": "hive_purge_table",
+ "description": "Hive purge table support (AMBARI-12260)",
+ "min_version": "2.3.0.0"
+ },
+ {
+ "name": "hive_server2_kerberized_env",
+ "description": "Hive server2 working on kerberized environment (AMBARI-13749)",
+ "min_version": "2.2.3.0",
+ "max_version": "2.2.5.0"
+ },
+ {
+ "name": "hive_env_heapsize",
+ "description": "Hive heapsize property defined in hive-env (AMBARI-12801)",
+ "min_version": "2.2.0.0"
+ },
+ {
+ "name": "ranger_kms_hsm_support",
+ "description": "Ranger KMS HSM support (AMBARI-15752)",
+ "min_version": "2.5.0.0"
+ },
+ {
+ "name": "ranger_log4j_support",
+ "description": "Ranger supporting log-4j properties (AMBARI-15681)",
+ "min_version": "2.5.0.0"
+ },
+ {
+ "name": "ranger_kerberos_support",
+ "description": "Ranger Kerberos support",
+ "min_version": "2.5.0.0"
+ },
+ {
+ "name": "hive_metastore_site_support",
+ "description": "Hive Metastore site support",
+ "min_version": "2.5.0.0"
+ },
+ {
+ "name": "ranger_usersync_password_jceks",
+ "description": "Saving Ranger Usersync credentials in jceks",
+ "min_version": "2.5.0.0"
+ },
+ {
+ "name": "ranger_install_infra_client",
+ "description": "Ambari Infra Service support",
+ "min_version": "2.5.0.0"
+ },
+ {
+ "name": "falcon_atlas_support_2_3",
+ "description": "Falcon Atlas integration support for 2.3 stack",
+ "min_version": "2.3.99.0",
+ "max_version": "2.4.0.0"
+ },
+ {
+ "name": "falcon_atlas_support",
+ "description": "Falcon Atlas integration",
+ "min_version": "2.5.0.0"
+ },
+ {
+ "name": "hbase_home_directory",
+ "description": "Hbase home directory in HDFS needed for HBASE backup",
+ "min_version": "2.5.0.0"
+ },
+ {
+ "name": "spark_livy",
+ "description": "Livy as slave component of spark",
+ "min_version": "2.5.0.0"
+ },
+ {
+ "name": "spark_livy2",
+ "description": "Livy as slave component of spark",
+ "min_version": "2.6.0.0"
+ },
+ {
+ "name": "atlas_ranger_plugin_support",
+ "description": "Atlas Ranger plugin support",
+ "min_version": "2.5.0.0"
+ },
+ {
+ "name": "atlas_conf_dir_in_path",
+ "description": "Prepend the Atlas conf dir (/etc/atlas/conf) to the classpath of Storm and Falcon",
+ "min_version": "2.3.0.0",
+ "max_version": "2.4.99.99"
+ },
+ {
+ "name": "atlas_upgrade_support",
+ "description": "Atlas supports express and rolling upgrades",
+ "min_version": "2.5.0.0"
+ },
+ {
+ "name": "atlas_hook_support",
+ "description": "Atlas support for hooks in Hive, Storm, Falcon, and Sqoop",
+ "min_version": "2.5.0.0"
+ },
+ {
+ "name": "ranger_pid_support",
+ "description": "Ranger Service support pid generation AMBARI-16756",
+ "min_version": "2.5.0.0"
+ },
+ {
+ "name": "ranger_kms_pid_support",
+ "description": "Ranger KMS Service support pid generation",
+ "min_version": "2.5.0.0"
+ },
+ {
+ "name": "ranger_admin_password_change",
+ "description": "Allow ranger admin credentials to be specified during cluster creation (AMBARI-17000)",
+ "min_version": "2.5.0.0"
+ },
+ {
+ "name": "ranger_setup_db_on_start",
+ "description": "Allows setup of ranger db and java patches to be called multiple times on each START",
+ "min_version": "2.6.0.0"
+ },
+ {
+ "name": "storm_metrics_apache_classes",
+ "description": "Metrics sink for Storm that uses Apache class names",
+ "min_version": "2.5.0.0"
+ },
+ {
+ "name": "spark_java_opts_support",
+ "description": "Allow Spark to generate java-opts file",
+ "min_version": "2.2.0.0",
+ "max_version": "2.4.0.0"
+ },
+ {
+ "name": "atlas_hbase_setup",
+ "description": "Use script to create Atlas tables in Hbase and set permissions for Atlas user.",
+ "min_version": "2.5.0.0"
+ },
+ {
+ "name": "ranger_hive_plugin_jdbc_url",
+ "description": "Handle Ranger hive repo config jdbc url change for stack 2.5 (AMBARI-18386)",
+ "min_version": "2.5.0.0"
+ },
+ {
+ "name": "zkfc_version_advertised",
+ "description": "ZKFC advertise version",
+ "min_version": "2.5.0.0"
+ },
+ {
+ "name": "phoenix_core_hdfs_site_required",
+ "description": "HDFS and CORE site required for Phoenix",
+ "max_version": "2.5.9.9"
+ },
+ {
+ "name": "ranger_tagsync_ssl_xml_support",
+ "description": "Ranger Tagsync ssl xml support.",
+ "min_version": "2.6.0.0"
+ },
+ {
+ "name": "ranger_xml_configuration",
+ "description": "Ranger code base support xml configurations",
+ "min_version": "2.3.0.0"
+ },
+ {
+ "name": "kafka_ranger_plugin_support",
+ "description": "Ambari stack changes for Ranger Kafka Plugin (AMBARI-11299)",
+ "min_version": "2.3.0.0"
+ },
+ {
+ "name": "yarn_ranger_plugin_support",
+ "description": "Implement Stack changes for Ranger Yarn Plugin integration (AMBARI-10866)",
+ "min_version": "2.3.0.0"
+ },
+ {
+ "name": "ranger_solr_config_support",
+ "description": "Showing Ranger solrconfig.xml on UI",
+ "min_version": "2.6.0.0"
+ },
+ {
+ "name": "hive_interactive_atlas_hook_required",
+ "description": "Registering Atlas Hook for Hive Interactive.",
+ "min_version": "2.6.0.0"
+ },
+ {
+ "name": "core_site_for_ranger_plugins",
+ "description": "Adding core-site.xml in when Ranger plugin is enabled for Storm, Kafka, and Knox.",
+ "min_version": "2.6.0.0"
+ },
+ {
+ "name": "atlas_install_hook_package_support",
+ "description": "Stop installing packages from 2.6",
+ "max_version": "2.5.9.9"
+ },
+ {
+ "name": "atlas_hdfs_site_on_namenode_ha",
+ "description": "Need to create hdfs-site under atlas-conf dir when Namenode-HA is enabled.",
+ "min_version": "2.6.0.0"
+ },
+ {
+ "name": "hive_interactive_ga",
+ "description": "Hive Interactive GA support",
+ "min_version": "2.6.0.0"
+ },
+ {
+ "name": "secure_ranger_ssl_password",
+ "description": "Securing Ranger Admin and Usersync SSL and Trustore related passwords in jceks",
+ "min_version": "2.6.0.0"
+ },
+ {
+ "name": "ranger_kms_ssl",
+ "description": "Ranger KMS SSL properties in ambari stack",
+ "min_version": "2.6.0.0"
+ },
+ {
+ "name": "nifi_encrypt_config",
+ "description": "Encrypt sensitive properties written to nifi property file",
+ "min_version": "2.6.0.0"
+ },
+ {
+ "name": "toolkit_config_update",
+ "description": "Support separate input and output for toolkit configuration",
+ "min_version": "2.6.0.0"
+ },
+ {
+ "name": "admin_toolkit_support",
+ "description": "Supports the nifi admin toolkit",
+ "min_version": "2.6.0.0"
+ },
+ {
+ "name": "tls_toolkit_san",
+ "description": "Support subject alternative name flag",
+ "min_version": "2.6.0.0"
+ },
+ {
+ "name": "nifi_jaas_conf_create",
+ "description": "Create NIFI jaas configuration when kerberos is enabled",
+ "min_version": "2.6.0.0"
+ }
+ ]
+ }
}
http://git-wip-us.apache.org/repos/asf/ambari/blob/f33a250c/ambari-server/src/main/resources/stacks/HDP/2.0.6/properties/stack_tools.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/properties/stack_tools.json b/ambari-server/src/main/resources/stacks/HDP/2.0.6/properties/stack_tools.json
index d1aab4b..c515d57 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/properties/stack_tools.json
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/properties/stack_tools.json
@@ -1,4 +1,14 @@
{
- "stack_selector": ["hdp-select", "/usr/bin/hdp-select", "hdp-select"],
- "conf_selector": ["conf-select", "/usr/bin/conf-select", "conf-select"]
-}
\ No newline at end of file
+ "HDP": {
+ "stack_selector": [
+ "hdp-select",
+ "/usr/bin/hdp-select",
+ "hdp-select"
+ ],
+ "conf_selector": [
+ "conf-select",
+ "/usr/bin/conf-select",
+ "conf-select"
+ ]
+ }
+}
http://git-wip-us.apache.org/repos/asf/ambari/blob/f33a250c/ambari-server/src/main/resources/stacks/PERF/1.0/configuration/cluster-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/configuration/cluster-env.xml b/ambari-server/src/main/resources/stacks/PERF/1.0/configuration/cluster-env.xml
index 7df00ee..f19ac52 100644
--- a/ambari-server/src/main/resources/stacks/PERF/1.0/configuration/cluster-env.xml
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/configuration/cluster-env.xml
@@ -20,6 +20,18 @@
*/
-->
<configuration>
+ <!-- Define stack_name property in the base stack. DO NOT override this property for each stack version -->
+ <property>
+ <name>stack_name</name>
+ <value>PERF</value>
+ <description>The name of the stack.</description>
+ <value-attributes>
+ <read-only>true</read-only>
+ <overridable>false</overridable>
+ <visible>false</visible>
+ </value-attributes>
+ <on-ambari-upgrade add="true"/>
+ </property>
<!-- Define stack_tools property in the base stack. DO NOT override this property for each stack version -->
<property>
@@ -55,8 +67,8 @@
<property>
<name>stack_root</name>
- <value>/usr/perf</value>
- <description>Stack root folder</description>
+ <value>{"PERF":"/usr/perf"}</value>
+ <description>JSON which defines the stack root by stack name</description>
<value-attributes>
<read-only>true</read-only>
<overridable>false</overridable>
http://git-wip-us.apache.org/repos/asf/ambari/blob/f33a250c/ambari-server/src/main/resources/stacks/PERF/1.0/properties/stack_features.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/properties/stack_features.json b/ambari-server/src/main/resources/stacks/PERF/1.0/properties/stack_features.json
index e9e0ed2..839e8e6 100644
--- a/ambari-server/src/main/resources/stacks/PERF/1.0/properties/stack_features.json
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/properties/stack_features.json
@@ -1,19 +1,21 @@
{
- "stack_features": [
- {
- "name": "rolling_upgrade",
- "description": "Rolling upgrade support",
- "min_version": "1.0.0.0"
- },
- {
- "name": "secure_zookeeper",
- "description": "Protect ZNodes with SASL acl in secure clusters",
- "min_version": "2.6.0.0"
- },
- {
- "name": "config_versioning",
- "description": "Configurable versions support",
- "min_version": "1.0.0.0"
- }
- ]
-}
+ "PERF": {
+ "stack_features": [
+ {
+ "name": "rolling_upgrade",
+ "description": "Rolling upgrade support",
+ "min_version": "1.0.0.0"
+ },
+ {
+ "name": "secure_zookeeper",
+ "description": "Protect ZNodes with SASL acl in secure clusters",
+ "min_version": "2.6.0.0"
+ },
+ {
+ "name": "config_versioning",
+ "description": "Configurable versions support",
+ "min_version": "1.0.0.0"
+ }
+ ]
+ }
+}
\ No newline at end of file
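[Editor's note] The same nest-under-the-stack-name pattern is applied to stack_features.json for both the HDP and PERF stacks, so any consumer that previously read the top-level "stack_features" list needs one extra level of indexing. A hedged before/after sketch of that access pattern; the file path and the final lookup are illustrative only.

    import json

    with open("stack_features.json") as f:  # path is illustrative
        data = json.load(f)

    # before this patch: features = data["stack_features"]
    features = data["HDP"]["stack_features"]
    lzo = next(feat for feat in features if feat["name"] == "lzo")
    print(lzo["min_version"])  # "2.2.1.0"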
[05/18] ambari git commit: AMBARI-21430 - Allow Multiple Versions of Stack Tools to Co-Exist (jonathanhurley)
Posted by rl...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/f33a250c/ambari-server/src/test/python/stacks/2.6/configs/ranger-admin-default.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.6/configs/ranger-admin-default.json b/ambari-server/src/test/python/stacks/2.6/configs/ranger-admin-default.json
index abe84ab..e5abe32 100644
--- a/ambari-server/src/test/python/stacks/2.6/configs/ranger-admin-default.json
+++ b/ambari-server/src/test/python/stacks/2.6/configs/ranger-admin-default.json
@@ -1,143 +1,143 @@
{
"localComponents": [
- "NAMENODE",
- "SECONDARY_NAMENODE",
- "ZOOKEEPER_SERVER",
- "DATANODE",
- "HDFS_CLIENT",
- "ZOOKEEPER_CLIENT",
- "RANGER_USERSYNC",
- "RANGER_ADMIN",
+ "NAMENODE",
+ "SECONDARY_NAMENODE",
+ "ZOOKEEPER_SERVER",
+ "DATANODE",
+ "HDFS_CLIENT",
+ "ZOOKEEPER_CLIENT",
+ "RANGER_USERSYNC",
+ "RANGER_ADMIN",
"RANGER_TAGSYNC",
"LOGSEARCH_SOLR",
"LOGSEARCH_SOLR_CLIENT"
- ],
+ ],
"configuration_attributes": {
- "ranger-hdfs-audit": {},
- "ssl-client": {},
- "ranger-admin-site": {},
- "ranger-hdfs-policymgr-ssl": {},
- "tagsync-application-properties": {},
- "ranger-env": {},
- "usersync-log4j": {},
- "admin-properties": {},
- "ranger-ugsync-site": {},
+ "ranger-hdfs-audit": {},
+ "ssl-client": {},
+ "ranger-admin-site": {},
+ "ranger-hdfs-policymgr-ssl": {},
+ "tagsync-application-properties": {},
+ "ranger-env": {},
+ "usersync-log4j": {},
+ "admin-properties": {},
+ "ranger-ugsync-site": {},
"hdfs-site": {
"final": {
- "dfs.datanode.data.dir": "true",
- "dfs.namenode.http-address": "true",
- "dfs.datanode.failed.volumes.tolerated": "true",
- "dfs.support.append": "true",
- "dfs.namenode.name.dir": "true",
+ "dfs.datanode.data.dir": "true",
+ "dfs.namenode.http-address": "true",
+ "dfs.datanode.failed.volumes.tolerated": "true",
+ "dfs.support.append": "true",
+ "dfs.namenode.name.dir": "true",
"dfs.webhdfs.enabled": "true"
}
- },
+ },
"ranger-tagsync-site": {},
"ranger-tagsync-policymgr-ssl": {},
"zoo.cfg": {},
"hadoop-policy": {},
- "hdfs-log4j": {},
- "ranger-hdfs-plugin-properties": {},
+ "hdfs-log4j": {},
+ "ranger-hdfs-plugin-properties": {},
"core-site": {
"final": {
"fs.defaultFS": "true"
}
- },
- "hadoop-env": {},
- "zookeeper-log4j": {},
- "ssl-server": {},
- "ranger-site": {},
- "admin-log4j": {},
- "tagsync-log4j": {},
- "ranger-hdfs-security": {},
+ },
+ "hadoop-env": {},
+ "zookeeper-log4j": {},
+ "ssl-server": {},
+ "ranger-site": {},
+ "admin-log4j": {},
+ "tagsync-log4j": {},
+ "ranger-hdfs-security": {},
"ranger-solr-configuration": {},
"usersync-properties": {},
"zookeeper-env": {},
"infra-solr-env": {},
"infra-solr-client-log4j": {},
"cluster-env": {}
- },
- "public_hostname": "c6401.ambari.apache.org",
- "commandId": "11-0",
- "hostname": "c6401.ambari.apache.org",
- "kerberosCommandParams": [],
- "serviceName": "RANGER",
- "role": "RANGER_ADMIN",
- "forceRefreshConfigTagsBeforeExecution": [],
- "requestId": 11,
+ },
+ "public_hostname": "c6401.ambari.apache.org",
+ "commandId": "11-0",
+ "hostname": "c6401.ambari.apache.org",
+ "kerberosCommandParams": [],
+ "serviceName": "RANGER",
+ "role": "RANGER_ADMIN",
+ "forceRefreshConfigTagsBeforeExecution": [],
+ "requestId": 11,
"agentConfigParams": {
"agent": {
"parallel_execution": 0
}
- },
- "clusterName": "c1",
- "commandType": "EXECUTION_COMMAND",
- "taskId": 31,
- "roleParams": {},
+ },
+ "clusterName": "c1",
+ "commandType": "EXECUTION_COMMAND",
+ "taskId": 31,
+ "roleParams": {},
"configurationTags": {
"ranger-hdfs-audit": {
"tag": "version1466705299922"
- },
+ },
"ssl-client": {
"tag": "version1"
- },
+ },
"ranger-admin-site": {
"tag": "version1466705299949"
- },
+ },
"ranger-hdfs-policymgr-ssl": {
"tag": "version1466705299922"
- },
+ },
"tagsync-application-properties": {
"tag": "version1466705299949"
- },
+ },
"ranger-env": {
"tag": "version1466705299949"
- },
+ },
"usersync-log4j": {
"tag": "version1466705299949"
- },
+ },
"admin-properties": {
"tag": "version1466705299949"
- },
+ },
"ranger-ugsync-site": {
"tag": "version1466705299949"
- },
+ },
"hdfs-site": {
"tag": "version1"
- },
+ },
"ranger-tagsync-site": {
"tag": "version1466705299949"
- },
+ },
"zoo.cfg": {
"tag": "version1"
- },
+ },
"hadoop-policy": {
"tag": "version1"
- },
+ },
"hdfs-log4j": {
"tag": "version1"
- },
+ },
"ranger-hdfs-plugin-properties": {
"tag": "version1466705299922"
- },
+ },
"core-site": {
"tag": "version1"
- },
+ },
"hadoop-env": {
"tag": "version1"
- },
+ },
"zookeeper-log4j": {
"tag": "version1"
- },
+ },
"ssl-server": {
"tag": "version1"
- },
+ },
"ranger-site": {
"tag": "version1466705299949"
- },
+ },
"admin-log4j": {
"tag": "version1466705299949"
- },
+ },
"tagsync-log4j": {
"tag": "version1466705299949"
},
@@ -146,7 +146,7 @@
},
"ranger-hdfs-security": {
"tag": "version1466705299922"
- },
+ },
"usersync-properties": {
"tag": "version1466705299949"
},
@@ -165,116 +165,116 @@
"cluster-env": {
"tag": "version1"
}
- },
- "roleCommand": "START",
+ },
+ "roleCommand": "START",
"hostLevelParams": {
- "agent_stack_retry_on_unavailability": "false",
- "stack_name": "HDP",
+ "agent_stack_retry_on_unavailability": "false",
+ "stack_name": "HDP",
"package_version": "2_6_0_0_*",
"custom_mysql_jdbc_name": "mysql-connector-java.jar",
"previous_custom_mysql_jdbc_name": "mysql-connector-java-old.jar",
- "host_sys_prepped": "false",
- "ambari_db_rca_username": "mapred",
+ "host_sys_prepped": "false",
+ "ambari_db_rca_username": "mapred",
"current_version": "2.6.0.0-801",
"mysql_jdbc_url": "http://c6401.ambari.apache.org:8080/resources//mysql-connector-java.jar",
"agent_stack_retry_count": "5",
"stack_version": "2.6",
- "jdk_name": "jdk-8u60-linux-x64.tar.gz",
- "ambari_db_rca_driver": "org.postgresql.Driver",
- "java_home": "/usr/jdk64/jdk1.7.0_45",
- "repository_version_id": "1",
- "jdk_location": "http://c6401.ambari.apache.org:8080/resources/",
- "not_managed_hdfs_path_list": "[\"/tmp\"]",
- "ambari_db_rca_url": "jdbc:postgresql://c6401.ambari.apache.org/ambarirca",
- "java_version": "8",
+ "jdk_name": "jdk-8u60-linux-x64.tar.gz",
+ "ambari_db_rca_driver": "org.postgresql.Driver",
+ "java_home": "/usr/jdk64/jdk1.7.0_45",
+ "repository_version_id": "1",
+ "jdk_location": "http://c6401.ambari.apache.org:8080/resources/",
+ "not_managed_hdfs_path_list": "[\"/tmp\"]",
+ "ambari_db_rca_url": "jdbc:postgresql://c6401.ambari.apache.org/ambarirca",
+ "java_version": "8",
"repo_info": "[{\"baseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/BUILDS/2.6.0.0-801\",\"osType\":\"redhat6\",\"repoId\":\"HDP-2.6\",\"repoName\":\"HDP\",\"defaultBaseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/updates/2.6.0.0\",\"latestBaseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/BUILDS/2.6.0.0-801\",\"baseSaved\":true},{\"baseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP-UTILS-1.1.0.21/repos/centos6\",\"osType\":\"redhat6\",\"repoId\":\"HDP-UTILS-1.1.0.21\",\"repoName\":\"HDP-UTILS\",\"defaultBaseUrl\":\"http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.21/repos/centos6\",\"latestBaseUrl\":\"http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.21/repos/centos6\",\"baseSaved\":true}]",
"package_list": "[{\"name\":\"ranger_${stack_version}-admin\",\"condition\":\"\",\"skipUpgrade\":false},{\"name\":\"ranger_${stack_version}-usersync\",\"condition\":\"\",\"skipUpgrade\":false},{\"name\":\"ranger_${stack_version}-tagsync\",\"condition\":\"should_install_ranger_tagsync\",\"skipUpgrade\":false},{\"name\":\"ambari-logsearch-solr-client\",\"condition\":\"should_install_logsearch_solr_client\",\"skipUpgrade\":false}]",
- "db_name": "ambari",
- "group_list": "[\"ranger\",\"hadoop\",\"users\"]",
- "agentCacheDir": "/var/lib/ambari-agent/cache",
- "ambari_db_rca_password": "mapred",
- "jce_name": "jce_policy-8.zip",
- "oracle_jdbc_url": "http://c6401.ambari.apache.org:8080/resources//ojdbc6.jar",
- "db_driver_filename": "mysql-connector-java.jar",
- "user_list": "[\"zookeeper\",\"ambari-qa\",\"ranger\",\"hdfs\"]",
+ "db_name": "ambari",
+ "group_list": "[\"ranger\",\"hadoop\",\"users\"]",
+ "agentCacheDir": "/var/lib/ambari-agent/cache",
+ "ambari_db_rca_password": "mapred",
+ "jce_name": "jce_policy-8.zip",
+ "oracle_jdbc_url": "http://c6401.ambari.apache.org:8080/resources//ojdbc6.jar",
+ "db_driver_filename": "mysql-connector-java.jar",
+ "user_list": "[\"zookeeper\",\"ambari-qa\",\"ranger\",\"hdfs\"]",
"clientsToUpdateConfigs": "[\"*\"]"
- },
+ },
"commandParams": {
- "service_package_folder": "common-services/RANGER/0.4.0/package",
- "script": "scripts/ranger_admin.py",
+ "service_package_folder": "common-services/RANGER/0.4.0/package",
+ "script": "scripts/ranger_admin.py",
"hooks_folder": "HDP/2.0.6/hooks",
"version": "2.6.0.0-801",
- "max_duration_for_retries": "0",
- "command_retry_enabled": "false",
- "command_timeout": "600",
+ "max_duration_for_retries": "0",
+ "command_retry_enabled": "false",
+ "command_timeout": "600",
"script_type": "PYTHON"
- },
- "forceRefreshConfigTags": [],
- "stageId": 0,
+ },
+ "forceRefreshConfigTags": [],
+ "stageId": 0,
"clusterHostInfo": {
"snamenode_host": [
"c6401.ambari.apache.org"
- ],
+ ],
"ambari_server_use_ssl": [
"false"
- ],
+ ],
"all_ping_ports": [
"8670"
- ],
+ ],
"ranger_tagsync_hosts": [
"c6401.ambari.apache.org"
- ],
+ ],
"ranger_usersync_hosts": [
"c6401.ambari.apache.org"
- ],
+ ],
"all_hosts": [
"c6401.ambari.apache.org"
- ],
+ ],
"slave_hosts": [
"c6401.ambari.apache.org"
- ],
+ ],
"namenode_host": [
"c6401.ambari.apache.org"
- ],
+ ],
"ambari_server_port": [
"8080"
- ],
+ ],
"ranger_admin_hosts": [
"c6401.ambari.apache.org"
- ],
+ ],
"all_racks": [
"/default-rack"
- ],
+ ],
"all_ipv4_ips": [
"172.22.83.73"
- ],
+ ],
"ambari_server_host": [
"c6401.ambari.apache.org"
- ],
+ ],
"zookeeper_hosts": [
"c6401.ambari.apache.org"
],
"infra_solr_hosts": [
"c6401.ambari.apache.org"
]
- },
+ },
"configurations": {
"ranger-hdfs-audit": {
- "xasecure.audit.destination.solr.zookeepers": "NONE",
- "xasecure.audit.destination.solr.urls": "",
- "xasecure.audit.destination.solr.batch.filespool.dir": "/var/log/hadoop/hdfs/audit/solr/spool",
- "xasecure.audit.destination.hdfs.batch.filespool.dir": "/var/log/hadoop/hdfs/audit/hdfs/spool",
+ "xasecure.audit.destination.solr.zookeepers": "NONE",
+ "xasecure.audit.destination.solr.urls": "",
+ "xasecure.audit.destination.solr.batch.filespool.dir": "/var/log/hadoop/hdfs/audit/solr/spool",
+ "xasecure.audit.destination.hdfs.batch.filespool.dir": "/var/log/hadoop/hdfs/audit/hdfs/spool",
"xasecure.audit.destination.hdfs": "true",
- "xasecure.audit.destination.solr": "false",
+ "xasecure.audit.destination.solr": "false",
"xasecure.audit.provider.summary.enabled": "false",
"xasecure.audit.destination.hdfs.dir": "hdfs://c6401.ambari.apache.org:8020/ranger/audit",
"xasecure.audit.is.enabled": "true"
},
"ranger-tagsync-policymgr-ssl": {
- "xasecure.policymgr.clientssl.keystore": "/etc/security/serverKeys/ranger-tagsync-keystore.jks",
- "xasecure.policymgr.clientssl.truststore.password": "changeit",
+ "xasecure.policymgr.clientssl.keystore": "/etc/security/serverKeys/ranger-tagsync-keystore.jks",
+ "xasecure.policymgr.clientssl.truststore.password": "changeit",
"xasecure.policymgr.clientssl.keystore.credential.file": "jceks://file{{ranger_tagsync_credential_file}}",
- "xasecure.policymgr.clientssl.truststore": "/etc/security/serverKeys/ranger-tagsync-mytruststore.jks",
+ "xasecure.policymgr.clientssl.truststore": "/etc/security/serverKeys/ranger-tagsync-mytruststore.jks",
"xasecure.policymgr.clientssl.truststore.credential.file": "jceks://file{{ranger_tagsync_credential_file}}",
"xasecure.policymgr.clientssl.keystore.password": "myKeyFilePassword"
},
@@ -287,143 +287,143 @@
"xasecure.policymgr.clientssl.keystore.password": "myKeyFilePassword"
},
"ssl-client": {
- "ssl.client.truststore.reload.interval": "10000",
- "ssl.client.keystore.password": "bigdata",
- "ssl.client.truststore.type": "jks",
- "ssl.client.keystore.location": "/etc/security/clientKeys/keystore.jks",
- "ssl.client.truststore.location": "/etc/security/clientKeys/all.jks",
- "ssl.client.truststore.password": "bigdata",
+ "ssl.client.truststore.reload.interval": "10000",
+ "ssl.client.keystore.password": "bigdata",
+ "ssl.client.truststore.type": "jks",
+ "ssl.client.keystore.location": "/etc/security/clientKeys/keystore.jks",
+ "ssl.client.truststore.location": "/etc/security/clientKeys/all.jks",
+ "ssl.client.truststore.password": "bigdata",
"ssl.client.keystore.type": "jks"
- },
+ },
"ranger-admin-site": {
"ranger.admin.kerberos.cookie.domain": "",
- "ranger.kms.service.user.hdfs": "hdfs",
- "ranger.spnego.kerberos.principal": "",
- "ranger.ldap.ad.url": "{{ranger_ug_ldap_url}}",
- "ranger.plugins.hive.serviceuser": "hive",
- "ranger.lookup.kerberos.keytab": "",
- "ranger.plugins.kms.serviceuser": "kms",
- "ranger.service.https.attrib.ssl.enabled": "false",
- "ranger.sso.browser.useragent": "Mozilla,chrome",
- "ranger.jpa.jdbc.url": "jdbc:mysql://c6401.ambari.apache.org:3306/ranger01",
- "ranger.plugins.hbase.serviceuser": "hbase",
- "ranger.plugins.hdfs.serviceuser": "hdfs",
- "ranger.ldap.group.searchbase": "{{ranger_ug_ldap_group_searchbase}}",
- "ranger.ldap.user.dnpattern": "uid={0},ou=users,dc=xasecure,dc=net",
- "ranger.plugins.knox.serviceuser": "knox",
- "ranger.ldap.base.dn": "dc=example,dc=com",
- "ranger.sso.publicKey": "",
- "ranger.admin.kerberos.cookie.path": "/",
- "ranger.service.https.attrib.clientAuth": "want",
- "ranger.jpa.jdbc.user": "{{ranger_db_user}}",
- "ranger.ldap.ad.user.searchfilter": "(sAMAccountName={0})",
- "ranger.ldap.group.roleattribute": "cn",
- "ranger.plugins.kafka.serviceuser": "kafka",
- "ranger.admin.kerberos.principal": "",
- "ranger.ldap.ad.bind.dn": "{{ranger_ug_ldap_bind_dn}}",
- "ranger.credential.provider.path": "/etc/ranger/admin/rangeradmin.jceks",
- "ranger.ldap.referral": "ignore",
- "ranger.service.http.port": "6080",
- "ranger.ldap.user.searchfilter": "(uid={0})",
- "ranger.plugins.atlas.serviceuser": "atlas",
+ "ranger.kms.service.user.hdfs": "hdfs",
+ "ranger.spnego.kerberos.principal": "",
+ "ranger.ldap.ad.url": "{{ranger_ug_ldap_url}}",
+ "ranger.plugins.hive.serviceuser": "hive",
+ "ranger.lookup.kerberos.keytab": "",
+ "ranger.plugins.kms.serviceuser": "kms",
+ "ranger.service.https.attrib.ssl.enabled": "false",
+ "ranger.sso.browser.useragent": "Mozilla,chrome",
+ "ranger.jpa.jdbc.url": "jdbc:mysql://c6401.ambari.apache.org:3306/ranger01",
+ "ranger.plugins.hbase.serviceuser": "hbase",
+ "ranger.plugins.hdfs.serviceuser": "hdfs",
+ "ranger.ldap.group.searchbase": "{{ranger_ug_ldap_group_searchbase}}",
+ "ranger.ldap.user.dnpattern": "uid={0},ou=users,dc=xasecure,dc=net",
+ "ranger.plugins.knox.serviceuser": "knox",
+ "ranger.ldap.base.dn": "dc=example,dc=com",
+ "ranger.sso.publicKey": "",
+ "ranger.admin.kerberos.cookie.path": "/",
+ "ranger.service.https.attrib.clientAuth": "want",
+ "ranger.jpa.jdbc.user": "{{ranger_db_user}}",
+ "ranger.ldap.ad.user.searchfilter": "(sAMAccountName={0})",
+ "ranger.ldap.group.roleattribute": "cn",
+ "ranger.plugins.kafka.serviceuser": "kafka",
+ "ranger.admin.kerberos.principal": "",
+ "ranger.ldap.ad.bind.dn": "{{ranger_ug_ldap_bind_dn}}",
+ "ranger.credential.provider.path": "/etc/ranger/admin/rangeradmin.jceks",
+ "ranger.ldap.referral": "ignore",
+ "ranger.service.http.port": "6080",
+ "ranger.ldap.user.searchfilter": "(uid={0})",
+ "ranger.plugins.atlas.serviceuser": "atlas",
"ranger.truststore.password": "changeit",
"ranger.truststore.alias": "trustStoreAlias",
- "ranger.ldap.bind.password": "{{ranger_usersync_ldap_ldapbindpassword}}",
- "ranger.audit.solr.password": "NONE",
+ "ranger.ldap.bind.password": "{{ranger_usersync_ldap_ldapbindpassword}}",
+ "ranger.audit.solr.password": "NONE",
"ranger.audit.solr.zookeepers": "c6401.ambari.apache.org:2181/infra-solr",
- "ranger.lookup.kerberos.principal": "",
- "ranger.service.https.port": "6182",
- "ranger.plugins.storm.serviceuser": "storm",
- "ranger.externalurl": "{{ranger_external_url}}",
- "ranger.truststore.file": "/etc/ranger/admin/conf/ranger-admin-keystore.jks",
- "ranger.kms.service.user.hive": "",
- "ranger.https.attrib.keystore.file": "/etc/ranger/admin/conf/ranger-admin-keystore.jks",
- "ranger.jpa.jdbc.dialect": "{{jdbc_dialect}}",
- "ranger.service.host": "{{ranger_host}}",
- "ranger.service.https.attrib.keystore.keyalias": "rangeradmin",
- "ranger.service.https.attrib.keystore.pass": "xasecure",
- "ranger.unixauth.remote.login.enabled": "true",
- "ranger.jpa.jdbc.credential.alias": "rangeradmin",
- "ranger.ldap.ad.bind.password": "{{ranger_usersync_ldap_ldapbindpassword}}",
- "ranger.audit.solr.username": "ranger_solr",
- "ranger.sso.enabled": "false",
- "ranger.audit.solr.urls": "",
- "ranger.ldap.ad.domain": "",
- "ranger.plugins.yarn.serviceuser": "yarn",
- "ranger.audit.source.type": "solr",
- "ranger.ldap.bind.dn": "{{ranger_ug_ldap_bind_dn}}",
- "ranger.ldap.url": "{{ranger_ug_ldap_url}}",
- "ranger.authentication.method": "UNIX",
- "ranger.service.http.enabled": "true",
- "ranger.ldap.group.searchfilter": "{{ranger_ug_ldap_group_searchfilter}}",
- "ranger.ldap.ad.referral": "ignore",
- "ranger.ldap.ad.base.dn": "dc=example,dc=com",
- "ranger.jpa.jdbc.password": "_",
- "ranger.spnego.kerberos.keytab": "",
- "ranger.sso.providerurl": "",
- "ranger.unixauth.service.hostname": "{{ugsync_host}}",
- "ranger.admin.kerberos.keytab": "",
- "ranger.admin.kerberos.token.valid.seconds": "30",
- "ranger.jpa.jdbc.driver": "com.mysql.jdbc.Driver",
+ "ranger.lookup.kerberos.principal": "",
+ "ranger.service.https.port": "6182",
+ "ranger.plugins.storm.serviceuser": "storm",
+ "ranger.externalurl": "{{ranger_external_url}}",
+ "ranger.truststore.file": "/etc/ranger/admin/conf/ranger-admin-keystore.jks",
+ "ranger.kms.service.user.hive": "",
+ "ranger.https.attrib.keystore.file": "/etc/ranger/admin/conf/ranger-admin-keystore.jks",
+ "ranger.jpa.jdbc.dialect": "{{jdbc_dialect}}",
+ "ranger.service.host": "{{ranger_host}}",
+ "ranger.service.https.attrib.keystore.keyalias": "rangeradmin",
+ "ranger.service.https.attrib.keystore.pass": "xasecure",
+ "ranger.unixauth.remote.login.enabled": "true",
+ "ranger.jpa.jdbc.credential.alias": "rangeradmin",
+ "ranger.ldap.ad.bind.password": "{{ranger_usersync_ldap_ldapbindpassword}}",
+ "ranger.audit.solr.username": "ranger_solr",
+ "ranger.sso.enabled": "false",
+ "ranger.audit.solr.urls": "",
+ "ranger.ldap.ad.domain": "",
+ "ranger.plugins.yarn.serviceuser": "yarn",
+ "ranger.audit.source.type": "solr",
+ "ranger.ldap.bind.dn": "{{ranger_ug_ldap_bind_dn}}",
+ "ranger.ldap.url": "{{ranger_ug_ldap_url}}",
+ "ranger.authentication.method": "UNIX",
+ "ranger.service.http.enabled": "true",
+ "ranger.ldap.group.searchfilter": "{{ranger_ug_ldap_group_searchfilter}}",
+ "ranger.ldap.ad.referral": "ignore",
+ "ranger.ldap.ad.base.dn": "dc=example,dc=com",
+ "ranger.jpa.jdbc.password": "_",
+ "ranger.spnego.kerberos.keytab": "",
+ "ranger.sso.providerurl": "",
+ "ranger.unixauth.service.hostname": "{{ugsync_host}}",
+ "ranger.admin.kerberos.keytab": "",
+ "ranger.admin.kerberos.token.valid.seconds": "30",
+ "ranger.jpa.jdbc.driver": "com.mysql.jdbc.Driver",
"ranger.unixauth.service.port": "5151",
"ranger.service.https.attrib.keystore.credential.alias": "keyStoreCredentialAlias"
- },
+ },
"ranger-hdfs-policymgr-ssl": {
- "xasecure.policymgr.clientssl.keystore": "/usr/hdp/current/hadoop-client/conf/ranger-plugin-keystore.jks",
- "xasecure.policymgr.clientssl.truststore.password": "changeit",
- "xasecure.policymgr.clientssl.keystore.credential.file": "jceks://file{{credential_file}}",
- "xasecure.policymgr.clientssl.truststore": "/usr/hdp/current/hadoop-client/conf/ranger-plugin-truststore.jks",
- "xasecure.policymgr.clientssl.truststore.credential.file": "jceks://file{{credential_file}}",
+ "xasecure.policymgr.clientssl.keystore": "/usr/hdp/current/hadoop-client/conf/ranger-plugin-keystore.jks",
+ "xasecure.policymgr.clientssl.truststore.password": "changeit",
+ "xasecure.policymgr.clientssl.keystore.credential.file": "jceks://file{{credential_file}}",
+ "xasecure.policymgr.clientssl.truststore": "/usr/hdp/current/hadoop-client/conf/ranger-plugin-truststore.jks",
+ "xasecure.policymgr.clientssl.truststore.credential.file": "jceks://file{{credential_file}}",
"xasecure.policymgr.clientssl.keystore.password": "myKeyFilePassword"
- },
+ },
"tagsync-application-properties": {
- "atlas.kafka.entities.group.id": "ranger_entities_consumer",
- "atlas.kafka.zookeeper.connect": "c6401.ambari.apache.org:2181",
+ "atlas.kafka.entities.group.id": "ranger_entities_consumer",
+ "atlas.kafka.zookeeper.connect": "c6401.ambari.apache.org:2181",
"atlas.kafka.bootstrap.servers": "localhost:6667"
- },
+ },
"ranger-env": {
- "ranger_solr_shards": "1",
- "ranger_solr_config_set": "ranger_audits",
- "ranger_user": "ranger",
+ "ranger_solr_shards": "1",
+ "ranger_solr_config_set": "ranger_audits",
+ "ranger_user": "ranger",
"ranger_solr_replication_factor": "1",
- "xml_configurations_supported": "true",
- "ranger-atlas-plugin-enabled": "No",
- "ranger-hbase-plugin-enabled": "No",
- "ranger-yarn-plugin-enabled": "No",
- "bind_anonymous": "false",
- "ranger_admin_username": "amb_ranger_admin",
- "admin_password": "admin",
- "is_solrCloud_enabled": "true",
- "ranger-storm-plugin-enabled": "No",
- "ranger-hdfs-plugin-enabled": "No",
- "ranger_group": "ranger",
- "ranger-knox-plugin-enabled": "No",
- "ranger_admin_log_dir": "/var/log/ranger/admin",
- "ranger-kafka-plugin-enabled": "No",
- "ranger_privelege_user_jdbc_url": "jdbc:mysql://c6401.ambari.apache.org:3306",
- "ranger-hive-plugin-enabled": "No",
- "xasecure.audit.destination.solr": "true",
- "ranger_pid_dir": "/var/run/ranger",
- "xasecure.audit.destination.hdfs": "true",
- "admin_username": "admin",
- "xasecure.audit.destination.hdfs.dir": "hdfs://c6401.ambari.apache.org:8020/ranger/audit",
- "create_db_dbuser": "true",
- "ranger_solr_collection_name": "ranger_audits",
- "ranger_admin_password": "P1!q9xa96SMi5NCl",
+ "xml_configurations_supported": "true",
+ "ranger-atlas-plugin-enabled": "No",
+ "ranger-hbase-plugin-enabled": "No",
+ "ranger-yarn-plugin-enabled": "No",
+ "bind_anonymous": "false",
+ "ranger_admin_username": "amb_ranger_admin",
+ "admin_password": "admin",
+ "is_solrCloud_enabled": "true",
+ "ranger-storm-plugin-enabled": "No",
+ "ranger-hdfs-plugin-enabled": "No",
+ "ranger_group": "ranger",
+ "ranger-knox-plugin-enabled": "No",
+ "ranger_admin_log_dir": "/var/log/ranger/admin",
+ "ranger-kafka-plugin-enabled": "No",
+ "ranger_privelege_user_jdbc_url": "jdbc:mysql://c6401.ambari.apache.org:3306",
+ "ranger-hive-plugin-enabled": "No",
+ "xasecure.audit.destination.solr": "true",
+ "ranger_pid_dir": "/var/run/ranger",
+ "xasecure.audit.destination.hdfs": "true",
+ "admin_username": "admin",
+ "xasecure.audit.destination.hdfs.dir": "hdfs://c6401.ambari.apache.org:8020/ranger/audit",
+ "create_db_dbuser": "true",
+ "ranger_solr_collection_name": "ranger_audits",
+ "ranger_admin_password": "P1!q9xa96SMi5NCl",
"ranger_usersync_log_dir": "/var/log/ranger/usersync"
- },
+ },
"usersync-log4j": {
"content": "\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nlog4j.rootLogger = info,logFile\n\n# logFile\nlog4j.appender.logFile=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.logFile.file=${logdir}/usersync.
log\nlog4j.appender.logFile.datePattern='.'yyyy-MM-dd\nlog4j.appender.logFile.layout=org.apache.log4j.PatternLayout\nlog4j.appender.logFile.layout.ConversionPattern=%d{dd MMM yyyy HH:mm:ss} %5p %c{1} [%t] - %m%n\n\n# console\nlog4j.appender.console=org.apache.log4j.ConsoleAppender\nlog4j.appender.console.Target=System.out\nlog4j.appender.console.layout=org.apache.log4j.PatternLayout\nlog4j.appender.console.layout.ConversionPattern=%d{dd MMM yyyy HH:mm:ss} %5p %c{1} [%t] - %m%n"
- },
+ },
"admin-properties": {
- "db_user": "rangeradmin01",
- "DB_FLAVOR": "MYSQL",
- "db_password": "rangeradmin01",
- "db_root_user": "root",
- "policymgr_external_url": "http://c6401.ambari.apache.org:6080",
- "db_name": "ranger01",
- "db_host": "c6401.ambari.apache.org",
- "db_root_password": "vagrant",
+ "db_user": "rangeradmin01",
+ "DB_FLAVOR": "MYSQL",
+ "db_password": "rangeradmin01",
+ "db_root_user": "root",
+ "policymgr_external_url": "http://c6401.ambari.apache.org:6080",
+ "db_name": "ranger01",
+ "db_host": "c6401.ambari.apache.org",
+ "db_root_password": "vagrant",
"SQL_CONNECTOR_JAR": "{{driver_curl_target}}"
},
"ranger-solr-configuration": {
@@ -432,248 +432,248 @@
"ranger_audit_logs_merge_factor": "5"
},
"ranger-ugsync-site": {
- "ranger.usersync.ldap.binddn": "",
- "ranger.usersync.policymgr.username": "rangerusersync",
- "ranger.usersync.policymanager.mockrun": "false",
- "ranger.usersync.group.searchbase": "",
- "ranger.usersync.ldap.bindalias": "testldapalias",
- "ranger.usersync.truststore.file": "/usr/hdp/current/ranger-usersync/conf/mytruststore.jks",
- "ranger.usersync.port": "5151",
- "ranger.usersync.pagedresultssize": "500",
- "ranger.usersync.group.memberattributename": "",
- "ranger.usersync.kerberos.principal": "",
- "ranger.usersync.source.impl.class": "org.apache.ranger.unixusersync.process.UnixUserGroupBuilder",
- "ranger.usersync.ldap.referral": "ignore",
- "ranger.usersync.group.searchfilter": "",
- "ranger.usersync.ldap.user.objectclass": "person",
- "ranger.usersync.logdir": "{{usersync_log_dir}}",
- "ranger.usersync.ldap.user.searchfilter": "",
- "ranger.usersync.ldap.groupname.caseconversion": "none",
- "ranger.usersync.ldap.ldapbindpassword": "",
- "ranger.usersync.unix.minUserId": "500",
- "ranger.usersync.policymanager.maxrecordsperapicall": "1000",
- "ranger.usersync.group.nameattribute": "",
- "ranger.usersync.policymgr.alias": "ranger.usersync.policymgr.password",
- "ranger.usersync.keystore.file": "/usr/hdp/current/ranger-usersync/conf/unixauthservice.jks",
- "ranger.usersync.user.searchenabled": "false",
- "ranger.usersync.group.usermapsyncenabled": "true",
- "ranger.usersync.ldap.bindkeystore": "",
- "ranger.usersync.ldap.user.groupnameattribute": "memberof, ismemberof",
- "ranger.usersync.kerberos.keytab": "",
- "ranger.usersync.passwordvalidator.path": "./native/credValidator.uexe",
- "ranger.usersync.group.objectclass": "",
- "ranger.usersync.ldap.user.searchscope": "sub",
- "ranger.usersync.unix.password.file": "/etc/passwd",
- "ranger.usersync.ldap.user.nameattribute": "",
- "ranger.usersync.pagedresultsenabled": "true",
- "ranger.usersync.policymanager.baseURL": "{{ranger_external_url}}",
- "ranger.usersync.group.search.first.enabled": "false",
- "ranger.usersync.group.searchenabled": "false",
- "ranger.usersync.sink.impl.class": "org.apache.ranger.unixusersync.process.PolicyMgrUserGroupBuilder",
- "ranger.usersync.ssl": "true",
- "ranger.usersync.ldap.url": "",
- "ranger.usersync.ldap.searchBase": "dc=hadoop,dc=apache,dc=org",
- "ranger.usersync.policymgr.keystore": "/usr/hdp/current/ranger-usersync/conf/ugsync.jceks",
- "ranger.usersync.ldap.user.searchbase": "",
- "ranger.usersync.ldap.username.caseconversion": "none",
- "ranger.usersync.credstore.filename": "/usr/hdp/current/ranger-usersync/conf/ugsync.jceks",
- "ranger.usersync.keystore.password": "UnIx529p",
- "ranger.usersync.unix.group.file": "/etc/group",
- "ranger.usersync.filesource.file": "/tmp/usergroup.txt",
- "ranger.usersync.group.searchscope": "",
- "ranger.usersync.truststore.password": "changeit",
- "ranger.usersync.enabled": "true",
- "ranger.usersync.sleeptimeinmillisbetweensynccycle": "60000",
+ "ranger.usersync.ldap.binddn": "",
+ "ranger.usersync.policymgr.username": "rangerusersync",
+ "ranger.usersync.policymanager.mockrun": "false",
+ "ranger.usersync.group.searchbase": "",
+ "ranger.usersync.ldap.bindalias": "testldapalias",
+ "ranger.usersync.truststore.file": "/usr/hdp/current/ranger-usersync/conf/mytruststore.jks",
+ "ranger.usersync.port": "5151",
+ "ranger.usersync.pagedresultssize": "500",
+ "ranger.usersync.group.memberattributename": "",
+ "ranger.usersync.kerberos.principal": "",
+ "ranger.usersync.source.impl.class": "org.apache.ranger.unixusersync.process.UnixUserGroupBuilder",
+ "ranger.usersync.ldap.referral": "ignore",
+ "ranger.usersync.group.searchfilter": "",
+ "ranger.usersync.ldap.user.objectclass": "person",
+ "ranger.usersync.logdir": "{{usersync_log_dir}}",
+ "ranger.usersync.ldap.user.searchfilter": "",
+ "ranger.usersync.ldap.groupname.caseconversion": "none",
+ "ranger.usersync.ldap.ldapbindpassword": "",
+ "ranger.usersync.unix.minUserId": "500",
+ "ranger.usersync.policymanager.maxrecordsperapicall": "1000",
+ "ranger.usersync.group.nameattribute": "",
+ "ranger.usersync.policymgr.alias": "ranger.usersync.policymgr.password",
+ "ranger.usersync.keystore.file": "/usr/hdp/current/ranger-usersync/conf/unixauthservice.jks",
+ "ranger.usersync.user.searchenabled": "false",
+ "ranger.usersync.group.usermapsyncenabled": "true",
+ "ranger.usersync.ldap.bindkeystore": "",
+ "ranger.usersync.ldap.user.groupnameattribute": "memberof, ismemberof",
+ "ranger.usersync.kerberos.keytab": "",
+ "ranger.usersync.passwordvalidator.path": "./native/credValidator.uexe",
+ "ranger.usersync.group.objectclass": "",
+ "ranger.usersync.ldap.user.searchscope": "sub",
+ "ranger.usersync.unix.password.file": "/etc/passwd",
+ "ranger.usersync.ldap.user.nameattribute": "",
+ "ranger.usersync.pagedresultsenabled": "true",
+ "ranger.usersync.policymanager.baseURL": "{{ranger_external_url}}",
+ "ranger.usersync.group.search.first.enabled": "false",
+ "ranger.usersync.group.searchenabled": "false",
+ "ranger.usersync.sink.impl.class": "org.apache.ranger.unixusersync.process.PolicyMgrUserGroupBuilder",
+ "ranger.usersync.ssl": "true",
+ "ranger.usersync.ldap.url": "",
+ "ranger.usersync.ldap.searchBase": "dc=hadoop,dc=apache,dc=org",
+ "ranger.usersync.policymgr.keystore": "/usr/hdp/current/ranger-usersync/conf/ugsync.jceks",
+ "ranger.usersync.ldap.user.searchbase": "",
+ "ranger.usersync.ldap.username.caseconversion": "none",
+ "ranger.usersync.credstore.filename": "/usr/hdp/current/ranger-usersync/conf/ugsync.jceks",
+ "ranger.usersync.keystore.password": "UnIx529p",
+ "ranger.usersync.unix.group.file": "/etc/group",
+ "ranger.usersync.filesource.file": "/tmp/usergroup.txt",
+ "ranger.usersync.group.searchscope": "",
+ "ranger.usersync.truststore.password": "changeit",
+ "ranger.usersync.enabled": "true",
+ "ranger.usersync.sleeptimeinmillisbetweensynccycle": "60000",
"ranger.usersync.filesource.text.delimiter": ","
- },
+ },
"hdfs-site": {
- "dfs.namenode.checkpoint.period": "21600",
- "dfs.namenode.avoid.write.stale.datanode": "true",
- "dfs.namenode.startup.delay.block.deletion.sec": "3600",
- "dfs.namenode.checkpoint.txns": "1000000",
- "dfs.content-summary.limit": "5000",
- "dfs.support.append": "true",
- "dfs.datanode.address": "0.0.0.0:50010",
- "dfs.cluster.administrators": " hdfs",
- "dfs.namenode.audit.log.async": "true",
- "dfs.datanode.balance.bandwidthPerSec": "6250000",
- "dfs.namenode.safemode.threshold-pct": "1",
- "dfs.namenode.checkpoint.edits.dir": "${dfs.namenode.checkpoint.dir}",
- "dfs.namenode.rpc-address": "c6401.ambari.apache.org:8020",
- "dfs.permissions.enabled": "true",
- "dfs.client.read.shortcircuit": "true",
- "dfs.https.port": "50470",
- "dfs.namenode.https-address": "c6401.ambari.apache.org:50470",
- "nfs.file.dump.dir": "/tmp/.hdfs-nfs",
- "dfs.blocksize": "134217728",
- "dfs.blockreport.initialDelay": "120",
- "dfs.journalnode.edits.dir": "/hadoop/hdfs/journalnode",
- "dfs.namenode.fslock.fair": "false",
- "dfs.datanode.max.transfer.threads": "4096",
- "dfs.heartbeat.interval": "3",
- "dfs.replication": "3",
- "dfs.namenode.handler.count": "50",
- "dfs.namenode.checkpoint.dir": "/grid/0/hadoop/hdfs/namesecondary",
- "fs.permissions.umask-mode": "022",
- "dfs.namenode.stale.datanode.interval": "30000",
- "dfs.datanode.ipc.address": "0.0.0.0:8010",
- "dfs.datanode.failed.volumes.tolerated": "0",
- "dfs.datanode.data.dir": "/grid/0/hadoop/hdfs/data",
- "dfs.namenode.http-address": "c6401.ambari.apache.org:50070",
- "dfs.webhdfs.enabled": "true",
- "dfs.encrypt.data.transfer.cipher.suites": "AES/CTR/NoPadding",
- "dfs.namenode.accesstime.precision": "0",
- "dfs.datanode.https.address": "0.0.0.0:50475",
- "dfs.namenode.write.stale.datanode.ratio": "1.0f",
- "dfs.namenode.secondary.http-address": "c6401.ambari.apache.org:50090",
- "nfs.exports.allowed.hosts": "* rw",
- "dfs.datanode.http.address": "0.0.0.0:50075",
- "dfs.datanode.du.reserved": "33011188224",
- "dfs.client.read.shortcircuit.streams.cache.size": "4096",
- "dfs.http.policy": "HTTP_ONLY",
- "dfs.block.access.token.enable": "true",
- "dfs.client.retry.policy.enabled": "false",
- "dfs.namenode.name.dir.restore": "true",
- "dfs.permissions.superusergroup": "hdfs",
- "dfs.journalnode.https-address": "0.0.0.0:8481",
- "dfs.journalnode.http-address": "0.0.0.0:8480",
- "dfs.domain.socket.path": "/var/lib/hadoop-hdfs/dn_socket",
- "dfs.namenode.avoid.read.stale.datanode": "true",
- "dfs.hosts.exclude": "/etc/hadoop/conf/dfs.exclude",
- "dfs.datanode.data.dir.perm": "750",
- "dfs.encryption.key.provider.uri": "",
- "dfs.replication.max": "50",
+ "dfs.namenode.checkpoint.period": "21600",
+ "dfs.namenode.avoid.write.stale.datanode": "true",
+ "dfs.namenode.startup.delay.block.deletion.sec": "3600",
+ "dfs.namenode.checkpoint.txns": "1000000",
+ "dfs.content-summary.limit": "5000",
+ "dfs.support.append": "true",
+ "dfs.datanode.address": "0.0.0.0:50010",
+ "dfs.cluster.administrators": " hdfs",
+ "dfs.namenode.audit.log.async": "true",
+ "dfs.datanode.balance.bandwidthPerSec": "6250000",
+ "dfs.namenode.safemode.threshold-pct": "1",
+ "dfs.namenode.checkpoint.edits.dir": "${dfs.namenode.checkpoint.dir}",
+ "dfs.namenode.rpc-address": "c6401.ambari.apache.org:8020",
+ "dfs.permissions.enabled": "true",
+ "dfs.client.read.shortcircuit": "true",
+ "dfs.https.port": "50470",
+ "dfs.namenode.https-address": "c6401.ambari.apache.org:50470",
+ "nfs.file.dump.dir": "/tmp/.hdfs-nfs",
+ "dfs.blocksize": "134217728",
+ "dfs.blockreport.initialDelay": "120",
+ "dfs.journalnode.edits.dir": "/hadoop/hdfs/journalnode",
+ "dfs.namenode.fslock.fair": "false",
+ "dfs.datanode.max.transfer.threads": "4096",
+ "dfs.heartbeat.interval": "3",
+ "dfs.replication": "3",
+ "dfs.namenode.handler.count": "50",
+ "dfs.namenode.checkpoint.dir": "/grid/0/hadoop/hdfs/namesecondary",
+ "fs.permissions.umask-mode": "022",
+ "dfs.namenode.stale.datanode.interval": "30000",
+ "dfs.datanode.ipc.address": "0.0.0.0:8010",
+ "dfs.datanode.failed.volumes.tolerated": "0",
+ "dfs.datanode.data.dir": "/grid/0/hadoop/hdfs/data",
+ "dfs.namenode.http-address": "c6401.ambari.apache.org:50070",
+ "dfs.webhdfs.enabled": "true",
+ "dfs.encrypt.data.transfer.cipher.suites": "AES/CTR/NoPadding",
+ "dfs.namenode.accesstime.precision": "0",
+ "dfs.datanode.https.address": "0.0.0.0:50475",
+ "dfs.namenode.write.stale.datanode.ratio": "1.0f",
+ "dfs.namenode.secondary.http-address": "c6401.ambari.apache.org:50090",
+ "nfs.exports.allowed.hosts": "* rw",
+ "dfs.datanode.http.address": "0.0.0.0:50075",
+ "dfs.datanode.du.reserved": "33011188224",
+ "dfs.client.read.shortcircuit.streams.cache.size": "4096",
+ "dfs.http.policy": "HTTP_ONLY",
+ "dfs.block.access.token.enable": "true",
+ "dfs.client.retry.policy.enabled": "false",
+ "dfs.namenode.name.dir.restore": "true",
+ "dfs.permissions.superusergroup": "hdfs",
+ "dfs.journalnode.https-address": "0.0.0.0:8481",
+ "dfs.journalnode.http-address": "0.0.0.0:8480",
+ "dfs.domain.socket.path": "/var/lib/hadoop-hdfs/dn_socket",
+ "dfs.namenode.avoid.read.stale.datanode": "true",
+ "dfs.hosts.exclude": "/etc/hadoop/conf/dfs.exclude",
+ "dfs.datanode.data.dir.perm": "750",
+ "dfs.encryption.key.provider.uri": "",
+ "dfs.replication.max": "50",
"dfs.namenode.name.dir": "/grid/0/hadoop/hdfs/namenode"
- },
+ },
"ranger-tagsync-site": {
"ranger.tagsync.dest.ranger.ssl.config.filename": "{{stack_root}}/current/ranger-tagsync/conf/ranger-policymgr-ssl.xml",
"ranger.tagsync.source.atlasrest.username": "",
- "ranger.tagsync.logdir": "/var/log/ranger/tagsync",
- "ranger.tagsync.source.atlasrest.download.interval.millis": "",
- "ranger.tagsync.keystore.filename": "/usr/hdp/current/ranger-tagsync/conf/rangertagsync.jceks",
- "ranger.tagsync.source.file.check.interval.millis": "",
- "ranger.tagsync.source.atlasrest.endpoint": "",
- "ranger.tagsync.dest.ranger.username": "rangertagsync",
- "ranger.tagsync.dest.ranger.endpoint": "{{ranger_external_url}}",
- "ranger.tagsync.kerberos.principal": "",
- "ranger.tagsync.kerberos.keytab": "",
- "ranger.tagsync.source.atlas": "false",
- "ranger.tagsync.source.atlasrest": "false",
- "ranger.tagsync.source.file": "false",
+ "ranger.tagsync.logdir": "/var/log/ranger/tagsync",
+ "ranger.tagsync.source.atlasrest.download.interval.millis": "",
+ "ranger.tagsync.keystore.filename": "/usr/hdp/current/ranger-tagsync/conf/rangertagsync.jceks",
+ "ranger.tagsync.source.file.check.interval.millis": "",
+ "ranger.tagsync.source.atlasrest.endpoint": "",
+ "ranger.tagsync.dest.ranger.username": "rangertagsync",
+ "ranger.tagsync.dest.ranger.endpoint": "{{ranger_external_url}}",
+ "ranger.tagsync.kerberos.principal": "",
+ "ranger.tagsync.kerberos.keytab": "",
+ "ranger.tagsync.source.atlas": "false",
+ "ranger.tagsync.source.atlasrest": "false",
+ "ranger.tagsync.source.file": "false",
"ranger.tagsync.source.file.filename": ""
- },
+ },
"zoo.cfg": {
- "clientPort": "2181",
- "autopurge.purgeInterval": "24",
- "syncLimit": "5",
- "dataDir": "/grid/0/hadoop/zookeeper",
- "initLimit": "10",
- "tickTime": "2000",
+ "clientPort": "2181",
+ "autopurge.purgeInterval": "24",
+ "syncLimit": "5",
+ "dataDir": "/grid/0/hadoop/zookeeper",
+ "initLimit": "10",
+ "tickTime": "2000",
"autopurge.snapRetainCount": "30"
- },
+ },
"hadoop-policy": {
- "security.job.client.protocol.acl": "*",
- "security.job.task.protocol.acl": "*",
- "security.datanode.protocol.acl": "*",
- "security.namenode.protocol.acl": "*",
- "security.client.datanode.protocol.acl": "*",
- "security.inter.tracker.protocol.acl": "*",
- "security.refresh.usertogroups.mappings.protocol.acl": "hadoop",
- "security.client.protocol.acl": "*",
- "security.refresh.policy.protocol.acl": "hadoop",
- "security.admin.operations.protocol.acl": "hadoop",
+ "security.job.client.protocol.acl": "*",
+ "security.job.task.protocol.acl": "*",
+ "security.datanode.protocol.acl": "*",
+ "security.namenode.protocol.acl": "*",
+ "security.client.datanode.protocol.acl": "*",
+ "security.inter.tracker.protocol.acl": "*",
+ "security.refresh.usertogroups.mappings.protocol.acl": "hadoop",
+ "security.client.protocol.acl": "*",
+ "security.refresh.policy.protocol.acl": "hadoop",
+ "security.admin.operations.protocol.acl": "hadoop",
"security.inter.datanode.protocol.acl": "*"
- },
+ },
"hdfs-log4j": {
"content": "\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n\n\n# Define some default values that can be overridden by system properties\n# To change daemon root logger use hadoop_root_logger in hadoop-env\nhadoop.root.lo
gger=INFO,console\nhadoop.log.dir=.\nhadoop.log.file=hadoop.log\n\n\n# Define the root logger to the system property \"hadoop.root.logger\".\nlog4j.rootLogger=${hadoop.root.logger}, EventCounter\n\n# Logging Threshold\nlog4j.threshhold=ALL\n\n#\n# Daily Rolling File Appender\n#\n\nlog4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}\n\n# Rollver at midnight\nlog4j.appender.DRFA.DatePattern=.yyyy-MM-dd\n\n# 30-day backup\n#log4j.appender.DRFA.MaxBackupIndex=30\nlog4j.appender.DRFA.layout=org.apache.log4j.PatternLayout\n\n# Pattern format: Date LogLevel LoggerName LogMessage\nlog4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\n# Debugging Pattern format\n#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n\n\n\n#\n# console\n# Add \"console\" to rootlogger above if you want to use this\n#\n\nlog4j.appender.console=org.apache.log4j.ConsoleAppender\nlog4j.appender
.console.target=System.err\nlog4j.appender.console.layout=org.apache.log4j.PatternLayout\nlog4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n\n\n#\n# TaskLog Appender\n#\n\n#Default values\nhadoop.tasklog.taskid=null\nhadoop.tasklog.iscleanup=false\nhadoop.tasklog.noKeepSplits=4\nhadoop.tasklog.totalLogFileSize=100\nhadoop.tasklog.purgeLogSplits=true\nhadoop.tasklog.logsRetainHours=12\n\nlog4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender\nlog4j.appender.TLA.taskId=${hadoop.tasklog.taskid}\nlog4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}\nlog4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}\n\nlog4j.appender.TLA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\n\n#\n#Security audit appender\n#\nhadoop.security.logger=INFO,console\nhadoop.security.log.maxfilesize=256MB\nhadoop.security.log.maxbackupindex=20\nlog4j.category.SecurityLogger=${hadoop.security.logg
er}\nhadoop.security.log.file=SecurityAuth.audit\nlog4j.appender.DRFAS=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}\nlog4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout\nlog4j.appender.DRFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\nlog4j.appender.DRFAS.DatePattern=.yyyy-MM-dd\n\nlog4j.appender.RFAS=org.apache.log4j.RollingFileAppender\nlog4j.appender.RFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}\nlog4j.appender.RFAS.layout=org.apache.log4j.PatternLayout\nlog4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\nlog4j.appender.RFAS.MaxFileSize=${hadoop.security.log.maxfilesize}\nlog4j.appender.RFAS.MaxBackupIndex=${hadoop.security.log.maxbackupindex}\n\n#\n# hdfs audit logging\n#\nhdfs.audit.logger=INFO,console\nlog4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger}\nlog4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=
false\nlog4j.appender.DRFAAUDIT=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFAAUDIT.File=${hadoop.log.dir}/hdfs-audit.log\nlog4j.appender.DRFAAUDIT.layout=org.apache.log4j.PatternLayout\nlog4j.appender.DRFAAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n\nlog4j.appender.DRFAAUDIT.DatePattern=.yyyy-MM-dd\n\n#\n# NameNode metrics logging.\n# The default is to retain two namenode-metrics.log files up to 64MB each.\n#\nnamenode.metrics.logger=INFO,NullAppender\nlog4j.logger.NameNodeMetricsLog=${namenode.metrics.logger}\nlog4j.additivity.NameNodeMetricsLog=false\nlog4j.appender.NNMETRICSRFA=org.apache.log4j.RollingFileAppender\nlog4j.appender.NNMETRICSRFA.File=${hadoop.log.dir}/namenode-metrics.log\nlog4j.appender.NNMETRICSRFA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.NNMETRICSRFA.layout.ConversionPattern=%d{ISO8601} %m%n\nlog4j.appender.NNMETRICSRFA.MaxBackupIndex=1\nlog4j.appender.NNMETRICSRFA.MaxFileSize=64MB\n\n#\n# mapred audit logging\n#\nmapred
.audit.logger=INFO,console\nlog4j.logger.org.apache.hadoop.mapred.AuditLogger=${mapred.audit.logger}\nlog4j.additivity.org.apache.hadoop.mapred.AuditLogger=false\nlog4j.appender.MRAUDIT=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.MRAUDIT.File=${hadoop.log.dir}/mapred-audit.log\nlog4j.appender.MRAUDIT.layout=org.apache.log4j.PatternLayout\nlog4j.appender.MRAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n\nlog4j.appender.MRAUDIT.DatePattern=.yyyy-MM-dd\n\n#\n# Rolling File Appender\n#\n\nlog4j.appender.RFA=org.apache.log4j.RollingFileAppender\nlog4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}\n\n# Logfile size and and 30-day backups\nlog4j.appender.RFA.MaxFileSize=256MB\nlog4j.appender.RFA.MaxBackupIndex=10\n\nlog4j.appender.RFA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} - %m%n\nlog4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n\n\n\n# Custom Logging leve
ls\n\nhadoop.metrics.log.level=INFO\n#log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG\n#log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG\n#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG\nlog4j.logger.org.apache.hadoop.metrics2=${hadoop.metrics.log.level}\n\n# Jets3t library\nlog4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR\n\n#\n# Null Appender\n# Trap security logger on the hadoop client side\n#\nlog4j.appender.NullAppender=org.apache.log4j.varia.NullAppender\n\n#\n# Event Counter Appender\n# Sends counts of logging messages at different severity levels to Hadoop Metrics.\n#\nlog4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter\n\n# Removes \"deprecated\" messages\nlog4j.logger.org.apache.hadoop.conf.Configuration.deprecation=WARN\n\n#\n# HDFS block state change log from block manager\n#\n# Uncomment the following to suppress normal block state change\n# messages from BlockManager in NameNode.\n#log4j.logger.BlockStateCh
ange=WARN"
- },
+ },
"ranger-hdfs-plugin-properties": {
- "hadoop.rpc.protection": "authentication",
- "ranger-hdfs-plugin-enabled": "No",
- "REPOSITORY_CONFIG_USERNAME": "hadoop",
- "policy_user": "ambari-qa",
- "common.name.for.certificate": "",
+ "hadoop.rpc.protection": "authentication",
+ "ranger-hdfs-plugin-enabled": "No",
+ "REPOSITORY_CONFIG_USERNAME": "hadoop",
+ "policy_user": "ambari-qa",
+ "common.name.for.certificate": "",
"REPOSITORY_CONFIG_PASSWORD": "hadoop"
- },
+ },
"core-site": {
- "hadoop.proxyuser.root.hosts": "c6401.ambari.apache.org",
- "io.serializations": "org.apache.hadoop.io.serializer.WritableSerialization",
- "fs.trash.interval": "360",
- "ha.failover-controller.active-standby-elector.zk.op.retries": "120",
- "hadoop.http.authentication.simple.anonymous.allowed": "true",
- "hadoop.security.authentication": "simple",
- "hadoop.proxyuser.root.groups": "*",
- "ipc.client.connection.maxidletime": "30000",
- "hadoop.security.key.provider.path": "",
- "mapreduce.jobtracker.webinterface.trusted": "false",
- "hadoop.security.authorization": "false",
- "net.topology.script.file.name": "/etc/hadoop/conf/topology_script.py",
- "ipc.server.tcpnodelay": "true",
- "ipc.client.connect.max.retries": "50",
- "hadoop.security.auth_to_local": "DEFAULT",
- "io.file.buffer.size": "131072",
- "hadoop.proxyuser.hdfs.hosts": "*",
- "hadoop.proxyuser.hdfs.groups": "*",
- "ipc.client.idlethreshold": "8000",
- "fs.defaultFS": "hdfs://c6401.ambari.apache.org:8020",
+ "hadoop.proxyuser.root.hosts": "c6401.ambari.apache.org",
+ "io.serializations": "org.apache.hadoop.io.serializer.WritableSerialization",
+ "fs.trash.interval": "360",
+ "ha.failover-controller.active-standby-elector.zk.op.retries": "120",
+ "hadoop.http.authentication.simple.anonymous.allowed": "true",
+ "hadoop.security.authentication": "simple",
+ "hadoop.proxyuser.root.groups": "*",
+ "ipc.client.connection.maxidletime": "30000",
+ "hadoop.security.key.provider.path": "",
+ "mapreduce.jobtracker.webinterface.trusted": "false",
+ "hadoop.security.authorization": "false",
+ "net.topology.script.file.name": "/etc/hadoop/conf/topology_script.py",
+ "ipc.server.tcpnodelay": "true",
+ "ipc.client.connect.max.retries": "50",
+ "hadoop.security.auth_to_local": "DEFAULT",
+ "io.file.buffer.size": "131072",
+ "hadoop.proxyuser.hdfs.hosts": "*",
+ "hadoop.proxyuser.hdfs.groups": "*",
+ "ipc.client.idlethreshold": "8000",
+ "fs.defaultFS": "hdfs://c6401.ambari.apache.org:8020",
"io.compression.codecs": "org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec,org.apache.hadoop.io.compress.SnappyCodec"
- },
+ },
"hadoop-env": {
- "keyserver_port": "",
- "proxyuser_group": "users",
- "hdfs_user_nproc_limit": "65536",
- "hdfs_log_dir_prefix": "/var/log/hadoop",
- "hdfs_user_nofile_limit": "128000",
- "hdfs_user": "hdfs",
- "keyserver_host": " ",
- "namenode_opt_maxnewsize": "128m",
- "namenode_opt_maxpermsize": "256m",
- "content": "\n# Set Hadoop-specific environment variables here.\n\n# The only required environment variable is JAVA_HOME. All others are\n# optional. When running a distributed configuration it is best to\n# set JAVA_HOME in this file, so that it is correctly defined on\n# remote nodes.\n\n# The java implementation to use. Required.\nexport JAVA_HOME={{java_home}}\nexport HADOOP_HOME_WARN_SUPPRESS=1\n\n# Hadoop home directory\nexport HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n\n# Hadoop Configuration Directory\n\n{# this is different for HDP1 #}\n# Path to jsvc required by secure HDP 2.0 datanode\nexport JSVC_HOME={{jsvc_path}}\n\n\n# The maximum amount of heap to use, in MB. Default is 1000.\nexport HADOOP_HEAPSIZE=\"{{hadoop_heapsize}}\"\n\nexport HADOOP_NAMENODE_INIT_HEAPSIZE=\"-Xms{{namenode_heapsize}}\"\n\n# Extra Java runtime options. Empty by default.\nexport HADOOP_OPTS=\"-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}\"\n\n# Command specific options appende
d to HADOOP_OPTS when specified\nHADOOP_JOBTRACKER_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}\"\n\nHADOOP_TASKTRACKER_OPTS=\"-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}\"\n\n{% if java_version < 8 %}\nSHARED_HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSiz
e={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT\"\nexport HADOOP_NAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-namenode/bin/kill-name-node\\\" -Dorg.mortbay.jetty.Request.maxFormContentSize=-1 ${HADOOP_NAMENODE_OPTS}\"\nexport HADOOP_DATANODE_OPTS=\"-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -XX:PermSize=128m -XX:MaxPermSize=256m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms
{{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS}\"\n\nexport HADOOP_SECONDARYNAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-secondarynamenode/bin/kill-secondary-name-node\\\" ${HADOOP_SECONDARYNAMENODE_OPTS}\"\n\n# The following applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m -XX:MaxPermSize=512m $HADOOP_CLIENT_OPTS\"\n\n{% else %}\nSHARED_HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -Xms{{name
node_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT\"\nexport HADOOP_NAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-namenode/bin/kill-name-node\\\" -Dorg.mortbay.jetty.Request.maxFormContentSize=-1 ${HADOOP_NAMENODE_OPTS}\"\nexport HADOOP_DATANODE_OPTS=\"-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS}\"\n\nexport HADOOP_SECONDARYNAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-secondarynamenode/bin/kill-secondary-name-node\\\" ${HADOOP_SECONDARY
NAMENODE_OPTS}\"\n\n# The following applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS\"\n{% endif %}\n\nHADOOP_NFS3_OPTS=\"-Xmx{{nfsgateway_heapsize}}m -Dhadoop.security.logger=ERROR,DRFAS ${HADOOP_NFS3_OPTS}\"\nHADOOP_BALANCER_OPTS=\"-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}\"\n\n\n# On secure datanodes, user to run the datanode as after dropping privileges\nexport HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER:-{{hadoop_secure_dn_user}}}\n\n# Extra ssh options. Empty by default.\nexport HADOOP_SSH_OPTS=\"-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR\"\n\n# Where log files are stored. $HADOOP_HOME/logs by default.\nexport HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER\n\n# History server logs\nexport HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER\n\n# Where log files are stored in the secure data environment.\nexport HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_D
N_USER\n\n# File naming remote slave hosts. $HADOOP_HOME/conf/slaves by default.\n# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves\n\n# host:path where hadoop code should be rsync'd from. Unset by default.\n# export HADOOP_MASTER=master:/home/$USER/src/hadoop\n\n# Seconds to sleep between slave commands. Unset by default. This\n# can be useful in large clusters, where, e.g., slave rsyncs can\n# otherwise arrive faster than the master can service them.\n# export HADOOP_SLAVE_SLEEP=0.1\n\n# The directory where pid files are stored. /tmp by default.\nexport HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER\nexport HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# History server pid\nexport HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER\n\nYARN_RESOURCEMANAGER_OPTS=\"-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY\"\n\n# A string representing this instance of hadoop. $USER by default.\nexport HADOOP_IDENT_STRING=$USER\n\n# The schedul
ing priority for daemon processes. See 'man nice'.\n\n# export HADOOP_NICENESS=10\n\n# Add database libraries\nJAVA_JDBC_LIBS=\"\"\nif [ -d \"/usr/share/java\" ]; then\n for jarFile in `ls /usr/share/java | grep -E \"(mysql|ojdbc|postgresql|sqljdbc)\" 2>/dev/null`\n do\n JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\n done\nfi\n\n# Add libraries to the hadoop classpath - some may not need a colon as they already include it\nexport HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}\n\n# Setting path to hdfs command line\nexport HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}\n\n# Mostly required for hadoop 2.0\nexport JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}\n\nexport HADOOP_OPTS=\"-Dhdp.version=$HDP_VERSION $HADOOP_OPTS\"\n\n{% if is_datanode_max_locked_memory_set %}\n# Fix temporary bug, when ulimit from conf files is not picked up, without full relogin. \n# Makes sense to fix only when runing DN as root \nif [ \"$command\" == \"datanode\" ] && [ \"$EUID\" -eq 0 ] && [ -n \"$HADOOP_S
ECURE_DN_USER\" ]; then\n ulimit -l {{datanode_max_locked_memory}}\nfi\n{% endif %}",
- "namenode_heapsize": "1024m",
- "namenode_opt_newsize": "128m",
- "nfsgateway_heapsize": "1024",
- "dtnode_heapsize": "1024m",
- "hadoop_root_logger": "INFO,RFA",
- "hadoop_heapsize": "1024",
- "hadoop_pid_dir_prefix": "/var/run/hadoop",
- "namenode_opt_permsize": "128m",
+ "keyserver_port": "",
+ "proxyuser_group": "users",
+ "hdfs_user_nproc_limit": "65536",
+ "hdfs_log_dir_prefix": "/var/log/hadoop",
+ "hdfs_user_nofile_limit": "128000",
+ "hdfs_user": "hdfs",
+ "keyserver_host": " ",
+ "namenode_opt_maxnewsize": "128m",
+ "namenode_opt_maxpermsize": "256m",
+ "content": "\n# Set Hadoop-specific environment variables here.\n\n# The only required environment variable is JAVA_HOME. All others are\n# optional. When running a distributed configuration it is best to\n# set JAVA_HOME in this file, so that it is correctly defined on\n# remote nodes.\n\n# The java implementation to use. Required.\nexport JAVA_HOME={{java_home}}\nexport HADOOP_HOME_WARN_SUPPRESS=1\n\n# Hadoop home directory\nexport HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n\n# Hadoop Configuration Directory\n\n{# this is different for HDP1 #}\n# Path to jsvc required by secure HDP 2.0 datanode\nexport JSVC_HOME={{jsvc_path}}\n\n\n# The maximum amount of heap to use, in MB. Default is 1000.\nexport HADOOP_HEAPSIZE=\"{{hadoop_heapsize}}\"\n\nexport HADOOP_NAMENODE_INIT_HEAPSIZE=\"-Xms{{namenode_heapsize}}\"\n\n# Extra Java runtime options. Empty by default.\nexport HADOOP_OPTS=\"-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}\"\n\n# Command specific options appende
d to HADOOP_OPTS when specified\nHADOOP_JOBTRACKER_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}\"\n\nHADOOP_TASKTRACKER_OPTS=\"-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}\"\n\n{% if java_version < 8 %}\nSHARED_HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSiz
e={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT\"\nexport HADOOP_NAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-namenode/bin/kill-name-node\\\" -Dorg.mortbay.jetty.Request.maxFormContentSize=-1 ${HADOOP_NAMENODE_OPTS}\"\nexport HADOOP_DATANODE_OPTS=\"-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -XX:PermSize=128m -XX:MaxPermSize=256m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms
{{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS}\"\n\nexport HADOOP_SECONDARYNAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-secondarynamenode/bin/kill-secondary-name-node\\\" ${HADOOP_SECONDARYNAMENODE_OPTS}\"\n\n# The following applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m -XX:MaxPermSize=512m $HADOOP_CLIENT_OPTS\"\n\n{% else %}\nSHARED_HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -Xms{{name
node_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT\"\nexport HADOOP_NAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-namenode/bin/kill-name-node\\\" -Dorg.mortbay.jetty.Request.maxFormContentSize=-1 ${HADOOP_NAMENODE_OPTS}\"\nexport HADOOP_DATANODE_OPTS=\"-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS}\"\n\nexport HADOOP_SECONDARYNAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-secondarynamenode/bin/kill-secondary-name-node\\\" ${HADOOP_SECONDARY
NAMENODE_OPTS}\"\n\n# The following applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS\"\n{% endif %}\n\nHADOOP_NFS3_OPTS=\"-Xmx{{nfsgateway_heapsize}}m -Dhadoop.security.logger=ERROR,DRFAS ${HADOOP_NFS3_OPTS}\"\nHADOOP_BALANCER_OPTS=\"-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}\"\n\n\n# On secure datanodes, user to run the datanode as after dropping privileges\nexport HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER:-{{hadoop_secure_dn_user}}}\n\n# Extra ssh options. Empty by default.\nexport HADOOP_SSH_OPTS=\"-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR\"\n\n# Where log files are stored. $HADOOP_HOME/logs by default.\nexport HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER\n\n# History server logs\nexport HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER\n\n# Where log files are stored in the secure data environment.\nexport HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_D
N_USER\n\n# File naming remote slave hosts. $HADOOP_HOME/conf/slaves by default.\n# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves\n\n# host:path where hadoop code should be rsync'd from. Unset by default.\n# export HADOOP_MASTER=master:/home/$USER/src/hadoop\n\n# Seconds to sleep between slave commands. Unset by default. This\n# can be useful in large clusters, where, e.g., slave rsyncs can\n# otherwise arrive faster than the master can service them.\n# export HADOOP_SLAVE_SLEEP=0.1\n\n# The directory where pid files are stored. /tmp by default.\nexport HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER\nexport HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# History server pid\nexport HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER\n\nYARN_RESOURCEMANAGER_OPTS=\"-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY\"\n\n# A string representing this instance of hadoop. $USER by default.\nexport HADOOP_IDENT_STRING=$USER\n\n# The schedul
ing priority for daemon processes. See 'man nice'.\n\n# export HADOOP_NICENESS=10\n\n# Add database libraries\nJAVA_JDBC_LIBS=\"\"\nif [ -d \"/usr/share/java\" ]; then\n for jarFile in `ls /usr/share/java | grep -E \"(mysql|ojdbc|postgresql|sqljdbc)\" 2>/dev/null`\n do\n JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\n done\nfi\n\n# Add libraries to the hadoop classpath - some may not need a colon as they already include it\nexport HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}\n\n# Setting path to hdfs command line\nexport HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}\n\n# Mostly required for hadoop 2.0\nexport JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}\n\nexport HADOOP_OPTS=\"-Dhdp.version=$HDP_VERSION $HADOOP_OPTS\"\n\n{% if is_datanode_max_locked_memory_set %}\n# Fix temporary bug, when ulimit from conf files is not picked up, without full relogin. \n# Makes sense to fix only when runing DN as root \nif [ \"$command\" == \"datanode\" ] && [ \"$EUID\" -eq 0 ] && [ -n \"$HADOOP_S
ECURE_DN_USER\" ]; then\n ulimit -l {{datanode_max_locked_memory}}\nfi\n{% endif %}",
+ "namenode_heapsize": "1024m",
+ "namenode_opt_newsize": "128m",
+ "nfsgateway_heapsize": "1024",
+ "dtnode_heapsize": "1024m",
+ "hadoop_root_logger": "INFO,RFA",
+ "hadoop_heapsize": "1024",
+ "hadoop_pid_dir_prefix": "/var/run/hadoop",
+ "namenode_opt_permsize": "128m",
"hdfs_tmp_dir": "/tmp"
- },
+ },
"zookeeper-log4j": {
"content": "\n#\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n#\n#\n\n#\n# ZooKeeper Logging Configuration\n#\n\n# DEFAULT: console appender only\nlog4j.rootLogger=INFO, CONSOLE\n\n# Example with rolling log file\n#log4
j.rootLogger=DEBUG, CONSOLE, ROLLINGFILE\n\n# Example with rolling log file and tracing\n#log4j.rootLogger=TRACE, CONSOLE, ROLLINGFILE, TRACEFILE\n\n#\n# Log INFO level and above messages to the console\n#\nlog4j.appender.CONSOLE=org.apache.log4j.ConsoleAppender\nlog4j.appender.CONSOLE.Threshold=INFO\nlog4j.appender.CONSOLE.layout=org.apache.log4j.PatternLayout\nlog4j.appender.CONSOLE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L] - %m%n\n\n#\n# Add ROLLINGFILE to rootLogger to get log file output\n# Log DEBUG level and above messages to a log file\nlog4j.appender.ROLLINGFILE=org.apache.log4j.RollingFileAppender\nlog4j.appender.ROLLINGFILE.Threshold=DEBUG\nlog4j.appender.ROLLINGFILE.File=zookeeper.log\n\n# Max log file size of 10MB\nlog4j.appender.ROLLINGFILE.MaxFileSize=10MB\n# uncomment the next line to limit number of backup files\n#log4j.appender.ROLLINGFILE.MaxBackupIndex=10\n\nlog4j.appender.ROLLINGFILE.layout=org.apache.log4j.PatternLayout\nlog4j.appender.ROLLIN
GFILE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L] - %m%n\n\n\n#\n# Add TRACEFILE to rootLogger to get log file output\n# Log DEBUG level and above messages to a log file\nlog4j.appender.TRACEFILE=org.apache.log4j.FileAppender\nlog4j.appender.TRACEFILE.Threshold=TRACE\nlog4j.appender.TRACEFILE.File=zookeeper_trace.log\n\nlog4j.appender.TRACEFILE.layout=org.apache.log4j.PatternLayout\n### Notice we are including log4j's NDC here (%x)\nlog4j.appender.TRACEFILE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L][%x] - %m%n"
- },
+ },
"ssl-server": {
- "ssl.server.keystore.location": "/etc/security/serverKeys/keystore.jks",
- "ssl.server.keystore.keypassword": "bigdata",
- "ssl.server.truststore.location": "/etc/security/serverKeys/all.jks",
- "ssl.server.keystore.password": "bigdata",
- "ssl.server.truststore.password": "bigdata",
- "ssl.server.truststore.type": "jks",
- "ssl.server.keystore.type": "jks",
+ "ssl.server.keystore.location": "/etc/security/serverKeys/keystore.jks",
+ "ssl.server.keystore.keypassword": "bigdata",
+ "ssl.server.truststore.location": "/etc/security/serverKeys/all.jks",
+ "ssl.server.keystore.password": "bigdata",
+ "ssl.server.truststore.password": "bigdata",
+ "ssl.server.truststore.type": "jks",
+ "ssl.server.keystore.type": "jks",
"ssl.server.truststore.reload.interval": "10000"
- },
- "ranger-site": {},
+ },
+ "ranger-site": {},
"admin-log4j": {
"content": "\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n\nlog4j.rootLogger = warn,xa_log_appender\n\n\n# xa_logger\nlog4j.appender.xa_log_appender=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.xa_log_app
ender.file=${logdir}/xa_portal.log\nlog4j.appender.xa_log_appender.datePattern='.'yyyy-MM-dd\nlog4j.appender.xa_log_appender.append=true\nlog4j.appender.xa_log_appender.layout=org.apache.log4j.PatternLayout\nlog4j.appender.xa_log_appender.layout.ConversionPattern=%d [%t] %-5p %C{6} (%F:%L) - %m%n\n# xa_log_appender : category and additivity\nlog4j.category.org.springframework=warn,xa_log_appender\nlog4j.additivity.org.springframework=false\n\nlog4j.category.org.apache.ranger=info,xa_log_appender\nlog4j.additivity.org.apache.ranger=false\n\nlog4j.category.xa=info,xa_log_appender\nlog4j.additivity.xa=false\n\n# perf_logger\nlog4j.appender.perf_appender=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.perf_appender.file=${logdir}/ranger_admin_perf.log\nlog4j.appender.perf_appender.datePattern='.'yyyy-MM-dd\nlog4j.appender.perf_appender.append=true\nlog4j.appender.perf_appender.layout=org.apache.log4j.PatternLayout\nlog4j.appender.perf_appender.layout.ConversionPattern=%d [%t]
%m%n\n\n\n# sql_appender\nlog4j.appender.sql_appender=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.sql_appender.file=${logdir}/xa_portal_sql.log\nlog4j.appender.sql_appender.datePattern='.'yyyy-MM-dd\nlog4j.appender.sql_appender.append=true\nlog4j.appender.sql_appender.layout=org.apache.log4j.PatternLayout\nlog4j.appender.sql_appender.layout.ConversionPattern=%d [%t] %-5p %C{6} (%F:%L) - %m%n\n\n# sql_appender : category and additivity\nlog4j.category.org.hibernate.SQL=warn,sql_appender\nlog4j.additivity.org.hibernate.SQL=false\n\nlog4j.category.jdbc.sqlonly=fatal,sql_appender\nlog4j.additivity.jdbc.sqlonly=false\n\nlog4j.category.jdbc.sqltiming=warn,sql_appender\nlog4j.additivity.jdbc.sqltiming=false\n\nlog4j.category.jdbc.audit=fatal,sql_appender\nlog4j.additivity.jdbc.audit=false\n\nlog4j.category.jdbc.resultset=fatal,sql_appender\nlog4j.additivity.jdbc.resultset=false\n\nlog4j.category.jdbc.connection=fatal,sql_appender\nlog4j.additivity.jdbc.connection=false"
- },
+ },
"tagsync-log4j": {
"content": "\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n\nlog4j.rootLogger = info,logFile\n\n# logFile\nlog4j.appender.logFile=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.logFile.file=${logdir}/tagsync
.log\nlog4j.appender.logFile.datePattern='.'yyyy-MM-dd\nlog4j.appender.logFile.layout=org.apache.log4j.PatternLayout\nlog4j.appender.logFile.layout.ConversionPattern=%d{dd MMM yyyy HH:mm:ss} %5p %c{1} [%t] - %L %m%n\n\n# console\nlog4j.appender.console=org.apache.log4j.ConsoleAppender\nlog4j.appender.console.Target=System.out\nlog4j.appender.console.layout=org.apache.log4j.PatternLayout\nlog4j.appender.console.layout.ConversionPattern=%d{dd MMM yyyy HH:mm:ss} %5p %c{1} [%t] - %L %m%n"
- },
+ },
"ranger-hdfs-security": {
- "ranger.plugin.hdfs.policy.source.impl": "org.apache.ranger.admin.client.RangerAdminRESTClient",
- "ranger.plugin.hdfs.service.name": "{{repo_name}}",
- "ranger.plugin.hdfs.policy.cache.dir": "/etc/ranger/{{repo_name}}/policycache",
- "ranger.plugin.hdfs.policy.pollIntervalMs": "30000",
- "ranger.plugin.hdfs.policy.rest.url": "{{policymgr_mgr_url}}",
- "ranger.plugin.hdfs.policy.rest.ssl.config.file": "/etc/hadoop/conf/ranger-policymgr-ssl.xml",
+ "ranger.plugin.hdfs.policy.source.impl": "org.apache.ranger.admin.client.RangerAdminRESTClient",
+ "ranger.plugin.hdfs.service.name": "{{repo_name}}",
+ "ranger.plugin.hdfs.policy.cache.dir": "/etc/ranger/{{repo_name}}/policycache",
+ "ranger.plugin.hdfs.policy.pollIntervalMs": "30000",
+ "ranger.plugin.hdfs.policy.rest.url": "{{policymgr_mgr_url}}",
+ "ranger.plugin.hdfs.policy.rest.ssl.config.file": "/etc/hadoop/conf/ranger-policymgr-ssl.xml",
"xasecure.add-hadoop-authorization": "true"
- },
- "usersync-properties": {},
+ },
+ "usersync-properties": {},
"zookeeper-env": {
- "zk_log_dir": "/var/log/zookeeper",
- "content": "\nexport JAVA_HOME={{java64_home}}\nexport ZOOKEEPER_HOME={{zk_home}}\nexport ZOO_LOG_DIR={{zk_log_dir}}\nexport ZOOPIDFILE={{zk_pid_file}}\nexport SERVER_JVMFLAGS={{zk_server_heapsize}}\nexport JAVA=$JAVA_HOME/bin/java\nexport CLASSPATH=$CLASSPATH:/usr/share/zookeeper/*\n\n{% if security_enabled %}\nexport SERVER_JVMFLAGS=\"$SERVER_JVMFLAGS -Djava.security.auth.login.config={{zk_server_jaas_file}}\"\nexport CLIENT_JVMFLAGS=\"$CLIENT_JVMFLAGS -Djava.security.auth.login.config={{zk_client_jaas_file}}\"\n{% endif %}",
- "zk_server_heapsize": "1024m",
- "zk_pid_dir": "/var/run/zookeeper",
+ "zk_log_dir": "/var/log/zookeeper",
+ "content": "\nexport JAVA_HOME={{java64_home}}\nexport ZOOKEEPER_HOME={{zk_home}}\nexport ZOO_LOG_DIR={{zk_log_dir}}\nexport ZOOPIDFILE={{zk_pid_file}}\nexport SERVER_JVMFLAGS={{zk_server_heapsize}}\nexport JAVA=$JAVA_HOME/bin/java\nexport CLASSPATH=$CLASSPATH:/usr/share/zookeeper/*\n\n{% if security_enabled %}\nexport SERVER_JVMFLAGS=\"$SERVER_JVMFLAGS -Djava.security.auth.login.config={{zk_server_jaas_file}}\"\nexport CLIENT_JVMFLAGS=\"$CLIENT_JVMFLAGS -Djava.security.auth.login.config={{zk_client_jaas_file}}\"\n{% endif %}",
+ "zk_server_heapsize": "1024m",
+ "zk_pid_dir": "/var/run/zookeeper",
"zk_user": "zookeeper"
},
"infra-solr-env": {
@@ -682,7 +682,7 @@
"infra_solr_kerberos_name_rules": "DEFAULT",
"infra_solr_user": "infra-solr",
"infra_solr_maxmem": "1024",
- "content": "#!/bin/bash\n# Licensed to the Apache Software Foundation (ASF) under one or more\n# contributor license agreements. See the NOTICE file distributed with\n# this work for additional information regarding copyright ownership.\n# The ASF licenses this file to You under the Apache License, Version 2.0\n# (the \"License\"); you may not use this file except in compliance with\n# the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# By default the script will use JAVA_HOME to determine which java\n# to use, but you can set a specific path for Solr to use without\n# affecting other Java applica
tions on your server/workstation.\nSOLR_JAVA_HOME
<TRUNCATED>
[12/18] ambari git commit: AMBARI-21430. Allow Multiple Versions of Stack Tools to Co-Exist - fix illegal import
Posted by rl...@apache.org.
AMBARI-21430. Allow Multiple Versions of Stack Tools to Co-Exist - fix illegal import
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/d0f7a515
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/d0f7a515
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/d0f7a515
Branch: refs/heads/branch-feature-AMBARI-20859
Commit: d0f7a51537469740e5397486b1e2c19862c26c01
Parents: f33a250
Author: Attila Doroszlai <ad...@hortonworks.com>
Authored: Sun Jul 9 12:15:28 2017 +0200
Committer: Attila Doroszlai <ad...@hortonworks.com>
Committed: Sun Jul 9 12:16:54 2017 +0200
----------------------------------------------------------------------
.../java/org/apache/ambari/server/upgrade/UpgradeCatalog252.java | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ambari/blob/d0f7a515/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog252.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog252.java b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog252.java
index fa3aea3..0656f68 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog252.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog252.java
@@ -29,7 +29,7 @@ import org.apache.ambari.server.state.Clusters;
import org.apache.ambari.server.state.Config;
import org.apache.ambari.server.state.ConfigHelper;
import org.apache.ambari.server.state.PropertyInfo;
-import org.apache.hadoop.metrics2.sink.relocated.commons.lang.StringUtils;
+import org.apache.commons.lang.StringUtils;
import com.google.common.collect.Sets;
import com.google.inject.Inject;
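A note on why this one-line change matters: the removed import points at a copy of StringUtils that was shaded and relocated into the Hadoop metrics2 sink artifact, so compiling the server against it couples UpgradeCatalog252 to a class that is not a declared dependency and is not guaranteed to be on the runtime classpath. The replacement uses the ordinary commons-lang coordinate. Below is a minimal, self-contained sketch of the kind of call site this keeps compiling; it is for illustration only (the class and method names are made up, and it assumes commons-lang 2.x is on the classpath), not a reproduction of the UpgradeCatalog252 code.

    // Correct import: the plain commons-lang artifact, not a class relocated
    // into another project's shaded jar.
    import org.apache.commons.lang.StringUtils;

    public class StackToolsPropertyCheck {

      // Hypothetical helper: normalize a config property value the way
      // StringUtils.isBlank is typically used, treating null, "" and
      // whitespace-only strings uniformly.
      static String normalize(String value) {
        if (StringUtils.isBlank(value)) {
          return null;
        }
        return value.trim();
      }

      public static void main(String[] args) {
        System.out.println(normalize("  HDP-2.5  ")); // prints "HDP-2.5"
        System.out.println(normalize("   "));         // prints "null"
      }
    }

The design point is simply that only the import line changes; every existing StringUtils call keeps the same signature, which is why the fix is a one-line diff.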
[07/18] ambari git commit: AMBARI-21430 - Allow Multiple Versions of
Stack Tools to Co-Exist (jonathanhurley)
Posted by rl...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/f33a250c/ambari-server/src/test/python/stacks/2.5/configs/ranger-kms-default.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.5/configs/ranger-kms-default.json b/ambari-server/src/test/python/stacks/2.5/configs/ranger-kms-default.json
index 05cb78a..cafbede 100644
--- a/ambari-server/src/test/python/stacks/2.5/configs/ranger-kms-default.json
+++ b/ambari-server/src/test/python/stacks/2.5/configs/ranger-kms-default.json
@@ -1,55 +1,55 @@
{
"localComponents": [
- "SECONDARY_NAMENODE",
- "HDFS_CLIENT",
- "DATANODE",
- "NAMENODE",
- "ZOOKEEPER_SERVER",
- "ZOOKEEPER_CLIENT",
- "RANGER_USERSYNC",
- "RANGER_ADMIN",
+ "SECONDARY_NAMENODE",
+ "HDFS_CLIENT",
+ "DATANODE",
+ "NAMENODE",
+ "ZOOKEEPER_SERVER",
+ "ZOOKEEPER_CLIENT",
+ "RANGER_USERSYNC",
+ "RANGER_ADMIN",
"RANGER_TAGSYNC",
"RANGER_KMS_SERVER"
- ],
+ ],
"configuration_attributes": {
- "ranger-hdfs-audit": {},
- "ssl-client": {},
- "ranger-admin-site": {},
- "ranger-hdfs-policymgr-ssl": {},
- "tagsync-application-properties": {},
- "ranger-env": {},
- "usersync-log4j": {},
- "admin-properties": {},
- "ranger-ugsync-site": {},
+ "ranger-hdfs-audit": {},
+ "ssl-client": {},
+ "ranger-admin-site": {},
+ "ranger-hdfs-policymgr-ssl": {},
+ "tagsync-application-properties": {},
+ "ranger-env": {},
+ "usersync-log4j": {},
+ "admin-properties": {},
+ "ranger-ugsync-site": {},
"hdfs-site": {
"final": {
- "dfs.datanode.data.dir": "true",
- "dfs.namenode.http-address": "true",
- "dfs.datanode.failed.volumes.tolerated": "true",
- "dfs.support.append": "true",
- "dfs.namenode.name.dir": "true",
+ "dfs.datanode.data.dir": "true",
+ "dfs.namenode.http-address": "true",
+ "dfs.datanode.failed.volumes.tolerated": "true",
+ "dfs.support.append": "true",
+ "dfs.namenode.name.dir": "true",
"dfs.webhdfs.enabled": "true"
}
- },
- "ranger-tagsync-site": {},
- "zoo.cfg": {},
- "hadoop-policy": {},
- "hdfs-log4j": {},
- "ranger-hdfs-plugin-properties": {},
+ },
+ "ranger-tagsync-site": {},
+ "zoo.cfg": {},
+ "hadoop-policy": {},
+ "hdfs-log4j": {},
+ "ranger-hdfs-plugin-properties": {},
"core-site": {
"final": {
"fs.defaultFS": "true"
}
- },
- "hadoop-env": {},
- "zookeeper-log4j": {},
- "ssl-server": {},
- "ranger-site": {},
- "admin-log4j": {},
- "tagsync-log4j": {},
- "ranger-hdfs-security": {},
- "usersync-properties": {},
- "zookeeper-env": {},
+ },
+ "hadoop-env": {},
+ "zookeeper-log4j": {},
+ "ssl-server": {},
+ "ranger-site": {},
+ "admin-log4j": {},
+ "tagsync-log4j": {},
+ "ranger-hdfs-security": {},
+ "usersync-properties": {},
+ "zookeeper-env": {},
"cluster-env": {},
"dbks-site": {},
"kms-env": {},
@@ -60,744 +60,744 @@
"ranger-kms-site": {},
"ranger-kms-policymgr-ssl": {},
"ranger-kms-audit": {}
- },
- "public_hostname": "c6401.ambari.apache.org",
- "commandId": "9-1",
- "hostname": "c6401.ambari.apache.org",
- "kerberosCommandParams": [],
- "serviceName": "RANGER_KMS",
- "role": "RANGER_KMS_SERVER",
- "forceRefreshConfigTagsBeforeExecution": [],
- "requestId": 9,
+ },
+ "public_hostname": "c6401.ambari.apache.org",
+ "commandId": "9-1",
+ "hostname": "c6401.ambari.apache.org",
+ "kerberosCommandParams": [],
+ "serviceName": "RANGER_KMS",
+ "role": "RANGER_KMS_SERVER",
+ "forceRefreshConfigTagsBeforeExecution": [],
+ "requestId": 9,
"agentConfigParams": {
"agent": {
"parallel_execution": 0
}
- },
- "clusterName": "c1",
- "commandType": "EXECUTION_COMMAND",
- "taskId": 64,
- "roleParams": {},
+ },
+ "clusterName": "c1",
+ "commandType": "EXECUTION_COMMAND",
+ "taskId": 64,
+ "roleParams": {},
"configurationTags": {
"ranger-hdfs-audit": {
"tag": "version1466427664617"
- },
+ },
"ssl-client": {
"tag": "version1"
- },
+ },
"ranger-admin-site": {
"tag": "version1466427664621"
- },
+ },
"ranger-hdfs-policymgr-ssl": {
"tag": "version1466427664617"
- },
+ },
"tagsync-application-properties": {
"tag": "version1466427664621"
- },
+ },
"ranger-env": {
"tag": "version1466427664621"
- },
+ },
"usersync-log4j": {
"tag": "version1466427664621"
- },
+ },
"admin-properties": {
"tag": "version1466427664621"
- },
+ },
"ranger-ugsync-site": {
"tag": "version1466427664621"
- },
+ },
"hdfs-site": {
"tag": "version1"
- },
+ },
"ranger-tagsync-site": {
"tag": "version1466427664621"
- },
+ },
"zoo.cfg": {
"tag": "version1"
- },
+ },
"hadoop-policy": {
"tag": "version1"
- },
+ },
"hdfs-log4j": {
"tag": "version1"
- },
+ },
"ranger-hdfs-plugin-properties": {
"tag": "version1466427664617"
- },
+ },
"core-site": {
"tag": "version1"
- },
+ },
"hadoop-env": {
"tag": "version1"
- },
+ },
"zookeeper-log4j": {
"tag": "version1"
- },
+ },
"ssl-server": {
"tag": "version1"
- },
+ },
"ranger-site": {
"tag": "version1466427664621"
- },
+ },
"admin-log4j": {
"tag": "version1466427664621"
- },
+ },
"tagsync-log4j": {
"tag": "version1466427664621"
- },
+ },
"ranger-hdfs-security": {
"tag": "version1466427664617"
- },
+ },
"usersync-properties": {
"tag": "version1466427664621"
- },
+ },
"zookeeper-env": {
"tag": "version1"
- },
+ },
"cluster-env": {
"tag": "version1"
},
"dbks-site": {
- "tag": "version1"
+ "tag": "version1"
},
"kms-env": {
- "tag": "version1"
+ "tag": "version1"
},
"kms-log4j": {
- "tag": "version1"
+ "tag": "version1"
},
"kms-properties": {
- "tag": "version1"
+ "tag": "version1"
},
"kms-site": {
- "tag": "version1"
+ "tag": "version1"
},
"ranger-kms-security": {
- "tag": "version1"
+ "tag": "version1"
},
"ranger-kms-site": {
- "tag": "version1"
+ "tag": "version1"
},
"ranger-kms-policymgr-ssl": {
- "tag": "version1"
+ "tag": "version1"
},
"ranger-kms-audit": {
- "tag": "version1"
+ "tag": "version1"
}
- },
- "roleCommand": "START",
+ },
+ "roleCommand": "START",
"hostLevelParams": {
- "agent_stack_retry_on_unavailability": "false",
- "stack_name": "HDP",
+ "agent_stack_retry_on_unavailability": "false",
+ "stack_name": "HDP",
"custom_mysql_jdbc_name": "mysql-connector-java.jar",
"previous_custom_mysql_jdbc_name": "mysql-connector-java-old.jar",
- "host_sys_prepped": "false",
- "ambari_db_rca_username": "mapred",
- "current_version": "2.5.0.0-777",
- "mysql_jdbc_url": "http://c6401.ambari.apache.org:8080/resources//mysql-connector-java.jar",
- "agent_stack_retry_count": "5",
- "stack_version": "2.5",
- "jdk_name": "jdk-8u60-linux-x64.tar.gz",
- "ambari_db_rca_driver": "org.postgresql.Driver",
+ "host_sys_prepped": "false",
+ "ambari_db_rca_username": "mapred",
+ "current_version": "2.5.0.0-777",
+ "mysql_jdbc_url": "http://c6401.ambari.apache.org:8080/resources//mysql-connector-java.jar",
+ "agent_stack_retry_count": "5",
+ "stack_version": "2.5",
+ "jdk_name": "jdk-8u60-linux-x64.tar.gz",
+ "ambari_db_rca_driver": "org.postgresql.Driver",
"java_home": "/usr/jdk64/jdk1.7.0_45",
- "repository_version_id": "1",
- "jdk_location": "http://c6401.ambari.apache.org:8080/resources/",
- "not_managed_hdfs_path_list": "[\"/tmp\"]",
- "ambari_db_rca_url": "jdbc:postgresql://c6401.ambari.apache.org/ambarirca",
- "java_version": "8",
- "repo_info": "[{\"baseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/BUILDS/2.5.0.0-777\",\"osType\":\"redhat6\",\"repoId\":\"HDP-2.5\",\"repoName\":\"HDP\",\"defaultBaseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/updates/2.5.0.0\",\"latestBaseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/BUILDS/2.5.0.0-776\",\"baseSaved\":true},{\"baseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP-UTILS-1.1.0.21/repos/centos6\",\"osType\":\"redhat6\",\"repoId\":\"HDP-UTILS-1.1.0.21\",\"repoName\":\"HDP-UTILS\",\"defaultBaseUrl\":\"http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.21/repos/centos6\",\"latestBaseUrl\":\"http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.21/repos/centos6\",\"baseSaved\":true}]",
- "package_list": "[{\"name\":\"ranger_${stack_version}-admin\",\"condition\":\"\",\"skipUpgrade\":false},{\"name\":\"ranger_${stack_version}-usersync\",\"condition\":\"\",\"skipUpgrade\":false},{\"name\":\"ranger_${stack_version}-tagsync\",\"condition\":\"should_install_ranger_tagsync\",\"skipUpgrade\":false},{\"name\":\"ambari-logsearch-solr-client\",\"condition\":\"should_install_logsearch_solr_client\",\"skipUpgrade\":false}]",
- "db_name": "ambari",
- "group_list": "[\"ranger\",\"hadoop\",\"users\"]",
- "agentCacheDir": "/var/lib/ambari-agent/cache",
- "ambari_db_rca_password": "mapred",
+ "repository_version_id": "1",
+ "jdk_location": "http://c6401.ambari.apache.org:8080/resources/",
+ "not_managed_hdfs_path_list": "[\"/tmp\"]",
+ "ambari_db_rca_url": "jdbc:postgresql://c6401.ambari.apache.org/ambarirca",
+ "java_version": "8",
+ "repo_info": "[{\"baseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/BUILDS/2.5.0.0-777\",\"osType\":\"redhat6\",\"repoId\":\"HDP-2.5\",\"repoName\":\"HDP\",\"defaultBaseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/updates/2.5.0.0\",\"latestBaseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/BUILDS/2.5.0.0-776\",\"baseSaved\":true},{\"baseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP-UTILS-1.1.0.21/repos/centos6\",\"osType\":\"redhat6\",\"repoId\":\"HDP-UTILS-1.1.0.21\",\"repoName\":\"HDP-UTILS\",\"defaultBaseUrl\":\"http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.21/repos/centos6\",\"latestBaseUrl\":\"http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.21/repos/centos6\",\"baseSaved\":true}]",
+ "package_list": "[{\"name\":\"ranger_${stack_version}-admin\",\"condition\":\"\",\"skipUpgrade\":false},{\"name\":\"ranger_${stack_version}-usersync\",\"condition\":\"\",\"skipUpgrade\":false},{\"name\":\"ranger_${stack_version}-tagsync\",\"condition\":\"should_install_ranger_tagsync\",\"skipUpgrade\":false},{\"name\":\"ambari-logsearch-solr-client\",\"condition\":\"should_install_logsearch_solr_client\",\"skipUpgrade\":false}]",
+ "db_name": "ambari",
+ "group_list": "[\"ranger\",\"hadoop\",\"users\"]",
+ "agentCacheDir": "/var/lib/ambari-agent/cache",
+ "ambari_db_rca_password": "mapred",
"jce_name": "UnlimitedJCEPolicyJDK7.zip",
- "oracle_jdbc_url": "http://c6401.ambari.apache.org:8080/resources//ojdbc6.jar",
- "db_driver_filename": "mysql-connector-java.jar",
- "user_list": "[\"zookeeper\",\"ambari-qa\",\"ranger\",\"hdfs\"]",
+ "oracle_jdbc_url": "http://c6401.ambari.apache.org:8080/resources//ojdbc6.jar",
+ "db_driver_filename": "mysql-connector-java.jar",
+ "user_list": "[\"zookeeper\",\"ambari-qa\",\"ranger\",\"hdfs\"]",
"clientsToUpdateConfigs": "[\"*\"]"
- },
+ },
"commandParams": {
- "service_package_folder": "common-services/RANGER/0.4.0/package",
- "script": "scripts/ranger_usersync.py",
- "hooks_folder": "HDP/2.0.6/hooks",
- "version": "2.5.0.0-777",
- "max_duration_for_retries": "0",
- "command_retry_enabled": "false",
- "command_timeout": "600",
+ "service_package_folder": "common-services/RANGER/0.4.0/package",
+ "script": "scripts/ranger_usersync.py",
+ "hooks_folder": "HDP/2.0.6/hooks",
+ "version": "2.5.0.0-777",
+ "max_duration_for_retries": "0",
+ "command_retry_enabled": "false",
+ "command_timeout": "600",
"script_type": "PYTHON"
- },
- "forceRefreshConfigTags": [],
- "stageId": 1,
+ },
+ "forceRefreshConfigTags": [],
+ "stageId": 1,
"clusterHostInfo": {
"snamenode_host": [
"c6401.ambari.apache.org"
- ],
+ ],
"ambari_server_use_ssl": [
"false"
- ],
+ ],
"all_ping_ports": [
"8670"
- ],
+ ],
"ranger_tagsync_hosts": [
"c6401.ambari.apache.org"
- ],
+ ],
"ranger_usersync_hosts": [
"c6401.ambari.apache.org"
- ],
+ ],
"all_hosts": [
"c6401.ambari.apache.org"
- ],
+ ],
"slave_hosts": [
"c6401.ambari.apache.org"
- ],
+ ],
"namenode_host": [
"c6401.ambari.apache.org"
- ],
+ ],
"ambari_server_port": [
"8080"
- ],
+ ],
"ranger_admin_hosts": [
"c6401.ambari.apache.org"
- ],
+ ],
"all_racks": [
"/default-rack"
- ],
+ ],
"all_ipv4_ips": [
"172.22.125.4"
- ],
+ ],
"ambari_server_host": [
"c6401.ambari.apache.org"
- ],
+ ],
"zookeeper_hosts": [
"c6401.ambari.apache.org"
],
"ranger_kms_server_hosts": [
"c6401.ambari.apache.org"
]
- },
+ },
"configurations": {
"ranger-hdfs-audit": {
"xasecure.audit.destination.solr.zookeepers": "c6401.ambari.apache.org:2181/ranger_audits",
- "xasecure.audit.destination.solr.urls": "",
- "xasecure.audit.destination.solr.batch.filespool.dir": "/var/log/hadoop/hdfs/audit/solr/spool",
- "xasecure.audit.destination.hdfs.batch.filespool.dir": "/var/log/hadoop/hdfs/audit/hdfs/spool",
+ "xasecure.audit.destination.solr.urls": "",
+ "xasecure.audit.destination.solr.batch.filespool.dir": "/var/log/hadoop/hdfs/audit/solr/spool",
+ "xasecure.audit.destination.hdfs.batch.filespool.dir": "/var/log/hadoop/hdfs/audit/hdfs/spool",
"xasecure.audit.destination.hdfs": "true",
- "xasecure.audit.destination.solr": "true",
- "xasecure.audit.provider.summary.enabled": "false",
- "xasecure.audit.destination.hdfs.dir": "hdfs://c6401.ambari.apache.org:8020/ranger/audit",
+ "xasecure.audit.destination.solr": "true",
+ "xasecure.audit.provider.summary.enabled": "false",
+ "xasecure.audit.destination.hdfs.dir": "hdfs://c6401.ambari.apache.org:8020/ranger/audit",
"xasecure.audit.is.enabled": "true"
- },
+ },
"ssl-client": {
- "ssl.client.truststore.reload.interval": "10000",
- "ssl.client.keystore.password": "bigdata",
- "ssl.client.truststore.type": "jks",
- "ssl.client.keystore.location": "/etc/security/clientKeys/keystore.jks",
- "ssl.client.truststore.location": "/etc/security/clientKeys/all.jks",
- "ssl.client.truststore.password": "bigdata",
+ "ssl.client.truststore.reload.interval": "10000",
+ "ssl.client.keystore.password": "bigdata",
+ "ssl.client.truststore.type": "jks",
+ "ssl.client.keystore.location": "/etc/security/clientKeys/keystore.jks",
+ "ssl.client.truststore.location": "/etc/security/clientKeys/all.jks",
+ "ssl.client.truststore.password": "bigdata",
"ssl.client.keystore.type": "jks"
- },
+ },
"ranger-admin-site": {
- "ranger.admin.kerberos.cookie.domain": "",
- "ranger.kms.service.user.hdfs": "hdfs",
- "ranger.spnego.kerberos.principal": "",
- "ranger.ldap.ad.url": "{{ranger_ug_ldap_url}}",
- "ranger.plugins.hive.serviceuser": "hive",
- "ranger.lookup.kerberos.keytab": "",
- "ranger.plugins.kms.serviceuser": "kms",
- "ranger.service.https.attrib.ssl.enabled": "false",
- "ranger.sso.browser.useragent": "Mozilla,chrome",
- "ranger.jpa.jdbc.url": "jdbc:mysql://c6401.ambari.apache.org:3306/ranger01",
- "ranger.plugins.hbase.serviceuser": "hbase",
- "ranger.plugins.hdfs.serviceuser": "hdfs",
- "ranger.ldap.group.searchbase": "{{ranger_ug_ldap_group_searchbase}}",
- "ranger.ldap.user.dnpattern": "uid={0},ou=users,dc=xasecure,dc=net",
- "ranger.plugins.knox.serviceuser": "knox",
- "ranger.ldap.base.dn": "dc=example,dc=com",
- "ranger.sso.publicKey": "",
- "ranger.admin.kerberos.cookie.path": "/",
- "ranger.service.https.attrib.clientAuth": "want",
- "ranger.jpa.jdbc.user": "{{ranger_db_user}}",
- "ranger.ldap.ad.user.searchfilter": "(sAMAccountName={0})",
- "ranger.ldap.group.roleattribute": "cn",
- "ranger.plugins.kafka.serviceuser": "kafka",
- "ranger.admin.kerberos.principal": "",
- "ranger.ldap.ad.bind.dn": "{{ranger_ug_ldap_bind_dn}}",
+ "ranger.admin.kerberos.cookie.domain": "",
+ "ranger.kms.service.user.hdfs": "hdfs",
+ "ranger.spnego.kerberos.principal": "",
+ "ranger.ldap.ad.url": "{{ranger_ug_ldap_url}}",
+ "ranger.plugins.hive.serviceuser": "hive",
+ "ranger.lookup.kerberos.keytab": "",
+ "ranger.plugins.kms.serviceuser": "kms",
+ "ranger.service.https.attrib.ssl.enabled": "false",
+ "ranger.sso.browser.useragent": "Mozilla,chrome",
+ "ranger.jpa.jdbc.url": "jdbc:mysql://c6401.ambari.apache.org:3306/ranger01",
+ "ranger.plugins.hbase.serviceuser": "hbase",
+ "ranger.plugins.hdfs.serviceuser": "hdfs",
+ "ranger.ldap.group.searchbase": "{{ranger_ug_ldap_group_searchbase}}",
+ "ranger.ldap.user.dnpattern": "uid={0},ou=users,dc=xasecure,dc=net",
+ "ranger.plugins.knox.serviceuser": "knox",
+ "ranger.ldap.base.dn": "dc=example,dc=com",
+ "ranger.sso.publicKey": "",
+ "ranger.admin.kerberos.cookie.path": "/",
+ "ranger.service.https.attrib.clientAuth": "want",
+ "ranger.jpa.jdbc.user": "{{ranger_db_user}}",
+ "ranger.ldap.ad.user.searchfilter": "(sAMAccountName={0})",
+ "ranger.ldap.group.roleattribute": "cn",
+ "ranger.plugins.kafka.serviceuser": "kafka",
+ "ranger.admin.kerberos.principal": "",
+ "ranger.ldap.ad.bind.dn": "{{ranger_ug_ldap_bind_dn}}",
"ranger.credential.provider.path": "/etc/ranger/admin/rangeradmin.jceks",
- "ranger.ldap.referral": "ignore",
- "ranger.service.http.port": "6080",
- "ranger.ldap.user.searchfilter": "(uid={0})",
- "ranger.plugins.atlas.serviceuser": "atlas",
- "ranger.truststore.password": "changeit",
- "ranger.ldap.bind.password": "{{ranger_usersync_ldap_ldapbindpassword}}",
- "ranger.audit.solr.password": "NONE",
- "ranger.audit.solr.zookeepers": "c6401.ambari.apache.org:2181/ranger_audits",
- "ranger.lookup.kerberos.principal": "",
- "ranger.service.https.port": "6182",
- "ranger.plugins.storm.serviceuser": "storm",
- "ranger.externalurl": "{{ranger_external_url}}",
- "ranger.truststore.file": "/etc/ranger/admin/conf/ranger-admin-keystore.jks",
- "ranger.kms.service.user.hive": "",
- "ranger.https.attrib.keystore.file": "/etc/ranger/admin/conf/ranger-admin-keystore.jks",
- "ranger.jpa.jdbc.dialect": "{{jdbc_dialect}}",
- "ranger.service.host": "{{ranger_host}}",
- "ranger.service.https.attrib.keystore.keyalias": "rangeradmin",
- "ranger.service.https.attrib.keystore.pass": "xasecure",
- "ranger.unixauth.remote.login.enabled": "true",
- "ranger.jpa.jdbc.credential.alias": "rangeradmin",
- "ranger.ldap.ad.bind.password": "{{ranger_usersync_ldap_ldapbindpassword}}",
- "ranger.audit.solr.username": "ranger_solr",
- "ranger.sso.enabled": "false",
- "ranger.audit.solr.urls": "",
- "ranger.ldap.ad.domain": "",
- "ranger.plugins.yarn.serviceuser": "yarn",
- "ranger.audit.source.type": "solr",
- "ranger.ldap.bind.dn": "{{ranger_ug_ldap_bind_dn}}",
- "ranger.ldap.url": "{{ranger_ug_ldap_url}}",
- "ranger.authentication.method": "UNIX",
- "ranger.service.http.enabled": "true",
- "ranger.ldap.group.searchfilter": "{{ranger_ug_ldap_group_searchfilter}}",
- "ranger.ldap.ad.referral": "ignore",
- "ranger.ldap.ad.base.dn": "dc=example,dc=com",
- "ranger.jpa.jdbc.password": "_",
- "ranger.spnego.kerberos.keytab": "",
- "ranger.sso.providerurl": "",
- "ranger.unixauth.service.hostname": "{{ugsync_host}}",
- "ranger.admin.kerberos.keytab": "",
- "ranger.admin.kerberos.token.valid.seconds": "30",
- "ranger.jpa.jdbc.driver": "com.mysql.jdbc.Driver",
+ "ranger.ldap.referral": "ignore",
+ "ranger.service.http.port": "6080",
+ "ranger.ldap.user.searchfilter": "(uid={0})",
+ "ranger.plugins.atlas.serviceuser": "atlas",
+ "ranger.truststore.password": "changeit",
+ "ranger.ldap.bind.password": "{{ranger_usersync_ldap_ldapbindpassword}}",
+ "ranger.audit.solr.password": "NONE",
+ "ranger.audit.solr.zookeepers": "c6401.ambari.apache.org:2181/ranger_audits",
+ "ranger.lookup.kerberos.principal": "",
+ "ranger.service.https.port": "6182",
+ "ranger.plugins.storm.serviceuser": "storm",
+ "ranger.externalurl": "{{ranger_external_url}}",
+ "ranger.truststore.file": "/etc/ranger/admin/conf/ranger-admin-keystore.jks",
+ "ranger.kms.service.user.hive": "",
+ "ranger.https.attrib.keystore.file": "/etc/ranger/admin/conf/ranger-admin-keystore.jks",
+ "ranger.jpa.jdbc.dialect": "{{jdbc_dialect}}",
+ "ranger.service.host": "{{ranger_host}}",
+ "ranger.service.https.attrib.keystore.keyalias": "rangeradmin",
+ "ranger.service.https.attrib.keystore.pass": "xasecure",
+ "ranger.unixauth.remote.login.enabled": "true",
+ "ranger.jpa.jdbc.credential.alias": "rangeradmin",
+ "ranger.ldap.ad.bind.password": "{{ranger_usersync_ldap_ldapbindpassword}}",
+ "ranger.audit.solr.username": "ranger_solr",
+ "ranger.sso.enabled": "false",
+ "ranger.audit.solr.urls": "",
+ "ranger.ldap.ad.domain": "",
+ "ranger.plugins.yarn.serviceuser": "yarn",
+ "ranger.audit.source.type": "solr",
+ "ranger.ldap.bind.dn": "{{ranger_ug_ldap_bind_dn}}",
+ "ranger.ldap.url": "{{ranger_ug_ldap_url}}",
+ "ranger.authentication.method": "UNIX",
+ "ranger.service.http.enabled": "true",
+ "ranger.ldap.group.searchfilter": "{{ranger_ug_ldap_group_searchfilter}}",
+ "ranger.ldap.ad.referral": "ignore",
+ "ranger.ldap.ad.base.dn": "dc=example,dc=com",
+ "ranger.jpa.jdbc.password": "_",
+ "ranger.spnego.kerberos.keytab": "",
+ "ranger.sso.providerurl": "",
+ "ranger.unixauth.service.hostname": "{{ugsync_host}}",
+ "ranger.admin.kerberos.keytab": "",
+ "ranger.admin.kerberos.token.valid.seconds": "30",
+ "ranger.jpa.jdbc.driver": "com.mysql.jdbc.Driver",
"ranger.unixauth.service.port": "5151"
- },
+ },
"ranger-hdfs-policymgr-ssl": {
- "xasecure.policymgr.clientssl.keystore": "/usr/hdp/current/hadoop-client/conf/ranger-plugin-keystore.jks",
- "xasecure.policymgr.clientssl.truststore.password": "changeit",
- "xasecure.policymgr.clientssl.keystore.credential.file": "jceks://file{{credential_file}}",
- "xasecure.policymgr.clientssl.truststore": "/usr/hdp/current/hadoop-client/conf/ranger-plugin-truststore.jks",
- "xasecure.policymgr.clientssl.truststore.credential.file": "jceks://file{{credential_file}}",
+ "xasecure.policymgr.clientssl.keystore": "/usr/hdp/current/hadoop-client/conf/ranger-plugin-keystore.jks",
+ "xasecure.policymgr.clientssl.truststore.password": "changeit",
+ "xasecure.policymgr.clientssl.keystore.credential.file": "jceks://file{{credential_file}}",
+ "xasecure.policymgr.clientssl.truststore": "/usr/hdp/current/hadoop-client/conf/ranger-plugin-truststore.jks",
+ "xasecure.policymgr.clientssl.truststore.credential.file": "jceks://file{{credential_file}}",
"xasecure.policymgr.clientssl.keystore.password": "myKeyFilePassword"
- },
+ },
"tagsync-application-properties": {
- "atlas.kafka.entities.group.id": "ranger_entities_consumer",
- "atlas.kafka.zookeeper.connect": "c6401.ambari.apache.org:2181",
+ "atlas.kafka.entities.group.id": "ranger_entities_consumer",
+ "atlas.kafka.zookeeper.connect": "c6401.ambari.apache.org:2181",
"atlas.kafka.bootstrap.servers": "localhost:6667"
- },
+ },
"ranger-env": {
- "ranger_solr_shards": "1",
- "ranger_solr_config_set": "ranger_audits",
- "ranger_user": "ranger",
- "xml_configurations_supported": "true",
- "ranger-atlas-plugin-enabled": "No",
- "ranger-hbase-plugin-enabled": "No",
- "ranger-yarn-plugin-enabled": "No",
- "bind_anonymous": "false",
- "ranger_admin_username": "amb_ranger_admin",
- "admin_password": "admin",
- "is_solrCloud_enabled": "true",
- "ranger-storm-plugin-enabled": "No",
- "ranger-hdfs-plugin-enabled": "No",
- "ranger_group": "ranger",
- "ranger-knox-plugin-enabled": "No",
- "ranger_admin_log_dir": "/var/log/ranger/admin",
- "ranger-kafka-plugin-enabled": "No",
- "ranger_privelege_user_jdbc_url": "jdbc:mysql://c6401.ambari.apache.org:3306",
- "ranger-hive-plugin-enabled": "No",
- "xasecure.audit.destination.solr": "true",
- "ranger_pid_dir": "/var/run/ranger",
- "xasecure.audit.destination.hdfs": "true",
- "admin_username": "admin",
- "xasecure.audit.destination.hdfs.dir": "hdfs://c6401.ambari.apache.org:8020/ranger/audit",
- "create_db_dbuser": "true",
- "ranger_solr_collection_name": "ranger_audits",
- "ranger_admin_password": "P1!qLEQwP24KVlWY",
+ "ranger_solr_shards": "1",
+ "ranger_solr_config_set": "ranger_audits",
+ "ranger_user": "ranger",
+ "xml_configurations_supported": "true",
+ "ranger-atlas-plugin-enabled": "No",
+ "ranger-hbase-plugin-enabled": "No",
+ "ranger-yarn-plugin-enabled": "No",
+ "bind_anonymous": "false",
+ "ranger_admin_username": "amb_ranger_admin",
+ "admin_password": "admin",
+ "is_solrCloud_enabled": "true",
+ "ranger-storm-plugin-enabled": "No",
+ "ranger-hdfs-plugin-enabled": "No",
+ "ranger_group": "ranger",
+ "ranger-knox-plugin-enabled": "No",
+ "ranger_admin_log_dir": "/var/log/ranger/admin",
+ "ranger-kafka-plugin-enabled": "No",
+ "ranger_privelege_user_jdbc_url": "jdbc:mysql://c6401.ambari.apache.org:3306",
+ "ranger-hive-plugin-enabled": "No",
+ "xasecure.audit.destination.solr": "true",
+ "ranger_pid_dir": "/var/run/ranger",
+ "xasecure.audit.destination.hdfs": "true",
+ "admin_username": "admin",
+ "xasecure.audit.destination.hdfs.dir": "hdfs://c6401.ambari.apache.org:8020/ranger/audit",
+ "create_db_dbuser": "true",
+ "ranger_solr_collection_name": "ranger_audits",
+ "ranger_admin_password": "P1!qLEQwP24KVlWY",
"ranger_usersync_log_dir": "/var/log/ranger/usersync"
- },
+ },
"usersync-log4j": {
"content": "\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nlog4j.rootLogger = info,logFile\n\n# logFile\nlog4j.appender.logFile=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.logFile.file=${logdir}/usersync.
log\nlog4j.appender.logFile.datePattern='.'yyyy-MM-dd\nlog4j.appender.logFile.layout=org.apache.log4j.PatternLayout\nlog4j.appender.logFile.layout.ConversionPattern=%d{dd MMM yyyy HH:mm:ss} %5p %c{1} [%t] - %m%n\n\n# console\nlog4j.appender.console=org.apache.log4j.ConsoleAppender\nlog4j.appender.console.Target=System.out\nlog4j.appender.console.layout=org.apache.log4j.PatternLayout\nlog4j.appender.console.layout.ConversionPattern=%d{dd MMM yyyy HH:mm:ss} %5p %c{1} [%t] - %m%n"
- },
+ },
"admin-properties": {
- "db_user": "rangeradmin01",
- "DB_FLAVOR": "MYSQL",
- "db_password": "rangeradmin01",
- "db_root_user": "root",
- "policymgr_external_url": "http://c6401.ambari.apache.org:6080",
- "db_name": "ranger01",
- "db_host": "c6401.ambari.apache.org",
- "db_root_password": "vagrant",
+ "db_user": "rangeradmin01",
+ "DB_FLAVOR": "MYSQL",
+ "db_password": "rangeradmin01",
+ "db_root_user": "root",
+ "policymgr_external_url": "http://c6401.ambari.apache.org:6080",
+ "db_name": "ranger01",
+ "db_host": "c6401.ambari.apache.org",
+ "db_root_password": "vagrant",
"SQL_CONNECTOR_JAR": "{{driver_curl_target}}"
- },
+ },
"ranger-ugsync-site": {
- "ranger.usersync.ldap.binddn": "",
- "ranger.usersync.policymgr.username": "rangerusersync",
- "ranger.usersync.policymanager.mockrun": "false",
- "ranger.usersync.group.searchbase": "",
- "ranger.usersync.ldap.bindalias": "testldapalias",
- "ranger.usersync.truststore.file": "/usr/hdp/current/ranger-usersync/conf/mytruststore.jks",
- "ranger.usersync.port": "5151",
- "ranger.usersync.pagedresultssize": "500",
- "ranger.usersync.group.memberattributename": "",
- "ranger.usersync.kerberos.principal": "",
- "ranger.usersync.source.impl.class": "org.apache.ranger.unixusersync.process.UnixUserGroupBuilder",
- "ranger.usersync.ldap.referral": "ignore",
- "ranger.usersync.group.searchfilter": "",
- "ranger.usersync.ldap.user.objectclass": "person",
- "ranger.usersync.logdir": "{{usersync_log_dir}}",
- "ranger.usersync.ldap.user.searchfilter": "",
- "ranger.usersync.ldap.groupname.caseconversion": "none",
- "ranger.usersync.ldap.ldapbindpassword": "",
- "ranger.usersync.unix.minUserId": "500",
- "ranger.usersync.policymanager.maxrecordsperapicall": "1000",
- "ranger.usersync.group.nameattribute": "",
- "ranger.usersync.policymgr.alias": "ranger.usersync.policymgr.password",
- "ranger.usersync.keystore.file": "/usr/hdp/current/ranger-usersync/conf/unixauthservice.jks",
- "ranger.usersync.user.searchenabled": "false",
- "ranger.usersync.group.usermapsyncenabled": "true",
- "ranger.usersync.ldap.bindkeystore": "",
- "ranger.usersync.ldap.user.groupnameattribute": "memberof, ismemberof",
- "ranger.usersync.kerberos.keytab": "",
- "ranger.usersync.passwordvalidator.path": "./native/credValidator.uexe",
- "ranger.usersync.group.objectclass": "",
- "ranger.usersync.ldap.user.searchscope": "sub",
- "ranger.usersync.unix.password.file": "/etc/passwd",
- "ranger.usersync.ldap.user.nameattribute": "",
- "ranger.usersync.pagedresultsenabled": "true",
- "ranger.usersync.policymanager.baseURL": "{{ranger_external_url}}",
- "ranger.usersync.group.search.first.enabled": "false",
- "ranger.usersync.group.searchenabled": "false",
- "ranger.usersync.sink.impl.class": "org.apache.ranger.unixusersync.process.PolicyMgrUserGroupBuilder",
- "ranger.usersync.ssl": "true",
- "ranger.usersync.ldap.url": "",
- "ranger.usersync.ldap.searchBase": "dc=hadoop,dc=apache,dc=org",
- "ranger.usersync.policymgr.keystore": "/usr/hdp/current/ranger-usersync/conf/ugsync.jceks",
- "ranger.usersync.ldap.user.searchbase": "",
- "ranger.usersync.ldap.username.caseconversion": "none",
- "ranger.usersync.credstore.filename": "/usr/hdp/current/ranger-usersync/conf/ugsync.jceks",
- "ranger.usersync.keystore.password": "UnIx529p",
- "ranger.usersync.unix.group.file": "/etc/group",
- "ranger.usersync.filesource.file": "/tmp/usergroup.txt",
- "ranger.usersync.group.searchscope": "",
- "ranger.usersync.truststore.password": "changeit",
- "ranger.usersync.enabled": "true",
- "ranger.usersync.sleeptimeinmillisbetweensynccycle": "60000",
+ "ranger.usersync.ldap.binddn": "",
+ "ranger.usersync.policymgr.username": "rangerusersync",
+ "ranger.usersync.policymanager.mockrun": "false",
+ "ranger.usersync.group.searchbase": "",
+ "ranger.usersync.ldap.bindalias": "testldapalias",
+ "ranger.usersync.truststore.file": "/usr/hdp/current/ranger-usersync/conf/mytruststore.jks",
+ "ranger.usersync.port": "5151",
+ "ranger.usersync.pagedresultssize": "500",
+ "ranger.usersync.group.memberattributename": "",
+ "ranger.usersync.kerberos.principal": "",
+ "ranger.usersync.source.impl.class": "org.apache.ranger.unixusersync.process.UnixUserGroupBuilder",
+ "ranger.usersync.ldap.referral": "ignore",
+ "ranger.usersync.group.searchfilter": "",
+ "ranger.usersync.ldap.user.objectclass": "person",
+ "ranger.usersync.logdir": "{{usersync_log_dir}}",
+ "ranger.usersync.ldap.user.searchfilter": "",
+ "ranger.usersync.ldap.groupname.caseconversion": "none",
+ "ranger.usersync.ldap.ldapbindpassword": "",
+ "ranger.usersync.unix.minUserId": "500",
+ "ranger.usersync.policymanager.maxrecordsperapicall": "1000",
+ "ranger.usersync.group.nameattribute": "",
+ "ranger.usersync.policymgr.alias": "ranger.usersync.policymgr.password",
+ "ranger.usersync.keystore.file": "/usr/hdp/current/ranger-usersync/conf/unixauthservice.jks",
+ "ranger.usersync.user.searchenabled": "false",
+ "ranger.usersync.group.usermapsyncenabled": "true",
+ "ranger.usersync.ldap.bindkeystore": "",
+ "ranger.usersync.ldap.user.groupnameattribute": "memberof, ismemberof",
+ "ranger.usersync.kerberos.keytab": "",
+ "ranger.usersync.passwordvalidator.path": "./native/credValidator.uexe",
+ "ranger.usersync.group.objectclass": "",
+ "ranger.usersync.ldap.user.searchscope": "sub",
+ "ranger.usersync.unix.password.file": "/etc/passwd",
+ "ranger.usersync.ldap.user.nameattribute": "",
+ "ranger.usersync.pagedresultsenabled": "true",
+ "ranger.usersync.policymanager.baseURL": "{{ranger_external_url}}",
+ "ranger.usersync.group.search.first.enabled": "false",
+ "ranger.usersync.group.searchenabled": "false",
+ "ranger.usersync.sink.impl.class": "org.apache.ranger.unixusersync.process.PolicyMgrUserGroupBuilder",
+ "ranger.usersync.ssl": "true",
+ "ranger.usersync.ldap.url": "",
+ "ranger.usersync.ldap.searchBase": "dc=hadoop,dc=apache,dc=org",
+ "ranger.usersync.policymgr.keystore": "/usr/hdp/current/ranger-usersync/conf/ugsync.jceks",
+ "ranger.usersync.ldap.user.searchbase": "",
+ "ranger.usersync.ldap.username.caseconversion": "none",
+ "ranger.usersync.credstore.filename": "/usr/hdp/current/ranger-usersync/conf/ugsync.jceks",
+ "ranger.usersync.keystore.password": "UnIx529p",
+ "ranger.usersync.unix.group.file": "/etc/group",
+ "ranger.usersync.filesource.file": "/tmp/usergroup.txt",
+ "ranger.usersync.group.searchscope": "",
+ "ranger.usersync.truststore.password": "changeit",
+ "ranger.usersync.enabled": "true",
+ "ranger.usersync.sleeptimeinmillisbetweensynccycle": "60000",
"ranger.usersync.filesource.text.delimiter": ","
- },
+ },
"hdfs-site": {
- "dfs.namenode.checkpoint.period": "21600",
- "dfs.namenode.avoid.write.stale.datanode": "true",
- "dfs.namenode.startup.delay.block.deletion.sec": "3600",
- "dfs.namenode.checkpoint.txns": "1000000",
- "dfs.content-summary.limit": "5000",
- "dfs.support.append": "true",
- "dfs.datanode.address": "0.0.0.0:50010",
- "dfs.cluster.administrators": " hdfs",
- "dfs.namenode.audit.log.async": "true",
- "dfs.datanode.balance.bandwidthPerSec": "6250000",
- "dfs.namenode.safemode.threshold-pct": "1",
- "dfs.namenode.checkpoint.edits.dir": "${dfs.namenode.checkpoint.dir}",
- "dfs.namenode.rpc-address": "c6401.ambari.apache.org:8020",
- "dfs.permissions.enabled": "true",
- "dfs.client.read.shortcircuit": "true",
- "dfs.https.port": "50470",
- "dfs.namenode.https-address": "c6401.ambari.apache.org:50470",
- "nfs.file.dump.dir": "/tmp/.hdfs-nfs",
- "dfs.blocksize": "134217728",
- "dfs.blockreport.initialDelay": "120",
- "dfs.journalnode.edits.dir": "/hadoop/hdfs/journalnode",
- "dfs.namenode.fslock.fair": "false",
- "dfs.datanode.max.transfer.threads": "4096",
- "dfs.heartbeat.interval": "3",
- "dfs.replication": "3",
- "dfs.namenode.handler.count": "50",
- "dfs.namenode.checkpoint.dir": "/grid/0/hadoop/hdfs/namesecondary",
- "fs.permissions.umask-mode": "022",
- "dfs.namenode.stale.datanode.interval": "30000",
- "dfs.datanode.ipc.address": "0.0.0.0:8010",
- "dfs.datanode.failed.volumes.tolerated": "0",
- "dfs.datanode.data.dir": "/grid/0/hadoop/hdfs/data",
- "dfs.namenode.http-address": "c6401.ambari.apache.org:50070",
- "dfs.webhdfs.enabled": "true",
- "dfs.encrypt.data.transfer.cipher.suites": "AES/CTR/NoPadding",
- "dfs.namenode.accesstime.precision": "0",
- "dfs.datanode.https.address": "0.0.0.0:50475",
- "dfs.namenode.write.stale.datanode.ratio": "1.0f",
- "dfs.namenode.secondary.http-address": "c6401.ambari.apache.org:50090",
- "nfs.exports.allowed.hosts": "* rw",
- "dfs.datanode.http.address": "0.0.0.0:50075",
- "dfs.datanode.du.reserved": "33011188224",
- "dfs.client.read.shortcircuit.streams.cache.size": "4096",
- "dfs.http.policy": "HTTP_ONLY",
- "dfs.block.access.token.enable": "true",
- "dfs.client.retry.policy.enabled": "false",
- "dfs.namenode.name.dir.restore": "true",
- "dfs.permissions.superusergroup": "hdfs",
- "dfs.journalnode.https-address": "0.0.0.0:8481",
- "dfs.journalnode.http-address": "0.0.0.0:8480",
- "dfs.domain.socket.path": "/var/lib/hadoop-hdfs/dn_socket",
- "dfs.namenode.avoid.read.stale.datanode": "true",
- "dfs.hosts.exclude": "/etc/hadoop/conf/dfs.exclude",
- "dfs.datanode.data.dir.perm": "750",
- "dfs.encryption.key.provider.uri": "",
- "dfs.replication.max": "50",
+ "dfs.namenode.checkpoint.period": "21600",
+ "dfs.namenode.avoid.write.stale.datanode": "true",
+ "dfs.namenode.startup.delay.block.deletion.sec": "3600",
+ "dfs.namenode.checkpoint.txns": "1000000",
+ "dfs.content-summary.limit": "5000",
+ "dfs.support.append": "true",
+ "dfs.datanode.address": "0.0.0.0:50010",
+ "dfs.cluster.administrators": " hdfs",
+ "dfs.namenode.audit.log.async": "true",
+ "dfs.datanode.balance.bandwidthPerSec": "6250000",
+ "dfs.namenode.safemode.threshold-pct": "1",
+ "dfs.namenode.checkpoint.edits.dir": "${dfs.namenode.checkpoint.dir}",
+ "dfs.namenode.rpc-address": "c6401.ambari.apache.org:8020",
+ "dfs.permissions.enabled": "true",
+ "dfs.client.read.shortcircuit": "true",
+ "dfs.https.port": "50470",
+ "dfs.namenode.https-address": "c6401.ambari.apache.org:50470",
+ "nfs.file.dump.dir": "/tmp/.hdfs-nfs",
+ "dfs.blocksize": "134217728",
+ "dfs.blockreport.initialDelay": "120",
+ "dfs.journalnode.edits.dir": "/hadoop/hdfs/journalnode",
+ "dfs.namenode.fslock.fair": "false",
+ "dfs.datanode.max.transfer.threads": "4096",
+ "dfs.heartbeat.interval": "3",
+ "dfs.replication": "3",
+ "dfs.namenode.handler.count": "50",
+ "dfs.namenode.checkpoint.dir": "/grid/0/hadoop/hdfs/namesecondary",
+ "fs.permissions.umask-mode": "022",
+ "dfs.namenode.stale.datanode.interval": "30000",
+ "dfs.datanode.ipc.address": "0.0.0.0:8010",
+ "dfs.datanode.failed.volumes.tolerated": "0",
+ "dfs.datanode.data.dir": "/grid/0/hadoop/hdfs/data",
+ "dfs.namenode.http-address": "c6401.ambari.apache.org:50070",
+ "dfs.webhdfs.enabled": "true",
+ "dfs.encrypt.data.transfer.cipher.suites": "AES/CTR/NoPadding",
+ "dfs.namenode.accesstime.precision": "0",
+ "dfs.datanode.https.address": "0.0.0.0:50475",
+ "dfs.namenode.write.stale.datanode.ratio": "1.0f",
+ "dfs.namenode.secondary.http-address": "c6401.ambari.apache.org:50090",
+ "nfs.exports.allowed.hosts": "* rw",
+ "dfs.datanode.http.address": "0.0.0.0:50075",
+ "dfs.datanode.du.reserved": "33011188224",
+ "dfs.client.read.shortcircuit.streams.cache.size": "4096",
+ "dfs.http.policy": "HTTP_ONLY",
+ "dfs.block.access.token.enable": "true",
+ "dfs.client.retry.policy.enabled": "false",
+ "dfs.namenode.name.dir.restore": "true",
+ "dfs.permissions.superusergroup": "hdfs",
+ "dfs.journalnode.https-address": "0.0.0.0:8481",
+ "dfs.journalnode.http-address": "0.0.0.0:8480",
+ "dfs.domain.socket.path": "/var/lib/hadoop-hdfs/dn_socket",
+ "dfs.namenode.avoid.read.stale.datanode": "true",
+ "dfs.hosts.exclude": "/etc/hadoop/conf/dfs.exclude",
+ "dfs.datanode.data.dir.perm": "750",
+ "dfs.encryption.key.provider.uri": "",
+ "dfs.replication.max": "50",
"dfs.namenode.name.dir": "/grid/0/hadoop/hdfs/namenode"
- },
+ },
"ranger-tagsync-site": {
- "ranger.tagsync.atlas.to.ranger.service.mapping": "",
- "ranger.tagsync.dest.ranger.ssl.config.filename": "/usr/hdp/current/ranger-tagsync/conf/mytruststore.jks",
- "ranger.tagsync.source.file.check.interval.millis": "",
- "ranger.tagsync.logdir": "/var/log/ranger/tagsync",
- "ranger.tagsync.source.atlasrest.download.interval.millis": "",
- "ranger.tagsync.keystore.filename": "/usr/hdp/current/ranger-tagsync/conf/rangertagsync.jceks",
- "ranger.tagsync.source.atlasrest.endpoint": "",
- "ranger.tagsync.dest.ranger.username": "rangertagsync",
- "ranger.tagsync.kerberos.principal": "",
- "ranger.tagsync.dest.ranger.endpoint": "{{ranger_external_url}}",
- "ranger.tagsync.atlas.custom.resource.mappers": "",
- "ranger.tagsync.kerberos.keytab": "",
- "ranger.tagsync.source.atlas": "false",
- "ranger.tagsync.source.atlasrest": "false",
- "ranger.tagsync.source.file": "false",
+ "ranger.tagsync.atlas.to.ranger.service.mapping": "",
+ "ranger.tagsync.dest.ranger.ssl.config.filename": "/usr/hdp/current/ranger-tagsync/conf/mytruststore.jks",
+ "ranger.tagsync.source.file.check.interval.millis": "",
+ "ranger.tagsync.logdir": "/var/log/ranger/tagsync",
+ "ranger.tagsync.source.atlasrest.download.interval.millis": "",
+ "ranger.tagsync.keystore.filename": "/usr/hdp/current/ranger-tagsync/conf/rangertagsync.jceks",
+ "ranger.tagsync.source.atlasrest.endpoint": "",
+ "ranger.tagsync.dest.ranger.username": "rangertagsync",
+ "ranger.tagsync.kerberos.principal": "",
+ "ranger.tagsync.dest.ranger.endpoint": "{{ranger_external_url}}",
+ "ranger.tagsync.atlas.custom.resource.mappers": "",
+ "ranger.tagsync.kerberos.keytab": "",
+ "ranger.tagsync.source.atlas": "false",
+ "ranger.tagsync.source.atlasrest": "false",
+ "ranger.tagsync.source.file": "false",
"ranger.tagsync.source.file.filename": ""
- },
+ },
"zoo.cfg": {
- "clientPort": "2181",
- "autopurge.purgeInterval": "24",
- "syncLimit": "5",
- "dataDir": "/grid/0/hadoop/zookeeper",
- "initLimit": "10",
- "tickTime": "2000",
+ "clientPort": "2181",
+ "autopurge.purgeInterval": "24",
+ "syncLimit": "5",
+ "dataDir": "/grid/0/hadoop/zookeeper",
+ "initLimit": "10",
+ "tickTime": "2000",
"autopurge.snapRetainCount": "30"
- },
+ },
"hadoop-policy": {
- "security.job.client.protocol.acl": "*",
- "security.job.task.protocol.acl": "*",
- "security.datanode.protocol.acl": "*",
- "security.namenode.protocol.acl": "*",
- "security.client.datanode.protocol.acl": "*",
- "security.inter.tracker.protocol.acl": "*",
- "security.refresh.usertogroups.mappings.protocol.acl": "hadoop",
- "security.client.protocol.acl": "*",
- "security.refresh.policy.protocol.acl": "hadoop",
- "security.admin.operations.protocol.acl": "hadoop",
+ "security.job.client.protocol.acl": "*",
+ "security.job.task.protocol.acl": "*",
+ "security.datanode.protocol.acl": "*",
+ "security.namenode.protocol.acl": "*",
+ "security.client.datanode.protocol.acl": "*",
+ "security.inter.tracker.protocol.acl": "*",
+ "security.refresh.usertogroups.mappings.protocol.acl": "hadoop",
+ "security.client.protocol.acl": "*",
+ "security.refresh.policy.protocol.acl": "hadoop",
+ "security.admin.operations.protocol.acl": "hadoop",
"security.inter.datanode.protocol.acl": "*"
- },
+ },
"hdfs-log4j": {
"content": "\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n\n\n# Define some default values that can be overridden by system properties\n# To change daemon root logger use hadoop_root_logger in hadoop-env\nhadoop.root.lo
gger=INFO,console\nhadoop.log.dir=.\nhadoop.log.file=hadoop.log\n\n\n# Define the root logger to the system property \"hadoop.root.logger\".\nlog4j.rootLogger=${hadoop.root.logger}, EventCounter\n\n# Logging Threshold\nlog4j.threshhold=ALL\n\n#\n# Daily Rolling File Appender\n#\n\nlog4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}\n\n# Rollver at midnight\nlog4j.appender.DRFA.DatePattern=.yyyy-MM-dd\n\n# 30-day backup\n#log4j.appender.DRFA.MaxBackupIndex=30\nlog4j.appender.DRFA.layout=org.apache.log4j.PatternLayout\n\n# Pattern format: Date LogLevel LoggerName LogMessage\nlog4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\n# Debugging Pattern format\n#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n\n\n\n#\n# console\n# Add \"console\" to rootlogger above if you want to use this\n#\n\nlog4j.appender.console=org.apache.log4j.ConsoleAppender\nlog4j.appender
.console.target=System.err\nlog4j.appender.console.layout=org.apache.log4j.PatternLayout\nlog4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n\n\n#\n# TaskLog Appender\n#\n\n#Default values\nhadoop.tasklog.taskid=null\nhadoop.tasklog.iscleanup=false\nhadoop.tasklog.noKeepSplits=4\nhadoop.tasklog.totalLogFileSize=100\nhadoop.tasklog.purgeLogSplits=true\nhadoop.tasklog.logsRetainHours=12\n\nlog4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender\nlog4j.appender.TLA.taskId=${hadoop.tasklog.taskid}\nlog4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}\nlog4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}\n\nlog4j.appender.TLA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\n\n#\n#Security audit appender\n#\nhadoop.security.logger=INFO,console\nhadoop.security.log.maxfilesize=256MB\nhadoop.security.log.maxbackupindex=20\nlog4j.category.SecurityLogger=${hadoop.security.logg
er}\nhadoop.security.log.file=SecurityAuth.audit\nlog4j.appender.DRFAS=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}\nlog4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout\nlog4j.appender.DRFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\nlog4j.appender.DRFAS.DatePattern=.yyyy-MM-dd\n\nlog4j.appender.RFAS=org.apache.log4j.RollingFileAppender\nlog4j.appender.RFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}\nlog4j.appender.RFAS.layout=org.apache.log4j.PatternLayout\nlog4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\nlog4j.appender.RFAS.MaxFileSize=${hadoop.security.log.maxfilesize}\nlog4j.appender.RFAS.MaxBackupIndex=${hadoop.security.log.maxbackupindex}\n\n#\n# hdfs audit logging\n#\nhdfs.audit.logger=INFO,console\nlog4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger}\nlog4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=
false\nlog4j.appender.DRFAAUDIT=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFAAUDIT.File=${hadoop.log.dir}/hdfs-audit.log\nlog4j.appender.DRFAAUDIT.layout=org.apache.log4j.PatternLayout\nlog4j.appender.DRFAAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n\nlog4j.appender.DRFAAUDIT.DatePattern=.yyyy-MM-dd\n\n#\n# NameNode metrics logging.\n# The default is to retain two namenode-metrics.log files up to 64MB each.\n#\nnamenode.metrics.logger=INFO,NullAppender\nlog4j.logger.NameNodeMetricsLog=${namenode.metrics.logger}\nlog4j.additivity.NameNodeMetricsLog=false\nlog4j.appender.NNMETRICSRFA=org.apache.log4j.RollingFileAppender\nlog4j.appender.NNMETRICSRFA.File=${hadoop.log.dir}/namenode-metrics.log\nlog4j.appender.NNMETRICSRFA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.NNMETRICSRFA.layout.ConversionPattern=%d{ISO8601} %m%n\nlog4j.appender.NNMETRICSRFA.MaxBackupIndex=1\nlog4j.appender.NNMETRICSRFA.MaxFileSize=64MB\n\n#\n# mapred audit logging\n#\nmapred
.audit.logger=INFO,console\nlog4j.logger.org.apache.hadoop.mapred.AuditLogger=${mapred.audit.logger}\nlog4j.additivity.org.apache.hadoop.mapred.AuditLogger=false\nlog4j.appender.MRAUDIT=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.MRAUDIT.File=${hadoop.log.dir}/mapred-audit.log\nlog4j.appender.MRAUDIT.layout=org.apache.log4j.PatternLayout\nlog4j.appender.MRAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n\nlog4j.appender.MRAUDIT.DatePattern=.yyyy-MM-dd\n\n#\n# Rolling File Appender\n#\n\nlog4j.appender.RFA=org.apache.log4j.RollingFileAppender\nlog4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}\n\n# Logfile size and and 30-day backups\nlog4j.appender.RFA.MaxFileSize=256MB\nlog4j.appender.RFA.MaxBackupIndex=10\n\nlog4j.appender.RFA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} - %m%n\nlog4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n\n\n\n# Custom Logging leve
ls\n\nhadoop.metrics.log.level=INFO\n#log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG\n#log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG\n#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG\nlog4j.logger.org.apache.hadoop.metrics2=${hadoop.metrics.log.level}\n\n# Jets3t library\nlog4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR\n\n#\n# Null Appender\n# Trap security logger on the hadoop client side\n#\nlog4j.appender.NullAppender=org.apache.log4j.varia.NullAppender\n\n#\n# Event Counter Appender\n# Sends counts of logging messages at different severity levels to Hadoop Metrics.\n#\nlog4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter\n\n# Removes \"deprecated\" messages\nlog4j.logger.org.apache.hadoop.conf.Configuration.deprecation=WARN\n\n#\n# HDFS block state change log from block manager\n#\n# Uncomment the following to suppress normal block state change\n# messages from BlockManager in NameNode.\n#log4j.logger.BlockStateCh
ange=WARN"
- },
+ },
"ranger-hdfs-plugin-properties": {
- "hadoop.rpc.protection": "authentication",
- "ranger-hdfs-plugin-enabled": "No",
- "REPOSITORY_CONFIG_USERNAME": "hadoop",
- "policy_user": "ambari-qa",
- "common.name.for.certificate": "",
+ "hadoop.rpc.protection": "authentication",
+ "ranger-hdfs-plugin-enabled": "No",
+ "REPOSITORY_CONFIG_USERNAME": "hadoop",
+ "policy_user": "ambari-qa",
+ "common.name.for.certificate": "",
"REPOSITORY_CONFIG_PASSWORD": "hadoop"
- },
+ },
"core-site": {
- "hadoop.proxyuser.root.hosts": "*",
- "io.serializations": "org.apache.hadoop.io.serializer.WritableSerialization",
- "fs.trash.interval": "360",
- "ha.failover-controller.active-standby-elector.zk.op.retries": "120",
- "hadoop.http.authentication.simple.anonymous.allowed": "true",
- "hadoop.security.authentication": "simple",
- "hadoop.proxyuser.root.groups": "*",
- "ipc.client.connection.maxidletime": "30000",
- "hadoop.security.key.provider.path": "",
- "mapreduce.jobtracker.webinterface.trusted": "false",
- "hadoop.security.authorization": "false",
- "net.topology.script.file.name": "/etc/hadoop/conf/topology_script.py",
- "ipc.server.tcpnodelay": "true",
- "ipc.client.connect.max.retries": "50",
- "hadoop.security.auth_to_local": "DEFAULT",
- "io.file.buffer.size": "131072",
- "hadoop.proxyuser.hdfs.hosts": "*",
- "hadoop.proxyuser.hdfs.groups": "*",
- "ipc.client.idlethreshold": "8000",
- "fs.defaultFS": "hdfs://c6401.ambari.apache.org:8020",
+ "hadoop.proxyuser.root.hosts": "*",
+ "io.serializations": "org.apache.hadoop.io.serializer.WritableSerialization",
+ "fs.trash.interval": "360",
+ "ha.failover-controller.active-standby-elector.zk.op.retries": "120",
+ "hadoop.http.authentication.simple.anonymous.allowed": "true",
+ "hadoop.security.authentication": "simple",
+ "hadoop.proxyuser.root.groups": "*",
+ "ipc.client.connection.maxidletime": "30000",
+ "hadoop.security.key.provider.path": "",
+ "mapreduce.jobtracker.webinterface.trusted": "false",
+ "hadoop.security.authorization": "false",
+ "net.topology.script.file.name": "/etc/hadoop/conf/topology_script.py",
+ "ipc.server.tcpnodelay": "true",
+ "ipc.client.connect.max.retries": "50",
+ "hadoop.security.auth_to_local": "DEFAULT",
+ "io.file.buffer.size": "131072",
+ "hadoop.proxyuser.hdfs.hosts": "*",
+ "hadoop.proxyuser.hdfs.groups": "*",
+ "ipc.client.idlethreshold": "8000",
+ "fs.defaultFS": "hdfs://c6401.ambari.apache.org:8020",
"io.compression.codecs": "org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec,org.apache.hadoop.io.compress.SnappyCodec"
- },
+ },
"hadoop-env": {
- "keyserver_port": "",
- "proxyuser_group": "users",
- "hdfs_user_nproc_limit": "65536",
- "hdfs_log_dir_prefix": "/var/log/hadoop",
- "hdfs_user_nofile_limit": "128000",
- "hdfs_user": "hdfs",
- "keyserver_host": " ",
- "namenode_opt_maxnewsize": "128m",
- "namenode_opt_maxpermsize": "256m",
- "content": "\n# Set Hadoop-specific environment variables here.\n\n# The only required environment variable is JAVA_HOME. All others are\n# optional. When running a distributed configuration it is best to\n# set JAVA_HOME in this file, so that it is correctly defined on\n# remote nodes.\n\n# The java implementation to use. Required.\nexport JAVA_HOME={{java_home}}\nexport HADOOP_HOME_WARN_SUPPRESS=1\n\n# Hadoop home directory\nexport HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n\n# Hadoop Configuration Directory\n\n{# this is different for HDP1 #}\n# Path to jsvc required by secure HDP 2.0 datanode\nexport JSVC_HOME={{jsvc_path}}\n\n\n# The maximum amount of heap to use, in MB. Default is 1000.\nexport HADOOP_HEAPSIZE=\"{{hadoop_heapsize}}\"\n\nexport HADOOP_NAMENODE_INIT_HEAPSIZE=\"-Xms{{namenode_heapsize}}\"\n\n# Extra Java runtime options. Empty by default.\nexport HADOOP_OPTS=\"-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}\"\n\n# Command specific options appende
d to HADOOP_OPTS when specified\nHADOOP_JOBTRACKER_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}\"\n\nHADOOP_TASKTRACKER_OPTS=\"-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}\"\n\n{% if java_version < 8 %}\nSHARED_HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSiz
e={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT\"\nexport HADOOP_NAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-namenode/bin/kill-name-node\\\" -Dorg.mortbay.jetty.Request.maxFormContentSize=-1 ${HADOOP_NAMENODE_OPTS}\"\nexport HADOOP_DATANODE_OPTS=\"-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -XX:PermSize=128m -XX:MaxPermSize=256m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms
{{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS}\"\n\nexport HADOOP_SECONDARYNAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-secondarynamenode/bin/kill-secondary-name-node\\\" ${HADOOP_SECONDARYNAMENODE_OPTS}\"\n\n# The following applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m -XX:MaxPermSize=512m $HADOOP_CLIENT_OPTS\"\n\n{% else %}\nSHARED_HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -Xms{{name
node_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT\"\nexport HADOOP_NAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-namenode/bin/kill-name-node\\\" -Dorg.mortbay.jetty.Request.maxFormContentSize=-1 ${HADOOP_NAMENODE_OPTS}\"\nexport HADOOP_DATANODE_OPTS=\"-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS}\"\n\nexport HADOOP_SECONDARYNAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-secondarynamenode/bin/kill-secondary-name-node\\\" ${HADOOP_SECONDARY
NAMENODE_OPTS}\"\n\n# The following applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS\"\n{% endif %}\n\nHADOOP_NFS3_OPTS=\"-Xmx{{nfsgateway_heapsize}}m -Dhadoop.security.logger=ERROR,DRFAS ${HADOOP_NFS3_OPTS}\"\nHADOOP_BALANCER_OPTS=\"-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}\"\n\n\n# On secure datanodes, user to run the datanode as after dropping privileges\nexport HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER:-{{hadoop_secure_dn_user}}}\n\n# Extra ssh options. Empty by default.\nexport HADOOP_SSH_OPTS=\"-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR\"\n\n# Where log files are stored. $HADOOP_HOME/logs by default.\nexport HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER\n\n# History server logs\nexport HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER\n\n# Where log files are stored in the secure data environment.\nexport HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_D
N_USER\n\n# File naming remote slave hosts. $HADOOP_HOME/conf/slaves by default.\n# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves\n\n# host:path where hadoop code should be rsync'd from. Unset by default.\n# export HADOOP_MASTER=master:/home/$USER/src/hadoop\n\n# Seconds to sleep between slave commands. Unset by default. This\n# can be useful in large clusters, where, e.g., slave rsyncs can\n# otherwise arrive faster than the master can service them.\n# export HADOOP_SLAVE_SLEEP=0.1\n\n# The directory where pid files are stored. /tmp by default.\nexport HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER\nexport HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# History server pid\nexport HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER\n\nYARN_RESOURCEMANAGER_OPTS=\"-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY\"\n\n# A string representing this instance of hadoop. $USER by default.\nexport HADOOP_IDENT_STRING=$USER\n\n# The schedul
ing priority for daemon processes. See 'man nice'.\n\n# export HADOOP_NICENESS=10\n\n# Add database libraries\nJAVA_JDBC_LIBS=\"\"\nif [ -d \"/usr/share/java\" ]; then\n for jarFile in `ls /usr/share/java | grep -E \"(mysql|ojdbc|postgresql|sqljdbc)\" 2>/dev/null`\n do\n JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\n done\nfi\n\n# Add libraries to the hadoop classpath - some may not need a colon as they already include it\nexport HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}\n\n# Setting path to hdfs command line\nexport HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}\n\n# Mostly required for hadoop 2.0\nexport JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}\n\nexport HADOOP_OPTS=\"-Dhdp.version=$HDP_VERSION $HADOOP_OPTS\"\n\n{% if is_datanode_max_locked_memory_set %}\n# Fix temporary bug, when ulimit from conf files is not picked up, without full relogin. \n# Makes sense to fix only when runing DN as root \nif [ \"$command\" == \"datanode\" ] && [ \"$EUID\" -eq 0 ] && [ -n \"$HADOOP_S
ECURE_DN_USER\" ]; then\n ulimit -l {{datanode_max_locked_memory}}\nfi\n{% endif %}",
- "namenode_heapsize": "1024m",
- "namenode_opt_newsize": "128m",
- "nfsgateway_heapsize": "1024",
- "dtnode_heapsize": "1024m",
- "hadoop_root_logger": "INFO,RFA",
- "hadoop_heapsize": "1024",
- "hadoop_pid_dir_prefix": "/var/run/hadoop",
- "namenode_opt_permsize": "128m",
+ "keyserver_port": "",
+ "proxyuser_group": "users",
+ "hdfs_user_nproc_limit": "65536",
+ "hdfs_log_dir_prefix": "/var/log/hadoop",
+ "hdfs_user_nofile_limit": "128000",
+ "hdfs_user": "hdfs",
+ "keyserver_host": " ",
+ "namenode_opt_maxnewsize": "128m",
+ "namenode_opt_maxpermsize": "256m",
+ "content": "\n# Set Hadoop-specific environment variables here.\n\n# The only required environment variable is JAVA_HOME. All others are\n# optional. When running a distributed configuration it is best to\n# set JAVA_HOME in this file, so that it is correctly defined on\n# remote nodes.\n\n# The java implementation to use. Required.\nexport JAVA_HOME={{java_home}}\nexport HADOOP_HOME_WARN_SUPPRESS=1\n\n# Hadoop home directory\nexport HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n\n# Hadoop Configuration Directory\n\n{# this is different for HDP1 #}\n# Path to jsvc required by secure HDP 2.0 datanode\nexport JSVC_HOME={{jsvc_path}}\n\n\n# The maximum amount of heap to use, in MB. Default is 1000.\nexport HADOOP_HEAPSIZE=\"{{hadoop_heapsize}}\"\n\nexport HADOOP_NAMENODE_INIT_HEAPSIZE=\"-Xms{{namenode_heapsize}}\"\n\n# Extra Java runtime options. Empty by default.\nexport HADOOP_OPTS=\"-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}\"\n\n# Command specific options appende
d to HADOOP_OPTS when specified\nHADOOP_JOBTRACKER_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}\"\n\nHADOOP_TASKTRACKER_OPTS=\"-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}\"\n\n{% if java_version < 8 %}\nSHARED_HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSiz
e={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT\"\nexport HADOOP_NAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-namenode/bin/kill-name-node\\\" -Dorg.mortbay.jetty.Request.maxFormContentSize=-1 ${HADOOP_NAMENODE_OPTS}\"\nexport HADOOP_DATANODE_OPTS=\"-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -XX:PermSize=128m -XX:MaxPermSize=256m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms
{{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS}\"\n\nexport HADOOP_SECONDARYNAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-secondarynamenode/bin/kill-secondary-name-node\\\" ${HADOOP_SECONDARYNAMENODE_OPTS}\"\n\n# The following applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m -XX:MaxPermSize=512m $HADOOP_CLIENT_OPTS\"\n\n{% else %}\nSHARED_HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -Xms{{name
node_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT\"\nexport HADOOP_NAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-namenode/bin/kill-name-node\\\" -Dorg.mortbay.jetty.Request.maxFormContentSize=-1 ${HADOOP_NAMENODE_OPTS}\"\nexport HADOOP_DATANODE_OPTS=\"-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS}\"\n\nexport HADOOP_SECONDARYNAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-secondarynamenode/bin/kill-secondary-name-node\\\" ${HADOOP_SECONDARY
NAMENODE_OPTS}\"\n\n# The following applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS\"\n{% endif %}\n\nHADOOP_NFS3_OPTS=\"-Xmx{{nfsgateway_heapsize}}m -Dhadoop.security.logger=ERROR,DRFAS ${HADOOP_NFS3_OPTS}\"\nHADOOP_BALANCER_OPTS=\"-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}\"\n\n\n# On secure datanodes, user to run the datanode as after dropping privileges\nexport HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER:-{{hadoop_secure_dn_user}}}\n\n# Extra ssh options. Empty by default.\nexport HADOOP_SSH_OPTS=\"-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR\"\n\n# Where log files are stored. $HADOOP_HOME/logs by default.\nexport HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER\n\n# History server logs\nexport HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER\n\n# Where log files are stored in the secure data environment.\nexport HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_D
N_USER\n\n# File naming remote slave hosts. $HADOOP_HOME/conf/slaves by default.\n# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves\n\n# host:path where hadoop code should be rsync'd from. Unset by default.\n# export HADOOP_MASTER=master:/home/$USER/src/hadoop\n\n# Seconds to sleep between slave commands. Unset by default. This\n# can be useful in large clusters, where, e.g., slave rsyncs can\n# otherwise arrive faster than the master can service them.\n# export HADOOP_SLAVE_SLEEP=0.1\n\n# The directory where pid files are stored. /tmp by default.\nexport HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER\nexport HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# History server pid\nexport HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER\n\nYARN_RESOURCEMANAGER_OPTS=\"-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY\"\n\n# A string representing this instance of hadoop. $USER by default.\nexport HADOOP_IDENT_STRING=$USER\n\n# The schedul
ing priority for daemon processes. See 'man nice'.\n\n# export HADOOP_NICENESS=10\n\n# Add database libraries\nJAVA_JDBC_LIBS=\"\"\nif [ -d \"/usr/share/java\" ]; then\n for jarFile in `ls /usr/share/java | grep -E \"(mysql|ojdbc|postgresql|sqljdbc)\" 2>/dev/null`\n do\n JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\n done\nfi\n\n# Add libraries to the hadoop classpath - some may not need a colon as they already include it\nexport HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}\n\n# Setting path to hdfs command line\nexport HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}\n\n# Mostly required for hadoop 2.0\nexport JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}\n\nexport HADOOP_OPTS=\"-Dhdp.version=$HDP_VERSION $HADOOP_OPTS\"\n\n{% if is_datanode_max_locked_memory_set %}\n# Fix temporary bug, when ulimit from conf files is not picked up, without full relogin. \n# Makes sense to fix only when runing DN as root \nif [ \"$command\" == \"datanode\" ] && [ \"$EUID\" -eq 0 ] && [ -n \"$HADOOP_S
ECURE_DN_USER\" ]; then\n ulimit -l {{datanode_max_locked_memory}}\nfi\n{% endif %}",
+ "namenode_heapsize": "1024m",
+ "namenode_opt_newsize": "128m",
+ "nfsgateway_heapsize": "1024",
+ "dtnode_heapsize": "1024m",
+ "hadoop_root_logger": "INFO,RFA",
+ "hadoop_heapsize": "1024",
+ "hadoop_pid_dir_prefix": "/var/run/hadoop",
+ "namenode_opt_permsize": "128m",
"hdfs_tmp_dir": "/tmp"
- },
+ },
"zookeeper-log4j": {
"content": "\n#\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n#\n#\n\n#\n# ZooKeeper Logging Configuration\n#\n\n# DEFAULT: console appender only\nlog4j.rootLogger=INFO, CONSOLE\n\n# Example with rolling log file\n#log4
j.rootLogger=DEBUG, CONSOLE, ROLLINGFILE\n\n# Example with rolling log file and tracing\n#log4j.rootLogger=TRACE, CONSOLE, ROLLINGFILE, TRACEFILE\n\n#\n# Log INFO level and above messages to the console\n#\nlog4j.appender.CONSOLE=org.apache.log4j.ConsoleAppender\nlog4j.appender.CONSOLE.Threshold=INFO\nlog4j.appender.CONSOLE.layout=org.apache.log4j.PatternLayout\nlog4j.appender.CONSOLE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L] - %m%n\n\n#\n# Add ROLLINGFILE to rootLogger to get log file output\n# Log DEBUG level and above messages to a log file\nlog4j.appender.ROLLINGFILE=org.apache.log4j.RollingFileAppender\nlog4j.appender.ROLLINGFILE.Threshold=DEBUG\nlog4j.appender.ROLLINGFILE.File=zookeeper.log\n\n# Max log file size of 10MB\nlog4j.appender.ROLLINGFILE.MaxFileSize=10MB\n# uncomment the next line to limit number of backup files\n#log4j.appender.ROLLINGFILE.MaxBackupIndex=10\n\nlog4j.appender.ROLLINGFILE.layout=org.apache.log4j.PatternLayout\nlog4j.appender.ROLLIN
GFILE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L] - %m%n\n\n\n#\n# Add TRACEFILE to rootLogger to get log file output\n# Log DEBUG level and above messages to a log file\nlog4j.appender.TRACEFILE=org.apache.log4j.FileAppender\nlog4j.appender.TRACEFILE.Threshold=TRACE\nlog4j.appender.TRACEFILE.File=zookeeper_trace.log\n\nlog4j.appender.TRACEFILE.layout=org.apache.log4j.PatternLayout\n### Notice we are including log4j's NDC here (%x)\nlog4j.appender.TRACEFILE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L][%x] - %m%n"
- },
+ },
"ssl-server": {
- "ssl.server.keystore.location": "/etc/security/serverKeys/keystore.jks",
- "ssl.server.keystore.keypassword": "bigdata",
- "ssl.server.truststore.location": "/etc/security/serverKeys/all.jks",
- "ssl.server.keystore.password": "bigdata",
- "ssl.server.truststore.password": "bigdata",
- "ssl.server.truststore.type": "jks",
- "ssl.server.keystore.type": "jks",
+ "ssl.server.keystore.location": "/etc/security/serverKeys/keystore.jks",
+ "ssl.server.keystore.keypassword": "bigdata",
+ "ssl.server.truststore.location": "/etc/security/serverKeys/all.jks",
+ "ssl.server.keystore.password": "bigdata",
+ "ssl.server.truststore.password": "bigdata",
+ "ssl.server.truststore.type": "jks",
+ "ssl.server.keystore.type": "jks",
"ssl.server.truststore.reload.interval": "10000"
- },
- "ranger-site": {},
+ },
+ "ranger-site": {},
"admin-log4j": {
"content": "\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n\nlog4j.rootLogger = warn,xa_log_appender\n\n\n# xa_logger\nlog4j.appender.xa_log_appender=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.xa_log_app
ender.file=${logdir}/xa_portal.log\nlog4j.appender.xa_log_appender.datePattern='.'yyyy-MM-dd\nlog4j.appender.xa_log_appender.append=true\nlog4j.appender.xa_log_appender.layout=org.apache.log4j.PatternLayout\nlog4j.appender.xa_log_appender.layout.ConversionPattern=%d [%t] %-5p %C{6} (%F:%L) - %m%n\n# xa_log_appender : category and additivity\nlog4j.category.org.springframework=warn,xa_log_appender\nlog4j.additivity.org.springframework=false\n\nlog4j.category.org.apache.ranger=info,xa_log_appender\nlog4j.additivity.org.apache.ranger=false\n\nlog4j.category.xa=info,xa_log_appender\nlog4j.additivity.xa=false\n\n# perf_logger\nlog4j.appender.perf_appender=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.perf_appender.file=${logdir}/ranger_admin_perf.log\nlog4j.appender.perf_appender.datePattern='.'yyyy-MM-dd\nlog4j.appender.perf_appender.append=true\nlog4j.appender.perf_appender.layout=org.apache.log4j.PatternLayout\nlog4j.appender.perf_appender.layout.ConversionPattern=%d [%t]
%m%n\n\n\n# sql_appender\nlog4j.appender.sql_appender=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.sql_appender.file=${logdir}/xa_portal_sql.log\nlog4j.appender.sql_appender.datePattern='.'yyyy-MM-dd\nlog4j.appender.sql_appender.append=true\nlog4j.appender.sql_appender.layout=org.apache.log4j.PatternLayout\nlog4j.appender.sql_appender.layout.ConversionPattern=%d [%t] %-5p %C{6} (%F:%L) - %m%n\n\n# sql_appender : category and additivity\nlog4j.category.org.hibernate.SQL=warn,sql_appender\nlog4j.additivity.org.hibernate.SQL=false\n\nlog4j.category.jdbc.sqlonly=fatal,sql_appender\nlog4j.additivity.jdbc.sqlonly=false\n\nlog4j.category.jdbc.sqltiming=warn,sql_appender\nlog4j.additivity.jdbc.sqltiming=false\n\nlog4j.category.jdbc.audit=fatal,sql_appender\nlog4j.additivity.jdbc.audit=false\n\nlog4j.category.jdbc.resultset=fatal,sql_appender\nlog4j.additivity.jdbc.resultset=false\n\nlog4j.category.jdbc.connection=fatal,sql_appender\nlog4j.additivity.jdbc.connection=false"
- },
+ },
"tagsync-log4j": {
"content": "\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n\nlog4j.rootLogger = info,logFile\n\n# logFile\nlog4j.appender.logFile=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.logFile.file=${logdir}/tagsync
.log\nlog4j.appender.logFile.datePattern='.'yyyy-MM-dd\nlog4j.appender.logFile.layout=org.apache.log4j.PatternLayout\nlog4j.appender.logFile.layout.ConversionPattern=%d{dd MMM yyyy HH:mm:ss} %5p %c{1} [%t] - %L %m%n\n\n# console\nlog4j.appender.console=org.apache.log4j.ConsoleAppender\nlog4j.appender.console.Target=System.out\nlog4j.appender.console.layout=org.apache.log4j.PatternLayout\nlog4j.appender.console.layout.ConversionPattern=%d{dd MMM yyyy HH:mm:ss} %5p %c{1} [%t] - %L %m%n"
- },
+ },
"ranger-hdfs-security": {
- "ranger.plugin.hdfs.policy.source.impl": "org.apache.ranger.admin.client.RangerAdminRESTClient",
- "ranger.plugin.hdfs.service.name": "{{repo_name}}",
- "ranger.plugin.hdfs.policy.cache.dir": "/etc/ranger/{{repo_name}}/policycache",
- "ranger.plugin.hdfs.policy.pollIntervalMs": "30000",
- "ranger.plugin.hdfs.policy.rest.url": "{{policymgr_mgr_url}}",
- "ranger.plugin.hdfs.policy.rest.ssl.config.file": "/etc/hadoop/conf/ranger-policymgr-ssl.xml",
+ "ranger.plugin.hdfs.policy.source.impl": "org.apache.ranger.admin.client.RangerAdminRESTClient",
+ "ranger.plugin.hdfs.service.name": "{{repo_name}}",
+ "ranger.plugin.hdfs.policy.cache.dir": "/etc/ranger/{{repo_name}}/policycache",
+ "ranger.plugin.hdfs.policy.pollIntervalMs": "30000",
+ "ranger.plugin.hdfs.policy.rest.url": "{{policymgr_mgr_url}}",
+ "ranger.plugin.hdfs.policy.rest.ssl.config.file": "/etc/hadoop/conf/ranger-policymgr-ssl.xml",
"xasecure.add-hadoop-authorization": "true"
- },
- "usersync-properties": {},
+ },
+ "usersync-properties": {},
"zookeeper-env": {
- "zk_log_dir": "/var/log/zookeeper",
- "content": "\nexport JAVA_HOME={{java64_home}}\nexport ZOOKEEPER_HOME={{zk_home}}\nexport ZOO_LOG_DIR={{zk_log_dir}}\nexport ZOOPIDFILE={{zk_pid_file}}\nexport SERVER_JVMFLAGS={{zk_server_heapsize}}\nexport JAVA=$JAVA_HOME/bin/java\nexport CLASSPATH=$CLASSPATH:/usr/share/zookeeper/*\n\n{% if security_enabled %}\nexport SERVER_JVMFLAGS=\"$SERVER_JVMFLAGS -Djava.security.auth.login.config={{zk_server_jaas_file}}\"\nexport CLIENT_JVMFLAGS=\"$CLIENT_JVMFLAGS -Djava.security.auth.login.config={{zk_client_jaas_file}}\"\n{% endif %}",
- "zk_server_heapsize": "1024m",
- "zk_pid_dir": "/var/run/zookeeper",
+ "zk_log_dir": "/var/log/zookeeper",
+ "content": "\nexport JAVA_HOME={{java64_home}}\nexport ZOOKEEPER_HOME={{zk_home}}\nexport ZOO_LOG_DIR={{zk
<TRUNCATED>
[10/18] ambari git commit: AMBARI-21430 - Allow Multiple Versions of
Stack Tools to Co-Exist (jonathanhurley)
Posted by rl...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/f33a250c/ambari-server/src/main/resources/stacks/PERF/1.0/properties/stack_tools.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/properties/stack_tools.json b/ambari-server/src/main/resources/stacks/PERF/1.0/properties/stack_tools.json
index 535b9d9..62562f8 100644
--- a/ambari-server/src/main/resources/stacks/PERF/1.0/properties/stack_tools.json
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/properties/stack_tools.json
@@ -1,4 +1,14 @@
{
- "stack_selector": ["distro-select", "/usr/bin/distro-select", "distro-select"],
- "conf_selector": ["conf-select", "/usr/bin/conf-select", "conf-select"]
-}
\ No newline at end of file
+ "PERF": {
+ "stack_selector": [
+ "distro-select",
+ "/usr/bin/distro-select",
+ "distro-select"
+ ],
+ "conf_selector": [
+ "conf-select",
+ "/usr/bin/conf-select",
+ "conf-select"
+ ]
+ }
+}
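The new layout above nests each stack's selector definitions under the stack name, so tool definitions for several stacks can co-exist in one stack_tools.json. As an illustration only (Gson and the local file path are assumptions of this sketch, not part of the patch), reading the PERF entry might look like:

import java.io.FileReader;
import java.lang.reflect.Type;
import java.util.List;
import java.util.Map;

import com.google.gson.Gson;
import com.google.gson.reflect.TypeToken;

public class StackToolsReader {
  public static void main(String[] args) throws Exception {
    // Top level is now keyed by stack name (e.g. "PERF"), each entry holding
    // its own stack_selector and conf_selector triples.
    Type type = new TypeToken<Map<String, Map<String, List<String>>>>() {}.getType();
    try (FileReader reader = new FileReader("stack_tools.json")) {
      Map<String, Map<String, List<String>>> stackTools = new Gson().fromJson(reader, type);
      List<String> stackSelector = stackTools.get("PERF").get("stack_selector");
      System.out.println("selector binary: " + stackSelector.get(1)); // /usr/bin/distro-select
    }
  }
}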
http://git-wip-us.apache.org/repos/asf/ambari/blob/f33a250c/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessorTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessorTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessorTest.java
index ca579ea..bade238 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessorTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessorTest.java
@@ -46,9 +46,11 @@ import org.apache.ambari.server.controller.KerberosHelper;
import org.apache.ambari.server.controller.StackConfigurationResponse;
import org.apache.ambari.server.state.Cluster;
import org.apache.ambari.server.state.Clusters;
+import org.apache.ambari.server.state.ConfigHelper;
import org.apache.ambari.server.state.PropertyDependencyInfo;
import org.apache.ambari.server.state.PropertyInfo;
import org.apache.ambari.server.state.ServiceInfo;
+import org.apache.ambari.server.state.StackId;
import org.apache.ambari.server.state.ValueAttributesInfo;
import org.apache.ambari.server.state.kerberos.KerberosDescriptor;
import org.apache.ambari.server.topology.AdvisedConfiguration;
@@ -66,6 +68,7 @@ import org.apache.ambari.server.topology.HostGroupInfo;
import org.apache.ambari.server.topology.InvalidTopologyException;
import org.apache.ambari.server.topology.TopologyRequest;
import org.apache.commons.lang.StringUtils;
+import org.easymock.EasyMock;
import org.easymock.EasyMockRule;
import org.easymock.EasyMockSupport;
import org.easymock.Mock;
@@ -95,6 +98,10 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
private static final Configuration EMPTY_CONFIG = new Configuration(Collections.<String, Map<String, String>>emptyMap(), Collections.<String, Map<String, Map<String, String>>>emptyMap());
private final Map<String, Collection<String>> serviceComponents = new HashMap<>();
+ private final Map<String, Map<String, String>> stackProperties = new HashMap<>();
+
+ private final String STACK_NAME = "testStack";
+ private final String STACK_VERSION = "1";
@Rule
public EasyMockRule mocks = new EasyMockRule(this);
@@ -129,13 +136,16 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
@Mock
private TopologyRequest topologyRequestMock;
+ @Mock(type = MockType.NICE)
+ private ConfigHelper configHelper;
+
@Before
public void init() throws Exception {
expect(bp.getStack()).andReturn(stack).anyTimes();
expect(bp.getName()).andReturn("test-bp").anyTimes();
- expect(stack.getName()).andReturn("testStack").anyTimes();
- expect(stack.getVersion()).andReturn("1").anyTimes();
+ expect(stack.getName()).andReturn(STACK_NAME).atLeastOnce();
+ expect(stack.getVersion()).andReturn(STACK_VERSION).atLeastOnce();
// return false for all components since for this test we don't care about the value
expect(stack.isMasterComponent((String) anyObject())).andReturn(false).anyTimes();
expect(stack.getConfigurationPropertiesWithMetadata(anyObject(String.class), anyObject(String.class))).andReturn(Collections.<String, Stack.ConfigProperty>emptyMap()).anyTimes();
@@ -225,6 +235,11 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
Set<String> emptySet = Collections.emptySet();
expect(stack.getExcludedConfigurationTypes(anyObject(String.class))).andReturn(emptySet).anyTimes();
+
+ expect(ambariContext.getConfigHelper()).andReturn(configHelper).anyTimes();
+ expect(configHelper.getDefaultStackProperties(
+ EasyMock.eq(new StackId(STACK_NAME, STACK_VERSION)))).andReturn(stackProperties).anyTimes();
+
expect(ambariContext.isClusterKerberosEnabled(1)).andReturn(true).once();
expect(ambariContext.getClusterName(1L)).andReturn("clusterName").anyTimes();
PowerMock.mockStatic(AmbariServer.class);
@@ -234,14 +249,14 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
expect(controller.getKerberosHelper()).andReturn(kerberosHelper).anyTimes();
expect(controller.getClusters()).andReturn(clusters).anyTimes();
expect(kerberosHelper.getKerberosDescriptor(cluster)).andReturn(kerberosDescriptor).anyTimes();
- Set<String> properties = new HashSet<String>();
+ Set<String> properties = new HashSet<>();
properties.add("core-site/hadoop.security.auth_to_local");
expect(kerberosDescriptor.getAllAuthToLocalProperties()).andReturn(properties).anyTimes();
}
@After
public void tearDown() {
- reset(bp, serviceInfo, stack, ambariContext);
+ reset(bp, serviceInfo, stack, ambariContext, configHelper);
}
@Test
@@ -6322,13 +6337,16 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
topology.getAdvisedConfigurations().putAll(createAdvisedConfigMap());
topology.setConfigRecommendationStrategy(ConfigRecommendationStrategy.ONLY_STACK_DEFAULTS_APPLY);
BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology);
+
reset(stack);
+ expect(stack.getName()).andReturn(STACK_NAME).anyTimes();
+ expect(stack.getVersion()).andReturn(STACK_VERSION).anyTimes();
expect(stack.getConfiguration(bp.getServices())).andReturn(createStackDefaults()).anyTimes();
Set<String> emptySet = Collections.emptySet();
expect(stack.getExcludedConfigurationTypes(anyObject(String.class))).andReturn(emptySet).anyTimes();
-
replay(stack);
+
// WHEN
Set<String> configTypeUpdated = configProcessor.doUpdateForClusterCreate();
// THEN
@@ -6379,13 +6397,17 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
topology.getAdvisedConfigurations().putAll(createAdvisedConfigMap());
topology.setConfigRecommendationStrategy(ConfigRecommendationStrategy.ONLY_STACK_DEFAULTS_APPLY);
BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology);
+
reset(stack);
+ expect(stack.getName()).andReturn(STACK_NAME).anyTimes();
+ expect(stack.getVersion()).andReturn(STACK_VERSION).anyTimes();
expect(stack.getConfiguration(bp.getServices())).andReturn(createStackDefaults()).anyTimes();
Set<String> emptySet = Collections.emptySet();
expect(stack.getExcludedConfigurationTypes(anyObject(String.class))).andReturn(emptySet).anyTimes();
replay(stack);
+
// WHEN
configProcessor.doUpdateForClusterCreate();
// THEN
@@ -8050,6 +8072,10 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
@Test
public void testValuesTrimming() throws Exception {
reset(stack);
+
+ expect(stack.getName()).andReturn(STACK_NAME).anyTimes();
+ expect(stack.getVersion()).andReturn(STACK_VERSION).anyTimes();
+
Map<String, Map<String, String>> properties = new HashMap<>();
Map<String, String> hdfsSite = new HashMap<>();
@@ -8073,6 +8099,7 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
new StackConfigurationResponse(null, null, null, null, "hdfs-site", null, Collections.singleton(PropertyInfo.PropertyType.PASSWORD), null, null, null)));
propertyConfigs.put("test.host", new Stack.ConfigProperty(
new StackConfigurationResponse(null, null, null, null, "hdfs-site", null, null, null, valueAttributesInfoHost, null)));
+
expect(stack.getServiceForConfigType("hdfs-site")).andReturn("HDFS").anyTimes();
expect(stack.getConfigurationPropertiesWithMetadata("HDFS", "hdfs-site")).andReturn(propertyConfigs).anyTimes();
@@ -8144,7 +8171,7 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
throws InvalidTopologyException {
- replay(stack, serviceInfo, ambariContext, controller, kerberosHelper, kerberosDescriptor, clusters, cluster);
+ replay(stack, serviceInfo, ambariContext, configHelper, controller, kerberosHelper, kerberosDescriptor, clusters, cluster);
Map<String, HostGroupInfo> hostGroupInfo = new HashMap<>();
Collection<String> allServices = new HashSet<>();
@@ -8207,7 +8234,7 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
this.name = name;
this.components = components;
this.hosts = hosts;
- this.configuration = new Configuration(Collections.<String, Map<String, String>>emptyMap(),
+ configuration = new Configuration(Collections.<String, Map<String, String>>emptyMap(),
Collections.<String, Map<String, Map<String, String>>>emptyMap());
}
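Several of the test changes in this patch follow the same pattern: a NICE ConfigHelper mock is wired into the AmbariContext and getDefaultStackProperties is stubbed for the test stack id. A stripped-down, hypothetical test showing that EasyMock lifecycle (the class name and the plain createNiceMock call are sketch-only; the real tests use @Mock(type = MockType.NICE) and PowerMock.replay):

import static org.easymock.EasyMock.createNiceMock;
import static org.easymock.EasyMock.expect;
import static org.easymock.EasyMock.replay;
import static org.easymock.EasyMock.verify;

import java.util.HashMap;
import java.util.Map;

import org.apache.ambari.server.state.ConfigHelper;
import org.apache.ambari.server.state.StackId;
import org.junit.Test;

public class ConfigHelperStubSketchTest {

  @Test
  public void defaultStackPropertiesAreStubbed() throws Exception {
    // NICE mock: unstubbed calls return default values instead of failing.
    ConfigHelper configHelper = createNiceMock(ConfigHelper.class);
    Map<String, Map<String, String>> stackProperties = new HashMap<>();

    // Equality matching on the StackId argument, any number of invocations.
    expect(configHelper.getDefaultStackProperties(new StackId("testStack", "1")))
        .andReturn(stackProperties).anyTimes();
    replay(configHelper);

    // ... exercise the code under test, which calls getDefaultStackProperties ...

    verify(configHelper);
  }
}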
http://git-wip-us.apache.org/repos/asf/ambari/blob/f33a250c/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProviderTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProviderTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProviderTest.java
index 32a5358..39aee82 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProviderTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProviderTest.java
@@ -1414,8 +1414,8 @@ public class ClusterStackVersionResourceProviderTest {
expect(cluster.getClusterId()).andReturn(1L).anyTimes();
expect(cluster.getHosts()).andReturn(hostsForCluster.values()).atLeastOnce();
expect(cluster.getServices()).andReturn(serviceMap).anyTimes();
- expect(cluster.getServiceComponentHosts(anyObject(String.class))).andReturn(
- serviceComponentHosts).anyTimes();
+ expect(cluster.getServiceComponentHosts(anyObject(String.class))).andReturn(serviceComponentHosts).anyTimes();
+ expect(cluster.getCurrentStackVersion()).andReturn(stackId).atLeastOnce();
expect(repositoryVersionDAOMock.findByStackAndVersion(anyObject(StackId.class),
anyObject(String.class))).andReturn(repoVersionEntity);
http://git-wip-us.apache.org/repos/asf/ambari/blob/f33a250c/ambari-server/src/test/java/org/apache/ambari/server/topology/ClusterConfigurationRequestTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/topology/ClusterConfigurationRequestTest.java b/ambari-server/src/test/java/org/apache/ambari/server/topology/ClusterConfigurationRequestTest.java
index 8b08dc4..5535256 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/topology/ClusterConfigurationRequestTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/topology/ClusterConfigurationRequestTest.java
@@ -50,6 +50,8 @@ import org.apache.ambari.server.controller.internal.Stack;
import org.apache.ambari.server.serveraction.kerberos.KerberosInvalidConfigurationException;
import org.apache.ambari.server.state.Cluster;
import org.apache.ambari.server.state.Clusters;
+import org.apache.ambari.server.state.ConfigHelper;
+import org.apache.ambari.server.state.StackId;
import org.easymock.Capture;
import org.easymock.CaptureType;
import org.easymock.EasyMock;
@@ -103,6 +105,13 @@ public class ClusterConfigurationRequestTest {
@Mock(type = MockType.NICE)
private KerberosHelper kerberosHelper;
+ @Mock(type = MockType.NICE)
+ private ConfigHelper configHelper;
+
+ private final String STACK_NAME = "testStack";
+ private final String STACK_VERSION = "1";
+ private final Map<String, Map<String, String>> stackProperties = new HashMap<>();
+
/**
* testConfigType config type should be in updatedConfigTypes, as no custom property in Blueprint
* ==> Kerberos config property should be updated
@@ -221,6 +230,8 @@ public class ClusterConfigurationRequestTest {
expect(clusters.getCluster("testCluster")).andReturn(cluster).anyTimes();
expect(blueprint.getStack()).andReturn(stack).anyTimes();
+ expect(stack.getName()).andReturn(STACK_NAME).anyTimes();
+ expect(stack.getVersion()).andReturn(STACK_VERSION).anyTimes();
expect(stack.getServiceForConfigType("testConfigType")).andReturn("KERBEROS").anyTimes();
expect(stack.getAllConfigurationTypes(anyString())).andReturn(Collections.singletonList("testConfigType")
).anyTimes();
@@ -246,6 +257,7 @@ public class ClusterConfigurationRequestTest {
expect(blueprint.getComponents("KERBEROS")).andReturn(kerberosComponents).anyTimes();
expect(blueprint.getComponents("ZOOKEPER")).andReturn(zookeeperComponents).anyTimes();
+ expect(topology.getAmbariContext()).andReturn(ambariContext).anyTimes();
expect(topology.getConfigRecommendationStrategy()).andReturn(ConfigRecommendationStrategy.NEVER_APPLY).anyTimes();
expect(topology.getBlueprint()).andReturn(blueprint).anyTimes();
expect(blueprint.isValidConfigType("testConfigType")).andReturn(true).anyTimes();
@@ -256,10 +268,14 @@ public class ClusterConfigurationRequestTest {
expect(topology.getHostGroupsForComponent(anyString())).andReturn(Collections.<String>emptyList())
.anyTimes();
- expect(ambariContext.getClusterName(Long.valueOf(1))).andReturn("testCluster").anyTimes();
+ expect(ambariContext.getConfigHelper()).andReturn(configHelper).anyTimes();
+ expect(ambariContext.getClusterName(Long.valueOf(1))).andReturn("testCluster").anyTimes();
expect(ambariContext.createConfigurationRequests(EasyMock.<Map<String, Object>>anyObject())).andReturn(Collections
.<ConfigurationRequest>emptyList()).anyTimes();
+ expect(configHelper.getDefaultStackProperties(
+ EasyMock.eq(new StackId(STACK_NAME, STACK_VERSION)))).andReturn(stackProperties).anyTimes();
+
if (kerberosConfig == null) {
kerberosConfig = new HashMap<>();
Map<String, String> properties = new HashMap<>();
@@ -277,15 +293,14 @@ public class ClusterConfigurationRequestTest {
(captureUpdatedConfigTypes));
expectLastCall();
- PowerMock.replay(stack, blueprint, topology, controller, clusters, kerberosHelper, ambariContext,
- AmbariContext
- .class);
+ PowerMock.replay(stack, blueprint, topology, controller, clusters, kerberosHelper,
+ ambariContext, AmbariContext.class, configHelper);
ClusterConfigurationRequest clusterConfigurationRequest = new ClusterConfigurationRequest(
ambariContext, topology, false, stackAdvisorBlueprintProcessor, true);
clusterConfigurationRequest.process();
- verify(blueprint, topology, ambariContext, controller, kerberosHelper);
+ verify(blueprint, topology, ambariContext, controller, kerberosHelper, configHelper);
String clusterName = captureClusterName.getValue();
@@ -308,8 +323,9 @@ public class ClusterConfigurationRequestTest {
expect(clusters.getCluster("testCluster")).andReturn(cluster).anyTimes();
expect(blueprint.getStack()).andReturn(stack).anyTimes();
- expect(stack.getAllConfigurationTypes(anyString())).andReturn(Collections.singletonList("testConfigType")
- ).anyTimes();
+ expect(stack.getName()).andReturn(STACK_NAME).anyTimes();
+ expect(stack.getVersion()).andReturn(STACK_VERSION).anyTimes();
+ expect(stack.getAllConfigurationTypes(anyString())).andReturn(Collections.<String>singletonList("testConfigType")).anyTimes();
expect(stack.getExcludedConfigurationTypes(anyString())).andReturn(Collections.<String>emptySet()).anyTimes();
expect(stack.getConfigurationPropertiesWithMetadata(anyString(), anyString())).andReturn(Collections.<String,
Stack.ConfigProperty>emptyMap()).anyTimes();
@@ -331,25 +347,29 @@ public class ClusterConfigurationRequestTest {
expect(blueprint.getComponents("KERBEROS")).andReturn(kerberosComponents).anyTimes();
expect(blueprint.getComponents("ZOOKEPER")).andReturn(zookeeperComponents).anyTimes();
+ expect(topology.getAmbariContext()).andReturn(ambariContext).anyTimes();
expect(topology.getConfigRecommendationStrategy()).andReturn(ConfigRecommendationStrategy.NEVER_APPLY).anyTimes();
expect(topology.getBlueprint()).andReturn(blueprint).anyTimes();
expect(topology.getConfiguration()).andReturn(stackConfig).anyTimes();
expect(topology.getHostGroupInfo()).andReturn(Collections.<String, HostGroupInfo>emptyMap()).anyTimes();
expect(topology.getClusterId()).andReturn(Long.valueOf(1)).anyTimes();
+
+ expect(ambariContext.getConfigHelper()).andReturn(configHelper).anyTimes();
expect(ambariContext.getClusterName(Long.valueOf(1))).andReturn("testCluster").anyTimes();
expect(ambariContext.createConfigurationRequests(EasyMock.<Map<String, Object>>anyObject())).andReturn(Collections
.<ConfigurationRequest>emptyList()).anyTimes();
+ expect(configHelper.getDefaultStackProperties(
+ EasyMock.eq(new StackId(STACK_NAME, STACK_VERSION)))).andReturn(stackProperties).anyTimes();
PowerMock.replay(stack, blueprint, topology, controller, clusters, ambariContext,
- AmbariContext
- .class);
+ AmbariContext.class, configHelper);
ClusterConfigurationRequest clusterConfigurationRequest = new ClusterConfigurationRequest(
ambariContext, topology, false, stackAdvisorBlueprintProcessor);
clusterConfigurationRequest.process();
- verify(blueprint, topology, ambariContext, controller);
+ verify(blueprint, topology, ambariContext, controller, configHelper);
}
@@ -365,6 +385,7 @@ public class ClusterConfigurationRequestTest {
hg1.setConfiguration(createConfigurationsForHostGroup());
hostGroupInfoMap.put("hg1", hg1);
+ expect(topology.getAmbariContext()).andReturn(ambariContext).anyTimes();
expect(topology.getConfiguration()).andReturn(configuration).anyTimes();
expect(topology.getBlueprint()).andReturn(blueprint).anyTimes();
expect(topology.getHostGroupInfo()).andReturn(hostGroupInfoMap);
@@ -377,7 +398,12 @@ public class ClusterConfigurationRequestTest {
expect(blueprint.isValidConfigType("cluster-env")).andReturn(true).anyTimes();
expect(blueprint.isValidConfigType("global")).andReturn(true).anyTimes();
- EasyMock.replay(stack, blueprint, topology);
+ expect(ambariContext.getConfigHelper()).andReturn(configHelper).anyTimes();
+
+ expect(configHelper.getDefaultStackProperties(
+ EasyMock.eq(new StackId(STACK_NAME, STACK_VERSION)))).andReturn(stackProperties).anyTimes();
+
+ EasyMock.replay(stack, blueprint, topology, ambariContext, configHelper);
// WHEN
new ClusterConfigurationRequest(ambariContext, topology, false, stackAdvisorBlueprintProcessor);
// THEN
@@ -388,7 +414,7 @@ public class ClusterConfigurationRequestTest {
assertFalse("SPARK service not present in topology host group config thus 'spark-env' config type should be removed from config.", hg1.getConfiguration().getFullAttributes().containsKey("spark-env"));
assertTrue("HDFS service is present in topology host group config thus 'hdfs-site' config type should be left in the config.", hg1.getConfiguration().getFullAttributes().containsKey("hdfs-site"));
- verify(stack, blueprint, topology);
+ verify(stack, blueprint, topology, ambariContext, configHelper);
}
@Test
@@ -409,6 +435,7 @@ public class ClusterConfigurationRequestTest {
hg1.setConfiguration(createConfigurationsForHostGroup());
hostGroupInfoMap.put("hg1", hg1);
+ expect(topology.getAmbariContext()).andReturn(ambariContext).anyTimes();
expect(topology.getConfiguration()).andReturn(configuration).anyTimes();
expect(topology.getBlueprint()).andReturn(blueprint).anyTimes();
expect(topology.getHostGroupInfo()).andReturn(hostGroupInfoMap);
@@ -419,7 +446,12 @@ public class ClusterConfigurationRequestTest {
expect(blueprint.isValidConfigType("cluster-env")).andReturn(true).anyTimes();
expect(blueprint.isValidConfigType("global")).andReturn(true).anyTimes();
- EasyMock.replay(stack, blueprint, topology);
+ expect(ambariContext.getConfigHelper()).andReturn(configHelper).anyTimes();
+
+ expect(configHelper.getDefaultStackProperties(
+ EasyMock.eq(new StackId(STACK_NAME, STACK_VERSION)))).andReturn(stackProperties).anyTimes();
+
+ EasyMock.replay(stack, blueprint, topology, ambariContext, configHelper);
// When
@@ -431,7 +463,7 @@ public class ClusterConfigurationRequestTest {
assertFalse("SPARK service not present in topology host group config thus 'spark-env' config type should be removed from config.", hg1.getConfiguration().getFullAttributes().containsKey("spark-env"));
assertTrue("HDFS service is present in topology host group config thus 'hdfs-site' config type should be left in the config.", hg1.getConfiguration().getFullAttributes().containsKey("hdfs-site"));
- verify(stack, blueprint, topology);
+ verify(stack, blueprint, topology, ambariContext, configHelper);
}
http://git-wip-us.apache.org/repos/asf/ambari/blob/f33a250c/ambari-server/src/test/python/common-services/configs/hawq_default.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/common-services/configs/hawq_default.json b/ambari-server/src/test/python/common-services/configs/hawq_default.json
index 79864a9..1b6fafb 100644
--- a/ambari-server/src/test/python/common-services/configs/hawq_default.json
+++ b/ambari-server/src/test/python/common-services/configs/hawq_default.json
@@ -73,7 +73,11 @@
"cluster-env": {
"managed_hdfs_resource_property_names": "",
"security_enabled": "false",
- "user_group": "hadoop"
+ "user_group": "hadoop",
+ "stack_name": "PHD",
+ "stack_root": "{\"PHD\": \"/usr/phd\"}",
+ "stack_tools": "{\n \"PHD\": { \"stack_selector\": [\"phd-select\", \"/usr/bin/phd-select\", \"phd-select\"],\n \"conf_selector\": [\"conf-select\", \"/usr/bin/conf-select\", \"conf-select\"]\n}\n}",
+ "stack_features": "{\"PHD\":{\"stack_features\":[{\"name\":\"express_upgrade\",\"description\":\"Express upgrade support\",\"min_version\":\"3.0.0.0\"},{\"name\":\"rolling_upgrade\",\"description\":\"Rolling upgrade support\",\"min_version\":\"3.0.0.0\"},{\"name\":\"config_versioning\",\"description\":\"Configurable versions support\",\"min_version\":\"3.0.0.0\"}]\n}\n}"
}
},
"clusterHostInfo": {
http://git-wip-us.apache.org/repos/asf/ambari/blob/f33a250c/ambari-server/src/test/python/host_scripts/TestAlertDiskSpace.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/host_scripts/TestAlertDiskSpace.py b/ambari-server/src/test/python/host_scripts/TestAlertDiskSpace.py
index 0d47061..e6cce98 100644
--- a/ambari-server/src/test/python/host_scripts/TestAlertDiskSpace.py
+++ b/ambari-server/src/test/python/host_scripts/TestAlertDiskSpace.py
@@ -41,7 +41,11 @@ class TestAlertDiskSpace(RMFTestCase):
total = 21673930752L, used = 5695861760L,
free = 15978068992L, path="/")
- res = alert_disk_space.execute(configurations={'{{cluster-env/stack_root}}': '/usr/hdp'})
+ configurations = {'{{cluster-env/stack_name}}': 'HDP',
+ '{{cluster-env/stack_root}}': '{"HDP":"/usr/hdp"}'}
+
+ res = alert_disk_space.execute(configurations=configurations)
+
self.assertEqual(res,
('OK', ['Capacity Used: [26.28%, 5.7 GB], Capacity Total: [21.7 GB], path=/']))
@@ -50,7 +54,7 @@ class TestAlertDiskSpace(RMFTestCase):
total = 21673930752L, used = 14521533603L,
free = 7152397149L, path="/")
- res = alert_disk_space.execute(configurations={'{{cluster-env/stack_root}}': '/usr/hdp'})
+ res = alert_disk_space.execute(configurations = configurations)
self.assertEqual(res, (
'WARNING',
['Capacity Used: [67.00%, 14.5 GB], Capacity Total: [21.7 GB], path=/']))
@@ -60,7 +64,7 @@ class TestAlertDiskSpace(RMFTestCase):
total = 21673930752L, used = 20590234214L,
free = 1083696538, path="/")
- res = alert_disk_space.execute(configurations={'{{cluster-env/stack_root}}': '/usr/hdp'})
+ res = alert_disk_space.execute(configurations = configurations)
self.assertEqual(res, ('CRITICAL',
['Capacity Used: [95.00%, 20.6 GB], Capacity Total: [21.7 GB], path=/']))
@@ -69,7 +73,7 @@ class TestAlertDiskSpace(RMFTestCase):
total = 5418482688L, used = 1625544806L,
free = 3792937882L, path="/")
- res = alert_disk_space.execute(configurations={'{{cluster-env/stack_root}}': '/usr/hdp'})
+ res = alert_disk_space.execute(configurations = configurations)
self.assertEqual(res, ('WARNING', [
'Capacity Used: [30.00%, 1.6 GB], Capacity Total: [5.4 GB], path=/. Total free space is less than 5.0 GB']))
@@ -81,7 +85,7 @@ class TestAlertDiskSpace(RMFTestCase):
total = 21673930752L, used = 5695861760L,
free = 15978068992L, path="/usr/hdp")
- res = alert_disk_space.execute(configurations={'{{cluster-env/stack_root}}': '/usr/hdp'})
+ res = alert_disk_space.execute(configurations = configurations)
self.assertEqual(res,
('OK', ['Capacity Used: [26.28%, 5.7 GB], Capacity Total: [21.7 GB], path=/usr/hdp']))
@@ -90,6 +94,6 @@ class TestAlertDiskSpace(RMFTestCase):
total = 5418482688L, used = 1625544806L,
free = 3792937882L, path="/usr/hdp")
- res = alert_disk_space.execute(configurations={'{{cluster-env/stack_root}}': '/usr/hdp'})
+ res = alert_disk_space.execute(configurations = configurations)
self.assertEqual(res, (
'WARNING', ["Capacity Used: [30.00%, 1.6 GB], Capacity Total: [5.4 GB], path=/usr/hdp. Total free space is less than 5.0 GB"]))
[15/18] ambari git commit: AMBARI-21210 ADDENDUM Add ability to Log
Search to test a log entry if it is parseable (mgergely)
Posted by rl...@apache.org.
AMBARI-21210 ADDENDUM Add ability to Log Search to test a log entry if it is parseable (mgergely)
Change-Id: Icb847dc5cc9b6f63eb02cffe8046c78be0e585dc
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/c0882898
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/c0882898
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/c0882898
Branch: refs/heads/branch-feature-AMBARI-20859
Commit: c0882898deed4b6f0ecbd6f12cd935dc6b75cfdf
Parents: 3c9f125
Author: Miklos Gergely <mg...@hortonworks.com>
Authored: Mon Jul 10 14:45:41 2017 +0200
Committer: Miklos Gergely <mg...@hortonworks.com>
Committed: Mon Jul 10 14:45:41 2017 +0200
----------------------------------------------------------------------
.../org/apache/ambari/logfeeder/common/LogEntryParseTester.java | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ambari/blob/c0882898/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/common/LogEntryParseTester.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/common/LogEntryParseTester.java b/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/common/LogEntryParseTester.java
index 97bc3a2..5356159 100644
--- a/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/common/LogEntryParseTester.java
+++ b/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/common/LogEntryParseTester.java
@@ -76,7 +76,7 @@ public class LogEntryParseTester {
ConfigHandler configHandler = new ConfigHandler();
Input input = configHandler.getTestInput(inputConfig, logId);
final Map<String, Object> result = new HashMap<>();
- input.init();
+ input.getFirstFilter().init();
input.addOutput(new Output() {
@Override
public void write(String block, InputMarker inputMarker) throws Exception {
[14/18] ambari git commit: AMBARI-21427. Assigning hosts concurrently
to same config group may fail with
'org.apache.ambari.server.controller.spi.ResourceAlreadyExistsException:
Config group already exist'. (stoader)
Posted by rl...@apache.org.
AMBARI-21427. Assigning hosts concurrently to same config group may fail with 'org.apache.ambari.server.controller.spi.ResourceAlreadyExistsException: Config group already exist'. (stoader)
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/3c9f125c
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/3c9f125c
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/3c9f125c
Branch: refs/heads/branch-feature-AMBARI-20859
Commit: 3c9f125cc08269558f35a971c321777d331de1ca
Parents: 7f3d3b2
Author: Toader, Sebastian <st...@hortonworks.com>
Authored: Mon Jul 10 13:02:20 2017 +0200
Committer: Toader, Sebastian <st...@hortonworks.com>
Committed: Mon Jul 10 13:02:45 2017 +0200
----------------------------------------------------------------------
.../ambari/server/topology/AmbariContext.java | 28 +++++++++++++++++---
1 file changed, 24 insertions(+), 4 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ambari/blob/3c9f125c/ambari-server/src/main/java/org/apache/ambari/server/topology/AmbariContext.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/AmbariContext.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/AmbariContext.java
index 9b64edc..dee0e6c 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/topology/AmbariContext.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/AmbariContext.java
@@ -30,6 +30,7 @@ import java.util.SortedSet;
import java.util.TreeSet;
import java.util.concurrent.Callable;
import java.util.concurrent.atomic.AtomicLong;
+import java.util.concurrent.locks.Lock;
import javax.annotation.Nullable;
import javax.inject.Inject;
@@ -81,6 +82,7 @@ import org.slf4j.LoggerFactory;
import com.google.common.collect.Iterables;
import com.google.common.collect.Sets;
+import com.google.common.util.concurrent.Striped;
import com.google.inject.Provider;
@@ -121,6 +123,16 @@ public class AmbariContext {
private final static Logger LOG = LoggerFactory.getLogger(AmbariContext.class);
+
+ /**
+ * When config groups are created using Blueprints these are created when
+ * hosts join a hostgroup and are added to the corresponding config group.
+ * Since hosts join in parallel there might be a race condition in creating
+ * the config group a host is to be added to. Thus we need to synchronize
+ * the creation of config groups with the same name.
+ */
+ private Striped<Lock> configGroupCreateLock = Striped.lazyWeakLock(1);
+
public boolean isClusterKerberosEnabled(long clusterId) {
Cluster cluster;
try {
@@ -341,11 +353,17 @@ public class AmbariContext {
}
public void registerHostWithConfigGroup(final String hostName, final ClusterTopology topology, final String groupName) {
+ String qualifiedGroupName = getConfigurationGroupName(topology.getBlueprint().getName(), groupName);
+
+ Lock configGroupLock = configGroupCreateLock.get(qualifiedGroupName);
+
try {
+ configGroupLock.lock();
+
boolean hostAdded = RetryHelper.executeWithRetry(new Callable<Boolean>() {
@Override
public Boolean call() throws Exception {
- return addHostToExistingConfigGroups(hostName, topology, groupName);
+ return addHostToExistingConfigGroups(hostName, topology, qualifiedGroupName);
}
});
if (!hostAdded) {
@@ -355,6 +373,9 @@ public class AmbariContext {
LOG.error("Unable to register config group for host: ", e);
throw new RuntimeException("Unable to register config group for host: " + hostName);
}
+ finally {
+ configGroupLock.unlock();
+ }
}
public RequestStatusResponse installHost(String hostName, String clusterName, Collection<String> skipInstallForComponents, Collection<String> dontSkipInstallForComponents, boolean skipFailure) {
@@ -562,7 +583,7 @@ public class AmbariContext {
/**
* Add the new host to an existing config group.
*/
- private boolean addHostToExistingConfigGroups(String hostName, ClusterTopology topology, String groupName) {
+ private boolean addHostToExistingConfigGroups(String hostName, ClusterTopology topology, String configGroupName) {
boolean addedHost = false;
Clusters clusters;
Cluster cluster;
@@ -576,9 +597,8 @@ public class AmbariContext {
// I don't know of a method to get config group by name
//todo: add a method to get config group by name
Map<Long, ConfigGroup> configGroups = cluster.getConfigGroups();
- String qualifiedGroupName = getConfigurationGroupName(topology.getBlueprint().getName(), groupName);
for (ConfigGroup group : configGroups.values()) {
- if (group.getName().equals(qualifiedGroupName)) {
+ if (group.getName().equals(configGroupName)) {
try {
Host host = clusters.getHost(hostName);
addedHost = true;
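The fix serializes creation of identically named config groups by keying a Guava Striped lock on the qualified group name, as the new javadoc above explains. A minimal, self-contained sketch of that pattern outside Ambari (class and method names here are placeholders):

import java.util.concurrent.locks.Lock;

import com.google.common.util.concurrent.Striped;

public class ConfigGroupRegistrar {

  // Equal keys always map to the same lock; the stripe count (1, as in the
  // patch) bounds how many distinct locks exist, and weak references let
  // locks for idle keys be garbage collected.
  private final Striped<Lock> configGroupCreateLock = Striped.lazyWeakLock(1);

  public void registerHost(String qualifiedGroupName, String hostName) {
    Lock lock = configGroupCreateLock.get(qualifiedGroupName);
    lock.lock();
    try {
      // Look up the config group by its qualified name, create it only if it
      // does not exist yet, then add hostName to it -- now free of the
      // create/create race for a given group name.
    } finally {
      lock.unlock();
    }
  }
}

lazyWeakLock is the same factory the patch imports; it trades a fixed lock table for lazily created, weakly referenced locks, which suits keys (group names) that are only contended briefly while hosts join.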
[04/18] ambari git commit: AMBARI-21430 - Allow Multiple Versions of
Stack Tools to Co-Exist (jonathanhurley)
Posted by rl...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/f33a250c/ambari-server/src/test/python/stacks/2.6/configs/ranger-admin-secured.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.6/configs/ranger-admin-secured.json b/ambari-server/src/test/python/stacks/2.6/configs/ranger-admin-secured.json
index fa791c1..64e7d52 100644
--- a/ambari-server/src/test/python/stacks/2.6/configs/ranger-admin-secured.json
+++ b/ambari-server/src/test/python/stacks/2.6/configs/ranger-admin-secured.json
@@ -1,101 +1,101 @@
{
"localComponents": [
- "SECONDARY_NAMENODE",
- "HDFS_CLIENT",
- "DATANODE",
- "NAMENODE",
- "RANGER_ADMIN",
- "RANGER_TAGSYNC",
- "RANGER_USERSYNC",
- "ZOOKEEPER_SERVER",
- "ZOOKEEPER_CLIENT",
+ "SECONDARY_NAMENODE",
+ "HDFS_CLIENT",
+ "DATANODE",
+ "NAMENODE",
+ "RANGER_ADMIN",
+ "RANGER_TAGSYNC",
+ "RANGER_USERSYNC",
+ "ZOOKEEPER_SERVER",
+ "ZOOKEEPER_CLIENT",
"KERBEROS_CLIENT",
"LOGSEARCH_SOLR",
"LOGSEARCH_SOLR_CLIENT"
- ],
+ ],
"configuration_attributes": {
- "ranger-hdfs-audit": {},
- "ssl-client": {},
- "ranger-admin-site": {},
- "ranger-hdfs-policymgr-ssl": {},
- "tagsync-application-properties": {},
- "ranger-env": {},
- "usersync-log4j": {},
- "ranger-hdfs-plugin-properties": {},
- "kerberos-env": {},
- "admin-properties": {},
- "ranger-ugsync-site": {},
+ "ranger-hdfs-audit": {},
+ "ssl-client": {},
+ "ranger-admin-site": {},
+ "ranger-hdfs-policymgr-ssl": {},
+ "tagsync-application-properties": {},
+ "ranger-env": {},
+ "usersync-log4j": {},
+ "ranger-hdfs-plugin-properties": {},
+ "kerberos-env": {},
+ "admin-properties": {},
+ "ranger-ugsync-site": {},
"hdfs-site": {
"final": {
- "dfs.datanode.data.dir": "true",
- "dfs.namenode.http-address": "true",
- "dfs.datanode.failed.volumes.tolerated": "true",
- "dfs.support.append": "true",
- "dfs.namenode.name.dir": "true",
+ "dfs.datanode.data.dir": "true",
+ "dfs.namenode.http-address": "true",
+ "dfs.datanode.failed.volumes.tolerated": "true",
+ "dfs.support.append": "true",
+ "dfs.namenode.name.dir": "true",
"dfs.webhdfs.enabled": "true"
}
- },
+ },
"ranger-tagsync-site": {},
- "ranger-tagsync-policymgr-ssl": {},
+ "ranger-tagsync-policymgr-ssl": {},
"zoo.cfg": {},
"hadoop-policy": {},
- "hdfs-log4j": {},
- "krb5-conf": {},
+ "hdfs-log4j": {},
+ "krb5-conf": {},
"core-site": {
"final": {
"fs.defaultFS": "true"
}
- },
- "hadoop-env": {},
- "zookeeper-log4j": {},
- "ssl-server": {},
- "ranger-site": {},
- "admin-log4j": {},
- "tagsync-log4j": {},
- "ranger-hdfs-security": {},
+ },
+ "hadoop-env": {},
+ "zookeeper-log4j": {},
+ "ssl-server": {},
+ "ranger-site": {},
+ "admin-log4j": {},
+ "tagsync-log4j": {},
+ "ranger-hdfs-security": {},
"ranger-solr-configuration": {},
"usersync-properties": {},
"zookeeper-env": {},
"infra-solr-env": {},
"infra-solr-client-log4j": {},
"cluster-env": {}
- },
- "public_hostname": "c6401.ambari.apache.org",
- "commandId": "41-2",
- "hostname": "c6401.ambari.apache.org",
- "kerberosCommandParams": [],
- "serviceName": "RANGER",
- "role": "RANGER_ADMIN",
- "forceRefreshConfigTagsBeforeExecution": [],
- "requestId": 41,
+ },
+ "public_hostname": "c6401.ambari.apache.org",
+ "commandId": "41-2",
+ "hostname": "c6401.ambari.apache.org",
+ "kerberosCommandParams": [],
+ "serviceName": "RANGER",
+ "role": "RANGER_ADMIN",
+ "forceRefreshConfigTagsBeforeExecution": [],
+ "requestId": 41,
"agentConfigParams": {
"agent": {
"parallel_execution": 0
}
- },
- "clusterName": "test_Cluster01",
- "commandType": "EXECUTION_COMMAND",
- "taskId": 186,
- "roleParams": {},
+ },
+ "clusterName": "test_Cluster01",
+ "commandType": "EXECUTION_COMMAND",
+ "taskId": 186,
+ "roleParams": {},
"configurationTags": {
"ranger-hdfs-audit": {
"tag": "version1466705299922"
- },
+ },
"ssl-client": {
"tag": "version1"
- },
+ },
"ranger-admin-site": {
"tag": "version1467016680635"
- },
+ },
"ranger-hdfs-policymgr-ssl": {
"tag": "version1466705299922"
- },
+ },
"tagsync-application-properties": {
"tag": "version1467016680511"
- },
+ },
"ranger-env": {
"tag": "version1466705299949"
- },
+ },
"ranger-ugsync-site": {
"tag": "version1467016680537"
},
@@ -104,52 +104,52 @@
},
"ranger-hdfs-plugin-properties": {
"tag": "version1466705299922"
- },
+ },
"kerberos-env": {
"tag": "version1467016537243"
- },
+ },
"admin-properties": {
"tag": "version1466705299949"
- },
+ },
"hdfs-site": {
"tag": "version1467016680401"
- },
+ },
"ranger-tagsync-site": {
"tag": "version1467016680586"
- },
+ },
"zoo.cfg": {
"tag": "version1"
- },
+ },
"hadoop-policy": {
"tag": "version1"
- },
+ },
"hdfs-log4j": {
"tag": "version1"
- },
+ },
"usersync-log4j": {
"tag": "version1466705299949"
- },
+ },
"krb5-conf": {
"tag": "version1467016537243"
- },
+ },
"core-site": {
"tag": "version1467016680612"
- },
+ },
"hadoop-env": {
"tag": "version1467016680446"
- },
+ },
"zookeeper-log4j": {
"tag": "version1"
- },
+ },
"ssl-server": {
"tag": "version1"
- },
+ },
"ranger-site": {
"tag": "version1466705299949"
- },
+ },
"admin-log4j": {
"tag": "version1466705299949"
- },
+ },
"tagsync-log4j": {
"tag": "version1466705299949"
},
@@ -158,10 +158,10 @@
},
"ranger-hdfs-security": {
"tag": "version1466705299922"
- },
+ },
"usersync-properties": {
"tag": "version1466705299949"
- },
+ },
"zookeeper-env": {
"tag": "version1467016680492"
},
@@ -174,116 +174,116 @@
"cluster-env": {
"tag": "version1467016680567"
}
- },
- "roleCommand": "START",
+ },
+ "roleCommand": "START",
"hostLevelParams": {
- "agent_stack_retry_on_unavailability": "false",
- "stack_name": "HDP",
+ "agent_stack_retry_on_unavailability": "false",
+ "stack_name": "HDP",
"package_version": "2_6_0_0_*",
"custom_mysql_jdbc_name": "mysql-connector-java.jar",
"previous_custom_mysql_jdbc_name": "mysql-connector-java-old.jar",
- "host_sys_prepped": "false",
- "ambari_db_rca_username": "mapred",
+ "host_sys_prepped": "false",
+ "ambari_db_rca_username": "mapred",
"current_version": "2.6.0.0-801",
"mysql_jdbc_url": "http://c6401.ambari.apache.org:8080/resources//mysql-connector-java.jar",
"agent_stack_retry_count": "5",
"stack_version": "2.6",
- "jdk_name": "jdk-8u60-linux-x64.tar.gz",
- "ambari_db_rca_driver": "org.postgresql.Driver",
+ "jdk_name": "jdk-8u60-linux-x64.tar.gz",
+ "ambari_db_rca_driver": "org.postgresql.Driver",
"java_home": "/usr/jdk64/jdk1.7.0_45",
"repository_version_id": "1",
"jdk_location": "http://c6401.ambari.apache.org:8080/resources/",
- "not_managed_hdfs_path_list": "[\"/tmp\"]",
- "ambari_db_rca_url": "jdbc:postgresql://c6401.ambari.apache.org/ambarirca",
- "java_version": "8",
+ "not_managed_hdfs_path_list": "[\"/tmp\"]",
+ "ambari_db_rca_url": "jdbc:postgresql://c6401.ambari.apache.org/ambarirca",
+ "java_version": "8",
"repo_info": "[{\"baseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/BUILDS/2.6.0.0-801\",\"osType\":\"redhat6\",\"repoId\":\"HDP-2.6\",\"repoName\":\"HDP\",\"defaultBaseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/updates/2.6.0.0\",\"latestBaseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/BUILDS/2.6.0.0-801\",\"baseSaved\":true},{\"baseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP-UTILS-1.1.0.21/repos/centos6\",\"osType\":\"redhat6\",\"repoId\":\"HDP-UTILS-1.1.0.21\",\"repoName\":\"HDP-UTILS\",\"defaultBaseUrl\":\"http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.21/repos/centos6\",\"latestBaseUrl\":\"http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.21/repos/centos6\",\"baseSaved\":true}]",
"package_list": "[{\"name\":\"ranger_${stack_version}-admin\",\"condition\":\"\",\"skipUpgrade\":false},{\"name\":\"ranger_${stack_version}-usersync\",\"condition\":\"\",\"skipUpgrade\":false},{\"name\":\"ranger_${stack_version}-tagsync\",\"condition\":\"should_install_ranger_tagsync\",\"skipUpgrade\":false},{\"name\":\"ambari-logsearch-solr-client\",\"condition\":\"should_install_logsearch_solr_client\",\"skipUpgrade\":false}]",
"db_name": "ambari",
- "group_list": "[\"ranger\",\"hadoop\",\"users\"]",
- "agentCacheDir": "/var/lib/ambari-agent/cache",
- "ambari_db_rca_password": "mapred",
- "jce_name": "jce_policy-8.zip",
- "oracle_jdbc_url": "http://c6401.ambari.apache.org:8080/resources//ojdbc6.jar",
- "db_driver_filename": "mysql-connector-java.jar",
- "user_list": "[\"zookeeper\",\"ambari-qa\",\"ranger\",\"hdfs\"]",
+ "group_list": "[\"ranger\",\"hadoop\",\"users\"]",
+ "agentCacheDir": "/var/lib/ambari-agent/cache",
+ "ambari_db_rca_password": "mapred",
+ "jce_name": "jce_policy-8.zip",
+ "oracle_jdbc_url": "http://c6401.ambari.apache.org:8080/resources//ojdbc6.jar",
+ "db_driver_filename": "mysql-connector-java.jar",
+ "user_list": "[\"zookeeper\",\"ambari-qa\",\"ranger\",\"hdfs\"]",
"clientsToUpdateConfigs": "[\"*\"]"
- },
+ },
"commandParams": {
- "service_package_folder": "common-services/RANGER/0.4.0/package",
- "script": "scripts/ranger_admin.py",
- "hooks_folder": "HDP/2.0.6/hooks",
+ "service_package_folder": "common-services/RANGER/0.4.0/package",
+ "script": "scripts/ranger_admin.py",
+ "hooks_folder": "HDP/2.0.6/hooks",
"version": "2.6.0.0-801",
"max_duration_for_retries": "0",
"command_retry_enabled": "false",
- "command_timeout": "600",
+ "command_timeout": "600",
"script_type": "PYTHON"
- },
- "forceRefreshConfigTags": [],
- "stageId": 2,
+ },
+ "forceRefreshConfigTags": [],
+ "stageId": 2,
"clusterHostInfo": {
"snamenode_host": [
"c6401.ambari.apache.org"
- ],
+ ],
"ambari_server_use_ssl": [
"false"
- ],
+ ],
"all_ping_ports": [
"8670"
- ],
+ ],
"ranger_tagsync_hosts": [
"c6401.ambari.apache.org"
- ],
+ ],
"ranger_usersync_hosts": [
"c6401.ambari.apache.org"
- ],
+ ],
"all_hosts": [
"c6401.ambari.apache.org"
- ],
+ ],
"slave_hosts": [
"c6401.ambari.apache.org"
- ],
+ ],
"namenode_host": [
"c6401.ambari.apache.org"
- ],
+ ],
"ambari_server_port": [
"8080"
- ],
+ ],
"ranger_admin_hosts": [
"c6401.ambari.apache.org"
- ],
+ ],
"all_racks": [
"/default-rack"
- ],
+ ],
"all_ipv4_ips": [
"172.22.83.73"
- ],
+ ],
"ambari_server_host": [
"c6401.ambari.apache.org"
- ],
+ ],
"zookeeper_hosts": [
"c6401.ambari.apache.org"
],
"infra_solr_hosts": [
"c6401.ambari.apache.org"
]
- },
+ },
"configurations": {
"ranger-hdfs-audit": {
- "xasecure.audit.destination.solr.zookeepers": "NONE",
- "xasecure.audit.destination.solr.urls": "",
- "xasecure.audit.destination.solr.batch.filespool.dir": "/var/log/hadoop/hdfs/audit/solr/spool",
+ "xasecure.audit.destination.solr.zookeepers": "NONE",
+ "xasecure.audit.destination.solr.urls": "",
+ "xasecure.audit.destination.solr.batch.filespool.dir": "/var/log/hadoop/hdfs/audit/solr/spool",
"xasecure.audit.destination.hdfs.batch.filespool.dir": "/var/log/hadoop/hdfs/audit/hdfs/spool",
- "xasecure.audit.destination.hdfs": "true",
+ "xasecure.audit.destination.hdfs": "true",
"xasecure.audit.destination.solr": "false",
- "xasecure.audit.provider.summary.enabled": "false",
- "xasecure.audit.destination.hdfs.dir": "hdfs://c6401.ambari.apache.org:8020/ranger/audit",
+ "xasecure.audit.provider.summary.enabled": "false",
+ "xasecure.audit.destination.hdfs.dir": "hdfs://c6401.ambari.apache.org:8020/ranger/audit",
"xasecure.audit.is.enabled": "true"
},
"ranger-tagsync-policymgr-ssl": {
- "xasecure.policymgr.clientssl.keystore": "/etc/security/serverKeys/ranger-tagsync-keystore.jks",
- "xasecure.policymgr.clientssl.truststore.password": "changeit",
+ "xasecure.policymgr.clientssl.keystore": "/etc/security/serverKeys/ranger-tagsync-keystore.jks",
+ "xasecure.policymgr.clientssl.truststore.password": "changeit",
"xasecure.policymgr.clientssl.keystore.credential.file": "jceks://file{{ranger_tagsync_credential_file}}",
- "xasecure.policymgr.clientssl.truststore": "/etc/security/serverKeys/ranger-tagsync-mytruststore.jks",
+ "xasecure.policymgr.clientssl.truststore": "/etc/security/serverKeys/ranger-tagsync-mytruststore.jks",
"xasecure.policymgr.clientssl.truststore.credential.file": "jceks://file{{ranger_tagsync_credential_file}}",
"xasecure.policymgr.clientssl.keystore.password": "myKeyFilePassword"
},
@@ -296,186 +296,186 @@
"xasecure.policymgr.clientssl.keystore.password": "myKeyFilePassword"
},
"ssl-client": {
- "ssl.client.truststore.reload.interval": "10000",
- "ssl.client.keystore.password": "bigdata",
- "ssl.client.truststore.type": "jks",
- "ssl.client.keystore.location": "/etc/security/clientKeys/keystore.jks",
- "ssl.client.truststore.location": "/etc/security/clientKeys/all.jks",
- "ssl.client.truststore.password": "bigdata",
+ "ssl.client.truststore.reload.interval": "10000",
+ "ssl.client.keystore.password": "bigdata",
+ "ssl.client.truststore.type": "jks",
+ "ssl.client.keystore.location": "/etc/security/clientKeys/keystore.jks",
+ "ssl.client.truststore.location": "/etc/security/clientKeys/all.jks",
+ "ssl.client.truststore.password": "bigdata",
"ssl.client.keystore.type": "jks"
- },
+ },
"ranger-admin-site": {
"ranger.is.solr.kerberised": "true",
- "ranger.admin.kerberos.cookie.domain": "{{ranger_host}}",
- "ranger.kms.service.user.hdfs": "hdfs",
- "ranger.spnego.kerberos.principal": "HTTP/_HOST@EXAMPLE.COM",
- "ranger.ldap.ad.url": "{{ranger_ug_ldap_url}}",
- "ranger.plugins.hive.serviceuser": "hive",
- "ranger.lookup.kerberos.keytab": "/etc/security/keytabs/rangerlookup.service.keytab",
- "ranger.plugins.kms.serviceuser": "kms",
- "ranger.service.https.attrib.ssl.enabled": "false",
- "ranger.sso.browser.useragent": "Mozilla,chrome",
- "ranger.jpa.jdbc.url": "jdbc:mysql://c6401.ambari.apache.org:3306/ranger01",
- "ranger.plugins.hbase.serviceuser": "hbase",
- "ranger.plugins.hdfs.serviceuser": "hdfs",
- "ranger.ldap.group.searchbase": "{{ranger_ug_ldap_group_searchbase}}",
- "ranger.ldap.user.dnpattern": "uid={0},ou=users,dc=xasecure,dc=net",
- "ranger.plugins.knox.serviceuser": "knox",
- "ranger.ldap.base.dn": "dc=example,dc=com",
- "ranger.sso.publicKey": "",
- "ranger.admin.kerberos.cookie.path": "/",
- "ranger.service.https.attrib.clientAuth": "want",
- "ranger.jpa.jdbc.user": "{{ranger_db_user}}",
- "ranger.ldap.ad.user.searchfilter": "(sAMAccountName={0})",
- "ranger.ldap.group.roleattribute": "cn",
- "ranger.plugins.kafka.serviceuser": "kafka",
- "ranger.admin.kerberos.principal": "rangeradmin/_HOST@EXAMPLE.COM",
- "ranger.ldap.ad.bind.dn": "{{ranger_ug_ldap_bind_dn}}",
+ "ranger.admin.kerberos.cookie.domain": "{{ranger_host}}",
+ "ranger.kms.service.user.hdfs": "hdfs",
+ "ranger.spnego.kerberos.principal": "HTTP/_HOST@EXAMPLE.COM",
+ "ranger.ldap.ad.url": "{{ranger_ug_ldap_url}}",
+ "ranger.plugins.hive.serviceuser": "hive",
+ "ranger.lookup.kerberos.keytab": "/etc/security/keytabs/rangerlookup.service.keytab",
+ "ranger.plugins.kms.serviceuser": "kms",
+ "ranger.service.https.attrib.ssl.enabled": "false",
+ "ranger.sso.browser.useragent": "Mozilla,chrome",
+ "ranger.jpa.jdbc.url": "jdbc:mysql://c6401.ambari.apache.org:3306/ranger01",
+ "ranger.plugins.hbase.serviceuser": "hbase",
+ "ranger.plugins.hdfs.serviceuser": "hdfs",
+ "ranger.ldap.group.searchbase": "{{ranger_ug_ldap_group_searchbase}}",
+ "ranger.ldap.user.dnpattern": "uid={0},ou=users,dc=xasecure,dc=net",
+ "ranger.plugins.knox.serviceuser": "knox",
+ "ranger.ldap.base.dn": "dc=example,dc=com",
+ "ranger.sso.publicKey": "",
+ "ranger.admin.kerberos.cookie.path": "/",
+ "ranger.service.https.attrib.clientAuth": "want",
+ "ranger.jpa.jdbc.user": "{{ranger_db_user}}",
+ "ranger.ldap.ad.user.searchfilter": "(sAMAccountName={0})",
+ "ranger.ldap.group.roleattribute": "cn",
+ "ranger.plugins.kafka.serviceuser": "kafka",
+ "ranger.admin.kerberos.principal": "rangeradmin/_HOST@EXAMPLE.COM",
+ "ranger.ldap.ad.bind.dn": "{{ranger_ug_ldap_bind_dn}}",
"ranger.credential.provider.path": "/etc/ranger/admin/rangeradmin.jceks",
- "ranger.ldap.referral": "ignore",
- "ranger.service.http.port": "6080",
- "ranger.ldap.user.searchfilter": "(uid={0})",
- "ranger.plugins.atlas.serviceuser": "atlas",
- "ranger.truststore.password": "changeit",
- "ranger.ldap.bind.password": "{{ranger_usersync_ldap_ldapbindpassword}}",
- "ranger.audit.solr.password": "NONE",
+ "ranger.ldap.referral": "ignore",
+ "ranger.service.http.port": "6080",
+ "ranger.ldap.user.searchfilter": "(uid={0})",
+ "ranger.plugins.atlas.serviceuser": "atlas",
+ "ranger.truststore.password": "changeit",
+ "ranger.ldap.bind.password": "{{ranger_usersync_ldap_ldapbindpassword}}",
+ "ranger.audit.solr.password": "NONE",
"ranger.audit.solr.zookeepers": "c6401.ambari.apache.org:2181/infra-solr",
"ranger.lookup.kerberos.principal": "rangerlookup/_HOST@EXAMPLE.COM",
- "ranger.service.https.port": "6182",
- "ranger.plugins.storm.serviceuser": "storm",
- "ranger.externalurl": "{{ranger_external_url}}",
- "ranger.truststore.file": "/etc/ranger/admin/conf/ranger-admin-keystore.jks",
- "ranger.kms.service.user.hive": "",
- "ranger.https.attrib.keystore.file": "/etc/ranger/admin/conf/ranger-admin-keystore.jks",
- "ranger.jpa.jdbc.dialect": "{{jdbc_dialect}}",
- "ranger.service.host": "{{ranger_host}}",
- "ranger.service.https.attrib.keystore.keyalias": "rangeradmin",
- "ranger.service.https.attrib.keystore.pass": "xasecure",
- "ranger.unixauth.remote.login.enabled": "true",
+ "ranger.service.https.port": "6182",
+ "ranger.plugins.storm.serviceuser": "storm",
+ "ranger.externalurl": "{{ranger_external_url}}",
+ "ranger.truststore.file": "/etc/ranger/admin/conf/ranger-admin-keystore.jks",
+ "ranger.kms.service.user.hive": "",
+ "ranger.https.attrib.keystore.file": "/etc/ranger/admin/conf/ranger-admin-keystore.jks",
+ "ranger.jpa.jdbc.dialect": "{{jdbc_dialect}}",
+ "ranger.service.host": "{{ranger_host}}",
+ "ranger.service.https.attrib.keystore.keyalias": "rangeradmin",
+ "ranger.service.https.attrib.keystore.pass": "xasecure",
+ "ranger.unixauth.remote.login.enabled": "true",
"ranger.jpa.jdbc.credential.alias": "rangeradmin",
- "ranger.ldap.ad.bind.password": "{{ranger_usersync_ldap_ldapbindpassword}}",
- "ranger.audit.solr.username": "ranger_solr",
- "ranger.sso.enabled": "false",
- "ranger.audit.solr.urls": "",
- "ranger.ldap.ad.domain": "",
- "ranger.plugins.yarn.serviceuser": "yarn",
- "ranger.audit.source.type": "solr",
- "ranger.ldap.bind.dn": "{{ranger_ug_ldap_bind_dn}}",
- "ranger.ldap.url": "{{ranger_ug_ldap_url}}",
- "ranger.authentication.method": "UNIX",
- "ranger.service.http.enabled": "true",
- "ranger.ldap.group.searchfilter": "{{ranger_ug_ldap_group_searchfilter}}",
- "ranger.ldap.ad.referral": "ignore",
- "ranger.ldap.ad.base.dn": "dc=example,dc=com",
- "ranger.jpa.jdbc.password": "_",
- "ranger.spnego.kerberos.keytab": "/etc/security/keytabs/spnego.service.keytab",
- "ranger.sso.providerurl": "",
- "ranger.unixauth.service.hostname": "{{ugsync_host}}",
- "ranger.admin.kerberos.keytab": "/etc/security/keytabs/rangeradmin.service.keytab",
- "ranger.admin.kerberos.token.valid.seconds": "30",
- "ranger.jpa.jdbc.driver": "com.mysql.jdbc.Driver",
+ "ranger.ldap.ad.bind.password": "{{ranger_usersync_ldap_ldapbindpassword}}",
+ "ranger.audit.solr.username": "ranger_solr",
+ "ranger.sso.enabled": "false",
+ "ranger.audit.solr.urls": "",
+ "ranger.ldap.ad.domain": "",
+ "ranger.plugins.yarn.serviceuser": "yarn",
+ "ranger.audit.source.type": "solr",
+ "ranger.ldap.bind.dn": "{{ranger_ug_ldap_bind_dn}}",
+ "ranger.ldap.url": "{{ranger_ug_ldap_url}}",
+ "ranger.authentication.method": "UNIX",
+ "ranger.service.http.enabled": "true",
+ "ranger.ldap.group.searchfilter": "{{ranger_ug_ldap_group_searchfilter}}",
+ "ranger.ldap.ad.referral": "ignore",
+ "ranger.ldap.ad.base.dn": "dc=example,dc=com",
+ "ranger.jpa.jdbc.password": "_",
+ "ranger.spnego.kerberos.keytab": "/etc/security/keytabs/spnego.service.keytab",
+ "ranger.sso.providerurl": "",
+ "ranger.unixauth.service.hostname": "{{ugsync_host}}",
+ "ranger.admin.kerberos.keytab": "/etc/security/keytabs/rangeradmin.service.keytab",
+ "ranger.admin.kerberos.token.valid.seconds": "30",
+ "ranger.jpa.jdbc.driver": "com.mysql.jdbc.Driver",
"ranger.unixauth.service.port": "5151"
- },
+ },
"ranger-hdfs-policymgr-ssl": {
- "xasecure.policymgr.clientssl.keystore": "/usr/hdp/current/hadoop-client/conf/ranger-plugin-keystore.jks",
- "xasecure.policymgr.clientssl.truststore.password": "changeit",
- "xasecure.policymgr.clientssl.keystore.credential.file": "jceks://file{{credential_file}}",
- "xasecure.policymgr.clientssl.truststore": "/usr/hdp/current/hadoop-client/conf/ranger-plugin-truststore.jks",
- "xasecure.policymgr.clientssl.truststore.credential.file": "jceks://file{{credential_file}}",
+ "xasecure.policymgr.clientssl.keystore": "/usr/hdp/current/hadoop-client/conf/ranger-plugin-keystore.jks",
+ "xasecure.policymgr.clientssl.truststore.password": "changeit",
+ "xasecure.policymgr.clientssl.keystore.credential.file": "jceks://file{{credential_file}}",
+ "xasecure.policymgr.clientssl.truststore": "/usr/hdp/current/hadoop-client/conf/ranger-plugin-truststore.jks",
+ "xasecure.policymgr.clientssl.truststore.credential.file": "jceks://file{{credential_file}}",
"xasecure.policymgr.clientssl.keystore.password": "myKeyFilePassword"
- },
+ },
"tagsync-application-properties": {
- "atlas.kafka.zookeeper.connect": "c6401.ambari.apache.org:2181",
- "atlas.kafka.security.protocol": "SASL_PLAINTEXT",
- "atlas.jaas.KafkaClient.option.principal": "{{tagsync_jaas_principal}}",
- "atlas.jaas.KafkaClient.option.keyTab": "{{tagsync_keytab_path}}",
- "atlas.kafka.entities.group.id": "ranger_entities_consumer",
- "atlas.jaas.KafkaClient.loginModuleName": "com.sun.security.auth.module.Krb5LoginModule",
- "atlas.jaas.KafkaClient.option.serviceName": "kafka",
- "atlas.kafka.bootstrap.servers": "localhost:6667",
- "atlas.jaas.KafkaClient.option.useKeyTab": "true",
- "atlas.jaas.KafkaClient.option.storeKey": "true",
- "atlas.jaas.KafkaClient.loginModuleControlFlag": "required",
+ "atlas.kafka.zookeeper.connect": "c6401.ambari.apache.org:2181",
+ "atlas.kafka.security.protocol": "SASL_PLAINTEXT",
+ "atlas.jaas.KafkaClient.option.principal": "{{tagsync_jaas_principal}}",
+ "atlas.jaas.KafkaClient.option.keyTab": "{{tagsync_keytab_path}}",
+ "atlas.kafka.entities.group.id": "ranger_entities_consumer",
+ "atlas.jaas.KafkaClient.loginModuleName": "com.sun.security.auth.module.Krb5LoginModule",
+ "atlas.jaas.KafkaClient.option.serviceName": "kafka",
+ "atlas.kafka.bootstrap.servers": "localhost:6667",
+ "atlas.jaas.KafkaClient.option.useKeyTab": "true",
+ "atlas.jaas.KafkaClient.option.storeKey": "true",
+ "atlas.jaas.KafkaClient.loginModuleControlFlag": "required",
"atlas.kafka.sasl.kerberos.service.name": "kafka"
- },
+ },
"ranger-env": {
- "ranger_solr_shards": "1",
- "ranger_solr_config_set": "ranger_audits",
- "ranger_user": "ranger",
+ "ranger_solr_shards": "1",
+ "ranger_solr_config_set": "ranger_audits",
+ "ranger_user": "ranger",
"ranger_solr_replication_factor": "1",
- "xml_configurations_supported": "true",
- "ranger-atlas-plugin-enabled": "No",
- "ranger-hbase-plugin-enabled": "No",
- "ranger-yarn-plugin-enabled": "No",
- "bind_anonymous": "false",
- "ranger_admin_username": "amb_ranger_admin",
- "admin_password": "admin",
- "is_solrCloud_enabled": "true",
- "ranger-storm-plugin-enabled": "No",
- "ranger-hdfs-plugin-enabled": "No",
- "ranger_group": "ranger",
- "ranger-knox-plugin-enabled": "No",
- "ranger_admin_log_dir": "/var/log/ranger/admin",
- "ranger-kafka-plugin-enabled": "No",
- "ranger_privelege_user_jdbc_url": "jdbc:mysql://c6401.ambari.apache.org:3306",
- "ranger-hive-plugin-enabled": "No",
- "xasecure.audit.destination.solr": "true",
- "ranger_pid_dir": "/var/run/ranger",
- "xasecure.audit.destination.hdfs": "true",
- "admin_username": "admin",
- "xasecure.audit.destination.hdfs.dir": "hdfs://c6401.ambari.apache.org:8020/ranger/audit",
- "create_db_dbuser": "true",
- "ranger_solr_collection_name": "ranger_audits",
- "ranger_admin_password": "P1!q9xa96SMi5NCl",
+ "xml_configurations_supported": "true",
+ "ranger-atlas-plugin-enabled": "No",
+ "ranger-hbase-plugin-enabled": "No",
+ "ranger-yarn-plugin-enabled": "No",
+ "bind_anonymous": "false",
+ "ranger_admin_username": "amb_ranger_admin",
+ "admin_password": "admin",
+ "is_solrCloud_enabled": "true",
+ "ranger-storm-plugin-enabled": "No",
+ "ranger-hdfs-plugin-enabled": "No",
+ "ranger_group": "ranger",
+ "ranger-knox-plugin-enabled": "No",
+ "ranger_admin_log_dir": "/var/log/ranger/admin",
+ "ranger-kafka-plugin-enabled": "No",
+ "ranger_privelege_user_jdbc_url": "jdbc:mysql://c6401.ambari.apache.org:3306",
+ "ranger-hive-plugin-enabled": "No",
+ "xasecure.audit.destination.solr": "true",
+ "ranger_pid_dir": "/var/run/ranger",
+ "xasecure.audit.destination.hdfs": "true",
+ "admin_username": "admin",
+ "xasecure.audit.destination.hdfs.dir": "hdfs://c6401.ambari.apache.org:8020/ranger/audit",
+ "create_db_dbuser": "true",
+ "ranger_solr_collection_name": "ranger_audits",
+ "ranger_admin_password": "P1!q9xa96SMi5NCl",
"ranger_usersync_log_dir": "/var/log/ranger/usersync"
- },
+ },
"usersync-log4j": {
"content": "\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nlog4j.rootLogger = info,logFile\n\n# logFile\nlog4j.appender.logFile=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.logFile.file=${logdir}/usersync.
log\nlog4j.appender.logFile.datePattern='.'yyyy-MM-dd\nlog4j.appender.logFile.layout=org.apache.log4j.PatternLayout\nlog4j.appender.logFile.layout.ConversionPattern=%d{dd MMM yyyy HH:mm:ss} %5p %c{1} [%t] - %m%n\n\n# console\nlog4j.appender.console=org.apache.log4j.ConsoleAppender\nlog4j.appender.console.Target=System.out\nlog4j.appender.console.layout=org.apache.log4j.PatternLayout\nlog4j.appender.console.layout.ConversionPattern=%d{dd MMM yyyy HH:mm:ss} %5p %c{1} [%t] - %m%n"
- },
+ },
"ranger-hdfs-plugin-properties": {
- "hadoop.rpc.protection": "authentication",
- "ranger-hdfs-plugin-enabled": "No",
- "REPOSITORY_CONFIG_USERNAME": "hadoop",
- "policy_user": "ambari-qa",
- "common.name.for.certificate": "",
+ "hadoop.rpc.protection": "authentication",
+ "ranger-hdfs-plugin-enabled": "No",
+ "REPOSITORY_CONFIG_USERNAME": "hadoop",
+ "policy_user": "ambari-qa",
+ "common.name.for.certificate": "",
"REPOSITORY_CONFIG_PASSWORD": "hadoop"
- },
+ },
"kerberos-env": {
- "kdc_hosts": "c6401.ambari.apache.org",
- "manage_auth_to_local": "true",
- "install_packages": "true",
- "realm": "EXAMPLE.COM",
- "encryption_types": "aes des3-cbc-sha1 rc4 des-cbc-md5",
- "ad_create_attributes_template": "\n{\n \"objectClass\": [\"top\", \"person\", \"organizationalPerson\", \"user\"],\n \"cn\": \"$principal_name\",\n #if( $is_service )\n \"servicePrincipalName\": \"$principal_name\",\n #end\n \"userPrincipalName\": \"$normalized_principal\",\n \"unicodePwd\": \"$password\",\n \"accountExpires\": \"0\",\n \"userAccountControl\": \"66048\"\n}",
- "kdc_create_attributes": "",
- "admin_server_host": "c6401.ambari.apache.org",
- "group": "ambari-managed-principals",
- "password_length": "20",
- "ldap_url": "",
- "manage_identities": "true",
- "password_min_lowercase_letters": "1",
- "create_ambari_principal": "true",
- "service_check_principal_name": "${cluster_name|toLower()}-${short_date}",
- "executable_search_paths": "/usr/bin, /usr/kerberos/bin, /usr/sbin, /usr/lib/mit/bin, /usr/lib/mit/sbin",
- "password_chat_timeout": "5",
- "kdc_type": "mit-kdc",
- "set_password_expiry": "false",
- "password_min_punctuation": "1",
- "container_dn": "",
- "case_insensitive_username_rules": "false",
- "password_min_whitespace": "0",
- "password_min_uppercase_letters": "1",
+ "kdc_hosts": "c6401.ambari.apache.org",
+ "manage_auth_to_local": "true",
+ "install_packages": "true",
+ "realm": "EXAMPLE.COM",
+ "encryption_types": "aes des3-cbc-sha1 rc4 des-cbc-md5",
+ "ad_create_attributes_template": "\n{\n \"objectClass\": [\"top\", \"person\", \"organizationalPerson\", \"user\"],\n \"cn\": \"$principal_name\",\n #if( $is_service )\n \"servicePrincipalName\": \"$principal_name\",\n #end\n \"userPrincipalName\": \"$normalized_principal\",\n \"unicodePwd\": \"$password\",\n \"accountExpires\": \"0\",\n \"userAccountControl\": \"66048\"\n}",
+ "kdc_create_attributes": "",
+ "admin_server_host": "c6401.ambari.apache.org",
+ "group": "ambari-managed-principals",
+ "password_length": "20",
+ "ldap_url": "",
+ "manage_identities": "true",
+ "password_min_lowercase_letters": "1",
+ "create_ambari_principal": "true",
+ "service_check_principal_name": "${cluster_name|toLower()}-${short_date}",
+ "executable_search_paths": "/usr/bin, /usr/kerberos/bin, /usr/sbin, /usr/lib/mit/bin, /usr/lib/mit/sbin",
+ "password_chat_timeout": "5",
+ "kdc_type": "mit-kdc",
+ "set_password_expiry": "false",
+ "password_min_punctuation": "1",
+ "container_dn": "",
+ "case_insensitive_username_rules": "false",
+ "password_min_whitespace": "0",
+ "password_min_uppercase_letters": "1",
"password_min_digits": "1"
- },
+ },
"admin-properties": {
- "db_user": "rangeradmin01",
- "DB_FLAVOR": "MYSQL",
- "db_password": "rangeradmin01",
- "db_root_user": "root",
- "policymgr_external_url": "http://c6401.ambari.apache.org:6080",
- "db_name": "ranger01",
- "db_host": "c6401.ambari.apache.org",
- "db_root_password": "vagrant",
+ "db_user": "rangeradmin01",
+ "DB_FLAVOR": "MYSQL",
+ "db_password": "rangeradmin01",
+ "db_root_user": "root",
+ "policymgr_external_url": "http://c6401.ambari.apache.org:6080",
+ "db_name": "ranger01",
+ "db_host": "c6401.ambari.apache.org",
+ "db_root_password": "vagrant",
"SQL_CONNECTOR_JAR": "{{driver_curl_target}}"
},
"ranger-solr-configuration": {
@@ -484,261 +484,261 @@
"ranger_audit_logs_merge_factor": "5"
},
"ranger-ugsync-site": {
- "ranger.usersync.ldap.binddn": "",
- "ranger.usersync.policymgr.username": "rangerusersync",
- "ranger.usersync.policymanager.mockrun": "false",
- "ranger.usersync.group.searchbase": "",
- "ranger.usersync.ldap.bindalias": "testldapalias",
- "ranger.usersync.truststore.file": "/usr/hdp/current/ranger-usersync/conf/mytruststore.jks",
- "ranger.usersync.port": "5151",
- "ranger.usersync.pagedresultssize": "500",
- "ranger.usersync.group.memberattributename": "",
- "ranger.usersync.kerberos.principal": "rangerusersync/_HOST@EXAMPLE.COM",
- "ranger.usersync.source.impl.class": "org.apache.ranger.unixusersync.process.UnixUserGroupBuilder",
- "ranger.usersync.ldap.referral": "ignore",
- "ranger.usersync.group.searchfilter": "",
- "ranger.usersync.ldap.user.objectclass": "person",
- "ranger.usersync.logdir": "{{usersync_log_dir}}",
- "ranger.usersync.ldap.user.searchfilter": "",
- "ranger.usersync.ldap.groupname.caseconversion": "none",
- "ranger.usersync.ldap.ldapbindpassword": "",
- "ranger.usersync.unix.minUserId": "500",
- "ranger.usersync.policymanager.maxrecordsperapicall": "1000",
- "ranger.usersync.group.nameattribute": "",
- "ranger.usersync.policymgr.alias": "ranger.usersync.policymgr.password",
- "ranger.usersync.keystore.file": "/usr/hdp/current/ranger-usersync/conf/unixauthservice.jks",
- "ranger.usersync.user.searchenabled": "false",
- "ranger.usersync.group.usermapsyncenabled": "true",
- "ranger.usersync.ldap.bindkeystore": "",
- "ranger.usersync.ldap.user.groupnameattribute": "memberof, ismemberof",
- "ranger.usersync.kerberos.keytab": "/etc/security/keytabs/rangerusersync.service.keytab",
- "ranger.usersync.passwordvalidator.path": "./native/credValidator.uexe",
- "ranger.usersync.group.objectclass": "",
- "ranger.usersync.ldap.user.searchscope": "sub",
- "ranger.usersync.unix.password.file": "/etc/passwd",
- "ranger.usersync.ldap.user.nameattribute": "",
- "ranger.usersync.pagedresultsenabled": "true",
- "ranger.usersync.policymanager.baseURL": "{{ranger_external_url}}",
- "ranger.usersync.group.search.first.enabled": "false",
- "ranger.usersync.group.searchenabled": "false",
- "ranger.usersync.sink.impl.class": "org.apache.ranger.unixusersync.process.PolicyMgrUserGroupBuilder",
- "ranger.usersync.ssl": "true",
- "ranger.usersync.ldap.url": "",
- "ranger.usersync.ldap.searchBase": "dc=hadoop,dc=apache,dc=org",
- "ranger.usersync.policymgr.keystore": "/usr/hdp/current/ranger-usersync/conf/ugsync.jceks",
- "ranger.usersync.ldap.user.searchbase": "",
- "ranger.usersync.ldap.username.caseconversion": "none",
- "ranger.usersync.credstore.filename": "/usr/hdp/current/ranger-usersync/conf/ugsync.jceks",
- "ranger.usersync.keystore.password": "UnIx529p",
- "ranger.usersync.unix.group.file": "/etc/group",
- "ranger.usersync.filesource.file": "/tmp/usergroup.txt",
- "ranger.usersync.group.searchscope": "",
- "ranger.usersync.truststore.password": "changeit",
- "ranger.usersync.enabled": "true",
- "ranger.usersync.sleeptimeinmillisbetweensynccycle": "60000",
+ "ranger.usersync.ldap.binddn": "",
+ "ranger.usersync.policymgr.username": "rangerusersync",
+ "ranger.usersync.policymanager.mockrun": "false",
+ "ranger.usersync.group.searchbase": "",
+ "ranger.usersync.ldap.bindalias": "testldapalias",
+ "ranger.usersync.truststore.file": "/usr/hdp/current/ranger-usersync/conf/mytruststore.jks",
+ "ranger.usersync.port": "5151",
+ "ranger.usersync.pagedresultssize": "500",
+ "ranger.usersync.group.memberattributename": "",
+ "ranger.usersync.kerberos.principal": "rangerusersync/_HOST@EXAMPLE.COM",
+ "ranger.usersync.source.impl.class": "org.apache.ranger.unixusersync.process.UnixUserGroupBuilder",
+ "ranger.usersync.ldap.referral": "ignore",
+ "ranger.usersync.group.searchfilter": "",
+ "ranger.usersync.ldap.user.objectclass": "person",
+ "ranger.usersync.logdir": "{{usersync_log_dir}}",
+ "ranger.usersync.ldap.user.searchfilter": "",
+ "ranger.usersync.ldap.groupname.caseconversion": "none",
+ "ranger.usersync.ldap.ldapbindpassword": "",
+ "ranger.usersync.unix.minUserId": "500",
+ "ranger.usersync.policymanager.maxrecordsperapicall": "1000",
+ "ranger.usersync.group.nameattribute": "",
+ "ranger.usersync.policymgr.alias": "ranger.usersync.policymgr.password",
+ "ranger.usersync.keystore.file": "/usr/hdp/current/ranger-usersync/conf/unixauthservice.jks",
+ "ranger.usersync.user.searchenabled": "false",
+ "ranger.usersync.group.usermapsyncenabled": "true",
+ "ranger.usersync.ldap.bindkeystore": "",
+ "ranger.usersync.ldap.user.groupnameattribute": "memberof, ismemberof",
+ "ranger.usersync.kerberos.keytab": "/etc/security/keytabs/rangerusersync.service.keytab",
+ "ranger.usersync.passwordvalidator.path": "./native/credValidator.uexe",
+ "ranger.usersync.group.objectclass": "",
+ "ranger.usersync.ldap.user.searchscope": "sub",
+ "ranger.usersync.unix.password.file": "/etc/passwd",
+ "ranger.usersync.ldap.user.nameattribute": "",
+ "ranger.usersync.pagedresultsenabled": "true",
+ "ranger.usersync.policymanager.baseURL": "{{ranger_external_url}}",
+ "ranger.usersync.group.search.first.enabled": "false",
+ "ranger.usersync.group.searchenabled": "false",
+ "ranger.usersync.sink.impl.class": "org.apache.ranger.unixusersync.process.PolicyMgrUserGroupBuilder",
+ "ranger.usersync.ssl": "true",
+ "ranger.usersync.ldap.url": "",
+ "ranger.usersync.ldap.searchBase": "dc=hadoop,dc=apache,dc=org",
+ "ranger.usersync.policymgr.keystore": "/usr/hdp/current/ranger-usersync/conf/ugsync.jceks",
+ "ranger.usersync.ldap.user.searchbase": "",
+ "ranger.usersync.ldap.username.caseconversion": "none",
+ "ranger.usersync.credstore.filename": "/usr/hdp/current/ranger-usersync/conf/ugsync.jceks",
+ "ranger.usersync.keystore.password": "UnIx529p",
+ "ranger.usersync.unix.group.file": "/etc/group",
+ "ranger.usersync.filesource.file": "/tmp/usergroup.txt",
+ "ranger.usersync.group.searchscope": "",
+ "ranger.usersync.truststore.password": "changeit",
+ "ranger.usersync.enabled": "true",
+ "ranger.usersync.sleeptimeinmillisbetweensynccycle": "60000",
"ranger.usersync.filesource.text.delimiter": ","
- },
+ },
"hdfs-site": {
- "dfs.namenode.checkpoint.period": "21600",
- "dfs.namenode.avoid.write.stale.datanode": "true",
- "dfs.permissions.superusergroup": "hdfs",
- "dfs.namenode.startup.delay.block.deletion.sec": "3600",
- "dfs.namenode.kerberos.internal.spnego.principal": "HTTP/_HOST@EXAMPLE.COM",
- "dfs.datanode.kerberos.principal": "dn/_HOST@EXAMPLE.COM",
- "dfs.heartbeat.interval": "3",
- "dfs.content-summary.limit": "5000",
- "dfs.support.append": "true",
- "dfs.datanode.address": "0.0.0.0:1019",
- "dfs.cluster.administrators": " hdfs",
- "dfs.namenode.audit.log.async": "true",
- "dfs.datanode.balance.bandwidthPerSec": "6250000",
- "dfs.namenode.safemode.threshold-pct": "1",
- "dfs.namenode.checkpoint.edits.dir": "${dfs.namenode.checkpoint.dir}",
- "dfs.namenode.rpc-address": "c6401.ambari.apache.org:8020",
- "dfs.permissions.enabled": "true",
- "dfs.namenode.kerberos.principal": "nn/_HOST@EXAMPLE.COM",
- "dfs.client.read.shortcircuit": "true",
- "dfs.https.port": "50470",
- "dfs.namenode.https-address": "c6401.ambari.apache.org:50470",
- "nfs.file.dump.dir": "/tmp/.hdfs-nfs",
- "dfs.blocksize": "134217728",
- "dfs.blockreport.initialDelay": "120",
- "dfs.journalnode.edits.dir": "/hadoop/hdfs/journalnode",
- "dfs.namenode.fslock.fair": "false",
- "dfs.datanode.max.transfer.threads": "4096",
- "dfs.secondary.namenode.kerberos.internal.spnego.principal": "HTTP/_HOST@EXAMPLE.COM",
- "dfs.replication": "3",
- "dfs.namenode.handler.count": "50",
- "dfs.web.authentication.kerberos.keytab": "/etc/security/keytabs/spnego.service.keytab",
- "fs.permissions.umask-mode": "022",
- "dfs.namenode.stale.datanode.interval": "30000",
- "dfs.datanode.ipc.address": "0.0.0.0:8010",
- "dfs.datanode.failed.volumes.tolerated": "0",
- "dfs.datanode.data.dir": "/grid/0/hadoop/hdfs/data",
- "dfs.namenode.http-address": "c6401.ambari.apache.org:50070",
- "dfs.webhdfs.enabled": "true",
- "dfs.encrypt.data.transfer.cipher.suites": "AES/CTR/NoPadding",
- "dfs.namenode.accesstime.precision": "0",
- "dfs.namenode.write.stale.datanode.ratio": "1.0f",
- "dfs.datanode.https.address": "0.0.0.0:50475",
- "dfs.namenode.checkpoint.dir": "/grid/0/hadoop/hdfs/namesecondary",
- "dfs.namenode.secondary.http-address": "c6401.ambari.apache.org:50090",
- "nfs.exports.allowed.hosts": "* rw",
- "dfs.namenode.checkpoint.txns": "1000000",
- "dfs.datanode.http.address": "0.0.0.0:1022",
- "dfs.datanode.du.reserved": "33011188224",
- "dfs.client.read.shortcircuit.streams.cache.size": "4096",
- "dfs.secondary.namenode.keytab.file": "/etc/security/keytabs/nn.service.keytab",
- "dfs.web.authentication.kerberos.principal": "HTTP/_HOST@EXAMPLE.COM",
- "dfs.http.policy": "HTTP_ONLY",
- "dfs.block.access.token.enable": "true",
- "dfs.client.retry.policy.enabled": "false",
- "dfs.secondary.namenode.kerberos.principal": "nn/_HOST@EXAMPLE.COM",
- "dfs.datanode.keytab.file": "/etc/security/keytabs/dn.service.keytab",
- "dfs.namenode.name.dir.restore": "true",
- "dfs.namenode.keytab.file": "/etc/security/keytabs/nn.service.keytab",
- "dfs.journalnode.https-address": "0.0.0.0:8481",
- "dfs.journalnode.http-address": "0.0.0.0:8480",
- "dfs.domain.socket.path": "/var/lib/hadoop-hdfs/dn_socket",
- "dfs.namenode.avoid.read.stale.datanode": "true",
- "dfs.hosts.exclude": "/etc/hadoop/conf/dfs.exclude",
- "dfs.datanode.data.dir.perm": "750",
- "dfs.encryption.key.provider.uri": "kms://http@c6401.ambari.apache.org:9292/kms",
- "dfs.replication.max": "50",
+ "dfs.namenode.checkpoint.period": "21600",
+ "dfs.namenode.avoid.write.stale.datanode": "true",
+ "dfs.permissions.superusergroup": "hdfs",
+ "dfs.namenode.startup.delay.block.deletion.sec": "3600",
+ "dfs.namenode.kerberos.internal.spnego.principal": "HTTP/_HOST@EXAMPLE.COM",
+ "dfs.datanode.kerberos.principal": "dn/_HOST@EXAMPLE.COM",
+ "dfs.heartbeat.interval": "3",
+ "dfs.content-summary.limit": "5000",
+ "dfs.support.append": "true",
+ "dfs.datanode.address": "0.0.0.0:1019",
+ "dfs.cluster.administrators": " hdfs",
+ "dfs.namenode.audit.log.async": "true",
+ "dfs.datanode.balance.bandwidthPerSec": "6250000",
+ "dfs.namenode.safemode.threshold-pct": "1",
+ "dfs.namenode.checkpoint.edits.dir": "${dfs.namenode.checkpoint.dir}",
+ "dfs.namenode.rpc-address": "c6401.ambari.apache.org:8020",
+ "dfs.permissions.enabled": "true",
+ "dfs.namenode.kerberos.principal": "nn/_HOST@EXAMPLE.COM",
+ "dfs.client.read.shortcircuit": "true",
+ "dfs.https.port": "50470",
+ "dfs.namenode.https-address": "c6401.ambari.apache.org:50470",
+ "nfs.file.dump.dir": "/tmp/.hdfs-nfs",
+ "dfs.blocksize": "134217728",
+ "dfs.blockreport.initialDelay": "120",
+ "dfs.journalnode.edits.dir": "/hadoop/hdfs/journalnode",
+ "dfs.namenode.fslock.fair": "false",
+ "dfs.datanode.max.transfer.threads": "4096",
+ "dfs.secondary.namenode.kerberos.internal.spnego.principal": "HTTP/_HOST@EXAMPLE.COM",
+ "dfs.replication": "3",
+ "dfs.namenode.handler.count": "50",
+ "dfs.web.authentication.kerberos.keytab": "/etc/security/keytabs/spnego.service.keytab",
+ "fs.permissions.umask-mode": "022",
+ "dfs.namenode.stale.datanode.interval": "30000",
+ "dfs.datanode.ipc.address": "0.0.0.0:8010",
+ "dfs.datanode.failed.volumes.tolerated": "0",
+ "dfs.datanode.data.dir": "/grid/0/hadoop/hdfs/data",
+ "dfs.namenode.http-address": "c6401.ambari.apache.org:50070",
+ "dfs.webhdfs.enabled": "true",
+ "dfs.encrypt.data.transfer.cipher.suites": "AES/CTR/NoPadding",
+ "dfs.namenode.accesstime.precision": "0",
+ "dfs.namenode.write.stale.datanode.ratio": "1.0f",
+ "dfs.datanode.https.address": "0.0.0.0:50475",
+ "dfs.namenode.checkpoint.dir": "/grid/0/hadoop/hdfs/namesecondary",
+ "dfs.namenode.secondary.http-address": "c6401.ambari.apache.org:50090",
+ "nfs.exports.allowed.hosts": "* rw",
+ "dfs.namenode.checkpoint.txns": "1000000",
+ "dfs.datanode.http.address": "0.0.0.0:1022",
+ "dfs.datanode.du.reserved": "33011188224",
+ "dfs.client.read.shortcircuit.streams.cache.size": "4096",
+ "dfs.secondary.namenode.keytab.file": "/etc/security/keytabs/nn.service.keytab",
+ "dfs.web.authentication.kerberos.principal": "HTTP/_HOST@EXAMPLE.COM",
+ "dfs.http.policy": "HTTP_ONLY",
+ "dfs.block.access.token.enable": "true",
+ "dfs.client.retry.policy.enabled": "false",
+ "dfs.secondary.namenode.kerberos.principal": "nn/_HOST@EXAMPLE.COM",
+ "dfs.datanode.keytab.file": "/etc/security/keytabs/dn.service.keytab",
+ "dfs.namenode.name.dir.restore": "true",
+ "dfs.namenode.keytab.file": "/etc/security/keytabs/nn.service.keytab",
+ "dfs.journalnode.https-address": "0.0.0.0:8481",
+ "dfs.journalnode.http-address": "0.0.0.0:8480",
+ "dfs.domain.socket.path": "/var/lib/hadoop-hdfs/dn_socket",
+ "dfs.namenode.avoid.read.stale.datanode": "true",
+ "dfs.hosts.exclude": "/etc/hadoop/conf/dfs.exclude",
+ "dfs.datanode.data.dir.perm": "750",
+ "dfs.encryption.key.provider.uri": "kms://http@c6401.ambari.apache.org:9292/kms",
+ "dfs.replication.max": "50",
"dfs.namenode.name.dir": "/grid/0/hadoop/hdfs/namenode"
- },
+ },
"ranger-tagsync-site": {
"ranger.tagsync.dest.ranger.ssl.config.filename": "{{stack_root}}/current/ranger-tagsync/conf/ranger-policymgr-ssl.xml",
"ranger.tagsync.source.atlasrest.username": "",
"ranger.tagsync.logdir": "/var/log/ranger/tagsync",
- "ranger.tagsync.source.atlasrest.download.interval.millis": "",
- "ranger.tagsync.keystore.filename": "/usr/hdp/current/ranger-tagsync/conf/rangertagsync.jceks",
- "ranger.tagsync.source.file.check.interval.millis": "",
- "ranger.tagsync.source.atlasrest.endpoint": "",
- "ranger.tagsync.dest.ranger.username": "rangertagsync",
- "ranger.tagsync.dest.ranger.endpoint": "{{ranger_external_url}}",
- "ranger.tagsync.kerberos.principal": "rangertagsync/_HOST@EXAMPLE.COM",
- "ranger.tagsync.kerberos.keytab": "/etc/security/keytabs/rangertagsync.service.keytab",
- "ranger.tagsync.source.atlas": "false",
- "ranger.tagsync.source.atlasrest": "false",
- "ranger.tagsync.source.file": "false",
+ "ranger.tagsync.source.atlasrest.download.interval.millis": "",
+ "ranger.tagsync.keystore.filename": "/usr/hdp/current/ranger-tagsync/conf/rangertagsync.jceks",
+ "ranger.tagsync.source.file.check.interval.millis": "",
+ "ranger.tagsync.source.atlasrest.endpoint": "",
+ "ranger.tagsync.dest.ranger.username": "rangertagsync",
+ "ranger.tagsync.dest.ranger.endpoint": "{{ranger_external_url}}",
+ "ranger.tagsync.kerberos.principal": "rangertagsync/_HOST@EXAMPLE.COM",
+ "ranger.tagsync.kerberos.keytab": "/etc/security/keytabs/rangertagsync.service.keytab",
+ "ranger.tagsync.source.atlas": "false",
+ "ranger.tagsync.source.atlasrest": "false",
+ "ranger.tagsync.source.file": "false",
"ranger.tagsync.source.file.filename": ""
- },
+ },
"zoo.cfg": {
- "clientPort": "2181",
- "autopurge.purgeInterval": "24",
- "syncLimit": "5",
- "dataDir": "/grid/0/hadoop/zookeeper",
- "initLimit": "10",
- "tickTime": "2000",
+ "clientPort": "2181",
+ "autopurge.purgeInterval": "24",
+ "syncLimit": "5",
+ "dataDir": "/grid/0/hadoop/zookeeper",
+ "initLimit": "10",
+ "tickTime": "2000",
"autopurge.snapRetainCount": "30"
- },
+ },
"hadoop-policy": {
- "security.job.client.protocol.acl": "*",
- "security.job.task.protocol.acl": "*",
- "security.datanode.protocol.acl": "*",
- "security.namenode.protocol.acl": "*",
- "security.client.datanode.protocol.acl": "*",
- "security.inter.tracker.protocol.acl": "*",
- "security.refresh.usertogroups.mappings.protocol.acl": "hadoop",
- "security.client.protocol.acl": "*",
- "security.refresh.policy.protocol.acl": "hadoop",
- "security.admin.operations.protocol.acl": "hadoop",
+ "security.job.client.protocol.acl": "*",
+ "security.job.task.protocol.acl": "*",
+ "security.datanode.protocol.acl": "*",
+ "security.namenode.protocol.acl": "*",
+ "security.client.datanode.protocol.acl": "*",
+ "security.inter.tracker.protocol.acl": "*",
+ "security.refresh.usertogroups.mappings.protocol.acl": "hadoop",
+ "security.client.protocol.acl": "*",
+ "security.refresh.policy.protocol.acl": "hadoop",
+ "security.admin.operations.protocol.acl": "hadoop",
"security.inter.datanode.protocol.acl": "*"
- },
+ },
"hdfs-log4j": {
"content": "\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n\n\n# Define some default values that can be overridden by system properties\n# To change daemon root logger use hadoop_root_logger in hadoop-env\nhadoop.root.lo
gger=INFO,console\nhadoop.log.dir=.\nhadoop.log.file=hadoop.log\n\n\n# Define the root logger to the system property \"hadoop.root.logger\".\nlog4j.rootLogger=${hadoop.root.logger}, EventCounter\n\n# Logging Threshold\nlog4j.threshhold=ALL\n\n#\n# Daily Rolling File Appender\n#\n\nlog4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}\n\n# Rollver at midnight\nlog4j.appender.DRFA.DatePattern=.yyyy-MM-dd\n\n# 30-day backup\n#log4j.appender.DRFA.MaxBackupIndex=30\nlog4j.appender.DRFA.layout=org.apache.log4j.PatternLayout\n\n# Pattern format: Date LogLevel LoggerName LogMessage\nlog4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\n# Debugging Pattern format\n#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n\n\n\n#\n# console\n# Add \"console\" to rootlogger above if you want to use this\n#\n\nlog4j.appender.console=org.apache.log4j.ConsoleAppender\nlog4j.appender
.console.target=System.err\nlog4j.appender.console.layout=org.apache.log4j.PatternLayout\nlog4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n\n\n#\n# TaskLog Appender\n#\n\n#Default values\nhadoop.tasklog.taskid=null\nhadoop.tasklog.iscleanup=false\nhadoop.tasklog.noKeepSplits=4\nhadoop.tasklog.totalLogFileSize=100\nhadoop.tasklog.purgeLogSplits=true\nhadoop.tasklog.logsRetainHours=12\n\nlog4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender\nlog4j.appender.TLA.taskId=${hadoop.tasklog.taskid}\nlog4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}\nlog4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}\n\nlog4j.appender.TLA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\n\n#\n#Security audit appender\n#\nhadoop.security.logger=INFO,console\nhadoop.security.log.maxfilesize=256MB\nhadoop.security.log.maxbackupindex=20\nlog4j.category.SecurityLogger=${hadoop.security.logg
er}\nhadoop.security.log.file=SecurityAuth.audit\nlog4j.appender.DRFAS=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}\nlog4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout\nlog4j.appender.DRFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\nlog4j.appender.DRFAS.DatePattern=.yyyy-MM-dd\n\nlog4j.appender.RFAS=org.apache.log4j.RollingFileAppender\nlog4j.appender.RFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}\nlog4j.appender.RFAS.layout=org.apache.log4j.PatternLayout\nlog4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\nlog4j.appender.RFAS.MaxFileSize=${hadoop.security.log.maxfilesize}\nlog4j.appender.RFAS.MaxBackupIndex=${hadoop.security.log.maxbackupindex}\n\n#\n# hdfs audit logging\n#\nhdfs.audit.logger=INFO,console\nlog4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger}\nlog4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=
false\nlog4j.appender.DRFAAUDIT=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFAAUDIT.File=${hadoop.log.dir}/hdfs-audit.log\nlog4j.appender.DRFAAUDIT.layout=org.apache.log4j.PatternLayout\nlog4j.appender.DRFAAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n\nlog4j.appender.DRFAAUDIT.DatePattern=.yyyy-MM-dd\n\n#\n# NameNode metrics logging.\n# The default is to retain two namenode-metrics.log files up to 64MB each.\n#\nnamenode.metrics.logger=INFO,NullAppender\nlog4j.logger.NameNodeMetricsLog=${namenode.metrics.logger}\nlog4j.additivity.NameNodeMetricsLog=false\nlog4j.appender.NNMETRICSRFA=org.apache.log4j.RollingFileAppender\nlog4j.appender.NNMETRICSRFA.File=${hadoop.log.dir}/namenode-metrics.log\nlog4j.appender.NNMETRICSRFA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.NNMETRICSRFA.layout.ConversionPattern=%d{ISO8601} %m%n\nlog4j.appender.NNMETRICSRFA.MaxBackupIndex=1\nlog4j.appender.NNMETRICSRFA.MaxFileSize=64MB\n\n#\n# mapred audit logging\n#\nmapred
.audit.logger=INFO,console\nlog4j.logger.org.apache.hadoop.mapred.AuditLogger=${mapred.audit.logger}\nlog4j.additivity.org.apache.hadoop.mapred.AuditLogger=false\nlog4j.appender.MRAUDIT=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.MRAUDIT.File=${hadoop.log.dir}/mapred-audit.log\nlog4j.appender.MRAUDIT.layout=org.apache.log4j.PatternLayout\nlog4j.appender.MRAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n\nlog4j.appender.MRAUDIT.DatePattern=.yyyy-MM-dd\n\n#\n# Rolling File Appender\n#\n\nlog4j.appender.RFA=org.apache.log4j.RollingFileAppender\nlog4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}\n\n# Logfile size and and 30-day backups\nlog4j.appender.RFA.MaxFileSize=256MB\nlog4j.appender.RFA.MaxBackupIndex=10\n\nlog4j.appender.RFA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} - %m%n\nlog4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n\n\n\n# Custom Logging leve
ls\n\nhadoop.metrics.log.level=INFO\n#log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG\n#log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG\n#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG\nlog4j.logger.org.apache.hadoop.metrics2=${hadoop.metrics.log.level}\n\n# Jets3t library\nlog4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR\n\n#\n# Null Appender\n# Trap security logger on the hadoop client side\n#\nlog4j.appender.NullAppender=org.apache.log4j.varia.NullAppender\n\n#\n# Event Counter Appender\n# Sends counts of logging messages at different severity levels to Hadoop Metrics.\n#\nlog4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter\n\n# Removes \"deprecated\" messages\nlog4j.logger.org.apache.hadoop.conf.Configuration.deprecation=WARN\n\n#\n# HDFS block state change log from block manager\n#\n# Uncomment the following to suppress normal block state change\n# messages from BlockManager in NameNode.\n#log4j.logger.BlockStateCh
ange=WARN"
- },
+ },
"krb5-conf": {
- "domains": "",
- "manage_krb5_conf": "true",
- "content": "\n[libdefaults]\n renew_lifetime = 7d\n forwardable = true\n default_realm = {{realm}}\n ticket_lifetime = 24h\n dns_lookup_realm = false\n dns_lookup_kdc = false\n default_ccache_name = /tmp/krb5cc_%{uid}\n #default_tgs_enctypes = {{encryption_types}}\n #default_tkt_enctypes = {{encryption_types}}\n{% if domains %}\n[domain_realm]\n{%- for domain in domains.split(',') %}\n {{domain|trim()}} = {{realm}}\n{%- endfor %}\n{% endif %}\n[logging]\n default = FILE:/var/log/krb5kdc.log\n admin_server = FILE:/var/log/kadmind.log\n kdc = FILE:/var/log/krb5kdc.log\n\n[realms]\n {{realm}} = {\n{%- if kdc_hosts > 0 -%}\n{%- set kdc_host_list = kdc_hosts.split(',') -%}\n{%- if kdc_host_list and kdc_host_list|length > 0 %}\n admin_server = {{admin_server_host|default(kdc_host_list[0]|trim(), True)}}\n{%- if kdc_host_list -%}\n{% for kdc_host in kdc_host_list %}\n kdc = {{kdc_host|trim()}}\n{%- endfor -%}\n{% endif %}\n{%- endif %}\n{%- endif %}\n }\n\
n{# Append additional realm declarations below #}",
+ "domains": "",
+ "manage_krb5_conf": "true",
+ "content": "\n[libdefaults]\n renew_lifetime = 7d\n forwardable = true\n default_realm = {{realm}}\n ticket_lifetime = 24h\n dns_lookup_realm = false\n dns_lookup_kdc = false\n default_ccache_name = /tmp/krb5cc_%{uid}\n #default_tgs_enctypes = {{encryption_types}}\n #default_tkt_enctypes = {{encryption_types}}\n{% if domains %}\n[domain_realm]\n{%- for domain in domains.split(',') %}\n {{domain|trim()}} = {{realm}}\n{%- endfor %}\n{% endif %}\n[logging]\n default = FILE:/var/log/krb5kdc.log\n admin_server = FILE:/var/log/kadmind.log\n kdc = FILE:/var/log/krb5kdc.log\n\n[realms]\n {{realm}} = {\n{%- if kdc_hosts > 0 -%}\n{%- set kdc_host_list = kdc_hosts.split(',') -%}\n{%- if kdc_host_list and kdc_host_list|length > 0 %}\n admin_server = {{admin_server_host|default(kdc_host_list[0]|trim(), True)}}\n{%- if kdc_host_list -%}\n{% for kdc_host in kdc_host_list %}\n kdc = {{kdc_host|trim()}}\n{%- endfor -%}\n{% endif %}\n{%- endif %}\n{%- endif %}\n }\n\
n{# Append additional realm declarations below #}",
"conf_dir": "/etc"
- },
+ },
"core-site": {
- "net.topology.script.file.name": "/etc/hadoop/conf/topology_script.py",
- "hadoop.proxyuser.hdfs.groups": "*",
- "fs.trash.interval": "360",
- "ipc.server.tcpnodelay": "true",
- "io.compression.codecs": "org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec,org.apache.hadoop.io.compress.SnappyCodec",
- "ipc.client.idlethreshold": "8000",
- "io.file.buffer.size": "131072",
- "hadoop.proxyuser.ambari-server-test_cluster01.groups": "*",
- "io.serializations": "org.apache.hadoop.io.serializer.WritableSerialization",
- "hadoop.security.authentication": "kerberos",
- "mapreduce.jobtracker.webinterface.trusted": "false",
- "hadoop.proxyuser.hdfs.hosts": "*",
- "hadoop.proxyuser.HTTP.groups": "users",
- "fs.defaultFS": "hdfs://c6401.ambari.apache.org:8020",
- "ha.failover-controller.active-standby-elector.zk.op.retries": "120",
- "hadoop.security.key.provider.path": "kms://http@c6401.ambari.apache.org:9292/kms",
- "hadoop.security.authorization": "true",
- "hadoop.http.authentication.simple.anonymous.allowed": "true",
- "ipc.client.connect.max.retries": "50",
- "hadoop.security.auth_to_local": "RULE:[1:$1@$0](ambari-qa-test_cluster01@EXAMPLE.COM)s/.*/ambari-qa/\nRULE:[1:$1@$0](hdfs-test_cluster01@EXAMPLE.COM)s/.*/hdfs/\nRULE:[1:$1@$0](.*@EXAMPLE.COM)s/@.*//\nRULE:[2:$1@$0](dn@EXAMPLE.COM)s/.*/hdfs/\nRULE:[2:$1@$0](nn@EXAMPLE.COM)s/.*/hdfs/\nRULE:[2:$1@$0](rangeradmin@EXAMPLE.COM)s/.*/ranger/\nRULE:[2:$1@$0](rangertagsync@EXAMPLE.COM)s/.*/rangertagsync/\nRULE:[2:$1@$0](rangerusersync@EXAMPLE.COM)s/.*/rangerusersync/\nDEFAULT",
- "hadoop.proxyuser.ambari-server-test_cluster01.hosts": "c6401.ambari.apache.org",
+ "net.topology.script.file.name": "/etc/hadoop/conf/topology_script.py",
+ "hadoop.proxyuser.hdfs.groups": "*",
+ "fs.trash.interval": "360",
+ "ipc.server.tcpnodelay": "true",
+ "io.compression.codecs": "org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec,org.apache.hadoop.io.compress.SnappyCodec",
+ "ipc.client.idlethreshold": "8000",
+ "io.file.buffer.size": "131072",
+ "hadoop.proxyuser.ambari-server-test_cluster01.groups": "*",
+ "io.serializations": "org.apache.hadoop.io.serializer.WritableSerialization",
+ "hadoop.security.authentication": "kerberos",
+ "mapreduce.jobtracker.webinterface.trusted": "false",
+ "hadoop.proxyuser.hdfs.hosts": "*",
+ "hadoop.proxyuser.HTTP.groups": "users",
+ "fs.defaultFS": "hdfs://c6401.ambari.apache.org:8020",
+ "ha.failover-controller.active-standby-elector.zk.op.retries": "120",
+ "hadoop.security.key.provider.path": "kms://http@c6401.ambari.apache.org:9292/kms",
+ "hadoop.security.authorization": "true",
+ "hadoop.http.authentication.simple.anonymous.allowed": "true",
+ "ipc.client.connect.max.retries": "50",
+ "hadoop.security.auth_to_local": "RULE:[1:$1@$0](ambari-qa-test_cluster01@EXAMPLE.COM)s/.*/ambari-qa/\nRULE:[1:$1@$0](hdfs-test_cluster01@EXAMPLE.COM)s/.*/hdfs/\nRULE:[1:$1@$0](.*@EXAMPLE.COM)s/@.*//\nRULE:[2:$1@$0](dn@EXAMPLE.COM)s/.*/hdfs/\nRULE:[2:$1@$0](nn@EXAMPLE.COM)s/.*/hdfs/\nRULE:[2:$1@$0](rangeradmin@EXAMPLE.COM)s/.*/ranger/\nRULE:[2:$1@$0](rangertagsync@EXAMPLE.COM)s/.*/rangertagsync/\nRULE:[2:$1@$0](rangerusersync@EXAMPLE.COM)s/.*/rangerusersync/\nDEFAULT",
+ "hadoop.proxyuser.ambari-server-test_cluster01.hosts": "c6401.ambari.apache.org",
"ipc.client.connection.maxidletime": "30000"
- },
+ },
"hadoop-env": {
- "keyserver_port": "",
- "proxyuser_group": "users",
- "hdfs_user_nproc_limit": "65536",
- "hdfs_log_dir_prefix": "/var/log/hadoop",
- "hdfs_user_nofile_limit": "128000",
- "hdfs_user": "hdfs",
- "hdfs_principal_name": "hdfs-test_cluster01@EXAMPLE.COM",
- "keyserver_host": " ",
- "namenode_opt_maxnewsize": "128m",
- "hdfs_user_keytab": "/etc/security/keytabs/hdfs.headless.keytab",
- "namenode_opt_maxpermsize": "256m",
- "content": "\n# Set Hadoop-specific environment variables here.\n\n# The only required environment variable is JAVA_HOME. All others are\n# optional. When running a distributed configuration it is best to\n# set JAVA_HOME in this file, so that it is correctly defined on\n# remote nodes.\n\n# The java implementation to use. Required.\nexport JAVA_HOME={{java_home}}\nexport HADOOP_HOME_WARN_SUPPRESS=1\n\n# Hadoop home directory\nexport HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n\n# Hadoop Configuration Directory\n\n{# this is different for HDP1 #}\n# Path to jsvc required by secure HDP 2.0 datanode\nexport JSVC_HOME={{jsvc_path}}\n\n\n# The maximum amount of heap to use, in MB. Default is 1000.\nexport HADOOP_HEAPSIZE=\"{{hadoop_heapsize}}\"\n\nexport HADOOP_NAMENODE_INIT_HEAPSIZE=\"-Xms{{namenode_heapsize}}\"\n\n# Extra Java runtime options. Empty by default.\nexport HADOOP_OPTS=\"-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}\"\n\n# Command specific options appende
d to HADOOP_OPTS when specified\nHADOOP_JOBTRACKER_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}\"\n\nHADOOP_TASKTRACKER_OPTS=\"-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}\"\n\n{% if java_version < 8 %}\nSHARED_HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSiz
e={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT\"\nexport HADOOP_NAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-namenode/bin/kill-name-node\\\" -Dorg.mortbay.jetty.Request.maxFormContentSize=-1 ${HADOOP_NAMENODE_OPTS}\"\nexport HADOOP_DATANODE_OPTS=\"-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -XX:PermSize=128m -XX:MaxPermSize=256m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms
{{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS}\"\n\nexport HADOOP_SECONDARYNAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-secondarynamenode/bin/kill-secondary-name-node\\\" ${HADOOP_SECONDARYNAMENODE_OPTS}\"\n\n# The following applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m -XX:MaxPermSize=512m $HADOOP_CLIENT_OPTS\"\n\n{% else %}\nSHARED_HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -Xms{{name
node_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT\"\nexport HADOOP_NAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-namenode/bin/kill-name-node\\\" -Dorg.mortbay.jetty.Request.maxFormContentSize=-1 ${HADOOP_NAMENODE_OPTS}\"\nexport HADOOP_DATANODE_OPTS=\"-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS}\"\n\nexport HADOOP_SECONDARYNAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-secondarynamenode/bin/kill-secondary-name-node\\\" ${HADOOP_SECONDARY
NAMENODE_OPTS}\"\n\n# The following applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS\"\n{% endif %}\n\nHADOOP_NFS3_OPTS=\"-Xmx{{nfsgateway_heapsize}}m -Dhadoop.security.logger=ERROR,DRFAS ${HADOOP_NFS3_OPTS}\"\nHADOOP_BALANCER_OPTS=\"-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}\"\n\n\n# On secure datanodes, user to run the datanode as after dropping privileges\nexport HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER:-{{hadoop_secure_dn_user}}}\n\n# Extra ssh options. Empty by default.\nexport HADOOP_SSH_OPTS=\"-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR\"\n\n# Where log files are stored. $HADOOP_HOME/logs by default.\nexport HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER\n\n# History server logs\nexport HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER\n\n# Where log files are stored in the secure data environment.\nexport HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_D
N_USER\n\n# File naming remote slave hosts. $HADOOP_HOME/conf/slaves by default.\n# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves\n\n# host:path where hadoop code should be rsync'd from. Unset by default.\n# export HADOOP_MASTER=master:/home/$USER/src/hadoop\n\n# Seconds to sleep between slave commands. Unset by default. This\n# can be useful in large clusters, where, e.g., slave rsyncs can\n# otherwise arrive faster than the master can service them.\n# export HADOOP_SLAVE_SLEEP=0.1\n\n# The directory where pid files are stored. /tmp by default.\nexport HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER\nexport HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# History server pid\nexport HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER\n\nYARN_RESOURCEMANAGER_OPTS=\"-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY\"\n\n# A string representing this instance of hadoop. $USER by default.\nexport HADOOP_IDENT_STRING=$USER\n\n# The schedul
ing priority for daemon processes. See 'man nice'.\n\n# export HADOOP_NICENESS=10\n\n# Add database libraries\nJAVA_JDBC_LIBS=\"\"\nif [ -d \"/usr/share/java\" ]; then\n for jarFile in `ls /usr/share/java | grep -E \"(mysql|ojdbc|postgresql|sqljdbc)\" 2>/dev/null`\n do\n JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\n done\nfi\n\n# Add libraries to the hadoop classpath - some may not need a colon as they already include it\nexport HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}\n\n# Setting path to hdfs command line\nexport HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}\n\n# Mostly required for hadoop 2.0\nexport JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}\n\nexport HADOOP_OPTS=\"-Dhdp.version=$HDP_VERSION $HADOOP_OPTS\"\n\n{% if is_datanode_max_locked_memory_set %}\n# Fix temporary bug, when ulimit from conf files is not picked up, without full relogin. \n# Makes sense to fix only when runing DN as root \nif [ \"$command\" == \"datanode\" ] && [ \"$EUID\" -eq 0 ] && [ -n \"$HADOOP_S
ECURE_DN_USER\" ]; then\n ulimit -l {{datanode_max_locked_memory}}\nfi\n{% endif %}",
- "namenode_heapsize": "1024m",
- "namenode_opt_newsize": "128m",
- "nfsgateway_heapsize": "1024",
- "dtnode_heapsize": "1024m",
- "hadoop_root_logger": "INFO,RFA",
- "hadoop_heapsize": "1024",
- "hadoop_pid_dir_prefix": "/var/run/hadoop",
- "namenode_opt_permsize": "128m",
+ "keyserver_port": "",
+ "proxyuser_group": "users",
+ "hdfs_user_nproc_limit": "65536",
+ "hdfs_log_dir_prefix": "/var/log/hadoop",
+ "hdfs_user_nofile_limit": "128000",
+ "hdfs_user": "hdfs",
+ "hdfs_principal_name": "hdfs-test_cluster01@EXAMPLE.COM",
+ "keyserver_host": " ",
+ "namenode_opt_maxnewsize": "128m",
+ "hdfs_user_keytab": "/etc/security/keytabs/hdfs.headless.keytab",
+ "namenode_opt_maxpermsize": "256m",
+ "content": "\n# Set Hadoop-specific environment variables here.\n\n# The only required environment variable is JAVA_HOME. All others are\n# optional. When running a distributed configuration it is best to\n# set JAVA_HOME in this file, so that it is correctly defined on\n# remote nodes.\n\n# The java implementation to use. Required.\nexport JAVA_HOME={{java_home}}\nexport HADOOP_HOME_WARN_SUPPRESS=1\n\n# Hadoop home directory\nexport HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n\n# Hadoop Configuration Directory\n\n{# this is different for HDP1 #}\n# Path to jsvc required by secure HDP 2.0 datanode\nexport JSVC_HOME={{jsvc_path}}\n\n\n# The maximum amount of heap to use, in MB. Default is 1000.\nexport HADOOP_HEAPSIZE=\"{{hadoop_heapsize}}\"\n\nexport HADOOP_NAMENODE_INIT_HEAPSIZE=\"-Xms{{namenode_heapsize}}\"\n\n# Extra Java runtime options. Empty by default.\nexport HADOOP_OPTS=\"-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}\"\n\n# Command specific options appende
d to HADOOP_OPTS when specified\nHADOOP_JOBTRACKER_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}\"\n\nHADOOP_TASKTRACKER_OPTS=\"-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}\"\n\n{% if java_version < 8 %}\nSHARED_HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSiz
e={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT\"\nexport HADOOP_NAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-namenode/bin/kill-name-node\\\" -Dorg.mortbay.jetty.Request.maxFormContentSize=-1 ${HADOOP_NAMENODE_OPTS}\"\nexport HADOOP_DATANODE_OPTS=\"-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -XX:PermSize=128m -XX:MaxPermSize=256m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms
{{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS}\"\n\nexport HADOOP_SECONDARYNAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-secondarynamenode/bin/kill-secondary-name-node\\\" ${HADOOP_SECONDARYNAMENODE_OPTS}\"\n\n# The following applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m -XX:MaxPermSize=512m $HADOOP_CLIENT_OPTS\"\n\n{% else %}\nSHARED_HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -Xms{{name
node_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT\"\nexport HADOOP_NAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-namenode/bin/kill-name-node\\\" -Dorg.mortbay.jetty.Request.maxFormContentSize=-1 ${HADOOP_NAMENODE_OPTS}\"\nexport HADOOP_DATANODE_OPTS=\"-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS}\"\n\nexport HADOOP_SECONDARYNAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-secondarynamenode/bin/kill-secondary-name-node\\\" ${HADOOP_SECONDARY
NAMENODE_OPTS}\"\n\n# The following applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS\"\n{% endif %}\n\nHADOOP_NFS3_OPTS=\"-Xmx{{nfsgateway_heapsize}}m -Dhadoop.security.logger=ERROR,DRFAS ${HADOOP_NFS3_OPTS}\"\nHADOOP_BALANCER_OPTS=\"-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}\"\n\n\n# On secure datanodes, user to run the datanode as after dropping privileges\nexport HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER:-{{hadoop_secure_dn_user}}}\n\n# Extra ssh options. Empty by default.\nexport HADOOP_SSH_OPTS=\"-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR\"\n\n# Where log files are stored. $HADOOP_HOME/logs by default.\nexport HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER\n\n# History server logs\nexport HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER\n\n# Where log files are stored in the secure data environment.\nexport HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_D
N_USER\n\n# File naming remote slave hosts. $HADOOP_HOME/conf/slaves by default.\n# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves\n\n# host:path where hadoop code should be rsync'd from. Unset by default.\n# export HADOOP_MASTER=master:/home/$USER/src/hadoop\n\n# Seconds to sleep between slave commands. Unset by default. This\n# can be useful in large clusters, where, e.g., slave rsyncs can\n# otherwise arrive faster than the master can service them.\n# export HADOOP_SLAVE_SLEEP=0.1\n\n# The directory where pid files are stored. /tmp by default.\nexport HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER\nexport HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# History server pid\nexport HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER\n\nYARN_RESOURCEMANAGER_OPTS=\"-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY\"\n\n# A string representing this instance of hadoop. $USER by default.\nexport HADOOP_IDENT_STRING=$USER\n\n# The schedul
ing priority for daemon processes. See 'man nice'.\n\n# export HADOOP_NICENESS=10\n\n# Add database libraries\nJAVA_JDBC_LIBS=\"\"\nif [ -d \"/usr/share/java\" ]; then\n for jarFile in `ls /usr/share/java | grep -E \"(mysql|ojdbc|postgresql|sqljdbc)\" 2>/dev/null`\n do\n JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\n done\nfi\n\n# Add libraries to the hadoop classpath - some may not need a colon as they already include it\nexport HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}\n\n# Setting path to hdfs command line\nexport HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}\n\n# Mostly required for hadoop 2.0\nexport JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}\n\nexport HADOOP_OPTS=\"-Dhdp.version=$HDP_VERSION $HADOOP_OPTS\"\n\n{% if is_datanode_max_locked_memory_set %}\n# Fix temporary bug, when ulimit from conf files is not picked up, without full relogin. \n# Makes sense to fix only when runing DN as root \nif [ \"$command\" == \"datanode\" ] && [ \"$EUID\" -eq 0 ] && [ -n \"$HADOOP_S
ECURE_DN_USER\" ]; then\n ulimit -l {{datanode_max_locked_memory}}\nfi\n{% endif %}",
+ "namenode_heapsize": "1024m",
+ "namenode_opt_newsize": "128m",
+ "nfsgateway_heapsize": "1024",
+ "dtnode_heapsize": "1024m",
+ "hadoop_root_logger": "INFO,RFA",
+ "hadoop_heapsize": "1024",
+ "hadoop_pid_dir_prefix": "/var/run/hadoop",
+ "namenode_opt_permsize": "128m",
"hdfs_tmp_dir": "/tmp"
- },
+ },
"zookeeper-log4j": {
"content": "\n#\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n#\n#\n\n#\n# ZooKeeper Logging Configuration\n#\n\n# DEFAULT: consol
<TRUNCATED>